query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
Sets the 'reindex_data' value in the REST API to 0 to clear it. Splunk then automatically restarts the input.
def clear_checkbox(session_key, stanza):
    """Reset the 'reindex_data' flag on the given modular-input stanza.

    Posts ``reindex_data=0`` to the local Splunk REST endpoint for this
    input; Splunk then automatically restarts the input.

    :param session_key: Splunk session key used for authorization.
    :param stanza: name of the strava_api input stanza to clear.
    """
    rest_url = f'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/data/inputs/strava_api/{stanza}'
    auth_headers = {'Authorization': f'Splunk {session_key}'}
    body = 'reindex_data=0'
    # helper.send_http_request is provided by the Splunk add-on framework.
    helper.send_http_request(rest_url, "POST", headers=auth_headers, payload=body, verify=False, use_proxy=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def restart(self):\n # Note: No need to regenerate the data, just reset the idx\n self.prepare_for_use()", "def restart(self):\n # Note: No need to regenerate the data,...
[ "0.6248837", "0.6248837", "0.6248837", "0.6241557", "0.62066907", "0.6205658", "0.6196166", "0.61584055", "0.6134224", "0.61318415", "0.610773", "0.6060318", "0.60403645", "0.60403645", "0.5998498", "0.5993323", "0.5940291", "0.5935767", "0.5879322", "0.5871808", "0.5868277",...
0.0
-1
Gets all activities, 30 per page as per Strava's default.
def get_activities(ts_activity, access_token):
    """Fetch athlete activities newer than *ts_activity*.

    Strava returns 30 activities per page by default.

    :param ts_activity: epoch timestamp; only activities after it are returned.
    :param access_token: OAuth access token for the Strava API.
    :return: parsed JSON response from ``return_json``.
    """
    query_params = {'after': ts_activity, 'access_token': access_token}
    return return_json("https://www.strava.com/api/v3/activities", "GET", parameters=query_params)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_page_of_activities_return_all(self, StravaTokens1, Activity1, Activity2, Activity3):\n self.mock_get.return_value = Mock(ok=True)\n self.mock_get.return_value.json.return_value = [Activity1, Activity2, Activity3]\n strava_tokens = StravaTokens1\n response = get_page_of_acti...
[ "0.72672695", "0.70619905", "0.704842", "0.704842", "0.704842", "0.704842", "0.7015815", "0.6921878", "0.6793498", "0.6755012", "0.67519844", "0.6731834", "0.6626525", "0.65563905", "0.6493611", "0.6484105", "0.6461981", "0.64476633", "0.642003", "0.638269", "0.6301335", "0...
0.69375056
7
Gets the activity stream for given activity id.
def get_activity_stream(token, activity, types, series_type='time', resolution='high'):
    """Fetch the activity stream for a given activity id.

    :param token: OAuth access token.
    :param activity: Strava activity id.
    :param types: iterable of stream types; joined with commas for the URL.
    :param series_type: base series for the stream ('time' by default).
    :param resolution: stream resolution ('high' by default).
    :return: parsed JSON response from ``return_json``.
    """
    types = ','.join(types)
    # NOTE(review): the path embeds '&series_type=...' with no leading '?' —
    # this mirrors the upstream add-on, but confirm how
    # helper.send_http_request combines `parameters` with the URL.
    url = f'https://www.strava.com/api/v3/activities/{activity}/streams/{types}&series_type={series_type}&resolution={resolution}&key_by_type='
    query_params = {'access_token': token}
    return return_json(url, "GET", parameters=query_params, timeout=10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def activity(self, activity_id):\r\n return activities.Activity(self, activity_id)", "def get(self, id):\n activity = Activity().get(id)\n if not activity:\n abort(404, \"Activity not found\")\n return activity", "def activity(self, activity_id):\r\n return resourc...
[ "0.67917997", "0.6699494", "0.65698034", "0.6276299", "0.59187925", "0.5873506", "0.58734196", "0.56921214", "0.56341624", "0.5584019", "0.54834574", "0.54745483", "0.5373628", "0.5367137", "0.53547525", "0.53142124", "0.52937365", "0.5270165", "0.52317965", "0.5202036", "0.5...
0.59823203
4
Gets details on currently logged in athlete.
def get_athlete(token):
    """Return details of the currently logged-in athlete.

    :param token: OAuth access token for the Strava API.
    :return: parsed JSON response from ``return_json``.
    """
    query_params = {'access_token': token}
    return return_json("https://www.strava.com/api/v3/athlete", "GET", parameters=query_params, timeout=10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_to...
[ "0.62125313", "0.603639", "0.5979062", "0.5663854", "0.5658526", "0.5654593", "0.55721027", "0.5450002", "0.5400566", "0.5396792", "0.5345879", "0.5323987", "0.53042555", "0.52950203", "0.5274707", "0.5259616", "0.52581507", "0.5236326", "0.52285975", "0.52084494", "0.519947"...
0.636169
0
Converts Strava datetime to epoch timestamp
def get_epoch(timestamp):
    """Convert a Strava UTC timestamp string to an epoch integer.

    :param timestamp: string of the form ``2020-01-01T00:00:00Z`` (UTC).
    :return: integer seconds since the Unix epoch.
    """
    parsed = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%SZ")
    # timegm treats the struct_time as UTC, matching the trailing 'Z'.
    return calendar.timegm(parsed.timetuple())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def epoch2datetime(t):\n return datetime.fromtimestamp(t/1000.0)", "def datetime_to_epoch(datetime):\n return datetime.astype('int64') // 1e9", "def epoch(value):\n if isinstance(value, datetime.datetime):\n return int(calendar.timegm(value.timetuple())*1000)\n return '' #fails silently for ...
[ "0.82143974", "0.8190802", "0.78351754", "0.7794575", "0.76652604", "0.7584979", "0.7570103", "0.7562643", "0.75359434", "0.7468649", "0.7423047", "0.73089755", "0.73089755", "0.724977", "0.71811444", "0.71131325", "0.71113986", "0.7073459", "0.7016049", "0.7004898", "0.69872...
0.746134
10
Get or refresh access token from Strava API.
def get_token(client_id, client_secret, token, renewal):
    """Get or refresh an access token from the Strava OAuth endpoint.

    :param client_id: Strava application client id.
    :param client_secret: Strava application client secret.
    :param token: refresh token (when *renewal*) or one-off access code.
    :param renewal: True to refresh an existing token, False for first auth.
    :return: parsed JSON response from ``return_json``.
    """
    url = "https://www.strava.com/api/v3/oauth/token"
    payload = {'client_id': client_id, 'client_secret': client_secret}
    if renewal:
        payload.update({'refresh_token': token, 'grant_type': 'refresh_token'})
        message = "Successfully refreshed Strava token."
    else:
        payload.update({'code': token, 'grant_type': 'authorization_code'})
        message = "Successfully authenticated with Strava using access code."
    response = return_json(url, "POST", payload=payload)
    helper.log_info(message)
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def refresh_token(self):\n # basic function to get an access token\n api_response = requests.get(\n self.api_config.get_api_url() + \"authentication/g?username=\" + self.api_config.get_api_username() + \"&password=\" + self.api_config.get_api_password())\n\n if api_response.status_c...
[ "0.75720465", "0.7227093", "0.7181543", "0.713469", "0.7129814", "0.7058425", "0.7026847", "0.6986984", "0.6963477", "0.6925943", "0.6916204", "0.68907243", "0.6879203", "0.6810618", "0.67986274", "0.67908436", "0.67369515", "0.6724379", "0.67114365", "0.67013663", "0.6646587...
0.7200718
2
Stores athlete's id, first name, last name, weight and ftp into strava_athlete KV Store collection.
def kvstore_save_athlete(session_key, athlete_id, firstname, lastname, weight, ftp):  # pylint: disable=too-many-arguments
    """Store an athlete record in the ``strava_athlete`` KV Store collection.

    Saves id, first/last name, combined full name, weight and FTP via the
    collection's batch_save endpoint.

    :param session_key: Splunk session key used for authorization.
    :param athlete_id: Strava athlete id; also used as the KV Store key.
    """
    url = 'https://localhost:8089/servicesNS/nobody/TA-strava-for-splunk/storage/collections/data/strava_athlete/batch_save'
    auth_headers = {'Content-Type': 'application/json', 'Authorization': f'Splunk {session_key}'}
    record = {
        "_key": athlete_id,
        "id": athlete_id,
        "firstname": firstname,
        "lastname": lastname,
        "fullname": firstname + " " + lastname,
        "weight": weight,
        "ftp": ftp,
    }
    # batch_save expects a JSON list of documents.
    helper.send_http_request(url, "POST", headers=auth_headers, payload=[record], verify=False, use_proxy=False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_athlete(response):\n name = response['athlete']['firstname'] + \" \" + response['athlete']['lastname']\n athlete = {\n 'id': response['athlete']['id'],\n 'name': name,\n 'access_token': response['access_token'],\n 'refresh_to...
[ "0.65488034", "0.5803945", "0.56478375", "0.52747154", "0.5271561", "0.52378386", "0.51092994", "0.50849545", "0.50378954", "0.5033445", "0.4992036", "0.49815983", "0.4975764", "0.4963135", "0.49445814", "0.48963758", "0.48822185", "0.48742172", "0.48721516", "0.48705444", "0...
0.8156723
0
Gets raw JSON data, parses it into events and writes those to Splunk.
def parse_data(data, activity_id, activity_start_date):
    """Parse raw Strava stream JSON into per-sample events and write them to Splunk.

    Each element of *data* is a stream dict with 'type' and 'data' keys; the
    streams are pivoted into one event per sample index, then written with
    sourcetype 'strava:activities:stream'.

    :param data: list of stream dicts as returned by the Strava streams API.
    :param activity_id: id of the activity the streams belong to.
    :param activity_start_date: epoch start time, added to relative 'time' values.
    :return: True on completion.
    """
    data_dict = {}
    final_dict = {}
    # Index each stream by its type, e.g. data_dict['time'] -> [0, 1, 2, ...].
    for i in data:
        data_dict[i['type']] = i['data']
    counter = 1
    # The 'time' stream defines how many samples every stream has.
    nrange = len(data_dict['time'])
    # Pre-create one event dict per sample (1-based keys).
    for item in range(1, nrange + 1):
        final_dict[item] = {}
    for key, value in data_dict.items():
        counter = 1
        for i in value:
            final_dict[counter][key] = i
            final_dict[counter]['activity_id'] = activity_id
            if 'time' in key:
                # Relative seconds -> absolute epoch -> ISO-8601 UTC string.
                final_dict[counter]['time'] = final_dict[counter]['time'] + activity_start_date
                final_dict[counter]['time'] = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(final_dict[counter]['time']))
            if 'latlng' in key:
                # Split the [lat, lng] pair into separate fields.
                final_dict[counter]['lat'] = final_dict[counter]['latlng'][0]
                final_dict[counter]['lon'] = final_dict[counter]['latlng'][1]
                final_dict[counter].pop('latlng')
            counter += 1
    result_list = [value for key, value in final_dict.items()]
    for event in result_list:
        write_to_splunk(index=helper.get_output_index(), sourcetype='strava:activities:stream', data=json.dumps(event))
    # NOTE(review): `athlete_id` is not defined in this function — presumably a
    # module-level/global set by the caller; confirm, otherwise this raises NameError.
    helper.log_info(f'Added activity stream {activity_id} for {athlete_id}.')
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_data(self, raw_data):\n data = json.loads(raw_data)\n\n tweet = None\n includes = {}\n errors = []\n matching_rules = []\n\n if \"data\" in data:\n tweet = Tweet(data[\"data\"])\n self.on_tweet(tweet)\n if \"includes\" in data:\n ...
[ "0.5950303", "0.58063334", "0.5770824", "0.5747445", "0.56922185", "0.5671852", "0.5653861", "0.56337476", "0.5627374", "0.5618672", "0.553584", "0.55230635", "0.54872155", "0.5470855", "0.54696006", "0.5466001", "0.54565036", "0.5411228", "0.53876317", "0.53832644", "0.53710...
0.5168414
48
Gets JSON from URL and parses it for potential error messages.
def return_json(url, method, **kwargs):
    """Send an HTTP request and return parsed JSON, handling Strava API errors.

    On 429 (rate limit) sleeps until the next 15-minute boundary and retries;
    on 400/401 logs and exits; on 404/500 logs a warning and returns False;
    any other HTTP error logs and exits.

    :param url: request URL.
    :param method: HTTP method string, e.g. "GET" or "POST".
    :param kwargs: extra arguments forwarded to helper.send_http_request.
    :return: decoded JSON on success, or False for 404/500 responses.
    """
    response = helper.send_http_request(url, method, use_proxy=False, **kwargs)
    try:
        response.raise_for_status()
    except requests.HTTPError as ex:
        # status code 429 means we hit Strava's API limit, wait till next 15 minute mark (+5 seconds) and try again
        if ex.response.status_code == 429:
            # Get the 15m/24h API limits for this user
            api_usage_15m = response.headers['X-RateLimit-Usage'].split(",")[0]
            api_usage_24h = response.headers['X-RateLimit-Usage'].split(",")[1]
            api_limit_15m = response.headers['X-RateLimit-Limit'].split(",")[0]
            api_limit_24h = response.headers['X-RateLimit-Limit'].split(",")[1]
            timestamp_now = int(time.time())
            # Sleep until the next quarter-hour boundary plus a 5-second margin.
            modulus_time = timestamp_now % 900
            sleepy_time = 0 if modulus_time == 0 else (900 - modulus_time + 5)
            helper.log_warning(f'Strava API rate limit hit. Used {api_usage_15m}/15min (limit {api_limit_15m}), {api_usage_24h}/24h (limit {api_limit_24h}). Sleeping for {sleepy_time} seconds.')
            time.sleep(sleepy_time)
            # Retry recursively after the back-off.
            response = return_json(url, method, **kwargs)
            helper.log_debug(f'429 detail: {response}')
            return response
        if ex.response.status_code in (400, 401):
            helper.log_error(f'{ex.response.status_code} Error: Strava API credentials invalid or session expired. Make sure Client ID & Client Secret have been added to the Configuration -> Add-On Parameters tab and your access code is valid.')
            sys.exit(1)
        if ex.response.status_code == 404:
            helper.log_warning(f'404 Error: no stream data for url {url}, can happen for manually added activities.')
            return False
        if ex.response.status_code == 500:
            helper.log_warning(f'500 Error: no data received from Strava API for url {url}, it might be corrupt or invalid. Skipping activity.')
            return False
        # In case there's any other error than the ones described above, log the error and exit.
        helper.log_error(f'Error: {ex}')
        sys.exit(1)
    # Must have been a 200 status code
    return response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _process_url(self, url):\n response = requests.get(url, timeout=self.TIMEOUT)\n try:\n ret = response.json()\n except JSONDecodeError:\n self.log.exception(\"JSONDecodeError, response: %r, response.text: %r\", response, response.text)\n ret = {\"error\": \"...
[ "0.79039305", "0.7806594", "0.77826935", "0.74478185", "0.7402855", "0.73680866", "0.7356292", "0.73440784", "0.73038524", "0.729605", "0.729605", "0.729605", "0.729605", "0.72762716", "0.7274411", "0.7236561", "0.7228294", "0.7226799", "0.7206968", "0.71908146", "0.7171582",...
0.0
-1
Creates dict with athlete details, including token expiry.
def set_athlete(response):
    """Build an athlete dict from a Strava OAuth token response.

    :param response: decoded token-exchange response containing an 'athlete'
        section plus access/refresh tokens and expiry.
    :return: dict with id, name, tokens, expiry, and ts_activity reset to 0.
    """
    athlete_info = response['athlete']
    full_name = athlete_info['firstname'] + " " + athlete_info['lastname']
    return {
        'id': athlete_info['id'],
        'name': full_name,
        'access_token': response['access_token'],
        'refresh_token': response['refresh_token'],
        'expires_at': response['expires_at'],
        'ts_activity': 0,
    }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_athlete(token):\n url = \"https://www.strava.com/api/v3/athlete\"\n params = {'access_token': token}\n response = return_json(url, \"GET\", parameters=params, timeout=10)\n return response", "def asdict(self):\n return {\n \"access_token\": se...
[ "0.6095435", "0.5521886", "0.5451787", "0.5388297", "0.5352944", "0.5180581", "0.5166357", "0.5155647", "0.5131322", "0.51014733", "0.50938517", "0.50851166", "0.5080323", "0.5078669", "0.505904", "0.50572944", "0.5016414", "0.50079054", "0.49948767", "0.49728522", "0.4964966...
0.7620931
0
Writes activity to Splunk index.
def write_to_splunk(**kwargs):
    """Create a Splunk event from the given fields and write it to the index.

    :param kwargs: forwarded verbatim to helper.new_event (index, sourcetype,
        data, etc.).
    """
    ew.write_event(helper.new_event(**kwargs))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save(self, data):\n activities = [json.loads(activity['Json']) for activity in data]\n\n for i in range(len(activities)):\n activities[i]['created_at'] = to_datetime(activities[i]['created_at'])\n\n with Elastic(index='wink', doc_type='activity') as elastic:\n elastic...
[ "0.6238934", "0.5661036", "0.56116706", "0.55407304", "0.54127765", "0.5401947", "0.5389567", "0.5381006", "0.53470606", "0.53407896", "0.5288701", "0.5285223", "0.5240927", "0.5197521", "0.5191341", "0.51774335", "0.5176707", "0.51560175", "0.5123677", "0.50669056", "0.50669...
0.5662735
1
hamgiin baga yoronhii huvaagdagchiig oloh function
def lcm(*values):
    """Return the least common multiple of the given values.

    Values are truncated with ``int()`` and taken by absolute value, so
    negatives and floats are accepted. Returns 0 when no values are given
    or when any value truncates to 0 (matching the original behavior).

    The original implementation stepped through multiples of the maximum,
    which is astronomically slow for large coprime inputs; this uses the
    standard ``lcm(a, b) = a*b // gcd(a, b)`` reduction instead.

    :param values: numbers to combine.
    :return: non-negative integer LCM, or 0 for empty/zero input.
    """
    from math import gcd  # local import keeps the module's import surface unchanged
    nums = {abs(int(v)) for v in values}
    if not nums or 0 in nums:
        return 0
    result = 1
    for m in nums:
        result = result * m // gcd(result, m)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pohyb(seznam_tahu, seznam_ovoce, tah,radky, sloupce):\n\n x= seznam_tahu [len(seznam_tahu)-1][0] # [x,y] souradnice noveho tahu\n y= seznam_tahu [len(seznam_tahu)-1][1]\n\n if tah == \"s\": #sever\n y -= 1\n elif tah == \"j\": ...
[ "0.73375344", "0.6515627", "0.6515627", "0.6515627", "0.6515627", "0.6515627", "0.63205355", "0.63011354", "0.62365353", "0.60986763", "0.60985714", "0.60890335", "0.5958645", "0.5890988", "0.5886411", "0.58524626", "0.58322126", "0.58077747", "0.57773924", "0.5761866", "0.57...
0.0
-1
objectiin querysetiig avna. Tuhain querysetiin date_time uy deh datag excel export hiine
def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=None):
    """Export the queryset's state at *date_time* to an Excel worksheet.

    Writes the header, then one row per object via ``object_excel_write``;
    writes a "no data" message when the queryset is empty.

    Bug fix: the default used to be ``date_time=timezone.now()``, which is
    evaluated ONCE at import time, freezing the timestamp for every later
    call. It is now resolved per call.

    :param worksheet: xlsxwriter worksheet to write into.
    :param row_start: first row index for output.
    :param col_start: first column index for output.
    :param queryset: objects to export; may be empty.
    :param date_time: history point to export; defaults to the current time.
    """
    if date_time is None:
        date_time = timezone.now()
    if queryset:
        [row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)
        for q in queryset:
            # object_excel_write renders the object's history at date_time.
            [row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)
    else:
        worksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_ho_dan_as_excel_action(fields=None, exclude=None, header=True):\n def export_as_excel(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = [\"name\", \"status\", \"location\", \"tinh\",\n \"xa\", \"huyen\", \"phone\", \"cuuho\", \"update_...
[ "0.679835", "0.6526606", "0.6513021", "0.63559425", "0.6347008", "0.6267613", "0.61500996", "0.604096", "0.59455335", "0.5921458", "0.58053875", "0.5804869", "0.57998806", "0.5739592", "0.57366", "0.5705012", "0.5652193", "0.5652061", "0.564152", "0.56380385", "0.5635472", ...
0.75960785
0
objectiin querysetiig avna. Tuhain querysetiin date_time uy deh datag excel export hiine
def export_to_excel(self, workbook, tailan_queryset):
    """Export every report section of *tailan_queryset* to its own worksheet.

    For each section a worksheet is created, the section model writes its
    header, and then each report ('tailan') contributes its related objects
    at that report's date via ``export_to_excel_without_header``.

    Refactor: the original repeated the identical worksheet block 15 times;
    the per-sheet differences are captured in ``sheet_specs`` below.
    Bug fix: the empty-queryset branch referenced ``worksheet``, ``row_start``
    and ``col_start`` — names that do not exist in this scope (NameError);
    it now writes the message to a dedicated sheet.

    :param workbook: xlsxwriter workbook to add worksheets to.
    :param tailan_queryset: reports to export; may be empty.
    """
    if not tailan_queryset:
        worksheet = workbook.add_worksheet(u'Мэдээлэл')
        worksheet.write_string(5, 1, u'Мэдээлэл байхгүй')
        return
    # (sheet title, model providing header/export helpers,
    #  attr on the report, related-manager attr on that object)
    sheet_specs = [
        (u'Гүний худаг', Hudag, 'gunii_hudags', 'hudags'),
        (u'Цэвэршүүлэх байгууламж', Ts_baiguulamj, 'tsevershuuleh', 'tsevershuuleh'),
        (u'Цэвэрлэх байгууламж', Ts_baiguulamj, 'tseverleh', 'tseverleh'),
        (u'Усан сан', UsanSan, 'usansan', 'usan_sans'),
        (u'Цэвэр усны насос станц', NasosStants, 'tsever_nasos_stants', 'nasos_stantss'),
        (u'Бохир усны насос станц', NasosStants, 'bohir_nasos_stants', 'nasos_stantss'),
        (u'Лаборатори', Lab, 'lab', 'labs'),
        (u'Цэвэр усны шугам', Sh_suljee, 'tsever_usnii_shugam', 'sh_suljees'),
        (u'Бохир усны шугам', Sh_suljee, 'bohir_usnii_shugam', 'sh_suljees'),
        (u'АХББ', ABB, 'abb', 'abbs'),
        (u'Ус, дулаан дамжуулах төв', UsDamjuulahBair, 'us_damjuulah_tov', 'usDamjuulahBair'),
        (u'Ус түгээх байр', UsTugeehBair, 'us_tugeeh', 'us_tugeeh_bairs'),
        (u'Цэвэр усны машин', WaterCar, 'water_car', 'water_cars'),
        (u'Бохир усны машин', BohirCar, 'bohir_car', 'bohir_cars'),
        (u'Ажилчдын судалгаа', Ajiltan, 'ajiltans', 'ajiltans'),
    ]
    for sheet_title, model, tailan_attr, manager_attr in sheet_specs:
        worksheet = workbook.add_worksheet(sheet_title)
        row_write = 5
        col_write = 1
        [row_write, col_write] = model.excel_write_header_and_format(worksheet=worksheet, row_start=row_write, col_start=col_write)
        for tailan in tailan_queryset:
            related = getattr(tailan, tailan_attr)
            if related:
                queryset = getattr(related, manager_attr).all()
                [row_write, col_write] = model.export_to_excel_without_header(worksheet=worksheet, row_start=row_write, col_start=col_write, queryset=queryset, date_time=tailan.tailan_date)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=timezone.now()):\n\t\tif queryset:\n\t\t\t[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\tfor q in queryset:\n\t\t\t\t# object_excel_write function---date_time uyiin history objectiig ...
[ "0.7596945", "0.6799895", "0.65263826", "0.6513705", "0.6357183", "0.62692904", "0.61503154", "0.60432583", "0.5946482", "0.5921336", "0.58059806", "0.5805394", "0.5800253", "0.5739457", "0.57385826", "0.57053816", "0.5653567", "0.56533146", "0.56427974", "0.5640495", "0.5637...
0.6348618
5
Originates from external call from trigger system
def start(self, data):
    """Handle a 'start' event from the external trigger system.

    Resets playback state, then applies bpm/timesigniture/sequence/scene
    settings found in *data*.

    :param data: event payload dict; recognised keys are 'time_offset',
        'bpm', 'timesigniture', 'sequence' and 'scene'.
    """
    log.info(data)
    self.stop()
    # Anchor the clock, compensating for any offset the trigger supplies.
    self.time_start = time.time() - data.get('time_offset', 0) - self.time_offset
    self.bpm = float(data.get('bpm', self.DEFAULT_BPM))
    self.timesigniture = parse_timesigniture(data.get('timesigniture', DEFAULT_TIMESIGNITURE))
    requested_sequence = data.get('sequence')
    if requested_sequence:
        assert requested_sequence in self.sequences, '{0} is not a known sequence'.format(requested_sequence)
        self.sequence = self.sequences[requested_sequence]
    if data.get('scene'):
        # Single scene - fake the sequence list by inserting just that scene's name.
        self.sequence = (data.get('scene', self.DEFAULT_SCENE_NAME), )
    self.sequence_index = 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, trigger, type, event):", "def triggered(self, *args, **kwargs): # real signature unknown\n pass", "def call(self):", "def trigger(self, type, event):", "def fire(self):", "def __do_trigger(self, request):\n dmp_trigger.DmpTrigger().trigger(request)\n return defines...
[ "0.70280033", "0.6894477", "0.6630215", "0.65832853", "0.6425316", "0.64154744", "0.6267138", "0.62295663", "0.616958", "0.6122845", "0.6121851", "0.6086105", "0.60141927", "0.5989397", "0.5989397", "0.5989397", "0.5989397", "0.5890496", "0.5888235", "0.5888235", "0.5888235",...
0.0
-1
Originates from external call from trigger system
def stop(self, data=None):
    """Handle a 'stop' event from the external trigger system.

    Resets all playback state back to defaults. *data* is accepted for
    symmetry with the other trigger handlers but is not used.

    Bug fix: the parameter previously defaulted to a mutable ``{}``
    (shared across calls); ``None`` is the safe equivalent and preserves
    the call signature.
    """
    self.time_start = 0
    self.time_mutator = 0
    self.sequence = ()
    self.sequence_index = None
    self.bpm = self.DEFAULT_BPM
    # NOTE(review): trailing underscore looks deliberate but resembles a typo
    # for DEFAULT_TIMESIGNITURE (used in start()) — confirm the constant exists.
    self.timesigniture = DEFAULT_TIMESIGNITURE_
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, trigger, type, event):", "def triggered(self, *args, **kwargs): # real signature unknown\n pass", "def call(self):", "def trigger(self, type, event):", "def fire(self):", "def __do_trigger(self, request):\n dmp_trigger.DmpTrigger().trigger(request)\n return defines...
[ "0.70280033", "0.6894477", "0.6630215", "0.65832853", "0.6425316", "0.64154744", "0.6267138", "0.62295663", "0.616958", "0.6122845", "0.6121851", "0.6086105", "0.60141927", "0.5989397", "0.5989397", "0.5989397", "0.5989397", "0.5890496", "0.5888235", "0.5888235", "0.5888235",...
0.0
-1
When a track is started we receive a lighting.start event The audio in the html5 player may have a seek time. This is to start part way into a test recording This seek time update is fired the moment the audio starts playing and within milliseconds of the lighting.start event. This 'first seek' sent within milliseconds of the 'start' event should not actually seek in the lighting scene. As self.time_start is calculated with global offset. We need to know the ACTUAL time since the 'trigger started' to detect our seek to ignore
def _seek(self, time_offset):
    """Apply a seek from the HTML5 player.

    A seek arriving within 100ms of the 'start' event is treated as the
    automatic bounceback from test_audio and absorbed into a time mutator
    rather than a real user seek.

    :param time_offset: seconds into the track to seek to.
    """
    elapsed_since_start = time.time() - (self.time_start + self.time_offset)
    if elapsed_since_start < 0.1:
        log.info('Seek recived within 100ms of start - Assuming this is a bounceback from test_audio - applying automatic time mutator of {0}s'.format(time_offset))
        self.time_mutator = time_offset
        self.time_start = time.time() - time_offset
    log.info('seek {0}'.format(time_offset))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setTrackStartTime() :\n s.startTrack()", "def audio_cd_start_track_time(self, audio_cd_start_track_time):\n self._audio_cd_start_track_time = audio_cd_start_track_time", "def seek_to_start_time(self):\n return 0", "def start_time(self):\n return RPR.GetAudioAccessorStartTime(self....
[ "0.680665", "0.66605866", "0.6150333", "0.6049713", "0.59493154", "0.58593935", "0.58006895", "0.57242954", "0.56212443", "0.55866975", "0.5570046", "0.55602914", "0.55241376", "0.5506695", "0.55048025", "0.55008477", "0.54706705", "0.54700154", "0.5436743", "0.538599", "0.53...
0.5973443
4
Durations are 'dict string keys'. The keys need to be converted to floats. The keys need to be ordered and the scenes returned with calculated durations
def parse_scene_order(self, data, timesigniture):
    """Order scenes by their (float) keys and resolve every duration.

    Keys are 'dict string keys' that may be numbers or timecodes; they are
    parsed to floats, sorted, and each scene's duration normalised (aliases
    like 'match_next' resolved, gaps filled from the next key).

    :param data: mapping of key -> scene dict (or falsy placeholder).
    :param timesigniture: used when converting timecode keys to beats.
    :return: tuple () for empty input, else list of scene dicts in key order.
    """
    if not data:
        return ()
    num_scenes = len(data)

    def attempt_parse_key_timecode(value):
        # Try float first, then timecode-to-beat conversion; fall back to
        # returning the value unchanged (e.g. alias strings like 'match_next').
        if not value:
            return value
        try:
            return float(value)
        except (ValueError, TypeError):
            pass
        try:
            return timecode_to_beat(value, timesigniture)
        except (AssertionError, ValueError, AttributeError):
            pass
        return value
    # Surface the original key value in the dict (useful for debugging)
    for key, value in data.items():
        if value:
            value['key'] = key
    data_float_indexed = {attempt_parse_key_timecode(k): v for k, v in data.items()}
    # Key parsing must not collapse two keys into one.
    assert len(data_float_indexed) == num_scenes
    sorted_keys = sorted(data_float_indexed.keys())
    assert len(sorted_keys) == num_scenes

    def normalise_duration(index):
        """
        Convert any time code or alias to a linear float value. e.g.
        '1.2' parses to -> 1.5
        'match_next' resolves to -> 4.0
        """
        key = sorted_keys[index]
        item = data_float_indexed[key]
        if not item:
            # Empty scene placeholder: give it an auto duration.
            item = {'duration': 'auto'}
            data_float_indexed[key] = item
        duration = attempt_parse_key_timecode(item.get('duration'))
        # Aliases recurse into the neighbouring / referenced scene.
        if duration == 'match_next':
            duration = normalise_duration(index+1)
        if duration == 'match_prev':
            duration = normalise_duration(index-1)
        if isinstance(duration, str) and duration.startswith('match '):
            duration = normalise_duration(sorted_keys.index(float(duration.strip('match '))))
        # 'auto'/missing duration: span the gap to the next scene's key.
        if (not duration or duration == 'auto') and index < len(sorted_keys)-1:
            duration = sorted_keys[index+1] - key
        if not isinstance(duration, float):
            #log.info('Unparsed duration: {0}'.format(duration))
            duration = self.DEFAULT_DURATION
        if duration != item.get('duration'):
            item['duration'] = duration
        return duration
    for index in range(len(sorted_keys)):
        normalise_duration(index)
    scene_items = []
    for key in sorted_keys:
        scene_item = data_float_indexed[key]
        assert scene_item and scene_item.get('duration') >= 0, "All scene must have durations. Something has failed in parsing. {0}:{1}".format(key, scene_item)
        scene_items.append(scene_item)
    return scene_items
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def normalise_duration(index):\n key = sorted_keys[index]\n item = data_float_indexed[key]\n if not item:\n item = {'duration': 'auto'}\n data_float_indexed[key] = item\n duration = attempt_parse_key_timecode(item.get('duration'))\n ...
[ "0.6176038", "0.59368414", "0.5897897", "0.58419317", "0.5807034", "0.5672501", "0.56141585", "0.5577464", "0.555346", "0.5521533", "0.5519391", "0.55059147", "0.5491283", "0.54520303", "0.5369046", "0.5366904", "0.53086877", "0.52912253", "0.52870804", "0.5256063", "0.522170...
0.6748647
0
Convert any time code or alias to a linear float value. e.g. '1.2' parses to -> 1.5 'match_next' resolves to -> 4.0
def normalise_duration(index): key = sorted_keys[index] item = data_float_indexed[key] if not item: item = {'duration': 'auto'} data_float_indexed[key] = item duration = attempt_parse_key_timecode(item.get('duration')) if duration == 'match_next': duration = normalise_duration(index+1) if duration == 'match_prev': duration = normalise_duration(index-1) if isinstance(duration, str) and duration.startswith('match '): duration = normalise_duration(sorted_keys.index(float(duration.strip('match ')))) if (not duration or duration == 'auto') and index < len(sorted_keys)-1: duration = sorted_keys[index+1] - key if not isinstance(duration, float): #log.info('Unparsed duration: {0}'.format(duration)) duration = self.DEFAULT_DURATION if duration != item.get('duration'): item['duration'] = duration return duration
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_speed(as_str: str) -> float:\n return float(as_str.rstrip(\"x\"))", "def _fs (v):\r\n try : \r\n v = float(v)\r\n except : \r\n v = tuple([float (ss) for ss in \r\n v.replace('(', '').replace(')', '').split(',')])\r\n ...
[ "0.6765329", "0.66060674", "0.6591223", "0.6582236", "0.65468556", "0.65312254", "0.6518258", "0.645661", "0.6408871", "0.6333807", "0.63263595", "0.62676144", "0.62387794", "0.6207294", "0.61648494", "0.61467373", "0.61412436", "0.61278975", "0.6122148", "0.6112401", "0.6082...
0.0
-1
Once the order of the items is known, we can iterate over the scenes calculating/prerendering the dmx state for each section This make seeking much faster
def pre_render_scene_item(self, current_scene_item, previous_scene_item): assert current_scene_item current_scene_dmx = current_scene_item.setdefault(Scene.SCENE_ITEM_DMX_STATE_KEY, {}) # Aquire a reference to the previous DMX state current_scene_dmx['previous'] = copy.copy(previous_scene_item.get(Scene.SCENE_ITEM_DMX_STATE_KEY, {})['target']) if previous_scene_item else AbstractDMXRenderer.new_dmx_array() # The target state is a copy of the previous state current_scene_dmx['target'] = copy.copy(current_scene_dmx['previous']) # Modify the starting/previous state based on any overrides in this scene (this is a shortcut feature as I kept requireing this) self.render_state_dict(current_scene_item.get('state_start'), current_scene_dmx['previous']) # Modify the target state based on this scene item self.render_state_dict(current_scene_item.get('state'), current_scene_dmx['target'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def loadData(self, actions):\n # begin to clear the scene\n self.scene.clear()\n self.scene.drawGrid()\n \n # and draw all items\n maxItemId = self.itemId\n for graphicalIt...
[ "0.5440126", "0.5405451", "0.5320529", "0.5313562", "0.5288161", "0.52768517", "0.52676857", "0.52513534", "0.52176017", "0.521186", "0.5171025", "0.5155609", "0.5147708", "0.51323056", "0.51169014", "0.5096112", "0.5093142", "0.5089115", "0.50864947", "0.5068929", "0.5037355...
0.56032324
0
Given a state dict in the form of
def render_state_dict(self, target_state, dmx_universe_target): if not target_state: return # Copy the alias over this bytearray if isinstance(target_state, str): target_state = {'use': target_state} alias_name = target_state.get('use') if alias_name: assert alias_name in self.dmx_universe_alias, "alias '{0}' not defined".format(alias_name) dmx_universe_target[:] = self.dmx_universe_alias[alias_name] # Render items for dmx_device_name, color_value in target_state.items(): self.config.render_device(dmx_universe_target, dmx_device_name, color_value) # Mute items for dmx_device_name in self.mute_devices: self.config.render_device(dmx_universe_target, dmx_device_name, None) # Add an alias for this state if a name is provided if target_state.get('name'): self.dmx_universe_alias[target_state.get('name')] = dmx_universe_target
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fromState(state):", "def load_state_dict(self, state_dict: Dict[str, torch.Tensor]):\n pass", "def load_from_state_dict(self, state_dict):\n raise NotImplementedError", "def load_state_dict(\n self,\n state_dict: Mapping[str, Any],\n *args,\n **kwargs,\n ) -> ...
[ "0.77548754", "0.7265476", "0.6977761", "0.69326776", "0.69324243", "0.69324243", "0.693159", "0.68787336", "0.68787336", "0.6834486", "0.6827123", "0.6820761", "0.6802007", "0.6770729", "0.6729137", "0.6722943", "0.6710804", "0.6681896", "0.6674843", "0.66132194", "0.6602126...
0.0
-1
Given a list of parsed scene_items (a plain list of dicts) Provide methods for redering that data timesigniture is only used for debug printing
def __init__(self, scene_items, timesigniture=DEFAULT_TIMESIGNITURE_): self.scene_items = scene_items self.total_beats = sum(scene_item['duration'] for scene_item in self.scene_items) self.timesigniture = timesigniture
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_items_function(self):\n ars = self.ar[2009][11]['general']\n self.assertEqual(list(ars.items()), [('LastLine', ['20091202000343', '1011585', '206082338', '54716901457']), ('FirstTime', ['20091101000237']), ('LastTime', ['20091130234113']), ('LastUpdate', ['20091201094510', '1011585', '0', '8...
[ "0.5740542", "0.56621575", "0.5524638", "0.5478715", "0.54087716", "0.53677016", "0.53536004", "0.52064437", "0.51765895", "0.5169679", "0.51359755", "0.51294976", "0.5126739", "0.5117502", "0.51156247", "0.511032", "0.49845767", "0.49424937", "0.49424642", "0.49300858", "0.4...
0.64365894
0
Return a list of all live Python objects, not including the list itself.
def get_all_objects(): gc.collect() gcl = gc.get_objects() olist = [] seen = {} # Just in case: seen[id(gcl)] = None seen[id(olist)] = None seen[id(seen)] = None # _getr does the real work. _getr(gcl, olist, seen) return olist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def get_all_objects():\n gcl = gc.get_o...
[ "0.736436", "0.729643", "0.6785282", "0.6629329", "0.65524584", "0.6406", "0.6404724", "0.63662046", "0.6298751", "0.62828684", "0.62828684", "0.62828684", "0.62828684", "0.62828684", "0.62828684", "0.626183", "0.6255329", "0.6242195", "0.61547345", "0.6152571", "0.6122982", ...
0.7565201
0
Convert um to m.
def convert_units(self): for prod in ("ier", "ier_inc_rain"): self.data[prod].data[:] /= 1e6
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def NM2m(NM):\n return NM * 1852", "def m_to_mm(meters):\n return meters * 1000.0", "def nm2m(self):\n return self._nm2m", "def m_to_mm(): \n # Set blender unit in mm\n bpy.context.scene.unit_settings.scale_length = 0.001\n bpy.context.scene.unit_settings.length_unit = 'MIL...
[ "0.69458026", "0.68633324", "0.68530375", "0.66572696", "0.65932775", "0.65664166", "0.6551626", "0.65338874", "0.63150936", "0.62804407", "0.6278391", "0.62339175", "0.62174183", "0.6206542", "0.61768055", "0.61204433", "0.6057365", "0.60438204", "0.59712124", "0.5944185", "...
0.0
-1
Adds 8% tax to a restaurant bill.
def tax(bill): bill *= 1.08 print "With tax: %.2f" % bill return bill
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_tax(self,tax):\n return self.price + (self.price * tax)", "def tax(bill):\r\n bill *= 1.08\r\n print(\"With tax: %f\" % bill)\r\n return bill", "def tax(bill):\n bill *= 1.08\n print \"With tax: %f\" % bill\n return bill", "def tax(bill):\n bill *= 1.08\n print \"With t...
[ "0.7644522", "0.758284", "0.7505996", "0.7505996", "0.7157527", "0.7034859", "0.70066726", "0.69643134", "0.65901643", "0.65687346", "0.64239335", "0.6421261", "0.6313586", "0.62876344", "0.62859863", "0.6272636", "0.6272636", "0.6179959", "0.605302", "0.60120994", "0.5976049...
0.7485274
4
Adds 15% tip to a restaurant bill.
def tip(bill): bill *= 1.15 print "With tip: %.2f" % bill return bill
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tip(bill):\r\n bill *= 1.15\r\n print(\"With tip: %f\" % bill)\r\n return bill", "def tip(bill):\n bill *= 1.15\n print \"With tip: %f\" % bill\n return bill", "def tip(bill):\n bill *= 1.15\n print \"With tip: %f\" % bill\n return bill", "def tip(bill):\n bill *= 1.15\n ...
[ "0.7132613", "0.70187575", "0.70187575", "0.70187575", "0.6926353", "0.63815933", "0.6208463", "0.6160494", "0.6047981", "0.6044608", "0.6044608", "0.601777", "0.58578914", "0.5761912", "0.5701724", "0.56948024", "0.565889", "0.55814856", "0.5531272", "0.55295736", "0.550463"...
0.6996032
4
Returns the square of a number.
def square(n): squared = n**2 print "%d squared is %d." % (n, squared) return squared
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def square(num):\n square = num ** 2\n return square", "def square_number(number: int) -> int:\n return number * number", "def square(num):\n return num * num", "def square(value):\n return value ** 2", "def my_square(x):\n return x ** 2", "def square(n: int) -> int:\n return int(n *...
[ "0.86342794", "0.8617322", "0.8552498", "0.80486435", "0.80462384", "0.79968894", "0.7947229", "0.79286706", "0.7911237", "0.7883522", "0.78671736", "0.78318316", "0.78140074", "0.78127563", "0.7788222", "0.77676326", "0.7688305", "0.7677109", "0.76735294", "0.763642", "0.762...
0.78975934
11
The constructor for Particle Class
def __init__(self, position=np.array([0,0,0], dtype=float), velocity=np.array([0,0,0], dtype=float), acceleration=np.array([0,0,0], dtype=float), name='A Particle', restMass=1.0, charge=const.elementary_charge): self.name = name self.position = np.array(position, dtype=float) self.velocity = np.array(velocity, dtype=float) self.acceleration = np.array(acceleration, dtype=float) self.restMass = restMass self.charge = charge self.electricField = PointElectricFieldClass(sourceParticle=self , name='Field from %s'%(self.name)) self.magneticField = PointMagneticFieldClass(sourceParticle=self , name='Field from %s'%(self.name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, particles):\n self.particles = particles", "def __init__(self,particle):\n self.par = particle", "def particle(self) -> Particle:\n return Particle()", "def particle(self) -> Particle:\n return Particle()", "def __init__(self, init_pos_1, init_pos_2, M_1, M_2,...
[ "0.846487", "0.8411835", "0.7966551", "0.7966551", "0.7707181", "0.76299787", "0.74913836", "0.7429638", "0.73224574", "0.72718245", "0.7198867", "0.699789", "0.6965769", "0.6918702", "0.6917369", "0.68837965", "0.68785137", "0.68018293", "0.67048806", "0.6683209", "0.6675657...
0.7450836
7
Method that returns the rest energy of the particle.
def RestEnergy(self): return (self.restMass * const.speed_of_light * const.speed_of_light)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def TotalEnergy(self):\n return (math.sqrt((Particle.RestEnergy(self) ** 2)\n + (np.linalg.norm(Particle.Momentum(sel...
[ "0.7668576", "0.7595888", "0.7482029", "0.7454896", "0.74329084", "0.74117565", "0.7405934", "0.7287761", "0.7283499", "0.70380706", "0.70380706", "0.6973054", "0.69607884", "0.6895652", "0.67723006", "0.6750274", "0.67392176", "0.67377317", "0.6689237", "0.6675212", "0.66727...
0.7802598
0
Method that returns Beta (velocity/speed of light) as a float
def BetaVelocity(self): return np.linalg.norm(self.velocity) / const.speed_of_light
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def betaT(self):\n if self.maTail > 1:\n return 0\n else:\n return sqrt(1 - self.maTail**2)", "def getBeta(self, alpha):\n return 2.0*(2.0-alpha) + -4.0*np.sqrt(1.0-alpha)", "def beta(self):\n eTheta = self.eTheta()\n cosOmg = np.cos(self.omega())\n ...
[ "0.7552084", "0.754992", "0.7535426", "0.7439897", "0.74009764", "0.7362183", "0.7362183", "0.71039945", "0.707216", "0.707216", "0.707216", "0.7020982", "0.69366413", "0.6877576", "0.6866696", "0.67877996", "0.6773138", "0.6760468", "0.6758375", "0.67385536", "0.670362", "...
0.8313839
0
Method that returns the Lorentz Factor of the particle.
def LorentzFactor(self): # Use of abs() and x ** 0.5 provides a more stable calculation of lorentz # factor than math.sqrt() at high velocities. return 1 / abs( 1 - Particle.BetaVelocity(self) * Particle.BetaVelocity(self))**0.5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def L(self) -> float:\n return self._L", "def Lorentz(x, x0, A, B, d):\n return B + A / (((x - x0) / d) ** 2 + 1)", "def lorentz(x, gamma):\n return 1 / cs.pi * 0.5 * gamma / ((0.5 * gamma**2) + x**2)", "def relu(z: float) -> float:\n return z if z > 0 else 0.01 * z", "def lorentz(x, x0...
[ "0.65476906", "0.65036815", "0.6469627", "0.638012", "0.6348522", "0.6347106", "0.6341828", "0.6334289", "0.6333599", "0.63280964", "0.62715966", "0.62416214", "0.6189796", "0.61773413", "0.61520237", "0.6139124", "0.6098995", "0.60558134", "0.60534275", "0.5988797", "0.59480...
0.8493009
0
Method that returns the relativistic mass of the particle
def RelativisticMass(self): return Particle.LorentzFactor(self) * self.restMass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def particleMass(self):\n return self.params['particleMass']", "def mass(self):\n\t\treturn self.volume*self.density", "def get_mass(self):\n return self.m", "def Mass(self):\n mpa = self.MassPerLength()\n if mpa == 0.0:\n return 0.\n L = self.Length()\n m...
[ "0.796206", "0.75452185", "0.7294323", "0.72193676", "0.7178756", "0.7156736", "0.7112089", "0.7013868", "0.697946", "0.69783187", "0.6944408", "0.6921542", "0.6921542", "0.6899986", "0.6871825", "0.6846594", "0.68247277", "0.6723331", "0.6718318", "0.6706135", "0.67014885", ...
0.74535996
2
Method that returns the relativistic momentum of the particle
def Momentum(self): return (np.multiply(Particle.LorentzFactor(self) , np.array(self.velocity,dtype=float))* self.restMass)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getMomentum(self):\n return self.p", "def calcMomentumFromVelocity(self):\n if self.mass is None:\n raise CoordinateVector(\"The particle mass needs to be specified to calculate the particle momentum from velocity.\")\n values = {}\n for direction in self.v.order:\n gamma = self.calcLor...
[ "0.74322164", "0.73959345", "0.73226726", "0.72440016", "0.69533795", "0.69061005", "0.6754555", "0.665831", "0.6585926", "0.6467245", "0.6458798", "0.64507335", "0.6449878", "0.64052224", "0.63391316", "0.63144106", "0.6260632", "0.61805636", "0.61436313", "0.6129185", "0.61...
0.75053847
0
Method that returns the total energy of the particle
def TotalEnergy(self): return (math.sqrt((Particle.RestEnergy(self) ** 2) + (np.linalg.norm(Particle.Momentum(self)) * const.speed_of_light) ** 2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def energy(self):\n energy = -0.5*np.sum(self.phi)+0.5*np.sum(self.mass*np.sqrt(self.particles.momentum[:,0]**2+self.particles.momentum[:,1]**2)**2)\n return energy", "def calcEnergy(self):\n speed_light = constants.physical_constants[\"speed of light in vacuum\"][0]#m/sec by default\n if sel...
[ "0.84132123", "0.8266951", "0.7991302", "0.7920243", "0.7864987", "0.7718018", "0.76988596", "0.76899403", "0.76686484", "0.76565665", "0.76121676", "0.7568719", "0.7546202", "0.75326943", "0.75047314", "0.7501236", "0.74609464", "0.7411283", "0.7411283", "0.74109715", "0.738...
0.821929
2
Method that returns the kinetic energy of the particle
def KineticEnergy(self): return Particle.TotalEnergy(self) - Particle.RestEnergy(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kineticEnergy(self):\n return self.params['kinetic']", "def kinetic_energy(self):\r\n return self.mass * np.dot(self.vel, self.vel) / 2", "def kinetic_energy(self):\r\n position, velocity, escaped_particles,impact, wall_collision,mom = self.box_collision_info()\r\n for j in xran...
[ "0.85219467", "0.85176307", "0.84117717", "0.7932918", "0.7827377", "0.7754195", "0.7666235", "0.7638704", "0.74971205", "0.7490171", "0.74387", "0.7403423", "0.7370556", "0.72361696", "0.7233435", "0.7216185", "0.7176531", "0.7157923", "0.715126", "0.70478064", "0.7043319", ...
0.8440398
2
Method that updates the particle's velocity and position with the Euler Cromer method
def UpdateCromer(self, deltaT): self.velocity += self.acceleration * deltaT self.position += self.velocity * deltaT
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def EulerN(particle,dt):\n particle.acc = particle.acc\n particle.vel = particle.vel + particle.acc*dt\n particle.pos = particle.pos + particle.vel*dt\n\n return particle", "def update(self, delta):\n # Computes new positions\n for part in self.particles:\n part.set_xyvxvy(se...
[ "0.72924954", "0.6944585", "0.66634274", "0.65291256", "0.6407229", "0.640443", "0.6364964", "0.6361303", "0.6322322", "0.63013875", "0.627813", "0.62754977", "0.62680924", "0.626102", "0.62548554", "0.6244272", "0.62382376", "0.62382376", "0.62364995", "0.61982566", "0.61757...
0.64788157
4
Method that updates the particle's velocity and position with the Euler Forward method
def UpdateForward(self, deltaT): self.position += self.velocity * deltaT self.velocity += self.acceleration * deltaT
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def EulerN(particle,dt):\n particle.acc = particle.acc\n particle.vel = particle.vel + particle.acc*dt\n particle.pos = particle.pos + particle.vel*dt\n\n return particle", "def update_position(self):\n self.position[0] += self.velocity[0]\n self.position[1] += self.velocity[1]", "def...
[ "0.7147672", "0.71063995", "0.68057793", "0.6710489", "0.6636017", "0.6588007", "0.65872145", "0.65763974", "0.65613395", "0.6534683", "0.65088755", "0.6481874", "0.6460222", "0.64475214", "0.6444629", "0.63995236", "0.6384533", "0.63719004", "0.63658047", "0.6355781", "0.633...
0.6623841
5
Method that returns the electric field from the particle that affects another particle.
def GenerateElectricField(self, affectedParticle): return self.electricField.GenerateField(affectedParticle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def electric_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j * kr\n\n front_term = (\n (1j * self.omega * self.mu * self.moment) / (4. * np.pi * r**2) *\n (ikr + 1) * np...
[ "0.6565109", "0.6523056", "0.6255628", "0.62081003", "0.61325175", "0.6086595", "0.5954075", "0.5868522", "0.58501065", "0.5768421", "0.5700194", "0.5639009", "0.5562006", "0.55513227", "0.55293006", "0.54763085", "0.5419618", "0.53979456", "0.53979456", "0.53847796", "0.5378...
0.72869897
0
Method that returns the magnetic field from the particle that affects another particle.
def GenerateMagneticField(self, affectedParticle): return self.magneticField.GenerateField(affectedParticle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j*kr\n\n front_term = (\n self.current * self.length / (4 * np.pi * r**2) * (ikr + 1) *\n np.exp(-ikr)\n ...
[ "0.67325306", "0.65006876", "0.64733946", "0.64133286", "0.61525905", "0.6089795", "0.594535", "0.5899009", "0.58383423", "0.5798657", "0.5736236", "0.57189417", "0.5650898", "0.5645273", "0.5644613", "0.55965346", "0.5590425", "0.5575774", "0.5534189", "0.5524397", "0.551204...
0.6843076
0
Returns the initialized component manager. This is used as FastAPI dependency and called for every request.
def get_component_manager( token: str = Depends(get_api_token), ) -> ComponentOperations: session = BaseUrlSession(base_url=CONTAXY_API_ENDPOINT) session.headers = {"Authorization": f"Bearer {token}"} return ComponentClient(session)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_manager():\n\n return multiprocessing.Manager()", "def GetManager(self):\r\n\r\n return self.manager", "def get_manager():\n return __manager__", "def getManager(self):\n return self._manager", "def core(self):\n return CoreManager(self)", "def manager(self):\n ...
[ "0.68361783", "0.6697513", "0.66576326", "0.660257", "0.6231092", "0.61051804", "0.61051804", "0.6104129", "0.5990772", "0.5973677", "0.5936767", "0.5933692", "0.588824", "0.5797466", "0.57823783", "0.5773982", "0.5772796", "0.57626957", "0.5757743", "0.57147866", "0.5693136"...
0.69490683
0
Return a boolean mask for a circular sector. The start/stop angles in `angle_range` should be given in clockwise order.
def sector_mask(shape, centre, radius, angle_range): x,y = np.ogrid[:shape[0],:shape[1]] cx,cy = centre tmin,tmax = np.deg2rad(angle_range) # ensure stop angle > start angle if tmax < tmin: tmax += 2*np.pi # convert cartesian --> polar coordinates r2 = (x-cx)*(x-cx) + (y-cy)*(y-cy) theta = np.arctan2(x-cx, y-cy) - tmin # wrap angles between 0 and 2*pi theta %= (2*np.pi) # circular mask circmask = r2 <= radius*radius # angular mask anglemask = theta <= (tmax-tmin) return circmask*anglemask
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sector_mask(shape,centre,radius,angle_range):\n\n x,y = np.ogrid[:shape[0],:shape[1]]\n cx,cy = centre\n tmin,tmax = np.deg2rad(angle_range)\n\n # ensure stop angle > start angle\n if tmax < tmin:\n tmax += 2*np.pi\n\n # convert cartesian --> polar coordinates\n r2 = (x-cx)*(x-c...
[ "0.7897219", "0.7897219", "0.7897219", "0.76970416", "0.6498443", "0.6337913", "0.6234694", "0.6167133", "0.6006037", "0.59997934", "0.58989346", "0.5845237", "0.5817369", "0.5690966", "0.5672399", "0.55962497", "0.552139", "0.5515878", "0.5482021", "0.54450697", "0.54447085"...
0.7855404
3
Parses a region description to a dict
def parse_region(region, wcs): logger = logging.getLogger(__name__) shape = region.split(',')[0] coord = region.split(',')[1] frame = region.split(',')[2] try: params = region.split(',')[3:] except IndexError: logger.error('No coordinates given.') logger.error('Will exit now.') sys.exit(1) if 'sky' in coord.lower() and frame == '': logger.error('No frame specified.') logger.error('Will exit now.') sys.exit(1) if shape == 'point': # Convert sky coordinates to pixels if required if 'sky' in coord.lower(): coo_sky = SkyCoord(params[0], params[1], frame=frame) params[0:2] = coo_sky.to_pixel(wcs) params = [int(round(float(x))) for x in params] rgn = {'shape':'point', 'params':{'cx':params[0], 'cy':params[1]}} elif shape == 'box': # Convert sky coordinates to pixels if required if 'sky' in coord.lower(): blc_sky = SkyCoord(params[0], params[1], frame=frame) trc_sky = SkyCoord(params[2], params[3], frame=frame) params[0:2] = blc_sky.to_pixel(wcs) params[2:] = trc_sky.to_pixel(wcs) params = [int(round(float(x))) for x in params] rgn = {'shape':'box', 'params':{'blcx':params[0], 'blcy':params[1], 'trcx':params[2], 'trcy':params[3]}} elif shape == 'circle': # Convert sky coordinates to pixels if required if 'sky' in coord.lower(): coo_sky = SkyCoord(params[0], params[1], frame=frame) params[0:2] = coo_sky.to_pixel(wcs) lscale = abs(wcs.pixel_scale_matrix[0,0])*u.deg val, uni = split_str(params[2]) # Add units to the radius r = add_radius_units(val, uni) logger.debug('lscale: {0}'.format(lscale)) logger.debug('radius: {0}'.format(r)) params[2] = (r/lscale).cgs.value params = [float(x) for x in params] rgn = {'shape':'circle', 'params':{'cx':params[0], 'cy':params[1], 'r':params[2]}} elif shape == 'ellipse': # Convert sky coordinates to pixels if required if 'sky' in coord.lower(): coo_sky = SkyCoord(params[0], params[1], frame=frame) params[0:2] = coo_sky.to_pixel(wcs) lscale = abs(wcs.pixel_scale_matrix[0,0])*u.deg logger.debug('lscale: {0}'.format(lscale)) # Major axis. 
val, uni = split_str(params[2]) # Add units to the major axis. r = add_radius_units(val, uni) logger.debug('major axis: {0}'.format(r)) params[2] = (r/lscale).cgs.value # Minor axis. val, uni = split_str(params[3]) # Add units to the minor axis. r = add_radius_units(val, uni) logger.debug('minor axis: {0}'.format(r)) params[3] = (r/lscale).cgs.value params = [float(x) for x in params] rgn = {'shape':'ellipse', 'params':{'cx':params[0], 'cy':params[1], 'bmaj':params[2], 'bmin':params[3], 'theta':params[4]}} elif shape == 'crtf': # CASA region files are always in sky coordinates polys = ci.read_casa_polys(params[0], wcs=wcs) rgn = {'shape':'polygon', 'params':{'Polygons':polys}} elif shape == 'all': rgn = {'shape':'all', 'params':'all'} else: print('region description not supported.') print('Will exit now.') logger.error('Region description not supported.') logger.error('Will exit now.') sys.exit(1) return rgn
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _decode_region_details(self, region):\r\n\r\n # This whole thing is a bit of a mess but it preserves backwards compat but also allows for\r\n # ad-hoc addition of new regions without code changes\r\n # TODO: Move EC2 builtin details to a config file\r\n region_details = None\r\n\r\n...
[ "0.70874363", "0.70591384", "0.60175997", "0.5904459", "0.585444", "0.5795512", "0.5608974", "0.5576179", "0.5553111", "0.54601663", "0.5458088", "0.54207695", "0.54005855", "0.5378389", "0.5374614", "0.5348017", "0.53426313", "0.5309667", "0.5308136", "0.53045267", "0.529966...
0.5485982
9
Constructs a cube axis header fits cube header header pyfits header axis axis to reconstruct axis int cube axis numpy array
def get_axis(header, axis): logger = logging.getLogger(__name__) logger.debug("Will extract axis: {}.".format(axis)) wcs = WCS(header) wcs_arr_shape = wcs.array_shape logger.debug("WCS array shape: {}".format(wcs_arr_shape)) n_axis = wcs.array_shape[-axis] logger.debug("Axis should have {} elements.".format(n_axis)) if len(wcs_arr_shape) > 3: axis_vals = wcs.pixel_to_world_values(np.c_[np.zeros(n_axis), np.zeros(n_axis), np.arange(0,n_axis), np.zeros(n_axis)]) else: axis_vals = wcs.pixel_to_world_values(np.c_[np.zeros(n_axis), np.zeros(n_axis), np.arange(0,n_axis)]) axis_vals = np.asarray(axis_vals) axis_vals = axis_vals[:,axis-1] return axis_vals
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_phi_to_fits_header(fits_header, phi_array):\n if len(phi_array) < 2:\n raise ShapeError('RM cube should have two or more frames to be a cube')\n fhdr = fits_header.copy()\n fhdr.set('NAXIS3', len(phi_array))\n fhdr.set('CRPIX3', 1.0)\n fhdr.set('CRVAL3', phi_array[0])\n fhdr.set('C...
[ "0.66289526", "0.652385", "0.63435376", "0.5962099", "0.59471786", "0.5827295", "0.5715055", "0.5618013", "0.55621886", "0.55530864", "0.5483086", "0.54277635", "0.5331385", "0.5288723", "0.5281522", "0.5281522", "0.5277689", "0.524827", "0.52445453", "0.5223636", "0.52164155...
0.6036392
3
Sums the pixels inside a region preserving the spectral axis.
def extract_spec(data, region, naxis, mode):
    """Collapse the pixels inside a region while preserving the spectral axis.

    Parameters
    ----------
    data : array with the cube data (2-D image, 3-D cube, or 4-D cube).
    region : dict with keys 'shape' ('point', 'box', 'circle', 'ellipse',
        'poly*', 'all*'), 'params' (shape-specific pixel coordinates) and
        'barea' (beam area, used by the 'flux' mode).
    naxis : int, number of axes in `data`.
    mode : str, one of 'sum', 'avg' or a string containing 'flux'
        (sum divided by the beam area).

    Returns
    -------
    spec : spectrum (or scalar for 2-D input) of the collapsed region.

    Calls sys.exit(1) when `mode` is not recognized.
    """
    logger = logging.getLogger(__name__)
    logger.debug('Data shape: {0}'.format(data.shape))
    if region['shape'] == 'point':
        # Single pixel: no spatial collapse needed below 4 axes.
        if naxis > 3:
            spec = data[:,:,region['params']['cy'],region['params']['cx']]
            if mode == 'sum':
                spec = spec.sum(axis=0)
            elif mode == 'avg':
                spec = spec.mean(axis=0)
            elif 'flux' in mode.lower():
                spec = spec.sum(axis=0)/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
        elif naxis == 3:
            spec = data[:,region['params']['cy'],region['params']['cx']]
        else:
            spec = data[region['params']['cy'],region['params']['cx']]
    elif region['shape'] == 'box':
        # NOTE(review): `area` is computed but never used — the 'flux'
        # branches divide by region['barea'] instead.
        area = (region['params']['trcy'] - region['params']['blcy']) * \
               (region['params']['trcx'] - region['params']['blcx'])
        if naxis > 3:
            spec = data[0,:,region['params']['blcy']:region['params']['trcy'],
                        region['params']['blcx']:region['params']['trcx']]
            if mode == 'sum':
                spec = spec.sum(axis=2).sum(axis=1)
            elif mode == 'avg':
                spec = spec.mean(axis=2).mean(axis=1)
            elif 'flux' in mode.lower():
                spec = spec.sum(axis=2).sum(axis=1)/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
        elif naxis == 3:
            spec = data[:,region['params']['blcy']:region['params']['trcy'],
                        region['params']['blcx']:region['params']['trcx']]
            if mode == 'sum':
                spec = spec.sum(axis=2).sum(axis=1)#/area
            elif mode == 'avg':
                spec = spec.mean(axis=2).mean(axis=1)#/area
            elif 'flux' in mode.lower():
                summ = spec.sum(axis=2).sum(axis=1)
                logger.info('Sum of pixels: {0}'.format(summ))
                spec = summ/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
        else:
            # 2-D image: the result is a scalar, not a spectrum.
            spec = data[region['params']['blcy']:region['params']['trcy'],
                        region['params']['blcx']:region['params']['trcx']]
            if mode == 'sum':
                spec = spec.sum()
            elif mode == 'avg':
                spec = spec.mean()
            elif 'flux' in mode.lower():
                spec = spec.sum()/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
    elif region['shape'] == 'circle':
        logger.info("Circular region has a center " \
                    "at pixel ({0},{1}) with radius " \
                    "{2}".format(region['params']['cx'],
                                 region['params']['cy'],
                                 region['params']['r']))
        if naxis > 3:
            logger.debug("The image has more than 3 axes.")
            # Full 0-360 degree sector == full circle.
            mask = sector_mask(data[0,0].shape,
                               (region['params']['cy'], region['params']['cx']),
                               region['params']['r'], (0, 360))
            mdata = data[0][:,mask]
            logger.debug("Masked data shape: {0}".format(mdata.shape))
            if 'sum' in mode.lower():
                spec = mdata.sum(axis=1)
            elif 'avg' in mode.lower():
                spec = mdata.mean(axis=1)
            elif 'flux' in mode.lower():
                spec = mdata.sum(axis=1)/region['barea']
        elif naxis == 3:
            mask = sector_mask(data[0].shape,
                               (region['params']['cy'], region['params']['cx']),
                               region['params']['r'], (0, 360))
            mdata = data[:,mask]
            logger.debug("Masked data shape: {0}".format(mdata.shape))
            if 'sum' in mode.lower():
                spec = mdata.sum(axis=1)#/len(np.where(mask.flatten() == 1)[0])
            elif 'avg' in mode.lower():
                spec = mdata.mean(axis=1)
            elif 'flux' in mode.lower():
                spec = mdata.sum(axis=1)/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
        else:
            mask = sector_mask(data.shape,
                               (region['params']['cy'], region['params']['cx']),
                               region['params']['r'], (0, 360))
            # Mask NaNs/infs so they do not poison the sum/mean.
            mdata = np.ma.masked_invalid(data[mask])
            logger.debug("Masked data shape: {0}".format(mdata.shape))
            logger.debug("Masked data sum: {0}".format(mdata))
            if 'sum' in mode.lower():
                spec = mdata.sum()#/len(np.where(mask.flatten() == 1)[0])
            elif 'avg' in mode.lower():
                spec = mdata.mean()
            elif 'flux' in mode.lower():
                spec = mdata.sum()/region['barea']
            else:
                logger.error('Mode not supported.')
                logger.error('Will exit now.')
                sys.exit(1)
    elif region['shape'] == 'ellipse':
        logger.info("Elliptical region has a center " \
                    "at pixel ({0},{1}) with major and minor axes " \
                    "{2} and {3} at an angle {4}".format(region['params']['cx'],
                                                         region['params']['cy'],
                                                         region['params']['bmaj'],
                                                         region['params']['bmin'],
                                                         region['params']['theta']))
        logger.debug("Mask shape: {}".format(data.shape[-2:]))
        # bmaj/bmin are full axes; ellipse_mask takes semi-axes, hence /2.
        mask = ellipse_mask(data.shape[-2:], region['params']['cy'],
                            region['params']['cx'],
                            region['params']['bmaj']/2.,
                            region['params']['bmin']/2.,
                            region['params']['theta'])
        logger.debug('Elements in mask: {}'.format(mask.sum()))
        if naxis > 3:
            mdata = data[0][:,mask]
            axis = 1
        elif naxis == 3:
            mdata = data[:,mask]
            axis = 1
        else:
            mdata = data[mask]
            axis = 0
        logger.debug("Masked data shape: {0}".format(mdata.shape))
        if 'sum' in mode.lower():
            spec = mdata.sum(axis=axis)
        elif 'avg' in mode.lower():
            spec = mdata.mean(axis=axis)
        elif 'flux' in mode.lower():
            spec = mdata.sum(axis=axis)/region['barea']
        else:
            logger.error('Mode not supported.')
            logger.error('Will exit now.')
            sys.exit(1)
    elif 'poly' in region['shape']:
        npolys = len(region['params']['Polygons'])
        if naxis > 3:
            shape = data[0][0].shape
            npix3 = data[0].shape[0]
        elif naxis == 3:
            shape = data[0].shape
            npix3 = data.shape[0]
        else:
            shape = data.shape
            npix3 = 0
        mask = np.zeros(shape)
        for poly in region['params']['Polygons']:
            # Add all the polygons together
            logger.info("Adding polygons to the mask.")
            mask += poly.make_mask(shape)
        # Overlapping polygons can push mask values above 1; clip back to 0/1.
        logger.info("Normalizing the mask to unity.")
        mask = np.ceil(mask/npolys)
        if naxis > 3:
            mdata = data[0]*np.tile(mask, (npix3,1,1))
        else:
            mdata = data*np.tile(mask, (npix3,1,1))
        if mode == 'sum':
            spec = mdata.sum(axis=1).sum(axis=1)
        elif 'avg' in mode.lower():
            spec = mdata.mean(axis=1).mean(axis=1)
        elif 'flux' in mode.lower():
            spec = mdata.sum(axis=1).sum(axis=1)/region['barea']
        else:
            logger.error('Mode not supported.')
            logger.error('Will exit now.')
            sys.exit(1)
    elif 'all' in region['shape']:
        # Whole-image extraction is delegated to proc_data.
        if naxis > 3:
            data = data[0]
            spec = proc_data(data, mode, region)
        elif naxis == 3:
            data = data  # no-op; kept for symmetry with the other branches
            spec = proc_data(data, mode, region)
        else:
            spec = proc_data(data, mode, region)
    return spec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sumRegion(self, row1, col1, row2, col2):\n\n if not self.sums:\n return 0\n\n r1, c1 = row1+1, col1+1\n r2, c2 = row2+1, col2+1\n return self.sums[r2][c2] + self.sums[r1-1][c1-1] - self.sums[r2][c1-1] - self.sums[r1-1][c2]", "def sumRegion(self, row1, col1, row2, col2):...
[ "0.6154293", "0.603963", "0.600956", "0.59598166", "0.5920878", "0.59154606", "0.59146667", "0.58369637", "0.5797925", "0.57950807", "0.5758008", "0.5724144", "0.56694096", "0.55569243", "0.5546531", "0.54882556", "0.54564047", "0.5439962", "0.5425233", "0.541275", "0.5411576...
0.0
-1
Build a WCS object given the spatial header parameters.
def set_wcs(head):
    """Build a WCS object from the given header, dropping a 4th axis if present.

    Parameters
    ----------
    head : FITS header with the spatial/spectral parameters.

    Returns
    -------
    astropy WCS instance with at most three axes.
    """
    log = logging.getLogger(__name__)
    world = WCS(head)
    # A degenerate 4th axis (index 2, zero-based) is dropped so the
    # result is at most a 3-axis WCS.
    if world.naxis > 3:
        world = world.dropaxis(2)
    log.debug('WCS contains {0} axes.'.format(world.naxis))
    return world
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readWCS(header):\r\n # Creating the WCS instance\r\n w = wcs.WCS(header = header)\r\n # Filling the WCS\r\n w.wcs.crval = N.array([float(header[\"CRVAL1\"]), float(header[\"CRVAL2\"])])\r\n w.wcs.crpix = N.array([float(header[\"CRPIX1\"]), float(header[\"CRPIX2\"])])\r\n w.wcs.cdelt = N.array...
[ "0.76218075", "0.6315264", "0.62880224", "0.61376697", "0.6050542", "0.59938204", "0.599007", "0.59843117", "0.5939777", "0.59356636", "0.5899153", "0.5899032", "0.58228743", "0.58159834", "0.5745454", "0.5663349", "0.565065", "0.5642652", "0.55831504", "0.55391955", "0.55139...
0.60295844
5
Plots the extraction region.
def show_rgn(ax, rgn, **kwargs):
    """Plot the extraction region on the given matplotlib axes.

    Parameters
    ----------
    ax : matplotlib axes to draw on.
    rgn : dict with keys 'shape' and 'params' (pixel coordinates).
    **kwargs : forwarded to ax.plot for line-drawn shapes.
    """
    alpha = 0.1
    #lw = 0.1
    if rgn['shape'] == 'box':
        # Draw the four sides of the box.
        ax.plot([rgn['params']['blcx']]*2,
                [rgn['params']['blcy'],rgn['params']['trcy']], 'r-', **kwargs)
        ax.plot([rgn['params']['blcx'],rgn['params']['trcx']],
                [rgn['params']['blcy']]*2, 'r-', **kwargs)
        ax.plot([rgn['params']['blcx'],rgn['params']['trcx']],
                [rgn['params']['trcy']]*2, 'r-', **kwargs)
        ax.plot([rgn['params']['trcx']]*2,
                [rgn['params']['blcy'],rgn['params']['trcy']], 'r-', **kwargs)
    elif rgn['shape'] == 'circle':
        patch = mpatches.Circle((rgn['params']['cx'], rgn['params']['cy']),
                                rgn['params']['r'], alpha=alpha,
                                transform=ax.transData)
        #plt.figure().artists.append(patch)
        ax.add_patch(patch)
    elif rgn['shape'] == 'polygon':
        for poly in rgn['params']['Polygons']:
            patch = mpatches.Polygon(poly.get_vertices(), closed=True,
                                     alpha=alpha, transform=ax.transData)
            ax.add_patch(patch)
    elif rgn['shape'] == 'pixel':
        # BUG FIX: the original referenced the undefined name `region`
        # here, raising NameError for pixel regions; it must be `rgn`.
        # NOTE(review): cy is passed as the x coordinate and cx as y —
        # looks intentional to match the array (row, col) convention,
        # but confirm against the callers.
        ax.plot(rgn['params']['cy'], rgn['params']['cx'], 'rs', ms=5)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot(self):\n pass", "def plot(self):\n\t\tself.plotOfXray().plot()", "def show(self):\n \n \n \n \n \n \n r = 4\n f, axarr = plt.subplots(r, r, figsize=(8,8))\n counter = 0\n for i in range(r):\n for j in range(r):\n ...
[ "0.64836633", "0.63798314", "0.63485694", "0.63415164", "0.62981063", "0.6228685", "0.6214679", "0.61689097", "0.6165796", "0.61613774", "0.611915", "0.60805905", "0.60388714", "0.6032662", "0.6006002", "0.5967216", "0.5963205", "0.59328747", "0.5918682", "0.59050053", "0.589...
0.0
-1
Splits text from digits in a string.
def split_str(str):
    """Split a value like "10.5d" into its numeric part and unit suffix.

    Parameters
    ----------
    str : string of the form "<number><unit>" where unit is one of
        'd', 'm' or 's' and the number may contain a decimal point.

    Returns
    -------
    tuple (number_str, unit_str) when the input matches, otherwise
    None (implicitly) when it does not.
    """
    logger = logging.getLogger(__name__)
    logger.debug('{0}'.format(str))
    # BUG FIX: the dot was previously unescaped, so it matched ANY
    # character (e.g. "1x5d" parsed as number "1x5"). Escape it so only
    # a literal decimal point is accepted.
    match = re.match(r"([0-9]+\.?\d{0,32}?)(d|m|s)", str)
    if match:
        items = match.groups()
        return items[0], items[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_str_digit(s):\n res = []\n for m in re.finditer('(\\d*)(\\D*)', s):\n for g in m.groups():\n if g != '':\n try:\n res.append(int(g))\n except ValueError:\n res.append(g)\n return tuple(res)", "def split_num(s...
[ "0.69932103", "0.69409627", "0.6516405", "0.6497158", "0.6497158", "0.6333978", "0.62500066", "0.62345093", "0.6181007", "0.61332566", "0.6114605", "0.60910285", "0.6013863", "0.59716225", "0.59670913", "0.59426206", "0.59077483", "0.5873073", "0.58616483", "0.58398443", "0.5...
0.6069437
12
Get the current status of the pool.
def get_pool_status(self, mission):
    """Get the current status of the Azure Batch pool.

    Parameters
    ----------
    mission : object exposing `pool_name`, the Batch pool identifier.

    Returns
    -------
    (state, allocation_state, states) where state/allocation_state are
    strings ("N/A" when the pool does not exist) and states maps each
    node-state name to the number of nodes in that state.
    """
    # initialize node status counters; keys mirror the Azure Batch
    # ComputeNodeState names so node.state.name can index directly.
    states = dict(
        idle=0, rebooting=0, reimaging=0, running=0, unusable=0,
        creating=0, starting=0, waiting_for_start_task=0,
        start_task_failed=0, unknown=0, leaving_pool=0, offline=0,
        preempted=0)
    # if the pool does not exist, report N/A with all-zero counters
    if not self.batch_client.pool.exists(pool_id=mission.pool_name):
        return "N/A", "N/A", states
    # get pool info
    the_pool = self.batch_client.pool.get(pool_id=mission.pool_name)
    state = the_pool.state.name
    allocation_state = the_pool.allocation_state.name
    # get the list of nodes at the current time point;
    # we check the existence of the pool again because it may have been
    # deleted between the first check and now
    if self.batch_client.pool.exists(pool_id=mission.pool_name):
        node_list = self.batch_client.compute_node.list(
            pool_id=mission.pool_name)
        # tally the number of nodes in each state
        for node in node_list:
            states[node.state.name] += 1
        node_list.reset()
    return state, allocation_state, states
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pool_status():\n pools_status = split_status_pools(fork_and_get_output(\"zpool status\".split()))\n pools = []\n for p in pools_status:\n pools.append(status.PoolStatus(p))\n return pools", "def status(self):\n self._refresh_state()\n return self._data.get('status')", "...
[ "0.75640756", "0.7310082", "0.73063296", "0.7286715", "0.7214205", "0.7214205", "0.7214205", "0.72092366", "0.71747327", "0.71360385", "0.7126846", "0.71214384", "0.7102766", "0.7091428", "0.70774436", "0.70774436", "0.7071764", "0.70652586", "0.70541", "0.70418775", "0.70287...
0.71968305
8
Get a string for the status overview of the pool and nodes.
def get_pool_overview_string(self, mission):
    """Build a human-readable status overview of the pool and its nodes.

    Parameters
    ----------
    mission : mission object passed through to get_pool_status.

    Returns
    -------
    Multi-line status string; node details are omitted when the pool
    does not exist ("N/A").
    """
    state, alloc, nodes = self.get_pool_status(mission)
    text = "Pool status: {}\n".format(state)
    text += "Allocation status: {}".format(alloc)
    if state == "N/A":
        return text
    # Everything that is not idle/running/unusable is lumped as "other".
    leftover = sum(nodes.values()) - (
        nodes["idle"] + nodes["running"] + nodes["unusable"])
    text += "\n"
    text += "Node status: "
    text += "{} idle; ".format(nodes["idle"])
    text += "{} running; ".format(nodes["running"])
    text += "{} unusable; ".format(nodes["unusable"])
    text += "{} other;".format(leftover)
    return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status_str(self, spaced=False):\n if self.args.vverbose:\n ## Print profile of all nodes\n status = self.pool.status(string=True)\n\n elif self.args.verbose:\n ## Print profile of usable nodes\n status = self.pool.status(min_state=PLNodeState.usable, string=True)\n\n ...
[ "0.78566015", "0.72541404", "0.690066", "0.68025947", "0.67186147", "0.6582966", "0.65206933", "0.65195024", "0.65195024", "0.6434992", "0.64222366", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.6364477", "0.636447...
0.7833251
1
Get the current status of the job (task scheduler) and tasks.
def get_job_status(self, mission):
    """Get the current status of the Batch job and its task counts.

    Parameters
    ----------
    mission : object exposing `job_name`, the Batch job identifier.

    Returns
    -------
    (state, status) where state is the job state name ("N/A" when the
    job does not exist) and status maps 'active'/'running'/'succeeded'/
    'failed' to task counts.

    Raises
    ------
    azure.batch.models.BatchErrorException for Batch errors other than
    a missing job.
    """
    # initialize task status counters
    status = dict(active=0, running=0, succeeded=0, failed=0)
    # get job status if it exists; otherwise return N/A
    try:
        the_job = self.batch_client.job.get(job_id=mission.job_name)
        # get counts of tasks in different statuses
        status_counts = self.batch_client.job.get_task_counts(mission.job_name)
    except azure.batch.models.BatchErrorException as err:
        # a missing job is an expected condition, not an error
        if err.message.value.startswith("The specified job does not exist"):
            return "N/A", status
        # re-raise all other Batch errors
        raise
    # update the dictionary from the service-side counts
    status["active"] = status_counts.active
    status["running"] = status_counts.running
    status["succeeded"] = status_counts.succeeded
    status["failed"] = status_counts.failed
    return the_job.state.name, status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status(self):\n return self.job_proto.status", "def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)", "def celery_task_status(self):\n return self._get_celery_queue_data()", "def get_job_status(self):\n if self.worker_thread is None:\n retu...
[ "0.7439859", "0.74113566", "0.73972505", "0.73632085", "0.7306718", "0.7229825", "0.7227331", "0.719492", "0.71917385", "0.7006022", "0.69687855", "0.69370997", "0.6901852", "0.6846558", "0.68459725", "0.68419343", "0.6834033", "0.6797795", "0.67972744", "0.6779665", "0.67757...
0.6771893
22
Get a string for the status overview of the job and tasks.
def get_job_overview_string(self, mission):
    """Build a human-readable status overview of the job and its tasks.

    Parameters
    ----------
    mission : mission object passed through to get_job_status.

    Returns
    -------
    Status string; task details are omitted when the job does not
    exist ("N/A").
    """
    job_state, tasks = self.get_job_status(mission)
    report = "Job status: {}".format(job_state)
    if job_state == "N/A":
        return report
    report += "\n"
    report += "Tasks status: "
    report += "{} active; ".format(tasks["active"])
    report += "{} running; ".format(tasks["running"])
    report += "{} succeeded; ".format(tasks["succeeded"])
    report += "{} failed;".format(tasks["failed"])
    return report
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_job_status(self):\n total_hits = session.query(BoxHit).filter_by(training_job_id=self.id).count()\n num_hits_left = session.query(BoxHit).filter_by(training_job_id=self.id, outstanding=True).count()\n total_urls = self.num_urls\n num_urls_left = session.query(VideoTrainingURL)....
[ "0.74661416", "0.7176071", "0.7176071", "0.71116614", "0.7109996", "0.7087766", "0.7026905", "0.69023955", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", "0.6890844", ...
0.7895506
0
Get the status of a mission's storage container.
def get_storage_container_status(self, mission):
    """Get the status of a mission's storage container.

    Parameters
    ----------
    mission : object exposing `container_name`.

    Returns
    -------
    "available" when the container exists, otherwise "N/A".
    """
    present = self.storage_client.exists(
        container_name=mission.container_name)
    # TODO: calculate space used in the container
    return "available" if present else "N/A"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_storage_container_overview_string(self, mission):\n\n status = self.get_storage_container_status(mission)\n s = \"Storage container status: {}\".format(status)\n return s", "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def container_s...
[ "0.7662295", "0.68757343", "0.65628034", "0.61149687", "0.61096156", "0.60097855", "0.59428936", "0.5903649", "0.58938575", "0.5616815", "0.560521", "0.5599396", "0.55950266", "0.5577565", "0.55629945", "0.556032", "0.5544695", "0.5521543", "0.5521207", "0.551155", "0.5508292...
0.85038316
0
Get a string for the status of the storage container.
def get_storage_container_overview_string(self, mission):
    """Build a one-line status string for the storage container.

    Parameters
    ----------
    mission : mission object passed through to
        get_storage_container_status.

    Returns
    -------
    String of the form "Storage container status: <status>".
    """
    return "Storage container status: {}".format(
        self.get_storage_container_status(mission))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def container_status(self) -> str:\n return pulumi.get(self, \"container_status\")", "def storage_bytes_status(self) -> str:\n return pulumi.get(self, \"storage_bytes_status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return p...
[ "0.8081375", "0.7568696", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", "0.72568953", ...
0.7668451
1
Get the string of an overview to all resources.
def get_overview_string(self, mission):
    """Combine pool, job and storage overviews into a single string.

    Parameters
    ----------
    mission : mission object forwarded to the individual overview
        builders.

    Returns
    -------
    The three overview sections joined by blank lines.
    """
    sections = [
        self.get_pool_overview_string(mission),
        self.get_job_overview_string(mission),
        self.get_storage_container_overview_string(mission),
    ]
    return "\n\n".join(sections)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overview():\n return render_template('api/api.html', title='API Overview')", "def __str__(self):\n return gettext('List of %s') % self.resource.__name__", "def get_overview():\n from app.core.api_views import Api\n from app.modules.overview import inc\n sar = inc.main()\n api = Api()\...
[ "0.6887148", "0.6648697", "0.65732783", "0.63620585", "0.6329359", "0.6313626", "0.6280509", "0.6225066", "0.6117485", "0.6111171", "0.61043644", "0.60946447", "0.6082813", "0.60682595", "0.6067177", "0.6052863", "0.6049613", "0.59981334", "0.59859276", "0.59684676", "0.59644...
0.64852643
3
A generator that can be used in a loop.
def status_generator(self, mission):
    """Yield fresh status snapshots forever, for use in a polling loop.

    Parameters
    ----------
    mission : mission object forwarded to the status getters.

    Yields
    ------
    dict with keys 'timestamp', 'pool_status', 'allocation_status',
    'node_status', 'job_status', 'task_status' and 'storage_status'.
    """
    while True:
        # UTC timestamp, second resolution, marked as UTC for strftime.
        now = datetime.datetime.utcnow().replace(
            microsecond=0, tzinfo=datetime.timezone.utc)
        pool, alloc, nodes = self.get_pool_status(mission)
        job, tasks = self.get_job_status(mission)
        snapshot = {
            "timestamp": now.strftime("%a %b %d %H:%M:%S %Z %Y"),
            "pool_status": pool,
            "allocation_status": alloc,
            "node_status": nodes,
            "job_status": job,
            "task_status": tasks,
            "storage_status": self.get_storage_container_status(mission),
        }
        yield snapshot
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __iter__(self):\n yield from self.gen", "def _mc_gen():\r\n n = 1\r\n while True:\r\n yield n\r\n n += 1", "def __iter__(self):\n return self.new_generator()", "def generator():\n mygenerator = (x for x in range(3))\n for element in mygenerator:\n print 'pop...
[ "0.77754736", "0.76618725", "0.7656073", "0.75702596", "0.74272484", "0.74266475", "0.73606414", "0.7329172", "0.7289084", "0.7237256", "0.72144675", "0.7204889", "0.71852434", "0.7164605", "0.7125203", "0.7109289", "0.7065946", "0.70342624", "0.7027617", "0.70258605", "0.702...
0.0
-1
Prints summary statistics for every column of X, split into 2 classes X is the data, y is the class labels, assumed to be 0 or 1. This will also plot the data.
def summarize_source(X, y, decimals=4):
    """Print summary statistics for every column of X, split by class.

    Parameters
    ----------
    X : 2-D array of samples (rows) by features (columns).
    y : 1-D array of class labels, assumed to be 0 or 1.
    decimals : int, number of decimal places in the printed numbers.
    """
    # Assumes y is either 1 or 0
    pos_idxs = np.where(y == 1)[0]
    neg_idxs = np.where(y == 0)[0]
    # Divide dataset into positives and negatives: index 0 = negative,
    # index 1 = positive.
    Xs = (X[neg_idxs, :], X[pos_idxs, :])
    Ys = (y[neg_idxs], y[pos_idxs])
    # Build one fixed-width float placeholder per feature column.
    numstr = ", ".join(["{" + str(i) + ":10." + str(decimals) + "f}"
                        for i in range(X.shape[1])])
    # Output results
    print("Total number of samples: " + str(len(y)))
    print()
    print(str(len(Ys[1])) + " Positive Samples:")
    print("\tMin : " + numstr.format( *np.min(Xs[1], axis=0)))
    print("\tMean : " + numstr.format(*np.mean(Xs[1], axis=0)))
    print("\tMax : " + numstr.format( *np.max(Xs[1], axis=0)))
    print()
    print("\tStdev : " + numstr.format(*np.sqrt(np.var(Xs[1], axis=0))))
    print("\tVar : " + numstr.format( *np.var(Xs[1], axis=0)))
    print()
    print(str(len(Ys[0])) + " Negative Samples:")
    print("\tMin : " + numstr.format( *np.min(Xs[0], axis=0)))
    print("\tMean : " + numstr.format(*np.mean(Xs[0], axis=0)))
    print("\tMax : " + numstr.format( *np.max(Xs[0], axis=0)))
    print()
    print("\tStdev : " + numstr.format(*np.sqrt(np.var(Xs[0], axis=0))))
    print("\tVar : " + numstr.format( *np.var(Xs[0], axis=0)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def basic_summary(data):\n headers = [\"Split\", \"Samples\", \"Height\", \"Width\", \"Channels\", \"Classes\"]\n print(table_format(headers, header = True))\n for split in [\"train\", \"valid\", \"test\"]:\n X, y = data[split]\n n, h, w, c = X.shape\n n_classes = np.unique(y).shape[0...
[ "0.66654617", "0.6197573", "0.59729", "0.58434284", "0.58352256", "0.58352256", "0.58282775", "0.5750776", "0.57281625", "0.57217586", "0.5707557", "0.5698241", "0.56464404", "0.56453407", "0.5628074", "0.5584864", "0.55809045", "0.55757785", "0.55118465", "0.55106294", "0.54...
0.5336147
32
Shows a simple scatterplot of X, colored by the classes in y. Technically, this shows the 1st three principal components of X if X has more than 3 dimensions. If X only has 2 dimensions, then just a 2dimensional scatterplot is returned. This will not produce a plot for 1 dimensional data.
def plot_data(X, y): x_dim = X.shape[1] # Ignore 1 dimensional data if x_dim == 1: print("plot_data not gonna bother with 1 dimensional data") return # For 2 dimensional data, just plot it if x_dim == 2: plt.scatter(X[:,0], X[:,1], c=y) plt.show() return # For at least 4 dimensions, do PCA if x_dim >= 4: pca = PCA(n_components=3) pca.fit(X) plot_x = pca.transform(X) else: plot_x = X # Assumes y is either 1 or 0 pos_idxs = np.where(y == 1)[0] neg_idxs = np.where(y == 0)[0] # Plot the now 3 dimensional data fig = plt.figure() ax = fig.add_subplot(111, projection='3d') Xs = plot_x[neg_idxs, :] ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='orange') Xs = plot_x[pos_idxs, :] ax.scatter(Xs[:,0], Xs[:,1], Xs[:,2], color='purple') # Label plot if x_dim >= 4: ax.set_title("PCA of Generated Data") ax.set_xlabel("1st Principal Component") ax.set_ylabel("2nd Principal Component") ax.set_zlabel("3rd Principal Component") else: ax.set_xticklabels([]) ax.set_yticklabels([]) ax.set_zticklabels([]) # Display! plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scatter_plot(self):\n\n X = self.reduce_dimension(n_components=2)\n\n plt.figure()\n plt.scatter(X[:,0], X[:,1])\n\n return plt", "def plot_dataset(X, classes):\n data = pd.DataFrame(X, columns=['x', 'y'])\n data['dataset'] = classes\n sns.lmplot('x', 'y', data=data, hue=...
[ "0.68430007", "0.66364133", "0.6596344", "0.6580677", "0.65084815", "0.645392", "0.6410972", "0.6298724", "0.6219223", "0.621192", "0.61827666", "0.61542743", "0.6125981", "0.60232717", "0.59843695", "0.590425", "0.59014237", "0.5894517", "0.58731264", "0.58717024", "0.586803...
0.7091436
0
This function reads the data from database where the clean data is stored.
def load_data(database_filepath):
    """Read the cleaned message data from the SQLite database.

    Parameters
    ----------
    database_filepath : str, path to the SQLite database file.

    Returns
    -------
    (X, Y, category_names): message texts, the category label matrix,
    and the corresponding category names.
    """
    # load data from database
    engine = create_engine('sqlite:///' + database_filepath)
    df = pd.read_sql_table('figure-eight', engine)
    # There is only one classification class for child_alone which is 0
    # which indicates that there is no message classified into this
    # class, so the column is dropped.
    del df['child_alone']
    X = df.message.values
    # Columns 0-3 are id/message/original/genre; the rest are labels.
    Y = df[np.delete(df.columns.values, [0,1,2,3])].values
    category_names = np.delete(df.columns.values, [0,1,2,3])
    return X,Y,category_names
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cleandata():\n engine = create_engine('sqlite:///../data/disaster_db.db')\n df = pd.read_sql_table('disaster_db', engine)\n\n return df", "def read_db(self):\n with open(self.filename, 'r') as database:\n data = json.load(database)\n self.data = data", "def read_sql(self):...
[ "0.6639239", "0.65105474", "0.63020116", "0.6263526", "0.61224455", "0.61113507", "0.60522664", "0.59786123", "0.59628767", "0.5925373", "0.59235024", "0.5909515", "0.5893247", "0.5887125", "0.5887125", "0.58866704", "0.58852226", "0.5885046", "0.5872037", "0.5848737", "0.583...
0.0
-1
This function tokenizes the input text and performs necessary cleaning.
def tokenize(text):
    """Tokenize the input text: URLs are replaced with a placeholder,
    then each word token is lemmatized, lower-cased and stripped.

    Parameters
    ----------
    text : str, raw message text.

    Returns
    -------
    list of cleaned token strings.
    """
    url_regex = 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
    # Replace every detected URL with a fixed placeholder token.
    for found in re.findall(url_regex, text):
        text = text.replace(found, "urlplaceholder")
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token).lower().strip()
            for token in word_tokenize(text)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean_text(text):\n global cleaned_text\n # remove numbers\n text_nonum = re.sub(r'\\d+', '', text)\n # remove punctuations and convert characters to lower case\n text_nopunct = \"\".join([char.lower() for char in text_nonum if char not in string.punctuation]) \n # sub...
[ "0.75570065", "0.7090177", "0.7041805", "0.7033546", "0.6981678", "0.696288", "0.6919464", "0.691603", "0.6914198", "0.6900118", "0.6900118", "0.6900118", "0.6900118", "0.6900118", "0.6900118", "0.68975586", "0.6874092", "0.6874056", "0.6841664", "0.6819344", "0.6789257", "...
0.0
-1
This function evaluates the new model and generates classification report containing precision, recall, fscore and accuracy information for individual classes.
def evaluate_model(model, X_test, Y_test, category_names):
    """Print a per-category classification report and accuracy.

    Parameters
    ----------
    model : fitted estimator with a predict method.
    X_test : test feature matrix.
    Y_test : 2-D array of true labels, one column per category.
    category_names : iterable of category names, one per column.
    """
    predictions = model.predict(X_test)
    for idx, name in enumerate(category_names):
        print(name)
        print(classification_report(Y_test[:, idx], predictions[:, idx]))
        print("Accuracy: " + str(accuracy_score(Y_test[:, idx],
                                                predictions[:, idx])))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test = model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))", "def evaluate_model(m...
[ "0.78544277", "0.7645207", "0.7580781", "0.7521916", "0.73978734", "0.7377645", "0.7346552", "0.73453677", "0.73044485", "0.7295265", "0.7271339", "0.7254532", "0.7244622", "0.72413814", "0.7187265", "0.7177157", "0.71704125", "0.71259546", "0.7120069", "0.7119113", "0.708959...
0.70245725
28
This function packages the trained model into the pickle file.
def save_model(model, model_filepath):
    """Serialize the trained model to a pickle file.

    Parameters
    ----------
    model : the trained classifier to persist.
    model_filepath : str, destination path for the pickle file.
    """
    # save the classifier
    with open(model_filepath, 'wb') as out_file:
        pkl.dump(model, out_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_model(self):\n\n # =============================================================\n # Default : pickle the trained model. Change this (and the load\n # function, below) only if the library you used does not support\n # pickling.\n # self.Model_made.save(\"Model_made.h5\")...
[ "0.7843905", "0.7577629", "0.73923266", "0.73051053", "0.727403", "0.7219735", "0.71813995", "0.71445173", "0.7117435", "0.7111384", "0.710956", "0.71073145", "0.7095668", "0.70784366", "0.7034732", "0.7018957", "0.7018404", "0.70135003", "0.70103824", "0.700899", "0.6993362"...
0.67822295
36
Get current date and time string.
def now_short(_format="%Y%m%d-%H%M%S"):
    """Return the current local time formatted per `_format`, with a
    trailing tab so it can prefix log lines directly.

    Parameters
    ----------
    _format : strftime format string for the timestamp.

    Returns
    -------
    str, formatted timestamp followed by a tab character.
    """
    stamp = time.strftime(_format, time.localtime())
    return stamp + "\t"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_current_datetime_string ( ) :\n return get_current_datetime( ).strftime( \"%Y%m%d-%H%M%S\" )", "def get_current_time():\n return time.strftime(\"%Y-%m-%d-%H-%M-%S\", time.localtime())", "def get_now():\r\n now = dt.datetime.now()\r\n now_str = now.strftime(\"%d/%m %H:%M\")\r\n return now_...
[ "0.8746683", "0.8295901", "0.8274269", "0.8213029", "0.81925744", "0.81858903", "0.8185167", "0.81688607", "0.80856556", "0.8042373", "0.7992371", "0.78575885", "0.7848898", "0.78472054", "0.78358054", "0.779838", "0.7789519", "0.77884716", "0.77856934", "0.7751212", "0.77394...
0.0
-1
Log and assert based on condition. If condition True, log message as PASS to testcase log file. If condition False, Assert and Print message with status FAIL.
def logfile_assert_message(s, condition, message):
    """Log and assert based on `condition`.

    When `condition` is truthy, log `message` as PASS to the testcase
    log. Otherwise record a FAIL line and raise AssertionError.

    Parameters
    ----------
    s : object carrying the `log_to_file` buffer.
    condition : the predicate being checked.
    message : str, description of the check.
    """
    if condition:
        log_message(s, message + ": PASS")
        return
    s.log_to_file += now_short() + message + ": FAIL\r\n"
    assert 0, message + ": FAIL\r\n"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test(self):\n self.info(\"LOGGING: Testing log messages\")\n self.debug(\"This is a debugging message\")\n self.info(\"This is an informational message\")\n self.warning(\"This is a warning message\")\n self.error(\"This is an error message\")\n self.critical(\"This is...
[ "0.6288022", "0.61218476", "0.6107574", "0.60947496", "0.5991102", "0.5953129", "0.5885416", "0.587344", "0.5861646", "0.58180374", "0.5753832", "0.5752025", "0.5750852", "0.5717795", "0.57175326", "0.570502", "0.5684395", "0.5653539", "0.5651103", "0.5643155", "0.5638683", ...
0.78782284
1
Write detailed log file for given test.
def write_test_log(t, output_dir):
    """Write a detailed, timestamped log file for the given test.

    Nothing is written unless the test produced log output and actually
    finished (has a `stop_time`).

    Parameters
    ----------
    t : test object exposing log_to_file, start_time, stop_time and
        result_grade.
    output_dir : directory where the log file is created.
    """
    if t.log_to_file is not None and hasattr(t, "stop_time"):
        # File name: <TestClass>-<timestamp>.txt
        filename = type(t).__name__ + "-" + time.strftime("%Y%m%d-%H%M%S") + ".txt"
        testtime = t.stop_time - t.start_time
        with open(os.path.join(output_dir, filename), "w") as log:
            log.write("\t=======================================================")
            log.write(f"\n\tTest case ID: {type(t).__name__}")
            log.write(f"\n\tTest case Description: {type(t).__doc__}")
            log.write("\n\t=======================================================\n")
            log.write(t.log_to_file)
            log.write("\n\t=======================================================")
            log.write(f"\n\t{type(t).__name__} test result: {t.result_grade}")
            log.write(f"\n\tTotal test time: {testtime} seconds")
            log.write("\n\t=======================================================")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def write_test_log(t, output_dir):\n if t.log_to_file is not None and hasattr(t, \"stop_time\"):\n filename = type(t).__name__ + \"-\" + time.strftime(\"%Y%m%d-%H%M%S\") + \".txt\"\n testtime = t.stop_time - t.start_time\n with open(os.path.join(output_dir, filename), \"w\") as log:\n ...
[ "0.77759415", "0.6469761", "0.6405734", "0.63734347", "0.6358866", "0.6292069", "0.627844", "0.6241813", "0.62395686", "0.6190359", "0.618326", "0.6178866", "0.6163366", "0.61453724", "0.61151177", "0.6075739", "0.6044056", "0.6033274", "0.5966068", "0.59578186", "0.5957704",...
0.7797923
0
Write log messages to console and to log file(with timestamp).
def log_message(s, msg, header=False): if s.log_to_file is None: s.log_to_file = "" line_sep = "=" * min(len(msg), 80) full_msg = "\n\t\t" + line_sep + "\n\t\t" + msg + "\n\t\t" + line_sep + "\n" if header: logger.debug("\n\n\t\t\t***" + msg + "***\n\n") s.log_to_file += now_short() + full_msg + "\r\n" else: logger.debug(full_msg) s.log_to_file += now_short() + msg + "\r\n"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _log_to_file(self, message):\n if self.log is not None:\n message = \"[%s] %s\" % (datetime.datetime.utcnow().strftime('%H:%M:%S'), message)\n self.log.write(\"%s\\n\" % (message,))\n self.log.flush()\n print message", "def writeToLogFile(self, event):\n ...
[ "0.76535463", "0.7256361", "0.7207056", "0.71898395", "0.7076056", "0.70700955", "0.70448047", "0.7034109", "0.70302784", "0.70248175", "0.69756", "0.69753206", "0.6955353", "0.6929519", "0.6922523", "0.6889003", "0.68705016", "0.67957056", "0.67859244", "0.6784583", "0.67739...
0.0
-1
Instance initialisation to handle the output logging.
def __init__(self, parent, out, color): self.color = color or "white" self.out = out self.parent = parent self.first_write = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n\n self.logger = logging.getLogger('sound-count')\n\n self.logger.setLevel(logging.DEBUG)\n\n self.formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')\n\n self.stdout_handler = logging.StreamHandler()\n self.stdout_handler.setFormatter(self....
[ "0.7915068", "0.78774565", "0.7758291", "0.7710645", "0.76501584", "0.76135844", "0.75693744", "0.7531081", "0.7513416", "0.7488386", "0.74827266", "0.74706954", "0.7453568", "0.7426138", "0.7380624", "0.73672324", "0.7357756", "0.7321987", "0.73012114", "0.7299283", "0.72518...
0.0
-1
Write or stdout input messages in colored(if defined). Create the file if not already present.
def write(self, string): if self.out is not None: if self.first_write: self.first_write = False string = "\r\n" + string if self.color is not None: self.out.write(colored(string, self.color)) else: self.out.write(string) current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) # check for the split case if ( len(self.parent.log) > 1 and self.parent.log[-1] == "\r" and string[0] == "\n" ): string = f"\n{current_time} {string[1:]}" to_log = re.sub("\r\n", f"\r\n{current_time} ", string) self.parent.log += to_log if hasattr(self.parent, "test_to_log"): self.parent.test_to_log.log += re.sub( r"\r\n\[", f"\r\n{self.parent.test_prefix}: [", to_log )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def color_print(*args, **kwargs):\n file = kwargs.get('file', sys.stdout)\n\n end = kwargs.get('end', '\\n')\n\n write = file.write\n if file.isatty():\n for i in range(0, len(args), 2):\n msg = args[i]\n if i + 1 == len(args):\n color = ''\n else:...
[ "0.664176", "0.58514845", "0.573148", "0.56499183", "0.5588023", "0.5570282", "0.55647844", "0.55574226", "0.5555951", "0.5462549", "0.5433804", "0.542239", "0.53665817", "0.5349737", "0.5295903", "0.5280991", "0.52719", "0.5243998", "0.52435213", "0.52247983", "0.5207164", ...
0.0
-1
Add process time with the log messages.
def extra_log(self, string): if hasattr(self.parent, "log"): self.parent.log += f"\r\n[{time.process_time()}] " self.parent.log += string + "\r\n"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_line_in_log():\n logging.info(' ' + '-' * 60 + '\\n')", "def add_log(self, log):\n log = str(datetime.datetime.now()) + \": \"+log+\"\\n\"\n print(log)\n self.logs.append(log)\n if len(self.logs) > 10:\n self.append_to_logfile()", "def extra_log(self, string):\...
[ "0.67774904", "0.6688818", "0.6468294", "0.64525807", "0.6431555", "0.6324461", "0.6312171", "0.6251729", "0.6245165", "0.62316775", "0.6187182", "0.61647475", "0.6160086", "0.61188847", "0.6091051", "0.60706383", "0.6057694", "0.60288566", "0.60258675", "0.60083884", "0.6007...
0.66905326
1
Flushes the buffer storage in console before pexpect.
def flush(self): if self.out is not None: self.out.flush()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _flush_buffer(self):\n self.pexpect_child.logfile = None\n flushedStuff = \"\"\n while self.pexpect_child.expect([pexpect.TIMEOUT, r\".+\"], timeout=1):\n flushedStuff += self.pexpect_child.match.group(0)\n self.pexpect_child.logfile = self.log_file", "def _flush():\n ...
[ "0.7308612", "0.7289075", "0.7045256", "0.6931466", "0.6834416", "0.6816106", "0.6713305", "0.66353625", "0.6627915", "0.65735334", "0.65354127", "0.64686835", "0.6459456", "0.6449715", "0.6398675", "0.6393837", "0.6390865", "0.6362393", "0.6325738", "0.6310141", "0.6291992",...
0.0
-1
Add and write log messages to a combined list.
def create_file_logs(config, board, tests_to_run, logger): combined_list = [] def add_to_combined_list(log, name, combined_list=combined_list): for line in log.split("\r\n"): try: if line == "": continue if line.startswith("\n"): line = line[1:] if line.startswith(" ["): line = line[1:] ts, text = line.split("]", 1) timestamp = float(ts[1:-1]) else: text = line timestamp = 0.0 combined_list.append( {"time": timestamp, "text": str(text), "name": name} ) except Exception as error: logger.error(error) logger.debug(f"Failed to parse log line = {repr(line)}") idx = 1 console_combined = [] for console in board.consoles: with open(os.path.join(config.output_dir, f"console-{idx}.log"), "w") as clog: clog.write(console.log) add_to_combined_list(console.log, f"console-{idx}") add_to_combined_list(console.log_calls, f"console-{idx}") add_to_combined_list(console.log, "", console_combined) idx = idx + 1 def write_combined_log(combined_list, fname): with open(os.path.join(config.output_dir, fname), "w") as clog: for e in combined_list: try: if e["name"] == "": clog.write(f"[{e['time']}]{repr(e['text'])}\r\n") else: clog.write(f"{e['name']}: [{e['time']}] {repr(e['text'])}\n") except Exception as error: logger.error(error) logger.debug(f"failed to parse line: {repr(e)}") import operator console_combined.sort(key=operator.itemgetter("time")) write_combined_log(console_combined, "console-combined.log") for device in config.devices: with open(os.path.join(config.output_dir, device + ".log"), "w") as clog: d = getattr(config, device) if hasattr(d, "log"): clog.write(d.log) add_to_combined_list(d.log, device) add_to_combined_list(d.log_calls, device) for test in tests_to_run: if hasattr(test, "log") and test.log != "": with open( os.path.join(config.output_dir, f"{test.__class__.__name__}.log"), "w" ) as clog: clog.write(test.log) if hasattr(test, "log_calls"): add_to_combined_list(test.log_calls, test.__class__.__name__) combined_list.sort(key=operator.itemgetter("time")) 
write_combined_log(combined_list, "all.log")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def log_list(self,list_,level='INFO'):\r\n logger.write('\\n'.join(self._log_list(list_)),level)", "def CombineLogFiles(list_of_lists, logger):\n cur_device_log = ['']\n for cur_file, cur_file_lines in list_of_lists:\n # Ignore files with just the logcat header\n if len(cur_file_lines) < 2:\n ...
[ "0.64969385", "0.6466005", "0.62025017", "0.6156893", "0.6059565", "0.6025947", "0.60147756", "0.5977898", "0.5976505", "0.5960277", "0.59393877", "0.5859047", "0.5806393", "0.57645357", "0.5670112", "0.56171393", "0.5605485", "0.55584806", "0.55563587", "0.5537255", "0.55362...
0.5024609
64
Set members based using inventory.
def _configure(self): from .topology import FieldBase Component._configure(self) mapBasis = { "simplex": FieldBase.SIMPLEX_BASIS, "tensor": FieldBase.TENSOR_BASIS, "default": FieldBase.DEFAULT_BASIS, } self.cellBasis = mapBasis[self.inventory.cellBasisStr] mapSpace = { "polynomial": FieldBase.POLYNOMIAL_SPACE, "point": FieldBase.POINT_SPACE, } self.feSpace = mapSpace[self.inventory.feSpaceStr] return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def members(self, members):\n\n self._members = members", "def members(self, members: object):\n\n self._members = members", "def members(self, items):\n pass", "def inventory_items(self, inventory_items):\n\n self._inventory_items = inventory_items", "def inventory(self, invent...
[ "0.6458913", "0.6443288", "0.64391935", "0.63899845", "0.6246272", "0.6177047", "0.6176741", "0.6058151", "0.6013703", "0.6013703", "0.6013703", "0.58846337", "0.56970346", "0.56590396", "0.5608252", "0.5567245", "0.5558885", "0.55403244", "0.5475591", "0.54485273", "0.544436...
0.0
-1
Factory for subfield items.
def subfieldFactory(name): from pythia.pyre.inventory import facility return facility(name, family="subfield", factory=Subfield)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subfield():\n return Subfield()", "def __init__(self, *args, **kwargs):\n super(ListFieldType, self).__init__(*args, **kwargs)\n\n self.item_info = self.field_info.get('items')", "def create_subspecialty(sub_data):\n return get_or_create_object(sub_data, Subspecialty)", "def test_cust...
[ "0.7079664", "0.60948706", "0.57870966", "0.5703622", "0.5617625", "0.56146836", "0.56075746", "0.5595222", "0.554094", "0.546303", "0.54049605", "0.53629017", "0.53175354", "0.5301199", "0.5221799", "0.5221799", "0.51944876", "0.51886076", "0.5142889", "0.5127228", "0.512640...
0.7673134
0
Factory associated with Subfield.
def subfield(): return Subfield()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subfieldFactory(name):\n from pythia.pyre.inventory import facility\n return facility(name, family=\"subfield\", factory=Subfield)", "def add_sub_factories(self) -> None:\n for field in get_model_fields(self.model, base=False, foreign=True, m2m=False):\n if not hasattr(self.factory, f...
[ "0.8500797", "0.66517395", "0.6441253", "0.6377734", "0.59815156", "0.58264554", "0.5662838", "0.5660655", "0.56087625", "0.5576944", "0.5576053", "0.5484294", "0.54726523", "0.5440579", "0.5439738", "0.5400487", "0.528335", "0.52721506", "0.52566725", "0.52566725", "0.519902...
0.77598864
1
Gets the short path name of a given long path.
def get_short_path_name(long_name: str): output_buf_size = _GetShortPathNameW(long_name, None, 0) if output_buf_size <= 0: return None output_buf = ctypes.create_unicode_buffer(output_buf_size) needed = _GetShortPathNameW(long_name, output_buf, output_buf_size) assert 0 < needed < output_buf_size return output_buf.value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_short_path_name(long_name):\n output_buf_size = 0\n while True:\n output_buf = ctypes.create_unicode_buffer(output_buf_size)\n needed = _GetShortPathNameW(long_name, output_buf, output_buf_size)\n if output_buf_size >= needed:\n return output_buf.value\n else:\n...
[ "0.8374927", "0.692538", "0.6779394", "0.6752157", "0.6703952", "0.65418506", "0.6538999", "0.6492663", "0.64758646", "0.6439164", "0.63933027", "0.6386224", "0.636427", "0.6346333", "0.6328241", "0.63225263", "0.6192011", "0.61536336", "0.6148895", "0.6138921", "0.61234444",...
0.8415998
0
get img_type file list such as get jpg files
def get_path_list(self, suffix=img_type): img_list = list(filter(lambda x: x.endswith(suffix), self.path_list)) return img_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_input_files():\n\n raw_list = abspath(get('input_files'))\n valid_types = ['image/jpeg', 'image/tiff']\n images = [x for x in raw_list if mimetypes.guess_type(x)[0] in valid_types]\n print('* Input images: {}'.format(len(images)))\n return images", "def img_extensions():\n return [\".JP...
[ "0.7476918", "0.7081896", "0.6956454", "0.6846246", "0.6845208", "0.6776346", "0.6740786", "0.6740108", "0.66788965", "0.6673548", "0.6597972", "0.6581447", "0.6568904", "0.65615165", "0.6554064", "0.6504134", "0.64887166", "0.6465466", "0.6461444", "0.643755", "0.64279115", ...
0.0
-1
open jpg file or merge several jpg file then open it
def execute_file(self, event=None): file_list = self.get_path_list() print(file_list) if not file_list: return # merge image # 修复内存泄露的bug,由于没有清除之前打开的图片,第二次打开的图片仍然为之前的图片 try: self.photos.destroy() except: pass self.photos.imgs = file_list merged_photo = self.photos.merge_photos() # show image try: window.destroy() except: import traceback traceback.print_exc() window.build_img_canvas() window.show_img_in_canvas(merged_photo)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_frame(path,number):\n num=str(number).zfill(3) #Zero filling\n name = glob.glob(path+\"/*\"+num+\"*\")\n if len(name)==0:\n name = glob.glob(path+\"/\"+str(number)+\".png\")\n if len(name)>1:\n print \"too many matches \",len(name),\" found\"\n name = name[0]\n img = Imag...
[ "0.58855605", "0.5591409", "0.55525833", "0.5439224", "0.5414354", "0.5400621", "0.5384213", "0.53814346", "0.5351015", "0.5304343", "0.52945495", "0.5280849", "0.52760863", "0.52742296", "0.527252", "0.5259538", "0.5245993", "0.52299297", "0.5225909", "0.52070266", "0.519107...
0.60279435
0
Scroll canvas horizontally and redraw the image
def __scroll_x(self, *args, **kwargs): self.canvas_image.xview(*args) # scroll horizontally self.__show_image() # redraw the image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __scroll_x(self, *args, **kwargs):\n self.canvas.xview(*args) # scroll horizontally\n self.__show_image() # redraw the image", "def continuous_scroll(self, context):\n\n self.drawing.redraw_canvas(self.dy)\n \n return True", "def refresh(self):\n\n # Delete old i...
[ "0.7781856", "0.6695677", "0.6625122", "0.66034824", "0.64015204", "0.6336621", "0.62105596", "0.6134658", "0.6117314", "0.6087952", "0.60482293", "0.5974955", "0.59536266", "0.59532565", "0.5856979", "0.58364534", "0.58310264", "0.58037436", "0.5784162", "0.5746283", "0.5735...
0.78580177
0
Scroll canvas vertically and redraw the image
def __scroll_y(self, *args, **kwargs): self.canvas_image.yview(*args) # scroll vertically self.__show_image() # redraw the image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __scroll_y(self, *args, **kwargs):\n self.canvas.yview(*args) # scroll vertically\n self.__show_image() # redraw the image", "def refresh(self):\n\n # Delete old image (if needed) \n if self.canvas_image_id:\n self.canvas.delete(self.canvas_image_id)\n if debug...
[ "0.78763294", "0.7029265", "0.6990234", "0.6822299", "0.67670774", "0.6654772", "0.6486098", "0.63472086", "0.6317045", "0.6261508", "0.62313896", "0.61648124", "0.6163939", "0.613647", "0.61271584", "0.61243415", "0.61053514", "0.6080344", "0.60629725", "0.606112", "0.605199...
0.79207057
0
Remember previous coordinates for scrolling with the mouse
def __move_from(self, event): self.canvas_image.scan_mark(event.x, event.y) self.from_coord = (event.x, event.y)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def store_current_position_as_previous(self):\n pos = self.get_current_position()\n self.previous_xloc = pos[0]\n self.previous_yloc = pos[1]\n self.previous_zloc = pos[2]\n return pos", "def mousePosition(self):", "def new_previous_position(self, pos):\n self.previous...
[ "0.68040586", "0.66060555", "0.65336144", "0.647154", "0.61659133", "0.61659133", "0.61084694", "0.60922366", "0.60622627", "0.60567003", "0.5945194", "0.59199005", "0.5912067", "0.5889155", "0.58295685", "0.5828022", "0.5808109", "0.57255214", "0.5721227", "0.5708124", "0.57...
0.0
-1
Drag (move) canvas to the new position
def __move_to(self, event): self.canvas_image.scan_dragto(event.x, event.y, gain=1) self.to_coord = (event.x, event.y) self.__show_image() # zoom tile and show it on the canvas
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_move(self, event):\n self.canvas.scan_dragto(event.x, event.y, gain=1)", "def drag(self,x,y):\n self.x=x\n self.y=y", "def drag(self, event):\n self.yview('scroll', self.ypos - event.y, 'units')\n self.xview('scroll', self.xpos - event.x, 'units')\n self.ypos ...
[ "0.7642598", "0.7116813", "0.6957533", "0.6895421", "0.6839819", "0.6795433", "0.6787461", "0.66360825", "0.6598902", "0.65643734", "0.65615577", "0.6521887", "0.64005405", "0.6353929", "0.63048756", "0.62917954", "0.627049", "0.6246554", "0.6198148", "0.6198148", "0.6198148"...
0.67894125
6
Checks if the point (x,y) is outside the image area
def outside(self, x, y): bbox = self.canvas_image.coords(self.container) # get image area if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]: return False # point (x,y) is inside the image area else: return True # point (x,y) is outside the image area
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def outside(self, x, y):\n bbox = self.canvas.coords(self.container) # get image area\n if bbox[0] < x < bbox[2] and bbox[1] < y < bbox[3]:\n return False # point (x,y) is inside the image area\n else:\n return True # point (x,y) is outside the image area", "def poin...
[ "0.87402356", "0.7378075", "0.7371467", "0.7322463", "0.7271476", "0.71669763", "0.7123005", "0.70656526", "0.7022389", "0.7014238", "0.696292", "0.6954585", "0.69515514", "0.69209486", "0.6896473", "0.68796045", "0.6856969", "0.68375915", "0.67989755", "0.6798606", "0.678162...
0.8744287
0
Zoom with mouse wheel
def __wheel(self, event): x = self.canvas_image.canvasx(event.x) # get coordinates of the event on the canvas y = self.canvas_image.canvasy(event.y) if self.outside(x, y): return # zoom only inside image area scale = 1.0 # Respond to Linux (event.num) or Windows (event.delta) wheel event if event.num == 5 or event.delta == -120: # scroll down, smaller if round(self.__min_side * self.imscale) < 30: return # image is less than 30 pixels self.imscale /= self.__delta scale /= self.__delta if event.num == 4 or event.delta == 120: # scroll up, bigger i = min(self.canvas_image.winfo_width(), self.canvas_image.winfo_height()) >> 1 if i < self.imscale: return # 1 pixel is bigger than the visible area self.imscale *= self.__delta scale *= self.__delta # Take appropriate image from the pyramid k = self.imscale * self.__ratio # temporary coefficient self.__curr_img = min((-1) * int(math.log(k, self.__reduction)), len(self.__pyramid) - 1) self.__scale = k * math.pow(self.__reduction, max(0, self.__curr_img)) # self.canvas_image.scale('all', x, y, scale, scale) # rescale all objects # Redraw some figures before showing image on the screen self.redraw_figures() # method for child classes self.__show_image()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_wheel(self, event):\n delta = event.delta[1]\n if delta > 0: # Zoom in\n factor = 0.9\n elif delta < 0: # Zoom out\n factor = 1 / 0.9\n for _ in range(int(abs(delta))):\n self.zoom(factor, event.pos)", "def set_zooming_wheel(self):\n ...
[ "0.8572107", "0.8045329", "0.80039686", "0.7584533", "0.7354916", "0.7326063", "0.7284798", "0.72583723", "0.72249573", "0.7202118", "0.72010577", "0.719273", "0.71766204", "0.705019", "0.70209515", "0.6953993", "0.6904493", "0.6904493", "0.68883395", "0.68644863", "0.6851454...
0.7094766
13
Scrolling with the keyboard. Independent from the language of the keyboard, CapsLock, +, etc.
def __keystroke(self, event): if event.state - self.__previous_state == 4: # means that the Control key is pressed pass # do nothing if Control key is pressed else: self.__previous_state = event.state # remember the last keystroke state # Up, Down, Left, Right keystrokes if event.keycode in [68, 39, 102]: # scroll right, keys 'd' or 'Right' self.__scroll_x('scroll', 1, 'unit', event=event) elif event.keycode in [65, 37, 100]: # scroll left, keys 'a' or 'Left' self.__scroll_x('scroll', -1, 'unit', event=event) elif event.keycode in [87, 38, 104]: # scroll up, keys 'w' or 'Up' self.__scroll_y('scroll', -1, 'unit', event=event) elif event.keycode in [83, 40, 98]: # scroll down, keys 's' or 'Down' self.__scroll_y('scroll', 1, 'unit', event=event)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_key(self, event):\n if event.key() == QtCore.Qt.Key_Up:\n self.model.channel_Scroll_Up('page')\n elif event.key() == QtCore.Qt.Key_PageUp:\n self.model.channel_Scroll_Up('page')\n elif event.key() == QtCore.Qt.Key_Down:\n self.model.channel_Scroll_Down('...
[ "0.6526636", "0.6466856", "0.61477494", "0.60642177", "0.60475975", "0.6046673", "0.5949585", "0.59238994", "0.59151155", "0.5871811", "0.5852923", "0.57747257", "0.57678497", "0.5751938", "0.5697251", "0.5689333", "0.5646427", "0.5634948", "0.5631655", "0.56251585", "0.56207...
0.563615
17
Dummy function to redraw figures in the children classes
def redraw_figures(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redraw(event):\n if np.size(plt.get_figlabels()):\n #Need to check if figure is closed or not and only then do the following\n #operations. Else, the following operations will create a new figure\n ax.clear()\n drawRectangle(ax)\n fig.canvas.draw()\n else:\n pas...
[ "0.69585615", "0.68990314", "0.6801519", "0.67597973", "0.6633574", "0.64148325", "0.6317707", "0.6250047", "0.6198424", "0.61798114", "0.61377364", "0.6077331", "0.60669315", "0.60655534", "0.60549927", "0.6053331", "0.60529304", "0.6050833", "0.604263", "0.6037915", "0.6037...
0.78338176
1
Crop rectangle from the image and return it
def crop(self, bbox): return self.__pyramid[0].crop(bbox)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crop(self, image):\n\t\treturn image.copy()[self.ymin:self.ymax,self.xmin:self.xmax]", "def crop_bounding_box(im, x, y, w, h):\n return im[y:y+h, x:x+w]", "def doCrop(image, x, y, w, h):\n\tcrop_height = int((config.FACE_HEIGHT / float(config.FACE_WIDTH)) * w)\n\tmidy = y + h/2\n\ty1 = max(0, midy-c...
[ "0.7942811", "0.79130894", "0.7909533", "0.7762608", "0.77604425", "0.768507", "0.7674636", "0.7651629", "0.7633949", "0.76148933", "0.7608375", "0.75981236", "0.75507385", "0.74944735", "0.74684227", "0.7441258", "0.7434214", "0.7434214", "0.741522", "0.741522", "0.73019385"...
0.7051762
31
Show image on the Canvas. Implements correct image zoom almost like in Google Maps
def __show_image(self): box_image = self.canvas_image.coords(self.container) # get image area box_canvas = (self.canvas_image.canvasx(0), # get visible area of the canvas self.canvas_image.canvasy(0), self.canvas_image.canvasx(self.canvas_image.winfo_width()), self.canvas_image.canvasy(self.canvas_image.winfo_height())) self.box_img_int = tuple(map(int, box_image)) # convert to integer or it will not work properly # Get scroll region box box_scroll = [min(self.box_img_int[0], box_canvas[0]), min(self.box_img_int[1], box_canvas[1]), max(self.box_img_int[2], box_canvas[2]), max(self.box_img_int[3], box_canvas[3])] # Horizontal part of the image is in the visible area if box_scroll[0] == box_canvas[0] and box_scroll[2] == box_canvas[2]: box_scroll[0] = self.box_img_int[0] box_scroll[2] = self.box_img_int[2] # Vertical part of the image is in the visible area if box_scroll[1] == box_canvas[1] and box_scroll[3] == box_canvas[3]: box_scroll[1] = self.box_img_int[1] box_scroll[3] = self.box_img_int[3] # Convert scroll region to tuple and to integer self.canvas_image.configure(scrollregion=tuple(map(int, box_scroll))) # set scroll region x1 = max(box_canvas[0] - box_image[0], 0) # get coordinates (x1,y1,x2,y2) of the image tile y1 = max(box_canvas[1] - box_image[1], 0) x2 = min(box_canvas[2], box_image[2]) - box_image[0] y2 = min(box_canvas[3], box_image[3]) - box_image[1] if int(x2 - x1) > 0 and int(y2 - y1) > 0: # show image if it in the visible area image = self.__pyramid[max(0, self.__curr_img)].crop( # crop current img from pyramid (int(x1 / self.__scale), int(y1 / self.__scale), int(x2 / self.__scale), int(y2 / self.__scale))) # imagetk = ImageTk.PhotoImage(image.resize((int(x2 - x1), int(y2 - y1)), self.__filter)) self.imageid = self.canvas_image.create_image(max(box_canvas[0], self.box_img_int[0]), max(box_canvas[1], self.box_img_int[1]), anchor='nw', image=imagetk) self.canvas_image.lower(self.imageid) # set image into background self.canvas_image.imagetk = 
imagetk # keep an extra reference to prevent garbage-collection
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __draw_image(self):\n if self.image_name is not None:\n img = mpimg.imread(self.image_name)\n extent = (0.5, self.xmax+0.5, -0.5, self.ymax-0.5)\n self.ax.imshow(img, extent=extent, origin='lower',\n alpha=self.image_alpha)", "def showImage(se...
[ "0.6751016", "0.6738683", "0.66990304", "0.66660595", "0.65815693", "0.64749587", "0.6393063", "0.63925314", "0.63365763", "0.6326308", "0.62597394", "0.6224787", "0.62157637", "0.6211517", "0.6196586", "0.61757445", "0.6165809", "0.61133194", "0.6083976", "0.6078011", "0.607...
0.636585
8
Provides consistant naming to statistic descriptors
def stat_by_group(stat: str, group: str) -> str: return f'{stat} by {group}'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DescriptiveName(self):\r\n\t\treturn self._get_attribute('descriptiveName')", "def name(self):\n return 'data_extraction_for_' + '_'.join(self.names).lower()", "def DescriptiveName(self):\n return self._get_attribute('descriptiveName')", "def name(self):\n return '{} {} {}'.format(se...
[ "0.667977", "0.6442585", "0.6435272", "0.6424433", "0.6370413", "0.6370413", "0.6370413", "0.6370413", "0.6370413", "0.6370413", "0.6352662", "0.63082206", "0.62384665", "0.6236644", "0.62084746", "0.62084746", "0.62084746", "0.62084746", "0.61990166", "0.6192866", "0.6161828...
0.0
-1
Repeat the retrieval of the metrics of a metrics context until at least one of the specified metric group names has data. Returns the MetricGroupValues object for the metric group that has data.
def wait_for_metrics(metric_context, metric_groups): retries = 0 got_data = False while not got_data: mr_str = metric_context.get_metrics() mr = zhmcclient.MetricsResponse(metric_context, mr_str) for mg_values in mr.metric_group_values: if mg_values.name in metric_groups: got_data = True if DEBUG_METRICS_RESPONSE: print("Debug: MetricsResponse:") print(mr_str) break if not got_data: if retries > GET_METRICS_MAX_RETRIES: return None time.sleep(GET_METRICS_RETRY_TIME) # avoid hot spin loop retries += 1 return mg_values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def result_group(group_id, failures=False, wait=0, count=None, cached=Conf.CACHED):\n if cached:\n return result_group_cached(group_id, failures, wait, count)\n start = time.time()\n if count:\n while 1:\n if count_group(group_id) == count or wait and (time.time() - start) * 1000 ...
[ "0.55873525", "0.55362415", "0.54371434", "0.51211834", "0.50748", "0.50633067", "0.50494426", "0.50236344", "0.5014017", "0.49106795", "0.4888227", "0.48790106", "0.48743725", "0.4858078", "0.48575234", "0.48438308", "0.48435786", "0.4804029", "0.47981688", "0.47835502", "0....
0.72703934
0
Print a metric group for a list of resources in the desired output format.
def print_object_values( object_values_list, metric_group_definition, resource_classes, output_format, transposed): if output_format in TABLE_FORMATS: if output_format == 'table': output_format = 'psql' print_object_values_as_table( object_values_list, metric_group_definition, resource_classes, output_format, transposed) elif output_format == 'json': print_object_values_as_json( object_values_list, metric_group_definition, resource_classes) else: raise InvalidOutputFormatError(output_format)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter):\n\n if not isinstance(metric_groups, (list, tuple)):\n metric_groups = [metric_groups]\n\n properties = {\n 'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY,\n 'metric-groups': metric_groups,\n }\n ...
[ "0.751741", "0.63145715", "0.5910002", "0.59023666", "0.59002185", "0.5885719", "0.5866339", "0.5861136", "0.58524024", "0.5828134", "0.57840455", "0.5753333", "0.57369936", "0.57281035", "0.57204556", "0.56444025", "0.56444025", "0.557295", "0.5570822", "0.5553722", "0.55439...
0.48897582
93
Print a list of object values in a tabular output format.
def print_object_values_as_table( object_values_list, metric_group_definition, resource_classes, table_format, transposed): if object_values_list: metric_definitions = metric_group_definition.metric_definitions sorted_metric_names = [md.name for md in sorted(metric_definitions.values(), key=lambda md: md.index)] table = list() headers = list() for i, ov in enumerate(object_values_list): row = list() # Add resource names up to the CPC res = ov.resource while res: if i == 0: name_prop = res.manager.class_name + '-name' headers.insert(0, name_prop) row.insert(0, res.name) res = res.manager.parent # CPC has None as parent # Add the metric values for name in sorted_metric_names: if i == 0: m_def = metric_definitions[name] header_str = name if m_def.unit: header_str += u" [{}]".format(m_def.unit) headers.append(header_str) value = ov.metrics[name] row.append(value) table.append(row) # Sort the table by the resource name columns n_sort_cols = len(resource_classes) table = sorted(table, key=lambda row: row[0:n_sort_cols]) if transposed: table.insert(0, headers) table = [list(col) for col in zip(*table)] headers = [] if not table: click.echo("No {} resources with metrics data for metric group {}.". format(metric_group_definition.resource_class, metric_group_definition.name)) else: click.echo(tabulate(table, headers, tablefmt=table_format))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_print_object(self, obj_name, header, values_list):\n def terminal_size():\n import fcntl, termios, struct\n h, w, hp, wp = struct.unpack('HHHH',\n fcntl.ioctl(0, termios.TIOCGWINSZ,\n ...
[ "0.7586605", "0.75449604", "0.72875947", "0.71443856", "0.7099981", "0.6986745", "0.6952594", "0.6931348", "0.6849189", "0.68258506", "0.6799973", "0.67922515", "0.67875236", "0.67875236", "0.6745301", "0.67065424", "0.66971654", "0.6696674", "0.6684991", "0.66635174", "0.664...
0.7025772
5
Print a list of object values in JSON output format.
def print_object_values_as_json( object_values_list, metric_group_definition, resource_classes): if object_values_list: metric_definitions = metric_group_definition.metric_definitions sorted_metric_names = [md.name for md in sorted(metric_definitions.values(), key=lambda md: md.index)] json_obj = list() for i, ov in enumerate(object_values_list): resource_obj = OrderedDict() # Add resource names up to the CPC res = ov.resource while res: name_prop = res.manager.class_name + '-name' resource_obj[name_prop] = res.name res = res.manager.parent # CPC has None as parent # Add the metric values for name in sorted_metric_names: m_def = metric_definitions[name] value = ov.metrics[name] resource_obj[name] = OrderedDict(value=value, unit=m_def.unit) json_obj.append(resource_obj) json_str = json.dumps(json_obj) click.echo(json_str)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jprint(obj):\n #\n text = json.dumps(obj, sort_keys=True, indent=4)\n print(text)", "def print_json(obj):\n print(json.dumps(obj, sort_keys=True, indent=2, separators=(',', ': ')))", "def print_json(obj):\n print(json.dumps(obj, indent=2))", "def pprint(self,obj):\n return(json.dump...
[ "0.75082326", "0.7412574", "0.7307961", "0.7016454", "0.6846915", "0.6846915", "0.6722339", "0.6652273", "0.6616749", "0.660433", "0.65436924", "0.65436924", "0.65428746", "0.65419024", "0.64811295", "0.64317226", "0.64314044", "0.64107096", "0.6371735", "0.6338294", "0.62911...
0.6860965
4
Retrieve and print metric groups.
def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter): if not isinstance(metric_groups, (list, tuple)): metric_groups = [metric_groups] properties = { 'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY, 'metric-groups': metric_groups, } mc = client.metrics_contexts.create(properties) mg_values = wait_for_metrics(mc, metric_groups) filtered_object_values = list() # of MetricObjectValues if not mg_values: mg_name = metric_groups[0] # just pick any res_class = zhmcclient._metrics._resource_class_from_group(mg_name) mg_def = zhmcclient.MetricGroupDefinition( name=mg_name, resource_class=res_class, metric_definitions=[]) else: mg_def = mc.metric_group_definitions[mg_values.name] filter_cpc = None filter_partition = None filter_lpar = None filter_adapter = None filter_nic = None for r_class, r_name in resource_filter: if r_class == 'cpc' and r_name: filter_cpc = client.cpcs.find(name=r_name) elif r_class == 'partition' and r_name: assert filter_cpc filter_partition = filter_cpc.partitions.find(name=r_name) elif r_class == 'logical-partition' and r_name: assert filter_cpc filter_lpar = filter_cpc.lpars.find(name=r_name) elif r_class == 'adapter' and r_name: assert filter_cpc filter_adapter = filter_cpc.adapters.find(name=r_name) elif r_class == 'nic' and r_name: assert filter_partition filter_nic = filter_partition.nics.find(name=r_name) resource_class = mg_def.resource_class for ov in mg_values.object_values: included = False if resource_class == 'cpc': if not filter_cpc: included = True elif ov.resource_uri == filter_cpc.uri: included = True elif resource_class == 'partition': if not filter_cpc: included = True elif ov.resource.manager.cpc.uri == filter_cpc.uri: if not filter_partition: included = True elif ov.resource_uri == filter_partition.uri: included = True elif resource_class == 'logical-partition': if not filter_cpc: included = True elif ov.resource.manager.cpc.uri == filter_cpc.uri: if not filter_lpar: included = True elif 
ov.resource_uri == filter_lpar.uri: included = True elif resource_class == 'adapter': if not filter_cpc: included = True elif ov.resource.manager.cpc.uri == filter_cpc.uri: if not filter_adapter: included = True elif ov.resource_uri == filter_adapter.uri: included = True elif resource_class == 'nic': if not filter_cpc: included = True elif ov.resource.manager.partition.manager.cpc.uri == \ filter_cpc.uri: if not filter_partition: included = True elif ov.resource.manager.partition.uri == \ filter_partition.uri: if not filter_nic: included = True elif ov.resource_uri == filter_nic.uri: included = True else: raise ValueError( "Invalid resource class: {}".format(resource_class)) if included: filtered_object_values.append(ov) resource_classes = [f[0] for f in resource_filter] cmd_ctx.spinner.stop() print_object_values(filtered_object_values, mg_def, resource_classes, cmd_ctx.output_format, cmd_ctx.transpose) mc.delete()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_groups(self):\n\n for counter, label in enumerate(self.exp_labels_list):\n print('Key {}: {} \\n'.format(str(counter), label))", "def metrics_group():", "def groups(self):\n #return self.get('{}/groups'.format(ApiVersion.A1.value))\n return self.get('{}/groups'.format(ApiVersio...
[ "0.72279793", "0.70860595", "0.674111", "0.66355723", "0.64703333", "0.6281449", "0.62582123", "0.62421024", "0.62346387", "0.6213498", "0.61584836", "0.61584836", "0.6125847", "0.6110818", "0.61033535", "0.60848945", "0.6041761", "0.6026084", "0.599124", "0.5982218", "0.5980...
0.73456234
0
Command group for reporting metrics. In addition to the commandspecific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_group():
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cmd_help(args):", "def help_opt(self):\n print(OPTIONS)", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\")...
[ "0.589855", "0.58264685", "0.58036107", "0.57175773", "0.56941164", "0.56822515", "0.56753105", "0.567416", "0.56666476", "0.5660978", "0.5652861", "0.56472456", "0.56321657", "0.56308264", "0.55924976", "0.55864775", "0.5582454", "0.5558841", "0.55458933", "0.55455136", "0.5...
0.0
-1
Report usage overview metrics for CPCs. In addition to the commandspecific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_cpc(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usag...
[ "0.6095599", "0.59995025", "0.5967684", "0.58918285", "0.5879187", "0.58760214", "0.58532023", "0.58369166", "0.58359265", "0.5832341", "0.58278644", "0.58267254", "0.582368", "0.57165456", "0.5707957", "0.56925315", "0.5681756", "0.5681756", "0.5672487", "0.5671114", "0.5671...
0.60818744
1
Report usage metrics for active partitions of CPCs in DPM mode. In addition to the commandspecific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_partition(cmd_ctx, cpc, partition, **options): cmd_ctx.execute_cmd( lambda: cmd_metrics_partition(cmd_ctx, cpc, partition, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(la...
[ "0.59879315", "0.59793746", "0.5929683", "0.5513063", "0.5448064", "0.54282194", "0.54047555", "0.5269779", "0.5266438", "0.5178241", "0.5156107", "0.5139908", "0.5117616", "0.51164675", "0.5078646", "0.50614786", "0.5020368", "0.5004413", "0.5001407", "0.4972005", "0.4944734...
0.5978439
2
Report usage metrics for active LPARs of CPCs in classic mode. In addition to the commandspecific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_lpar(cmd_ctx, cpc, lpar, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_lpar(cmd_ctx, cpc, lpar, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of dat...
[ "0.56626374", "0.5658966", "0.5620566", "0.55107105", "0.5491458", "0.5334808", "0.53031045", "0.52814096", "0.5276708", "0.52741003", "0.52695084", "0.5258748", "0.5241154", "0.5233588", "0.51980907", "0.5171323", "0.5130707", "0.5097048", "0.5062551", "0.504902", "0.5048986...
0.54307204
5
Report usage metrics for active adapters of CPCs in DPM mode. In addition to the commandspecific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_adapter(cmd_ctx, cpc, adapter, **options): cmd_ctx.execute_cmd( lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n c...
[ "0.6317614", "0.59251046", "0.58999896", "0.58430314", "0.57148993", "0.56526655", "0.547031", "0.53368515", "0.5278745", "0.52745694", "0.52628577", "0.51370335", "0.5132186", "0.51290196", "0.512005", "0.5110605", "0.51074606", "0.5080317", "0.5074927", "0.50636065", "0.504...
0.643978
0
Report usage metrics for all channels of CPCs in classic mode. In addition to the commandspecific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_channel(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: us...
[ "0.59195566", "0.5586245", "0.5444794", "0.54191935", "0.53922415", "0.5380868", "0.53543603", "0.52876854", "0.52462244", "0.51891714", "0.5112807", "0.51073605", "0.50244516", "0.49929634", "0.49870348", "0.4977524", "0.49535966", "0.4952735", "0.4930333", "0.4928074", "0.4...
0.6753259
0
Report environmental and power consumption metrics for CPCs. In addition to the commandspecific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_env(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd...
[ "0.629929", "0.6273303", "0.60736", "0.5926187", "0.5641638", "0.55025315", "0.5396064", "0.53120846", "0.52493966", "0.5243705", "0.523837", "0.5222159", "0.51953936", "0.519269", "0.5182496", "0.5178438", "0.51700777", "0.5129585", "0.51175195", "0.51148605", "0.51081395", ...
0.649805
0
Report processor usage metrics for CPCs. In addition to the commandspecific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_proc(cmd_ctx, cpc, **options): cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_c...
[ "0.65378803", "0.627548", "0.6020114", "0.5702169", "0.56018233", "0.55116755", "0.55000883", "0.5320139", "0.5315897", "0.5299815", "0.52958316", "0.5274619", "0.52392995", "0.52203125", "0.52038133", "0.5199431", "0.5187151", "0.5178949", "0.5170392", "0.5164013", "0.510556...
0.66221523
0