query
stringlengths
9
3.4k
document
stringlengths
9
87.4k
metadata
dict
negatives
listlengths
4
101
negative_scores
listlengths
4
101
document_score
stringlengths
3
10
document_rank
stringclasses
102 values
The following function is used to normalize the numbers in a text. First, the ordinal suffixes "th, st, nd, rd" are removed.
def clean_numbers(self, x):
    """Normalize numbers in a text string.

    Ordinal suffixes ("th", "st", "nd", "rd") directly following a number
    are stripped, and standalone numbers of two or more digits are replaced
    by a same-length run of '#' characters (e.g. "2020" -> "####").
    Numbers attached to words (e.g. "G-20") are left untouched, and
    single-digit numbers are kept because word vectors cover them.

    Parameters
    ----------
    x : str
        Input text.

    Returns
    -------
    str
        Text with ordinal suffixes removed and long numbers masked.
    """
    # Remove "th"/"rd"/"st"/"nd" only when directly preceded by digits.
    # (The previous guard-then-global-sub approach stripped these letter
    # pairs from ordinary words such as "smith" whenever any "<digit> th"
    # occurred anywhere in the string.)
    for suffix in ('th', 'rd', 'st', 'nd'):
        x = re.sub(r'(?<=\d)\s*' + suffix + r'\b', ' ', x)

    # Replace standalone numbers with two or more digits by '#' runs.
    # This deliberately does not touch numbers linked to words like "G-20".
    if re.search(r'^\d+\s+|\s+\d+\s+|\s+\d+$', x):
        x = re.sub(r'^[0-9]{5,}\s+|\s+[0-9]{5,}\s+|\s+[0-9]{5,}$', ' ##### ', x)
        x = re.sub(r'^[0-9]{4}\s+|\s+[0-9]{4}\s+|\s+[0-9]{4}$', ' #### ', x)
        x = re.sub(r'^[0-9]{3}\s+|\s+[0-9]{3}\s+|\s+[0-9]{3}$', ' ### ', x)
        x = re.sub(r'^[0-9]{2}\s+|\s+[0-9]{2}\s+|\s+[0-9]{2}$', ' ## ', x)
        # We do include the range from 1 to 10 as all word-vectors include them:
        # x = re.sub('[0-9]{1}', '#', x)
    return x
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _format_numbers(smth: any) -> any:\n if isinstance(smth, int):\n return float(smth)\n elif smth == 'N.V.':\n return 0.0 # meaning, wine is of type 'non-vintage' and is made of grapes from more than one harvest\n else:\n return smth", "def ordinal_filter(...
[ "0.67999727", "0.62027526", "0.61682063", "0.6136683", "0.6127746", "0.6096326", "0.60839456", "0.6068743", "0.6052151", "0.5988611", "0.5986997", "0.59576005", "0.59576005", "0.59576005", "0.5921066", "0.59175247", "0.59080505", "0.59045625", "0.59045625", "0.58787817", "0.5...
0.7203597
0
This function is used to replace "yr,yrs" by year and "hr,hrs" by hour.
def year_and_hour(self, text):
    """Replace duration tokens by their unit word.

    Any "<number> yr" / "<number> yrs" occurrence becomes "year" and any
    "<number> hr" / "<number> hrs" occurrence becomes "hour"; the number
    itself is consumed as part of the replacement.

    Parameters
    ----------
    text : str
        Input text.

    Returns
    -------
    str
        Text with yr/yrs/hr/hrs duration expressions normalized.
    """
    # re.sub is a no-op when the pattern does not occur, so the previous
    # findall guards before each substitution were redundant.
    replacements = (
        (r'\b\d+\s*yr\b', 'year'),
        (r'\b\d+\s*yrs\b', 'year'),
        (r'\b\d+\s*hr\b', 'hour'),
        (r'\b\d+\s*hrs\b', 'hour'),
    )
    for pattern, unit in replacements:
        text = re.sub(pattern, unit, text)
    return text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace_time(text, ori):\n r = ori\n if '**' in text:\n r = 'xxhour'\n else:\n try:\n # handle exceptions with custom rules\n f, s = text.split()\n s = 'am' if s[0] == 'a' else 'pm'\n l, r = f.split(':')\n if l == '' or l == '00':\n ...
[ "0.61535865", "0.54928744", "0.54547983", "0.5405917", "0.5310719", "0.5267757", "0.52620614", "0.5259947", "0.51755303", "0.5144724", "0.51272285", "0.5085674", "0.50707275", "0.50623596", "0.5054484", "0.505168", "0.5038544", "0.50103486", "0.500148", "0.498647", "0.4965652...
0.7474295
0
Loads a vocabulary file into a dictionary.
def load_vocab(vocab_file):
    """Load a vocabulary file into an ordered token -> index dictionary.

    Each line of the file is stripped of surrounding whitespace and mapped
    to its 0-based line number; file order is preserved.

    Parameters
    ----------
    vocab_file : str
        Path to the vocabulary file (one token per line, UTF-8 encoded).

    Returns
    -------
    collections.OrderedDict
        Mapping from token to its index in the file.
    """
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # Iterating the file yields one line per token; enumerate supplies
        # the index, replacing the manual readline()/counter loop.
        for index, token in enumerate(reader):
            vocab[token.strip()] = index
    return vocab
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.ap...
[ "0.840865", "0.8027993", "0.80113626", "0.79590535", "0.77948564", "0.77901465", "0.77684295", "0.7714422", "0.76952124", "0.758633", "0.7529703", "0.7526938", "0.7503534", "0.74811476", "0.7436023", "0.7410485", "0.7398859", "0.73721033", "0.73700345", "0.73358095", "0.73308...
0.7847063
5
Performs an HTTP request set in 'method'. Returns requests object The method will try to catch some of the typical errors and gather error messages from Newrelic API Each known error has a corresponding exception. All exceptions are inherited from generic NewRelicException If HTTP return code is not known a generic NewRelicException is raised.
def _request(self, method, *args, **kwargs):
    """Perform the HTTP request named by ``method`` via the requests module.

    Known NewRelic API error codes are translated into dedicated exceptions
    (all inherited from the generic ``NewRelicException``); unknown non-2xx
    responses raise a generic ``NewRelicException``.

    :param method: name of the requests function to call ('get', 'post', ...)
    :param args: positional arguments forwarded to requests
    :param kwargs: keyword arguments forwarded to requests
    :returns: the requests response object (returned for any status code
        not mapped to an exception, i.e. 2xx and 401-with-message paths
        never reach here — see the per-code branches below)
    :raises NewRelicException: unsupported method name, or an unrecognized
        error status with/without server-provided messages
    :raises Timeout: the request timed out after ``self.timeout`` seconds
    :raises UnathorizedError: HTTP 401 (class name sic — defined elsewhere)
    :raises ChecksLimitExceeded: HTTP 402
    :raises ItemNotFoundError: HTTP 404
    """
    try:
        r = getattr(requests, method)(*args, **kwargs)
    except AttributeError:
        # 'method' does not name a callable on the requests module.
        raise NewRelicException(
            'Method {} is unsupported by requests module'
            .format(method)
        )
    except requests.exceptions.Timeout:
        raise Timeout('Request timed out after {} seconds'
                      .format(self.timeout))
    if r.status_code < 200 or r.status_code > 299:
        # Try to work out all known errors into separate exceptions
        if r.status_code == 401:
            try:
                error_message = r.json()['error']['title']
            except (KeyError, ValueError):
                # Response body carried no parsable error title.
                raise UnathorizedError(
                    'User is not authorized to perform requested operation'
                )
            else:
                raise UnathorizedError(error_message)
        if r.status_code == 402:
            raise ChecksLimitExceeded(
                "Creating the monitor will increase your scheduled checks "
                "past your account's purchased check limit."
            )
        elif r.status_code == 404:
            try:
                error_message = r.json()['error']['title']
            except (KeyError, ValueError):
                raise ItemNotFoundError(
                    'Requested item not found. '
                    'No error message was provided by server.'
                )
            else:
                raise ItemNotFoundError(error_message)
        else:
            # If we don't know what to do with specific error code
            # ( most likely it's 400 )
            # We at least try to get error message from the response
            try:
                response_errors = r.json()['errors']
                raise NewRelicException(
                    "The following errors were returned by server:\n{}"
                    .format('\n'
                            .join(
                                [x['error'] for x in response_errors]
                            ))
                )
            # Sometimes API does not return any useful information.
            # In this case that's just an HTML page
            # reporting 400 instead of JSON.
            # We will just return an error code in this case.
            except ValueError:
                raise NewRelicException(
                    'Got unexpected response code {}. '
                    'No additional information provided by server.'
                    .format(r.status_code)
                )
    return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_request(method, url, data=None, headers=None):\n try:\n if method == 'GET':\n resp = requests.get(url, headers=headers)\n return resp\n elif method == 'POST':\n resp = requests.post(url, json=data, headers=headers)\n return resp\n elif meth...
[ "0.62948996", "0.62826276", "0.62717605", "0.6218818", "0.61214685", "0.6097778", "0.6028783", "0.59454954", "0.5928931", "0.5927472", "0.5919466", "0.5915387", "0.584488", "0.58434975", "0.58159757", "0.5748989", "0.57105803", "0.57093656", "0.5705746", "0.5681902", "0.56764...
0.7337592
0
Wrapper for requests GET method
def _get(self, *args, **kwargs): return self._request('get', *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, *args, **kwargs):\n self.request(\"get\", *args, **kwargs)", "def http_method_get():\n return 'GET'", "def get(self, *path, **data):\n\t\treturn self.request('GET', *path, **data)", "def _get(self, url):\n return self._request(url)", "def get(self, *args, **kwargs):\n ...
[ "0.7811461", "0.7689053", "0.7667969", "0.75441927", "0.753764", "0.75005716", "0.7481838", "0.746681", "0.7408591", "0.7339474", "0.7280342", "0.7256964", "0.7247393", "0.72263336", "0.7189114", "0.718059", "0.7164386", "0.7148469", "0.71383345", "0.71329045", "0.7128152", ...
0.79950804
0
Wrapper for requests POST method
def _post(self, *args, **kwargs): return self._request('post', *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self, *args, **kwargs):\n return self._requests_call(util.requests_post, *args, **kwargs)", "def http_method_post():\n return 'POST'", "def http_post(self, **kwargs):\n return self.rabjcallable.post(**kwargs)", "def post(self, *args, **kwargs):\n self.request(\"post\", *args,...
[ "0.7969932", "0.746994", "0.73935425", "0.73165405", "0.72691715", "0.7221908", "0.7146746", "0.713131", "0.71067417", "0.707506", "0.7030268", "0.7027897", "0.7024082", "0.7005868", "0.69719", "0.6945976", "0.69432557", "0.6938295", "0.6930771", "0.68950063", "0.6866864", ...
0.7983252
0
Wrapper for requests PUT method
def _put(self, *args, **kwargs): return self._request('put', *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def http_method_put():\n return 'PUT'", "def put(self, *args, **kwargs):\n self.request(\"put\", *args, **kwargs)", "def put(self, request, pk=None):\n\n return Response({'method': 'put'})", "def do_PUT(self,):\n self.http_method = 'PUT'\n # Nothing to do for now.\n pass...
[ "0.7939011", "0.792131", "0.78856367", "0.7825104", "0.7805697", "0.77722734", "0.76859236", "0.76859236", "0.76859236", "0.7676738", "0.7652082", "0.7603654", "0.7537342", "0.7523113", "0.74227804", "0.7417111", "0.74126923", "0.7345135", "0.7330537", "0.73288274", "0.729239...
0.82278174
0
Wrapper for requests DELETE method
def _delete(self, *args, **kwargs): return self._request('delete', *args, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def http_delete(self, **kwargs):\n return self.rabjcallable.delete(**kwargs)", "def _delete(self, url):\n return self._request(url, method=\"DELETE\")", "def _delete(self, url, **kwargs):\n return self._call('DELETE', url, kwargs)", "def delete(self, method, uri, query_param, request_par...
[ "0.80676454", "0.805195", "0.7881113", "0.78649473", "0.7842438", "0.78090477", "0.78055274", "0.7791732", "0.77519745", "0.76926935", "0.7613614", "0.7541842", "0.7470509", "0.7459804", "0.74210435", "0.7401509", "0.73857987", "0.73857987", "0.7379348", "0.7372078", "0.73668...
0.8166634
0
Load JSON as a protobuf (pb2) object. Any calls to load protobuf objects from JSON in this repository should be through this function. Returns `None` if the loading failed.
def open_pbobject(path, pb_class):
    """Load JSON as a protobuf (pb2) object.

    All JSON -> protobuf loading in this repository should go through this
    function; 's3://' URIs are delegated to ``open_remote_pb_object``.
    """
    assert path.endswith(".json"), 'File extension for {} needs to be json.'.format(path)
    if path.startswith('s3://'):
        # Remote objects are fetched and parsed by the S3 helper.
        return open_remote_pb_object(path, pb_class)
    assert os.path.exists(path), f'Path not found: {path}'
    with open(path, 'r', encoding='UTF-8') as json_file:
        contents = json_file.read()
    return Parse(contents, pb_class())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))", "def load_json(json_str):\n return _api_internal._load_json(json_str)", "def _localloadjson(path: str) -> JSONType:\n with open(path, encoding=\"utf-8\") as fh:\n r...
[ "0.57719094", "0.57548875", "0.5741168", "0.568899", "0.5675841", "0.55961937", "0.5520938", "0.55098826", "0.55086243", "0.5459598", "0.54020417", "0.5388325", "0.5387577", "0.5386068", "0.5349237", "0.533662", "0.53339887", "0.53334725", "0.5324513", "0.5323394", "0.5311550...
0.5887734
0
Like open_pbobject but source can be a path or a bytestring
def parse_pbobject(source, pb_class):
    """Like ``open_pbobject`` but ``source`` can be a path or a bytestring.

    Logs an error and returns None for any other source type.
    """
    if isinstance(source, str):
        # Strings are treated as filesystem/S3 paths.
        return open_pbobject(source, pb_class)
    if isinstance(source, bytes):
        # Raw bytes are treated as a serialized protobuf payload.
        parsed = pb_class()
        parsed.ParseFromString(source)
        return parsed
    logging.error(f'cannot parse type {type(source)}')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def source(self) -> str | Path:\n ...", "def open_pds(source):\n\t# if isinstance(source, file):\n\t# \treturn source\n\tif hasattr(source, \"read\"):\n\t\t# sys.stderr.write(\"Identified a file-like object by read() method existence\\n\")\n\t\treturn source\n\n\ttry:\n\t\t# For universal newlines -- i.e....
[ "0.6619778", "0.6433512", "0.62496525", "0.61425763", "0.59727526", "0.58300316", "0.57773453", "0.57492805", "0.5733119", "0.5724798", "0.5724798", "0.5724405", "0.57121646", "0.5667463", "0.5616308", "0.55945593", "0.55895805", "0.55895805", "0.55895805", "0.55640024", "0.5...
0.6567848
1
Load JSON as a protobuf (pb2) object from S3 remote
def open_remote_pb_object(s3_object_uri, pb_class):
    """Load JSON as a protobuf (pb2) object from a remote S3 URI."""
    # Guard clause: anything that is not an s3:// URI is a caller error.
    if not s3_object_uri.startswith('s3://'):
        raise ValueError("Expected path to S3 bucket but got {}".format(s3_object_uri))
    bucket_name, s3_base_path = convert_uri_to_bucket_path(s3_object_uri)
    return Parse(get_string_from_s3_file(bucket_name, s3_base_path), pb_class())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __retrieve_from_bucket(fname):\n blob = BUCKET.blob(fname)\n json_data = json.loads(blob.download_as_string())\n return json_data", "def load(self, bucket, key):\n\n bucket = self._build_bucket_resource(bucket)\n\n with io.BytesIO() as stream:\n bucket.download_fileobj(key, ...
[ "0.6704851", "0.663122", "0.66172427", "0.6463626", "0.63138694", "0.624817", "0.61768293", "0.6076889", "0.603407", "0.60289794", "0.59423906", "0.5903331", "0.5902657", "0.5846186", "0.583783", "0.58322567", "0.571561", "0.56856984", "0.5669956", "0.5647885", "0.56409043", ...
0.62069356
6
Save protobuf (pb2) object to JSON file with our standard indent, key ordering, and other settings. Any calls to save protobuf objects to JSON in this repository should be through this function.
def save_pbobject_as_json(pb_object, save_path):
    """Save a protobuf (pb2) object to JSON with repository-standard settings.

    If ``save_path`` is a directory, the file name is derived from the
    object's deterministic UID. All protobuf-to-JSON saving in this
    repository should go through this function. Returns the path written.
    """
    if os.path.isdir(save_path):
        save_path = os.path.join(save_path, generate_uid_from_pbobject(pb_object) + ".json")
    assert save_path.endswith(".json"), 'File extension for {} needs to be json.'.format(save_path)
    payload = MessageToDict(pb_object,
                            including_default_value_fields=True,
                            preserving_proto_field_name=True)
    with open(save_path, "w", encoding='UTF-8') as _f:
        # indent/sort_keys give the standard stable on-disk formatting.
        json.dump(payload, _f, indent=2, sort_keys=True)
    return save_path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _save(self):\n with open(self.file_path, 'w') as fid:\n json.dump(self.data, fid, indent=4, sort_keys=True)", "def save(self):\n with open(self.file_path, 'w', encoding=Config.ENCODING) as file:\n json.dump(self.data, file, indent=2, ensure_ascii=False)", "def save(self)...
[ "0.64825857", "0.63305324", "0.62367463", "0.61235774", "0.61045235", "0.6103396", "0.60486585", "0.59887636", "0.5981807", "0.5977813", "0.59710145", "0.5964725", "0.5957643", "0.5951203", "0.5950735", "0.59460485", "0.59422106", "0.5939334", "0.592283", "0.59114516", "0.591...
0.72667795
0
Open ontology objects, first attempt to open V2 before trying V1.
def open_ontology_pbobject(ontology_file):
    """Open an ontology object, first attempting the V2 spec before V1.

    :param ontology_file: path or bytestring source accepted by
        ``parse_pbobject``.
    :return: the parsed ontology object, or None if both specs fail.
    """
    try:
        ontology = parse_pbobject(ontology_file, OntologyV2Pb2)
        if ontology is not None:
            logging.info('Successfully loaded Ontology V2 spec.')
            return ontology
    except Exception:
        logging.error('Failed to load ontology file with V2 spec, trying V1 spec.')
    try:
        ontology = parse_pbobject(ontology_file, OntologyV1Pb2)
        if ontology is not None:
            logging.info('Successfully loaded Ontology V1 spec.')
            return ontology
    except Exception:
        # Only embed the file name when the source is a path string
        # (it may also be a bytestring).
        if isinstance(ontology_file, str):
            logging.error('Failed to load ontology file' + ontology_file + 'with V1 spec also, returning None.')
        else:
            logging.error('Failed to load ontology file with V1 spec also, returning None.')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open(self):\n\n self._key_generator = KeyGenerator()\n\n # A map from LOD to LODHistory instance for all LODs that have\n # been referenced so far:\n self._lod_histories = {}\n\n # This corresponds to the 'nodes' table in a Subversion fs. (We\n # don't need a 'representations' or 'strings' t...
[ "0.56189865", "0.5365172", "0.52742535", "0.5039581", "0.50218326", "0.50218326", "0.50218326", "0.49638537", "0.48817602", "0.48285356", "0.47886187", "0.47444418", "0.47354096", "0.47354096", "0.47251382", "0.47234756", "0.4720916", "0.4704863", "0.46936986", "0.4692322", "...
0.6786061
0
Open feature ontology objects.
def open_feature_ontology_pbobject(ontology_file):
    """Open a feature ontology object.

    Returns the parsed FeatureOntology, or None when loading fails.
    """
    try:
        feature_ontology = open_pbobject(ontology_file, FeatureOntologyPb2)
    except Exception:
        logging.error('Failed to load ontology file' + ontology_file + '.')
        return None
    if feature_ontology is not None:
        logging.info('Successfully loaded FeatureOntology spec.')
        return feature_ontology
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_ontology_db_xrefs(self, feature):\n ontology = collections.defaultdict(dict) # type: dict\n db_xrefs = []\n # these are keys are formatted strangely and require special parsing\n for key in (\"go_process\", \"go_function\", \"go_component\"):\n ontology_event_index ...
[ "0.5661019", "0.55700505", "0.5532482", "0.5513073", "0.5394623", "0.53802025", "0.53731954", "0.53539014", "0.5332702", "0.53029007", "0.53000814", "0.52954465", "0.52883095", "0.5224929", "0.522275", "0.5220456", "0.5214841", "0.5178848", "0.5178848", "0.5178848", "0.517366...
0.7093566
0
Given a pb object, return the deterministic SHA1 hash hexdigest. Used for creating unique IDs.
def generate_uid_from_pbobject(pb_object):
    """Return the deterministic SHA1 hex digest of a pb object.

    The object is serialized to canonical JSON (sorted keys, default values
    included, proto field names preserved) so that identical messages always
    hash to the same unique ID.

    Parameters
    ----------
    pb_object : protobuf message
        Object to fingerprint.

    Returns
    -------
    str
        40-character SHA1 hexdigest of the canonical JSON encoding.
    """
    json_string = json.dumps(
        MessageToDict(pb_object, including_default_value_fields=True, preserving_proto_field_name=True),
        indent=2,
        sort_keys=True,
    )
    # Hash the canonical JSON directly; the previous StringIO round-trip
    # (write, getvalue, close) added nothing over hashing the string itself.
    return hashlib.sha1(json_string.encode('utf-8')).hexdigest()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def object_sha1(obj):\n\n return hashlib.sha1(json.dumps(obj).encode()).hexdigest()", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def hexdigest(self):\n return self.hashObject.hexdigest()", "def sha1(self) -> str:\n return self.data.sha1", "def _sha1_hash_json(self, value):\n hash = hashl...
[ "0.6950721", "0.68938136", "0.6875082", "0.68536943", "0.68525857", "0.68343544", "0.6825216", "0.676524", "0.6735063", "0.67005074", "0.664288", "0.6572037", "0.65659446", "0.6546874", "0.653492", "0.6523896", "0.6502122", "0.64945704", "0.64752346", "0.640935", "0.6403112",...
0.6621342
11
From a list of 'scene.json' and/or 'scene_*.json' paths in s3, return a Scene object for the one with the latest timestamp.
def get_latest_scene(s3_scene_jsons):
    """Return the Scene with the newest creation timestamp.

    Given 'scene*.json' paths in s3, loads each Scene and returns the tuple
    ``(scene, path)`` for the most recently created one.
    """
    # Fetch all 'scene*.json' files and load Scenes.
    scenes = [open_remote_pb_object(scene_json, Scene) for scene_json in s3_scene_jsons]
    # Pick the index whose creation timestamp (in microseconds) is largest;
    # ties resolve to the first occurrence, as with list.index(max(...)).
    stamps = [scene.creation_date.ToMicroseconds() for scene in scenes]
    latest_idx = max(range(len(stamps)), key=stamps.__getitem__)
    return scenes[latest_idx], s3_scene_jsons[latest_idx]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_latest_year_month_day_prefix(s3_path):\n latest = date.min\n keys = get_contents_of_directory(s3_path)\n\n for key in keys:\n search = re.search(r'.*year=(\\d{4}).*month=(\\d{2}).*day=(\\d{2})', key)\n if search:\n year, month, day = search.groups()\n bucket_dat...
[ "0.61708826", "0.5877487", "0.55048525", "0.5477997", "0.54031044", "0.5294818", "0.5293787", "0.52524304", "0.5156226", "0.5150664", "0.5150446", "0.512073", "0.49706817", "0.49576333", "0.4952994", "0.49373975", "0.4913797", "0.4860639", "0.48384994", "0.4826581", "0.482025...
0.7924387
0
Read a file where each line is of the form "word1 word2 ...". Yields lists of the form [word1, word2, ...].
def read(fname):
    """Yield (word, phoneme_list) pairs from the CMU training dictionary.

    Each entry "word phone1 phone2 ..." is yielded as the word plus its
    list of phoneme strings.

    NOTE(review): ``fname`` is ignored — the source path is hard-coded to
    TRAIN_FILEPATH_SRC. Confirm whether callers expect ``fname`` to be used.
    """
    cmu_dict = split_cmu_dict.load_dict(TRAIN_FILEPATH_SRC)
    # .items() (not the Python-2-only .iteritems()) for Python 3 compatibility;
    # the rest of this codebase already uses Python-3-only f-strings.
    for word, phonemes in cmu_dict.items():
        yield word, phonemes.split()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_txt(filename):\n file_object = open(filename, 'r')\n file_as_string = file_object.read()\n return create_word_list(file_as_string)", "def _get_wordlist(file_name):\n ifile = codecs.open(file_name, 'r', encoding='utf-8')\n for _ in range(int(ifile.__next__())):\n yield (ifile.__next...
[ "0.7736308", "0.76894", "0.7536886", "0.74992454", "0.7413196", "0.7329414", "0.73256314", "0.7306998", "0.72972393", "0.7285923", "0.7252589", "0.72504383", "0.72416985", "0.72221196", "0.71844417", "0.7173946", "0.7148515", "0.7139532", "0.71371263", "0.7133431", "0.7123027...
0.0
-1
The input parameter "config" (dictionary) contains the sampled configurations passed by the bohb optimizer
def compute(self, config, budget, working_directory, *args, **kwargs):
    """Train the CNN with the sampled hyperparameters and report accuracies.

    ``config`` (dict) holds the hyperparameter values sampled by the BOHB
    optimizer; ``budget`` is interpreted as the number of training epochs.
    Returns the HpBandSter-required dict with 'loss' set to
    1 - validation accuracy (HpBandSter minimizes) and an 'info' dict of
    train/validation/test accuracies plus the model's parameter count.
    ``working_directory`` is accepted for the HpBandSter interface but not
    used in the body.
    """
    # Useful website -- https://aws.amazon.com/blogs/machine-learning/scalable-multi-node-deep-learning-training-using-gpus-in-the-aws-cloud/
    ''' The below is commented out because I don't want to mess with the CNN's architecture. If you want to use hyperparameter optimization to alter the architecture of the fully connected layers as well, you can use the below. '''
    #new_layer_elements = np.array([config['num_els_new_1'] if config['num_new_fc_layers'] >= 1 else None,
    #                               config['num_els_new_2'] if config['num_new_fc_layers'] >= 2 else None,
    #                               config['num_els_new_3'] if config['num_new_fc_layers'] >= 3 else None])
    #new_layer_elements = list(new_layer_elements[new_layer_elements != None])
    #old_fclayers_tofreeze = np.array([0 if config['freeze0_cat'] == 1 else None,
    #                                  1 if config['freeze1_cat'] == 1 else None])
    #old_fclayers_tofreeze = list(old_fclayers_tofreeze[old_fclayers_tofreeze != None])

    # Generate the model (architecture fixed; only dropout is tuned here).
    model = ISICNetAlex(num_new_fc_layers=0,
                        new_layer_elements=[],
                        dropout_rate=config['dropout_rate'],
                        old_fclayers_tofreeze=[],
                        )

    # Use GPU processing if available.
    if torch.cuda.is_available():
        model.cuda()

    # Build criterion and optimizer.
    criterion = torch.nn.CrossEntropyLoss()
    ''' The below is commented out because I don't want to mess with the optimizer. '''
    #if config['optimizer'] == 'Adam':
    #    optimizer = torch.optim.Adam(model.parameters(), lr=config['lr'])
    #else:
    #    optimizer = torch.optim.SGD(model.parameters(), lr=config['lr'], momentum=config['sgd_momentum'])
    optimizer = torch.optim.SGD(model.parameters(), lr=config['lr'], momentum=config['sgd_momentum'])

    # Run training loop.
    # IMPORTANT -- note that the budget parameter used in setting up HpBandSter
    # refers to the number of epochs. It can be made to refer to other
    # parameters, but here we chose to have it refer to epochs.
    for epoch in range(int(budget)):
        start = time.time()
        # initialize variables to monitor training and validation loss
        train_loss = 0.0
        ###################
        # train the model #
        ###################
        model.train()
        for batch_idx, (data, target) in enumerate(self.train_loader):
            # move to GPU if available
            if torch.cuda.is_available():
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            # Running average of the per-batch loss.
            train_loss += 1/(batch_idx+1)*(loss.data-train_loss)
        print("Epoch {} training time took {} seconds".format(epoch,time.time()-start))

    # Evaluate once after the full budget of epochs.
    train_accuracy = self.evaluate_accuracy(model, self.train_loader)
    validation_accuracy = self.evaluate_accuracy(model, self.validation_loader)
    test_accuracy = self.evaluate_accuracy(model, self.test_loader)

    return ({
        'loss': 1-validation_accuracy,  # remember: HpBandSter always minimizes!
        'info': {
            'test accuracy': test_accuracy,
            'train accuracy': train_accuracy,
            'validation accuracy': validation_accuracy,
            'number of parameters': number_of_parameters(model),
        }
    })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_params(config):\n params = copy.deepcopy(config.view.params)\n params.t2bins = np.arange(0, params.t2bin_max + 1e-4, params.t2bin_stepsize)\n params.out = make_Bunch(\"State and output of detection processing\") # outputs are not parameters, maybe separate \n return params", "def set_params(...
[ "0.69010735", "0.66355693", "0.6371017", "0.6329762", "0.621126", "0.62031883", "0.616896", "0.60010886", "0.5989587", "0.59527737", "0.5929536", "0.5908079", "0.5880665", "0.58574", "0.58471894", "0.58373", "0.5830637", "0.5825823", "0.58240813", "0.57930523", "0.575335", ...
0.0
-1
It builds the configuration space with the needed hyperparameters. It is easily possible to implement different types of hyperparameters. Besides float hyperparameters on a log scale, it is also able to handle categorical input parameters.
def get_configspace(): cs = CS.ConfigurationSpace() # Learning rate hyperparameter lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True) # Stochastic gradient descent momentum as parameter. sgd_momentum = CSH.UniformFloatHyperparameter('sgd_momentum', lower=0.0, upper=0.99, default_value=0.9, log=False) cs.add_hyperparameters([lr, sgd_momentum]) # Optimizer hyperparameters. #optimizer = CSH.CategoricalHyperparameter('optimizer', ['Adam', 'SGD']) #cs.add_hyperparameters([optimizer]) # Only add the sgd_momentum hyperparameter if the optimizer is stochastic gradient descent. Otherwise, it doesn't make sense. #cond = CS.EqualsCondition(sgd_momentum, optimizer, 'SGD') #cs.add_condition(cond) ''' The below is commented out because we're not fiddling with architecture in this optimization.''' #num_new_fc_layers = CSH.UniformIntegerHyperparameter('num_new_fc_layers', lower=0, upper=3, default_value=0, log=False) #num_els_new_1 = CSH.UniformIntegerHyperparameter('num_els_new_1', lower=128, upper=4096, default_value = 1000, log=True) #num_els_new_2 = CSH.UniformIntegerHyperparameter('num_els_new_2', lower=128, upper=4096, default_value = 1000, log=True) #num_els_new_3 = CSH.UniformIntegerHyperparameter('num_els_new_3', lower=128, upper=4096, default_value = 1000, log=True) #freeze0_old = CSH.UniformIntegerHyperparameter('freeze0_cat', lower = 0, upper = 1, default_value = 1, log=False) #freeze1_old = CSH.UniformIntegerHyperparameter('freeze1_cat', lower=0, upper=1, default_value=1, log=False) #cs.add_hyperparameters([num_new_fc_layers, num_els_new_1, num_els_new_2, num_els_new_3, freeze0_old, freeze1_old, batchsize]) dropout_rate = CSH.UniformFloatHyperparameter('dropout_rate', lower=0.0, upper=0.9, default_value=0.5, log=False) cs.add_hyperparameters([dropout_rate]) return cs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_configspace():\r\n cs = CS.ConfigurationSpace()\r\n\r\n lr = CSH.UniformFloatHyperparameter('lr', lower=1e-6, upper=1e-1, default_value='1e-2', log=True)\r\n\r\n # For demonstration purposes, we add different optimizers as categorical hyperparameters.\r\n # To sh...
[ "0.7177107", "0.68647355", "0.66281545", "0.6522021", "0.6518543", "0.651809", "0.651809", "0.6490908", "0.64341015", "0.6414481", "0.6403292", "0.63531125", "0.6241054", "0.62265354", "0.62035", "0.6195846", "0.61952543", "0.61585575", "0.6135327", "0.6123834", "0.61211634",...
0.713302
1
Private method controlling the primary algorithm behind the implementation of conditional abundance matching. This method will be renamed according to ``self.galprop_key`` in the instance of `ConditionalAbunMatch`. For example, if the property being modeled is ``gr_color``, then the `_mc_galprop` function would instead be named ``mc_gr_color``, a bound method to the `ConditionalAbunMatch` class instance.
def _mc_galprop(self, seed=None, **kwargs):
    """Monte Carlo realization of the galaxy property via conditional abundance matching.

    This private method implements the primary CAM algorithm; the bound
    method is renamed to ``mc_<galprop_key>`` (e.g. ``mc_gr_color``) on
    instances of ``ConditionalAbunMatch``.

    Parameters
    ----------
    seed : int, optional
        Seed passed to ``np.random.seed`` so draws are reproducible.
    **kwargs
        Exactly one of ``galaxy_table`` or ``halos`` must be supplied;
        ``galaxy_table_slice_array`` may optionally provide precomputed
        per-bin index slices.

    Returns
    -------
    numpy.ndarray
        Modeled galaxy property values, one per input row.

    Raises
    ------
    KeyError
        If both, or neither, of ``galaxy_table`` / ``halos`` are given.
    """
    model_helpers.update_param_dict(self, **kwargs)
    self._set_correlation_strength()

    # Resolve the input table and the operative secondary halo property key;
    # 'galaxy_table' and 'halos' are mutually exclusive inputs.
    if ('galaxy_table' in kwargs.keys()) & ('halos' in kwargs.keys()):
        msg = ("The mc_"+self.galprop_key+" method accepts either " +
               "a halos keyword argument, or a galaxy_table keyword argument" +
               " but never both.")
        raise KeyError(msg)
    elif 'galaxy_table' in kwargs.keys():
        galaxy_table = kwargs['galaxy_table']
        operative_sec_haloprop_key = (
            model_defaults.host_haloprop_prefix + self.sec_haloprop_key)
    elif 'halos' in kwargs.keys():
        galaxy_table = kwargs['halos']
        operative_sec_haloprop_key = self.sec_haloprop_key
    else:
        msg = ("The mc_"+self.galprop_key+" requires either " +
               "a halos keyword argument, or a galaxy_table keyword argument")
        raise KeyError(msg)

    self.add_new_haloprops(galaxy_table)

    # All at once, draw all the randoms we will need
    np.random.seed(seed=seed)
    all_randoms = np.random.random(len(galaxy_table)*2)
    galprop_cumprob = all_randoms[0:len(galaxy_table)]
    galprop_scatter = all_randoms[len(galaxy_table):]

    # Initialize the output array
    output_galprop = np.zeros(len(galaxy_table))

    # Determine binning and loop range
    if 'galaxy_table_slice_array' not in kwargs.keys():
        binned_prim_galprop = np.digitize(
            galaxy_table[self.prim_galprop_key],
            self.prim_galprop_bins)
        prim_galprop_loop_range = set(binned_prim_galprop)
    else:
        prim_galprop_loop_range = range(len(self.one_point_lookup_table))

    for i in prim_galprop_loop_range:

        # Determine the slice corresponding to the i^th prim_galprop bin
        if 'galaxy_table_slice_array' not in kwargs.keys():
            idx_bini = np.where(binned_prim_galprop==i)[0]
            num_bini = len(idx_bini)
        else:
            idx_bini = kwargs['galaxy_table_slice_array'][i]
            num_bini = len(galaxy_table[idx_bini])

        if len(idx_bini) > 0:
            # Fetch the appropriate number of randoms
            # for the i^th prim_galprop bin
            galprop_cumprob_bini = galprop_cumprob[idx_bini]
            galprop_scatter_bini = galprop_scatter[idx_bini]

            # Fetch the halos in the i^th prim_galprop bin,
            # and determine how they are sorted
            haloprop_bini = galaxy_table[idx_bini][operative_sec_haloprop_key]
            idx_sorted_haloprop_bini = np.argsort(haloprop_bini)

            galprop_bini = self._condition_matched_galprop(
                haloprop_bini[idx_sorted_haloprop_bini],
                galprop_cumprob_bini, i, galprop_scatter_bini, self.tol)

            # Assign the final values to the
            # appropriately sorted subarray of output_galprop
            output_galprop[idx_bini[idx_sorted_haloprop_bini]] = galprop_bini

    return output_galprop
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_comparison_func(self, adjective):\n return self.SONG_ADJECTIVES.get(adjective, {}).get(\"comparison\")", "def _set_primary_behaviors(self):\n\n for component_model in self.model_dictionary.values():\n gal_type = component_model.gal_type\n feature_name = component_mode...
[ "0.52170765", "0.51648027", "0.49750897", "0.48653188", "0.48162937", "0.48143739", "0.47746363", "0.4698766", "0.46927482", "0.46442884", "0.46392474", "0.46255955", "0.46191144", "0.45699182", "0.45459855", "0.4538789", "0.45209238", "0.45171022", "0.45171022", "0.44952714", ...
0.49315447
3
Method computes lookup tables of the cumulative ``galprop`` PDF defined by ``input_galaxy_table``.
def build_one_point_lookup_table(self, **kwargs): galaxy_table = kwargs['input_galaxy_table'] prim_galprop_bins = kwargs['prim_galprop_bins'] self.one_point_lookup_table = np.zeros( len(prim_galprop_bins)+1, dtype=object) binned_prim_galprop = np.digitize( galaxy_table[self.prim_galprop_key], self.prim_galprop_bins) for i in range(len(self.one_point_lookup_table)): idx_bini = np.where(binned_prim_galprop == i)[0] if model_helpers.custom_len(idx_bini) > self.minimum_sampling: gals_bini = galaxy_table[idx_bini] abcissa = np.arange(len(gals_bini))/float(len(gals_bini)-1) ordinates = np.sort(gals_bini[self.galprop_key]) self.one_point_lookup_table[i] = ( model_helpers.custom_spline(abcissa, ordinates, k=2) ) # For all empty lookup tables, fill them with the nearest lookup table unfilled_lookup_table_idx = np.where( self.one_point_lookup_table == 0)[0] filled_lookup_table_idx = np.where( self.one_point_lookup_table != 0)[0] if len(unfilled_lookup_table_idx) > 0: msg = ("When building the one-point lookup table from input_galaxy_table, " + "there were some bins of prim_galprop_bins that contained fewer than " + str(self.minimum_sampling)+ " galaxies. In such cases, the lookup table " + "of the nearest sufficiently populated bin will be chosen.") warn(msg) for idx in unfilled_lookup_table_idx: closest_filled_idx_idx = array_utils.find_idx_nearest_val( filled_lookup_table_idx, idx) closest_filled_idx = filled_lookup_table_idx[closest_filled_idx_idx] self.one_point_lookup_table[idx] = ( self.one_point_lookup_table[closest_filled_idx])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mc_galprop(self, seed=None, **kwargs):\n model_helpers.update_param_dict(self, **kwargs)\n self._set_correlation_strength()\n\n if ('galaxy_table' in kwargs.keys()) & ('halos' in kwargs.keys()):\n msg = (\"The mc_\"+self.galprop_key+\" method accepts either \" + \n ...
[ "0.5527577", "0.48766977", "0.48666936", "0.48461375", "0.48401406", "0.48253617", "0.48167393", "0.47914568", "0.4777298", "0.46711516", "0.46498317", "0.46371827", "0.46279138", "0.46278507", "0.4626222", "0.46053305", "0.45921257", "0.45880622", "0.4578638", "0.45648557", ...
0.63473904
0
Method creates ``self.param_dict`` regulating the strength of the correlation between sec_haloprop and galprop at each value of prim_galprop.
def _build_param_dict(self, **kwargs): if 'correlation_strength' in kwargs.keys(): correlation_strength = kwargs['correlation_strength'] if custom_len(correlation_strength) > 1: try: self.correlation_strength_abcissa = kwargs['correlation_strength_abcissa'] except KeyError: msg = ("If correlation_strength keyword is passed to the constructor, \n" + "you must also pass a correlation_strength_abcissa keyword argument " + "storing an array of the same length as correlation_strength.") raise(msg) else: self.correlation_strength_abcissa = [0] correlation_strength = [correlation_strength] self._param_dict_keys = ['correlation_param' + str(i+1) for i in range(len(correlation_strength))] self.param_dict = {key:value for key, value in zip(self._param_dict_keys, correlation_strength)} else: self.param_dict = {'correlation_param1': 1.0} self._set_correlation_strength()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gen_params(self) -> Dict:\n param_dict: Dict = {}\n\n gX_name: List[str] = ['g_leak', 'g_nav', 'g_kvhh', 'g_kva', 'g_kvsi', \n 'g_cav', 'g_kca', 'g_nap', 'g_kir']\n gX_log: np.ndarray = 4 * np.random.rand(9) - 2 # from -2 to 2\n gX: np.ndarray = (10 * np.one...
[ "0.6686416", "0.6469411", "0.64443755", "0.6407086", "0.5964358", "0.5906427", "0.5815626", "0.57940143", "0.5791521", "0.5775666", "0.5771342", "0.5738936", "0.57148474", "0.57035136", "0.56996524", "0.5679744", "0.56791604", "0.56763065", "0.56688666", "0.56636876", "0.5646...
0.70243114
0
Method uses the current values in the param_dict to update the strength of the correlation between sec_haloprop and galprop at each value of prim_galprop.
def _set_correlation_strength(self): if hasattr(self, 'correlation_strength_abcissa'): abcissa = self.correlation_strength_abcissa ordinates = [self.param_dict['correlation_param'+str(i+1)] for i in range(len(abcissa))] correlation_strength_spline = model_helpers.custom_spline(abcissa, ordinates, k=custom_len(abcissa)-1) self.correlation_strength = correlation_strength_spline(self.prim_galprop_bins) else: self.correlation_strength = np.repeat(self.param_dict['correlation_param1'], len(self.prim_galprop_bins)) self.correlation_strength[self.correlation_strength > 1] = 1 self.correlation_strength[self.correlation_strength <- 1] = -1 self.correlation_strength = np.append( self.correlation_strength, self.correlation_strength[-1])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mc_galprop(self, seed=None, **kwargs):\n model_helpers.update_param_dict(self, **kwargs)\n self._set_correlation_strength()\n\n if ('galaxy_table' in kwargs.keys()) & ('halos' in kwargs.keys()):\n msg = (\"The mc_\"+self.galprop_key+\" method accepts either \" + \n ...
[ "0.6255577", "0.62177485", "0.6214475", "0.5984097", "0.5967375", "0.59337133", "0.59327203", "0.59210086", "0.58783424", "0.5867557", "0.57922786", "0.57610255", "0.57574403", "0.57097393", "0.5705617", "0.5699275", "0.56962854", "0.5681587", "0.5657348", "0.56380326", "0.56...
0.63643956
0
Method calls ``new_haloprop_func_dict`` to create new halo properties as columns to the mock catalog, if applicable.
def add_new_haloprops(self, galaxy_table): if hasattr(self, 'new_haloprop_func_dict'): d = self.new_haloprop_func_dict for key, func in d.iteritems(): if key not in galaxy_table.keys(): galaxy_table[key] = func(galaxy_table=galaxy_table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_make_hmp(self):\n table_factory = DataTableFactory(PACKET_DIR)\n table_factory.hmp()", "def _parse_constructor_kwargs(self, **kwargs):\n\n try:\n halo_id = np.array(kwargs['halo_id'])\n assert type(halo_id) is np.ndarray\n Nhalos = custom_len(halo_id...
[ "0.53807044", "0.52395046", "0.50270265", "0.49795195", "0.49258766", "0.49224955", "0.4920414", "0.49198395", "0.49087453", "0.49079537", "0.48617426", "0.48479292", "0.4806662", "0.48012027", "0.47516495", "0.47495013", "0.47483745", "0.46836653", "0.46500537", "0.46343023", ...
0.639631
0
Get sub directories within a path
def get_directories(self, path): if self.name == 'dropbox': dbx = dropbox.get_dropbox() return dropbox.get_folders(dbx, path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getImmediateSubdirectories(dir):", "def get_subdirs(dir_path: str) -> list:\n res = list(x.name for x in os.scandir(dir_path) if x.is_dir())\n return res", "def collect_subdirs(path_to_walk):\n root, subdirs, _ = next(os.walk(path_to_walk))\n\n return [os.path.join(root, d) for d in subdirs]", ...
[ "0.7549383", "0.74412143", "0.74089825", "0.7138318", "0.7118109", "0.70590365", "0.69344604", "0.6872523", "0.68333566", "0.6820497", "0.6777506", "0.67719275", "0.67686915", "0.67627996", "0.67528176", "0.67451686", "0.6711929", "0.6701213", "0.6682351", "0.6675598", "0.666...
0.6562612
24
Check a given directory exists on the cloud storage.
def check_exists(self, directory): if self.name == 'dropbox': directory = dropbox.normalise_path(directory) dbx = dropbox.get_dropbox() return dropbox.is_folder(dbx, directory)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def directory_exists(self, directory: str = None) -> bool:\n return os.access(directory if directory else self.get_directory(), os.R_OK)", "def check_dir_exist():\n if os.path.isdir(path_structure):\n return True\n else:\n return False", "def path_exists(path):\n if path.startswith...
[ "0.73309547", "0.72222155", "0.7112823", "0.70747614", "0.70586145", "0.6962344", "0.68925315", "0.682691", "0.6792617", "0.67795223", "0.66985345", "0.6674074", "0.66731733", "0.66687524", "0.66685283", "0.66561913", "0.6646632", "0.6562051", "0.6539875", "0.65383136", "0.65...
0.7083334
3
Currently not implemented First print returns date of modifications to the video file Second print prints date of Creation of the video file, literally time when it was written to folder
def creation_date_video(path_to_file): print("Last modified: %s" % time.ctime(os.path.getmtime(path_to_file))) print("Created: %s" % time.ctime(os.path.getctime(path_to_file))) # return os.path.getctime(path_to_file)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_timestamps(dir_video):\n print(\"Adding creation dates to file names\")\n os.chdir(dir_video)\n # get only top level dir info\n dir_data_video_files = next(os.walk(dir_video))\n list_video_files = dir_data_video_files[2] # get file list\n for f_name in list_video_files:\n if GOPRO...
[ "0.6664668", "0.6570803", "0.6459046", "0.6410988", "0.63897413", "0.6233249", "0.6155345", "0.6155294", "0.6054676", "0.6041707", "0.6028616", "0.59397346", "0.5918884", "0.5896831", "0.58840746", "0.5882793", "0.58803463", "0.587276", "0.5860853", "0.5849358", "0.58296496",...
0.8283047
0
We give location of folder as input
def main_one(string_path_to_folder, destination_folder): # .jpg and .JPG are the same # photos = glob.glob("C:/Personal/pp2_photo/dataBase/*.JPG") # Examples of location format # pho = glob.glob("C:/Personal/pp2_photo/dataBase/*.jpg") photos = glob.glob(string_path_to_folder+"/*.JPG") print("Number of files: ", len(photos)) for k in photos: print(get_photo_date(k)) process_all(k, destination_folder)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_folder():\n return input(\"Folder: \")", "def identify_folder(self, folder):", "def Directory(self) -> str:", "def subdir(self):", "def __init__(self, folder: str):\n self.folder = folder", "def folder(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"folder\")", "def fol...
[ "0.70578855", "0.70199007", "0.6585436", "0.65788156", "0.653169", "0.6479249", "0.635464", "0.6243236", "0.61891365", "0.61666906", "0.6152387", "0.6135909", "0.61036575", "0.60858923", "0.60739803", "0.6059854", "0.60517687", "0.6041118", "0.60151416", "0.601386", "0.599648...
0.0
-1
Give actions for AtomSiteL.
def action_atom_site_l(obj: AtomSiteL, thread: QtCore.QThread): w_actions = [] if obj.is_attribute("type_symbol"): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Show b_scat") qtb_1.clicked.connect(lambda: run_function( obj.report, (), thread)) w_actions.append(qtb_1) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def actions():\n pass", "def setupActions(obj):\n at = getToolByName(obj, 'portal_actions')\n ait = getToolByName(obj, 'portal_actionicons')\n for action in at.listActions():\n if action.getId() == 'atom':\n break\n else:\n at.addAction(id = 'atom',\n n...
[ "0.6464216", "0.63749534", "0.6212826", "0.6211583", "0.6051947", "0.5988117", "0.5883288", "0.5883288", "0.58782697", "0.5835679", "0.5777518", "0.5745189", "0.573512", "0.5696905", "0.5692459", "0.5652118", "0.56314516", "0.56207705", "0.56207705", "0.56207705", "0.56207705...
0.59871656
6
Give actions for AtomSiteL.
def action_inversed_hessian(obj: InversedHessian, thread: QtCore.QThread): w_actions = [] if obj.is_defined(): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Show correlation") qtb_1.clicked.connect(lambda: run_function( obj.report, (), thread)) w_actions.append(qtb_1) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def actions():\n pass", "def setupActions(obj):\n at = getToolByName(obj, 'portal_actions')\n ait = getToolByName(obj, 'portal_actionicons')\n for action in at.listActions():\n if action.getId() == 'atom':\n break\n else:\n at.addAction(id = 'atom',\n n...
[ "0.6464216", "0.63749534", "0.6212826", "0.6211583", "0.6051947", "0.5988117", "0.59871656", "0.5883288", "0.5883288", "0.58782697", "0.5835679", "0.5777518", "0.5745189", "0.573512", "0.5696905", "0.5692459", "0.5652118", "0.56314516", "0.56207705", "0.56207705", "0.56207705...
0.0
-1
Dock for RhoChi object.
def action_rhochi(obj: RhoChi, thread: QtCore.QThread): w_actions = [] crystals = obj.crystals() experiments = obj.experiments() flag_crystals = len(crystals) != 0 flag_experiments = len(experiments) != 0 flag_diffrn = any([isinstance(exp, Diffrn) for exp in experiments]) # for experiment in experiments: # if isinstance(experiment, Pd): # w_actions_t = action_pd(experiment, thread) # w_actions.extend(w_actions_t) # elif isinstance(experiment, Pd2d): # w_actions_t = action_pd2d(experiment, thread) # w_actions.extend(w_actions_t) # elif isinstance(experiment, Diffrn): # w_actions_t = action_diffrn(experiment, thread) # w_actions.extend(w_actions_t) # Action doc if (flag_crystals & flag_experiments & obj.is_defined()): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Calc. Chi square") qtb_1.clicked.connect(lambda: run_function( obj.calc_chi_sq, (True, ), thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Refine") qtb_1.clicked.connect(lambda: run_function( obj.refine, (False, "BFGS",), thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Estimate Inversed Hessian") qtb_1.clicked.connect(lambda: run_function( obj.estimate_inversed_hessian, (), thread)) w_actions.append(qtb_1) elif not(flag_crystals & flag_experiments): if not flag_crystals: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add crystal") qtb_1.clicked.connect(lambda: add_items(obj, [ Crystal(data_name="phase")], thread)) w_actions.append(qtb_1) if not flag_experiments: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add diffrn") qtb_1.clicked.connect(lambda: add_items(obj, [ Diffrn(data_name="mono")], thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add pd") qtb_1.clicked.connect(lambda: add_items(obj, [ Pd(data_name="powder1d")], thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add pd2d") qtb_1.clicked.connect(lambda: add_items(obj, [ Pd2d(data_name="powder2d")], thread)) 
w_actions.append(qtb_1) else: qlabel = QtWidgets.QLabel( "To run calculations all items should be defined.") w_actions.append(qlabel) # layout_actions.addWidget(qlabel) if (flag_diffrn & flag_crystals): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Estimate F_M") qtb_1.clicked.connect(lambda: run_function( obj.estimate_f_mag_for_diffrn, (), thread)) w_actions.append(qtb_1) if obj.is_attribute("inversed_hessian"): w_actions_t = action_inversed_hessian(obj.inversed_hessian, thread) w_actions.extend(w_actions_t) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createDockArea(self):\n self.centralDock = CentralDockArea(self.globalSession)\n self.setCentralWidget(self.centralDock)", "def init_layout(self):\n super(WxDockPane, self).init_layout()\n self.widget.SetDockWidget(self.dock_widget())", "def create(self, verbose=False):\r\n ...
[ "0.6622695", "0.5954567", "0.5893806", "0.57845724", "0.57674456", "0.5709266", "0.5709266", "0.5704553", "0.56602496", "0.5624555", "0.5622377", "0.55571485", "0.5540067", "0.5499297", "0.54939884", "0.5486771", "0.54789144", "0.54647344", "0.5457878", "0.5430313", "0.542813...
0.0
-1
Dock for MEM object.
def action_mem(obj: MEM, thread: QtCore.QThread): w_actions = [] crystals = obj.crystals() experiments = obj.experiments() flag_crystals = len(crystals) != 0 flag_experiments = len(experiments) != 0 # for experiment in experiments: # w_actions_temp = action_diffrn(experiment, thread) # w_actions.extend(w_actions_temp) # Action doc if (flag_crystals & flag_experiments & obj.is_defined()): if flag_crystals: crystal = crystals[0] if not(crystal.is_attribute("atom_electron_configuration")): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Create AtomElectronConfiguration") qtb_1.clicked.connect(lambda: crystal.add_items([ AtomElectronConfigurationL()])) qtb_1.clicked.connect(lambda: run_function(pass_func, (), thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Create prior density") qtb_1.clicked.connect(lambda: run_function( obj.create_prior_density, (), thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Calculate FR") qtb_1.clicked.connect(lambda: run_function(obj.calc_fr, (), thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Maximize entropy") qtb_1.clicked.connect(lambda: run_function(obj.maximize_entropy, (), thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Chi refinement") qtb_1.clicked.connect(lambda: run_function(obj.refine_susceptibility, (), thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Run cycle") qtb_1.clicked.connect(lambda: run_function(obj.make_cycle, (), thread)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Save to '.den' files") qtb_1.clicked.connect(lambda: run_function(obj.save_to_file_den, (), thread)) w_actions.append(qtb_1) if obj.is_attribute("section"): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Plot sections") def func_plot(obj): crystal = obj.crystals()[0] space_group = crystal.space_group f_s_g_s = space_group.full_space_group_symop r_11 = numpy.array(f_s_g_s.r_11, 
dtype=float) r_12 = numpy.array(f_s_g_s.r_12, dtype=float) r_13 = numpy.array(f_s_g_s.r_13, dtype=float) r_21 = numpy.array(f_s_g_s.r_21, dtype=float) r_22 = numpy.array(f_s_g_s.r_22, dtype=float) r_23 = numpy.array(f_s_g_s.r_23, dtype=float) r_31 = numpy.array(f_s_g_s.r_31, dtype=float) r_32 = numpy.array(f_s_g_s.r_32, dtype=float) r_33 = numpy.array(f_s_g_s.r_33, dtype=float) r_ij = (r_11, r_12, r_13, r_21, r_22, r_23, r_31, r_32, r_33) b_1 = numpy.array(f_s_g_s.b_1, dtype=float) b_2 = numpy.array(f_s_g_s.b_2, dtype=float) b_3 = numpy.array(f_s_g_s.b_3, dtype=float) b_i = (b_1, b_2, b_3) atom_site = crystal.atom_site fract_x = atom_site.numpy_fract_x fract_y = atom_site.numpy_fract_y fract_z = atom_site.numpy_fract_z fract_xyz = (fract_x, fract_y, fract_z) atom_label = atom_site.numpy_label fract_uc_x, fract_uc_y, fract_uc_z, label_uc = \ calc_atoms_in_unit_cell(r_ij, b_i, fract_xyz, atom_label) cell = crystal.cell atom_site_susceptibility = crystal.atom_site_susceptibility section = obj.section[0] density_point = obj.density_point mem_parameters = obj.mem_parameters atom_x, atom_y, atom_label = section.calc_atoms( cell, atom_site, f_s_g_s, distance_min=0.3) den_chi_section, den_b_section = \ calc_section_from_density_point( section, density_point, mem_parameters, cell, f_s_g_s, atom_site, atom_site_susceptibility) fract_atom_xyz = numpy.array(fract_xyz, dtype=float ).transpose() fract_sec_xyz = section.calc_fractions(cell, atom_site) fract_sec_xyz = numpy.transpose(numpy.array(fract_sec_xyz, dtype=float)) n_atom_index, n_symmetry, distance = \ calc_index_atom_symmetry_closest_to_fract_xyz( fract_sec_xyz, fract_atom_xyz, r_ij, b_i, cell) n_at_2d = numpy.transpose(n_atom_index.reshape( section.points_x, section.points_y)) fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(4.2, 4.2), dpi=300) plt.set_cmap('Accent') ax1.imshow(n_at_2d, interpolation='bilinear', extent=(-0.5*section.size_x, 0.5*section.size_x, -0.5*section.size_y, 0.5*section.size_y), alpha=0.1, 
origin="lower") den_x = numpy.linspace(-0.5*section.size_x, 0.5*section.size_x, section.points_x) den_y = numpy.linspace(-0.5*section.size_y, 0.5*section.size_y, section.points_y) blk = '#000000' ax1.contour(den_x, den_y, den_chi_section.transpose(), levels=[0.1, 0.5, 1., 5., 10., 50.], colors=[blk, blk, blk, blk, blk, blk], linewidths=0.5) ax1.plot(atom_x, atom_y, 'ko', ms=3) for _1, _2, _3 in zip(atom_x, atom_y, atom_label): ax1.text(_1, _2, _3) ax1.set_title( f"Tensor. Max is {den_chi_section.max():.1f}") # plt.set_cmap('RdBu') ax2.imshow(n_at_2d, interpolation='bilinear', extent=(-0.5*section.size_x, 0.5*section.size_x, -0.5*section.size_y, 0.5*section.size_y), alpha=0.1, origin="lower") hh = numpy.abs(den_b_section).max() rd = '#FF0000' ax2.contour(den_x, den_y, den_b_section.transpose(), levels=[-50., -10., -5., -1., -0.5, -0.1, 0.1, 0.5, 1., 5., 10., 50.], colors=[rd, rd, rd, rd, rd, rd, blk, blk, blk, blk, blk, blk], linewidths=0.5) # ax2.imshow(den_b_section, interpolation='bilinear', # extent=(-0.5*section.size_x, 0.5*section.size_x, # -0.5*section.size_y, 0.5*section.size_y), # vmin=-hh, vmax=hh, # alpha=1., origin="lower") ax2.set_title(f"2channel. 
Max is {hh:.1f}") ax2.plot(atom_x, atom_y, 'ko', ms=3) for _1, _2, _3 in zip(atom_x, atom_y, atom_label): ax2.text(_1, _2, _3) plt.show() return qtb_1.clicked.connect(lambda: func_plot(obj)) w_actions.append(qtb_1) elif not(flag_crystals & flag_experiments): if not flag_crystals: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add crystal") qtb_1.clicked.connect(lambda: add_items(obj, [ Crystal(data_name="phase")], thread)) w_actions.append(qtb_1) if not flag_experiments: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add diffrn") qtb_1.clicked.connect(lambda: add_items(obj, [ Diffrn(data_name="mono")], thread)) w_actions.append(qtb_1) else: qlabel = QtWidgets.QLabel( "To run calculations all items should be defined.") qlabel.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding) w_actions.append(qlabel) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createDockArea(self):\n self.centralDock = CentralDockArea(self.globalSession)\n self.setCentralWidget(self.centralDock)", "def __init__(self):\r\n\r\n object.__init__(self)\r\n \r\n self.dock_direction = 0\r\n self.dock_layer = 0\r\n self.dock_row = 0\r\n ...
[ "0.666467", "0.61904645", "0.58657914", "0.57930666", "0.5759", "0.5710022", "0.5661305", "0.55973077", "0.55593544", "0.5494071", "0.5491788", "0.5429935", "0.54137725", "0.5384663", "0.5345779", "0.53450286", "0.53433484", "0.5328388", "0.5326785", "0.5324091", "0.53176713"...
0.0
-1
Form dock_pd. Based on dock_proc dock_meas dock_chi2 dock_refine_ls dock_peak
def action_pd(obj: Pd, thread: QtCore.QThread): w_actions = [] f_meas = obj.is_attribute("pd_meas") f_chi2 = obj.is_attribute("chi2") f_phase = obj.is_attribute("phase") l_pd_peak = [] if f_phase: phase = obj.phase for item in phase.items: try: pd_peak = getattr(obj, f"pd_peak_{item.label.lower():}") l_pd_peak.append(pd_peak) except AttributeError: pass f_setup = obj.is_attribute("setup") f_pd_instr_resolution = obj.is_attribute("pd_instr_resolution") f_pd_background = obj.is_attribute("pd_background") f_range = obj.is_attribute("range") if not(f_chi2 & f_meas & f_setup & f_pd_instr_resolution & f_phase & f_pd_background & f_range): if not f_chi2: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add chi2") qtb_1.clicked.connect(lambda: add_items(obj, [Chi2()], thread)) w_actions.append(qtb_1) if not f_meas: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add pd_meas") qtb_1.clicked.connect(lambda: add_items(obj, [PdMeasL()], thread)) w_actions.append(qtb_1) if not f_setup: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add setup") qtb_1.clicked.connect(lambda: add_items(obj, [Setup()], thread)) w_actions.append(qtb_1) if not f_pd_instr_resolution: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add pd_instr_resolution") qtb_1.clicked.connect(lambda: add_items(obj, [PdInstrResolution()], thread)) w_actions.append(qtb_1) if not f_phase: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add phase") vv = PhaseL() vv.items = [Phase(label="phase", igsize=0., scale=1.)] qtb_1.clicked.connect(lambda: add_items(obj, [vv], thread)) w_actions.append(qtb_1) if not f_pd_background: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add pd_background") qtb_1.clicked.connect(lambda: add_items(obj, [PdBackgroundL()], thread)) w_actions.append(qtb_1) if not f_range: qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add range") qtb_1.clicked.connect(lambda: add_items(obj, [Range( ttheta_min=2, ttheta_max=100.)], thread)) w_actions.append(qtb_1) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CalculateDockSizerLimits(self, dock):\r\n\r\n docks, panes = CopyDocksAndPanes2(self._docks, self._panes)\r\n\r\n sash_size = self._art.GetMetric(AUI_DOCKART_SASH_SIZE)\r\n caption_size = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)\r\n opposite_size = self.GetOppositeDockTotalSize...
[ "0.5546065", "0.5520014", "0.55192053", "0.53659904", "0.536205", "0.53281355", "0.53086746", "0.5241043", "0.5237947", "0.5195551", "0.51808137", "0.51035964", "0.50467604", "0.4982178", "0.49796942", "0.4977867", "0.49674752", "0.49330243", "0.49260616", "0.49172583", "0.49...
0.0
-1
Form dock_pd. Based on dock_proc dock_meas dock_chi2 dock_refine_ls dock_peak
def action_tof(obj: TOF, thread: QtCore.QThread): w_actions = [] # f_meas = obj.is_attribute("pd_meas") # f_chi2 = obj.is_attribute("chi2") # f_phase = obj.is_attribute("phase") # l_pd_peak = [] # if f_phase: # phase = obj.phase # for item in phase.items: # try: # pd_peak = getattr(obj, f"pd_peak_{item.label.lower():}") # l_pd_peak.append(pd_peak) # except AttributeError: # pass # f_setup = obj.is_attribute("setup") # f_pd_instr_resolution = obj.is_attribute("pd_instr_resolution") # f_pd_background = obj.is_attribute("pd_background") # f_range = obj.is_attribute("range") # if not(f_chi2 & f_meas & f_setup & f_pd_instr_resolution & f_phase & # f_pd_background & f_range): # if not f_chi2: # qtb_1 = QtWidgets.QToolButton() # qtb_1.setText("Add chi2") # qtb_1.clicked.connect(lambda: add_items(obj, [Chi2()], thread)) # w_actions.append(qtb_1) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def CalculateDockSizerLimits(self, dock):\r\n\r\n docks, panes = CopyDocksAndPanes2(self._docks, self._panes)\r\n\r\n sash_size = self._art.GetMetric(AUI_DOCKART_SASH_SIZE)\r\n caption_size = self._art.GetMetric(AUI_DOCKART_CAPTION_SIZE)\r\n opposite_size = self.GetOppositeDockTotalSize...
[ "0.5547273", "0.5520557", "0.5519151", "0.536733", "0.53612316", "0.53288966", "0.5309132", "0.52417874", "0.523785", "0.51947534", "0.51807857", "0.510445", "0.5047843", "0.49827212", "0.498094", "0.497829", "0.49675336", "0.49336413", "0.49266827", "0.49179497", "0.49072325...
0.0
-1
Actions for Pd2dMeas objects.
def action_pd2d_meas(obj: Pd2dMeas, thread: QtCore.QThread): w_actions = [] qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Plot gamma-nu") def func_plot_gn(obj): fig, ax = obj.plot_gamma_nu() fig.show() return (fig, ax) qtb_1.clicked.connect(lambda: func_plot_gn(obj)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Plot 2theta-phi") def func_plot_tp(obj): fig, ax = obj.plot_ttheta_phi() fig.show() return (fig, ax) qtb_1.clicked.connect(lambda: func_plot_tp(obj)) w_actions.append(qtb_1) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self,measurements,actions):\n raise NotImplementedError", "def getMeasures():", "def _add_or_update_measurement(self,eq,meas_type,mplane_param2value,period):\r\n meas = self._pvsr.create_pvsr_object(\"Measurement\")\r\n meas.ParentId = eq.Id\r\n meas.Type = meas_type\r\n ...
[ "0.5662687", "0.55383396", "0.55314547", "0.5518812", "0.5434003", "0.5369537", "0.53463304", "0.52960706", "0.52332073", "0.5206542", "0.5196402", "0.51885736", "0.511476", "0.5113631", "0.5077229", "0.50592333", "0.50523233", "0.502988", "0.5012061", "0.5006214", "0.4951757...
0.542207
5
Actions for Pd2dMeas objects.
def action_pd2d_proc(obj: Pd2dProc, thread: QtCore.QThread): w_actions = [] qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Plot gamma-nu") def func_plot_gn(obj): fig, ax = obj.plot_gamma_nu() fig.show() return (fig, ax) qtb_1.clicked.connect(lambda: func_plot_gn(obj)) w_actions.append(qtb_1) qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Plot 2theta-phi") def func_plot_tp(obj): fig, ax = obj.plot_ttheta_phi() fig.show() return (fig, ax) qtb_1.clicked.connect(lambda: func_plot_tp(obj)) w_actions.append(qtb_1) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run(self,measurements,actions):\n raise NotImplementedError", "def getMeasures():", "def _add_or_update_measurement(self,eq,meas_type,mplane_param2value,period):\r\n meas = self._pvsr.create_pvsr_object(\"Measurement\")\r\n meas.ParentId = eq.Id\r\n meas.Type = meas_type\r\n ...
[ "0.56643057", "0.5537284", "0.55313635", "0.55198115", "0.543281", "0.542291", "0.53697515", "0.5345556", "0.5296102", "0.5231518", "0.52056426", "0.51958555", "0.5187039", "0.5114033", "0.5113663", "0.50779897", "0.50573653", "0.50535953", "0.50285745", "0.5011202", "0.50048...
0.0
-1
Actions for Diffrn objects.
def action_diffrn(obj: Diffrn, thread: QtCore.QThread): w_actions = [] f_setup = obj.is_attribute("setup") f_diffrn_radiation = obj.is_attribute("diffrn_radiation") f_diffrn_orient_matrix = obj.is_attribute("diffrn_orient_matrix") f_diffrn_refln = obj.is_attribute("diffrn_refln") f_phase = obj.is_attribute("phase") if not(f_setup & f_diffrn_radiation & f_diffrn_orient_matrix & f_diffrn_refln & f_phase): if not(f_setup): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add setup") qtb_1.clicked.connect(lambda: add_items(obj, [Setup()], thread)) w_actions.append(qtb_1) if not(f_diffrn_radiation): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add diffrn_radiation") qtb_1.clicked.connect(lambda: add_items( obj, [DiffrnRadiation()], thread)) w_actions.append(qtb_1) if not(f_diffrn_orient_matrix): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add diffrn_orient_matrix") qtb_1.clicked.connect(lambda: add_items(obj, [DiffrnOrientMatrix( ub_11=1., ub_12=0., ub_13=0., ub_21=0., ub_22=1., ub_23=0., ub_31=0., ub_32=0., ub_33=1.,)], thread)) w_actions.append(qtb_1) if not(f_diffrn_refln): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add diffrn_refln") qtb_1.clicked.connect(lambda: add_items( obj, [DiffrnReflnL()], thread)) w_actions.append(qtb_1) if not(f_phase): qtb_1 = QtWidgets.QToolButton() qtb_1.setText("Add phase") qtb_1.clicked.connect(lambda: add_items(obj, [ Phase(label="phase")], thread)) w_actions.append(qtb_1) if f_diffrn_refln: diffrn_refln = obj.diffrn_refln w_actions.extend(action_diffrn_refln_l(diffrn_refln, thread)) if f_diffrn_orient_matrix: diffrn_orient_matrix = obj.diffrn_orient_matrix w_actions.extend(action_diffrn_orient_matrix( diffrn_orient_matrix, thread)) return w_actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_diffs(history):\n\n # First get all possible representations\n mgr = plugins_get_mgr() \n keys = mgr.search('representation')['representation']\n representations = [mgr.get_by_key('representation', k) for k in keys]\n\n for i in range(len(history)):\n if i+1 > len(history) - 1:\n ...
[ "0.5404981", "0.53203255", "0.52869326", "0.5260477", "0.52597624", "0.5186789", "0.518508", "0.5163919", "0.514824", "0.50897974", "0.50714517", "0.50684255", "0.50684255", "0.5065854", "0.5063287", "0.50580245", "0.5037629", "0.50206566", "0.50169116", "0.5014264", "0.50007...
0.5864252
0
Method to scan product. Adds the product order to the list of orders.
def scan(self, product_code): self.order.add_product(product_code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add(self, product):\n pass", "def orderWatch(self, order):\r\n\t\tself.orders.append(order)", "def add_product(self):\n self.owner.new_product(self.barcode, self.description, self.price, self._add_product_callback)", "def orderWatch(self, order):\r\n\t\tself.pair.orders.append(order)", "d...
[ "0.6195393", "0.6153183", "0.6137681", "0.60344553", "0.60218424", "0.5938747", "0.582828", "0.57936937", "0.576499", "0.5742286", "0.57390934", "0.57390934", "0.56984586", "0.56835234", "0.5683081", "0.567723", "0.5611014", "0.55724466", "0.5555511", "0.5555511", "0.5548241"...
0.7857374
0
Attribute which calculates the total amount on the order after deducting discounts.
def total(self): total_price = self.get_total_amount() discounts = self.get_total_discount() return total_price - discounts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discount_amount(self):\r\n customer = self.records.find_customers(str(self.__customer).strip())\r\n order_value = self.order_value\r\n discount = customer.get_discount(order_value)\r\n return discount", "def total_amount(self):\n full_price = sum(item.price for item in self...
[ "0.67122465", "0.6551784", "0.6532095", "0.6519561", "0.651557", "0.6478766", "0.6468313", "0.6403922", "0.63672656", "0.63636285", "0.63349086", "0.63340664", "0.6284223", "0.62303615", "0.62296844", "0.6176185", "0.61685467", "0.61363816", "0.61200655", "0.61124754", "0.608...
0.66989225
1
Returns the total amount of the order without discounts.
def get_total_amount(self): total_price = 0.00 for k, v in self.order.product_orders.items(): total_price += v.quantity * v.product.price return total_price
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def basket_total_before_discounts_excl_tax(self):\n result = self.lines.aggregate(total=Sum(\"line_price_before_discounts_excl_tax\"))\n return result[\"total\"]", "def total_amount(self):\n full_price = sum(item.price for item in self._products) if self._products else 0.0\n return fu...
[ "0.7163056", "0.69267505", "0.69222707", "0.6920065", "0.66498226", "0.6615668", "0.660972", "0.65836567", "0.6532857", "0.6476015", "0.6449595", "0.6415554", "0.6324086", "0.63018864", "0.62091404", "0.61972386", "0.61502963", "0.61472183", "0.6100125", "0.60200626", "0.6015...
0.64590997
10
Calculates total discount applicable on this order.
def get_total_discount(self): total_discount = 0.00 for promotion in self.pricing_rules: discount = promotion.get_discount(self.order) total_discount += discount return total_discount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def discount_amount(self):\r\n customer = self.records.find_customers(str(self.__customer).strip())\r\n order_value = self.order_value\r\n discount = customer.get_discount(order_value)\r\n return discount", "def calculate_total(self):\n if self.total_price == 0:\n fo...
[ "0.76241744", "0.73756224", "0.7202873", "0.70788705", "0.6990974", "0.6952235", "0.6787426", "0.67570555", "0.6640429", "0.6552841", "0.652841", "0.64726514", "0.64331007", "0.64018595", "0.6394077", "0.6384539", "0.6307665", "0.62925327", "0.6282091", "0.6279652", "0.627704...
0.8006258
0
Return total but in a pretty format with Euro sign.
def get_total_display(self): total = self.total return '%.2f\N{euro sign}' % total
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_total_elle(self):\r\n \r\n return str(round(self._total_elle, 2))", "def get_total(self):\r\n \r\n return str(round(self._total, 2))", "def amount_ui(self) -> str:\n return \"{:,.2f}\".format(self.amount)", "def display_price(self):\n return '$ '+str(self.pri...
[ "0.6930728", "0.6926814", "0.6910748", "0.69088656", "0.65733093", "0.6563159", "0.65392554", "0.6340894", "0.62750125", "0.62716407", "0.62608546", "0.6224019", "0.6152539", "0.61229", "0.6107103", "0.60937685", "0.60425156", "0.6040899", "0.60386825", "0.6008453", "0.599921...
0.81358236
0
Create variables for tests.
def setUp(self): self.sync = synchronization.Sync() self.game = game.Game() self.leaderboards = leaderboards.Leaderboards() self.leaderboards.scoreboard = leaderboards.Leaderboards.scoreboard self.sync.file1 = self.f1 self.sync.file2 = self.f2 self.player1 = self.game.create_player("Drake testing") self.player2 = self.game.create_player("Benson testing") os.mkdir(self.directory)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testTurntableVariables(self):\n crawler = Crawler.create(PathHolder(self.__exrFile))\n self.assertEqual(crawler.var(\"type\"), \"turntable\")\n self.assertEqual(crawler.var(\"category\"), \"render\")\n self.assertEqual(crawler.var(\"renderType\"), \"tt\")\n self.assertEqual(c...
[ "0.68591744", "0.67279863", "0.6680693", "0.6607811", "0.65469337", "0.6510623", "0.642608", "0.64156944", "0.633385", "0.62405956", "0.6165352", "0.6113072", "0.59246886", "0.5924667", "0.5894547", "0.58901113", "0.588886", "0.58794427", "0.58783376", "0.58669263", "0.586640...
0.0
-1
Test that writing happens correctly and a file is generated.
def test_self_write(self): self.assertFalse(os.path.exists(self.f1)) self.assertFalse(os.path.exists(self.f2)) self.sync.pickle_write() self.assertTrue(os.path.exists(self.f1)) self.assertTrue(os.path.exists(self.f2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_writer_with_file():\n outputfile = \"testfile.txt\"\n GCMT(write=outputfile)\n assert os.path.exists(outputfile)\n os.remove(outputfile)", "def test_write_file():\n filename = 'test'\n content = 'hello!'\n\n write_file(content, filename)\n assert read_file(filename) == 'hello!'",...
[ "0.7806734", "0.7739952", "0.7384342", "0.7323826", "0.7065146", "0.7057785", "0.703889", "0.70044935", "0.700189", "0.69939166", "0.69856334", "0.6979201", "0.69667256", "0.690036", "0.68829405", "0.68710196", "0.68689007", "0.68655556", "0.68140554", "0.67293036", "0.668932...
0.6595651
29
MessagingCampaign a model defined in Swagger
def __init__(self): self.swagger_types = { 'id': 'str', 'name': 'str', 'date_created': 'datetime', 'date_modified': 'datetime', 'version': 'int', 'division': 'DomainEntityRef', 'campaign_status': 'str', 'callable_time_set': 'DomainEntityRef', 'contact_list': 'DomainEntityRef', 'dnc_lists': 'list[DomainEntityRef]', 'always_running': 'bool', 'contact_sorts': 'list[ContactSort]', 'messages_per_minute': 'int', 'errors': 'list[RestErrorDetail]', 'sms_config': 'SmsConfig', 'self_uri': 'str' } self.attribute_map = { 'id': 'id', 'name': 'name', 'date_created': 'dateCreated', 'date_modified': 'dateModified', 'version': 'version', 'division': 'division', 'campaign_status': 'campaignStatus', 'callable_time_set': 'callableTimeSet', 'contact_list': 'contactList', 'dnc_lists': 'dncLists', 'always_running': 'alwaysRunning', 'contact_sorts': 'contactSorts', 'messages_per_minute': 'messagesPerMinute', 'errors': 'errors', 'sms_config': 'smsConfig', 'self_uri': 'selfUri' } self._id = None self._name = None self._date_created = None self._date_modified = None self._version = None self._division = None self._campaign_status = None self._callable_time_set = None self._contact_list = None self._dnc_lists = None self._always_running = None self._contact_sorts = None self._messages_per_minute = None self._errors = None self._sms_config = None self._self_uri = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self):\n json_data = request.get_json()\n json_data[\"sender_id\"] = current_user.id\n try:\n new_campaign = self.schema.load(json_data)\n except ValidationError as err:\n return {\"message\": err.messages}, HTTPStatus.BAD_REQUEST\n if Campaign.quer...
[ "0.5676597", "0.56284505", "0.528872", "0.5195491", "0.5169349", "0.5091118", "0.50815505", "0.5049281", "0.50430477", "0.5014071", "0.4998264", "0.4988041", "0.49813396", "0.49767828", "0.49386248", "0.49370554", "0.49350393", "0.49330032", "0.49310178", "0.49302855", "0.492...
0.5792911
0
Gets the id of this MessagingCampaign. The globally unique identifier for the object.
def id(self): return self._id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n return self._id", "def get_id(self):\n ...
[ "0.77193385", "0.77193385", "0.77193385", "0.77193385", "0.77193385", "0.77193385", "0.765815", "0.765815", "0.7566762", "0.7515753", "0.75107026", "0.75107026", "0.75107026", "0.75107026", "0.75107026", "0.75107026", "0.75107026", "0.75107026", "0.75107026", "0.75107026", "0...
0.0
-1
Sets the id of this MessagingCampaign. The globally unique identifier for the object.
def id(self, id): self._id = id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_id(self, id):\n self.__id = id", "def set_id(self, id_):\n\n self.id_ = id_", "def set_id(self, id_=None):\n if id_ is None:\n self.id = id(self)\n else:\n self.id = id_", "def set_id(self, id):\n self.data['id'] = id", "def setID(self, id):\...
[ "0.7718702", "0.7609532", "0.757609", "0.74520427", "0.7333251", "0.73325944", "0.72652215", "0.7247922", "0.720335", "0.7196361", "0.7196361", "0.7196361", "0.71722925", "0.71722925", "0.71722925", "0.71394897", "0.71394897", "0.71394897", "0.71394897", "0.71394897", "0.7139...
0.72002715
13
Gets the name of this MessagingCampaign.
def name(self): return self._name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def campaign_name(self):\n\n return self._campaign_name", "def campaign_name(self):\n\n return self._campaign_name", "def campaign_name(self):\n\n return self._campaign_name", "def get_name(self) -> str:\n\n return self.name_", "def get_name(self) -> str:\n return self._n...
[ "0.83846724", "0.83846724", "0.83846724", "0.7315201", "0.7272899", "0.7272899", "0.72664964", "0.717051", "0.717051", "0.717051", "0.717051", "0.717051", "0.717051", "0.717051", "0.717051", "0.717051", "0.717051", "0.7166163", "0.7131615", "0.7131615", "0.711222", "0.71059...
0.0
-1
Sets the name of this MessagingCampaign.
def name(self, name): self._name = name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetName(self, name):\n self.name = name", "def set_name(self, name: str):\n self._name = name", "def set_name(self, name):\n self._name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name):\n self.name = name", "def set_name(self, name)...
[ "0.735443", "0.7340858", "0.7322299", "0.7251668", "0.7251668", "0.7251668", "0.7251668", "0.7251668", "0.72128856", "0.7197441", "0.7195748", "0.7195748", "0.7178476", "0.70947015", "0.7060306", "0.6969277", "0.6966887", "0.6966887", "0.69516367", "0.6950923", "0.6910362", ...
0.66005486
90
Gets the date_created of this MessagingCampaign.
def date_created(self): return self._date_created
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_created(self) -> datetime:\n return self._date_created", "def created_date(self):\n return self._created_date", "def created_date(self):\n return self._created_date", "def GetDateCreated(self):\n return str(self.datecreated)", "def date_created(self) -> str:\n re...
[ "0.797184", "0.78968954", "0.78968954", "0.75586146", "0.7542879", "0.7494075", "0.73534364", "0.73534364", "0.73233813", "0.7312706", "0.7265856", "0.7256938", "0.7255421", "0.7170623", "0.7149717", "0.7149717", "0.7149717", "0.71311", "0.7127949", "0.7127949", "0.7127949", ...
0.7941877
3
Sets the date_created of this MessagingCampaign.
def date_created(self, date_created): self._date_created = date_created
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_created(self, date_created: datetime):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n self._date_created = date_created", "def date_created(self, date_created):\n\n...
[ "0.80591285", "0.79904276", "0.79904276", "0.79904276", "0.7909916", "0.7909916", "0.7818758", "0.7676886", "0.7676886", "0.7676886", "0.7676886", "0.7676886", "0.7632258", "0.759261", "0.7258268", "0.69510114", "0.6808352", "0.67060393", "0.67060393", "0.67060393", "0.664208...
0.80568826
1
Gets the date_modified of this MessagingCampaign.
def date_modified(self): return self._date_modified
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetLastModifiedDate', self.handle)", "def modifie...
[ "0.72402114", "0.72402114", "0.71875286", "0.7092818", "0.70100117", "0.6977944", "0.6977944", "0.69701886", "0.69357944", "0.69300026", "0.69300026", "0.69300026", "0.69300026", "0.69300026", "0.69300026", "0.6924368", "0.6924368", "0.6866872", "0.6866872", "0.68586713", "0....
0.81347257
0
Sets the date_modified of this MessagingCampaign.
def date_modified(self, date_modified): self._date_modified = date_modified
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):...
[ "0.7839285", "0.7839285", "0.7839285", "0.7839285", "0.7839285", "0.7839285", "0.7839285", "0.75868607", "0.75868607", "0.64068264", "0.64068264", "0.63872856", "0.6339021", "0.6339021", "0.6242144", "0.61531746", "0.6107971", "0.6012136", "0.5762818", "0.5762818", "0.575947"...
0.7893535
0
Gets the version of this MessagingCampaign. Required for updates, must match the version number of the most recent update
def version(self): return self._version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_version(self):\n return self.bot_data_file[\"version\"]", "def get_version(self):\n return self.__make_api_call('get/version')", "def get_version(self):\n return self._version", "def get_version(self):\n return self._version", "def version(self):\n self._get_lates...
[ "0.74936455", "0.7484783", "0.7445432", "0.7445432", "0.74160135", "0.7414787", "0.72851366", "0.7277261", "0.72265685", "0.7201598", "0.71837974", "0.7182243", "0.7182243", "0.71736085", "0.7157434", "0.7147735", "0.7075582", "0.7058257", "0.70546836", "0.7049833", "0.703816...
0.6938133
37
Sets the version of this MessagingCampaign. Required for updates, must match the version number of the most recent update
def version(self, version): self._version = version
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version(self, version):\n self._version = version", "def version(self, version):\n self._version = version", "def set_version(self, version: str) -> None:\n if self.current_version == version:\n return\n self.current_version = version\n self._del_cached_propert...
[ "0.72409934", "0.72409934", "0.71715456", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844", "0.71619844"...
0.7293403
0
Gets the division of this MessagingCampaign. The division this entity belongs to.
def division(self): return self._division
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subdivision(self) -> Optional[str]:\n return pulumi.get(self, \"subdivision\")", "def subdivision(self) -> Optional[str]:\n return pulumi.get(self, \"subdivision\")", "def get_group(self):\n return self._group", "def getGroup(self):\n\t\treturn self.Group", "def get_domain(self):\n...
[ "0.5810358", "0.5810358", "0.51984763", "0.5110629", "0.50223154", "0.5020974", "0.50027514", "0.49567127", "0.49472067", "0.49352798", "0.49319625", "0.49162114", "0.4908078", "0.4903234", "0.488874", "0.488874", "0.488874", "0.4876585", "0.4860735", "0.48574632", "0.4844820...
0.6648795
0
Sets the division of this MessagingCampaign. The division this entity belongs to.
def division(self, division): self._division = division
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def division(self, division):\n\n self._division = division", "def set_divide(self, a_divide):\n self.set_parameter('divide', a_divide)\n return self", "def SetBoundaryCriterion(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivideContinuity_SetBoundaryCriterion(self, *args)...
[ "0.6715935", "0.48573586", "0.48263213", "0.45991567", "0.45864677", "0.458617", "0.45811203", "0.44704387", "0.44482273", "0.4443386", "0.4443386", "0.44264278", "0.44212875", "0.439743", "0.43385282", "0.43234468", "0.43153226", "0.4298492", "0.4283395", "0.42175615", "0.41...
0.66989917
1
Gets the campaign_status of this MessagingCampaign. The current status of the messaging campaign. A messaging campaign may be turned 'on' or 'off'.
def campaign_status(self): return self._campaign_status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "def get_status(self):\n return self._status", "def get_status(self):\n statuses = dict(ACTIVITY_STATUS_CHOICES)\n return statuses.get(self.status, \"N/A\")", "def campaign_status(self, campai...
[ "0.6404847", "0.6360354", "0.62657183", "0.62341815", "0.6217242", "0.6217242", "0.6217242", "0.62143916", "0.61250436", "0.6114817", "0.6114813", "0.60491306", "0.6047289", "0.6042757", "0.6023667", "0.6022818", "0.5983734", "0.59719783", "0.5946572", "0.5946572", "0.5946572...
0.8441347
0
Sets the campaign_status of this MessagingCampaign. The current status of the messaging campaign. A messaging campaign may be turned 'on' or 'off'.
def campaign_status(self, campaign_status): allowed_values = ["on", "stopping", "off", "complete", "invalid"] if campaign_status.lower() not in map(str.lower, allowed_values): # print("Invalid value for campaign_status -> " + campaign_status) self._campaign_status = "outdated_sdk_version" else: self._campaign_status = campaign_status
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def campaign_status(self):\n return self._campaign_status", "def set_activity(self, status):\n self._activity = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.status = status", "def set_status(self, status):\n self.s...
[ "0.6368695", "0.61262167", "0.5973353", "0.5973353", "0.5973353", "0.5827563", "0.57827497", "0.5776234", "0.5757022", "0.5722353", "0.5680996", "0.56777996", "0.5648092", "0.5639821", "0.5627372", "0.55989665", "0.5592994", "0.5582759", "0.5582326", "0.55720466", "0.55364394...
0.7548788
0
Gets the callable_time_set of this MessagingCampaign. The callable time set for this messaging campaign.
def callable_time_set(self): return self._callable_time_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def callable_time_set(self, callable_time_set):\n \n self._callable_time_set = callable_time_set", "def schedule_times(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"schedule_times\")", "def getScheduleOnset(self):\n return DPxGetDinSchedOnset()", "def get_schedules...
[ "0.6893189", "0.55124503", "0.54107213", "0.5211071", "0.5081652", "0.5053929", "0.4999273", "0.4996595", "0.4996595", "0.4996595", "0.49755397", "0.4962994", "0.49513596", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4948963", "0.4948963",...
0.8133848
0
Sets the callable_time_set of this MessagingCampaign. The callable time set for this messaging campaign.
def callable_time_set(self, callable_time_set): self._callable_time_set = callable_time_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def callable_time_set(self):\n return self._callable_time_set", "def set_time(self, set_time):\n\n self._set_time = set_time", "def setScheduleOnset(self, onset):\n DPxSetDinSchedOnset(onset)", "def setShowCallables(self, show_callables):\n logger.debug(\"setShowCallables: {}\".fo...
[ "0.65908307", "0.6178317", "0.5953841", "0.5100885", "0.49795374", "0.49558243", "0.483162", "0.48207587", "0.48039177", "0.477478", "0.47390914", "0.47065333", "0.47013178", "0.4697793", "0.4662012", "0.46349522", "0.45914975", "0.45656434", "0.45478013", "0.45220882", "0.45...
0.8699226
0
Gets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
def contact_list(self): return self._contact_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_contacts(self):\n return self.contacts", "def get_contacts(self):\n\n\t\treturn self.__contacts", "def GetContactList(self):\n\t\tfeeds = []\n\t\tfeed = self.client.GetContacts()\n\t\tfeeds.append(feed)\n\t\tnext = feed.GetNextLink()\n\t\twhile next:\n\t\t\tfeed = self.client.GetContacts(uri=ne...
[ "0.72631705", "0.71663344", "0.7121245", "0.6652589", "0.65349084", "0.64175344", "0.63972867", "0.6386485", "0.63770306", "0.63157517", "0.61904186", "0.61765337", "0.6132532", "0.6121634", "0.6121634", "0.602559", "0.6003591", "0.59747976", "0.5964907", "0.5926964", "0.5926...
0.8081953
0
Sets the contact_list of this MessagingCampaign. The contact list that this messaging campaign will send messages for.
def contact_list(self, contact_list): self._contact_list = contact_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def receiveContactList(self, contactList):", "def set_contacts(self, contacts):\n\n\t\tif contacts is not None and not isinstance(contacts, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: contacts EXPECTED TYPE: list', None, None)\n\t\t\n\t\tself.__contacts = contacts\n\t\tself.__key_modified['...
[ "0.6209503", "0.6202445", "0.588594", "0.588594", "0.5883504", "0.5845821", "0.5795876", "0.5775629", "0.55796677", "0.5562263", "0.5562263", "0.5559013", "0.5479643", "0.5460591", "0.53779536", "0.5320598", "0.53073066", "0.5305575", "0.52854943", "0.5274175", "0.5192591", ...
0.83265656
0
Gets the dnc_lists of this MessagingCampaign. The dnc lists to check before sending a message for this messaging campaign.
def dnc_lists(self): return self._dnc_lists
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dmarc_messages(self):\n messages = []\n try:\n if self.opt_use_ssl:\n self.server = poplib.POP3_SSL(self.opt_pop3_server)\n self.server.user(self.opt_global_account[\"username\"])\n self.server.pass_(self.opt_global_account[\"password\"]...
[ "0.5988404", "0.58297056", "0.56870914", "0.55929554", "0.5453402", "0.5412666", "0.5395899", "0.5304845", "0.5249858", "0.5206307", "0.5206069", "0.5170666", "0.5170228", "0.51614743", "0.5143177", "0.51181716", "0.5111123", "0.50970674", "0.50523174", "0.504655", "0.5045429...
0.70566237
0
Sets the dnc_lists of this MessagingCampaign. The dnc lists to check before sending a message for this messaging campaign.
def dnc_lists(self, dnc_lists): self._dnc_lists = dnc_lists
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SetDomainsList(self, domainsList) :\n\t\t...", "def checklists(self, checklists):\n\n self._checklists = checklists", "def fdsid_list(self, fdsid_list):\n\n self._fdsid_list = fdsid_list", "def contact_list(self, contact_list):\n \n self._contact_list = contact_list", "def d...
[ "0.5641915", "0.55063945", "0.52784514", "0.5267238", "0.51484704", "0.5096262", "0.48523757", "0.48513708", "0.48345873", "0.48015624", "0.47823006", "0.47243136", "0.47188637", "0.47175246", "0.4708469", "0.47014678", "0.46647304", "0.466445", "0.46205124", "0.45986927", "0...
0.75755775
0
Gets the always_running of this MessagingCampaign. Whether this messaging campaign is always running
def always_running(self): return self._always_running
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsRunning(self):\n return self.running", "def running(self):\n return self.scheduler.running", "def is_running(self):\n return self._running", "def is_running(self):\n return self._running", "def is_running(self):\n return self._running.is_set()", "def running(self)...
[ "0.684747", "0.68215805", "0.6736479", "0.6736479", "0.67310226", "0.67029005", "0.6684114", "0.6684114", "0.6658457", "0.6658457", "0.6658457", "0.6624625", "0.6622321", "0.6609278", "0.6596", "0.6578254", "0.65689", "0.65341485", "0.6479026", "0.6433435", "0.6432685", "0....
0.8078336
0
Sets the always_running of this MessagingCampaign. Whether this messaging campaign is always running
def always_running(self, always_running): self._always_running = always_running
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def always_running(self):\n return self._always_running", "def set_as_running(self):\n with self._running_condition:\n assert self._state == PENDING_STATE\n self._state = RUNNING_STATE\n self._running_condition.notify()", "def set_running(self):\n with self...
[ "0.70217943", "0.6201809", "0.61069536", "0.5851402", "0.5851402", "0.5851402", "0.568576", "0.5624877", "0.5585618", "0.5526143", "0.5486297", "0.54765916", "0.5464183", "0.5464183", "0.5460435", "0.5454445", "0.5423037", "0.5418796", "0.5418796", "0.5418796", "0.53875816", ...
0.8169366
0
Gets the contact_sorts of this MessagingCampaign. The order in which to sort contacts for dialing, based on up to four columns.
def contact_sorts(self): return self._contact_sorts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSorted(self):\n return sorted(self.contacts)", "def contact_sorts(self, contact_sorts):\n \n self._contact_sorts = contact_sorts", "def get_sort_columns(self):\n col_sort_orders = self.gridpreference_sort.all().values_list('column__id', flat=True)\n return GridColumn.o...
[ "0.6996267", "0.6311992", "0.60650474", "0.60044813", "0.5959529", "0.5943762", "0.5862135", "0.57969254", "0.5660018", "0.5624674", "0.5591094", "0.55369407", "0.52994704", "0.52872974", "0.52799004", "0.5243885", "0.5241674", "0.5228304", "0.5202736", "0.51900417", "0.51636...
0.8388801
0
Sets the contact_sorts of this MessagingCampaign. The order in which to sort contacts for dialing, based on up to four columns.
def contact_sorts(self, contact_sorts): self._contact_sorts = contact_sorts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def contact_sorts(self):\n return self._contact_sorts", "def set_sorts(self, sorts: List[DataGridSort]):\n self.sorts = sorts", "def set_contacts(self, contacts):\n\n\t\tif contacts is not None and not isinstance(contacts, list):\n\t\t\traise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: contacts...
[ "0.68852335", "0.62442034", "0.55268556", "0.55110604", "0.5504529", "0.5504529", "0.5466908", "0.5404668", "0.53766954", "0.5268589", "0.5230543", "0.51540035", "0.5129339", "0.51096964", "0.5105754", "0.5105754", "0.50918037", "0.5086296", "0.5057816", "0.50474155", "0.5042...
0.8539102
0
Gets the messages_per_minute of this MessagingCampaign. How many messages this messaging campaign will send per minute.
def messages_per_minute(self): return self._messages_per_minute
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def messages_per_minute(self, messages_per_minute):\n \n self._messages_per_minute = messages_per_minute", "def get_words_per_minute(self):\n return self.words_per_minute", "def query_plans_per_minute(self) -> int:\n return pulumi.get(self, \"query_plans_per_minute\")", "def getNu...
[ "0.686777", "0.6715453", "0.5897515", "0.57897204", "0.5508112", "0.5492951", "0.5468848", "0.5431342", "0.5427984", "0.54239345", "0.5376645", "0.5357198", "0.52036357", "0.5193324", "0.5188115", "0.51773685", "0.51770973", "0.51570386", "0.51300323", "0.512672", "0.51112086...
0.8563438
0
Sets the messages_per_minute of this MessagingCampaign. How many messages this messaging campaign will send per minute.
def messages_per_minute(self, messages_per_minute): self._messages_per_minute = messages_per_minute
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def messages_per_minute(self):\n return self._messages_per_minute", "def set_words_per_minute(self, words_per_minute):\n is_valid_wpm = 5.0 <= words_per_minute <= 60.0\n if is_valid_wpm:\n self.words_per_minute = words_per_minute\n self.dot_time_in_msec = 1200.0 / self....
[ "0.66696775", "0.58786", "0.5668301", "0.5376296", "0.5365551", "0.52839804", "0.51178545", "0.5013213", "0.49938592", "0.47783017", "0.4773923", "0.4772288", "0.4751765", "0.47004074", "0.46891505", "0.46256608", "0.460433", "0.45917523", "0.45578098", "0.45366237", "0.45331...
0.87528765
0
Gets the errors of this MessagingCampaign. A list of current error conditions associated with this messaging campaign.
def errors(self): return self._errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]", "def Errors(self):\n return self._get_attribute('errors')", "def getErrorsList(self):\n return self.__errors", "def errors (self):\n return self._errors", "def errors (self):\...
[ "0.7164323", "0.70850545", "0.7084039", "0.7073318", "0.7073318", "0.7032286", "0.69743216", "0.6840167", "0.6840085", "0.6816689", "0.68060446", "0.67511207", "0.66765666", "0.6626607", "0.65734386", "0.65553755", "0.65435886", "0.6522403", "0.6402078", "0.63631475", "0.6350...
0.7075878
3
Sets the errors of this MessagingCampaign. A list of current error conditions associated with this messaging campaign.
def errors(self, errors): self._errors = errors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def errors(self, errors):\n\n self._errors = errors", "def validation_errors(self, validation_errors):\n self._validation_errors = validation_errors", "def add_errors(self, errors):\n self.errors = merge_errors(self.errors, errors)", "def errors (self):\n return self._errors", "...
[ "0.7063868", "0.6316049", "0.62133807", "0.5995449", "0.5995449", "0.5993789", "0.5955976", "0.58611673", "0.5738866", "0.57359105", "0.56898946", "0.56828547", "0.56606567", "0.56536406", "0.5637317", "0.5577304", "0.5540206", "0.5453656", "0.54534554", "0.54445463", "0.5427...
0.7172415
0
Gets the sms_config of this MessagingCampaign. Configuration for this messaging campaign to send SMS messages.
def sms_config(self): return self._sms_config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sms_config(self, sms_config):\n \n self._sms_config = sms_config", "def config(self):\n if self.__config is None:\n self.__config = self._get_config(self.bot)\n return self.__config", "def sms(self):\r\n return sms.SMS(self)", "def get_configuration(self) -> ...
[ "0.66070634", "0.592993", "0.56343", "0.54566157", "0.5446236", "0.5412347", "0.53807044", "0.53407514", "0.5317883", "0.52791315", "0.52556473", "0.52439207", "0.52439207", "0.52175206", "0.52175206", "0.52135134", "0.5207754", "0.5194879", "0.5194879", "0.5194879", "0.51929...
0.8423905
0
Sets the sms_config of this MessagingCampaign. Configuration for this messaging campaign to send SMS messages.
def sms_config(self, sms_config): self._sms_config = sms_config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sms_config(self):\n return self._sms_config", "def sms_enabled(self, sms_enabled):\n\n self._sms_enabled = sms_enabled", "def send_sms(self, sms):\n pass", "def sms_phone_number(self, sms_phone_number):\n\n self._sms_phone_number = sms_phone_number", "def sms_disabled(self, ...
[ "0.6710091", "0.6376342", "0.61567897", "0.5817117", "0.5528274", "0.5234292", "0.5205838", "0.515643", "0.5122757", "0.5109166", "0.51015836", "0.50929946", "0.50358784", "0.5014341", "0.49991947", "0.49899116", "0.49732202", "0.49195576", "0.4918217", "0.4918136", "0.490152...
0.84385973
0
Gets the self_uri of this MessagingCampaign. The URI for this object
def self_uri(self): return self._self_uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_uri(self):\n return self.__uri", "def get_uri(self):\n return self.url", "def uri(self) -> str:\n return self._uri", "def getURI(self):\n return _libsbml.SBase_getURI(self)", "def getURI(self):\n return _libsbml.SBasePlugin_getURI(self)", "def uri(self):\n ...
[ "0.72447217", "0.72073245", "0.7078981", "0.7075188", "0.6987316", "0.6909857", "0.6909857", "0.6909857", "0.6909857", "0.6909857", "0.6909857", "0.68474865", "0.6822147", "0.68092525", "0.68092525", "0.68092525", "0.68092525", "0.68092525", "0.68092525", "0.68092525", "0.680...
0.8186954
3
Sets the self_uri of this MessagingCampaign. The URI for this object
def self_uri(self, self_uri): self._self_uri = self_uri
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def self_uri(self):\n return self._self_uri", "def self_uri(self):\n return self._self_uri", "def self_uri(self):\n return self._self_uri", "def self_uri(self):\n return self._self_uri", "def uri(self) -> str:\n return self._uri", "def get_uri(self):\n return sel...
[ "0.7107602", "0.7107602", "0.7107602", "0.7107602", "0.591665", "0.58686566", "0.5748005", "0.574481", "0.57201016", "0.57201016", "0.57201016", "0.57201016", "0.57201016", "0.57201016", "0.57201016", "0.57201016", "0.57201016", "0.5711726", "0.5708703", "0.5708703", "0.57087...
0.8014554
3
Returns the model properties as a dict
def to_dict(self): result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_dict(self):\n return self.properties", "def to_dict(self):\n return self.properties", "def get_properties(self):\n return self.properties", "def asdict(self):\n return self._prop_dict", "def json(self):\n rv = {\n prop: getattr(self, prop)\n f...
[ "0.7751993", "0.7751993", "0.73391134", "0.7334895", "0.7297356", "0.727818", "0.7159078", "0.71578115", "0.71494967", "0.71494967", "0.71283495", "0.71275014", "0.7122587", "0.71079814", "0.7060394", "0.7043251", "0.7034103", "0.70233124", "0.69635814", "0.69586295", "0.6900...
0.0
-1
Returns the model as raw JSON
def to_json(self): return json.dumps(sanitize_for_serialization(self.to_dict()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_json(self) -> str:\n return json.dumps(model_to_dict(self))", "def as_json(self):", "def json(self):\n return {'id': self.id, 'name': self.name, 'description': self.description}", "def model_json(name):\n model = Model.query.filter_by(name=name).first_or_404()\n return jsonify(**mo...
[ "0.7920127", "0.7444696", "0.72223186", "0.7181604", "0.71696025", "0.7103236", "0.7078409", "0.7004283", "0.69838184", "0.6976507", "0.69680184", "0.69473517", "0.6939899", "0.6939899", "0.6939899", "0.6935423", "0.6935423", "0.6913819", "0.6902281", "0.6876575", "0.68757135...
0.6839134
36
Returns the string representation of the model
def to_str(self): return pformat(self.to_dict())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n return super().__str__() + self.model.__str__()", "def __str__(self) -> str:\n # noinspection PyUnresolvedReferences\n opts = self._meta\n if self.name_field:\n result = str(opts.get_field(self.name_field).value_from_object(self))\n else:\n ...
[ "0.85856134", "0.7814518", "0.77898884", "0.7751367", "0.7751367", "0.7712228", "0.76981676", "0.76700574", "0.7651133", "0.7597206", "0.75800353", "0.7568254", "0.7538184", "0.75228703", "0.7515832", "0.7498764", "0.74850684", "0.74850684", "0.7467648", "0.74488163", "0.7442...
0.0
-1
For `print` and `pprint`
def __repr__(self): return self.to_str()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pprint(*args, **kwargs):\n if PRINTING:\n print(*args, **kwargs)", "def print_out():\n pass", "def custom_print(*objects):\n print(*objects, sep=OFS, end=ORS)", "def _print(self, *args):\n return _ida_hexrays.vd_printer_t__print(self, *args)", "def _printable(self):\n ...
[ "0.75581616", "0.7337525", "0.6988224", "0.6984917", "0.6944316", "0.6923891", "0.6899785", "0.6898276", "0.6816268", "0.680663", "0.6751926", "0.67508817", "0.67453593", "0.66987187", "0.66916466", "0.6675672", "0.66579014", "0.6610545", "0.6606928", "0.6602885", "0.65634936...
0.0
-1
Returns true if both objects are equal
def __eq__(self, other): return self.__dict__ == other.__dict__
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self, other):\n return are_equal(self, other)", "def __eq__(self,other):\n try: return self.object==other.object and isinstance(self,type(other))\n except: return False", "def __eq__(self, other):\n if i...
[ "0.8088132", "0.8088132", "0.8054589", "0.7982687", "0.7961088", "0.7961088", "0.79433626", "0.79303336", "0.7926563", "0.7897525", "0.78826123", "0.78826123", "0.78806067", "0.7872423", "0.7868354", "0.78668815", "0.7825702", "0.7819993", "0.78162885", "0.78078854", "0.78068...
0.79670393
41
Returns true if both objects are not equal
def __ne__(self, other): return not self == other
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, other) -> bool:\n return not self.__eq__(other)", "def __eq__(self, other):\n ...
[ "0.845611", "0.8391477", "0.8144138", "0.81410587", "0.8132492", "0.8093973", "0.80920255", "0.80920255", "0.80920255", "0.8085325", "0.8085325", "0.8076365", "0.8076365", "0.8065748" ]
0.0
-1
seaborn time series, with errorbands
def sns_time_series(x_tuple,y_tuple,outputname,errors=0,two=False, *args,**kwargs): if (type(outputname)==str)|(type(x_tuple)==tuple)|(type(y_tuple)==tuple): pass else: raise TypeError() import matplotlib matplotlib.use("pdf") import matplotlib.pyplot as plt import numpy as np import seaborn as sns; sns.set_style('darkgrid') import seaborn.timeseries x, x_label = x_tuple y, y_label = y_tuple if two==True: x2,x_label2 = x_tuple2 y2,y_label2 = y_tuple2 def _plot_std_bars(std=None, central_data=None, ci=None, data=None,*args, **kwargs): std = errors ci = np.asarray((central_data - std, central_data + std)) kwargs.update({"central_data": central_data, "ci": ci, "data": data}) seaborn.timeseries._plot_ci_band(*args, **kwargs) seaborn.timeseries._plot_std_bars = _plot_std_bars plt.figure() sns.tsplot(xip,r,err_style='std_bars') sns.tsplot(xim,r,err_style='std_bars',color='r') plt.xlabel(r'$\theta$ (arcmin)') plt.ylabel(r'$\xi$') plt.xscale('log') plt.yscale('log') plt.legend([r'$\xi_+$',r'$\xi_-$'],bbox_to_anchor=(1, 1), loc='upper right', borderaxespad=0.) plt.savefig(outputname+'.pdf') plt.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize_time_series(fig_ax, data, inp_color, missing_data, lag_color, first_date,\n x_label=\"Number of Days\", y_label=\"Log of Aluminium Price\", title=\"Prices over time\"):\n fig, ax = fig_ax\n ((x_train_raw, y_train_raw), y_pred_list) = data\n\n missing_x, missing_y = missing_data\n is_mi...
[ "0.5816549", "0.5654263", "0.5647554", "0.5641937", "0.5606311", "0.5547447", "0.5509094", "0.5484054", "0.54816675", "0.5443533", "0.5367257", "0.5338084", "0.52920187", "0.52789783", "0.5228553", "0.5223728", "0.52207625", "0.52193207", "0.5198728", "0.5193151", "0.518719",...
0.580105
1
Serializes C{result} to JSON and writes it to C{request}.
def _writeJSONResponse(result, request, code=CODE.SUCCESS, status=http.OK): response = { u'code': code.value, u'result': result} request.setHeader('content-type', 'application/json') request.setResponseCode(status) request.write(json.dumps(response)) request.finish()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _success(self, result_ser, request):\n result = json.dumps(result_ser)\n request.write(result)\n request.finish()", "def _convert_to_JSON(result):\n response = make_response(json.dumps(result))\n response.headers['Access-Control-Allow-Origin'] = \"*\"\n response.mimetype = \"app...
[ "0.72616404", "0.7128404", "0.66393524", "0.6588018", "0.65028614", "0.6434777", "0.63020784", "0.62061685", "0.6200206", "0.6102137", "0.60957694", "0.608075", "0.591243", "0.58815205", "0.5878156", "0.5844654", "0.58311284", "0.5830024", "0.5819786", "0.57798034", "0.577429...
0.6903042
2
Maps a L{CODE} constant to a HTTP code.
def _mapErrorCodeToStatus(code): if code == 103: return http.NOT_FOUND return http.INTERNAL_SERVER_ERROR
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_code():\n return jsonify({\"status\": \"0\", \"code\": code_status})", "def setResponseCode(code, message=None):", "def send_code(self, code: str) -> Dict:\n raise NotImplementedError", "async def with_code_header():\n return jsonify(language=request.headers.get(\"Lang\")), 203, {\"X\": ...
[ "0.6723069", "0.6591856", "0.65714204", "0.6495424", "0.6402399", "0.63924485", "0.6334081", "0.6325058", "0.6201443", "0.6191107", "0.616254", "0.616247", "0.6142737", "0.61202556", "0.6110832", "0.6099238", "0.6080944", "0.6080944", "0.6080944", "0.6080944", "0.60740554", ...
0.77379066
0
Serializes a L{Failure} to JSON and writes it to the C{request}
def _writeJSONErrorResponse(f, request): code = getattr(f.value, 'code', CODE.UNKNOWN) _writeJSONResponse( result=f.getErrorMessage().decode('ascii'), request=request, code=code, status=_mapErrorCodeToStatus(code)) raise f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def failure(self, validation_failure):\n \n self.request.response.status_int = 400\n return validation_failure.error.asdict()", "def failure(self, error, rc, msg):\n self.module.fail_json(msg=msg, rc=rc, err=error)", "def _FailureResponse(args_dict=None):\n if args_dict is No...
[ "0.6702617", "0.66657233", "0.640679", "0.6245065", "0.6203946", "0.6131184", "0.6113463", "0.61089534", "0.6089362", "0.6044126", "0.5987193", "0.5950327", "0.5941476", "0.592693", "0.5907787", "0.586536", "0.5850635", "0.5850635", "0.5835833", "0.58124745", "0.58044654", ...
0.6412482
2
Decorator for render_ methods. Serializes the return value or exception to JSON and then writes it to the request object.
def jsonResult(f): def _inner(self, request): d = maybeDeferred(f, self, request) d.addCallback(_writeJSONResponse, request) d.addErrback(_writeJSONErrorResponse, request) return NOT_DONE_YET return _inner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def jsonify(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n result = f(*args, **kwargs)\n data = json.dumps(result, indent=None if request.is_xhr else 2)\n return app.response_class(data, mimetype='application/json')\n return decorated_function", "def json(f):\n if d...
[ "0.70818603", "0.70353097", "0.70183825", "0.69342184", "0.68645805", "0.67931926", "0.67370886", "0.6686487", "0.6680778", "0.66791755", "0.6660768", "0.6641343", "0.6585666", "0.658487", "0.65263444", "0.6510545", "0.64600986", "0.64438164", "0.63704526", "0.6299694", "0.62...
0.0
-1
Verify PayPal IPN data.
def verify(self, request): paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr' if not self.SANDBOX: paypalURL = 'https://www.paypal.com/cgi-bin/webscr' def _cb(response): if response == 'INVALID': raise PaypalError( 'IPN data invalid. data: %s', (data,)) elif response == 'VERIFIED': return True else: raise PaypalError('Unrecognized verification response: %s', (response,)) data = request.content.read() params = '?cmd=_notify-validate&' + data d = getPage(paypalURL+params, method='POST') d.addCallback(_cb) return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_ipn(data):\n data = dict(data)\n data['cmd'] = '_notify-validate'\n resp = requests.post(app.config['PAYPAL']['endpoint'], data=data)\n if resp.text == 'VERIFIED':\n return True\n return False", "def validate_with_paypal(request, validate_type):\n if validate_type == 'PDT':\n ...
[ "0.8481473", "0.6472825", "0.5787242", "0.5769118", "0.5757081", "0.5683456", "0.5683456", "0.5624004", "0.5467733", "0.54500043", "0.544273", "0.5417485", "0.54154587", "0.5396169", "0.5375768", "0.53615075", "0.5322293", "0.53124595", "0.52979654", "0.5271814", "0.5205165",...
0.75808173
1
Recieves and verifies PayPal callbacks.
def render_POST(self, request): log.msg("Paypal callback:") log.msg(request.args) d = self.verify(request) d.addCallback(lambda ign: self._process(request.args)) d.addErrback(log.err) return ''
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_process_postpay_accepted(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.fir...
[ "0.6399683", "0.61258787", "0.5970598", "0.58396447", "0.5839387", "0.5666613", "0.5610431", "0.56041867", "0.55926824", "0.5504206", "0.55006534", "0.54768145", "0.54386526", "0.53870726", "0.536934", "0.53045034", "0.5277781", "0.5267446", "0.5254518", "0.5215958", "0.52090...
0.61163276
2
Retrieve a list of recent donations.
def recent(self, limit): def _cb(players, donations): donators = [] for donation in donations: player = players[donation.donator.steamID].copy() player['date'] = donation.date.asPOSIXTimestamp() player['amount'] = str(donation.amount) donators.append(player) return donators donations = [] steamids = set() for donation in self.store.query(Donation, AND(Donation.donator == Donator.storeID, Donator.anonymous == False, Donator.steamID != None), limit=limit, sort=Donation.date.descending): steamids.add(donation.donator.steamID) donations.append(donation) d = self.getPlayerSummaries(steamids) d.addCallback(_cb, donations) return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_list_of_donations():\n try:\n logger.info('opening get_list_of_donations database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n query_results = (Donations.select(Donations.id, Donations.donation_date,\n ...
[ "0.65837014", "0.65390664", "0.64193267", "0.5857776", "0.5843226", "0.56542754", "0.565169", "0.56222814", "0.56117177", "0.55442196", "0.55310655", "0.5512683", "0.5447088", "0.54198706", "0.53970146", "0.5342934", "0.5318437", "0.5283673", "0.52812785", "0.52525187", "0.52...
0.668418
0
Retrieves a list of donators sorted by total donation amount.
def getTop(self, limit): def _cb(info, donators): players = [] for donator in donators: players.append(dict(donator, **info[donator['steamID']])) return players donators = [] steamIDs = [] for d in self.store.query(Donator, AND(Donator.anonymous == False, Donator.steamID != None), sort=Donator.totalAmount.desc, limit=limit): steamIDs.append(d.steamID) donators.append(donatorToDict(d)) d = self.getPlayerSummaries(steamIDs) d.addCallback(_cb, donators) return d
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def donations(self):\n return self.caller.player.Dominion.assets.donations.all().order_by(\"amount\")", "def list_donations(self, caller):\n msg = \"{wDonations:{n\\n\"\n table = PrettyTable([\"{wGroup{n\", \"{wTotal{n\"])\n for donation in self.donations:\n table.add_row([...
[ "0.71429664", "0.68319184", "0.6688947", "0.65133834", "0.65004057", "0.64896405", "0.6419263", "0.63591397", "0.6345003", "0.6311067", "0.6118759", "0.60955817", "0.6024189", "0.5991387", "0.5889248", "0.58387095", "0.5754497", "0.57437706", "0.5725672", "0.57109547", "0.563...
0.5803403
16
Sets the learning rate to the initial LR decayed by 0.5 every 5 epochs
def adjust_learning_rate(optimizer, epoch): lr = args.lr * (0.4 ** (epoch // 4)) for param_group in optimizer.param_groups: param_group['lr'] = lr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjust_learning_rate(init_lr, optimizer, epoch, n=100):\n init_lr = init_lr * (0.1 ** (epoch // n))\n print('learning rate : ', init_lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = init_lr", "def adjust_learning_rate(optimizer, epoch):\n lr = opt.lr * (0.5 ** (epoch ...
[ "0.75293964", "0.75141555", "0.75137126", "0.74792343", "0.7465126", "0.7460736", "0.7442363", "0.73354864", "0.73117834", "0.72968334", "0.72968334", "0.72960633", "0.7283488", "0.7278503", "0.72781247", "0.72749794", "0.72587585", "0.723886", "0.72382396", "0.7237294", "0.7...
0.7062625
74
Create a message for an email.
def createMessageWithAttachment(sender, to, subject, msgHtml, msgPlain, attachmentFile): message = MIMEMultipart('mixed') message['to'] = to message['from'] = sender message['subject'] = subject messageA = MIMEMultipart('alternative') messageR = MIMEMultipart('related') messageR.attach(MIMEText(msgHtml, 'html')) messageA.attach(MIMEText(msgPlain, 'plain')) messageA.attach(messageR) message.attach(messageA) print("create_message_with_attachment: file: %s" % attachmentFile) content_type, encoding = mimetypes.guess_type(attachmentFile) if content_type is None or encoding is not None: content_type = 'application/octet-stream' main_type, sub_type = content_type.split('/', 1) if main_type == 'text': fp = open(attachmentFile, 'rb') msg = MIMEText(fp.read(), _subtype=sub_type) fp.close() elif main_type == 'image': fp = open(attachmentFile, 'rb') msg = MIMEImage(fp.read(), _subtype=sub_type) fp.close() elif main_type == 'audio': fp = open(attachmentFile, 'rb') msg = MIMEAudio(fp.read(), _subtype=sub_type) fp.close() else: fp = open(attachmentFile, 'rb') msg = MIMEBase(main_type, sub_type) msg.set_payload(fp.read()) fp.close() filename = os.path.basename(attachmentFile) msg.add_header('Content-Disposition', 'attachment', filename=filename) message.attach(msg) return {'raw': base64.urlsafe_b64encode(message.as_string())}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createMessage( self, *args, **kw ):\n return MailMessage( *args, **kw )", "def createMessage( self, *args, **kw ):\n if not kw.has_key('charset'):\n kw['charset'] = self.getOutputCharset()\n kw['to_mail'] = 1\n return MailServerBase.createMessage( self, *args, **kw )", ...
[ "0.8220897", "0.783499", "0.7542338", "0.7535529", "0.7519684", "0.74869245", "0.7417828", "0.73909754", "0.7373257", "0.7355638", "0.73106784", "0.7297086", "0.7269137", "0.719846", "0.7196095", "0.7140178", "0.7047853", "0.7021525", "0.70037013", "0.70037013", "0.68474555",...
0.6165352
69
Return the next `batch_size` examples from this data set.
def next_batch(self, batch_size, shuffle=True): start = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: perm0 = np.arange(self._num_examples) np.random.shuffle(perm0) self._images = self.images[perm0] self._labels = self.labels[perm0] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch rest_num_examples = self._num_examples - start images_rest_part = self._images[start:self._num_examples] labels_rest_part = self._labels[start:self._num_examples] # Shuffle the data if shuffle: perm = np.arange(self._num_examples) np.random.shuffle(perm) self._images = self.images[perm] self._labels = self.labels[perm] # Start next epoch start = 0 self._index_in_epoch = batch_size - rest_num_examples end = self._index_in_epoch images_new_part = self._images[start:end] labels_new_part = self._labels[start:end] return np.concatenate( (images_rest_part, images_new_part), axis=0), np.concatenate( (labels_rest_part, labels_new_part), axis=0) else: self._index_in_epoch += batch_size end = self._index_in_epoch return self._images[start:end], self._labels[start:end]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_batch(self, batch_size):\n start = self._index_in_epoch\n self._index_in_epoch += batch_size\n if self._index_in_epoch > self._num_examples:\n self._epochs_completed += 1\n start = 0\n self._index_in_epoch = batch_size\n end = self._index_in_epo...
[ "0.8355541", "0.8247464", "0.8239602", "0.8215164", "0.81823456", "0.81794965", "0.8149099", "0.8149099", "0.8118594", "0.8114728", "0.810647", "0.8063478", "0.80631614", "0.8062337", "0.8052529", "0.804097", "0.80327535", "0.80327535", "0.7869733", "0.78591037", "0.7831932",...
0.7211136
50
Checks that certain pipeline files are not modified from template output. Iterates through the pipeline's directory content and compares specified files against output from the template using the pipeline's metadata. File content should not be modified / missing.
def files_unchanged(self): passed = [] failed = [] ignored = [] fixed = [] could_fix = False # Check that we have the minimum required config required_pipeline_config = {"manifest.name", "manifest.description", "manifest.author"} missing_pipeline_config = required_pipeline_config.difference(self.nf_config) if missing_pipeline_config: return {"ignored": [f"Required pipeline config not found - {missing_pipeline_config}"]} try: prefix, short_name = self.nf_config["manifest.name"].strip("\"'").split("/") except ValueError: log.warning( "Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'" ) short_name = self.nf_config["manifest.name"].strip("\"'") prefix = "nf-core" # NB: Should all be files, not directories # List of lists. Passes if any of the files in the sublist are found. files_exact = [ [".gitattributes"], [".prettierrc.yml"], ["CODE_OF_CONDUCT.md"], ["LICENSE", "LICENSE.md", "LICENCE", "LICENCE.md"], # NB: British / American spelling [os.path.join(".github", ".dockstore.yml")], [os.path.join(".github", "CONTRIBUTING.md")], [os.path.join(".github", "ISSUE_TEMPLATE", "bug_report.yml")], [os.path.join(".github", "ISSUE_TEMPLATE", "config.yml")], [os.path.join(".github", "ISSUE_TEMPLATE", "feature_request.yml")], [os.path.join(".github", "PULL_REQUEST_TEMPLATE.md")], [os.path.join(".github", "workflows", "branch.yml")], [os.path.join(".github", "workflows", "linting_comment.yml")], [os.path.join(".github", "workflows", "linting.yml")], [os.path.join("assets", "email_template.html")], [os.path.join("assets", "email_template.txt")], [os.path.join("assets", "sendmail_template.txt")], [os.path.join("assets", f"nf-core-{short_name}_logo_light.png")], [os.path.join("docs", "images", f"nf-core-{short_name}_logo_light.png")], [os.path.join("docs", "images", f"nf-core-{short_name}_logo_dark.png")], [os.path.join("docs", "README.md")], [os.path.join("lib", "nfcore_external_java_deps.jar")], 
[os.path.join("lib", "NfcoreTemplate.groovy")], ] files_partial = [ [".gitignore", ".prettierignore", "pyproject.toml"], ] # Only show error messages from pipeline creation logging.getLogger("nf_core.create").setLevel(logging.ERROR) # Generate a new pipeline with nf-core create that we can compare to tmp_dir = tempfile.mkdtemp() # Create a template.yaml file for the pipeline creation template_yaml = { "name": short_name, "description": self.nf_config["manifest.description"].strip("\"'"), "author": self.nf_config["manifest.author"].strip("\"'"), "prefix": prefix, } template_yaml_path = os.path.join(tmp_dir, "template.yaml") with open(template_yaml_path, "w") as fh: yaml.dump(template_yaml, fh, default_flow_style=False) test_pipeline_dir = os.path.join(tmp_dir, f"{prefix}-{short_name}") create_obj = nf_core.create.PipelineCreate( None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path ) create_obj.init_pipeline() # Helper functions for file paths def _pf(file_path): """Helper function - get file path for pipeline file""" return os.path.join(self.wf_path, file_path) def _tf(file_path): """Helper function - get file path for template file""" return os.path.join(test_pipeline_dir, file_path) # Files that must be completely unchanged from template for files in files_exact: # Ignore if file specified in linting config ignore_files = self.lint_config.get("files_unchanged", []) if any([f in ignore_files for f in files]): ignored.append(f"File ignored due to lint config: {self._wrap_quotes(files)}") # Ignore if we can't find the file elif not any([os.path.isfile(_pf(f)) for f in files]): ignored.append(f"File does not exist: {self._wrap_quotes(files)}") # Check that the file has an identical match else: for f in files: try: if filecmp.cmp(_pf(f), _tf(f), shallow=True): passed.append(f"`{f}` matches the template") else: if "files_unchanged" in self.fix: # Try to fix the problem by overwriting the pipeline file shutil.copy(_tf(f), _pf(f)) 
passed.append(f"`{f}` matches the template") fixed.append(f"`{f}` overwritten with template file") else: failed.append(f"`{f}` does not match the template") could_fix = True except FileNotFoundError: pass # Files that can be added to, but that must contain the template contents for files in files_partial: # Ignore if file specified in linting config ignore_files = self.lint_config.get("files_unchanged", []) if any([f in ignore_files for f in files]): ignored.append(f"File ignored due to lint config: {self._wrap_quotes(files)}") # Ignore if we can't find the file elif not any([os.path.isfile(_pf(f)) for f in files]): ignored.append(f"File does not exist: {self._wrap_quotes(files)}") # Check that the file contains the template file contents else: for f in files: try: with open(_pf(f), "r") as fh: pipeline_file = fh.read() with open(_tf(f), "r") as fh: template_file = fh.read() if template_file in pipeline_file: passed.append(f"`{f}` matches the template") else: if "files_unchanged" in self.fix: # Try to fix the problem by overwriting the pipeline file with open(_tf(f), "r") as fh: template_file = fh.read() with open(_pf(f), "w") as fh: fh.write(template_file) passed.append(f"`{f}` matches the template") fixed.append(f"`{f}` overwritten with template file") else: failed.append(f"`{f}` does not match the template") could_fix = True except FileNotFoundError: pass # cleaning up temporary dir shutil.rmtree(tmp_dir) return {"passed": passed, "failed": failed, "ignored": ignored, "fixed": fixed, "could_fix": could_fix}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def files_exist(self):\n\n passed = []\n warned = []\n failed = []\n ignored = []\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n #: test autodoc\n try:\n _, short_name = self.nf_config[\"manifest.name\"].strip(...
[ "0.6298096", "0.5958212", "0.5917722", "0.5899919", "0.5821627", "0.58071446", "0.57569844", "0.5753009", "0.5753009", "0.5753009", "0.57331693", "0.5726181", "0.56889504", "0.5665329", "0.5652075", "0.56505686", "0.5650157", "0.561327", "0.56093055", "0.5601862", "0.5555765"...
0.68884873
0
Helper function get file path for pipeline file
def _pf(file_path): return os.path.join(self.wf_path, file_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_path(self) -> global___Expression:", "def pipe_path(name: str, extension=\".txt\") -> str:\n return \"\\\\\".join(sys.argv[0].split(\"\\\\\")[:-3]) + f\"\\\\pipeline\\\\{name}{extension}\"", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)...
[ "0.7225395", "0.7101079", "0.7039319", "0.6956954", "0.6794682", "0.67932165", "0.67596304", "0.674209", "0.6732054", "0.6722096", "0.6676987", "0.66186446", "0.6618615", "0.66175014", "0.66000587", "0.65910906", "0.65839905", "0.6580201", "0.657943", "0.6575496", "0.6546741"...
0.681078
4
Helper function get file path for template file
def _tf(file_path): return os.path.join(test_pipeline_dir, file_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTemplateFile(fname):\n return os.path.join(Configurations.getTemplateDir(), fname)", "def get_template_path(self):\n raise NotImplementedError()", "def _get_template_filename(self):\n _format = self.cfg.get('mutations', 'format')\n if _format == 'pdf':\n tf = 'PDFT...
[ "0.8357311", "0.7725359", "0.7723076", "0.76889884", "0.76159966", "0.76117176", "0.7574724", "0.75500005", "0.7501025", "0.7403312", "0.7396856", "0.72851056", "0.7204504", "0.7142604", "0.7068567", "0.7013538", "0.6973091", "0.69499636", "0.69488835", "0.6947856", "0.694159...
0.0
-1
source_address argument used only for python2.7 compatibility
def create_connection_nodelay(address, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, source_address=None): msg = "getaddrinfo returns an empty list" host, port = address for res in socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res sock = None try: sock = socket.socket(af, socktype, proto) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if timeout is not socket._GLOBAL_DEFAULT_TIMEOUT: sock.settimeout(timeout) sock.connect(sa) return sock except socket.error as msg: if sock is not None: sock.close() raise socket.error(msg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSource():", "def add_source_address(self, srcAddr):\n self.source.address = srcAddr", "def add_source_address(self, srcAddr):\n self.source.address = srcAddr", "def getAddressSourceInfo(self, address: ghidra.program.model.address.Address) -> ghidra.program.database.mem.AddressSourceInfo:...
[ "0.6894374", "0.667033", "0.667033", "0.63841516", "0.62165433", "0.613326", "0.6117469", "0.61146295", "0.61146295", "0.61146295", "0.60979617", "0.6066222", "0.6025801", "0.59759766", "0.59759283", "0.58986866", "0.586193", "0.58330524", "0.5787249", "0.5779645", "0.5777858...
0.0
-1
sample from the MACKRL tree
def select_actions(self, inputs, avail_actions, tformat, info, hidden_states=None, test_mode=False, **kwargs): noise_params = kwargs.get("noise_params", None) T_env = info["T_env"] test_suffix = "" if not test_mode else "_test" if self.args.agent_level1_share_params: # --------------------- LEVEL 1 if self.is_obs_noise(test_mode): inputs_level1, inputs_level1_tformat = _build_model_inputs(self.input_columns_level1_noisy, inputs, to_variable=True, inputs_tformat=tformat) inputs_level1_tformat = "a*bs*t*v" else: inputs_level1, inputs_level1_tformat = _build_model_inputs(self.input_columns_level1, inputs, to_variable=True, inputs_tformat=tformat) if self.args.debug_mode: _check_nan(inputs_level1) out_level1, hidden_states_level1, losses_level1, tformat_level1 = self.model.model_level1(inputs_level1["agent_input_level1"], hidden_states=hidden_states["level1"], loss_fn=None, tformat=inputs_level1_tformat, n_agents=self.n_agents, test_mode=test_mode, **kwargs) if self.args.debug_mode: _check_nan(inputs_level1) if self.is_obs_noise(test_mode): # have to do correlated sampling of what pair id everyone agrees on bs = out_level1.shape[_bsdim(inputs_level1_tformat)] ftype = th.FloatTensor if not out_level1.is_cuda else th.cuda.FloatTensor sampled_pair_ids = ftype(*out_level1.shape[:-1], 1) for _b in range(bs): ps = out_level1[:, _b] rn = np.random.random() for _a in range(ps.shape[0]): act = 0 s = ps[_a, 0, act] while s <= rn: act += 1 s += ps[_a, 0, act] sampled_pair_ids[_a, _b, 0, :] = act modified_inputs_level1 = inputs_level1 selected_actions_format_level1 = "a*bs*t*v" else: # TODO: This is the pair-product encoded ID of both selected pairs. 
sampled_pair_ids, modified_inputs_level1, selected_actions_format_level1 = self.action_selector.select_action({"policies":out_level1}, avail_actions=None, tformat=tformat_level1, test_mode=test_mode) _check_nan(sampled_pair_ids) if self.args.debug_mode in ["level2_actions_fixed_pair"]: """ DEBUG MODE: LEVEL2 ACTIONS FIXED PAIR Here we pick level2 actions from a fixed agent pair (0,1) and the third action from IQL """ assert self.n_agents == 3, "only makes sense in n_agents=3 scenario" sampled_pair_ids.fill_(0.0) # sample which pairs should be selected # TODO: HAVE TO ADAPT THIS FOR NOISY OBS! if self.is_obs_noise(test_mode): self.selected_actions_format = selected_actions_format_level1 else: self.actions_level1 = sampled_pair_ids.clone() self.selected_actions_format = selected_actions_format_level1 self.policies_level1 = modified_inputs_level1.squeeze(0).clone() if self.is_obs_noise(test_mode): inputs_level2, inputs_level2_tformat = _build_model_inputs(self.input_columns_level2_noisy, inputs, to_variable=True, inputs_tformat=tformat, ) else: inputs_level2, inputs_level2_tformat = _build_model_inputs(self.input_columns_level2, inputs, to_variable=True, inputs_tformat=tformat, ) assert self.args.agent_level2_share_params, "not implemented!" if "avail_actions_pair" in inputs_level2["agent_input_level2"]: pairwise_avail_actions = inputs_level2["agent_input_level2"]["avail_actions_pair"] else: assert False, "NOT SUPPORTED CURRENTLY." 
avail_actions1, params_aa1, tformat_aa1 = _to_batch(inputs_level2["agent_input_level2"]["avail_actions_id1"], inputs_level2_tformat) avail_actions2, params_aa2, _ = _to_batch(inputs_level2["agent_input_level2"]["avail_actions_id2"], inputs_level2_tformat) pairwise_avail_actions = th.bmm(avail_actions1.unsqueeze(2), avail_actions2.unsqueeze(1)) pairwise_avail_actions = _from_batch(pairwise_avail_actions, params_aa2, tformat_aa1) ttype = th.cuda.FloatTensor if pairwise_avail_actions.is_cuda else th.FloatTensor delegation_avails = Variable(ttype(pairwise_avail_actions.shape[0], pairwise_avail_actions.shape[1], pairwise_avail_actions.shape[2], 1).fill_(1.0), requires_grad=False) pairwise_avail_actions = th.cat([delegation_avails, pairwise_avail_actions], dim=_vdim(tformat)) out_level2, hidden_states_level2, losses_level2, tformat_level2 \ = self.model.models["level2_{}".format(0)](inputs_level2["agent_input_level2"], hidden_states=hidden_states["level2"], loss_fn=None, tformat=inputs_level2_tformat, # sampled_pair_ids=sampled_pair_ids, # UNUSED? 
pairwise_avail_actions=pairwise_avail_actions, test_mode=test_mode, seq_lens=inputs["agent_input_level2__agent0"].seq_lens, **kwargs) if self.is_obs_noise(test_mode): # have to do correlated sampling of what pair id everyone agrees on bs = out_level2.shape[_bsdim(inputs_level2_tformat)] ftype = th.FloatTensor if not out_level2.is_cuda else th.cuda.FloatTensor pair_sampled_actions = ftype(*out_level2.shape[:-1], 1).view(int(out_level2.shape[0]/2), 2, *out_level2.shape[1:-1], 1) for _b in range(bs): ps = out_level2.view(int(out_level2.shape[0]/2), 2, *out_level2.shape[1:])[:, :, _b] avail_actions = pairwise_avail_actions.view(int(out_level2.shape[0]/2), 2, *out_level2.shape[1:])[:, :, _b] _sum0 = th.sum(ps[:, 0] * avail_actions[:, 0], dim=-1, keepdim=True) _sum0_mask = (_sum0 == 0.0) _sum0.masked_fill_(_sum0_mask, 1.0) ps[:, 0] = ps[:, 0] * avail_actions[:, 0] / _sum0 _sum1 = th.sum(ps[:, 1] * avail_actions[:, 1], dim=-1, keepdim=True) _sum1_mask = (_sum1 == 0.0) _sum1.masked_fill_(_sum1_mask, 1.0) ps[:, 1] = ps[:, 1] * avail_actions[:, 1] / _sum1 rns = np.random.random(ps.shape[0]) #one seed for each pair / batch for _a in range(ps.shape[0]): for _j in range(2): act = 0 s = ps[_a, _j, 0, act] while s <= rns[_a]: act += 1 s += ps[_a, _j, 0, act] if act == 122: # DEBUG a = 5 pass pair_sampled_actions[_a, _j, _b, 0, :] = act # TODO: Fix the return values so I can debug in episode buffer!!! modified_inputs_level2 = inputs_level2 selected_actions_format_level2 = "a*bs*t*v" else: # TODO: Implement for noisy obs!! 
# Need again correlated sampling pair_sampled_actions, \ modified_inputs_level2, \ selected_actions_format_level2 = self.action_selector.select_action({"policies":out_level2}, avail_actions=pairwise_avail_actions.data, tformat=tformat_level2, test_mode=test_mode) # if th.sum(pair_sampled_actions == 26.0) > 0.0: # a = 5 if sampled_pair_ids.shape[_tdim(tformat_level1)] > 1: # only used for mackrl sampling sampled_pairs = th.cat([ self.magic_map[sampled_pair_ids[:,:,_t:_t+1,:].long()].squeeze(2) for _t in range(sampled_pair_ids.shape[_tdim(tformat_level1)]) ], dim=_tdim(tformat_level1)) else: sampled_pairs = self.magic_map[sampled_pair_ids.long()].squeeze(2) self.actions_level2 = pair_sampled_actions.clone() if self.is_obs_noise(test_mode): self.actions_level2_sampled = [] for _aid in range(self.n_agents): self.actions_level2_sampled.append([]) for i in range(sampled_pairs.shape[-1]): self.actions_level2_sampled[_aid].append( pair_sampled_actions[:, i].gather(0, sampled_pairs[_aid:_aid+1, :, :, i:i + 1].long())) self.actions_level2_sampled[_aid] = th.cat(self.actions_level2_sampled[_aid], 0) else: # ToDO: Gather across all selected pairs!! self.actions_level2_sampled = [] for i in range(sampled_pairs.shape[-1]): self.actions_level2_sampled.append(pair_sampled_actions.gather(0, sampled_pairs[:,:,:,i:i+1].long())) self.actions_level2_sampled = th.cat(self.actions_level2_sampled, 0) self.selected_actions_format_level2 = selected_actions_format_level2 self.policies_level2 = modified_inputs_level2.clone() inputs_level3, inputs_level3_tformat = _build_model_inputs(self.input_columns_level3, inputs, to_variable=True, inputs_tformat=tformat, ) action_tensor = None if self.is_obs_noise(test_mode): action_tensor = ttype(self.n_agents, sampled_pairs.shape[_bsdim(tformat)], sampled_pairs.shape[_tdim(tformat)], 1).fill_(float("nan")) for _bid in range(sampled_pairs.shape[_bsdim(tformat)]): # each agent has it's own assumptions about what pair-wise actions were sampled! 
for _aid in range(self.n_agents): # work out which pair id agent _aid is in (if any) and whether at first or second position partid = None posid = None #for _partid, _part in enumerate(_ordered_2_agent_pairings(self.n_agents)): combid = int(sampled_pair_ids[_aid, _bid, 0, 0].item()) part = list(_ordered_2_agent_pairings(self.n_agents))[combid] for pid, p in enumerate(part): agentids = _pairing_id_2_agent_ids(p, self.n_agents) if agentids[0] == _aid: partid = pid posid = 0 break if agentids[1] == _aid: partid = pid posid = 1 break pass if partid is not None: # ok so what actions did agent _aid finally select? joint_act = self.actions_level2_sampled[_aid][partid,_bid,0,0].item() joint_act_dec = _joint_actions_2_action_pair(int(joint_act), self.n_actions) if joint_act_dec == 11: # DEBUG a = 5 if joint_act_dec != 0: # else delegate action_tensor[_aid,_bid,0,:] = joint_act_dec[posid] else: # decentralized anyway! pass else: action_tensor = ttype(self.n_agents, pair_sampled_actions.shape[_bsdim(tformat)], pair_sampled_actions.shape[_tdim(tformat)], 1).fill_(float("nan")) for i in range(sampled_pairs.shape[-1]): sampled_pair = sampled_pairs[:,:,:,i:i+1] pair_id1, pair_id2 = _pairing_id_2_agent_ids__tensor(sampled_pair, self.n_agents, "a*bs*t*v") # sampled_pair_ids.squeeze(0).squeeze(2).view(-1), self.n_agents) avail_actions1 = inputs_level3["agent_input_level3"]["avail_actions"].gather( _adim(inputs_level3_tformat), Variable(pair_id1.repeat(1, 1, 1, inputs_level3["agent_input_level3"][ "avail_actions"].shape[_vdim(inputs_level3_tformat)]))) avail_actions2 = inputs_level3["agent_input_level3"]["avail_actions"].gather( _adim(inputs_level3_tformat), Variable(pair_id2.repeat(1, 1, 1, inputs_level3["agent_input_level3"][ "avail_actions"].shape[_vdim(inputs_level3_tformat)]))) # selected_level_2_actions = pair_sampled_actions.gather(0, sampled_pair_ids.long()) this_pair_sampled_actions = pair_sampled_actions.gather(0, sampled_pair.long()) actions1, actions2 = 
_joint_actions_2_action_pair_aa(this_pair_sampled_actions.clone(), self.n_actions, avail_actions1, avail_actions2) # count how often level2 actions are un-available at level 3 # TODO: Verify that 'this_pair_sampled_actions != 0' is the right thing to do!! pair_action_unavail_rate = (th.mean(((actions1 != actions1) & (this_pair_sampled_actions != 0)).float()).item() + th.mean(((actions2 != actions2) & (this_pair_sampled_actions != 0)).float()).item()) / 2.0 if pair_action_unavail_rate != 0.0 and hasattr(self.args, "mackrl_delegate_if_zero_ck") and self.args.mackrl_delegate_if_zero_ck: #assert False, "pair action unavail HAS to be zero in mackrl_delegate_if_zero_ck setting!" self.logging_struct.py_logger.warning("ERROR: pair action unavail HAS to be zero in mackrl_delegate_if_zero_ck setting!") self._add_stat("pair_action_unavail_rate__runner", pair_action_unavail_rate, T_env=T_env, suffix=test_suffix, to_sacred=False) # Now check whether any of the pair_sampled_actions violate individual agent constraints on avail_actions ttype = th.cuda.FloatTensor if self.args.use_cuda else th.FloatTensor action_tensor.scatter_(0, pair_id1, actions1) action_tensor.scatter_(0, pair_id2, actions2) avail_actions_level3 = inputs_level3["agent_input_level3"]["avail_actions"].clone().data self.avail_actions = avail_actions_level3.clone() inputs_level3["agent_input_level3"]["avail_actions"] = Variable(avail_actions_level3, requires_grad=False) out_level3, hidden_states_level3, losses_level3, tformat_level3 = self.model.models["level3_{}".format(0)](inputs_level3["agent_input_level3"], hidden_states=hidden_states["level3"], loss_fn=None, tformat=inputs_level3_tformat, test_mode=test_mode, seq_lens=inputs["agent_input_level3__agent0"].seq_lens, **kwargs) # extract available actions avail_actions_level3 = inputs_level3["agent_input_level3"]["avail_actions"] individual_actions, \ modified_inputs_level3, \ selected_actions_format_level3 = 
self.action_selector.select_action({"policies":out_level3}, avail_actions=avail_actions_level3.data, tformat=tformat_level3, test_mode=test_mode) self.actions_level3 = individual_actions action_tensor[action_tensor != action_tensor] = individual_actions[action_tensor != action_tensor] # set states beyond episode termination to NaN if self.is_obs_noise(test_mode): action_tensor = _pad_nan(action_tensor, tformat=tformat_level3, seq_lens=inputs["agent_input_level1__agent0"].seq_lens) # DEBUG else: action_tensor = _pad_nan(action_tensor, tformat=tformat_level3, seq_lens=inputs["agent_input_level1"].seq_lens) # DEBUG # l2 = action_tensor.squeeze() # DEBUG if self.args.debug_mode in ["level3_actions_only"]: """ DEBUG MODE: LEVEL3 ACTIONS ONLY Here we just pick actions from level3 - should therefore just correspond to vanilla COMA! """ action_tensor = individual_actions self.final_actions = action_tensor.clone() if th.sum(self.final_actions == 11).item() > 0: # DEBUG a = 5 pass if self.is_obs_noise(test_mode): selected_actions_list = [] selected_actions_list += [dict(name="actions", select_agent_ids=list(range(self.n_agents)), data=self.final_actions)] modified_inputs_list = [] else: #self.actions_level3 = individual_actions.clone() self.selected_actions_format_level3 = selected_actions_format_level3 self.policies_level3 = modified_inputs_level3.clone() self.avail_actions_active = avail_actions_level3.data selected_actions_list = [] for _i in range(_n_agent_pair_samples(self.n_agents) if self.args.n_pair_samples is None else self.args.n_pair_samples): #_n_agent_pair_samples(self.n_agents)): selected_actions_list += [dict(name="actions_level1__sample{}".format(_i), data=self.actions_level1[_i])] for _i in range(_n_agent_pair_samples(self.n_agents)): selected_actions_list += [dict(name="actions_level2__sample{}".format(_i), data=self.actions_level2_sampled[_i])] # TODO: BUG!? 
selected_actions_list += [dict(name="actions_level2", select_agent_ids=list(range(_n_agent_pairings(self.n_agents))), data=self.actions_level2)] selected_actions_list += [dict(name="actions_level3", select_agent_ids=list(range(self.n_agents)), data=self.actions_level3)] selected_actions_list += [dict(name="actions", select_agent_ids=list(range(self.n_agents)), data=self.final_actions)] modified_inputs_list = [] modified_inputs_list += [dict(name="policies_level1", data=self.policies_level1)] for _i in range(_n_agent_pair_samples(self.n_agents)): modified_inputs_list += [dict(name="policies_level2__sample{}".format(_i), data=self.policies_level2[_i])] modified_inputs_list += [dict(name="policies_level3", select_agent_ids=list(range(self.n_agents)), data=self.policies_level3)] modified_inputs_list += [dict(name="avail_actions_active", select_agent_ids=list(range(self.n_agents)), data=self.avail_actions_active)] modified_inputs_list += [dict(name="avail_actions", select_agent_ids=list(range(self.n_agents)), data=self.avail_actions)] #modified_inputs_list += [dict(name="avail_actions", # select_agent_ids=list(range(self.n_agents)), # data=self.avail_actions)] selected_actions_list += [dict(name="actions_onehot", select_agent_ids=list(range(self.n_agents)), data=_onehot(self.final_actions, rng=(0, self.n_actions)))] hidden_states = dict(level1=hidden_states_level1, level2=hidden_states_level2, level3=hidden_states_level3) return hidden_states, selected_actions_list, modified_inputs_list, self.selected_actions_format pass else: assert False, "Not implemented"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sample(self):\n return self._root.sample()", "def sample(self):", "def sample(tree, i, alpha=0.5, beta=0.5, only_tree=True):\n # for n in tree.nodes():\n # lab = tuple(n)\n # if len(n) == 1:\n # lab = \"(\" + str(list(n)[0]) + \")\"\n # tree.node[n] = {\"color\": \"bla...
[ "0.73472464", "0.6716626", "0.66083133", "0.65131354", "0.64742315", "0.64742315", "0.6471004", "0.64553005", "0.6374327", "0.6284512", "0.62842137", "0.62587696", "0.61892176", "0.61316454", "0.6015631", "0.6002262", "0.5970276", "0.5970129", "0.5945327", "0.5932385", "0.590...
0.0
-1