function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def get_script(self):
    """Return the stored script output with surrounding whitespace removed."""
    cleaned = self.script.strip()
    return cleaned
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def set_service(self, service):
    """Record the service name detected for this entry."""
    self.service = service
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def set_version(self, version):
    """Record the version string detected for this entry."""
    self.version = version
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def set_script(self, script):
    """Record the script output associated with this entry."""
    self.script = script
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def parse(fd): """ Parse the data according to several regexes
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def parse_xml(xml_file): """ Parse the XML file
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def is_format_valid(fmt): """ Check for the supplied custom output format @param fmt : the supplied format
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def formatted_item(host, format_item): """ return the attribute value related to the host
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def repeat_attributes(attribute_list): """ repeat attribute lists to the maximum for the
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def generate_csv(fd, results, options): """ Generate a plain ';' separated csv file with the desired or default attribute format @param fd : output file descriptor, could be a true file or stdout """ if results: spamwriter = csv.writer(fd, delimiter=options.delimiter, quoting=csv.QUOTE_ALL, lineterminator='\n')
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def main(): global parser
maaaaz/nmaptocsv
[ 350, 93, 350, 5, 1348413900 ]
def play_example(cls, dur=5, toprint=True, double=False):
    """
    Execute the documentation example of the object given as an argument.

    :Args:

        cls: PyoObject class or string
            Class reference of the desired object example. If this argument
            is the string of the full path of an example (as returned by the
            getPyoExamples() function), it will be executed.
        dur: float, optional
            Duration of the example.
        toprint: boolean, optional
            If True, the example script will be printed to the console.
            Defaults to True.
        double: boolean, optional
            If True, force the example to run in double precision (64-bit)
            Defaults to False.
    """
    # Examples are rendered offline into .wav files stored next to the module.
    root_dir = os.path.join(os.path.split(__file__)[0], "manual_example_references")
    if not os.path.isdir(root_dir):
        os.mkdir(root_dir)
    executable = sys.executable
    # NOTE(review): "not executable" already covers None; the second test is redundant.
    if not executable or executable is None:
        executable = "python3"
    doc = cls.__doc__.splitlines()
    filename = cls.__name__ + ".wav"
    filepath = os.path.join(root_dir, filename)
    lines = []
    store = False
    for line in doc:
        if not store:
            if ">>> s = Server" in line:
                # Swap the realtime audio server for an offline one and
                # record the example's output to `filepath`.
                line = line.replace("Server()", 'Server(audio="offline")')
                line = line + "\ns.recordOptions(filename=r'{}', dur={})".format(filepath, dur)
                store = True
        if store:
            if line.strip() == "":
                # A blank line terminates the doctest example block.
                store = False
            elif 's.start()' in line:
                # s.start() is appended manually at the end of the script.
                pass
            else:
                lines.append(line)
    if lines == []:
        print("There is no manual example for %s object." % cls.__name__)
        return
    # Keep only the doctest source lines (">>>" / "..." prefixed).
    ex_lines = [l.lstrip(" ") for l in lines if ">>>" in l or "..." in l]
    if hasattr(builtins, "pyo_use_double") or double:
        ex = "import time\nfrom pyo64 import *\n"
    else:
        ex = "import time\nfrom pyo import *\n"
    for line in ex_lines:
        if ">>>" in line:
            # NOTE(review): lstrip strips a *character set*, not a prefix —
            # presumably safe here because doctest lines start with ">>> ".
            line = line.lstrip(">>> ")
        if "..." in line:
            line = " " + line.lstrip("... ")
        ex += line + "\n"
    ex += "s.start()\ns.shutdown()\n"
    # Write the generated script to a temp file and run it in a subprocess.
    f = tempfile.NamedTemporaryFile(delete=False)
    if toprint:
        f.write(tobytes('print(r"""\n%s\n""")\n' % ex))
    f.write(tobytes(ex))
    f.close()
    call([executable, f.name])
belangeo/pyo
[ 1154, 119, 1154, 25, 1440516200 ]
def find_median(arr):
    """Return the median of a non-empty sequence of numbers.

    BUG FIX: the original heapified `arr` and then indexed it as if it were
    sorted — `heapq.heapify` only establishes the heap invariant, it does NOT
    sort, so the middle elements of the heap are not the median.  It also used
    Python 2 `/`, which yields float indices on Python 3.  This version sorts
    a copy (O(n log n)) and leaves the input unmutated.

    @param arr: a non-empty sequence of numbers.
    @return: the middle value, or the mean of the two middle values for an
        even-length input (always a float in that case).
    @raises ValueError: if `arr` is empty.
    """
    if not arr:
        raise ValueError("find_median() arg is an empty sequence")
    ordered = sorted(arr)
    n = len(ordered)
    mid = n // 2
    if n % 2 != 0:
        return ordered[mid]
    return (ordered[mid - 1] + ordered[mid]) / 2.0
amitsaha/learning
[ 4, 4, 4, 20, 1413605035 ]
def __init__(self, errors):
    """Wrap the list of per-destination failures in an exception."""
    Exception.__init__(self, errors)
    self.errors = errors
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def __init__(self):
    """Start with an empty buffer of messages."""
    self.messages = []
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def __init__(self):
    """Initialise with a single buffering destination and no global fields."""
    self._globalFields = {}
    self._any_added = False
    self._destinations = [BufferingDestination()]
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def send(self, message):
    """
    Deliver a message to all destinations.

    The passed in message might be mutated.

    @param message: A message dictionary that can be serialized to JSON.
    @type message: L{dict}
    """
    message.update(self._globalFields)
    failures = []
    for destination in self._destinations:
        try:
            destination(message)
        except:
            failures.append(sys.exc_info())
    if failures:
        raise _DestinationsSendError(failures)
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def remove(self, destination):
    """
    Remove a destination that was previously registered via C{self.add}.

    @param destination: The destination callable to drop.

    @raises ValueError: If the destination was never added.
    """
    self._destinations.remove(destination)
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def write(dictionary, serializer=None): """ Write a dictionary to the appropriate destination. @note: This method is thread-safe. @param serializer: Either C{None}, or a L{eliot._validation._MessageSerializer} which can be used to validate this message. @param dictionary: The message to write out. The given dictionary will not be mutated. @type dictionary: C{dict} """
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def _safeUnicodeDictionary(self, dictionary):
    """
    Serialize a dictionary to a unicode string no matter what it contains.

    The output loosely follows Python syntax; it is best-effort and not a
    lossless encoding in all cases.

    @param dictionary: A L{dict} to serialize.

    @return: A L{unicode} string representing the input dictionary as
        faithfully as can be done without too much effort.
    """
    try:
        safe_items = (
            (saferepr(key), saferepr(value))
            for (key, value) in dictionary.items()
        )
        return str(dict(safe_items))
    except:
        # Even .items() can blow up on a hostile mapping; fall back to a
        # repr of the whole object.
        return saferepr(dictionary)
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def exclusively(f):
    """
    Decorate a function so that every call runs while holding
    ``self._lock``, serializing invocations on a per-instance basis.
    """
    @wraps(f)
    def _locked(self, *args, **kwargs):
        with self._lock:
            return f(self, *args, **kwargs)
    return _locked
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def __init__(self, encoder=EliotJSONEncoder):
    """
    @param encoder: A JSONEncoder subclass to use when encoding JSON.
    """
    self._encoder = encoder
    self._lock = Lock()
    self.reset()
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def flushTracebacks(self, exceptionType):
    """
    Flush all logged tracebacks whose exception is of the given type,
    marking them as expected so they do not fail the test.

    @param exceptionType: A subclass of L{Exception}.

    @return: C{list} of flushed messages.
    """
    flushed = [
        m for m in self.tracebackMessages
        if isinstance(m[REASON_FIELD], exceptionType)
    ]
    self.tracebackMessages = [
        m for m in self.tracebackMessages
        if not isinstance(m[REASON_FIELD], exceptionType)
    ]
    return flushed
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def write(self, dictionary, serializer=None): """ Add the dictionary to list of messages. """ # Validate copy of the dictionary, to ensure what we store isn't # mutated. try: self._validate_message(dictionary.copy(), serializer) except Exception as e: # Skip irrelevant frames that don't help pinpoint the problem: from . import _output, _message, _action skip_filenames = [_output.__file__, _message.__file__, _action.__file__] for frame in inspect.stack(): if frame[1] not in skip_filenames: break self._failed_validations.append( "{}: {}".format(e, "".join(traceback.format_stack(frame[0]))) ) self.messages.append(dictionary) self.serializers.append(serializer) if serializer is TRACEBACK_MESSAGE._serializer: self.tracebackMessages.append(dictionary)
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def validate(self):
    """
    Validate all written messages.

    Does minimal validation of types, and for messages with corresponding
    serializers uses those to do additional validation. As a side-effect,
    the messages are replaced with their serialized contents.

    @raises TypeError: If a field name is not unicode, or the dictionary
        fails to serialize to JSON.

    @raises eliot.ValidationError: If serializer was given and validation
        failed.
    """
    for message, serializer in zip(self.messages, self.serializers):
        try:
            self._validate_message(message, serializer)
        except (TypeError, ValidationError) as e:
            # Which messages failed was recorded at write() time; re-raise
            # the same exception type carrying the accumulated report.
            raise e.__class__("\n\n".join(self._failed_validations))
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def serialize(self):
    """
    Serialize all written messages using their Field-based serializers
    (this is not JSON serialization).

    @return: A C{list} of C{dict}, the serialized messages.
    """
    serialized = []
    for message, serializer in zip(self.messages, self.serializers):
        # Work on a copy so the stored messages remain untouched.
        copied = message.copy()
        serializer.serialize(copied)
        serialized.append(copied)
    return serialized
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def reset(self):
    """
    Clear all logged messages, their serializers, recorded tracebacks and
    pending validation failures.

    Useful to put a logger into a known state before exercising a specific
    code path; flushed tracebacks will no longer fail the test.
    """
    self.messages, self.serializers = [], []
    self.tracebackMessages, self._failed_validations = [], []
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def __new__(cls, file, encoder=EliotJSONEncoder):
    """
    Create a destination wrapping *file*.

    @param file: A file-like object messages will be written to.
    @param encoder: A JSONEncoder subclass used when encoding JSON.
    @raises RuntimeError: If *file* is known to be non-writable.
    """
    if isinstance(file, IOBase) and not file.writable():
        # BUG FIX: the "{}" placeholder was never interpolated, so the
        # error message always printed a literal "{}".
        raise RuntimeError("Given file {} is not writeable.".format(file))
    # Probe whether the file accepts bytes; a TypeError means text mode.
    unicodeFile = False
    try:
        file.write(b"")
    except TypeError:
        unicodeFile = True
    if unicodeFile:
        # On Python 3 native json module outputs unicode:
        _dumps = pyjson.dumps
        _linebreak = "\n"
    else:
        _dumps = bytesjson.dumps
        _linebreak = b"\n"
    return PClass.__new__(
        cls, file=file, _dumps=_dumps, _linebreak=_linebreak, encoder=encoder
    )
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def to_file(output_file, encoder=EliotJSONEncoder):
    """
    Add a destination that writes one JSON message per line to the given
    file.

    @param output_file: A file-like object.
    @param encoder: A JSONEncoder subclass to use when encoding JSON.
    """
    destination = FileDestination(file=output_file, encoder=encoder)
    Logger._destinations.add(destination)
ScatterHQ/eliot
[ 1026, 64, 1026, 113, 1397056339 ]
def __init__(self):
    # Open the MySQL connection using the module-level `config` mapping
    # (presumably host/user/password/database — TODO confirm where config
    # is defined).
    self.con = mysql.connector.connect(**config)
aitoralmeida/eu-elections
[ 4, 1, 4, 2, 1396527867 ]
def insert_users(self,tweet): #id TEXT, screen_name TEXT, total_tweets INT keys = [tweet['user']['id'], tweet['user']['screen_name'],1] try: cursor = self.con.cursor() select = "SELECT id, total_tweets from twitter_users where id="+str(keys[0]) cursor.execute(select) node = cursor.fetchone() if node: total = node[1]+1 update = "UPDATE twitter_users set total_tweets = "+str(total)+" where id = "+str(keys[0]) cursor.execute(update) else: insert = "INSERT INTO twitter_users(id, screen_name, total_tweets) VALUES (" + str(keys[0]) + ",'" + keys[1] + "', 1)" cursor.execute(insert) except Exception, e: print "DB Error - insert_user: ", e
aitoralmeida/eu-elections
[ 4, 1, 4, 2, 1396527867 ]
def insert_mention(self,tweet): #user_id INT, target_id INT, day DATE, weight INT replies = tweet['in_reply_to_user_id'] replies_screen_name = tweet['in_reply_to_screen_name'] date= time.strftime('%Y-%m-%d', time.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y')) if replies: keys = [tweet['user']['id'], replies, date, 1] try: cursor = self.con.cursor() cursor.execute("SELECT * from interactions where (user_id = '"+str(tweet['user']['id'])+"' AND target_id = '"+str(replies)+"' AND day = '"+str(date)+"')") node = cursor.fetchone() if node: total = node[3]+1 cursor.execute("UPDATE interactions set weight = '"+str(total)+"' WHERE (user_id = '"+str(tweet['user']['id'])+"' AND target_id = '"+str(replies)+"' AND day = '"+str(date)+"')") else: insert = "INSERT INTO interactions(user_id, target_id, day, weight) VALUES ('"+str(keys[0])+"','"+str(keys[1])+"','"+str(keys[2])+"','"+str(keys[3])+"') " cursor.execute(insert) except Exception, e: print "DB Error - insert_mention: ", e try: cursor = self.con.cursor() select = "SELECT id from twitter_users WHERE id="+str(replies)+";" print select cursor.execute(select) node = cursor.fetchone() if node: print node else: insert = "INSERT INTO twitter_users(id, screen_name, total_tweets) VALUES (" + str(replies) + ",'" + replies_screen_name + "', 1)" cursor.execute(insert) print "added" ################ except Exception, e: print "DB Error - insert_mentionAA: ", e
aitoralmeida/eu-elections
[ 4, 1, 4, 2, 1396527867 ]
def insert_language_candidate(self,tweet): #lang TEXT, candidate_id INT, total INT keys = [tweet['lang'], 44101578, 1] try: cursor = self.con.cursor() cursor.execute("SELECT total from language_candidate WHERE ( lang='"+tweet['lang']+"' AND candidate_id ='"+str(44101578)+"')") node = cursor.fetchone() if node: total = node[0]+1 cursor.execute("UPDATE language_candidate set total = "+str(total)+" WHERE ( lang='"+tweet['lang']+"' AND candidate_id ='"+str(44101578)+"')") else: cursor.execute("INSERT INTO language_candidate(lang,candidate_id,total) VALUES ('"+keys[0]+"','"+str(keys[1])+"','"+str(keys[2])+"')") except Exception, e: print "DB Error - language_candidate: ", e
aitoralmeida/eu-elections
[ 4, 1, 4, 2, 1396527867 ]
def insert_hash_group(self,tweet): #text TEXT, group_id TEXT, day DATE, total INT hashtags = tweet['entities']['hashtags'] date= time.strftime('%Y-%m-%d', time.strptime(tweet['created_at'],'%a %b %d %H:%M:%S +0000 %Y')) for h in hashtags: hashtag = h['text'] try: cursor = self.con.cursor() cursor.execute("SELECT text, total from hash_group WHERE ( text='"+hashtag+"' AND group_id = 'ALDE' AND day = '"+str(date)+"')") node = cursor.fetchone() if node: total = node[1]+1 cursor.execute("UPDATE hash_group set total = "+str(total)+" WHERE ( text='"+hashtag+"' AND group_id = 'ALDE' AND day = '"+str(date)+"')") else: insert = "INSERT INTO hash_group(text, group_id, day, total) VALUES ('"+hashtag+"','"+"ALDE"+"','"+str(date)+"','"+str(1)+"' )" cursor.execute(insert) except Exception, e: print "DB Error - insert_hash_group: ", e
aitoralmeida/eu-elections
[ 4, 1, 4, 2, 1396527867 ]
def capitalizeFirst(word):
    """ Capitalizes the first letter of a string.

    BUG FIX: the original raised IndexError on an empty string; the empty
    string is now returned unchanged.  Unlike str.capitalize(), the rest of
    the string is left untouched.
    """
    if not word:
        return word
    return word[0].upper() + word[1:]
datacommonsorg/tools
[ 5, 20, 5, 34, 1590014482 ]
def _create_naics_map():
    """ Downloads all NAICS codes across long and short form codes.

    Returns a dict mapping NAICS code strings to Pascal-case industry
    titles.  Performs network I/O (Census spreadsheet + BLS overview CSV).
    """
    # Read in list of industry topics.
    naics_codes = pd.read_excel(
        "https://www.census.gov/eos/www/naics/2017NAICS/2-6%20digit_2017_Codes.xlsx"
    )
    naics_codes = naics_codes.iloc[:, [1, 2]]
    naics_codes.columns = ['NAICSCode', 'Title']

    # Replace all ranges with individual rows. E.g. 31-33 -> 31, 32, 33.
    def range_to_array(read_code):
        if isinstance(read_code, str) and "-" in read_code:
            lower, upper = read_code.split("-")
            return list(range(int(lower), int(upper) + 1))
        return read_code

    naics_codes = naics_codes.dropna()
    naics_codes['NAICSCode'] = naics_codes['NAICSCode'].apply(range_to_array)
    naics_codes = naics_codes.explode('NAICSCode')
    # Add unclassified code which is used in some statistical variables.
    # BUG FIX: DataFrame.append was deprecated and removed in pandas 2.0;
    # use pd.concat instead (same result: one extra row, fresh index).
    naics_codes = pd.concat(
        [naics_codes,
         pd.DataFrame([{"NAICSCode": 99, "Title": "Nonclassifiable"}])],
        ignore_index=True)
    # Query for only two digit codes.
    short_codes = naics_codes[naics_codes['NAICSCode'] < 100]
    short_codes = short_codes.set_index("NAICSCode")
    short_codes = short_codes['Title'].to_dict()
    # Read in overview codes.
    overview_codes = pd.read_csv(
        "https://data.bls.gov/cew/doc/titles/industry/high_level_industries.csv"
    )
    overview_codes.columns = ["NAICSCode", "Title"]
    overview_codes = overview_codes.set_index("NAICSCode")
    overview_codes = overview_codes['Title'].to_dict()
    # Combine the two sources of codes; overview titles win on collision.
    NAICS_MAP = {}
    combined_codes = short_codes
    combined_codes.update(overview_codes)
    # Rename industries into Pascal case.
    for code, orig_name in combined_codes.items():
        NAICS_MAP[str(code)] = standard_name_remapper(orig_name)
    # Other edge cases.
    NAICS_MAP['00'] = 'Unclassified'
    return NAICS_MAP
datacommonsorg/tools
[ 5, 20, 5, 34, 1590014482 ]
def test_push_file(self):
    """push_file should POST the destination path and base64 payload and
    return the driver for chaining."""
    driver = android_w3c_driver()
    httpretty.register_uri(
        httpretty.POST,
        appium_command('/session/1234567890/appium/device/push_file'),
    )
    dest_path = '/path/to/file.txt'
    data = base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8')
    # push_file returns self so calls can be chained.
    assert isinstance(driver.push_file(dest_path, data), WebDriver)
    # Inspect what was actually sent over the wire.
    d = get_httpretty_request_body(httpretty.last_request())
    assert d['path'] == dest_path
    assert d['data'] == str(data)
appium/python-client
[ 1415, 523, 1415, 30, 1396890095 ]
def test_push_file_invalid_arg_exception_without_src_path_and_base64data(self):
    """push_file must reject a call that supplies neither base64 data nor a
    source path."""
    driver = android_w3c_driver()
    httpretty.register_uri(
        httpretty.POST,
        appium_command('/session/1234567890/appium/device/push_file'),
    )
    dest_path = '/path/to/file.txt'
    with pytest.raises(InvalidArgumentException):
        driver.push_file(dest_path)
appium/python-client
[ 1415, 523, 1415, 30, 1396890095 ]
def test_push_file_invalid_arg_exception_with_src_file_not_found(self):
    """push_file must raise when the given source file does not exist."""
    driver = android_w3c_driver()
    httpretty.register_uri(
        httpretty.POST,
        appium_command('/session/1234567890/appium/device/push_file'),
    )
    dest_path = '/dest_path/to/file.txt'
    # Nonexistent local path — triggers the invalid-argument branch.
    src_path = '/src_path/to/file.txt'
    with pytest.raises(InvalidArgumentException):
        driver.push_file(dest_path, source_path=src_path)
appium/python-client
[ 1415, 523, 1415, 30, 1396890095 ]
def test_pull_file(self):
    """pull_file should POST the remote path and return the server's base64
    payload unchanged."""
    driver = android_w3c_driver()
    httpretty.register_uri(
        httpretty.POST,
        appium_command('/session/1234567890/appium/device/pull_file'),
        body='{"value": "SGVsbG9Xb3JsZA=="}',
    )
    dest_path = '/path/to/file.txt'
    # "SGVsbG9Xb3JsZA==" is base64 for "HelloWorld".
    assert driver.pull_file(dest_path) == str(base64.b64encode(bytes('HelloWorld', 'utf-8')).decode('utf-8'))
    d = get_httpretty_request_body(httpretty.last_request())
    assert d['path'] == dest_path
appium/python-client
[ 1415, 523, 1415, 30, 1396890095 ]
def rescore_all(workdir, nbestdir, config):
    """Rescore every CHiME n-best list (dt05/et05 x real/simu) with the
    LSTM language model, writing scores to <outdir>/lmwt.lstm."""
    task_names = ['nbestlist_{}_{}'.format(dataset, condition)
                  for dataset in ['dt05', 'et05']
                  for condition in ['real', 'simu']]
    for task in task_names:
        print('process ' + task)
        nbest_txt = nbestdir + task + '/words_text'
        outdir = workdir + nbestdir.split('/')[-2] + '/' + task + '/'
        wb.mkdir(outdir)
        write_lmscore = outdir + 'lmwt.lstm'
        lstm.rescore(workdir, nbest_txt, write_lmscore, config)
wbengine/SPMILM
[ 18, 10, 18, 1, 1470972882 ]
def __init__(self, host_dir, conf, image='bgperf/quagga'):
    # Delegate to the base container class, supplying Quagga's fixed
    # container name and guest-side directory (class attributes).
    super(Quagga, self).__init__(self.CONTAINER_NAME, image, host_dir, self.GUEST_DIR, conf)
osrg/bgperf
[ 76, 30, 76, 5, 1450693323 ]
def build_image(cls, force=False, tag='bgperf/quagga', checkout='HEAD', nocache=False): cls.dockerfile = '''
osrg/bgperf
[ 76, 30, 76, 5, 1450693323 ]
def write_config(self, scenario_global_conf): config = """hostname bgpd
osrg/bgperf
[ 76, 30, 76, 5, 1450693323 ]
def gen_neighbor_config(n): local_addr = n['local-address'] c = """neighbor {0} remote-as {1}
osrg/bgperf
[ 76, 30, 76, 5, 1450693323 ]
def _BuildConfig(self, additional_config_str=''):
    """Builds a metrics config.

    Args:
        additional_config_str: extra textproto snippet appended to the base
            config before parsing.

    Returns:
        A parsed motion_metrics_pb2.MotionMetricsConfig.
    """
    config = motion_metrics_pb2.MotionMetricsConfig()
    # Base config: 4 future samples at 10 Hz, miss thresholds measured at
    # step 3, no speed scaling by default.
    config_text = """
    track_steps_per_second: 10
    prediction_steps_per_second: 10
    track_history_samples: 0
    track_future_samples: 4
    step_configurations {
      measurement_step: 3
      lateral_miss_threshold: 1.0
      longitudinal_miss_threshold: 2.0
    }
    max_predictions: 6
    speed_scale_lower: 1.0
    speed_scale_upper: 1.0
    speed_lower_bound: 1.4
    speed_upper_bound: 11.0
    """ + additional_config_str
    text_format.Parse(config_text, config)
    return config
waymo-research/waymo-open-dataset
[ 2097, 494, 2097, 229, 1560442662 ]
def setUp(self):
    # Build the default metrics config and ground-truth scenario once for
    # each test case.
    super(MotionMetricsOpsTest, self).setUp()
    self._config = self._BuildConfig()
    self._gt = self._CreateTestScenario()
waymo-research/waymo-open-dataset
[ 2097, 494, 2097, 229, 1560442662 ]
def testComputeMissRateNoMisses(self):
    """Both predicted agents stay within the miss thresholds: zero miss
    rate and full mAP."""
    pred_score = np.reshape([0.5], (1, 1, 1))
    # Shape (batch, agents, preds, objects, steps, xy).
    pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
                                  [[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
                                 (1, 1, 1, 2, 4, 2))
    val = self._RunEval(pred_score, pred_trajectory)
    # miss_rate of Vehicle.
    self.assertEqual(val[2][0], 0.0)
    # mean_ap of Vehicle.
    self.assertEqual(val[4][0], 1.0)
waymo-research/waymo-open-dataset
[ 2097, 494, 2097, 229, 1560442662 ]
def testComputeMissRateLateral_2(self):
    """Second object offset laterally just beyond the 1.0 threshold: miss
    rate 1, mAP 0."""
    pred_score = np.reshape([0.5], (1, 1, 1))
    pred_trajectory = np.reshape(
        [[[4, 4], [6, 6], [8, 8], [10, 10]],
         [[-2, 1.01], [-3, 1.01], [-4, 1.01], [-5, 1.01]]],
        (1, 1, 1, 2, 4, 2))
    val = self._RunEval(pred_score, pred_trajectory)
    # miss_rate of Vehicle.
    self.assertEqual(val[2][0], 1.0)
    # mean_ap of Vehicle.
    self.assertEqual(val[4][0], 0.0)
waymo-research/waymo-open-dataset
[ 2097, 494, 2097, 229, 1560442662 ]
def testComputeMissRateLongitudinal_2(self):
    """Final step of the second object overshoots longitudinally past the
    2.0 threshold: miss rate 1, mAP 0."""
    pred_score = np.reshape([0.5], (1, 1, 1))
    pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
                                  [[-2, 0], [-3, 0], [-4, 0], [-7.01, 0]]],
                                 (1, 1, 1, 2, 4, 2))
    val = self._RunEval(pred_score, pred_trajectory)
    # miss_rate of Vehicle.
    self.assertEqual(val[2][0], 1.0)
    # mean_ap of Vehicle.
    self.assertEqual(val[4][0], 0.0)
waymo-research/waymo-open-dataset
[ 2097, 494, 2097, 229, 1560442662 ]
def testComputeNoMissLongitudinal_1(self):
    """First object's final step stays just inside the longitudinal
    threshold: no miss, full mAP."""
    pred_score = np.reshape([0.5], (1, 1, 1))
    pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [11.414, 11.414]],
                                  [[-2, 0], [-3, 0], [-4, 0], [-5, 0]]],
                                 (1, 1, 1, 2, 4, 2))
    val = self._RunEval(pred_score, pred_trajectory)
    # miss_rate of Vehicle.
    self.assertEqual(val[2][0], 0.0)
    # mean_ap of Vehicle.
    self.assertEqual(val[4][0], 1.0)
waymo-research/waymo-open-dataset
[ 2097, 494, 2097, 229, 1560442662 ]
def testComputeVelocityScalingLongitudinal(self):
    """Miss thresholds scale with ground-truth speed between the configured
    speed bounds; vary the GT velocity around the fit/miss boundary."""
    pred_score = np.reshape([0.5], (1, 1, 1))
    pred_trajectory = np.reshape([[[4, 4], [6, 6], [8, 8], [10, 10]],
                                  [[-2, 0], [-3, 0], [-4, 0], [-6.5, 0]]],
                                 (1, 1, 1, 2, 4, 2))
    # Enable speed scaling on a copy of the default config.
    config = motion_metrics_pb2.MotionMetricsConfig()
    config.CopyFrom(self._config)
    config.speed_scale_lower = 0.5
    config.speed_scale_upper = 1.0
    config.speed_lower_bound = 1.0
    config.speed_upper_bound = 3.0
    val = self._RunEval(pred_score, pred_trajectory, config=config)
    # miss_rate of Vehicle.
    self.assertEqual(val[2][0], 0.0)
    # mean_ap of Vehicle.
    self.assertEqual(val[4][0], 1.0)
    # Decrease the velocity below the speed lower bound.
    gt = copy.deepcopy(self._gt)
    gt['gt_trajectory'][0, 1, :, 5:7] = 0.0
    val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
    # miss_rate of Vehicle.
    self.assertEqual(val[2][0], 1.0)
    # Set the velocity to just below the speed required for object2 to fit.
    gt = copy.deepcopy(self._gt)
    gt['gt_trajectory'][0, 1, :, 5] = 1.999
    val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
    # miss_rate of Vehicle.
    self.assertEqual(val[2][0], 1.0)
    # Set the velocity to just above the speed required for object2 to fit.
    gt = copy.deepcopy(self._gt)
    gt['gt_trajectory'][0, 1, :, 5] = 2.001
    val = self._RunEval(pred_score, pred_trajectory, config=config, gt=gt)
    # miss_rate of Vehicle.
    self.assertEqual(val[2][0], 0.0)
waymo-research/waymo-open-dataset
[ 2097, 494, 2097, 229, 1560442662 ]
def testTwoJointPredictionsNoMiss(self):
    """With two joint predictions, the fitting lower-scored one prevents a
    miss but the higher-scored miss halves the mAP."""
    pred_score = np.reshape([0.8, 0.5], (1, 1, 2))
    pred_trajectory = np.reshape([[[[4, 4], [6, 6], [8, 8], [10, 10]],
                                   [[-2, 0], [-3, 0], [-4, 0], [-7.01, 0]]],
                                  [[[4, 4], [6, 6], [8, 8], [10, 10]],
                                   [[-2, 0], [-3, 0], [-4, 0], [-5, 0]]]],
                                 (1, 1, 2, 2, 4, 2))
    val = self._RunEval(pred_score, pred_trajectory)
    # miss_rate of Vehicle.
    self.assertEqual(val[2][0], 0.0)
    # mean_ap of Vehicle.
    self.assertEqual(val[4][0], 0.5)
waymo-research/waymo-open-dataset
[ 2097, 494, 2097, 229, 1560442662 ]
def testComputeMinADE(self):
    """minADE/minFDE should come from the closer of the two predicted
    trajectories."""
    pred_score = np.reshape([0.5, 0.5], (1, 1, 2))
    pred_trajectory = np.reshape(
        [[[[4, 0], [6, 0], [8, 0], [10, 0]],
          [[0, 2], [0, 3], [0, 4], [0, 5]]],
         [[[14, 0], [16, 0], [18, 0], [20, 0]],
          [[0, 22], [0, 23], [0, 24], [0, 25]]]],
        (1, 1, 2, 2, 4, 2))
    val = self._RunEval(pred_score, pred_trajectory)
    # 5 metrics.
    self.assertEqual(len(val), 5)
    # 3 steps.
    self.assertEqual(len(val[0]), 3)
    # ADE of Vehicle.
    self.assertAlmostEqual(val[0][0], 5.97487, delta=1e-4)
    # FDE of Vehicle.
    self.assertAlmostEqual(val[1][0], 8.53553, delta=1e-4)
waymo-research/waymo-open-dataset
[ 2097, 494, 2097, 229, 1560442662 ]
def get_admin_user():
    """Return the credentials dictionary for the built-in admin account."""
    return dict(username='admin', password='admin', role=ADMIN_ROLE)
cloudify-cosmo/cloudify-manager
[ 135, 77, 135, 11, 1396350407 ]
def get_test_users():
    """Return the fixture accounts: alice (admin), bob and dave (regular
    users) and clair (deactivated regular user)."""
    accounts = [
        dict(username='alice', password='alice_password', role=ADMIN_ROLE),
        dict(username='bob', password='bob_password', role=USER_ROLE),
        dict(username='clair', password='clair_password', role=USER_ROLE,
             active=False),
        dict(username='dave', password='dave_password', role=USER_ROLE),
    ]
    return accounts
cloudify-cosmo/cloudify-manager
[ 135, 77, 135, 11, 1396350407 ]
def test_reference_to_providers_from_core(self):
    """Core example DAGs must never reference the providers packages."""
    for dag_file in glob.glob(f"{ROOT_FOLDER}/example_dags/**/*.py", recursive=True):
        self.assert_file_not_contains(dag_file, "providers")
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def assert_file_not_contains(self, filename: str, pattern: str):
    """Fail the test if *filename* contains *pattern*.

    The file is memory-mapped so large files are scanned without loading
    them fully into memory.
    """
    with open(filename, 'rb', 0) as file, mmap.mmap(file.fileno(), 0, access=mmap.ACCESS_READ) as content:
        if content.find(bytes(pattern, 'utf-8')) != -1:
            # BUG FIX: report the offending file instead of the "(unknown)"
            # placeholder, and fix the message grammar.
            self.fail(f"File {filename} should not contain pattern - {pattern}")
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_providers_modules_should_have_tests(self):
    """
    Assert every module in /airflow/providers has a corresponding test_
    file in tests/airflow/providers.
    """
    # Deprecated modules that don't have corresponded test
    expected_missing_providers_modules = {
        (
            'airflow/providers/amazon/aws/hooks/aws_dynamodb.py',
            'tests/providers/amazon/aws/hooks/test_aws_dynamodb.py',
        )
    }
    # TODO: Should we extend this test to cover other directories?
    modules_files = glob.glob(f"{ROOT_FOLDER}/airflow/providers/**/*.py", recursive=True)
    # Make path relative
    modules_files = (os.path.relpath(f, ROOT_FOLDER) for f in modules_files)
    # Exclude example_dags
    modules_files = (f for f in modules_files if "/example_dags/" not in f)
    # Exclude __init__.py
    modules_files = (f for f in modules_files if not f.endswith("__init__.py"))
    # Change airflow/ to tests/
    expected_test_files = (
        f'tests/{f.partition("/")[2]}' for f in modules_files if not f.endswith("__init__.py")
    )
    # Add test_ prefix to filename
    expected_test_files = (
        f'{f.rpartition("/")[0]}/test_{f.rpartition("/")[2]}'
        for f in expected_test_files
        if not f.endswith("__init__.py")
    )
    current_test_files = glob.glob(f"{ROOT_FOLDER}/tests/providers/**/*.py", recursive=True)
    # Make path relative
    current_test_files = (os.path.relpath(f, ROOT_FOLDER) for f in current_test_files)
    # Exclude __init__.py
    current_test_files = (f for f in current_test_files if not f.endswith("__init__.py"))
    # Materialize the generator pipelines exactly once, in order: note that
    # `expected_test_files` consumes `modules_files` lazily, so `set(...)` on
    # modules_files must happen only here.
    modules_files = set(modules_files)
    expected_test_files = set(expected_test_files)
    current_test_files = set(current_test_files)
    missing_tests_files = expected_test_files - expected_test_files.intersection(current_test_files)
    with self.subTest("Detect missing tests in providers module"):
        expected_missing_test_modules = {pair[1] for pair in expected_missing_providers_modules}
        missing_tests_files = missing_tests_files - set(expected_missing_test_modules)
        assert set() == missing_tests_files
    with self.subTest("Verify removed deprecated module also removed from deprecated list"):
        expected_missing_modules = {pair[0] for pair in expected_missing_providers_modules}
        removed_deprecated_module = expected_missing_modules - modules_files
        if removed_deprecated_module:
            self.fail(
                "You've removed a deprecated module:\n"
                f"{removed_deprecated_module}"
                "\n"
                "Thank you very much.\n"
                "Can you remove it from the list of expected missing modules tests, please?"
            )
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def filepath_to_module(filepath: str):
    """Convert a .py file path into a dotted module name relative to
    ROOT_FOLDER (e.g. airflow/models/dag.py -> airflow.models.dag)."""
    relative_path = os.path.relpath(os.path.abspath(filepath), ROOT_FOLDER)
    without_extension = relative_path[: -len('.py')]
    return without_extension.replace("/", ".")
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_example_dags(self):
    """Every operator/sensor/transfer module should ship a matching example
    DAG, and the MISSING_EXAMPLE_DAGS allow-list must stay up to date."""
    operators_modules = itertools.chain(
        *(self.find_resource_files(resource_type=d) for d in ["operators", "sensors", "transfers"])
    )
    example_dags_files = self.find_resource_files(resource_type="example_dags")
    # Generate tuple of department and service e.g. ('marketing_platform', 'display_video')
    operator_sets = [(f.split("/")[-3], f.split("/")[-1].rsplit(".")[0]) for f in operators_modules]
    example_sets = [
        (f.split("/")[-3], f.split("/")[-1].rsplit(".")[0].replace("example_", "", 1))
        for f in example_dags_files
    ]

    def has_example_dag(operator_set):
        # An example covers an operator when the department matches and the
        # example name starts with the operator module name.
        for e in example_sets:
            if e[0] != operator_set[0]:
                continue
            if e[1].startswith(operator_set[1]):
                return True
        return False

    with self.subTest("Detect missing example dags"):
        missing_example = {s for s in operator_sets if not has_example_dag(s)}
        missing_example -= self.MISSING_EXAMPLE_DAGS
        assert set() == missing_example
    with self.subTest("Keep update missing example dags list"):
        # Examples that now exist but are still on the allow-list.
        new_example_dag = set(example_sets).intersection(set(self.MISSING_EXAMPLE_DAGS))
        if new_example_dag:
            new_example_dag_text = '\n'.join(str(f) for f in new_example_dag)
            self.fail(
                "You've added a example dag currently listed as missing:\n"
                f"{new_example_dag_text}"
                "\n"
                "Thank you very much.\n"
                "Can you remove it from the list of missing example, please?"
            )
    with self.subTest("Remove extra elements"):
        # Allow-list entries whose operator module no longer exists.
        extra_example_dags = set(self.MISSING_EXAMPLE_DAGS) - set(operator_sets)
        if extra_example_dags:
            new_example_dag_text = '\n'.join(str(f) for f in extra_example_dags)
            self.fail(
                "You've added a example dag currently listed as missing:\n"
                f"{new_example_dag_text}"
                "\n"
                "Thank you very much.\n"
                "Can you remove it from the list of missing example, please?"
            )
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def test_detect_invalid_system_tests(self, resource_type, filename_suffix):
    """Every tests/<...> file ending in *filename_suffix* must correspond to
    an airflow/<...> resource file of the same resource type."""
    resource_tests = self.find_resource_files(top_level_directory="tests", resource_type=resource_type)
    resource_sources = self.find_resource_files(top_level_directory="airflow", resource_type=resource_type)
    actual_files = {f for f in resource_tests if f.endswith(filename_suffix)}
    # Map airflow/<path>.py -> tests/<path>/test_<name><suffix>.
    mapped = (f"tests/{f[8:]}" for f in resource_sources)
    mapped = (f.replace(".py", filename_suffix).replace("/test_", "/") for f in mapped)
    expected_files = {f'{f.rpartition("/")[0]}/test_{f.rpartition("/")[2]}' for f in mapped}
    assert set() == actual_files - expected_files
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def find_resource_files( top_level_directory: str = "airflow", department: str = "*", resource_type: str = "*", service: str = "*",
apache/incubator-airflow
[ 29418, 12032, 29418, 869, 1428948298 ]
def get_equivalent_k(k, supercell_size):
    """Yield all primitive-cell k-points that fold onto the supercell
    k-point *k* for the given supercell size (one axis per dimension)."""
    folded_axes = []
    for k_component, size in zip(k, supercell_size):
        axis = np.linspace(0, 1, size, endpoint=False) + k_component / size
        folded_axes.append(axis)
    return itertools.product(*folded_axes)
Z2PackDev/TBmodels
[ 32, 25, 32, 10, 1440401352 ]
def test_supercell_simple(get_model, t_values, supercell_size, sparse):
    """
    Test that the eigenvalues from a supercell model match the folded
    eigenvalues of the base model, for a simple model.
    """
    model = get_model(*t_values, sparse=sparse)
    supercell_model = model.supercell(size=supercell_size)
    for k in KPT:
        ev_supercell = supercell_model.eigenval(k)
        # Fold: gather eigenvalues of every primitive k equivalent to `k`.
        equivalent_k = get_equivalent_k(k, supercell_size)
        ev_folded = np.sort(
            np.array([model.eigenval(kval) for kval in equivalent_k]).flatten()
        )
        assert ev_supercell.shape == ev_folded.shape
        assert_allclose(ev_supercell, ev_folded, atol=1e-7)
Z2PackDev/TBmodels
[ 32, 25, 32, 10, 1440401352 ]
def test_supercell_simple_2d(get_model, t_values, supercell_size):
    """
    Check that the supercell model's eigenvalues equal the sorted, folded
    eigenvalues of the base model, for a simple 2D model.
    """
    base_model = get_model(*t_values, dim=2)
    sc_model = base_model.supercell(size=supercell_size)
    for kpt in [(-0.12341, 0.92435), (0, 0), (0.65432, -0.1561)]:
        ev_supercell = sc_model.eigenval(kpt)
        folded = [base_model.eigenval(kv) for kv in get_equivalent_k(kpt, supercell_size)]
        ev_folded = np.sort(np.array(folded).flatten())
        assert ev_supercell.shape == ev_folded.shape
        assert_allclose(ev_supercell, ev_folded, atol=1e-7)
Z2PackDev/TBmodels
[ 32, 25, 32, 10, 1440401352 ]
def test_supercell_simple_4d(get_model, t_values, supercell_size):
    """
    Check that the supercell model's eigenvalues equal the sorted, folded
    eigenvalues of the base model, for a simple 4D model.
    """
    base_model = get_model(*t_values, dim=4)
    sc_model = base_model.supercell(size=supercell_size)
    for kpt in [
        (-0.12341, 0.92435, 0.32, 0.1212),
        (0, 0, 0, 0),
        (0.65432, -0.1561, 0.2352346, -0.92345),
    ]:
        ev_supercell = sc_model.eigenval(kpt)
        folded = [base_model.eigenval(kv) for kv in get_equivalent_k(kpt, supercell_size)]
        ev_folded = np.sort(np.array(folded).flatten())
        assert ev_supercell.shape == ev_folded.shape
        assert_allclose(ev_supercell, ev_folded, atol=1e-7)
Z2PackDev/TBmodels
[ 32, 25, 32, 10, 1440401352 ]
def test_supercell_inas(sample, supercell_size):
    """
    Check that the supercell model's eigenvalues equal the sorted, folded
    eigenvalues of the base model, for the realistic InAs model.
    """
    base_model = tbmodels.io.load(sample("InAs_nosym.hdf5"))
    sc_model = base_model.supercell(size=supercell_size)
    for kpt in [(-0.4, 0.1, 0.45), (0, 0, 0), (0.41126, -0.153112, 0.2534)]:
        ev_supercell = sc_model.eigenval(kpt)
        folded = [base_model.eigenval(kv) for kv in get_equivalent_k(kpt, supercell_size)]
        ev_folded = np.sort(np.array(folded).flatten())
        assert ev_supercell.shape == ev_folded.shape
        assert_allclose(ev_supercell, ev_folded, atol=1e-7)
Z2PackDev/TBmodels
[ 32, 25, 32, 10, 1440401352 ]
def data_masking(self, cluster_name, db_name, sql, sql_result): result = {'status': 0, 'msg': 'ok', 'data': []} # 通过inception获取语法树,并进行解析 try: print_info = self.query_tree(sql, cluster_name, db_name) except Exception as msg: result['status'] = 1 result['msg'] = str(msg) return result if print_info is None: result['status'] = 1 result['msg'] = 'inception返回的结果集为空!可能是SQL语句有语法错误,无法完成脱敏校验,如果需要继续查询请关闭校验' elif print_info['errlevel'] != 0: result['status'] = 2 result['msg'] = 'inception返回异常,无法完成脱敏校验,如果需要继续查询请关闭校验:\n' + print_info['errmsg'] else: query_tree = print_info['query_tree'] # 获取命中脱敏规则的列数据 try: table_hit_columns, hit_columns = self.analy_query_tree(query_tree, cluster_name) except Exception as msg: result['status'] = 2 result['msg'] = '解析inception语法树获取表信息出错,无法完成脱敏校验,如果需要继续查询请关闭校验:{}\nquery_tree:{}'.format(str(msg), print_info) return result # 存在select * 的查询,遍历column_list,获取命中列的index,添加到hit_columns if table_hit_columns and sql_result.get('rows'): column_list = sql_result['column_list'] table_hit_column = {} for column_info in table_hit_columns: table_hit_column_info = {} rule_type = column_info['rule_type'] table_hit_column_info[column_info['column_name']] = rule_type table_hit_column.update(table_hit_column_info) for index, item in enumerate(column_list): if item in table_hit_column.keys(): column = {} column['column_name'] = item column['index'] = index column['rule_type'] = table_hit_column.get(item) hit_columns.append(column) # 对命中规则列hit_columns的数据进行脱敏 # 获取全部脱敏规则信息,减少循环查询,提升效率 DataMaskingRulesOb = DataMaskingRules.objects.all() if hit_columns and sql_result.get('rows'): rows = list(sql_result['rows']) for column in hit_columns: index = column['index'] for idx, item in enumerate(rows): rows[idx] = list(item) rows[idx][index] = self.regex(DataMaskingRulesOb, column['rule_type'], rows[idx][index]) sql_result['rows'] = rows return result
jly8866/archer
[ 1509, 649, 1509, 34, 1480664527 ]
def query_tree(self, sqlContent, cluster_name, dbName):
    """Run the statement through inception's query_print and normalise the
    first result row into a dict, or return None when inception returns
    nothing.

    ``errlevel`` semantics: non-zero means a problem — 1 is a warning that
    does not block execution, 2 is a fatal error that must be fixed.
    """
    try:
        print_info = inceptionDao.query_print(sqlContent, cluster_name, dbName)
    except Exception as e:
        raise Exception('通过inception获取语法树异常,请检查inception配置,并确保inception可以访问实例:' + str(e))
    if not print_info:
        return None
    row = print_info[0]
    info = {
        'id': row[0],
        'statement': row[1],
        'errlevel': row[2],
        'query_tree': row[3],
        'errmsg': row[4],
    }
    # inception reports a syntax error with errmsg == 'Global environment';
    # promote it to a fatal error and fold the detail into the message.
    if info['errmsg'] == 'Global environment':
        info['errlevel'] = 2
        info['errmsg'] = 'Global environment: ' + info['query_tree']
    return info
jly8866/archer
[ 1509, 649, 1509, 34, 1480664527 ]
def query_table_ref(self, sqlContent, cluster_name, dbName):
    """Extract the ``table_ref`` list from inception's syntax tree for a query.

    Returns ``{'status', 'msg', 'data'}``: status 0 with ``data`` holding the
    table references on success; status 1/2 when inception gave no usable
    tree (message tells the caller to disable the check to proceed).
    """
    result = {'status': 0, 'msg': 'ok', 'data': []}
    try:
        print_info = self.query_tree(sqlContent, cluster_name, dbName)
    except Exception as msg:
        result['status'] = 1
        result['msg'] = str(msg)
        return result
    if print_info is None:
        result['status'] = 1
        result['msg'] = 'inception返回的结果集为空!可能是SQL语句有语法错误,无法校验表权限,如果需要继续查询请关闭校验'
    elif print_info['errlevel'] != 0:
        result['status'] = 2
        result['msg'] = 'inception返回异常,无法校验表权限,如果需要继续查询请关闭校验:\n' + print_info['errmsg']
    else:
        # Bug fix: the original wrapped this in an extra `except Exception`
        # that re-parsed the identical JSON before attempting repair; parse
        # once, and only fall back to repair_json_str on a decode error.
        try:
            table_ref = json.loads(print_info['query_tree'])['table_ref']
        except json.JSONDecodeError:
            try:
                table_ref = json.loads(repair_json_str(print_info['query_tree']))['table_ref']
            except json.JSONDecodeError as msg:
                result['status'] = 2
                result['msg'] = '通过inception语法树解析表信息出错,无法校验表权限,如果需要继续查询请关闭校验:{}\nquery_tree:{}'.format(str(msg),
                                                                                                 print_info)
                table_ref = ''
        result['data'] = table_ref
    return result
jly8866/archer
[ 1509, 649, 1509, 34, 1480664527 ]
def analy_query_tree(self, query_tree, cluster_name):
    """Analyse inception's syntax tree and resolve rule-covered columns.

    Returns ``(table_hit_columns, hit_columns)``:
    - ``table_hit_columns``: per-table rule hits, populated only for
      ``select *`` style queries (the caller maps them to result indexes);
    - ``hit_columns``: explicit select-list columns that hit a rule, each
      carrying the result-column ``index`` (negative = sliced from the end).
    Both are ``None`` when no referenced table has masking configured.

    Raises Exception for select shapes that cannot be masked safely.
    """
    try:
        query_tree_dict = json.loads(query_tree)
    except JSONDecodeError:
        query_tree_dict = json.loads(repair_json_str(query_tree))
    select_list = query_tree_dict.get('select_list')
    table_ref = query_tree_dict.get('table_ref')
    # Fetch all masking-column config once, to avoid per-column queries.
    DataMaskingColumnsOb = DataMaskingColumns.objects.all()
    # Does any table referenced by the statement have masking columns configured?
    is_exist = False
    for table in table_ref:
        if DataMaskingColumnsOb.filter(cluster_name=cluster_name,
                                       table_schema=table['db'],
                                       table_name=table['table'],
                                       active=1).exists():
            is_exist = True
    # If no masking columns exist, skip rule analysis entirely.
    if is_exist:
        # Walk the select_list.
        columns = []
        hit_columns = []  # columns that hit a rule
        table_hit_columns = []  # table-level hits, only needed for select *
        # Reject syntax that masking does not support.
        for select_item in select_list:
            if select_item['type'] not in ('FIELD_ITEM', 'aggregate'):
                raise Exception('不支持该查询语句脱敏!')
            if select_item['type'] == 'aggregate':
                if select_item['aggregate'].get('type') not in ('FIELD_ITEM', 'INT_ITEM'):
                    raise Exception('不支持该查询语句脱敏!')

        # Collect the selected field names; only FIELD_ITEM and aggregate
        # entries are handled, e.g. [*],[*,column_a],[column_a,*],
        # [column_a,a.*,column_b],[a.*,column_a,b.*],
        select_index = [
            select_item['field'] if select_item['type'] == 'FIELD_ITEM' else select_item['aggregate'].get('field')
            for
            select_item in select_list if select_item['type'] in ('FIELD_ITEM', 'aggregate')]

        # Normalise select_list to the uniform shape
        # {'type': 'FIELD_ITEM', 'db': ..., 'table': ..., 'field': ...}
        select_list = [select_item if select_item['type'] == 'FIELD_ITEM' else select_item['aggregate'] for
                       select_item in select_list if select_item['type'] in ('FIELD_ITEM', 'aggregate')]

        if select_index:
            # If any field is '*', scan every referenced table for rule hits.
            if '*' in select_index:
                # Table-level hits.
                for table in table_ref:
                    hit_columns_info = self.hit_table(DataMaskingColumnsOb, cluster_name, table['db'],
                                                      table['table'])
                    table_hit_columns.extend(hit_columns_info)
                # Several distinct select shapes follow.
                # NOTE(review): these patterns use `\w` per character, so
                # multi-char names match via repetition — confirm intent.
                # [*]
                if re.match(r"^(\*,?)+$", ','.join(select_index)):
                    hit_columns = []
                # [*,column_a]
                elif re.match(r"^(\*,)+(\w,?)+$", ','.join(select_index)):
                    # Keep fields that are not '*'; check each against the
                    # rules, recording its index sliced from the end.
                    for index, item in enumerate(select_list):
                        item['index'] = index - len(select_list)
                        if item.get('field') != '*':
                            columns.append(item)
                # [column_a, *]
                elif re.match(r"^(\w,?)+(\*,?)+$", ','.join(select_index)):
                    # Keep fields that are not '*'; check each against the
                    # rules, recording its index sliced from the front.
                    for index, item in enumerate(select_list):
                        item['index'] = index
                        if item.get('field') != '*':
                            columns.append(item)
                # [column_a,a.*,column_b]
                elif re.match(r"^(\w,?)+(\*,?)+(\w,?)+$", ','.join(select_index)):
                    # Fields before the first '*' are indexed from the front,
                    # fields after the last '*' from the end.
                    for index, item in enumerate(select_list):
                        item['index'] = index
                        if item.get('field') == '*':
                            first_idx = index
                            break

                    select_list.reverse()
                    for index, item in enumerate(select_list):
                        item['index'] = index
                        if item.get('field') == '*':
                            last_idx = len(select_list) - index - 1
                            break

                    select_list.reverse()
                    for index, item in enumerate(select_list):
                        if item.get('field') != '*' and index < first_idx:
                            item['index'] = index
                        if item.get('field') != '*' and index > last_idx:
                            item['index'] = index - len(select_list)
                        columns.append(item)

                # [a.*, column_a, b.*]
                else:
                    raise Exception('不支持select信息为[a.*, column_a, b.*]格式的查询脱敏!')

            # No '*' in the query: the query_tree column index equals the
            # result-set column index directly.
            else:
                for index, item in enumerate(select_list):
                    item['index'] = index
                    if item.get('field') != '*':
                        columns.append(item)

        # Format the hit-column info.
        for column in columns:
            hit_info = self.hit_column(DataMaskingColumnsOb, cluster_name, column.get('db'), column.get('table'),
                                       column.get('field'))
            if hit_info['is_hit']:
                hit_info['index'] = column['index']
                hit_columns.append(hit_info)
    else:
        table_hit_columns = None
        hit_columns = None
    return table_hit_columns, hit_columns
jly8866/archer
[ 1509, 649, 1509, 34, 1480664527 ]
def hit_column(self, DataMaskingColumnsOb, cluster_name, table_schema, table_name, column_name):
    """Check whether one column is covered by an active masking rule.

    Returns a dict describing the column with ``is_hit`` and, when hit,
    the matching ``rule_type`` (otherwise 0).
    """
    matches = DataMaskingColumnsOb.filter(cluster_name=cluster_name,
                                          table_schema=table_schema,
                                          table_name=table_name,
                                          column_name=column_name,
                                          active=1)
    hit_info = {
        'cluster_name': cluster_name,
        'table_schema': table_schema,
        'table_name': table_name,
        'column_name': column_name,
        'rule_type': 0,
        'is_hit': False,
    }
    # A matching active rule marks the column as hit.
    if matches:
        hit_info['rule_type'] = matches[0].rule_type
        hit_info['is_hit'] = True
    return hit_info
jly8866/archer
[ 1509, 649, 1509, 34, 1480664527 ]
def hit_table(self, DataMaskingColumnsOb, cluster_name, table_schema, table_name):
    """Return one hit-info dict per active masking rule on the given table.

    Used for ``select *`` queries, where every configured column of every
    referenced table counts as a hit.
    """
    matches = DataMaskingColumnsOb.filter(cluster_name=cluster_name,
                                          table_schema=table_schema,
                                          table_name=table_name,
                                          active=1)
    # Build one record per configured column.
    return [
        {
            'cluster_name': cluster_name,
            'table_schema': table_schema,
            'table_name': table_name,
            'is_hit': True,
            'column_name': col.column_name,
            'rule_type': col.rule_type,
        }
        for col in matches
    ]
jly8866/archer
[ 1509, 649, 1509, 34, 1480664527 ]
def regex(self, DataMaskingRulesOb, rule_type, value):
    """Mask *value* using the regex rule registered for *rule_type*.

    The rule's pattern must capture groups; the group at position
    ``hide_group`` is replaced by '****' and the remaining groups are
    concatenated. When the pattern does not match (or anything else goes
    wrong), the value is returned unmasked.
    """
    rules_info = DataMaskingRulesOb.get(rule_type=rule_type)
    if not rules_info:
        return value
    try:
        match = re.compile(rules_info.rule_regex).search(str(value))
        pieces = []
        for idx in range(match.lastindex):
            # Replace the hidden group, keep all others verbatim.
            pieces.append('****' if idx == rules_info.hide_group - 1 else match.group(idx + 1))
        return ''.join(pieces)
    except Exception:
        # No match / no groups / bad pattern: fall back to the raw value.
        return value
jly8866/archer
[ 1509, 649, 1509, 34, 1480664527 ]
def foo(*args):
    """Debug stub: print a marker plus the received args and flush stdout
    immediately so the output is visible even if the process dies."""
    # Bug fix: the original used the Python 2 print statement, which is a
    # syntax error under Python 3.
    print("foo!", args)
    import sys
    sys.stdout.flush()
boakley/robotframework-workbench
[ 22, 6, 22, 16, 1332542179 ]
def __init__(self, app):
    """Store a reference to the owning application object."""
    # Keep a handle to the app so other methods can reach it later.
    self.app = app
boakley/robotframework-workbench
[ 22, 6, 22, 16, 1332542179 ]
def evaluate(step, width, height):
    """Toy objective: a term that decays with ``step`` (scaled by ``width``)
    plus a small offset proportional to ``height``."""
    base = 0.1 + width * step / 100
    return base ** (-1) + height * 0.01
ray-project/ray
[ 24488, 4264, 24488, 2914, 1477424310 ]
def from_service_account_info(cls, info: dict, *args, **kwargs):
    """Build a MetadataServiceAsyncClient from service-account key info.

    Args:
        info (dict): The service account private key info.
        args: Additional positional arguments for the constructor.
        kwargs: Additional keyword arguments for the constructor.

    Returns:
        MetadataServiceAsyncClient: The constructed client.
    """
    # Reuse the sync client's classmethod, rebound to the async class.
    factory = MetadataServiceClient.from_service_account_info.__func__  # type: ignore
    return factory(MetadataServiceAsyncClient, info, *args, **kwargs)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def from_service_account_file(cls, filename: str, *args, **kwargs):
    """Build a MetadataServiceAsyncClient from a service-account key file.

    Args:
        filename (str): Path to the service account private key json file.
        args: Additional positional arguments for the constructor.
        kwargs: Additional keyword arguments for the constructor.

    Returns:
        MetadataServiceAsyncClient: The constructed client.
    """
    # Reuse the sync client's classmethod, rebound to the async class.
    factory = MetadataServiceClient.from_service_account_file.__func__  # type: ignore
    return factory(MetadataServiceAsyncClient, filename, *args, **kwargs)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[ClientOptions] = None
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def transport(self) -> MetadataServiceTransport:
    """The transport used by the underlying client instance.

    Returns:
        MetadataServiceTransport: The transport in use.
    """
    # Delegate to the wrapped synchronous client.
    return self._client.transport
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def __init__( self, *, credentials: ga_credentials.Credentials = None, transport: Union[str, MetadataServiceTransport] = "grpc_asyncio", client_options: ClientOptions = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_create_metadata_store():
    """Sample: create a MetadataStore and block until the LRO completes."""
    svc = aiplatform_v1.MetadataServiceClient()
    req = aiplatform_v1.CreateMetadataStoreRequest(parent="parent_value")
    # The RPC returns a long-running operation; wait for and print its result.
    op = svc.create_metadata_store(request=req)
    print("Waiting for operation to complete...")
    print(op.result())
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_get_metadata_store():
    """Sample: fetch a single MetadataStore by resource name and print it."""
    svc = aiplatform_v1.MetadataServiceClient()
    # Build and send the request, then print the server's response.
    req = aiplatform_v1.GetMetadataStoreRequest(name="name_value")
    print(svc.get_metadata_store(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_list_metadata_stores():
    """Sample: list MetadataStores under a parent, printing each item."""
    svc = aiplatform_v1.MetadataServiceClient()
    req = aiplatform_v1.ListMetadataStoresRequest(parent="parent_value")
    # The pager transparently fetches successive pages.
    for item in svc.list_metadata_stores(request=req):
        print(item)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_delete_metadata_store():
    """Sample: delete a MetadataStore and block until the LRO completes."""
    svc = aiplatform_v1.MetadataServiceClient()
    req = aiplatform_v1.DeleteMetadataStoreRequest(name="name_value")
    # The RPC returns a long-running operation; wait for and print its result.
    op = svc.delete_metadata_store(request=req)
    print("Waiting for operation to complete...")
    print(op.result())
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_create_artifact():
    """Sample: create an Artifact under a parent and print the response."""
    svc = aiplatform_v1.MetadataServiceClient()
    # Build and send the request, then print the server's response.
    req = aiplatform_v1.CreateArtifactRequest(parent="parent_value")
    print(svc.create_artifact(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_get_artifact():
    """Sample: fetch a single Artifact by resource name and print it."""
    svc = aiplatform_v1.MetadataServiceClient()
    # Build and send the request, then print the server's response.
    req = aiplatform_v1.GetArtifactRequest(name="name_value")
    print(svc.get_artifact(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_list_artifacts():
    """Sample: list Artifacts under a parent, printing each item."""
    svc = aiplatform_v1.MetadataServiceClient()
    req = aiplatform_v1.ListArtifactsRequest(parent="parent_value")
    # The pager transparently fetches successive pages.
    for item in svc.list_artifacts(request=req):
        print(item)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_update_artifact():
    """Sample: update an Artifact (empty request body) and print the response."""
    svc = aiplatform_v1.MetadataServiceClient()
    # Build and send the request, then print the server's response.
    req = aiplatform_v1.UpdateArtifactRequest()
    print(svc.update_artifact(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_delete_artifact():
    """Sample: delete an Artifact and block until the LRO completes."""
    svc = aiplatform_v1.MetadataServiceClient()
    req = aiplatform_v1.DeleteArtifactRequest(name="name_value")
    # The RPC returns a long-running operation; wait for and print its result.
    op = svc.delete_artifact(request=req)
    print("Waiting for operation to complete...")
    print(op.result())
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_purge_artifacts():
    """Sample: purge Artifacts matching a filter; waits on the LRO."""
    svc = aiplatform_v1.MetadataServiceClient()
    req = aiplatform_v1.PurgeArtifactsRequest(parent="parent_value", filter="filter_value")
    # The RPC returns a long-running operation; wait for and print its result.
    op = svc.purge_artifacts(request=req)
    print("Waiting for operation to complete...")
    print(op.result())
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_create_context():
    """Sample: create a Context under a parent and print the response."""
    svc = aiplatform_v1.MetadataServiceClient()
    # Build and send the request, then print the server's response.
    req = aiplatform_v1.CreateContextRequest(parent="parent_value")
    print(svc.create_context(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_get_context():
    """Sample: fetch a single Context by resource name and print it."""
    svc = aiplatform_v1.MetadataServiceClient()
    # Build and send the request, then print the server's response.
    req = aiplatform_v1.GetContextRequest(name="name_value")
    print(svc.get_context(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_list_contexts():
    """Sample: list Contexts under a parent, printing each item."""
    svc = aiplatform_v1.MetadataServiceClient()
    req = aiplatform_v1.ListContextsRequest(parent="parent_value")
    # The pager transparently fetches successive pages.
    for item in svc.list_contexts(request=req):
        print(item)
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_update_context():
    """Sample: update a Context (empty request body) and print the response."""
    svc = aiplatform_v1.MetadataServiceClient()
    # Build and send the request, then print the server's response.
    req = aiplatform_v1.UpdateContextRequest()
    print(svc.update_context(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_delete_context():
    """Sample: delete a Context and block until the LRO completes."""
    svc = aiplatform_v1.MetadataServiceClient()
    req = aiplatform_v1.DeleteContextRequest(name="name_value")
    # The RPC returns a long-running operation; wait for and print its result.
    op = svc.delete_context(request=req)
    print("Waiting for operation to complete...")
    print(op.result())
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_purge_contexts():
    """Sample: purge Contexts matching a filter; waits on the LRO."""
    svc = aiplatform_v1.MetadataServiceClient()
    req = aiplatform_v1.PurgeContextsRequest(parent="parent_value", filter="filter_value")
    # The RPC returns a long-running operation; wait for and print its result.
    op = svc.purge_contexts(request=req)
    print("Waiting for operation to complete...")
    print(op.result())
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]
def sample_add_context_artifacts_and_executions():
    """Sample: attach artifacts/executions to a Context and print the response."""
    svc = aiplatform_v1.MetadataServiceClient()
    # Build and send the request, then print the server's response.
    req = aiplatform_v1.AddContextArtifactsAndExecutionsRequest(context="context_value")
    print(svc.add_context_artifacts_and_executions(request=req))
googleapis/python-aiplatform
[ 306, 205, 306, 52, 1600875819 ]