_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q268600
_exc_to_net
test
def _exc_to_net(param1, success):
    """Translate an HTTP/exception code string into a net code.

    A short value (<= 3 chars) is treated as a plain HTTP code: net code
    is 0 on success and 314 when the assertion failed.  Longer values are
    assumed to end with a Java exception class name; known ones are mapped
    through KNOWN_EXC, unknown ones fall back to 41.
    """
    if len(param1) <= 3:
        # FIXME: we're unable to use better logic here, because we should
        # support non-http codes, but we should look for core.util.HTTP or
        # some other common logic here
        return 0 if success else 314
    # take the last whitespace-separated token, i.e. the exception name
    exc = param1.split(' ')[-1]
    if exc in KNOWN_EXC:
        return KNOWN_EXC[exc]
    logger.warning(
        "Unknown Java exception, consider adding it to dictionary: %s", param1)
    return 41
python
{ "resource": "" }
q268601
_exc_to_http
test
def _exc_to_http(param1):
    """Translate an exception string into an HTTP code.

    A short value (<= 3 chars) is expected to already be a numeric HTTP
    code and is returned as int.  Anything else (or an unknown Java
    exception name) maps to 0.
    """
    if len(param1) <= 3:
        try:
            # parse once; original parsed twice and caught BaseException,
            # which would also swallow KeyboardInterrupt
            code = int(param1)
        except ValueError:
            logger.error(
                "JMeter wrote some strange data into codes column: %s",
                param1)
        else:
            return code
    exc = param1.split(' ')[-1]
    if exc not in KNOWN_EXC:
        logger.warning("Unknown Java exception. %s", param1)
    return 0
python
{ "resource": "" }
q268602
PhantomConfig.read_config
test
def read_config(self):
    """ Read phantom tool specific options """
    # default thread count: half the CPUs plus one
    self.threads = self.cfg["threads"] or str(int(multiprocessing.cpu_count() / 2) + 1)
    self.phantom_modules_path = self.cfg["phantom_modules_path"]
    self.additional_libs = ' '.join(self.cfg["additional_libs"])
    # normalize the writelog option into phantom's none/all vocabulary
    self.answ_log_level = self.cfg["writelog"]
    if self.answ_log_level.lower() in ['0', 'false']:
        self.answ_log_level = 'none'
    elif self.answ_log_level.lower() in ['1', 'true']:
        self.answ_log_level = 'all'
    # timeout is in milliseconds (120000 == 2 minutes)
    self.timeout = parse_duration(self.cfg["timeout"])
    if self.timeout > 120000:
        logger.warning(
            "You've set timeout over 2 minutes."
            " Are you a functional tester?")
    self.answ_log = self.core.mkstemp(".log", "answ_")
    self.core.add_artifact_file(self.answ_log)
    self.core.add_artifact_file(self.phout_file)
    self.core.add_artifact_file(self.stat_log)
    self.phantom_log = self.core.mkstemp(".log", "phantom_")
    self.core.add_artifact_file(self.phantom_log)
    # one main stream from the top-level config, plus one per multi section
    main_stream = StreamConfig(
        self.core,
        len(self.streams), self.phout_file, self.answ_log,
        self.answ_log_level, self.timeout, self.cfg, True)
    self.streams.append(main_stream)
    for section in self.multi():
        self.streams.append(
            StreamConfig(
                self.core,
                len(self.streams), self.phout_file, self.answ_log,
                self.answ_log_level, self.timeout, section))
    for stream in self.streams:
        stream.read_config()
    if any(stream.ssl for stream in self.streams):
        # ssl transport needs extra phantom modules
        self.additional_libs += ' ssl io_benchmark_method_stream_transport_ssl'
python
{ "resource": "" }
q268603
PhantomConfig.compose_config
test
def compose_config(self):
    """Generate the phantom tool run config and return its file name."""
    benchmark_blocks = []
    stat_refs = []
    for stream in self.streams:
        benchmark_blocks.append(stream.compose_config())
        if not stream.is_main:
            # extra streams are referenced by their sequence number
            stat_refs.append(" benchmark_io%s" % stream.sequence_no)
    kwargs = {
        'threads': self.threads,
        'phantom_log': self.phantom_log,
        'stat_log': self.stat_log,
        'benchmarks_block': ''.join(benchmark_blocks),
        'stat_benchmarks': ''.join(stat_refs),
        'additional_libs': self.additional_libs,
        'phantom_modules_path': self.phantom_modules_path,
    }
    filename = self.core.mkstemp(".conf", "phantom_")
    self.core.add_artifact_file(filename)
    logger.debug("Generating phantom config: %s", filename)
    template_str = resource_string(__name__, "config/phantom.conf.tpl")
    config = string.Template(template_str).substitute(kwargs)
    with open(filename, 'w') as conffile:
        conffile.write(config)
    return filename
python
{ "resource": "" }
q268604
PhantomConfig.get_info
test
def get_info(self):
    """ get merged info about phantom conf """
    # start from a shallow copy of the first stream, then fold every
    # stream's stepper info into it
    result = copy.copy(self.streams[0])
    result.stat_log = self.stat_log
    result.steps = []
    result.ammo_file = ''
    result.rps_schedule = None
    result.ammo_count = 0
    result.duration = 0
    result.instances = 0
    result.loadscheme = []
    result.loop_count = 0
    for stream in self.streams:
        sec_no = 0
        logger.debug("Steps: %s", stream.stepper_wrapper.steps)
        # each step item appears to be (rps, seconds) — expand it second
        # by second and sum rps across streams (TODO confirm shape)
        for item in stream.stepper_wrapper.steps:
            for x in range(0, item[1]):
                if len(result.steps) > sec_no:
                    result.steps[sec_no][0] += item[0]
                else:
                    result.steps.append([item[0], 1])
                sec_no += 1
        # for the scalar fields only the first stream's value is kept;
        # once set, further streams blank/zero them out
        if result.rps_schedule:
            result.rps_schedule = []
        else:
            result.rps_schedule = stream.stepper_wrapper.loadscheme
        if result.loadscheme:
            result.loadscheme = ''
        else:
            # FIXME: add formatted load scheme for server:
            # <step_size,step_type,first_rps,last_rps,original_step_params>
            # as a string
            result.loadscheme = ''
        if result.loop_count:
            result.loop_count = u'0'
        else:
            result.loop_count = stream.stepper_wrapper.loop_count
        result.ammo_file += '{} '.format(stream.stepper_wrapper.ammo_file)
        result.ammo_count += stream.stepper_wrapper.ammo_count
        result.duration = max(
            result.duration, stream.stepper_wrapper.duration)
        result.instances += stream.instances
    if not result.ammo_count:
        raise ValueError("Total ammo count cannot be zero")
    return result
python
{ "resource": "" }
q268605
StreamConfig.compose_config
test
def compose_config(self):
    """ compose benchmark block """
    # step file: make sure stepped ammo data exists before composing
    self.stepper_wrapper.prepare_stepper()
    self.stpd = self.stepper_wrapper.stpd
    if self.stepper_wrapper.instances:
        self.instances = self.stepper_wrapper.instances
    if not self.stpd:
        raise RuntimeError("Cannot proceed with no STPD file")
    kwargs = {}
    kwargs['sequence_no'] = self.sequence_no
    if self.ssl:
        # optional auth and cipher sub-sections of the ssl transport block
        _auth_section = ''
        _ciphers = ''
        ssl_template = "transport_t ssl_transport = transport_ssl_t {\n" \
            " timeout = 1s\n" \
            " %s\n" \
            " %s}\n" \
            " transport = ssl_transport"
        if self.client_certificate or self.client_key:
            _auth_section = 'auth_t def_auth = auth_t { key = "%s" cert = "%s"} auth = def_auth' \
                % (self.client_key, self.client_certificate)
        if self.client_cipher_suites:
            _ciphers = 'ciphers = "%s"' % self.client_cipher_suites
        kwargs['ssl_transport'] = ssl_template % (_auth_section, _ciphers)
    else:
        kwargs['ssl_transport'] = ""
    kwargs['method_stream'] = self.method_prefix + \
        "_ipv6_t" if self.ipv6 else self.method_prefix + "_ipv4_t"
    kwargs['phout'] = self.phout_file
    kwargs['answ_log'] = self.answ_log
    kwargs['answ_log_level'] = self.answ_log_level
    # comment out the answ line in the template when logging is disabled
    kwargs['comment_answ'] = "# " if self.answ_log_level == 'none' else ''
    kwargs['stpd'] = self.stpd
    kwargs['source_log_prefix'] = self.source_log_prefix
    kwargs['method_options'] = self.method_options
    if self.tank_type:
        kwargs[
            'proto'] = "proto=http_proto%s" % self.sequence_no if self.tank_type == 'http' else "proto=none_proto"
        kwargs['comment_proto'] = ""
    else:
        kwargs['proto'] = ""
        kwargs['comment_proto'] = "#"
    if self.gatling:
        kwargs['bind'] = 'bind={ ' + self.gatling + ' }'
    else:
        kwargs['bind'] = ''
    kwargs['ip'] = self.resolved_ip
    kwargs['port'] = self.port
    kwargs['timeout'] = self.timeout
    kwargs['instances'] = self.instances
    # optional phantom http reply tuning block
    tune = ''
    if self.phantom_http_entity:
        tune += "entity = " + self.phantom_http_entity + "\n"
    if self.phantom_http_field:
        tune += "field = " + self.phantom_http_field + "\n"
    if self.phantom_http_field_num:
        tune += "field_num = {}\n".format(self.phantom_http_field_num)
    if self.phantom_http_line:
        tune += "line = " + self.phantom_http_line + "\n"
    if tune:
        kwargs['reply_limits'] = 'reply_limits = {\n' + tune + "}"
    else:
        kwargs['reply_limits'] = ''
    # the main stream and additional streams use different templates
    if self.is_main:
        fname = 'phantom_benchmark_main.tpl'
    else:
        fname = 'phantom_benchmark_additional.tpl'
    template_str = resource_string(
        __name__, "config/" + fname)
    tpl = string.Template(template_str)
    config = tpl.substitute(kwargs)
    return config
python
{ "resource": "" }
q268606
log_stdout_stderr
test
def log_stdout_stderr(log, stdout, stderr, comment=""):
    """Poll stdout/stderr streams once and write their contents to *log*.

    stdout content goes to debug level, stderr content to warning level;
    *comment* prefixes every logged line.  *stderr* may be None.
    """
    readable = select.select([stdout], [], [], 0)[0]
    if stderr:
        exceptional = select.select([stderr], [], [], 0)[0]
    else:
        exceptional = []
    log.debug("Selected: %s, %s", readable, exceptional)
    # NOTE: the original removed handles from the very list it was
    # iterating, which skips elements; the removal served no purpose
    # since both lists are local, so just iterate.
    for handle in readable:
        line = handle.read()
        if line:
            log.debug("%s stdout: %s", comment, line.strip())
    for handle in exceptional:
        line = handle.read()
        if line:
            # logger.warning, not the deprecated .warn alias
            log.warning("%s stderr: %s", comment, line.strip())
python
{ "resource": "" }
q268607
expand_time
test
def expand_time(str_time, default_unit='s', multiplier=1):
    """Convert a duration like '1h2m30s' into scaled integer seconds.

    Each "<number><unit>" token is summed; a token without a unit uses
    *default_unit*.  The total (in seconds) is multiplied by *multiplier*
    and truncated to int.  Raises ValueError for unsupported units.
    """
    # seconds per unit; replaces the original elif/continue chain
    seconds_per_unit = {
        'ms': 0.001,
        's': 1,
        'm': 60,
        'h': 60 * 60,
        'd': 60 * 60 * 24,
        'w': 60 * 60 * 24 * 7,
    }
    parser = re.compile(r'(\d+)([a-zA-Z]*)')
    result = 0.0
    for value, unit in parser.findall(str_time):
        unit = unit.lower() or default_unit
        if unit not in seconds_per_unit:
            raise ValueError(
                "String contains unsupported unit %s: %s" % (unit, str_time))
        result += int(value) * seconds_per_unit[unit]
    return int(result * multiplier)
python
{ "resource": "" }
q268608
StepperWrapper.read_config
test
def read_config(self):
    ''' stepper part of reading options '''
    self.log.info("Configuring StepperWrapper...")
    self.ammo_file = self.get_option(self.OPTION_AMMOFILE)
    self.ammo_type = self.get_option('ammo_type')
    if self.ammo_file:
        # allow "~" in the ammo file path
        self.ammo_file = os.path.expanduser(self.ammo_file)
    self.loop_limit = self.get_option(self.OPTION_LOOP)
    self.ammo_limit = self.get_option("ammo_limit")
    self.load_profile = LoadProfile(**self.get_option('load_profile'))
    self.instances = int(
        self.get_option(self.OPTION_INSTANCES_LIMIT, '1000'))
    self.uris = self.get_option("uris", [])
    # drop empty entries that may come from config formatting
    while '' in self.uris:
        self.uris.remove('')
    self.headers = self.get_option("headers")
    self.http_ver = self.get_option("header_http")
    self.autocases = self.get_option("autocases")
    self.enum_ammo = self.get_option("enum_ammo")
    self.use_caching = self.get_option("use_caching")
    self.file_cache = self.get_option('file_cache')
    # cache dir defaults to the core's artifacts dir
    cache_dir = self.get_option("cache_dir") or self.core.artifacts_base_dir
    self.cache_dir = os.path.expanduser(cache_dir)
    self.force_stepping = self.get_option("force_stepping")
    # a pre-generated stpd file may be passed directly via the load profile
    if self.get_option(self.OPTION_LOAD)[self.OPTION_LOAD_TYPE] == 'stpd_file':
        self.stpd = self.get_option(self.OPTION_LOAD)[self.OPTION_SCHEDULE]
    self.chosen_cases = self.get_option("chosen_cases").split()
    if self.chosen_cases:
        self.log.info("chosen_cases LIMITS: %s", self.chosen_cases)
python
{ "resource": "" }
q268609
StepperWrapper.prepare_stepper
test
def prepare_stepper(self):
    ''' Generate test data if necessary '''

    def publish_info(stepper_info):
        # push the stepper parameters both to the live status object
        # and to the core's publish bus
        info.status.publish('loadscheme', stepper_info.loadscheme)
        info.status.publish('loop_count', stepper_info.loop_count)
        info.status.publish('steps', stepper_info.steps)
        info.status.publish('duration', stepper_info.duration)
        info.status.ammo_count = stepper_info.ammo_count
        info.status.publish('instances', stepper_info.instances)
        self.core.publish('stepper', 'loadscheme', stepper_info.loadscheme)
        self.core.publish('stepper', 'loop_count', stepper_info.loop_count)
        self.core.publish('stepper', 'steps', stepper_info.steps)
        self.core.publish('stepper', 'duration', stepper_info.duration)
        self.core.publish('stepper', 'ammo_count', stepper_info.ammo_count)
        self.core.publish('stepper', 'instances', stepper_info.instances)
        return stepper_info

    if not self.stpd:
        self.stpd = self.__get_stpd_filename()
        # reuse the cached stpd file when caching is on, stepping is not
        # forced, and both the stpd and its info file exist
        if self.use_caching and not self.force_stepping and os.path.exists(
                self.stpd) and os.path.exists(self.__si_filename()):
            self.log.info("Using cached stpd-file: %s", self.stpd)
            stepper_info = self.__read_cached_options()
            if self.instances and self.load_profile.is_rps():
                # with an rps schedule, the instances value from the
                # config wins over the cached one
                self.log.info(
                    "rps_schedule is set. Overriding cached instances param from config: %s",
                    self.instances)
                stepper_info = stepper_info._replace(
                    instances=self.instances)
            publish_info(stepper_info)
        else:
            if (
                    self.force_stepping
                    and os.path.exists(self.__si_filename())):
                os.remove(self.__si_filename())
            # regenerate the stpd file and cache the resulting info
            self.__make_stpd_file()
            stepper_info = info.status.get_info()
            self.__write_cached_options(stepper_info)
    else:
        # stpd file was given explicitly in config
        self.log.info("Using specified stpd-file: %s", self.stpd)
        stepper_info = publish_info(self.__read_cached_options())
    # mirror the resulting parameters onto self for later use
    self.ammo_count = stepper_info.ammo_count
    self.duration = stepper_info.duration
    self.loop_count = stepper_info.loop_count
    self.loadscheme = stepper_info.loadscheme
    self.steps = stepper_info.steps
    if stepper_info.instances:
        self.instances = stepper_info.instances
python
{ "resource": "" }
q268610
StepperWrapper.__get_stpd_filename
test
def __get_stpd_filename(self):
    ''' Choose the name for stepped data file '''
    if self.use_caching:
        # hash every stepping-relevant option so that any change in
        # them yields a different cache file name
        sep = "|"
        hasher = hashlib.md5()
        hashed_str = "cache version 6" + sep + \
            ';'.join(self.load_profile.schedule) + sep + str(self.loop_limit)
        hashed_str += sep + str(self.ammo_limit) + sep + ';'.join(
            self.load_profile.schedule) + sep + str(self.autocases)
        hashed_str += sep + ";".join(self.uris) + sep + ";".join(
            self.headers) + sep + self.http_ver + sep + ";".join(
            self.chosen_cases)
        hashed_str += sep + str(self.enum_ammo) + sep + str(self.ammo_type)
        if self.load_profile.is_instances():
            hashed_str += sep + str(self.instances)
        if self.ammo_file:
            # mix in a hash of the ammo file contents
            opener = resource.get_opener(self.ammo_file)
            hashed_str += sep + opener.hash
        else:
            if not self.uris:
                raise RuntimeError("Neither ammofile nor uris specified")
            hashed_str += sep + \
                ';'.join(self.uris) + sep + ';'.join(self.headers)
        self.log.debug("stpd-hash source: %s", hashed_str)
        hasher.update(hashed_str.encode('utf8'))
        if not os.path.exists(self.cache_dir):
            os.makedirs(self.cache_dir)
        stpd = self.cache_dir + '/' + \
            os.path.basename(self.ammo_file) + \
            "_" + hasher.hexdigest() + ".stpd"
    else:
        # caching disabled: fixed name in the working directory
        stpd = os.path.realpath("ammo.stpd")
    self.log.debug("Generated cache file name: %s", stpd)
    return stpd
python
{ "resource": "" }
q268611
StepperWrapper.__read_cached_options
test
def __read_cached_options(self):
    """Load previously cached stepper info (JSON) into a StepperInfo."""
    si_path = self.__si_filename()
    self.log.debug("Reading cached stepper info: %s", si_path)
    with open(si_path, 'r') as si_file:
        return info.StepperInfo(**json.load(si_file))
python
{ "resource": "" }
q268612
StepperWrapper.__write_cached_options
test
def __write_cached_options(self, si):
    """Dump stepper info *si* to the cache file as indented JSON."""
    si_path = self.__si_filename()
    self.log.debug("Saving stepper info: %s", si_path)
    with open(si_path, 'w') as si_file:
        json.dump(si._asdict(), si_file, indent=4)
python
{ "resource": "" }
q268613
StepperWrapper.__make_stpd_file
test
def __make_stpd_file(self):
    """Generate the stpd file using the Stepper class."""
    self.log.info("Making stpd-file: %s", self.stpd)
    stepper = Stepper(
        self.core,
        # schedule goes either to rps or to instances, never both
        rps_schedule=self.load_profile.schedule if self.load_profile.is_rps() else None,
        http_ver=self.http_ver,
        ammo_file=self.ammo_file,
        instances_schedule=self.load_profile.schedule if self.load_profile.is_instances() else None,
        instances=self.instances,
        loop_limit=self.loop_limit,
        ammo_limit=self.ammo_limit,
        uris=self.uris,
        headers=[header.strip('[]') for header in self.headers],
        autocases=self.autocases,
        enum_ammo=self.enum_ammo,
        ammo_type=self.ammo_type,
        chosen_cases=self.chosen_cases,
        use_cache=self.use_caching)
    # BUG FIX: original bound the file handle to the name `os`, shadowing
    # the os module for the rest of the scope
    with open(self.stpd, 'w', self.file_cache) as stpd_file:
        stepper.write(stpd_file)
python
{ "resource": "" }
q268614
create
test
def create(rps_schedule):
    """Create a Load Plan from *rps_schedule* and publish its duration/steps."""
    if len(rps_schedule) > 1:
        # several schedule parts are combined into a composite plan
        steps = [StepFactory.produce(step_config)
                 for step_config in rps_schedule]
        lp = Composite(steps)
    else:
        lp = StepFactory.produce(rps_schedule[0])
    info.status.publish('duration', lp.get_duration() / 1000)
    info.status.publish('steps', lp.get_rps_list())
    info.status.lp_len = len(lp)
    return lp
python
{ "resource": "" }
q268615
Line.rps_at
test
def rps_at(self, t):
    '''Return rps for second t; zero outside [0, duration].'''
    # guard clause instead of if/else
    if not (0 <= t <= self.duration):
        return 0
    # linear interpolation between minrps and maxrps
    return self.minrps + \
        float(self.maxrps - self.minrps) * t / self.duration
python
{ "resource": "" }
q268616
Plugin.execute
test
def execute(self, cmd):
    """Run *cmd* in a shell and check its exit code.

    Raises RuntimeError when the subprocess exits non-zero; returns the
    (zero) exit code otherwise.
    """
    self.log.info("Executing: %s", cmd)
    result = execute(
        cmd, shell=True, poll_period=0.1, catch_out=self.catch_out)
    retcode = result[0]
    if retcode:
        raise RuntimeError("Subprocess returned %s" % retcode)
    return retcode
python
{ "resource": "" }
q268617
Decoder.decode_monitoring
test
def decode_monitoring(self, data):
    """
    The reason why we have two separate methods for monitoring
    and aggregates is a strong difference in incoming data.
    """
    # one point set per host per second
    points = list()
    for second_data in data:
        # Python 2 iteritems(); keys are host names, values their payload
        for host, host_data in second_data["data"].iteritems():
            points.append(
                self.__make_points(
                    "monitoring",
                    {"host": host, "comment": host_data.get("comment")},
                    second_data["timestamp"],
                    {
                        # fresh copy of the metrics mapping
                        metric: value
                        for metric, value in host_data["metrics"].iteritems()
                    }
                )
            )
    return points
python
{ "resource": "" }
q268618
Decoder.__make_points_for_label
test
def __make_points_for_label(self, ts, data, label, prefix, gun_stats):
    """Make a set of points for `this` label:
    overall_quantiles, overall_meta, net_codes, proto_codes, histograms
    """
    label_points = list()
    label_points.extend(
        (
            # overall quantiles for label
            self.__make_points(
                prefix + "overall_quantiles",
                {"label": label}, ts,
                self.__make_quantile_fields(data)),
            # overall meta (gun status) for label
            self.__make_points(
                prefix + "overall_meta",
                {"label": label}, ts,
                self.__make_overall_meta_fields(data, gun_stats)),
            # net codes for label
            self.__make_points(
                prefix + "net_codes",
                {"label": label}, ts,
                self.__make_netcodes_fields(data)),
            # proto codes for label
            self.__make_points(
                prefix + "proto_codes",
                {"label": label}, ts,
                self.__make_protocodes_fields(data))
        )
    )
    # histograms, one row for each bin
    if self.histograms:
        for bin_, count in zip(
                data["interval_real"]["hist"]["bins"],
                data["interval_real"]["hist"]["data"]):
            label_points.append(
                self.__make_points(
                    prefix + "histograms",
                    {"label": label}, ts,
                    {"bin": bin_, "count": count}))
    return label_points
python
{ "resource": "" }
q268619
AbstractPlugin.publish
test
def publish(self, key, value):
    """Publish a status value under this plugin's class name."""
    plugin_name = self.__class__.__name__
    self.log.debug(
        "Publishing status: %s/%s: %s", plugin_name, key, value)
    self.core.publish(plugin_name, key, value)
python
{ "resource": "" }
q268620
AbstractCriterion.count_matched_codes
test
def count_matched_codes(codes_regex, codes_dict):
    """Sum the counts of all codes whose string form matches *codes_regex*."""
    return sum(
        count for code, count in codes_dict.items()
        if codes_regex.match(str(code)))
python
{ "resource": "" }
q268621
BFGBase.stop
test
def stop(self):
    """Tell the workers to finish their jobs and quit, then clean up.

    Blocks until every worker process has exited, drains the task queue
    and joins the feeder thread; queue errors are logged, not raised.
    """
    self.quit.set()
    # wait for all workers; original built a sorted list of is_alive
    # flags and checked its last element — that is just any()
    while any(worker.is_alive() for worker in self.pool):
        time.sleep(1)
    try:
        # drain leftovers so the queue can close and the feeder can join
        while not self.task_queue.empty():
            self.task_queue.get(timeout=0.1)
        self.task_queue.close()
        self.feeder.join()
    except Exception as ex:
        logger.info(ex)
python
{ "resource": "" }
q268622
BFGBase._feed
test
def _feed(self):
    """
    A feeder that runs in distinct thread in main process.
    """
    self.plan = StpdReader(self.stpd_filename)
    if self.cached_stpd:
        # materialize the whole plan in memory
        self.plan = list(self.plan)
    for task in self.plan:
        if self.quit.is_set():
            logger.info("Stop feeding: gonna quit")
            return
        # try putting a task to a queue unless there is a quit flag
        # or all workers have exited
        while True:
            try:
                self.task_queue.put(task, timeout=1)
                break
            except Full:
                if self.quit.is_set() or self.workers_finished:
                    return
                else:
                    continue
    workers_count = self.instances
    logger.info(
        "Feeded all data. Publishing %d killer tasks" % (workers_count))
    # publish one None "poison pill" per worker; retry with exponential
    # backoff if the queue is full
    retry_delay = 1
    for _ in range(5):
        try:
            [
                self.task_queue.put(None, timeout=1)
                for _ in xrange(0, workers_count)
            ]
            break
        except Full:
            logger.debug(
                "Couldn't post killer tasks"
                " because queue is full. Retrying in %ss", retry_delay)
            time.sleep(retry_delay)
            retry_delay *= 2
    try:
        logger.info("Waiting for workers")
        # Python 2: map() is used for its side effect of joining each worker
        map(lambda x: x.join(), self.pool)
        logger.info("All workers exited.")
        self.workers_finished = True
    except (KeyboardInterrupt, SystemExit):
        # emergency shutdown: close queues, then still wait for workers
        self.task_queue.close()
        self.results.close()
        self.quit.set()
        logger.info("Going to quit. Waiting for workers")
        map(lambda x: x.join(), self.pool)
        self.workers_finished = True
python
{ "resource": "" }
q268623
ApiWorker.init_logging
test
def init_logging(self, log_filename="tank.log"):
    """Set up root-logger handlers: debug file, stdout and stderr streams."""
    root_logger = logging.getLogger('')
    self.log_filename = log_filename
    self.core.add_artifact_file(self.log_filename)

    # full debug log goes to a file kept as a test artifact
    file_handler = logging.FileHandler(self.log_filename)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(
        logging.Formatter(
            "%(asctime)s [%(levelname)s] %(name)s %(message)s"))
    root_logger.addHandler(file_handler)

    console_handler = logging.StreamHandler(sys.stdout)
    stderr_hdl = logging.StreamHandler(sys.stderr)
    fmt_regular = logging.Formatter(
        "%(asctime)s %(levelname)s: %(message)s", "%H:%M:%S")
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(fmt_regular)
    stderr_hdl.setFormatter(fmt_regular)

    # NOTE(review): SingleLevelFilter(level, True) appears to exclude
    # that level from the handler — confirm against its definition.
    # stdout: filter out ERROR/WARNING/CRITICAL records
    for level in (logging.ERROR, logging.WARNING, logging.CRITICAL):
        console_handler.addFilter(SingleLevelFilter(level, True))
    root_logger.addHandler(console_handler)

    # stderr: filter out INFO/DEBUG records
    for level in (logging.INFO, logging.DEBUG):
        stderr_hdl.addFilter(SingleLevelFilter(level, True))
    root_logger.addHandler(stderr_hdl)
python
{ "resource": "" }
q268624
ApiWorker.__add_user_options
test
def __add_user_options(self):
    """Override config options with user-specified ones, if any."""
    user_options = self.options.get('user_options', None)
    if user_options:
        self.core.apply_shorthand_options(user_options)
python
{ "resource": "" }
q268625
ApiWorker.configure
test
def configure(self, options):
    """ Make preparations before running Tank """
    self.options = options
    if self.options.get('lock_dir', None):
        self.core.set_option(
            self.core.SECTION, "lock_dir", self.options['lock_dir'])
    if self.options.get('ignore_lock', None):
        self.core.set_option(
            self.core.SECTION, 'ignore_lock', self.options['ignore_lock'])
    # retry every 5 seconds until the tank lock is acquired;
    # with lock_fail set, fail immediately instead of waiting
    while True:
        try:
            self.core.get_lock()
            break
        except Exception as exc:
            if self.options.get('lock_fail', None):
                raise RuntimeError("Lock file present, cannot continue")
            self.log.info(
                "Couldn't get lock. Will retry in 5 seconds... (%s)",
                str(exc))
            time.sleep(5)
    # default configs first, then the user-supplied one on top
    configs = self.get_default_configs()
    if self.options.get('config', None):
        configs.append(self.options['config'])
    self.core.load_configs(configs)
    self.__add_user_options()
    self.core.load_plugins()
    if self.options.get('ignore_lock', None):
        self.core.set_option(self.core.SECTION, self.IGNORE_LOCKS, "1")
python
{ "resource": "" }
q268626
ApiWorker.__graceful_shutdown
test
def __graceful_shutdown(self):
    """Run end-test and post-process hooks, return the final retcode."""
    self.log.info("Trying to shutdown gracefully...")
    # start from retcode 1 and let each stage transform it
    retcode = self.core.plugins_end_test(1)
    retcode = self.core.plugins_post_process(retcode)
    self.log.info("Done graceful shutdown")
    return retcode
python
{ "resource": "" }
q268627
TankAggregator._collect_data
test
def _collect_data(self, end=False):
    """
    Collect data, cache it and send to listeners
    """
    data = get_nowait_from_queue(self.results)
    stats = get_nowait_from_queue(self.stats_results)
    logger.debug("Data timestamps: %s" % [d.get('ts') for d in data])
    logger.debug("Stats timestamps: %s" % [d.get('ts') for d in stats])
    # pair data items with stats items by timestamp; whichever side
    # arrives first waits in its cache until the match shows up
    for item in data:
        ts = item['ts']
        if ts in self.stat_cache:
            # send items
            data_item = item
            stat_item = self.stat_cache.pop(ts)
            self.__notify_listeners(data_item, stat_item)
        else:
            self.data_cache[ts] = item
    for item in stats:
        ts = item['ts']
        if ts in self.data_cache:
            # send items
            data_item = self.data_cache.pop(ts)
            stat_item = item
            self.__notify_listeners(data_item, stat_item)
        else:
            self.stat_cache[ts] = item
    # at test end, flush still-unmatched data items with zeroed stats
    if end and len(self.data_cache) > 0:
        logger.info('Timestamps without stats:')
        for ts, data_item in sorted(
                self.data_cache.items(), key=lambda i: i[0]):
            logger.info(ts)
            self.__notify_listeners(
                data_item, StatsReader.stats_item(ts, 0, 0))
python
{ "resource": "" }
q268628
TankAggregator.__notify_listeners
test
def __notify_listeners(self, data, stats):
    """Fan out an aggregated data/stats pair to every registered listener."""
    for subscriber in self.listeners:
        subscriber.on_aggregated_data(data, stats)
python
{ "resource": "" }
q268629
get_marker
test
def get_marker(marker_type, enum_ammo=False):
    '''
    Returns a marker function of the requested marker_type

    >>> marker = get_marker('uniq')(__test_missile)
    >>> type(marker)
    <type 'str'>
    >>> len(marker)
    32

    >>> get_marker('uri')(__test_missile)
    '_example_search_hello_help_us'

    >>> marker = get_marker('non-existent')(__test_missile)
    Traceback (most recent call last):
    ...
    NotImplementedError: No such marker: "non-existent"

    >>> get_marker('3')(__test_missile)
    '_example_search_hello'

    >>> marker = get_marker('3', True)
    >>> marker(__test_missile)
    '_example_search_hello#0'
    >>> marker(__test_missile)
    '_example_search_hello#1'
    '''
    try:
        # a numeric marker_type limits the number of uri path elements;
        # zero means "no marker at all"
        limit = int(marker_type)
        if limit:
            marker = __UriMarker(limit)
        else:

            def marker(m):
                return ''
    except ValueError:
        # non-numeric: look up a named marker function
        if marker_type in __markers:
            marker = __markers[marker_type]
        else:
            raise NotImplementedError(
                'No such marker: "%s"' % marker_type)
    # todo: fix u'False'
    if enum_ammo:
        # wrap the marker so each call appends an incrementing #N suffix
        marker = __Enumerator(marker)
    return marker
python
{ "resource": "" }
q268630
parse_duration
test
def parse_duration(duration):
    '''
    Parse duration string, such as '3h2m3s', into milliseconds

    >>> parse_duration('3h2m3s')
    10923000

    >>> parse_duration('0.3s')
    300

    >>> parse_duration('5')
    5000
    '''
    _re_token = re.compile("([0-9.]+)([dhms]?)")

    def parse_token(time, multiplier):
        # seconds per supported unit
        multipliers = {'d': 86400, 'h': 3600, 'm': 60, 's': 1}
        if not multiplier:
            # bare number defaults to seconds
            return int(float(time) * 1000)
        if multiplier not in multipliers:
            raise StepperConfigurationError(
                'Failed to parse duration: %s' % duration)
        return int(float(time) * multipliers[multiplier] * 1000)

    return sum(parse_token(*token) for token in _re_token.findall(duration))
python
{ "resource": "" }
q268631
LocalhostClient.start
test
def start(self):
    """Start local agent"""
    logger.info('Starting agent on localhost')
    cmd = self.python.split()
    cmd.append(os.path.join(self.workdir, self.AGENT_FILENAME))
    cmd.extend(['--telegraf', self.path['TELEGRAF_LOCAL_PATH']])
    cmd.extend(['--host', self.host])
    if self.kill_old:
        cmd.append(self.kill_old)
    self.session = self.popen(cmd)
    # daemon thread drains the agent's output buffer in the background
    self.reader_thread = threading.Thread(target=self.read_buffer)
    self.reader_thread.setDaemon(True)
    return self.session
python
{ "resource": "" }
q268632
SSHClient.start
test
def start(self):
    """Start remote agent"""
    logger.info('Starting agent: %s', self.host)
    agent_path = os.path.join(
        self.path['AGENT_REMOTE_FOLDER'], self.AGENT_FILENAME)
    command = "{python} {agent_path} --telegraf {telegraf_path} --host {host} {kill_old}".format(
        python=self.python,
        agent_path=agent_path,
        telegraf_path=self.path['TELEGRAF_REMOTE_PATH'],
        host=self.host,
        kill_old=self.kill_old)
    logger.debug('Command to start agent: %s', command)
    self.session = self.ssh.async_session(command)
    # daemon thread drains the agent's output buffer in the background
    self.reader_thread = threading.Thread(target=self.read_buffer)
    self.reader_thread.setDaemon(True)
    return self.session
python
{ "resource": "" }
q268633
Plugin.__discover_jmeter_udp_port
test
def __discover_jmeter_udp_port(self):
    """Searching for line in jmeter.log such as
    Waiting for possible shutdown message on port 4445
    """
    r = re.compile(self.DISCOVER_PORT_PATTERN)
    with open(self.process_stderr.name, 'r') as f:
        # poll the log for up to ~10 seconds while jmeter is alive
        cnt = 0
        while self.process.pid and cnt < 10:
            line = f.readline()
            m = r.match(line)
            if m is None:
                cnt += 1
                time.sleep(1)
            else:
                port = int(m.group('port'))
                return port
        else:
            # while/else: the loop exhausted without finding the port
            logger.warning('JMeter UDP port wasn\'t discovered')
            return None
python
{ "resource": "" }
q268634
Plugin.__add_jmeter_components
test
def __add_jmeter_components(self, jmx, jtl, variables):
    """Inject tank's variables and result-writer blocks into a JMX file.

    Returns the path of the modified copy; the original file is untouched.
    ("Genius idea by Alexey Lavrenyuk")
    """
    logger.debug("Original JMX: %s", os.path.realpath(jmx))
    with open(jmx, 'r') as src_jmx:
        source_lines = src_jmx.readlines()
    try:
        # In new Jmeter version (3.2 as example) WorkBench's plugin
        # checkbox enabled by default.  It totally crashes Yandex tank
        # injection and raises XML Parse Exception.
        closing = source_lines.pop(-1)
        if "WorkBenchGui" in source_lines[-5]:
            logger.info("WorkBench checkbox enabled...bypassing")
            last_string_count = 6
        else:
            last_string_count = 2
        # peel off the trailing closing tags so our XML can be inserted
        # before them
        while last_string_count > 0:
            closing = source_lines.pop(-1) + closing
            last_string_count -= 1
        logger.debug("Closing statement: %s", closing)
    except Exception as exc:
        raise RuntimeError("Failed to find the end of JMX XML: %s" % exc)
    # build the user-defined-variables block (Python 2 iteritems)
    udv_tpl = resource_string(__name__, 'config/jmeter_var_template.xml')
    udv_set = []
    for var_name, var_value in variables.iteritems():
        udv_set.append(udv_tpl % (var_name, var_name, var_value))
    udv = "\n".join(udv_set)
    if self.jmeter_ver >= 2.13:
        save_connect = '<connectTime>true</connectTime>'
    else:
        save_connect = ''
    if self.ext_log in ['errors', 'all']:
        level_map = {'errors': 'true', 'all': 'false'}
        tpl_resource = 'jmeter_writer_ext.xml'
        tpl_args = {
            'jtl': self.jtl_file,
            'udv': udv,
            'ext_log': self.ext_log_file,
            'ext_level': level_map[self.ext_log],
            'save_connect': save_connect
        }
    else:
        tpl_resource = 'jmeter_writer.xml'
        tpl_args = {
            'jtl': self.jtl_file,
            'udv': udv,
            'save_connect': save_connect
        }
    tpl = resource_string(__name__, 'config/' + tpl_resource)
    try:
        # keep the modified jmx near the original when possible
        new_jmx = self.core.mkstemp(
            '.jmx', 'modified_', os.path.dirname(os.path.realpath(jmx)))
    except OSError as exc:
        logger.debug("Can't create modified jmx near original: %s", exc)
        new_jmx = self.core.mkstemp('.jmx', 'modified_')
    logger.debug("Modified JMX: %s", new_jmx)
    with open(new_jmx, "wb") as fh:
        # original body, then the injected blocks, then the closing tags
        fh.write(''.join(source_lines))
        fh.write(tpl % tpl_args)
        fh.write(closing)
    return new_jmx
python
{ "resource": "" }
q268635
Plugin.__terminate
test
def __terminate(self):
    """Gracefull termination of running process"""
    if self.__stderr_file:
        self.__stderr_file.close()
    if not self.__process:
        return
    # keep sending SIGTERM until the timeout expires, then escalate
    waitfor = time.time() + _PROCESS_KILL_TIMEOUT
    while time.time() < waitfor:
        try:
            self.__process.terminate()
        except EnvironmentError as e:
            # ESRCH means the process is already gone — that's success;
            # anything else is logged
            if e.errno != errno.ESRCH:
                _LOGGER.warning(
                    "Failed to terminate process '{}': {}".format(
                        self.__cmd, e))
            return
        time.sleep(0.1)
    # last resort: SIGKILL
    try:
        self.__process.kill()
    except EnvironmentError as e:
        if e.errno != errno.ESRCH:
            _LOGGER.warning(
                "Failed to kill process '{}': {}".format(self.__cmd, e))
        return
python
{ "resource": "" }
q268636
_FileStatsReader._read_data
test
def _read_data(self, lines):
    """Parse tab-separated "ts\\trps\\tinstances" lines into stats items.

    Only the first line for each (whole) second is kept; timestamps may
    be floats but are truncated to integer seconds.
    """
    parsed = []
    for line in lines:
        timestamp, rps, instances = line.split("\t")
        # floats are allowed in the source, tank expects whole seconds
        curr_ts = int(float(timestamp))
        if self.__last_ts < curr_ts:
            self.__last_ts = curr_ts
            parsed.append(
                self.stats_item(self.__last_ts, float(rps), float(instances)))
    return parsed
python
{ "resource": "" }
q268637
Plugin.__create_criterion
test
def __create_criterion(self, criterion_str):
    """Instantiate an autostop criterion from a "type(params)" string."""
    parts = criterion_str.split("(")
    type_str = parts[0].strip().lower()
    # everything between the parentheses
    param_str = parts[1].split(")")[0].strip()
    for criterion_class in self.custom_criterions:
        if criterion_class.get_type_string() == type_str:
            return criterion_class(self, param_str)
    raise ValueError(
        "Unsupported autostop criterion type: %s" % criterion_str)
python
{ "resource": "" }
q268638
ConfigManager.getconfig
test
def getconfig(self, filename, target_hint):
    """Parse the monitoring XML config into a list of host configs."""
    try:
        tree = self.parse_xml(filename)
    except IOError as exc:
        logger.error("Error loading config: %s", exc)
        raise RuntimeError("Can't read monitoring config %s" % filename)
    # one config entry per <Host> element
    return [
        self.get_host_config(host, target_hint)
        for host in tree.findall('Host')
    ]
python
{ "resource": "" }
q268639
AgentConfig.create_startup_config
test
def create_startup_config(self): """ Startup and shutdown commands config Used by agent.py on the target """ cfg_path = "agent_startup_{}.cfg".format(self.host) if os.path.isfile(cfg_path): logger.info( 'Found agent startup config file in working directory with the same name as created for host %s.\n' 'Creating new one via tempfile. This will affect predictable filenames for agent artefacts', self.host) handle, cfg_path = tempfile.mkstemp('.cfg', 'agent_') os.close(handle) try: config = ConfigParser.RawConfigParser() # FIXME incinerate such a string formatting inside a method call # T_T config.add_section('startup') [ config.set('startup', "cmd%s" % idx, cmd) for idx, cmd in enumerate(self.startups) ] config.add_section('shutdown') [ config.set('shutdown', "cmd%s" % idx, cmd) for idx, cmd in enumerate(self.shutdowns) ] config.add_section('source') [ config.set('source', "file%s" % idx, path) for idx, path in enumerate(self.sources) ] with open(cfg_path, 'w') as fds: config.write(fds) except Exception as exc: logger.error( 'Error trying to create monitoring startups config. Malformed? %s', exc, exc_info=True) return cfg_path
python
{ "resource": "" }
q268640
Plugin.__check_disk
test
def __check_disk(self): ''' raise exception on disk space exceeded ''' cmd = "sh -c \"df --no-sync -m -P -l -x fuse -x tmpfs -x devtmpfs -x davfs -x nfs " cmd += self.core.artifacts_base_dir cmd += " | tail -n 1 | awk '{print \$4}' \"" res = execute(cmd, True, 0.1, True) logging.debug("Result: %s", res) if not len(res[1]): self.log.debug("No disk usage info: %s", res[2]) return disk_free = res[1] self.log.debug( "Disk free space: %s/%s", disk_free.strip(), self.disk_limit) if int(disk_free.strip()) < self.disk_limit: raise RuntimeError( "Not enough local resources: disk space less than %sMB in %s: %sMB" % ( self.disk_limit, self.core.artifacts_base_dir, int(disk_free.strip())))
python
{ "resource": "" }
q268641
Plugin.__check_mem
test
def __check_mem(self): ''' raise exception on RAM exceeded ''' mem_free = psutil.virtual_memory().available / 2**20 self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit) if mem_free < self.mem_limit: raise RuntimeError( "Not enough resources: free memory less " "than %sMB: %sMB" % (self.mem_limit, mem_free))
python
{ "resource": "" }
q268642
get_terminal_size
test
def get_terminal_size(): ''' Gets width and height of terminal viewport ''' default_size = (30, 120) env = os.environ def ioctl_gwinsz(file_d): ''' Helper to get console size ''' try: sizes = struct.unpack( 'hh', fcntl.ioctl(file_d, termios.TIOCGWINSZ, '1234')) except Exception: sizes = default_size return sizes sizes = ioctl_gwinsz(0) or ioctl_gwinsz(1) or ioctl_gwinsz(2) if not sizes: try: file_d = os.open(os.ctermid(), os.O_RDONLY) sizes = ioctl_gwinsz(file_d) os.close(file_d.fileno()) except Exception: pass if not sizes: try: sizes = (env['LINES'], env['COLUMNS']) except Exception: sizes = default_size return int(sizes[1]), int(sizes[0])
python
{ "resource": "" }
q268643
Screen.__get_right_line
test
def __get_right_line(self, widget_output): ''' Gets next line for right panel ''' right_line = '' if widget_output: right_line = widget_output.pop(0) if len(right_line) > self.right_panel_width: right_line_plain = self.markup.clean_markup(right_line) if len(right_line_plain) > self.right_panel_width: right_line = right_line[:self.right_panel_width] + self.markup.RESET return right_line
python
{ "resource": "" }
q268644
Screen.__truncate
test
def __truncate(self, line_arr, max_width): ''' Cut tuple of line chunks according to it's wisible lenght ''' def is_space(chunk): return all([True if i == ' ' else False for i in chunk]) def is_empty(chunks, markups): result = [] for chunk in chunks: if chunk in markups: result.append(True) elif is_space(chunk): result.append(True) else: result.append(False) return all(result) left = max_width result = '' markups = self.markup.get_markup_vars() for num, chunk in enumerate(line_arr): if chunk in markups: result += chunk else: if left > 0: if len(chunk) <= left: result += chunk left -= len(chunk) else: leftover = (chunk[left:],) + line_arr[num + 1:] was_cut = not is_empty(leftover, markups) if was_cut: result += chunk[:left - 1] + self.markup.RESET + u'\u2026' else: result += chunk[:left] left = 0 return result
python
{ "resource": "" }
q268645
Screen.__render_left_panel
test
def __render_left_panel(self): ''' Render left blocks ''' self.log.debug("Rendering left blocks") left_block = self.left_panel left_block.render() blank_space = self.left_panel_width - left_block.width lines = [] pre_space = ' ' * int(blank_space / 2) if not left_block.lines: lines = [(''), (self.markup.RED + 'BROKEN LEFT PANEL' + self.markup.RESET)] else: while self.left_panel.lines: src_line = self.left_panel.lines.pop(0) line = pre_space + self.__truncate(src_line, self.left_panel_width) post_space = ' ' * (self.left_panel_width - len(self.markup.clean_markup(line))) line += post_space + self.markup.RESET lines.append(line) return lines
python
{ "resource": "" }
q268646
Screen.render_screen
test
def render_screen(self): ''' Main method to render screen view ''' self.term_width, self.term_height = get_terminal_size() self.log.debug( "Terminal size: %sx%s", self.term_width, self.term_height) self.right_panel_width = int( (self.term_width - len(self.RIGHT_PANEL_SEPARATOR)) * (float(self.info_panel_percent) / 100)) - 1 if self.right_panel_width > 0: self.left_panel_width = self.term_width - \ self.right_panel_width - len(self.RIGHT_PANEL_SEPARATOR) - 2 else: self.right_panel_width = 0 self.left_panel_width = self.term_width - 1 self.log.debug( "Left/right panels width: %s/%s", self.left_panel_width, self.right_panel_width) widget_output = [] if self.right_panel_width: widget_output = [] self.log.debug("There are %d info widgets" % len(self.info_widgets)) for index, widget in sorted( self.info_widgets.iteritems(), key=lambda item: (item[1].get_index(), item[0])): self.log.debug("Rendering info widget #%s: %s", index, widget) widget_out = widget.render(self).strip() if widget_out: widget_output += widget_out.split("\n") widget_output += [""] left_lines = self.__render_left_panel() self.log.debug("Composing final screen output") output = [] for line_no in range(1, self.term_height): line = " " if line_no > 1 and left_lines: left_line = left_lines.pop(0) left_line_plain = self.markup.clean_markup(left_line) left_line += ( ' ' * (self.left_panel_width - len(left_line_plain))) line += left_line else: line += ' ' * self.left_panel_width if self.right_panel_width: line += self.markup.RESET line += self.markup.WHITE line += self.RIGHT_PANEL_SEPARATOR line += self.markup.RESET right_line = self.__get_right_line(widget_output) line += right_line output.append(line) return self.markup.new_line.join(output) + self.markup.new_line
python
{ "resource": "" }
q268647
Screen.add_info_widget
test
def add_info_widget(self, widget): ''' Add widget string to right panel of the screen ''' index = widget.get_index() while index in self.info_widgets.keys(): index += 1 self.info_widgets[widget.get_index()] = widget
python
{ "resource": "" }
q268648
AbstractBlock.fill_rectangle
test
def fill_rectangle(self, prepared): ''' Right-pad lines of block to equal width ''' result = [] width = max([self.clean_len(line) for line in prepared]) for line in prepared: spacer = ' ' * (width - self.clean_len(line)) result.append(line + (self.screen.markup.RESET, spacer)) return (width, result)
python
{ "resource": "" }
q268649
AbstractBlock.clean_len
test
def clean_len(self, line): ''' Calculate wisible length of string ''' if isinstance(line, basestring): return len(self.screen.markup.clean_markup(line)) elif isinstance(line, tuple) or isinstance(line, list): markups = self.screen.markup.get_markup_vars() length = 0 for i in line: if i not in markups: length += len(i) return length
python
{ "resource": "" }
q268650
create
test
def create(instances_schedule): ''' Creates load plan timestamps generator >>> from util import take >>> take(7, LoadPlanBuilder().ramp(5, 4000).create()) [0, 1000, 2000, 3000, 4000, 0, 0] >>> take(7, create(['ramp(5, 4s)'])) [0, 1000, 2000, 3000, 4000, 0, 0] >>> take(12, create(['ramp(5, 4s)', 'wait(5s)', 'ramp(5,4s)'])) [0, 1000, 2000, 3000, 4000, 9000, 10000, 11000, 12000, 13000, 0, 0] >>> take(7, create(['wait(5s)', 'ramp(5, 0)'])) [5000, 5000, 5000, 5000, 5000, 0, 0] >>> take(7, create([])) [0, 0, 0, 0, 0, 0, 0] >>> take(12, create(['line(1, 9, 4s)'])) [0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 0, 0, 0] >>> take(12, create(['const(3, 5s)', 'line(7, 11, 2s)'])) [0, 0, 0, 5000, 5000, 5000, 5000, 5500, 6000, 6500, 7000, 0] >>> take(12, create(['step(2, 10, 2, 3s)'])) [0, 0, 3000, 3000, 6000, 6000, 9000, 9000, 12000, 12000, 0, 0] >>> take(12, LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).steps) [(3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)] >>> take(12, LoadPlanBuilder().stairway(100, 950, 100, 30000).steps) [(100, 30), (200, 30), (300, 30), (400, 30), (500, 30), (600, 30), (700, 30), (800, 30), (900, 30), (950, 30)] >>> LoadPlanBuilder().stairway(100, 950, 100, 30000).instances 950 >>> LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).instances 10 >>> LoadPlanBuilder().line(1, 100, 60000).instances 100 ''' lpb = LoadPlanBuilder().add_all_steps(instances_schedule) lp = lpb.create() info.status.publish('duration', 0) # info.status.publish('steps', lpb.steps) info.status.publish('steps', []) info.status.publish('instances', lpb.instances) return lp
python
{ "resource": "" }
q268651
TotalHTTPCodesCriterion.get_level_str
test
def get_level_str(self): ''' format level str ''' if self.is_relative: level_str = str(self.level) + "%" else: level_str = self.level return level_str
python
{ "resource": "" }
q268652
Plugin.add_info_widget
test
def add_info_widget(self, widget): ''' add right panel widget ''' if not self.screen: self.log.debug("No screen instance to add widget") else: self.screen.add_info_widget(widget)
python
{ "resource": "" }
q268653
APIClient.__make_writer_request
test
def __make_writer_request( self, params=None, json=None, http_method="POST", trace=False): ''' Send request to writer service. ''' request = requests.Request( http_method, self.writer_url, params=params, json=json, headers={ 'User-Agent': self.user_agent}) ids = id_gen(str(uuid.uuid4())) network_timeouts = self.network_timeouts() maintenance_timeouts = self.maintenance_timeouts() while True: try: response = self.__send_single_request(request, ids.next(), trace=trace) return response except (Timeout, ConnectionError, ProtocolError): logger.warn(traceback.format_exc()) try: timeout = next(network_timeouts) logger.warn( "Network error, will retry in %ss..." % timeout) time.sleep(timeout) continue except StopIteration: raise self.NetworkError() except self.UnderMaintenance as e: try: timeout = next(maintenance_timeouts) logger.warn( "Writer is under maintenance, will retry in %ss..." % timeout) time.sleep(timeout) continue except StopIteration: raise e
python
{ "resource": "" }
q268654
TankCore.load_plugins
test
def load_plugins(self): """ Tells core to take plugin options and instantiate plugin classes """ logger.info("Loading plugins...") for (plugin_name, plugin_path, plugin_cfg) in self.config.plugins: logger.debug("Loading plugin %s from %s", plugin_name, plugin_path) if plugin_path == "yandextank.plugins.Overload": logger.warning( "Deprecated plugin name: 'yandextank.plugins.Overload'\n" "There is a new generic plugin now.\n" "Correcting to 'yandextank.plugins.DataUploader overload'") plugin_path = "yandextank.plugins.DataUploader overload" try: plugin = il.import_module(plugin_path) except ImportError: logger.warning('Plugin name %s path %s import error', plugin_name, plugin_path) logger.debug('Plugin name %s path %s import error', plugin_name, plugin_path, exc_info=True) raise try: instance = getattr(plugin, 'Plugin')(self, cfg=plugin_cfg, name=plugin_name) except AttributeError: logger.warning('Plugin %s classname should be `Plugin`', plugin_name) raise else: self.register_plugin(self.PLUGIN_PREFIX + plugin_name, instance) logger.debug("Plugin instances: %s", self._plugins)
python
{ "resource": "" }
q268655
TankCore.get_plugin_of_type
test
def get_plugin_of_type(self, plugin_class): """ Retrieve a plugin of desired class, KeyError raised otherwise """ logger.debug("Searching for plugin: %s", plugin_class) matches = [plugin for plugin in self.plugins.values() if isinstance(plugin, plugin_class)] if matches: if len(matches) > 1: logger.debug( "More then one plugin of type %s found. Using first one.", plugin_class) return matches[-1] else: raise KeyError("Requested plugin type not found: %s" % plugin_class)
python
{ "resource": "" }
q268656
TankCore.get_plugins_of_type
test
def get_plugins_of_type(self, plugin_class): """ Retrieve a list of plugins of desired class, KeyError raised otherwise """ logger.debug("Searching for plugins: %s", plugin_class) matches = [plugin for plugin in self.plugins.values() if isinstance(plugin, plugin_class)] if matches: return matches else: raise KeyError("Requested plugin type not found: %s" % plugin_class)
python
{ "resource": "" }
q268657
TankCore.__collect_file
test
def __collect_file(self, filename, keep_original=False): """ Move or copy single file to artifacts dir """ dest = self.artifacts_dir + '/' + os.path.basename(filename) logger.debug("Collecting file: %s to %s", filename, dest) if not filename or not os.path.exists(filename): logger.warning("File not found to collect: %s", filename) return if os.path.exists(dest): # FIXME: 3 find a way to store artifacts anyway logger.warning("File already exists: %s", dest) return if keep_original: shutil.copy(filename, self.artifacts_dir) else: shutil.move(filename, self.artifacts_dir) os.chmod(dest, 0o644)
python
{ "resource": "" }
q268658
TankCore.add_artifact_file
test
def add_artifact_file(self, filename, keep_original=False): """ Add file to be stored as result artifact on post-process phase """ if filename: logger.debug( "Adding artifact file to collect (keep=%s): %s", keep_original, filename) self.artifact_files[filename] = keep_original
python
{ "resource": "" }
q268659
TankCore.mkstemp
test
def mkstemp(self, suffix, prefix, directory=None): """ Generate temp file name in artifacts base dir and close temp file handle """ if not directory: directory = self.artifacts_dir fd, fname = tempfile.mkstemp(suffix, prefix, directory) os.close(fd) os.chmod(fname, 0o644) # FIXME: chmod to parent dir's mode? return fname
python
{ "resource": "" }
q268660
ConfigManager.load_files
test
def load_files(self, configs): """ Read configs set into storage """ logger.debug("Reading configs: %s", configs) config_filenames = [resource.resource_filename(config) for config in configs] try: self.config.read(config_filenames) except Exception as ex: logger.error("Can't load configs: %s", ex) raise ex
python
{ "resource": "" }
q268661
ConfigManager.flush
test
def flush(self, filename=None): """ Flush current stat to file """ if not filename: filename = self.file if filename: with open(filename, 'w') as handle: self.config.write(handle)
python
{ "resource": "" }
q268662
ConfigManager.get_options
test
def get_options(self, section, prefix=''): """ Get options list with requested prefix """ res = [] try: for option in self.config.options(section): if not prefix or option.find(prefix) == 0: res += [( option[len(prefix):], self.config.get(section, option))] except ConfigParser.NoSectionError as ex: logger.warning("No section: %s", ex) logger.debug( "Section: [%s] prefix: '%s' options:\n%s", section, prefix, res) return res
python
{ "resource": "" }
q268663
ConfigManager.find_sections
test
def find_sections(self, prefix): """ return sections with specified prefix """ res = [] for section in self.config.sections(): if section.startswith(prefix): res.append(section) return res
python
{ "resource": "" }
q268664
PhantomStatsReader._decode_stat_data
test
def _decode_stat_data(self, chunk): """ Return all items found in this chunk """ for date_str, statistics in chunk.iteritems(): date_obj = datetime.datetime.strptime( date_str.split(".")[0], '%Y-%m-%d %H:%M:%S') chunk_date = int(time.mktime(date_obj.timetuple())) instances = 0 for benchmark_name, benchmark in statistics.iteritems(): if not benchmark_name.startswith("benchmark_io"): continue for method, meth_obj in benchmark.iteritems(): if "mmtasks" in meth_obj: instances += meth_obj["mmtasks"][2] offset = chunk_date - 1 - self.start_time reqps = 0 if 0 <= offset < len(self.phantom_info.steps): reqps = self.phantom_info.steps[offset][0] yield self.stats_item(chunk_date - 1, instances, reqps)
python
{ "resource": "" }
q268665
Plugin.get_info
test
def get_info(self): """ returns info object """ if not self.cached_info: if not self.phantom: return None self.cached_info = self.phantom.get_info() return self.cached_info
python
{ "resource": "" }
q268666
MonitoringCollector.prepare
test
def prepare(self): """Prepare for monitoring - install agents etc""" # Parse config agent_configs = [] if self.config: agent_configs = self.config_manager.getconfig( self.config, self.default_target) # Creating agent for hosts for config in agent_configs: if config['host'] in ['localhost', '127.0.0.1', '::1']: client = self.clients['localhost']( config, self.old_style_configs, kill_old=self.kill_old) else: client = self.clients['ssh']( config, self.old_style_configs, timeout=5, kill_old=self.kill_old) logger.debug('Installing monitoring agent. Host: %s', client.host) agent_config, startup_config, customs_script = client.install() if agent_config: self.agents.append(client) self.artifact_files.append(agent_config) if startup_config: self.artifact_files.append(startup_config) if customs_script: self.artifact_files.append(customs_script)
python
{ "resource": "" }
q268667
MonitoringCollector.poll
test
def poll(self): """ Poll agents for data """ start_time = time.time() for agent in self.agents: for collect in agent.reader: # don't crush if trash or traceback came from agent to stdout if not collect: return 0 for chunk in collect: ts, prepared_results = chunk if self.load_start_time and int( ts) >= self.load_start_time: ready_to_send = { "timestamp": int(ts), "data": { self.hash_hostname(agent.host): { "comment": agent.config.comment, "metrics": prepared_results } } } self.__collected_data.append(ready_to_send) logger.debug( 'Polling/decoding agents data took: %.2fms', (time.time() - start_time) * 1000) collected_data_length = len(self.__collected_data) if not self.first_data_received and self.__collected_data: self.first_data_received = True logger.info("Monitoring received first data.") else: self.send_collected_data() return collected_data_length
python
{ "resource": "" }
q268668
MonitoringCollector.send_collected_data
test
def send_collected_data(self): """sends pending data set to listeners""" data = self.__collected_data self.__collected_data = [] for listener in self.listeners: # deep copy to ensure each listener gets it's own copy listener.monitoring_data(copy.deepcopy(data))
python
{ "resource": "" }
q268669
Plugin.__detect_configuration
test
def __detect_configuration(self): """ we need to be flexible in order to determine which plugin's configuration specified and make appropriate configs to metrics collector :return: SECTION name or None for defaults """ try: is_telegraf = self.core.get_option('telegraf', "config") except KeyError: is_telegraf = None try: is_monitoring = self.core.get_option('monitoring', "config") except KeyError: is_monitoring = None if is_telegraf and is_monitoring: raise ValueError( 'Both telegraf and monitoring configs specified. ' 'Clean up your config and delete one of them') if is_telegraf and not is_monitoring: return 'telegraf' if not is_telegraf and is_monitoring: return 'monitoring' if not is_telegraf and not is_monitoring: # defaults target logic try: is_telegraf_dt = self.core.get_option('telegraf') except NoOptionError: is_telegraf_dt = None try: is_monitoring_dt = self.core.get_option('monitoring') except BaseException: is_monitoring_dt = None if is_telegraf_dt and is_monitoring_dt: raise ValueError( 'Both telegraf and monitoring default targets specified. ' 'Clean up your config and delete one of them') if is_telegraf_dt and not is_monitoring_dt: return if not is_telegraf_dt and is_monitoring_dt: self.core.set_option( "telegraf", "default_target", is_monitoring_dt) if not is_telegraf_dt and not is_monitoring_dt: return
python
{ "resource": "" }
q268670
MonitoringWidget.__handle_data_items
test
def __handle_data_items(self, host, data): """ store metric in data tree and calc offset signs sign < 0 is CYAN, means metric value is lower then previous, sign > 1 is YELLOW, means metric value is higher then prevoius, sign == 0 is WHITE, means initial or equal metric value """ for metric, value in data.iteritems(): if value == '': self.sign[host][metric] = -1 self.data[host][metric] = value else: if not self.data[host].get(metric, None): self.sign[host][metric] = 1 elif float(value) > float(self.data[host][metric]): self.sign[host][metric] = 1 elif float(value) < float(self.data[host][metric]): self.sign[host][metric] = -1 else: self.sign[host][metric] = 0 self.data[host][metric] = "%.2f" % float(value)
python
{ "resource": "" }
q268671
MonitoringReader._decode_agents_data
test
def _decode_agents_data(self, block): """ decode agents jsons, count diffs """ collect = [] if block: for chunk in block.split('\n'): try: if chunk: prepared_results = {} jsn = json.loads(chunk) for ts, values in jsn.iteritems(): for key, value in values.iteritems(): # key sample: diskio-sda1_io_time # key_group sample: diskio # key_name sample: io_time try: key_group, key_name = key.split('_')[0].split('-')[0], '_'.join(key.split('_')[1:]) except: # noqa: E722 key_group, key_name = key.split('_')[0], '_'.join(key.split('_')[1:]) if key_group in decoder.diff_metrics.keys(): if key_name in decoder.diff_metrics[key_group]: decoded_key = decoder.find_common_names( key) if self.prev_check: try: value = jsn[ts][key] - \ self.prev_check[key] except KeyError: logger.debug( 'There is no diff value for metric %s.\n' 'Timestamp: %s. Is it initial data?', key, ts, exc_info=True) value = 0 prepared_results[decoded_key] = value else: decoded_key = decoder.find_common_names( key) prepared_results[decoded_key] = value else: decoded_key = decoder.find_common_names( key) prepared_results[decoded_key] = value self.prev_check = jsn[ts] collect.append((ts, prepared_results)) except ValueError: logger.error( 'Telegraf agent send trash to output: %s', chunk) logger.debug( 'Telegraf agent data block w/ trash: %s', exc_info=True) return [] except BaseException: logger.error( 'Exception trying to parse agent data: %s', chunk, exc_info=True) return [] if collect: return collect
python
{ "resource": "" }
q268672
StreamConn.subscribe
test
async def subscribe(self, channels): '''Start subscribing channels. If the necessary connection isn't open yet, it opens now. ''' ws_channels = [] nats_channels = [] for c in channels: if c.startswith(('Q.', 'T.', 'A.', 'AM.',)): nats_channels.append(c) else: ws_channels.append(c) if len(ws_channels) > 0: await self._ensure_ws() await self._ws.send(json.dumps({ 'action': 'listen', 'data': { 'streams': ws_channels, } })) if len(nats_channels) > 0: await self._ensure_nats() await self.polygon.subscribe(nats_channels)
python
{ "resource": "" }
q268673
StreamConn.run
test
def run(self, initial_channels=[]): '''Run forever and block until exception is rasised. initial_channels is the channels to start with. ''' loop = asyncio.get_event_loop() try: loop.run_until_complete(self.subscribe(initial_channels)) loop.run_forever() finally: loop.run_until_complete(self.close())
python
{ "resource": "" }
q268674
StreamConn.close
test
async def close(self): '''Close any of open connections''' if self._ws is not None: await self._ws.close() if self.polygon is not None: await self.polygon.close()
python
{ "resource": "" }
q268675
REST._one_request
test
def _one_request(self, method, url, opts, retry): ''' Perform one request, possibly raising RetryException in the case the response is 429. Otherwise, if error text contain "code" string, then it decodes to json object and returns APIError. Returns the body json in the 200 status. ''' retry_codes = self._retry_codes resp = self._session.request(method, url, **opts) try: resp.raise_for_status() except HTTPError as http_error: # retry if we hit Rate Limit if resp.status_code in retry_codes and retry > 0: raise RetryException() if 'code' in resp.text: error = resp.json() if 'code' in error: raise APIError(error, http_error) else: raise if resp.text != '': return resp.json() return None
python
{ "resource": "" }
q268676
REST.submit_order
test
def submit_order(self, symbol, qty, side, type, time_in_force, limit_price=None, stop_price=None, client_order_id=None): '''Request a new order''' params = { 'symbol': symbol, 'qty': qty, 'side': side, 'type': type, 'time_in_force': time_in_force, } if limit_price is not None: params['limit_price'] = limit_price if stop_price is not None: params['stop_price'] = stop_price if client_order_id is not None: params['client_order_id'] = client_order_id resp = self.post('/orders', params) return Order(resp)
python
{ "resource": "" }
q268677
REST.get_order
test
def get_order(self, order_id): '''Get an order''' resp = self.get('/orders/{}'.format(order_id)) return Order(resp)
python
{ "resource": "" }
q268678
REST.get_position
test
def get_position(self, symbol): '''Get an open position''' resp = self.get('/positions/{}'.format(symbol)) return Position(resp)
python
{ "resource": "" }
q268679
REST.list_assets
test
def list_assets(self, status=None, asset_class=None): '''Get a list of assets''' params = { 'status': status, 'assert_class': asset_class, } resp = self.get('/assets', params) return [Asset(o) for o in resp]
python
{ "resource": "" }
q268680
REST.get_asset
test
def get_asset(self, symbol): '''Get an asset''' resp = self.get('/assets/{}'.format(symbol)) return Asset(resp)
python
{ "resource": "" }
q268681
create_joining_subplan
test
def create_joining_subplan( pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output ): ''' This captures a common pattern of fanning out a single value to N steps, where each step has similar structure. The strict requirement here is that each step must provide an output named the parameters parallel_step_output. This takes those steps and then uses a join node to coalesce them so that downstream steps can depend on a single output. Currently the join step just does a passthrough with no computation. It remains to be seen if there should be any work or verification done in this step, especially in multi-process environments that require persistence between steps. ''' check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition) check.inst_param(solid, 'solid', Solid) check.str_param(join_step_key, 'join_step_key') check.list_param(parallel_steps, 'parallel_steps', of_type=ExecutionStep) check.str_param(parallel_step_output, 'parallel_step_output') for parallel_step in parallel_steps: check.invariant(parallel_step.has_step_output(parallel_step_output)) join_step = create_join_step( pipeline_def, solid, join_step_key, parallel_steps, parallel_step_output ) output_name = join_step.step_outputs[0].name return ExecutionValueSubplan( parallel_steps + [join_step], StepOutputHandle.from_step(join_step, output_name) )
python
{ "resource": "" }
q268682
dict_param
test
def dict_param(obj, param_name, key_type=None, value_type=None): '''Ensures argument obj is a native Python dictionary, raises an exception if not, and otherwise returns obj. ''' if not isinstance(obj, dict): raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name)) if not (key_type or value_type): return obj return _check_key_value_types(obj, key_type, value_type)
python
{ "resource": "" }
q268683
opt_dict_param
test
def opt_dict_param(obj, param_name, key_type=None, value_type=None, value_class=None): '''Ensures argument obj is either a dictionary or None; if the latter, instantiates an empty dictionary. ''' if obj is not None and not isinstance(obj, dict): raise_with_traceback(_param_type_mismatch_exception(obj, dict, param_name)) if not obj: return {} if value_class: return _check_key_value_types(obj, key_type, value_type=value_class, value_check=issubclass) return _check_key_value_types(obj, key_type, value_type)
python
{ "resource": "" }
q268684
construct_event_logger
test
def construct_event_logger(event_record_callback): ''' Callback receives a stream of event_records ''' check.callable_param(event_record_callback, 'event_record_callback') return construct_single_handler_logger( 'event-logger', DEBUG, StructuredLoggerHandler( lambda logger_message: event_record_callback(construct_event_record(logger_message)) ), )
python
{ "resource": "" }
q268685
construct_json_event_logger
test
def construct_json_event_logger(json_path): '''Record a stream of event records to json''' check.str_param(json_path, 'json_path') return construct_single_handler_logger( "json-event-record-logger", DEBUG, JsonEventLoggerHandler( json_path, lambda record: construct_event_record( StructuredLoggerMessage( name=record.name, message=record.msg, level=record.levelno, meta=record.dagster_meta, record=record, ) ), ), )
python
{ "resource": "" }
q268686
RCParser.from_file
test
def from_file(cls, path=None): """Read a config file and instantiate the RCParser. Create new :class:`configparser.ConfigParser` for the given **path** and instantiate the :class:`RCParser` with the ConfigParser as :attr:`config` attribute. If the **path** doesn't exist, raise :exc:`ConfigFileError`. Otherwise return a new :class:`RCParser` instance. :param path: Optional path to the config file to parse. If not given, use ``'~/.pypirc'``. """ path = path or cls.CONFIG_PATH if not os.path.exists(path): error = 'Config file not found: {0!r}'.format(path) raise ConfigFileError(error) config = read_config(path) return cls(config)
python
{ "resource": "" }
q268687
RCParser.get_repository_config
test
def get_repository_config(self, repository): """Get config dictionary for the given repository. If the repository section is not found in the config file, return ``None``. If the file is invalid, raise :exc:`configparser.Error`. Otherwise return a dictionary with: * ``'repository'`` -- the repository URL * ``'username'`` -- username for authentication * ``'password'`` -- password for authentication :param repository: Name or URL of the repository to find in the ``.pypirc`` file. The repository section must be defined in the config file. """ servers = self._read_index_servers() repo_config = self._find_repo_config(servers, repository) return repo_config
python
{ "resource": "" }
q268688
format_config_for_graphql
test
def format_config_for_graphql(config):
    '''Format a config dict as a GraphQL-style literal string.

    Recursively renders dicts as ``{ key: value, ... }`` blocks (keys fully
    sorted), lists as ``[ value, ... ]`` blocks, booleans as lowercase
    ``true``/``false``, and all other scalars via ``repr`` with single quotes
    normalized to double quotes.

    Args:
        config (dict): The config dictionary to render. Keys must be strings.

    Returns:
        str: The formatted, indented GraphQL config string.
    '''

    def _format_config_subdict(config, current_indent=0):
        # Render a dict as a brace-delimited block, one "key: value" per line.
        check.dict_param(config, 'config', key_type=str)

        printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent)
        printer.line('{')

        n_elements = len(config)
        # BUGFIX: sort on the whole key. The previous key=lambda x: x[0]
        # compared only the first character, leaving keys that share an
        # initial letter in arbitrary (insertion) order.
        for i, key in enumerate(sorted(config)):
            value = config[key]
            with printer.with_indent():
                formatted_value = (
                    _format_config_item(value, current_indent=printer.current_indent)
                    .lstrip(' ')
                    .rstrip('\n')
                )
                printer.line(
                    '{key}: {formatted_value}{comma}'.format(
                        key=key,
                        formatted_value=formatted_value,
                        comma=',' if i != n_elements - 1 else '',
                    )
                )
        printer.line('}')

        return printer.read()

    def _format_config_sublist(config, current_indent=0):
        # Render a list as a bracket-delimited block, one value per line.
        printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent)
        printer.line('[')

        n_elements = len(config)
        for i, value in enumerate(config):
            with printer.with_indent():
                formatted_value = (
                    _format_config_item(value, current_indent=printer.current_indent)
                    .lstrip(' ')
                    .rstrip('\n')
                )
                printer.line(
                    '{formatted_value}{comma}'.format(
                        formatted_value=formatted_value,
                        comma=',' if i != n_elements - 1 else '',
                    )
                )
        printer.line(']')

        return printer.read()

    def _format_config_item(config, current_indent=0):
        # Dispatch on the value's type: dict, list, bool, or scalar leaf.
        printer = IndentingStringIoPrinter(indent_level=2, current_indent=current_indent)

        if isinstance(config, dict):
            return _format_config_subdict(config, printer.current_indent)
        elif isinstance(config, list):
            return _format_config_sublist(config, printer.current_indent)
        elif isinstance(config, bool):
            # GraphQL booleans are lowercase, unlike Python's repr.
            return repr(config).lower()
        else:
            # Normalize Python repr quoting to double quotes for GraphQL.
            return repr(config).replace('\'', '"')

    check.dict_param(config, 'config', key_type=str)
    if not isinstance(config, dict):
        check.failed('Expected a dict to format as config, got: {item}'.format(item=repr(config)))

    return _format_config_subdict(config)
python
{ "resource": "" }
q268689
RepositoryDefinition.get_pipeline
test
def get_pipeline(self, name):
    '''Get a pipeline by name. Only constructs that pipeline and caches it.

    Args:
        name (str): Name of the pipeline to retrieve.

    Returns:
        PipelineDefinition: Instance of PipelineDefinition with that name.
    '''
    check.str_param(name, 'name')

    if name in self._pipeline_cache:
        return self._pipeline_cache[name]

    if name not in self.pipeline_dict:
        known_names = ', '.join(
            '"{pipeline_name}"'.format(pipeline_name=pipeline_name)
            for pipeline_name in self.pipeline_dict.keys()
        )
        raise DagsterInvariantViolationError(
            'Could not find pipeline "{name}". Found: {pipeline_names}.'.format(
                name=name, pipeline_names=known_names
            )
        )

    # The dict maps names to zero-argument factory callables; invoke lazily.
    pipeline = self.pipeline_dict[name]()

    check.invariant(
        pipeline.name == name,
        'Name does not match. Name in dict {name}. Name in pipeline {pipeline.name}'.format(
            name=name, pipeline=pipeline
        ),
    )

    self._pipeline_cache[name] = check.inst(
        pipeline,
        PipelineDefinition,
        (
            'Function passed into pipeline_dict with key {key} must return a '
            'PipelineDefinition'
        ).format(key=name),
    )

    return pipeline
python
{ "resource": "" }
q268690
RepositoryDefinition.get_all_pipelines
test
def get_all_pipelines(self):
    '''Return all pipelines as a list

    Returns:
        List[PipelineDefinition]:
    '''
    pipelines = [self.get_pipeline(name) for name in self.pipeline_dict.keys()]
    # Constructing solid defs across every pipeline performs a uniqueness check.
    self._construct_solid_defs(pipelines)
    return pipelines
python
{ "resource": "" }
q268691
get_next_event
test
def get_next_event(process, queue):
    '''Poll *queue* until an item arrives, or prove the producer is finished.

    Returns the next valid item from the queue, or
    PROCESS_DEAD_AND_QUEUE_EMPTY once the process has terminated AND the
    queue has been fully drained.

    Warning: if the child process is in an infinite loop, this will also
    infinitely loop.
    '''
    while True:
        try:
            return queue.get(block=True, timeout=TICK)
        except multiprocessing.queues.Empty:
            if process.is_alive():
                continue
            # The process may have enqueued one last event between our final
            # timed get and its death, so drain once more before giving up.
            try:
                return queue.get(block=False)
            except multiprocessing.queues.Empty:
                # Queue empty and process dead: no more events will arrive.
                return PROCESS_DEAD_AND_QUEUE_EMPTY

    check.failed('unreachable')
python
{ "resource": "" }
q268692
execute_pipeline_through_queue
test
def execute_pipeline_through_queue(
    repository_info,
    pipeline_name,
    solid_subset,
    environment_dict,
    run_id,
    message_queue,
    reexecution_config,
    step_keys_to_execute,
):
    """
    Execute pipeline using message queue as a transport
    """
    # Announce our pid first so the parent can track this worker.
    message_queue.put(ProcessStartedSentinel(os.getpid()))

    run_config = RunConfig(
        run_id,
        event_callback=message_queue.put,
        executor_config=InProcessExecutorConfig(raise_on_error=False),
        reexecution_config=reexecution_config,
        step_keys_to_execute=step_keys_to_execute,
    )

    repository_container = RepositoryContainer(repository_info)
    if repository_container.repo_error:
        error_info = serializable_error_info_from_exc_info(repository_container.repo_error)
        message_queue.put(MultiprocessingError(error_info))
        return

    try:
        pipeline = repository_container.repository.get_pipeline(pipeline_name)
        sub_pipeline = pipeline.build_sub_pipeline(solid_subset)
        return execute_pipeline(sub_pipeline, environment_dict, run_config=run_config)
    except:  # pylint: disable=W0702
        # Deliberate bare except: this is a process boundary, so every failure
        # (including BaseExceptions) must be serialized back to the parent.
        message_queue.put(
            MultiprocessingError(serializable_error_info_from_exc_info(sys.exc_info()))
        )
    finally:
        message_queue.put(MultiprocessingDone())
        message_queue.close()
python
{ "resource": "" }
q268693
MultiprocessingExecutionManager.join
test
def join(self):
    '''Block until there are no processes enqueued, then return True.'''
    while True:
        # Snapshot the termination condition while holding the lock.
        with self._processes_lock:
            finished = not self._processes and self._processing_semaphore.locked()
        if finished:
            return True
        gevent.sleep(0.1)
python
{ "resource": "" }
q268694
Field
test
def Field(
    dagster_type,
    default_value=FIELD_NO_DEFAULT_PROVIDED,
    is_optional=INFER_OPTIONAL_COMPOSITE_FIELD,
    is_secret=False,
    description=None,
):
    '''The schema for configuration data that describes the type, optionality,
    defaults, and description.

    Args:
        dagster_type (DagsterType):
            A ``DagsterType`` describing the schema of this field, e.g.
            ``Dict({'example': Field(String)})``.
        default_value (Any):
            A default value to use that respects the schema provided via
            dagster_type.
        is_optional (bool): Whether the presence of this field is optional.
        is_secret (bool): Whether this field holds sensitive data.
        description (str): Human-readable description of the field.

    Raises:
        DagsterInvalidDefinitionError:
            If ``dagster_type`` cannot be resolved to a valid config type.
    '''
    config_type = resolve_to_config_type(dagster_type)
    if not config_type:
        raise DagsterInvalidDefinitionError(
            (
                'Attempted to pass {value_repr} to a Field that expects a valid '
                'dagster type usable in config (e.g. Dict, NamedDict, Int, String et al).'
            ).format(value_repr=repr(dagster_type))
        )
    # Reuse the already-resolved config type instead of resolving twice
    # (the original called resolve_to_config_type a redundant second time).
    return FieldImpl(
        config_type=config_type,
        default_value=default_value,
        is_optional=is_optional,
        is_secret=is_secret,
        description=description,
    )
python
{ "resource": "" }
q268695
_PlanBuilder.build
test
def build(self, pipeline_def, artifacts_persisted):
    '''Builds the execution plan.
    '''
    # For each step, collect the keys of the steps that produced its inputs;
    # this is the dependency edge set of the plan DAG.
    deps = {}
    for step in self.steps:
        deps[step.key] = {
            step_input.prev_output_handle.step_key for step_input in step.step_inputs
        }

    step_dict = {step.key: step for step in self.steps}

    return ExecutionPlan(pipeline_def, step_dict, deps, artifacts_persisted)
python
{ "resource": "" }
q268696
ExecutionPlan.build
test
def build(pipeline_def, environment_config):
    '''Build a new ExecutionPlan from a pipeline definition and environment config.

    Solids are visited in topological order; for each one we hand off the
    construction of its input, transform, and output steps to a companion
    _PlanBuilder, which finally assembles the ExecutionPlan object.
    '''
    check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
    check.inst_param(environment_config, 'environment_config', EnvironmentConfig)

    builder = _PlanBuilder()

    for solid in solids_in_topological_order(pipeline_def):

        ### 1. INPUTS: one subplan per connected input
        step_inputs = []
        for input_def in solid.definition.input_defs:
            prev_step_output_handle = get_input_source_step_handle(
                pipeline_def, environment_config, builder, solid, input_def
            )

            # A None handle (see get_input_source_step_handle) means the
            # input def's runtime type is "Nothing"; no steps are produced.
            if not prev_step_output_handle:
                continue

            input_subplan = create_subplan_for_input(
                pipeline_def, environment_config, solid, prev_step_output_handle, input_def
            )

            builder.add_steps(input_subplan.steps)
            step_inputs.append(
                StepInput(
                    input_def.name,
                    input_def.runtime_type,
                    input_subplan.terminal_step_output_handle,
                )
            )

        ### 2. TRANSFORM FUNCTION: the solid's compute step
        transform_step = create_transform_step(
            pipeline_def, environment_config, solid, step_inputs
        )
        builder.add_step(transform_step)

        ### 3. OUTPUTS: one subplan (and output handle) per solid output
        for output_def in solid.definition.output_defs:
            output_subplan = create_subplan_for_output(
                pipeline_def, environment_config, solid, transform_step, output_def
            )
            builder.add_steps(output_subplan.steps)

            builder.set_output_handle(
                solid.output_handle(output_def.name),
                output_subplan.terminal_step_output_handle,
            )

    # Finally, assemble and return the execution plan.
    return builder.build(
        pipeline_def=pipeline_def,
        artifacts_persisted=environment_config.storage.construct_run_storage().is_persistent,
    )
python
{ "resource": "" }
q268697
_build_sub_pipeline
test
def _build_sub_pipeline(pipeline_def, solid_names):
    '''
    Build a pipeline which is a subset of another pipeline.
    Only includes the solids which are in solid_names.
    '''

    check.inst_param(pipeline_def, 'pipeline_def', PipelineDefinition)
    check.list_param(solid_names, 'solid_names', of_type=str)

    solid_name_set = set(solid_names)
    solids = [pipeline_def.solid_named(name) for name in solid_names]
    deps = {_dep_key_of(solid): {} for solid in solids}

    dependency_structure = pipeline_def.dependency_structure

    for solid in solids:
        for input_handle in solid.input_handles():
            if not dependency_structure.has_dep(input_handle):
                continue
            output_handle = dependency_structure.get_dep(input_handle)
            # Drop edges whose upstream solid falls outside the subset.
            if output_handle.solid.name not in solid_name_set:
                continue
            deps[_dep_key_of(solid)][input_handle.input_def.name] = DependencyDefinition(
                solid=output_handle.solid.name, output=output_handle.output_def.name
            )

    return PipelineDefinition(
        name=pipeline_def.name,
        solids=list({solid.definition for solid in solids}),
        context_definitions=pipeline_def.context_definitions,
        dependencies=deps,
    )
python
{ "resource": "" }
q268698
PipelineDefinition.solid_named
test
def solid_named(self, name):
    '''Return the solid named "name". Throws if it does not exist.

    Args:
        name (str): Name of solid

    Returns:
        SolidDefinition: SolidDefinition with correct name.
    '''
    check.str_param(name, 'name')

    if name in self._solid_dict:
        return self._solid_dict[name]

    raise DagsterInvariantViolationError(
        'Pipeline {pipeline_name} has no solid named {name}.'.format(
            pipeline_name=self.name, name=name
        )
    )
python
{ "resource": "" }
q268699
construct_publish_comands
test
def construct_publish_comands(additional_steps=None, nightly=False):
    '''Get the shell commands we'll use to actually build and publish a package to PyPI.

    Args:
        additional_steps: Optional list of extra shell commands to run after
            cleaning ``dist`` but before building.
        nightly: When True, pass ``--nightly`` to the sdist/bdist_wheel build.
    '''
    commands = ['rm -rf dist']
    if additional_steps:
        commands.extend(additional_steps)
    commands.append(
        'python setup.py sdist bdist_wheel{nightly}'.format(
            nightly=' --nightly' if nightly else ''
        )
    )
    commands.append('twine upload dist/*')
    return commands
python
{ "resource": "" }