code
stringlengths
4
4.48k
docstring
stringlengths
1
6.45k
_id
stringlengths
24
24
def __init__(self, limit = -40): <NEW_LINE> <INDENT> self.DeviceID = "Simulated RF Device" <NEW_LINE> self.Output_Power = 0 <NEW_LINE> self.Frequency = 0 <NEW_LINE> self.Output_State = False <NEW_LINE> self.limit = limit <NEW_LINE> print("Constructed " + self.DeviceID)
Informs the user when the simulated device has been created in memory. The simulated device for the RF sig gen does not need any arguments. It's main purpose is to repeat values that have been given to it with the 'set' methods. Args: Returns:
625941c1cdde0d52a9e52fac
def sendcmd(self, cmd, axs=True): <NEW_LINE> <INDENT> if axs: <NEW_LINE> <INDENT> command = f"{self._address}{self._idx}{cmd}" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> command = f"{self._address}{cmd}" <NEW_LINE> <DEDENT> self._parent.sendcmd(command)
Send a command to an axis object. :param cmd: Command :type cmd: str :param axs: Send axis address along? Not used for controller commands. Defaults to `True` :type axs: bool
625941c18e71fb1e9831d725
def start(self): <NEW_LINE> <INDENT> self.hass.states.track_change( self.tracking, self._state_changed_listener)
Starts the tracking.
625941c155399d3f0558862e
def suppression_oppose(self): <NEW_LINE> <INDENT> for i, j in zip(*np.where(self.diamond)): <NEW_LINE> <INDENT> tile = self.pavage[i, j] <NEW_LINE> if tile == 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> i2, j2 = np.array([i, j]) + PAVAGE_Etapes[tile.orientation] <NEW_LINE> if not (0 <= i2 <= 2 * self.order and 0 <= j2 <= 2 * self.order): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> tile2 = self.pavage[i2, j2] <NEW_LINE> if tile2 == 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> if tile2.orientation == PAVAGE_Etape_conflits[tile.orientation]: <NEW_LINE> <INDENT> self.pavage[np.where(self.pavage == tile)] = 0 <NEW_LINE> self.pavage[np.where(self.pavage == tile2)] = 0 <NEW_LINE> self.tiles.remove(tile) <NEW_LINE> self.tiles.remove(tile2)
Removes tiles with opposite directions or orientations
625941c132920d7e50b28149
def shutdown(self): <NEW_LINE> <INDENT> if self.fmon: <NEW_LINE> <INDENT> self.fmon.shutdown() <NEW_LINE> <DEDENT> if self._phony_collector: <NEW_LINE> <INDENT> self._phony_collector.shutdown()
Called at program exit
625941c116aa5153ce3623f4
def initialize(): <NEW_LINE> <INDENT> global bounds, win, boxes_left <NEW_LINE> bounds = {1: [], 2: [], 3: [], 4: [], 5: [], 6: [], 7: [], 8: [], 9: []} <NEW_LINE> win = {frozenset({7, 8, 9}), frozenset({4, 5, 6}), frozenset({1, 2, 3}), frozenset({7, 4, 1}), frozenset({8, 5, 2}), frozenset({9, 6, 3}), frozenset({7, 5, 3}), frozenset({9, 5, 1})} <NEW_LINE> boxes_left = [b for b in range(1, 10)] <NEW_LINE> def grid(): <NEW_LINE> <INDENT> turtle.tracer(0) <NEW_LINE> turtle.up() <NEW_LINE> turtle.goto(-90, -90) <NEW_LINE> turtle.down() <NEW_LINE> for i in range(4): <NEW_LINE> <INDENT> turtle.fd(180) <NEW_LINE> turtle.lt(90) <NEW_LINE> <DEDENT> for j in range(1, 10): <NEW_LINE> <INDENT> for k in range(4): <NEW_LINE> <INDENT> bounds[j].append(turtle.pos()) <NEW_LINE> turtle.fd(60) <NEW_LINE> turtle.lt(90) <NEW_LINE> <DEDENT> if j % 3: <NEW_LINE> <INDENT> turtle.fd(60) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> turtle.up() <NEW_LINE> turtle.goto(-90, (-90 + 60 * j // 3)) <NEW_LINE> turtle.down() <NEW_LINE> <DEDENT> <DEDENT> turtle.update() <NEW_LINE> return bounds <NEW_LINE> <DEDENT> def title(): <NEW_LINE> <INDENT> formatting = ("Arial", 18, "bold") <NEW_LINE> turtle.up() <NEW_LINE> turtle.goto(0, 105) <NEW_LINE> turtle.down() <NEW_LINE> turtle.write('Tic-Tac-Toe', align="center", font=formatting) <NEW_LINE> <DEDENT> grid() <NEW_LINE> title() <NEW_LINE> turtle.tracer(2)
Draws the starting screen, and initializes important global variables Global variables: bounds -- dictionary with box numbers as keys and box bounds as values win -- set of frozen sets with all possible winning states boxes_left -- list to keep track of empty boxes
625941c1287bf620b61d39e0
def subcontainer(c1, c2): <NEW_LINE> <INDENT> for all_items in c1.contents: <NEW_LINE> <INDENT> k = c1.count(all_items) <NEW_LINE> p = c2.count(all_items) <NEW_LINE> if all_items in c2 and k <= p: <NEW_LINE> <INDENT> return True <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return False
If every item in A is in B return True else return False
625941c163f4b57ef000109a
@app.route(BASE_URL + 'stack/<id>', methods=['GET']) <NEW_LINE> def getStack(id): <NEW_LINE> <INDENT> id = int(id) <NEW_LINE> if id in list_stacks.keys(): <NEW_LINE> <INDENT> return make_response(jsonify({"_id": id, "stack": list_stacks[id]}), 200) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return make_response('Stack id not defined', 400)
Retrieve and send back the desired stack input : Id of the Stack output: the desired stack
625941c1be7bc26dc91cd57f
def _parse_input_spec(self, spec): <NEW_LINE> <INDENT> subsys_index, input_index = self._parse_signal(spec, 'input') <NEW_LINE> return self.input_offset[subsys_index] + input_index
Parse an input specification and returns the index This function parses a specification of an input of an interconnected system component and returns the index of that input in the internal input vector. Input specifications are of one of the following forms: i first input for the ith system (i,) first input for the ith system (i, j) jth input for the ith system 'sys.sig' signal 'sig' in subsys 'sys' ('sys', 'sig') signal 'sig' in subsys 'sys' The function returns an index into the input vector array and the gain to use for that input.
625941c17d43ff24873a2c1a
def get_marker_indices(self, marker_names): <NEW_LINE> <INDENT> return [self._marker_indices[name] for name in marker_names]
Returns the marker indices for a set of marker names
625941c1b7558d58953c4e93
def rem_state(self, state): <NEW_LINE> <INDENT> bat.utils.rem_state(self, state)
Remove a state from this object's state.
625941c15fc7496912cc38f9
def main(): <NEW_LINE> <INDENT> os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aquila.settings') <NEW_LINE> try: <NEW_LINE> <INDENT> from django.core.management import execute_from_command_line <NEW_LINE> <DEDENT> except ImportError as exc: <NEW_LINE> <INDENT> raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc <NEW_LINE> <DEDENT> execute_from_command_line(sys.argv)
Run administrative tasks.
625941c150812a4eaa59c29f
def imputer(x,offend,mode): <NEW_LINE> <INDENT> offend_mat = findOffending(x,offend) <NEW_LINE> if(mode == 'del_row'): <NEW_LINE> <INDENT> ok_rows = np.where(np.sum(offend_mat,axis=1) == 0) <NEW_LINE> ok_rows = ok_rows[0] <NEW_LINE> clean_x = np.squeeze(x[ok_rows,:]) <NEW_LINE> return clean_x <NEW_LINE> <DEDENT> for i in range(x.shape[1]): <NEW_LINE> <INDENT> not_ok_rows = np.where(offend_mat[:,i] == 1) <NEW_LINE> if(mode == 'mean'): <NEW_LINE> <INDENT> this_val = np.mean(x[offend_mat[:,i] == 0,i]) <NEW_LINE> <DEDENT> elif(mode == 'median'): <NEW_LINE> <INDENT> this_val = np.median(x[offend_mat[:,i] == 0,i]) <NEW_LINE> <DEDENT> x[not_ok_rows,i] = this_val <NEW_LINE> <DEDENT> return x
Deal with offending values using following modes: 'del_row': Deletes rows 'mean': Replace with mean value of column 'median': Replace with median value of column .
625941c115fb5d323cde0a88
def get_latitude(filename, delimiter=','): <NEW_LINE> <INDENT> _log.debug("Looking for latitude field in '{f}'".format(f=filename)) <NEW_LINE> with open(filename, 'r') as f: <NEW_LINE> <INDENT> field_name = '' <NEW_LINE> while field_name.lower() not in STRING_HEADERS: <NEW_LINE> <INDENT> line = f.readline() <NEW_LINE> l = [i.strip().lower() for i in line.strip().split(delimiter)] <NEW_LINE> field_name = l[0] <NEW_LINE> if field_name == 'lat': <NEW_LINE> <INDENT> return float(l[1]) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> msg = "Latitude field not found in file '{f}'".format(f=filename) <NEW_LINE> _log.critical(msg) <NEW_LINE> raise ONEFluxError(msg)
Retrieves latitude from year 'input' formatted data file :param filename: Name of file to be loaded :type filename: str :param delimiter: Cell delimiter character(s) :type delimiter: str
625941c1fff4ab517eb2f3b6
def wait_for_click(self, timeout, locator): <NEW_LINE> <INDENT> WebDriverWait(self.driver, timeout).until(expected_conditions.element_to_be_clickable(locator))
显示等待方法封装 :param timeout: 显示等待时间 :param locator: touple,定位器 :return:
625941c1a219f33f346288e8
def lookupRenderer(self, cls): <NEW_LINE> <INDENT> if hasattr(cls, 'render'): <NEW_LINE> <INDENT> return self.render_renderable <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> return getattr(self, 'render_' + cls.__name__) <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> return self.render_other
Look up a rendering function for the given class
625941c14c3428357757c2a5
def assemble_output_file_name(prefix, index ='', suffix ='.txt', output_dir=OUT_DIR): <NEW_LINE> <INDENT> if suffix[0] != '.': <NEW_LINE> <INDENT> suffix = '.' + suffix <NEW_LINE> <DEDENT> if not os.path.exists(output_dir): <NEW_LINE> <INDENT> os.makedirs(output_dir) <NEW_LINE> print('# Created:', output_dir, file=sys.stderr) <NEW_LINE> <DEDENT> return os.path.join(output_dir, prefix + index + suffix)
Place the file in the proper path, adding a prefix & suffix. eg. root_dir/sub_dir/prefix + index + suffix The output files will add a prefix & suffix to the frame_index, e.g. ~/run/posteriors/regions_43.pkl
625941c10c0af96317bb8163
def before(*interceptors): <NEW_LINE> <INDENT> def add_interceptors(function): <NEW_LINE> <INDENT> return _add_interceptors(function, BEFORE_ATTRIBUTE, interceptors) <NEW_LINE> <DEDENT> return add_interceptors
Registers the given interceptors to be executed before the decorated test method: def interceptor (): pass @test @before(interceptor) def some_test (): pass You can use multiple before decorators and/ or pass in multiple values.
625941c1eab8aa0e5d26dad3
def test_sub_last_camped_old(self): <NEW_LINE> <INDENT> last_seen_datetime = (django.utils.timezone.now() - timedelta(seconds=65)) <NEW_LINE> self.sub.mark_camped(last_seen_datetime, self.bts1) <NEW_LINE> self.assertEqual(last_seen_datetime, self.sub.last_camped) <NEW_LINE> self.sub.mark_camped( last_seen_datetime - timedelta(seconds=100), self.bts2) <NEW_LINE> self.assertEqual(last_seen_datetime, self.sub.last_camped) <NEW_LINE> self.assertEqual(self.bts1, self.sub.bts)
Tests to see if we will ignore an older incoming LUR.
625941c11f037a2d8b94617a
def update_stock_data_from_yfinance_by_date(symbols, stock_data_path, file_format='.csv', required_date=None, is_print=False, is_return=False, is_save=True): <NEW_LINE> <INDENT> if required_date is None: <NEW_LINE> <INDENT> required_date = util.time_2_string(datetime.datetime.today()) <NEW_LINE> <DEDENT> data = {} <NEW_LINE> data_date = {'1991-01-01': []} <NEW_LINE> for symbol in symbols: <NEW_LINE> <INDENT> if os.path.exists(f'{stock_data_path}{symbol}{file_format}'): <NEW_LINE> <INDENT> tmp_data = load_stock_data(file_path=stock_data_path, file_name=symbol) <NEW_LINE> tmp_data_date = util.time_2_string(tmp_data.index.max()) <NEW_LINE> data[symbol] = tmp_data.copy() <NEW_LINE> if data_date.get(tmp_data_date) is None: <NEW_LINE> <INDENT> data_date[tmp_data_date] = [symbol] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> data_date[tmp_data_date].append(symbol) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> data[symbol] = pd.DataFrame() <NEW_LINE> data_date['1991-01-01'].append(symbol) <NEW_LINE> <DEDENT> <DEDENT> download_info = '' <NEW_LINE> for d in data_date.keys(): <NEW_LINE> <INDENT> tmp_symbols = data_date[d] <NEW_LINE> if len(tmp_symbols) == 0: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> tmp_batch_data = yf.download(tickers=tmp_symbols, start=d, interval='1d', group_by='ticker', actions=True) <NEW_LINE> download_info += f'{tmp_symbols} updated from {d}\n' <NEW_LINE> if len(tmp_symbols) == 1: <NEW_LINE> <INDENT> tmp_batch_data = {tmp_symbols[0]: tmp_batch_data} <NEW_LINE> <DEDENT> for symbol in tmp_symbols: <NEW_LINE> <INDENT> new_data = tmp_batch_data[symbol].copy() <NEW_LINE> new_data = post_process_download_data(df=new_data, source='yfinance') <NEW_LINE> data[symbol] = data[symbol].append(new_data, sort=True) <NEW_LINE> data[symbol] = util.remove_duplicated_index(df=data[symbol], keep='last').dropna() <NEW_LINE> <DEDENT> <DEDENT> if is_print: <NEW_LINE> <INDENT> print(download_info) <NEW_LINE> <DEDENT> if is_save: <NEW_LINE> <INDENT> for symbol 
in data.keys(): <NEW_LINE> <INDENT> save_stock_data(df=data[symbol], file_path=stock_data_path, file_name=symbol, file_format=file_format, reset_index=True, index=False) <NEW_LINE> <DEDENT> <DEDENT> if is_return: <NEW_LINE> <INDENT> return data
update local stock data from yahoo by date :param symbols: list of target symbols :param stock_data_path: where the local stock data files(.csv) stored :param file_format: default is .csv :param required_date: if the local data have already meet the required date, it won't be updated :param is_print: whether to print info when downloading :param is_return: whether to return the updated data :param is_save: whether to save the updated data to local files :returns: dataframe of latest stock data, per row each symbol :raises: none
625941c1b7558d58953c4e94
def move_player(self, direction: str): <NEW_LINE> <INDENT> if direction == "up": <NEW_LINE> <INDENT> self.y -= 1 <NEW_LINE> <DEDENT> elif direction == "down": <NEW_LINE> <INDENT> self.y += 1 <NEW_LINE> <DEDENT> elif direction == "right": <NEW_LINE> <INDENT> self.x += 1 <NEW_LINE> <DEDENT> elif direction == "left": <NEW_LINE> <INDENT> self.x -= 1
This method moves the player according to the arrow keys on a keyboard
625941c1ff9c53063f47c170
def test_probability_of_n_purchases_up_to_time_same_as_R_BTYD(self): <NEW_LINE> <INDENT> mbgf = estimation.ModifiedBetaGeoFitter() <NEW_LINE> mbgf.params_ = OrderedDict({'r':0.243, 'alpha':4.414, 'a':0.793, 'b':2.426}) <NEW_LINE> expected = 1.07869e-07 <NEW_LINE> actual = mbgf.probability_of_n_purchases_up_to_time(2, 10) <NEW_LINE> assert abs(expected - actual) < 10e-5 <NEW_LINE> expected = np.array([0.0019995214, 0.0015170236, 0.0011633150, 0.0009003148, 0.0007023638, 0.0005517902, 0.0004361913, 0.0003467171, 0.0002769613, 0.0002222260]) <NEW_LINE> actual = np.array([mbgf.probability_of_n_purchases_up_to_time(30, n) for n in range(11, 21)]) <NEW_LINE> npt.assert_allclose(expected, actual, rtol=0.5)
See https://cran.r-project.org/web/packages/BTYD/BTYD.pdf
625941c126068e7796caec56
def get_file_header(bmp_data): <NEW_LINE> <INDENT> file_header = BitMapFileHeader(bmp_data) <NEW_LINE> if file_header.type != 'BM': <NEW_LINE> <INDENT> raise InvalidFileHeader() <NEW_LINE> <DEDENT> return file_header
Get file header from BMP file data :param bmp_data: bytearray :return: BitMapFileHeader instance :raise InvalidFileHeader:
625941c1cc0a2c11143dce0c
def neighbours(cell): <NEW_LINE> <INDENT> pos = cell["pos"] <NEW_LINE> cellx = pos[0] <NEW_LINE> celly = pos[1] <NEW_LINE> cell_neighbours = [] <NEW_LINE> if cellx in [0, GRID_SIZE - 1] and celly in [0, GRID_SIZE - 1]: <NEW_LINE> <INDENT> cell_neighbours.append((1 if cellx == 0 else GRID_SIZE - 2, celly)) <NEW_LINE> cell_neighbours.append((cellx, 1 if celly == 0 else GRID_SIZE - 2)) <NEW_LINE> <DEDENT> elif cellx in [0, GRID_SIZE - 1]: <NEW_LINE> <INDENT> cell_neighbours.extend([(cellx, celly - 1), (cellx, celly + 1)]) <NEW_LINE> cell_neighbours.append((cellx + 1 if cellx == 0 else cellx - 1, celly)) <NEW_LINE> <DEDENT> elif celly in [0, GRID_SIZE - 1]: <NEW_LINE> <INDENT> cell_neighbours.extend([(cellx - 1, celly), (cellx + 1, celly)]) <NEW_LINE> cell_neighbours.append((cellx, celly + 1 if celly == 0 else celly - 1)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> cell_neighbours.extend([(cellx, celly - 1), (cellx, celly + 1)]) <NEW_LINE> cell_neighbours.extend([(cellx - 1, celly), (cellx + 1, celly)]) <NEW_LINE> <DEDENT> return cell_neighbours
Finds the non-diagonal neighbours of a cell, adjusted for edge/corner cases.
625941c13cc13d1c6d3c72f6
def write_cmake_macros(self, macros_file): <NEW_LINE> <INDENT> macros_printer = ScriptPrinter(macros_file) <NEW_LINE> header_lines = [ "CESM build flags for:", " Compiler = "+self.machine_dict["COMPILER"], " Machine = "+self.machine_dict["MACH"], " OS = "+self.machine_dict["OS"], ] <NEW_LINE> for line in header_lines: <NEW_LINE> <INDENT> macros_printer.comment(line) <NEW_LINE> <DEDENT> match = best_match(self.compiler_xml_tree, "compiler/PFUNIT_PATH", self.machine_dict) <NEW_LINE> if match is not None: <NEW_LINE> <INDENT> macros_printer.print_header("pFUnit location.") <NEW_LINE> macros_printer.print( "list(APPEND CMAKE_PREFIX_PATH "+match.text+")" ) <NEW_LINE> <DEDENT> normal_dict = self.machine_dict.copy() <NEW_LINE> normal_dict["DEBUG"] = "FALSE" <NEW_LINE> debug_dict = self.machine_dict.copy() <NEW_LINE> debug_dict["DEBUG"] = "TRUE" <NEW_LINE> def add_formatted_flags(flags_name, format): <NEW_LINE> <INDENT> paths = ["compiler/"+flags_name, "compiler/ADD_"+flags_name] <NEW_LINE> normal_matches = chain.from_iterable( all_matches(self.compiler_xml_tree, path, normal_dict) for path in paths ) <NEW_LINE> for match in normal_matches: <NEW_LINE> <INDENT> macros_printer.print(format("CESM", match.text)) <NEW_LINE> <DEDENT> debug_matches = chain.from_iterable( all_matches(self.compiler_xml_tree, path, debug_dict) for path in paths ) <NEW_LINE> for match in debug_matches: <NEW_LINE> <INDENT> macros_printer.print(format("CESM_DEBUG", match.text)) <NEW_LINE> <DEDENT> <DEDENT> macros_printer.print_header("CPP definitions.") <NEW_LINE> add_formatted_flags( "CPPDEFS", lambda b, m: "add_config_definitions("+b+" "+m+")" ) <NEW_LINE> def format_contiguous(build_type, match): <NEW_LINE> <INDENT> comma = "," if self.machine_dict["COMPILER"] != "ibm" else "\\\," <NEW_LINE> contig_def = "contiguous"+comma if match == "TRUE" else "" <NEW_LINE> return "add_config_definitions("+build_type+ " -DUSE_CONTIGUOUS="+contig_def+")" <NEW_LINE> <DEDENT> add_formatted_flags( 
"HAS_F2008_CONTIGUOUS", format_contiguous ) <NEW_LINE> macros_printer.print_header("Fortran flags.") <NEW_LINE> add_formatted_flags( "FFLAGS", lambda b, m: "add_flags(CMAKE_Fortran_FLAGS_"+b+" "+m+")" ) <NEW_LINE> macros_printer.print_header("C flags.") <NEW_LINE> add_formatted_flags( "CFLAGS", lambda b, m: "add_flags(CMAKE_C_FLAGS_"+b+" "+m+")" ) <NEW_LINE> macros_printer.print_header("Linker flags.") <NEW_LINE> add_formatted_flags( "LDFLAGS", lambda b, m: "add_flags(CMAKE_EXE_LINKER_FLAGS_"+b+" "+m+")" )
Write CMake macros file using config_compilers.xml Arguments: macros_file - File object to write to.
625941c1236d856c2ad44753
def __init__(self, phrase): <NEW_LINE> <INDENT> PhraseTrigger.__init__(self, phrase)
:param phrase: Phrase for trigger :type phrase: str
625941c163b5f9789fde7061
def createcurvefunc(points): <NEW_LINE> <INDENT> if points is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> numpoints=len(points) <NEW_LINE> if numpoints <2: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> xs,ys=zip(*points) <NEW_LINE> if numpoints<3: <NEW_LINE> <INDENT> kind="linear" <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> kind="cubic" <NEW_LINE> <DEDENT> return scipy.interpolate.interp1d(xs,ys,kind,bounds_error=False)
return a function dervied from control points
625941c15fcc89381b1e1638
def list_interfaces(self): <NEW_LINE> <INDENT> self.interfaces = self.conn.list_instance_interfaces(self.uuid) <NEW_LINE> return self.interfaces
Reload list of networks interfaces from the server. :rtype: list :return: list of VPSNetInterface objects
625941c110dbd63aa1bd2b20
def sum_function_values(f, start, stop): <NEW_LINE> <INDENT> S = 0 <NEW_LINE> for i in range(start, stop+1, 1): <NEW_LINE> <INDENT> S = S + f(i) <NEW_LINE> <DEDENT> return S
Sum up function values for integer arguments as f(start) + f(start+1) + f(start+2) + ... + f(stop)
625941c1293b9510aa2c3214
def __init__(self, review_request, request, last_visited=None, entry_classes=None): <NEW_LINE> <INDENT> self.review_request = review_request <NEW_LINE> self.request = request <NEW_LINE> self.last_visited = last_visited <NEW_LINE> self.entry_classes = entry_classes or list(entry_registry) <NEW_LINE> self.reviews = [] <NEW_LINE> self.changedescs = [] <NEW_LINE> self.diffsets = [] <NEW_LINE> self.commits_by_diffset_id = {} <NEW_LINE> self.diffsets_by_id = {} <NEW_LINE> self.all_status_updates = [] <NEW_LINE> self.latest_review_timestamp = None <NEW_LINE> self.latest_changedesc_timestamp = None <NEW_LINE> self.draft = None <NEW_LINE> self.initial_status_updates = [] <NEW_LINE> self.change_status_updates = {} <NEW_LINE> self.reviews_by_id = {} <NEW_LINE> self.latest_timestamps_by_review_id = {} <NEW_LINE> self.body_top_replies = defaultdict(list) <NEW_LINE> self.body_bottom_replies = defaultdict(list) <NEW_LINE> self.review_request_details = None <NEW_LINE> self.active_file_attachments = [] <NEW_LINE> self.all_file_attachments = [] <NEW_LINE> self.file_attachments_by_id = {} <NEW_LINE> self.active_screenshots = [] <NEW_LINE> self.all_comments = [] <NEW_LINE> self.all_screenshots = [] <NEW_LINE> self.screenshots_by_id = {} <NEW_LINE> self.review_comments = {} <NEW_LINE> self.draft_reply_comments = {} <NEW_LINE> self.draft_body_top_replies = defaultdict(list) <NEW_LINE> self.draft_body_bottom_replies = defaultdict(list) <NEW_LINE> self.issues = [] <NEW_LINE> self.issue_counts = { 'total': 0, 'open': 0, 'resolved': 0, 'dropped': 0, 'verifying': 0, } <NEW_LINE> self.status_updates_enabled = status_updates_feature.is_enabled( local_site=review_request.local_site) <NEW_LINE> self._needs_draft = False <NEW_LINE> self._needs_reviews = False <NEW_LINE> self._needs_changedescs = False <NEW_LINE> self._needs_status_updates = False <NEW_LINE> self._needs_file_attachments = False <NEW_LINE> self._needs_screenshots = False <NEW_LINE> for entry_cls in self.entry_classes: <NEW_LINE> 
<INDENT> self._needs_draft = self._needs_draft or entry_cls.needs_draft <NEW_LINE> self._needs_reviews = (self._needs_reviews or entry_cls.needs_reviews) <NEW_LINE> self._needs_changedescs = (self._needs_changedescs or entry_cls.needs_changedescs) <NEW_LINE> self._needs_status_updates = (self._needs_status_updates or entry_cls.needs_status_updates) <NEW_LINE> self._needs_file_attachments = (self._needs_file_attachments or entry_cls.needs_file_attachments) <NEW_LINE> self._needs_screenshots = (self._needs_screenshots or entry_cls.needs_screenshots)
Initialize the data object. Args: review_request (reviewboard.reviews.models.ReviewRequest): The review request. request (django.http.HttpRequest): The HTTP request object. last_visited (datetime.datetime, optional): The date/time when the user last visited the review request. entry_classes (list of BaseReviewRequestPageEntry, optional): The list of entry classes that should be used for data generation. If not provided, all registered entry classes will be used.
625941c10c0af96317bb8164
@app.task <NEW_LINE> def update_price(): <NEW_LINE> <INDENT> update_shares_price() <NEW_LINE> logger.info("Price updated")
price updated.
625941c1009cb60464c6332f
def dump(self): <NEW_LINE> <INDENT> return {"id": self.id(), "description": self._description, "source_node_id": self._source_node.id(), "source_port_id": self._source_port.id(), "destination_node_id": self._destination_node.id(), "destination_port_id": self._destination_port.id(), }
Returns a representation of this link. :returns: dictionary
625941c18e71fb1e9831d726
def get_host_architecture(): <NEW_LINE> <INDENT> return Architecture(os_ranks, machine_ranks)
Get an Architecture that matches implementations that will run on the host machine. @rtype: L{Architecture}
625941c13cc13d1c6d3c72f7
def hasRequiredElements(self): <NEW_LINE> <INDENT> return _libsedml.SedPlot3D_hasRequiredElements(self)
hasRequiredElements(SedPlot3D self) -> bool
625941c1e5267d203edcdc1b
def display_transaction_start(self, amount, options): <NEW_LINE> <INDENT> amount = float(amount) <NEW_LINE> operation_number = options.get('operation_number', '00000') <NEW_LINE> msg = ['C', operation_number, 1, amount, 15, 15, 1, 1, 1, 0, 0] <NEW_LINE> res = self.send(msg, blocking=True) <NEW_LINE> amount_in = self.value_float(res[1]) <NEW_LINE> amount_out = self.value_float(res[2]) <NEW_LINE> amount = amount_in - amount_out <NEW_LINE> return { 'amount_in': amount_in, 'amount_out': amount_out, 'amount': amount, }
Sets the machine to receive money from the customer. Result is {amount: 0.00, amount_in: 0.00, amount_out: 0.00}
625941c10a50d4780f666e0c
def test_get_eval_fields(self): <NEW_LINE> <INDENT> for value, result in [ ('test', set()), (Eval('foo'), {'foo'}), (Eval('_parent_foo'), {'foo'}), (Eval('foo.bar'), {'foo'}), ([Eval('foo'), Eval('bar')], {'foo', 'bar'}), ((Eval('foo'), Eval('bar')), {'foo', 'bar'}), ({'foo': Eval('bar')}, {'bar'}), (If(Eval('foo'), Eval('bar'), Eval('baz')), {'foo', 'bar', 'baz'}), ]: <NEW_LINE> <INDENT> with self.subTest(value=value): <NEW_LINE> <INDENT> self.assertEqual(fields.get_eval_fields(value), result)
Test get_eval_fields
625941c166656f66f7cbc126
def convert_record(self, old_record): <NEW_LINE> <INDENT> if sys.version_info >= (2, 5): <NEW_LINE> <INDENT> optional_kwargs = {'func': getattr(old_record, 'func_name')} <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> optional_kwargs = {} <NEW_LINE> <DEDENT> record = logging.LogRecord(old_record.channel, self.convert_level(old_record.level), old_record.filename, old_record.lineno, old_record.message, (), old_record.exc_info, **optional_kwargs) <NEW_LINE> for key, value in old_record.extra.iteritems(): <NEW_LINE> <INDENT> record.__dict__.setdefault(key, value) <NEW_LINE> <DEDENT> record.created = self.convert_time(old_record.time) <NEW_LINE> return record
Converts a record from logbook to logging.
625941c132920d7e50b2814a
def test_get_admission_reasons(self): <NEW_LINE> <INDENT> pass
Tests whether the get_admission_reasons function properly returns :return:
625941c1cb5e8a47e48b7a29
def _add_exclude_filter(self, filter_index=False): <NEW_LINE> <INDENT> if filter_index is False: <NEW_LINE> <INDENT> filter_index = -1 <NEW_LINE> filter_select_menu = QMenu() <NEW_LINE> for index in self._filter_factory_order: <NEW_LINE> <INDENT> if index in ['message', 'location'] or not self.filter_factory[index][1] in [type(item) for sublist in self._exclude_filters for item in sublist]: <NEW_LINE> <INDENT> filter_select_menu.addAction(self.filter_factory[index][0]) <NEW_LINE> <DEDENT> <DEDENT> action = filter_select_menu.exec_(QCursor.pos()) <NEW_LINE> if action is None: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> for index in self._filter_factory_order: <NEW_LINE> <INDENT> if self.filter_factory[index][0] == action.text(): <NEW_LINE> <INDENT> filter_index = index <NEW_LINE> <DEDENT> <DEDENT> if filter_index == -1: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> <DEDENT> index = len(self._exclude_filters) <NEW_LINE> newfilter = self.filter_factory[filter_index][1]() <NEW_LINE> if len(self.filter_factory[filter_index]) >= 4: <NEW_LINE> <INDENT> newwidget = self.filter_factory[filter_index][2](newfilter, self._rospack, self.filter_factory[filter_index][3]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> newwidget = self.filter_factory[filter_index][2](newfilter, self._rospack) <NEW_LINE> <DEDENT> self._exclude_filters.append((newfilter, FilterWrapperWidget(newwidget, self.filter_factory[filter_index][0]), filter_index)) <NEW_LINE> self._proxy_model.add_exclude_filter(newfilter) <NEW_LINE> newfilter.filter_changed_signal.connect(self._proxy_model.handle_exclude_filters_changed) <NEW_LINE> self._exclude_filters[index][1].delete_button.clicked.connect(self._delete_exclude_filter) <NEW_LINE> self._model.rowsInserted.connect(self._exclude_filters[index][1].repopulate) <NEW_LINE> self.exclude_table.insertRow(index) <NEW_LINE> self.exclude_table.setCellWidget(index, 0, self._exclude_filters[index][1]) <NEW_LINE> self.exclude_table.resizeColumnsToContents() 
<NEW_LINE> self.exclude_table.resizeRowsToContents() <NEW_LINE> newfilter.filter_changed_signal.emit() <NEW_LINE> return index
:param filter_index: if false then this function shows a QMenu to allow the user to choose a type of message filter. ''bool'' OR :param filter_index: the index of the filter to be added, ''int'' :return: if a filter was added then the index is returned, ''int'' OR :return: if no filter was added then None is returned, ''NoneType''
625941c1be383301e01b5405
def _ThreadedOutputQueue(self, pipe, queue, lock): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> while True: <NEW_LINE> <INDENT> chunk = pipe.readline() <NEW_LINE> if not chunk: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> lock.acquire() <NEW_LINE> queue.append(chunk) <NEW_LINE> lock.release() <NEW_LINE> <DEDENT> <DEDENT> except: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> finally: <NEW_LINE> <INDENT> pipe.close()
Called from the thread to update an output (stdout, stderr) queue.
625941c145492302aab5e23d
@login_required <NEW_LINE> def get_userinfo(request): <NEW_LINE> <INDENT> if 'username' in request.session: <NEW_LINE> <INDENT> username = request.session['username'] <NEW_LINE> userid = request.session['userid'] <NEW_LINE> userinfo_dict = { 'username' : username, 'userid' : userid, } <NEW_LINE> return render(request, 'head.html', userinfo_dict)
@author: Xieyz @note: 获取用户信息 :param request: :return:
625941c14527f215b584c3d6
def snippet(func): <NEW_LINE> <INDENT> func._snippet = True <NEW_LINE> return func
Mark ``func`` as a snippet example function.
625941c12c8b7c6e89b3573e
def evaluate(self, inp, tar): <NEW_LINE> <INDENT> def split_batch(iterable, n=1): <NEW_LINE> <INDENT> l = len(iterable) <NEW_LINE> for ndx in range(0, l, n): <NEW_LINE> <INDENT> yield iterable[ndx:min(ndx + n, l)] <NEW_LINE> <DEDENT> <DEDENT> batch_size = BATCH_SIZE <NEW_LINE> inp_batch = split_batch(inp, batch_size) <NEW_LINE> tar_batch = split_batch(tar, batch_size) <NEW_LINE> test_loss = tf.keras.metrics.Mean(name='test_loss') <NEW_LINE> test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy') <NEW_LINE> test_loss.reset_states() <NEW_LINE> test_accuracy.reset_states() <NEW_LINE> for inp, tar in zip(inp_batch, tar_batch): <NEW_LINE> <INDENT> tar_inp = tar[:, :-1] <NEW_LINE> tar_real = tar[:, 1:] <NEW_LINE> enc_padding_mask, combined_mask, dec_padding_mask = create_masks(inp, tar_inp) <NEW_LINE> predictions, _ = self(inp, tar_inp, False, enc_padding_mask, combined_mask, dec_padding_mask) <NEW_LINE> loss = loss_function(tar_real, predictions) <NEW_LINE> test_loss(loss) <NEW_LINE> test_accuracy(tar_real, predictions) <NEW_LINE> <DEDENT> return test_loss.result().numpy(), test_accuracy.result().numpy()
test loss, acc 계산
625941c10a366e3fb873e794
def probability_joint(net, hypothesis): <NEW_LINE> <INDENT> prob = None <NEW_LINE> for var in hypothesis.keys(): <NEW_LINE> <INDENT> hypo = {var:hypothesis[var]} <NEW_LINE> givens = {key:hypothesis[key] for key in net.get_parents(var)} <NEW_LINE> if prob == None: prob = probability_lookup(net,hypo,givens) <NEW_LINE> else: prob *= probability_lookup(net,hypo,givens) <NEW_LINE> <DEDENT> return prob
Uses the chain rule to compute a joint probability
625941c1d164cc6175782cca
def run_ansible(self, playbook, repository=None, builder=None, start_image=None, vct_image=None, vct_cid=None, verbose=False): <NEW_LINE> <INDENT> if not builder and not start_image: <NEW_LINE> <INDENT> raise ValueError('At least 1 of "builder" or "start_image" ' 'must be defined') <NEW_LINE> <DEDENT> if builder and start_image: <NEW_LINE> <INDENT> raise ValueError('Only 1 of "builder" and "start_image" may ' 'be defined') <NEW_LINE> <DEDENT> repository = repository or playbook <NEW_LINE> if builder: <NEW_LINE> <INDENT> full_builder = 'ansible-%s' % builder <NEW_LINE> start_image = self.ensure_built(full_builder, verbose=verbose) <NEW_LINE> <DEDENT> history = self.client.history(start_image) <NEW_LINE> if len(history) > 120: <NEW_LINE> <INDENT> for base in history: <NEW_LINE> <INDENT> if base['CreatedBy'].startswith('/sync-and-build'): <NEW_LINE> <INDENT> start_image = base['Id'] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> with self.vct_container(image=vct_image, cid=vct_cid, verbose=verbose) as vct_state: <NEW_LINE> <INDENT> cmd = ['/sync-and-build', '%s.yml' % playbook] <NEW_LINE> with self.create_container(start_image, command=cmd) as cid: <NEW_LINE> <INDENT> output = deque(maxlen=20) <NEW_LINE> self.client.start(cid, volumes_from=[vct_state['Name']]) <NEW_LINE> for s in self.client.attach(cid, stream=True, logs=True): <NEW_LINE> <INDENT> for line in s.splitlines(): <NEW_LINE> <INDENT> output.append(line) <NEW_LINE> if verbose: <NEW_LINE> <INDENT> print('%s> %s' % (repository, line)) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> state = self.client.inspect_container(cid) <NEW_LINE> if state['State']['ExitCode']: <NEW_LINE> <INDENT> for line in output: <NEW_LINE> <INDENT> print('ERROR %s> %s' % (repository, line)) <NEW_LINE> <DEDENT> raise Exception('Ansible did not run on %s successfully' % repository) <NEW_LINE> <DEDENT> tag = str(uuid.uuid1()) <NEW_LINE> iid = self.client.commit(cid['Id'], repository=repository, tag=tag)['Id'] <NEW_LINE> iid = self.get_full_image(iid) 
<NEW_LINE> return iid, repository, tag
Create an image with the results of Ansible playbook execution. This function essentially does the following: 1. Obtain a starting image. 2. Create and start a container with the content of v-c-t mounted in that container. 3. Run the ansible playbook specified. 4. Tag the resulting image. You can think of this function as an alternative mechanism for building Docker images. Instead of Dockerfiles, we use Ansible to "provision" our containers. You can provision containers either from scratch or incrementally. To build from scratch, specify a ``builder``. This corresponds to a directory in v-c-t that contains a Dockerfile specifying how to install Ansible in an image. e.g. ``centos6`` will be expanded to ``builder-ansible-centos6``. To build incrementally, specify a ``start_image``. This is an existing Docker image. One of ``builder`` or ``start_image`` must be specified. Both cannot be specified.
625941c157b8e32f52483416
def recalc_torsions(self, settings): <NEW_LINE> <INDENT> self.n_torsions = len(self.tor_idx) <NEW_LINE> self.calc_torsions(settings, calc_for='mol1') <NEW_LINE> self.calc_torsions(settings, calc_for='mol2') <NEW_LINE> t1, t2 = np.copy(self.tor_deg_mol1), np.copy(self.tor_deg_mol2) <NEW_LINE> invert_at = np.where(np.abs(t2 - t1) > np.abs((180.0 - t2) - t1))[0] <NEW_LINE> t1[invert_at] -= 180.0 <NEW_LINE> self.tor_deg_mol1 = np.abs(t1)
(Re)calculates torsion angles
625941c1d7e4931a7ee9de99
def distinct(items, key=None): <NEW_LINE> <INDENT> assert key is None or callable(key) <NEW_LINE> seen = set() <NEW_LINE> results = [] <NEW_LINE> for item in items: <NEW_LINE> <INDENT> if key is None: <NEW_LINE> <INDENT> key_val = item <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> key_val = key(item) <NEW_LINE> <DEDENT> if key_val not in seen: <NEW_LINE> <INDENT> results.append(item) <NEW_LINE> seen.add(key_val) <NEW_LINE> <DEDENT> <DEDENT> return results
Return a list of the items in the same order as they first appear, except that later duplicates of the same value are removed. Items in the sequence must be hashable, or, if a key is provided, the return values of the key must be hashable. :param items: An iterable sequence of items. :param key: A function mapping the items to a comparison key. :return: A list containing only one of each distinct item.
625941c15fc7496912cc38fa
def aggregate(self, cell=None, aggregates=None, drilldown=None, split=None, order=None, page=None, page_size=None, **options): <NEW_LINE> <INDENT> if "measures" in options: <NEW_LINE> <INDENT> raise ArgumentError("measures in aggregate are depreciated") <NEW_LINE> <DEDENT> aggregates = self.prepare_aggregates(aggregates) <NEW_LINE> order = self.prepare_order(order, is_aggregate=True) <NEW_LINE> if cell is None: <NEW_LINE> <INDENT> cell = Cell(self.cube) <NEW_LINE> <DEDENT> drilldon = Drilldown(drilldown, cell) <NEW_LINE> result = self.provide_aggregate(cell, aggregates=aggregates, drilldown=drilldon, split=split, order=order, page=page, page_size=page_size, **options) <NEW_LINE> return result
Return aggregate of a cell. Arguments: * `cell` – cell to aggregate * `aggregates` - list of aggregate measures. By default all cube's aggregates are included in the result. * `drilldown` - dimensions and levels through which to drill-down * `split` – cell for alternate 'split' dimension * `order` – attribute order specification (see below) * `page` – page index when requesting paginated results * `page_size` – number of result items per page Drill down can be specified in two ways: as a list of dimensions or as a dictionary. If it is specified as list of dimensions, then cell is going to be drilled down on the next level of specified dimension. Say you have a cell for year 2010 and you want to drill down by months, then you specify ``drilldown = ["date"]``. If `drilldown` is a dictionary, then key is dimension or dimension name and value is last level to be drilled-down by. If the cell is at `year` level and drill down is: ``{ "date": "day" }`` then both `month` and `day` levels are added. If there are no more levels to be drilled down, an exception is raised. Say your model has three levels of the `date` dimension: `year`, `month`, `day` and you try to drill down by `date` at the next level then ``ValueError`` will be raised. Retruns a :class:`AggregationResult` object. Note: subclasses should implement `provide_aggregate()` method.
625941c18e7ae83300e4af48
def _RealGetContents(self): <NEW_LINE> <INDENT> fp = self.fp <NEW_LINE> try: <NEW_LINE> <INDENT> endrec = _EndRecData(fp) <NEW_LINE> <DEDENT> except IOError: <NEW_LINE> <INDENT> raise BadZipfile("File is not a zip file") <NEW_LINE> <DEDENT> if not endrec: <NEW_LINE> <INDENT> raise BadZipfile("File is not a zip file") <NEW_LINE> <DEDENT> if self.debug > 1: <NEW_LINE> <INDENT> print(endrec) <NEW_LINE> <DEDENT> size_cd = endrec[_ECD_SIZE] <NEW_LINE> offset_cd = endrec[_ECD_OFFSET] <NEW_LINE> self.comment = endrec[_ECD_COMMENT] <NEW_LINE> concat = endrec[_ECD_LOCATION] - size_cd - offset_cd <NEW_LINE> if endrec[_ECD_SIGNATURE] == stringEndArchive64: <NEW_LINE> <INDENT> concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) <NEW_LINE> <DEDENT> if self.debug > 2: <NEW_LINE> <INDENT> inferred = concat + offset_cd <NEW_LINE> print("given, inferred, offset", offset_cd, inferred, concat) <NEW_LINE> <DEDENT> self.start_dir = offset_cd + concat <NEW_LINE> fp.seek(self.start_dir, 0) <NEW_LINE> data = fp.read(size_cd) <NEW_LINE> fp = io.StringIO(data) <NEW_LINE> total = 0 <NEW_LINE> while total < size_cd: <NEW_LINE> <INDENT> centdir = fp.read(sizeCentralDir) <NEW_LINE> if centdir[0:4] != stringCentralDir: <NEW_LINE> <INDENT> raise BadZipfile("Bad magic number for central directory") <NEW_LINE> <DEDENT> centdir = struct.unpack(structCentralDir, centdir) <NEW_LINE> if self.debug > 2: <NEW_LINE> <INDENT> print(centdir) <NEW_LINE> <DEDENT> filename = fp.read(centdir[_CD_FILENAME_LENGTH]) <NEW_LINE> x = ZipInfo(filename) <NEW_LINE> x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) <NEW_LINE> x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) <NEW_LINE> x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] <NEW_LINE> (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, x.CRC, x.compress_size, x.file_size) = centdir[1:12] <NEW_LINE> x.volume, x.internal_attr, x.external_attr = centdir[15:18] <NEW_LINE> x._raw_time = t <NEW_LINE> x.date_time 
= ((d >> 9) + 1980, (d >> 5) & 0xF, d & 0x1F, t >> 11, (t >> 5) & 0x3F, (t & 0x1F) * 2) <NEW_LINE> x._decodeExtra() <NEW_LINE> x.header_offset = x.header_offset + concat <NEW_LINE> x.filename = x._decodeFilename() <NEW_LINE> self.filelist.append(x) <NEW_LINE> self.NameToInfo[x.filename] = x <NEW_LINE> total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + centdir[_CD_EXTRA_FIELD_LENGTH] + centdir[_CD_COMMENT_LENGTH]) <NEW_LINE> if self.debug > 2: <NEW_LINE> <INDENT> print("total", total)
Read in the table of contents for the ZIP file.
625941c1be7bc26dc91cd580
def process_declaration(self, declaration): <NEW_LINE> <INDENT> self.backend.update_position(declaration[0].line, declaration[0].column) <NEW_LINE> self.backend.handle_decl_block_start() <NEW_LINE> try: <NEW_LINE> <INDENT> decl_def = pattern_match(['declare', '*?decls'], declaration) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> raise CompilerError.from_token( declaration[0], 'Declaration must be of the form (declare DECLARATIONS)') <NEW_LINE> <DEDENT> for element in decl_def['decls']: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> var_def = pattern_match(['~name', '?decl'], element) <NEW_LINE> identifier = var_def['name'] <NEW_LINE> if symbols.has_namespace(identifier): <NEW_LINE> <INDENT> raise CompilerError.from_token( declaration[0], 'Declaration identifiers cannot be namespaced') <NEW_LINE> <DEDENT> declaration = self.parse_decl_type(var_def['decl'], identifier) <NEW_LINE> decl_line = element[0].line <NEW_LINE> decl_col = element[0].column <NEW_LINE> self.backend.update_position(decl_line, decl_col) <NEW_LINE> self.backend.handle_decl(identifier, declaration) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> raise CompilerError.from_token( declaration[0], 'Declaration must be of the form (IDENTIFIER TYPE)') <NEW_LINE> <DEDENT> <DEDENT> self.backend.handle_decl_block_end()
Declarations have the syntax: (declare (name DECLARATION)*) Where DECLARATION can be one of: TYPE: Which indicates a variable declaration. (struct (IDENT TYPE)+): Which indicates a structured type definition. (function RETURN IDENT*): Which indicates a function declaration. (alias TYPE): Which indicates an alias to an existing type.
625941c13346ee7daa2b2ce7
def display(self, screen): <NEW_LINE> <INDENT> wall = pygame.image.load("pictures/wall.png").convert() <NEW_LINE> end = pygame.image.load("pictures/guard.png").convert_alpha() <NEW_LINE> bag_a = pygame.image.load("pictures/bag.png").convert() <NEW_LINE> bag_b = pygame.image.load("pictures/ba_g.png").convert() <NEW_LINE> loot_item = pygame.image.load("pictures/inventory.png").convert() <NEW_LINE> sprite_size = 40 <NEW_LINE> num_line = 0 <NEW_LINE> for line in self.structure: <NEW_LINE> <INDENT> num_case = 0 <NEW_LINE> for sprite in line: <NEW_LINE> <INDENT> x = num_case * sprite_size <NEW_LINE> y = num_line * sprite_size <NEW_LINE> if sprite == 'w': <NEW_LINE> <INDENT> screen.blit(wall, (x, y)) <NEW_LINE> <DEDENT> elif sprite == "e": <NEW_LINE> <INDENT> screen.blit(end, (x, y)) <NEW_LINE> <DEDENT> elif sprite == 'b': <NEW_LINE> <INDENT> screen.blit(bag_a, (x, y)) <NEW_LINE> <DEDENT> elif sprite == 'a': <NEW_LINE> <INDENT> screen.blit(bag_b, (x, y)) <NEW_LINE> <DEDENT> elif sprite == 'i': <NEW_LINE> <INDENT> screen.blit(loot_item, (x, y)) <NEW_LINE> <DEDENT> num_case += 1 <NEW_LINE> <DEDENT> num_line += 1
display level depends on structure of list returned by generate
625941c1462c4b4f79d1d64d
def check_guests_proc_scsi(self, info): <NEW_LINE> <INDENT> additional = 0 <NEW_LINE> missing = 0 <NEW_LINE> qtree_not_scsi = 0 <NEW_LINE> proc_not_scsi = 0 <NEW_LINE> _scsis = re.findall(r'Host:\s+(\w+)\s+Channel:\s+(\d+)\s+Id:\s+(\d+)' '\s+Lun:\s+(\d+)\n\s+Vendor:\s+([a-zA-Z0-9_-]+)' '\s+Model:.*\n.*Type:\s+([a-zA-Z0-9_-]+)', info) <NEW_LINE> disks = set() <NEW_LINE> for disk in self.disks: <NEW_LINE> <INDENT> if (disk.get_qtree()['type'].startswith('scsi') or disk.get_qtree()['type'].startswith('usb2')): <NEW_LINE> <INDENT> props = disk.get_qtree() <NEW_LINE> disks.add('%d-%d-%d' % (int(props.get('channel')), int(props.get('scsi-id')), int(props.get('lun')))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> qtree_not_scsi += 1 <NEW_LINE> <DEDENT> <DEDENT> scsis = set() <NEW_LINE> for scsi in _scsis: <NEW_LINE> <INDENT> if scsi[5] != 'CD-ROM': <NEW_LINE> <INDENT> scsis.add("%d-%d-%d" % (int(scsi[1]), int(scsi[2]), int(scsi[3]))) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> proc_not_scsi += 1 <NEW_LINE> <DEDENT> <DEDENT> for disk in disks.difference(scsis): <NEW_LINE> <INDENT> logging.error('Disk %s is in qtree but not in /proc/scsi/scsi.', disk) <NEW_LINE> additional += 1 <NEW_LINE> <DEDENT> for disk in scsis.difference(disks): <NEW_LINE> <INDENT> logging.error('Disk %s is in /proc/scsi/scsi but not in qtree.', disk) <NEW_LINE> missing += 1 <NEW_LINE> <DEDENT> return (additional, missing, qtree_not_scsi, proc_not_scsi)
Check info from guest's /proc/scsi/scsi file with qtree/block info @note: Not tested disks are of different type (virtio_blk, ...) @param info: contents of guest's /proc/scsi/scsi file @return: (#disks missing in guest os, #disks missing in qtree, #not tested disks from qtree, #not tested disks from guest)
625941c1a8ecb033257d304a
def astext(data): <NEW_LINE> <INDENT> if not isinstance(data, six.string_types): <NEW_LINE> <INDENT> text = repr(data) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> text = str(data) <NEW_LINE> <DEDENT> return text.replace('\n', '<NEWLINE>').replace(',', '<COMMA>')
Helper which casts model data to a string
625941c1cdde0d52a9e52fad
def get_dionysos_last_name(front_page = None, username = None, request = None): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return unicode(front_page[5].find_all('td')[1].contents[0]) <NEW_LINE> <DEDENT> except Exception as error: <NEW_LINE> <INDENT> logger_syslog.error(error, extra = log_extra_data(username, request)) <NEW_LINE> logger_mail.exception(error) <NEW_LINE> raise CronosError(u'Αδυναμία ανάκτησης Επωνύμου')
Retrieves student's last name from dionysos.teilar.gr
625941c15f7d997b87174a12
def __set_random_blocks(self): <NEW_LINE> <INDENT> len_blocks = int(self.rows * self.cols * self.percent_blocks / 100) <NEW_LINE> if not len_blocks: <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> points_blocks = self.points_random(len_blocks) <NEW_LINE> for point in points_blocks: <NEW_LINE> <INDENT> self.set_block(point=point)
======================================================================== Description: Set Random Blocks to the Grid by the Percent_Blocks. ========================================================================
625941c15510c4643540f366
def list(): <NEW_LINE> <INDENT> dir_path = os.path.dirname(os.path.realpath(__file__)) <NEW_LINE> propsDir = join(dir_path, '..', 'db_properties') <NEW_LINE> for f in listdir(propsDir): <NEW_LINE> <INDENT> onlyfiles = [f.replace('.properties', '') for f in listdir(propsDir) if (isfile(join(propsDir, f)) and f.endswith('.properties'))] <NEW_LINE> <DEDENT> return onlyfiles
Get available example banks
625941c166673b3332b9200d
def start_turn(self): <NEW_LINE> <INDENT> if len(self.user_alphabet) < len(self.alphabet_to_check_against): <NEW_LINE> <INDENT> self.add_char() <NEW_LINE> <DEDENT> elif len(self.user_alphabet) == len(self.alphabet_to_check_against): <NEW_LINE> <INDENT> print("Congratulations, you typed the whole alphabet!\nGoodbye!")
checks to make sure the game is not over by checking the alphabet_to_check_against length against the user_alphabet length. if the lengths are equal, this function congratulates the user and ends the game. Arguments: none
625941c1e76e3b2f99f3a78c
def __combine_AT(self, wf_interpolated_data, ro_interpolated_data, observation_data): <NEW_LINE> <INDENT> npAT = wf_interpolated_data.get_matrix_col('AT') <NEW_LINE> npATO = ro_interpolated_data.get_matrix_col('AT') <NEW_LINE> nLenATO = len(npATO) <NEW_LINE> nLenAT = len(npAT) - 3 * 3600 // 30 <NEW_LINE> npSwo = observation_data.get_attribute('AT_VALID_INTERPOLATED') <NEW_LINE> npSwo = npSwo[self.nDeltaIndice:nLenATO] <NEW_LINE> npCheck = numpy.where(npSwo == 0, 1, 0) <NEW_LINE> npBadIndices = (numpy.nonzero(npCheck))[0] <NEW_LINE> if len(npBadIndices) == 0: <NEW_LINE> <INDENT> for i in range(0, nLenATO - self.NTP): <NEW_LINE> <INDENT> npAT[i - self.NTP2] = npATO[i + self.NTP - 1] <NEW_LINE> <DEDENT> nFactor = nLenATO - self.NTP <NEW_LINE> fTaCorr = npAT[nLenATO - self.NTP - self.NTP2] - npATO[nLenATO - 1] <NEW_LINE> if fTaCorr != 0: <NEW_LINE> <INDENT> if self.NTP2 < 0: <NEW_LINE> <INDENT> nValueSup = nLenAT + self.NTP2 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> nValueSup = nLenAT <NEW_LINE> <DEDENT> for i in range(nLenATO - self.NTP, int(nValueSup)): <NEW_LINE> <INDENT> npAT[i - self.NTP2] = npAT[i - self.NTP2] - math.exp(-(i - nFactor) * metro_constant.fTimeStep * metro_constant.fConst) * fTaCorr <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> wf_interpolated_data.set_matrix_col('AT', npAT)
Name: __combine_AT Parameters:[I] metro_data wf_interpolated_data : interpolated forecast data. [I] metro_data wf_interpolated_data : interpolated observation data. Returns: None Functions Called: wf_interpolated_data.get_matrix_col observation_data.get_attribute numpy.where Description: Combine the air temperature of forecast and observation.
625941c1ad47b63b2c509efc
def render(self, namespace): <NEW_LINE> <INDENT> return namespace.get_macro_or_block(self._name)
Render a macro or block. Note: a macro is actually already rendered and will just return its rendered content.
625941c1851cf427c661a48e
def get_subcategories(subcategory): <NEW_LINE> <INDENT> url = 'https://www.manythings.org/vocabulary/lists/c/' + subcategory <NEW_LINE> response = simple_get(url) <NEW_LINE> if response is not None: <NEW_LINE> <INDENT> html = BeautifulSoup(response, 'html.parser') <NEW_LINE> names = list() <NEW_LINE> added = ["ESL / EFL Basic Vocabulary Word Lists","English Vocabulary Word Lists with Games, Puzzles and Quizzes", "Interesting Things for ESL Students", "Copyright", "Charles Kelly", "Lawrence Kelly"] <NEW_LINE> for li in html.select('li'): <NEW_LINE> <INDENT> for name in li.text.split('\n'): <NEW_LINE> <INDENT> if len(name) > 0: <NEW_LINE> <INDENT> names.append(name.strip()) <NEW_LINE> added.append(name.strip()) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return list(names) <NEW_LINE> <DEDENT> raise Exception('Error retrieving contents at {}'.format(url))
Downloads the page where the list of mathematicians is found and returns a list of strings, one per mathematician
625941c1a8370b771705281d
def show_stats(self, show=True): <NEW_LINE> <INDENT> html, stc = self.html_stats, self.stc_history <NEW_LINE> changed = False <NEW_LINE> focus = False <NEW_LINE> for i in [html, stc]: <NEW_LINE> <INDENT> focus = focus or (i.Shown and i.FindFocus() == i) <NEW_LINE> <DEDENT> if not stc.Shown != show: <NEW_LINE> <INDENT> stc.Show(not show) <NEW_LINE> changed = True <NEW_LINE> <DEDENT> if html.Shown != show: <NEW_LINE> <INDENT> html.Show(show) <NEW_LINE> changed = True <NEW_LINE> <DEDENT> if changed: <NEW_LINE> <INDENT> stc.ContainingSizer.Layout() <NEW_LINE> <DEDENT> if focus: <NEW_LINE> <INDENT> (html if show else stc).SetFocus() <NEW_LINE> <DEDENT> if show: <NEW_LINE> <INDENT> if hasattr(html, "_last_scroll_pos"): <NEW_LINE> <INDENT> html.Scroll(*html._last_scroll_pos) <NEW_LINE> <DEDENT> elif html.HasAnchor(html.OpenedAnchor): <NEW_LINE> <INDENT> html.ScrollToAnchor(html.OpenedAnchor) <NEW_LINE> <DEDENT> <DEDENT> self.tb_chat.ToggleTool(wx.ID_PROPERTIES, show)
Shows or hides the statistics window.
625941c160cbc95b062c64bf
def start_node(i, dirname, extra_args=None, rpchost=None): <NEW_LINE> <INDENT> datadir = os.path.join(dirname, "node"+str(i)) <NEW_LINE> args = [ os.getenv("BITCOIND", "extnd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ] <NEW_LINE> if extra_args is not None: args.extend(extra_args) <NEW_LINE> bitcoind_processes[i] = subprocess.Popen(args) <NEW_LINE> devnull = open("/dev/null", "w+") <NEW_LINE> subprocess.check_call([ os.getenv("BITCOINCLI", "extn-cli"), "-datadir="+datadir] + _rpchost_to_args(rpchost) + ["-rpcwait", "getblockcount"], stdout=devnull) <NEW_LINE> devnull.close() <NEW_LINE> url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i)) <NEW_LINE> proxy = AuthServiceProxy(url) <NEW_LINE> proxy.url = url <NEW_LINE> return proxy
Start a extnd and return RPC connection to it
625941c1a934411ee375160f
def update_current_tree_edges(self): <NEW_LINE> <INDENT> for i in self.current_tree.preorder_node_iter(): <NEW_LINE> <INDENT> i.edge.comments = i.orig.edge.comments <NEW_LINE> i.edge.distal_node = i.orig.edge.distal_node <NEW_LINE> i.edge.proximal_node = i.orig.edge.proximal_node <NEW_LINE> i.edge.distal_placements = i.orig.edge.distal_placements <NEW_LINE> i.edge.pendant_lengths = i.orig.edge.pendant_lengths
Anothe helper method for changing around the working copy of the tree. :return:
625941c1cc0a2c11143dce0d
def get_permission_object(self): <NEW_LINE> <INDENT> return self.gfk_object
Only users with add_comment permission for the gfk object are allowed to create new Comments
625941c13539df3088e2e2c8
def _add_error(self, test_name, error): <NEW_LINE> <INDENT> time_taken = self._time_taken() <NEW_LINE> self._results["summary"]["errors"] += 1 <NEW_LINE> length = len(test_name) + _ERROR_LENGTH <NEW_LINE> msg = "=" * length + f"\nERROR: {test_name}\n" + "-" * length <NEW_LINE> log_msg = j.core.tools.log("{YELLOW}%s" % msg, stdout=False) <NEW_LINE> str_msg = j.core.tools.log2str(log_msg) <NEW_LINE> log_error = j.core.tools.log("", exception=error, stdout=False) <NEW_LINE> str_error = j.core.tools.log2str(log_error) <NEW_LINE> trace_back = traceback.format_exc() <NEW_LINE> result = { "name": test_name, "traceback": trace_back, "msg": str_msg, "error": str_error, "status": "error", "time": time_taken, } <NEW_LINE> self._results["testcases"].append(result) <NEW_LINE> print("error\n")
Add a errored test. :param error: test exception error.
625941c14e696a04525c93c9
def minWindow(self, s, t): <NEW_LINE> <INDENT> from collections import deque <NEW_LINE> def check(d): <NEW_LINE> <INDENT> for _ in d: <NEW_LINE> <INDENT> if d[_] > 0: return False <NEW_LINE> <DEDENT> return True <NEW_LINE> <DEDENT> d = dict() <NEW_LINE> total = len(t) <NEW_LINE> for x in t: <NEW_LINE> <INDENT> if x not in d: d[x] = 1 <NEW_LINE> else: d[x] += 1 <NEW_LINE> <DEDENT> q = deque() <NEW_LINE> ret = None <NEW_LINE> for x in range(len(s)): <NEW_LINE> <INDENT> if s[x] in d: <NEW_LINE> <INDENT> d[s[x]] -= 1 <NEW_LINE> q.append(x) <NEW_LINE> if check(d): <NEW_LINE> <INDENT> if not ret or x - q[0] < ret[1] - ret[0]: <NEW_LINE> <INDENT> ret = (q[0], x) <NEW_LINE> <DEDENT> while d[s[q[0]]] < 0: <NEW_LINE> <INDENT> d[s[q[0]]] += 1 <NEW_LINE> q.popleft() <NEW_LINE> if x - q[0] < ret[1] - ret[0]: <NEW_LINE> <INDENT> ret = (q[0], x) <NEW_LINE> <DEDENT> <DEDENT> d[s[q[0]]] += 1 <NEW_LINE> q.popleft() <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return "" if not ret else s[ret[0]: ret[1] + 1]
:type s: str :type t: str :rtype: str
625941c1f9cc0f698b14057a
def yExperiment2(a1, a2, a3, sigma, x): <NEW_LINE> <INDENT> N = len(x) <NEW_LINE> r = numpy.random.randn(N) <NEW_LINE> y = a1 + a2 * x + a3 * x * x + sigma * r <NEW_LINE> return y
return the experimental data in a quadratic + random fashion, with a1, a2, a3 the coefficients of the quadratic and sigma is the error. This will be poorly matched to a linear fit for a3 != 0
625941c107d97122c4178803
def checked_http(args, kwargs, method): <NEW_LINE> <INDENT> for tries in xrange(RETRY_MAX): <NEW_LINE> <INDENT> r = robust_http(args, kwargs, method) <NEW_LINE> if 200 <= r.status_code < 300: <NEW_LINE> <INDENT> return r <NEW_LINE> <DEDENT> if 400 <= r.status_code < 500: <NEW_LINE> <INDENT> log.info("Try: {}, Code {}: Client Error", tries, r.status_code) <NEW_LINE> return r <NEW_LINE> <DEDENT> if 500 <= r.status_code < 600: <NEW_LINE> <INDENT> log.info("Try: {}, Code {}: Server Error", tries, r.status_code) <NEW_LINE> sleep(RETRY_AFTER) <NEW_LINE> continue <NEW_LINE> <DEDENT> log.warn("Try: {}, Code {}: Unexpected Error", tries, r.status_code) <NEW_LINE> sleep(RETRY_AFTER) <NEW_LINE> continue <NEW_LINE> <DEDENT> msg = "Maximum retry limit execeded, Exiting" <NEW_LINE> log.critical(msg) <NEW_LINE> raise Error(msg)
Check the status codes and repeat on server side errors.
625941c1ad47b63b2c509efd
def putasc(self, this, that): <NEW_LINE> <INDENT> thatDir, thatFile = os.path.split(that) <NEW_LINE> self.cd(thatDir) <NEW_LINE> f = open(this, "r") <NEW_LINE> logging.info("ftpstorasc %s" % that) <NEW_LINE> self.ftp.putfo(f, thatFile)
Put a text file to the server.
625941c14a966d76dd550f8a
def default_health_value(name, service, operation, failed_step): <NEW_LINE> <INDENT> result = False <NEW_LINE> if not ([event for event in health_values if event.metric == name]): <NEW_LINE> <INDENT> if failed_step is not None: <NEW_LINE> <INDENT> message = 'Did not attempt to %s due to timeout waiting for: %s' % (operation, failed_step) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> message = 'Timed out waiting for %s to complete' % operation <NEW_LINE> <DEDENT> health_values.append(Event(TIMESTAMP_MILLIS(), service, name, [message], False)) <NEW_LINE> result = True <NEW_LINE> <DEDENT> return result
Check health value
625941c1a934411ee3751610
def p_tattr_decl6(self, p): <NEW_LINE> <INDENT> p[0] = {p[1]:int(p[3])}
tattr : TIMEOUT EQUALS NUMBER
625941c107d97122c4178804
def extract_metrics(self, metrics_files): <NEW_LINE> <INDENT> extension_maps = dict( align_metrics = (self._parse_align_metrics, "AL"), dup_metrics = (self._parse_dup_metrics, "DUP"), hs_metrics = (self._parse_hybrid_metrics, "HS"), insert_metrics = (self._parse_insert_metrics, "INS"), ) <NEW_LINE> all_metrics = dict() <NEW_LINE> for fname in metrics_files: <NEW_LINE> <INDENT> ext = os.path.splitext(fname)[-1][1:] <NEW_LINE> try: <NEW_LINE> <INDENT> parse_fn, prefix = extension_maps[ext] <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> parse_fn = None <NEW_LINE> <DEDENT> if parse_fn: <NEW_LINE> <INDENT> with open(fname) as in_handle: <NEW_LINE> <INDENT> for key, val in parse_fn(in_handle).iteritems(): <NEW_LINE> <INDENT> if not key.startswith(prefix): <NEW_LINE> <INDENT> key = "%s_%s" % (prefix, key) <NEW_LINE> <DEDENT> all_metrics[key] = val <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> return all_metrics
Return summary information for a lane of metrics files.
625941c1fbf16365ca6f613c
def run(self,force=False): <NEW_LINE> <INDENT> content_server = "http://mdehaan.fedorapeople.org/loaders" <NEW_LINE> dest = "/var/lib/cobbler/loaders" <NEW_LINE> files = ( ( "%s/README" % content_server, "%s/README" % dest ), ( "%s/COPYING.elilo" % content_server, "%s/COPYING.elilo" % dest ), ( "%s/COPYING.yaboot" % content_server, "%s/COPYING.yaboot" % dest), ( "%s/COPYING.syslinux" % content_server, "%s/COPYING.syslinux" % dest), ( "%s/elilo-3.8-ia64.efi" % content_server, "%s/elilo-ia64.efi" % dest ), ( "%s/yaboot-1.3.14-12" % content_server, "%s/yaboot" % dest), ( "%s/pxelinux.0-3.61" % content_server, "%s/pxelinux.0" % dest), ( "%s/menu.c32-3.61" % content_server, "%s/menu.c32" % dest), ) <NEW_LINE> self.logger.info("downloading content required to netboot all arches") <NEW_LINE> for f in files: <NEW_LINE> <INDENT> src = f[0] <NEW_LINE> dst = f[1] <NEW_LINE> if os.path.exists(dst) and not force: <NEW_LINE> <INDENT> self.logger.info("path %s already exists, not overwriting existing content, use --force if you wish to update" % dst) <NEW_LINE> continue <NEW_LINE> <DEDENT> self.logger.info("downloading %s to %s" % (src,dst)) <NEW_LINE> urlgrabber.urlgrab(src,dst) <NEW_LINE> <DEDENT> return True
Download bootloader content for all of the latest bootloaders, since the user has chosen to not supply their own. You may ask "why not get this from yum", though Fedora has no IA64 repo, for instance, and we also want this to be able to work on Debian and further do not want folks to have to install a cross compiler. For those that don't like this approach they can still source their cross-arch bootloader content manually.
625941c17d847024c06be236
def main(): <NEW_LINE> <INDENT> with open("students.txt", "r") as f: <NEW_LINE> <INDENT> data = f.read() <NEW_LINE> pass
Context manager example
625941c1b545ff76a8913d93
def __init__(self,fcode,npoints,ntrials,lbound, ubound, nprocs, analytical_value): <NEW_LINE> <INDENT> self.fcode = fcode <NEW_LINE> self.f = miser_functions[self.fcode] <NEW_LINE> self.npoints = npoints <NEW_LINE> self.ntrials = ntrials <NEW_LINE> self.analytical_value = analytical_value <NEW_LINE> self.lbound = lbound <NEW_LINE> self.ubound = ubound <NEW_LINE> self.nprocs = nprocs <NEW_LINE> self.mean_sigma = None <NEW_LINE> self.sigma_sigma = None <NEW_LINE> self.seed = randint(0,10000)
Create a TestFixture. Arguments --------- fcode : str name of function in `miser_functions` dictionary. npoints : int number of MC integration points in each trial. ntrials : int number of trials to get approximate values of the mean error and standard deviation in error. analytical_value : float analytical value of the integral. lbound, ubound : list of floats lower (upper) bound for the integration nprocs : int number of processes to use in the integration. Example ------- To generate a test fixture, >>> fixture = TestRun("x**2", 1e4, 100, [0.], [1.], 1, 1./3.) >>> fixture.check_results_error() >>> fixture.mean_sigma # average error per trial 3.86390778798e-05 >>> fixture.sigma_sigma # error in error per trial 3.45037432946e-07 >>> with open("fixture.pkl") as f: pickle.dump(fixture,f) Then, to run a regression test, >>> import miser_data_generator >>> with open("fixture.pkl") as f: fixture = pickle.load(f) >>> fixture.check_trial_run()
625941c1a05bb46b383ec7a0
def count(self, sql): <NEW_LINE> <INDENT> sql = sql + ";select @@rowcount as count;" <NEW_LINE> try: <NEW_LINE> <INDENT> self.cursor.execute(sql) <NEW_LINE> self.cursor.nextset() <NEW_LINE> res = self.cursor.fetchone() <NEW_LINE> if res is None: <NEW_LINE> <INDENT> return 0 <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return res[0] <NEW_LINE> <DEDENT> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> print("统计查询受影响行数出现异常:%s" % e)
统计满足条件的sql结果集的行数
625941c194891a1f4081ba25
def ip_addrs_to_dict(self) -> Dict[int, List[Dict]]: <NEW_LINE> <INDENT> result = collections.defaultdict(list) <NEW_LINE> for addr in self._ipaddrs: <NEW_LINE> <INDENT> (address, prefixlength) = re.split( '/', ipaddress.IPv4Interface(f"{addr.ipAdEntAddr}/{addr.ipAdEntNetMask}").with_prefixlen) <NEW_LINE> result[addr.ipAdEntIfIndex].append({'protocol': 'ipv4', 'address': address, 'prefixLength': prefixlength, 'addressType': 'unknown'}) <NEW_LINE> <DEDENT> return result
Convert the list of ipAddr namedtuples to a dict whose keys are the ipAdEntIfIndex value, i.e. the IF-MIB index for the associated interface. Converts the netmask to a prefix-length, for consistency with the ipAddresses table. Intended as a helper function for combining addresses with interfaces.
625941c121a7993f00bc7c69
def set_to_zero(list_of_tensors_and_shapes, on_gpu=True): <NEW_LINE> <INDENT> if on_gpu: <NEW_LINE> <INDENT> updates = [] <NEW_LINE> for tensor, shape in list_of_tensors_and_shapes: <NEW_LINE> <INDENT> if np.sum(shape) == 1: <NEW_LINE> <INDENT> updates.append((tensor, 0)) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> updates.append((tensor, T.patternbroadcast(T.zeros(shape), [False] * tensor.ndim))) <NEW_LINE> <DEDENT> <DEDENT> return updates <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> updates = [] <NEW_LINE> for tensor, shape in list_of_tensors_and_shapes: <NEW_LINE> <INDENT> updates.append((tensor, np.zeros(shape, dtype=config_.floatX))) <NEW_LINE> <DEDENT> return updates
:param list_of_tensors_and_shapes: list of tuples of the form [(tensor1, shape1), ...]
625941c163d6d428bbe4446c
def logging_debug(log, message, *args, **kwargs): <NEW_LINE> <INDENT> if DEBUG: <NEW_LINE> <INDENT> message = str(message) <NEW_LINE> if args or kwargs: <NEW_LINE> <INDENT> message = message.format(*args, **kwargs) <NEW_LINE> <DEDENT> from google.cloud.ndb import context as context_module <NEW_LINE> context = context_module.get_context(False) <NEW_LINE> if context: <NEW_LINE> <INDENT> message = "{}: {}".format(context.id, message) <NEW_LINE> <DEDENT> log.debug(message)
Conditionally write to the debug log. In some Google App Engine environments, writing to the debug log is a significant performance hit. If the environment variable `NDB_DEBUG` is set to a "truthy" value, this function will call `log.debug(message, *args, **kwargs)`, otherwise this is a no-op.
625941c1cdde0d52a9e52fae
def download_groupcat_header_file(self, snap): <NEW_LINE> <INDENT> if 'groups_%03d/' % snap not in os.listdir(self.dir_output + '%i/' % snap): <NEW_LINE> <INDENT> os.mkdir(self.dir_output + '%i/' % snap + 'groups_%03d/' % snap) <NEW_LINE> <DEDENT> for chunk_num in range(self.num_groupcat_files): <NEW_LINE> <INDENT> if 'groups_%i.%i.hdf5' % (snap, chunk_num) in os.listdir(self.dir_output + '%i/' % snap + 'snapdir_%03d/' % snap): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> cutout = get(self.base_url + "files/groupcat-" + str(snap) + '.' + str(chunk_num) + '.hdf5') <NEW_LINE> os.rename(cutout, self.dir_output + '%i/' % snap + 'groups_%03d/' % snap + cutout) <NEW_LINE> if chunk_num % 1 == 0: <NEW_LINE> <INDENT> print('Groupcat chunk', chunk_num, 'out of', self.num_chunk_files_per_snapshot, 'completed.') <NEW_LINE> <DEDENT> <DEDENT> return
Download the group catalog files which act as header files for `snapshot.py` (see http://www.illustris-project.org/data/docs/scripts/ for info on this script). Usually the first group catalog file is all that is needed for black hole particle information.
625941c176e4537e8c3515ee
def get_price_before(stock, datetime): <NEW_LINE> <INDENT> return __get_price_near(stock, datetime, "<")
Get the price recorded on the most recent day before the given datetime that shows positive volume.
625941c1d18da76e23532450
def _hw_addr_aix(iface): <NEW_LINE> <INDENT> cmd = subprocess.Popen( "entstat -d {} | grep 'Hardware Address'".format(iface), shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, ).communicate()[0] <NEW_LINE> if cmd: <NEW_LINE> <INDENT> comps = cmd.split(" ") <NEW_LINE> if len(comps) == 3: <NEW_LINE> <INDENT> mac_addr = comps[2].strip("'").strip() <NEW_LINE> return mac_addr <NEW_LINE> <DEDENT> <DEDENT> error_msg = 'Interface "{}" either not available or does not contain a hardware address'.format( iface ) <NEW_LINE> log.error(error_msg) <NEW_LINE> return error_msg
Return the hardware address (a.k.a. MAC address) for a given interface on AIX. The MAC address is not available through the usual interface queries, so it is read from `entstat` output.
625941c116aa5153ce3623f6
def wrong_guesses_open(username): <NEW_LINE> <INDENT> with open("data/{0}_guesses.txt".format(username), "w") as guesses: <NEW_LINE> <INDENT> guesses.write("")
Opens Wrong Guesses File and Wipes for next Question
625941c1dc8b845886cb54b1
def load_game(self, filename): <NEW_LINE> <INDENT> if not filename.split('.')[1] == "yaml": <NEW_LINE> <INDENT> raise ImportError(f"filename extension incorrect {filename.split('.')[1]}") <NEW_LINE> <DEDENT> def remove_duplicates(a, b): <NEW_LINE> <INDENT> if not isinstance(a, list) and not isinstance(b, list): <NEW_LINE> <INDENT> raise TypeError("a and b must be lists") <NEW_LINE> <DEDENT> for item in a: <NEW_LINE> <INDENT> if item in b: <NEW_LINE> <INDENT> b.remove(item) <NEW_LINE> <DEDENT> <DEDENT> return b <NEW_LINE> <DEDENT> with open("./saves/" + filename) as file_: <NEW_LINE> <INDENT> yaml_data = load_yaml(file_.read()) <NEW_LINE> assert "player1" and "player2" and "puck" and "fullscreen" and "show_fps" and "options" in yaml_data.keys(), "Error importing the save, key datapoint missing" <NEW_LINE> player1, player2 = yaml_data["player1"], yaml_data["player2"] <NEW_LINE> puck = yaml_data["puck"] <NEW_LINE> self.window.set_fullscreen(yaml_data["fullscreen"]) <NEW_LINE> self.toggle_show_fps(yaml_data["show_fps"]) <NEW_LINE> self.options = yaml_data["options"] <NEW_LINE> self.player1.pos = player1["pos"] <NEW_LINE> self.player1.color = player1["color"] <NEW_LINE> self.player1.score = player1["score"] <NEW_LINE> self.player2.pos = player2["pos"] <NEW_LINE> self.player2.color = player2["color"] <NEW_LINE> self.player2.score = player2["score"] <NEW_LINE> self.puck.pos = puck["pos"] <NEW_LINE> self.puck.vel = puck["vel"] <NEW_LINE> self.puck.color = puck["color"] <NEW_LINE> <DEDENT> self.current_save = filename <NEW_LINE> self.unpause_game()
loads the game from file `filename`
625941c1287bf620b61d39e2
def getQDir(self, treeViewModel, treeViewIndex): <NEW_LINE> <INDENT> filters = QStringList() <NEW_LINE> for extension in constants.EXTENSION_LIST: <NEW_LINE> <INDENT> filters.append(QString('*.%s' % extension)) <NEW_LINE> <DEDENT> qdir = QDir(treeViewModel.filePath(treeViewIndex)) <NEW_LINE> qdir.setNameFilters(filters) <NEW_LINE> return qdir
called from _treeViewDirectoryClicked & _saveButtonClicked
625941c199cbb53fe6792b64
def compute_distances_two_loops(self, X): <NEW_LINE> <INDENT> num_test = X.shape[0] <NEW_LINE> num_train = self.X_train.shape[0] <NEW_LINE> dists = np.zeros((num_test, num_train)) <NEW_LINE> for i in range(num_test): <NEW_LINE> <INDENT> for j in range(num_train): <NEW_LINE> <INDENT> dists[i,j] = np.sqrt(np.sum(np.square(self.X_train[j] - X[i]))) <NEW_LINE> <DEDENT> <DEDENT> return dists
Compute the distance between each test point in X and each training point in self.X_train using a nested loop over both the training data and the test data. Inputs: - X: A numpy array of shape (num_test, D) containing test data. Returns: - dists: A numpy array of shape (num_test, num_train) where dists[i, j] is the Euclidean distance between the ith test point and the jth training point.
625941c145492302aab5e23e
def p_funIn(p): <NEW_LINE> <INDENT> pass
funIn : function | function funIn
625941c1009cb60464c63330
def main(argv=None): <NEW_LINE> <INDENT> parser = arg_parser(argv, globals()) <NEW_LINE> add_build_arguments(parser) <NEW_LINE> parser.add_argument('command', metavar='COMMAND', help='Command (build-and-test, build, all)') <NEW_LINE> parser.add_argument('files', metavar="FILES", default=".", help="Path to directory (or single file) of TSV files describing composite recipes.") <NEW_LINE> args = parser.parse_args() <NEW_LINE> for targets in generate_targets(args.files): <NEW_LINE> <INDENT> mull_targets(targets, **args_to_mull_targets_kwds(args))
Main entry-point for the CLI tool.
625941c163f4b57ef000109c
def open(self, file_name, **keywords): <NEW_LINE> <INDENT> self.file_name = file_name <NEW_LINE> self.keywords = keywords
open a file with unlimited keywords keywords are passed on to individual readers
625941c15166f23b2e1a50d6
def pi_hex_digits(n): <NEW_LINE> <INDENT> n -= 1 <NEW_LINE> a= [4,2,1,1] <NEW_LINE> j = [1,4,5,6] <NEW_LINE> x = + (a[0]*_series(j[0], n) - a[1]*_series(j[1], n) - a[2]*_series(j[2], n) - a[3]*_series(j[3], n)) & (16**(_dn(n)) -1) <NEW_LINE> s=("%014x" % x) <NEW_LINE> return s[0:14]
Returns a string containing 14 digits after the nth digit of pi in hex. The decimal point has been taken out of the number, so for n = 0 the string starts with 3, the first hex digit of pi. Examples ======== >>> from sympy.ntheory.bbp_pi import pi_hex_digits >>> pi_hex_digits(0) '3243f6a8885a30' >>> pi_hex_digits(10) '5a308d313198a2'
625941c1be8e80087fb20bc3
def load_data(self, path: Path) -> bool: <NEW_LINE> <INDENT> self._logger.debug("LOADING DATA") <NEW_LINE> if not path.exists(): <NEW_LINE> <INDENT> self._logger.debug(f'No report found for [{path.resolve()}]') <NEW_LINE> return True <NEW_LINE> <DEDENT> self._logger.debug("PATH ==> {}".format(path)) <NEW_LINE> data = read_json(path) <NEW_LINE> self._logger.debug("DATA ==> {}".format(data)) <NEW_LINE> if data is not False: <NEW_LINE> <INDENT> self._tool_report = data <NEW_LINE> <DEDENT> return data != False
Loads data and imports it from JSON format. We don't consider an undefined file as an error because some tools may not generate a report.
625941c1ac7a0e7691ed404d
def openXML(self): <NEW_LINE> <INDENT> self.statusbar.showMessage('Open Neurolucida XML file', 0) <NEW_LINE> fd = QFileDialog(self) <NEW_LINE> self.filename = fd.getOpenFileName(self, 'Open a Neurolucida XML file', '', 'XML file (*.xml);;All files (*)') <NEW_LINE> if type(self.filename) == type(()): <NEW_LINE> <INDENT> self.filename = self.filename[0] <NEW_LINE> <DEDENT> if self.filename: <NEW_LINE> <INDENT> self.lbFileName.setText(self.filename) <NEW_LINE> self.directoryName = '' <NEW_LINE> self.lbDirectoryName.setText('') <NEW_LINE> output_filename = str(self.filename) <NEW_LINE> output_filename = output_filename.replace('.xml', '.txt') <NEW_LINE> <DEDENT> self.statusbar.showMessage(self.filename, 0)
Open XML
625941c1b5575c28eb68df7c
def transcode(self, iterable=None, *, stream=False): <NEW_LINE> <INDENT> if iterable is None: iterable = () <NEW_LINE> def it(): <NEW_LINE> <INDENT> it = iterable <NEW_LINE> salt = self.salt <NEW_LINE> decoder = self.decoder <NEW_LINE> if decoder is None: <NEW_LINE> <INDENT> if salt is None: <NEW_LINE> <INDENT> it = (_ for _ in it) <NEW_LINE> salt = bytes(b for _, b in zip(range(16), iter_chain(self.cache, it))) <NEW_LINE> if len(salt) != 16: <NEW_LINE> <INDENT> if stream: <NEW_LINE> <INDENT> self.cache = salt <NEW_LINE> return <NEW_LINE> <DEDENT> raise ValueError("EOF reached before getting salt") <NEW_LINE> <DEDENT> if salt[:8] == b"Salted__": salt = salt[8:] <NEW_LINE> else: raise ValueError("invalid salt prefix") <NEW_LINE> <DEDENT> key, iv = [0]*32, [0]*16 <NEW_LINE> OPENSSL_EVP_BytesToKey(sha256_sumbytes, 32, salt, self.password, 1, key, 32, iv, 16) <NEW_LINE> key = self.uint32_bigendian_decode(key) <NEW_LINE> if self.iv is None: iv = self.uint32_bigendian_decode(iv) <NEW_LINE> else: iv = self.iv <NEW_LINE> decoder = AesCbcPkcs7Decoder(key, iv, cast=None) <NEW_LINE> yield from decoder.transcode(it, stream=stream) <NEW_LINE> self.cache = b"" <NEW_LINE> self.salt = salt <NEW_LINE> self.decoder = decoder <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> yield from decoder.transcode(iter_chain(self.cache, it), stream=stream) <NEW_LINE> <DEDENT> <DEDENT> return it() if self.cast is None else self.cast(it())
transcode(iterable, **opt) iterable: a byte iterable value (defaults to None) opt: stream => False: tells the transcoder that it is the last transcode operation; => True: more input may follow in later calls
625941c1236d856c2ad44754
def find_all_by_state(self, state) -> list: <NEW_LINE> <INDENT> return self.__service_registry.find_all_by_state(state)
wrapper to fetch all from registry by state.
625941c1d4950a0f3b08c2ce
def breadthFirstSearch(problem): <NEW_LINE> <INDENT> visited=set() <NEW_LINE> stack=util.Queue() <NEW_LINE> actions=[] <NEW_LINE> stack.push((problem.getStartState(),actions)) <NEW_LINE> while not stack.isEmpty(): <NEW_LINE> <INDENT> state,actions=stack.pop() <NEW_LINE> for newState,action,cost in problem.getSuccessors(state): <NEW_LINE> <INDENT> if newState not in visited: <NEW_LINE> <INDENT> if problem.isGoalState(newState): <NEW_LINE> <INDENT> return actions+[action] <NEW_LINE> <DEDENT> stack.push((newState,actions+[action])) <NEW_LINE> visited.add(newState) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return None
Search the shallowest nodes in the search tree first. [2nd Edition: p 73, 3rd Edition: p 82]
625941c14c3428357757c2a7
def to_choices_dict(choices): <NEW_LINE> <INDENT> ret = OrderedDict() <NEW_LINE> for choice in choices: <NEW_LINE> <INDENT> if (not isinstance(choice, (list, tuple))): <NEW_LINE> <INDENT> ret[choice] = choice <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> key, value = choice <NEW_LINE> if isinstance(value, (list, tuple)): <NEW_LINE> <INDENT> ret[key] = to_choices_dict(value) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ret[key] = value <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return ret
Convert choices into key/value dicts. pairwise_choices([1]) -> {1: 1} pairwise_choices([(1, '1st'), (2, '2nd')]) -> {1: '1st', 2: '2nd'} pairwise_choices([('Group', ((1, '1st'), 2))]) -> {'Group': {1: '1st', 2: '2nd'}}
625941c1ac7a0e7691ed404e
def find_new_matching(orig_section, instructions): <NEW_LINE> <INDENT> for start in range(len(instructions) - len(orig_section)): <NEW_LINE> <INDENT> indices, dup_section = zip( *islice( non_sentinel_instructions(instructions, start), len(orig_section), ) ) <NEW_LINE> if len(dup_section) < len(orig_section): <NEW_LINE> <INDENT> return <NEW_LINE> <DEDENT> if sections_match(orig_section, dup_section): <NEW_LINE> <INDENT> yield instructions[start:indices[-1] + 1]
Yields sections of `instructions` which match `orig_section`. The yielded sections include sentinel instructions, but these are ignored when checking for matches.
625941c1eab8aa0e5d26dad5
def example_reading_spec(self): <NEW_LINE> <INDENT> data_fields = { FeatureNames.SNIPPETS: tf.FixedLenFeature( shape=[self.max_search_results_length, self.max_snippet_length], dtype=tf.int64), FeatureNames.QUESTION: tf.FixedLenFeature( shape=[1, self.max_question_length], dtype=tf.int64), FeatureNames.ANSWER: tf.FixedLenFeature( shape=[1, self.max_answer_length], dtype=tf.int64)} <NEW_LINE> data_items_to_decoders = None <NEW_LINE> return (data_fields, data_items_to_decoders)
Specify the names and types of the features on disk. Returns: The names and type of features.
625941c1de87d2750b85fd0e
def normalization(): <NEW_LINE> <INDENT> mm = MinMaxScaler(feature_range=(2, 3)) <NEW_LINE> data = mm.fit_transform([[90, 2, 10, 40], [60, 4, 15, 45], [75, 3, 13, 46]]) <NEW_LINE> print(data) <NEW_LINE> return None
归一化处理 :return: None
625941c16fece00bbac2d6ba
def pop(self): <NEW_LINE> <INDENT> if self.is_empty(): <NEW_LINE> <INDENT> raise Empty("Stack is empty") <NEW_LINE> <DEDENT> return self._data.pop()
Remove and return the element from the top of the stack (i.e., LIFO). Raise Empty exception if the stack is empty.
625941c17d847024c06be237