Dataset columns: docstring (string, lengths 52-499), function (string, lengths 67-35.2k), __index_level_0__ (int64, 52.6k-1.16M)
Load a CRS object from an SR-ORG code, via spatialreference.org. Parses based on the proj4 representation. Arguments: - *code*: The SR-ORG code as an integer. Returns: - A CS instance of the indicated type.
def from_sr_code(code): # must go online (or look up local table) to get crs details code = str(code) proj4 = utils.crscode_to_string("sr-org", code, "proj4") crs = from_proj4(proj4) return crs
421,737
Parse crs as proj4 formatted string or dict and return the resulting crs object. Arguments: - *proj4*: The proj4 representation as a string or dict. - *strict* (optional): When True, names must match exactly, including case. Default is not strict (False). Returns: - A CS instance of the indicated type.
def from_proj4(proj4, strict=False): # parse arguments into components # use args to create crs # TODO: SLIGHTLY MESSY STILL, CLEANUP.. params = [] if isinstance(proj4, dict): # add leading + sign as expected below, proj4 dicts do not have that partdict = dict([('+'+k,v) for k,v in proj4.items()]) else: partdict = dict([part.split("=") for part in proj4.split() if len(part.split("=")) == 2 ]) # INIT CODES # eg, +init=EPSG:1234 if "+init" in partdict: # first, get the default proj4 string of the +init code codetype, code = partdict["+init"].split(":") codetype = codetype.upper() # normalize so lowercase "epsg:4326" also works if codetype == "EPSG": initproj4 = utils.crscode_to_string("epsg", code, "proj4") elif codetype == "ESRI": initproj4 = utils.crscode_to_string("esri", code, "proj4") else: raise FormatError("Unsupported +init code type: %r" % codetype) # make the default into param dict initpartdict = dict([part.split("=") for part in initproj4.split() if len(part.split("=")) == 2 ]) # override the default with any custom params specified along with the +init code initpartdict.update(partdict) # rerun from_proj4() again on the derived proj4 params as if it was not made with the +init code del initpartdict["+init"] string = " ".join("%s=%s" % (key,val) for key,val in initpartdict.items()) return from_proj4(string) # DATUM # datum param is required if "+datum" in partdict: # get predefined datum def datumname = partdict["+datum"] datumclass = datums.find(datumname, "proj4", strict) if datumclass: datum = datumclass() else: datum = datums.Unknown() else: datum = datums.Unknown() # ELLIPS # ellipse param is required ellips = None if "+ellps" in partdict: # get predefined ellips def ellipsname = partdict["+ellps"] ellipsclass = ellipsoids.find(ellipsname, "proj4", strict) if ellipsclass: ellips = ellipsclass() if not ellips: ellips = ellipsoids.Unknown() # TO WGS 84 COEFFS if "+towgs84" in partdict: coeffs = partdict["+towgs84"].split(",") datumshift = parameters.DatumShift(coeffs) # TODO: if no datum, use ellips + towgs84 params to create the correct datum # ...??
# COMBINE DATUM AND ELLIPS ## create datum and ellips param objs # +ellps loads all the required ellipsoid parameters # here we set or overwrite the parameters manually if "+a" in partdict: # semimajor radius ellips.semimaj_ax = parameters.SemiMajorRadius(partdict["+a"]) if "+b" in partdict: # semiminor radius ellips.semimin_ax = parameters.SemiMinorRadius(partdict["+b"]) if "+f" in partdict: # flattening ellips.flat = parameters.Flattening(partdict["+f"]) if "+rf" in partdict: # inverse flattening ellips.inv_flat = parameters.InverseFlattening(partdict["+rf"]) # check that ellipsoid is sufficiently defined if ellips.semimaj_ax and ellips.semimin_ax: # +a (semimajor radius) and +b (semiminor radius) is enough and can be used to calculate flattening # see https://en.wikipedia.org/wiki/Flattening pass elif ellips.semimaj_ax and ellips.inv_flat: # alternatively, +a (semimajor) and +rf (inverse flattening) is acceptable pass elif ellips.semimaj_ax and ellips.flat: # alternatively, +a (semimajor) and +f (flattening, the reciprocal of +rf) is acceptable pass else: raise FormatError("The format string is missing the required +ellps element, or the alternative manual specification of the +a with +b or +f/+rf elements: \n\t %s" % partdict) if "+datum" in partdict: datum.ellips = ellips elif "+towgs84" in partdict: datum.ellips = ellips datum.datumshift = datumshift else: datum.ellips = ellips # PRIME MERIDIAN # set default prime_mer = parameters.PrimeMeridian(0) # overwrite with user input if "+pm" in partdict: prime_mer = parameters.PrimeMeridian(partdict["+pm"]) # ANGULAR UNIT ## proj4 cannot set angular unit, so just set to default angunit = units.Degree() # GEOGCS (note, currently does not load axes) geogcs = containers.GeogCS("Unknown", datum, prime_mer, angunit) #, twin_ax) # PROJECTION if "+proj" in partdict: # get predefined proj def projname = partdict["+proj"] projclass = projections.find(projname, "proj4", strict) if projclass: proj = projclass() elif projname == "longlat": # proj4 special case, longlat as projection name means unprojected geogcs proj = None else: raise NotImplementedError("Unsupported projection: The specified projection name %r could not be found in the list of supported projections" % projname) else: raise FormatError("The format string is missing the required +proj element") if proj: # Because proj4 has no element hierarchy, automatic element find() would # ...not be very effective, as it would need a try-fail approach for each # ...element type (parameter, projection, datum, ellipsoid, unit). # ...Instead load each element individually.
# CENTRAL MERIDIAN if "+lon_0" in partdict: val = partdict["+lon_0"] obj = parameters.CentralMeridian(val) params.append(obj) # FALSE EASTING if "+x_0" in partdict: val = partdict["+x_0"] obj = parameters.FalseEasting(val) params.append(obj) # FALSE NORTHING if "+y_0" in partdict: val = partdict["+y_0"] obj = parameters.FalseNorthing(val) params.append(obj) # SCALING FACTOR if "+k_0" in partdict or "+k" in partdict: if "+k_0" in partdict: val = partdict["+k_0"] elif "+k" in partdict: val = partdict["+k"] obj = parameters.ScalingFactor(val) params.append(obj) # LATITUDE ORIGIN if "+lat_0" in partdict: val = partdict["+lat_0"] obj = parameters.LatitudeOrigin(val) params.append(obj) # LATITUDE TRUE SCALE if "+lat_ts" in partdict: val = partdict["+lat_ts"] obj = parameters.LatitudeTrueScale(val) params.append(obj) # LONGITUDE CENTER if "+lonc" in partdict: val = partdict["+lonc"] obj = parameters.LongitudeCenter(val) params.append(obj) # AZIMUTH if "+alpha" in partdict: val = partdict["+alpha"] obj = parameters.Azimuth(val) params.append(obj) # STD PARALLEL 1 if "+lat_1" in partdict: val = partdict["+lat_1"] obj = parameters.LatitudeFirstStndParallel(val) params.append(obj) # STD PARALLEL 2 if "+lat_2" in partdict: val = partdict["+lat_2"] obj = parameters.LatitudeSecondStndParallel(val) params.append(obj) # SATELLITE HEIGHT if "+h" in partdict: val = partdict["+h"] obj = parameters.SatelliteHeight(val) params.append(obj) # TILT ANGLE if "+tilt" in partdict: val = partdict["+tilt"] obj = parameters.TiltAngle(val) params.append(obj) # UNIT # get values if "+units" in partdict: # unit name takes precedence over to_meter unitname = partdict["+units"] unitclass = units.find(unitname, "proj4", strict) if unitclass: unit = unitclass() # takes meter multiplier from name, ignoring any custom meter multiplier else: raise FormatError("The specified unit name %r does not appear to be a valid unit name" % unitname) elif "+to_meter" in partdict: # no unit name specified, only to_meter conversion factor unit = units.Unknown() unit.metermultiplier.value = partdict["+to_meter"] else: # if nothing specified, defaults to meter unit = units.Meter() # PROJCS projcs = containers.ProjCS("Unknown", geogcs, proj, params, unit) return projcs else: # means projdef was None, ie unprojected longlat geogcs return geogcs
421,739
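A minimal usage sketch for the parser above; the `pycrs.parse` import path is an assumption about the package layout, only the function itself is taken from this record.

# Hedged usage sketch; the `pycrs.parse` import path is an assumption.
import pycrs

proj4 = "+proj=tmerc +datum=WGS84 +ellps=WGS84 +lon_0=9 +x_0=500000 +k_0=0.9996 +units=m"
crs = pycrs.parse.from_proj4(proj4)
print(type(crs).__name__)  # a ProjCS container wrapping datum, projection, params, unit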
Detect crs string format and parse into crs object with appropriate function. Arguments: - *text*: The crs text representation of unknown type. - *strict* (optional): When True, names must match exactly, including case. Default is not strict (False). Returns: - CRS object.
def from_unknown_text(text, strict=False): if text.startswith("+"): crs = from_proj4(text, strict) elif text.startswith(("PROJCS[","GEOGCS[")): crs = from_unknown_wkt(text, strict) #elif text.startswith("urn:"): # crs = from_ogc_urn(text, strict) elif text.startswith("EPSG:"): crs = from_epsg_code(text.split(":")[1]) elif text.startswith("ESRI:"): crs = from_esri_code(text.split(":")[1]) elif text.startswith("SR-ORG:"): crs = from_sr_code(text.split(":")[1]) else: raise FormatError("Could not auto-detect the type of crs format, make sure it is one of the supported formats") return crs
421,740
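A short sketch of the dispatch above, assuming `from_unknown_text` is in scope; note that the EPSG/ESRI/SR-ORG branches go online to resolve the code.

# Hedged sketch; the EPSG sample requires network access to resolve.
samples = [
    "+proj=longlat +datum=WGS84 +ellps=WGS84",  # routed to from_proj4
    "EPSG:4326",                                # routed to from_epsg_code
]
for text in samples:
    crs = from_unknown_text(text)
    print(text[:12], "->", type(crs).__name__)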
Distance unit parameter. Args: - **unitname**: A pycrs.elements.units.UnitName instance with the name given by each supported format. - **unitmultiplier**: A pycrs.elements.units.UnitMultiplier instance.
def __init__(self, **kwargs): self.unitname = kwargs.get('unitname', self.unitname) self.unitmultiplier = kwargs.get('unitmultiplier', self.unitmultiplier)
421,742
Returns a foreign function exported by `libdmtx`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: A `ctypes` foreign-function wrapper around the exported function.
def libdmtx_function(fname, restype, *args): prototype = CFUNCTYPE(restype, *args) return prototype((fname, load_libdmtx()))
421,899
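The `(fname, dll)` binding idiom above is plain `ctypes`; here is a self-contained illustration against libc, used as a stand-in since libdmtx may not be installed.

# Self-contained stand-in using libc; on Windows find_library('c') may
# return None, in which case substitute an available C runtime DLL.
from ctypes import CFUNCTYPE, CDLL, c_int
from ctypes.util import find_library

libc = CDLL(find_library('c'))
prototype = CFUNCTYPE(c_int, c_int)   # restype first, then argtypes
c_abs = prototype(('abs', libc))      # bind the exported symbol by name
assert c_abs(-5) == 5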
A context manager for `DmtxImage`, created and destroyed by `dmtxImageCreate` and `dmtxImageDestroy`. Args: pixels (:obj:): Image pixel data. width (int): Image width in pixels. height (int): Image height in pixels. pack (int): Pixel packing order. Yields: DmtxImage: The created image Raises: PyLibDMTXError: If the image could not be created.
@contextmanager def _image(pixels, width, height, pack): image = dmtxImageCreate(pixels, width, height, pack) if not image: raise PyLibDMTXError('Could not create image') else: try: yield image finally: dmtxImageDestroy(byref(image))
421,903
A context manager for `DmtxDecode`, created and destroyed by `dmtxDecodeCreate` and `dmtxDecodeDestroy`. Args: image (POINTER(DmtxImage)): shrink (int): Yields: POINTER(DmtxDecode): The created decoder Raises: PyLibDMTXError: If the decoder could not be created.
@contextmanager def _decoder(image, shrink): decoder = dmtxDecodeCreate(image, shrink) if not decoder: raise PyLibDMTXError('Could not create decoder') else: try: yield decoder finally: dmtxDecodeDestroy(byref(decoder))
421,904
A context manager for `DmtxRegion`, created and destroyed by `dmtxRegionFindNext` and `dmtxRegionDestroy`. Args: decoder (POINTER(DmtxDecode)): timeout (int or None): Yields: DmtxRegion: The next region or None, if all regions have been found.
@contextmanager def _region(decoder, timeout): region = dmtxRegionFindNext(decoder, timeout) try: yield region finally: if region: dmtxRegionDestroy(byref(region))
421,905
A context manager for `DmtxMessage`, created and destroyed by `dmtxDecodeMatrixRegion` and `dmtxMessageDestroy`. Args: decoder (POINTER(DmtxDecode)): region (POINTER(DmtxRegion)): corrections (int): Yields: DmtxMessage: The message.
@contextmanager def _decoded_matrix_region(decoder, region, corrections): message = dmtxDecodeMatrixRegion(decoder, region, corrections) try: yield message finally: if message: dmtxMessageDestroy(byref(message))
421,906
Decodes and returns the value in a region. Args: decoder (POINTER(DmtxDecode)): region (POINTER(DmtxRegion)): corrections (int): shrink (int): Returns: Decoded or None: The decoded value.
def _decode_region(decoder, region, corrections, shrink): with _decoded_matrix_region(decoder, region, corrections) as msg: if msg: # Coordinates p00 = DmtxVector2() p11 = DmtxVector2(1.0, 1.0) dmtxMatrix3VMultiplyBy( p00, region.contents.fit2raw ) dmtxMatrix3VMultiplyBy(p11, region.contents.fit2raw) x0 = int((shrink * p00.X) + 0.5) y0 = int((shrink * p00.Y) + 0.5) x1 = int((shrink * p11.X) + 0.5) y1 = int((shrink * p11.Y) + 0.5) return Decoded( string_at(msg.contents.output), Rect(x0, y0, x1 - x0, y1 - y0) ) else: return None
421,907
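A hedged sketch of how the four managers above compose into a decode loop; the helper names are taken from this section, and the plumbing mirrors but does not reproduce the library's actual decode entry point.

# Hedged composition sketch; assumes the helpers above are in scope.
def decode_all(pixels, width, height, pack, corrections, shrink, timeout=None):
    results = []
    with _image(pixels, width, height, pack) as image:
        with _decoder(image, shrink) as decoder:
            while True:
                with _region(decoder, timeout) as region:
                    if not region:  # all regions found
                        break
                    decoded = _decode_region(decoder, region, corrections, shrink)
                    if decoded:
                        results.append(decoded)
    return results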
pd.DataFrame.unstack adapter. Call the `df.unstack` method using the indicated level and afterwards join the column names using an underscore. Args: df (pandas.DataFrame): DataFrame to unstack. level (str, int or list): Level(s) of index to unstack, can pass level name reset_index (bool): Whether to reset the index after unstacking Returns: pandas.DataFrame: unstacked dataframe
def unstack(df, level=-1, reset_index=True): df = df.unstack(level=level) if reset_index: df = df.reset_index() df.columns = df.columns.map(_join_names) return df
422,259
Finds the next power of 2 value Args: x: Input value Returns: power_of_2: Next power of 2 value
def next_power_of_2(x): power_of_2 = 1 if x == 0 else 2 ** np.ceil(np.log2(x)) return power_of_2
422,260
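Note that for nonzero input the result is a NumPy float (from `np.ceil`), so callers that need an integer should cast; a quick check, assuming `next_power_of_2` is in scope:

# Quick check of the rounding behaviour; note the float results.
import numpy as np
for x in (0, 1, 5, 16, 17):
    print(x, next_power_of_2(x))  # 1, 1.0, 8.0, 16.0, 32.0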
Kaiser window design Args: window_length: Length of the window in number of samples beta: Beta value for Kaiser window design Returns: window: Window designed using the beta and length provided as inputs
def window_design(self, window_length, beta): self.window = np.kaiser(window_length, beta) return self.window
422,262
Defines a spectral mask based on training data Args: X: Training data
def fit(self, X): training_signal = X self.window_design(self.window_length, self.beta) if self.method == 'std_dev': self.fit_freq_std_dev(training_signal) elif self.method == 'min_max': self.fit_freq_min_max(training_signal) else: raise ValueError('Unknown method: {}'.format(self.method))
422,263
Defines a spectral mask based on training data using min and max values of each frequency component Args: training_signal: Training data
def fit_freq_min_max(self, training_signal): window_length = len(self.window) window_weight = sum(self.window) max_mask = np.zeros(int(window_length / 2) + 1) min_mask = np.full(int(window_length / 2) + 1, np.inf) # was np.zeros, which np.minimum could never lower for i in range(0, len(training_signal) - window_length - 1): rfft = np.fft.rfft(training_signal[i:i + window_length] * self.window) temp = np.abs(rfft) / window_weight max_mask = np.maximum(max_mask, temp) min_mask = np.minimum(min_mask, temp) self.mask_top = self.gain * max_mask self.mask_bottom = min_mask / self.gain
422,264
Defines a spectral mask based on training data using the standard deviation values of each frequency component Args: training_signal: Training data
def fit_freq_std_dev(self, training_signal): window_length = len(self.window) window_weight = sum(self.window) num_of_windows = len(training_signal) - window_length - 1 mean = np.zeros(int(window_length / 2) + 1) power = np.zeros(int(window_length / 2) + 1) for i in range(0, num_of_windows): rfft = np.fft.rfft(training_signal[i:i + window_length] * self.window) temp = np.abs(rfft) / window_weight mean = mean + temp power = power + np.power(temp, 2) mean = mean / num_of_windows power = power / num_of_windows std_dev = np.sqrt(power - np.power(mean, 2)) self.mask_top = mean + self.gain * std_dev self.mask_bottom = np.maximum(mean - self.gain * std_dev, np.zeros(int(window_length / 2) + 1))
422,265
Detects anomalies in telemetry data based on its power spectral density Args: X: Telemetry data Returns: anomalies: Data vector consisting of the anomalies detected in the telemetry data
def produce(self, X): signal = X window_length = len(self.window) anomalies = np.zeros(len(signal)) window_weight = sum(self.window) for i in range(0, len(signal) - window_length - 1): rfft = np.fft.rfft(signal[i:i + window_length] * self.window) sig_freq = np.abs(rfft) / window_weight anomalies[i] = 0 for m in range(0, int(window_length / 2) - 1): if ((sig_freq[m] > self.mask_top[m]) or (sig_freq[m] < self.mask_bottom[m])): anomalies[i] = 1 break return anomalies
422,266
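A hedged end-to-end sketch of the detector above; the class name `SpectralMask` and its constructor signature are assumptions inferred from the attributes the methods use (`window_length`, `beta`, `gain`, `method`).

# Hedged usage sketch; class name and constructor args are assumptions.
import numpy as np

detector = SpectralMask(method='std_dev', window_length=128, beta=14, gain=3)
train = np.sin(np.linspace(0, 200 * np.pi, 10000))  # clean periodic signal
test = train.copy()
test[5000:5050] += np.random.normal(0, 1.0, 50)     # injected anomaly

detector.fit(train)             # builds mask_top / mask_bottom per frequency bin
flags = detector.produce(test)  # 1 where a window's spectrum escapes the mask
print(int(flags.sum()), "anomalous windows flagged")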
Apply a function image by image. Args: X: data to transform, either a 2d matrix of flattened images or a 3d array of 2d images. function: a callable, or the fully qualified name of the function to apply. reshape_before: whether 1d array needs to be reshaped to a 2d image reshape_after: whether the returned values need to be reshaped back to a 1d array width: image width used to rebuild the 2d images. Required if the image is not square. height: image height used to rebuild the 2d images. Required if the image is not square.
def image_transform(X, function, reshape_before=False, reshape_after=False, width=None, height=None, **kwargs): if not callable(function): function = import_object(function) if not callable(function): raise ValueError("function must be a str or a callable") flat_image = len(X[0].shape) == 1 if reshape_before and flat_image: if not (width and height): side_length = math.sqrt(X.shape[1]) if side_length.is_integer(): side_length = int(side_length) width = side_length height = side_length else: raise ValueError("Image sizes must be given for non-square images") else: reshape_before = False new_X = [] for image in X: if reshape_before: image = image.reshape((width, height)) features = function( image, **kwargs ) if reshape_after: features = np.reshape(features, X.shape[1]) new_X.append(features) return np.array(new_X)
422,308
Compute an array of absolute errors comparing predictions and expected output. If smooth is True, apply EWMA to the resulting array of errors. Args: y (array): Ground truth. y_hat (array): Predictions array. smoothing_window (float): Size of the smoothing window, expressed as a proportion of the total length of y. smooth (bool): whether the returned errors should be smoothed with EWMA. Returns: (array): errors
def regression_errors(y, y_hat, smoothing_window=0.01, smooth=True): errors = np.abs(y - y_hat)[:, 0] if not smooth: return errors smoothing_window = int(smoothing_window * len(y)) return pd.Series(errors).ewm(span=smoothing_window).mean().values
422,334
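A tiny demonstration of the smoothing, assuming `regression_errors` is in scope; inputs are 2-D column vectors because the function indexes `[:, 0]`.

# Demonstrates how EWMA damps a single error spike.
import numpy as np

y = np.zeros((100, 1))
y_hat = np.zeros((100, 1))
y_hat[50] = 5.0                                   # one large prediction error
raw = regression_errors(y, y_hat, smooth=False)
smoothed = regression_errors(y, y_hat, smoothing_window=0.1)
print(raw.max(), round(smoothed.max(), 3))        # the spike is damped by EWMA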
Apply Gaussian blur to the given data. Args: X: data to blur ksize_width: Gaussian kernel width ksize_height: Gaussian kernel height sigma_x: Gaussian kernel standard deviation along X sigma_y: Gaussian kernel standard deviation along Y
def GaussianBlur(X, ksize_width, ksize_height, sigma_x, sigma_y): return image_transform( X, cv2.GaussianBlur, ksize=(ksize_width, ksize_height), sigmaX=sigma_x, sigmaY=sigma_y )
422,342
Calculates the forecasting error for two arrays of data. If smoothed errors are desired, applies a centered moving average. Args: y_hat (list): forecasted values. len(y_hat)==len(y_true). y_true (list): true values. len(y_hat)==len(y_true). window_size (int): number of batches per smoothing window. batch_size (int): number of values per batch. smoothing_percent (float): proportion used to size the smoothing window. smoothed (bool): whether the returned errors should be smoothed. Returns: (list): error residuals. Smoothed if specified by user.
def get_forecast_errors(y_hat, y_true, window_size=5, batch_size=30, smoothing_percent=0.05, smoothed=True): errors = [abs(y_h - y_t) for y_h, y_t in zip(y_hat, y_true)] if not smoothed: return errors historical_error_window = int(window_size * batch_size * smoothing_percent) moving_avg = [] for i in range(len(errors)): left_window = i - historical_error_window right_window = i + historical_error_window + 1 if left_window < 0: left_window = 0 if right_window > len(errors): right_window = len(errors) moving_avg.append(np.mean(errors[left_window:right_window])) return moving_avg
422,355
Extracts anomalies from the errors. Args: y_true (list): true values. smoothed_errors (list): smoothed error residuals. window_size (int): number of batches per window. batch_size (int): number of values per batch. error_buffer (int): buffer used when computing error thresholds. Returns: A tuple: (anomaly_sequences, anomalies_scores).
def extract_anomalies(y_true, smoothed_errors, window_size, batch_size, error_buffer): if len(y_true) <= batch_size * window_size: raise ValueError("Window size times batch size (%s) must be smaller than y_true (len=%s)." % (batch_size * window_size, len(y_true))) num_windows = int((len(y_true) - (batch_size * window_size)) / batch_size) anomalies_indices = [] for i in range(num_windows + 1): prev_index = i * batch_size curr_index = (window_size * batch_size) + (i * batch_size) if i == num_windows: # last window extends to the end of the series (was i == num_windows + 1, which is unreachable) curr_index = len(y_true) window_smoothed_errors = smoothed_errors[prev_index:curr_index] window_y_true = y_true[prev_index:curr_index] epsilon, sd_threshold = compute_threshold(window_smoothed_errors, error_buffer) window_anom_indices = get_anomalies( window_smoothed_errors, window_y_true, sd_threshold, i, anomalies_indices, error_buffer ) # get anomalies from inverse of smoothed errors # This was done in the implementation of NASA paper but # wasn't referenced in the paper # we get the inverse by flipping around the mean mu = np.mean(window_smoothed_errors) smoothed_errors_inv = [mu + (mu - e) for e in window_smoothed_errors] epsilon_inv, sd_inv = compute_threshold(smoothed_errors_inv, error_buffer) inv_anom_indices = get_anomalies( smoothed_errors_inv, window_y_true, sd_inv, i, anomalies_indices, len(y_true) ) anomalies_indices = list(set(anomalies_indices + inv_anom_indices)) anomalies_indices.extend([i_a + i * batch_size for i_a in window_anom_indices]) # group anomalies anomalies_indices = sorted(list(set(anomalies_indices))) anomalies_groups = [list(group) for group in mit.consecutive_groups(anomalies_indices)] anomaly_sequences = [(g[0], g[-1]) for g in anomalies_groups if not g[0] == g[-1]] # generate "scores" for anomalies based on the max distance from epsilon for each sequence anomalies_scores = [] for e_seq in anomaly_sequences: denominator = np.mean(smoothed_errors) + np.std(smoothed_errors) score = max([ abs(smoothed_errors[x] - epsilon) / denominator for x in range(e_seq[0], e_seq[1]) ]) anomalies_scores.append(score) return anomaly_sequences, anomalies_scores
422,356
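The grouping step near the end relies on `more_itertools.consecutive_groups` (imported as `mit`); a standalone demonstration of that step:

# Standalone demo of the index-grouping used above.
import more_itertools as mit

indices = [3, 4, 5, 9, 10, 17]
groups = [list(g) for g in mit.consecutive_groups(indices)]
print(groups)     # [[3, 4, 5], [9, 10], [17]]
sequences = [(g[0], g[-1]) for g in groups if g[0] != g[-1]]
print(sequences)  # [(3, 5), (9, 10)] - single-point runs are dropped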
Write the raw header content to the out stream Parameters: ---------- out : {file object} The output stream
def write_to(self, out): out.write(bytes(self.header)) out.write(self.record_data)
422,564
Instantiate a RawVLR by reading the content from the data stream Parameters: ---------- data_stream : {file object} The input stream Returns ------- RawVLR The RawVLR read
def read_from(cls, data_stream): raw_vlr = cls() header = RawVLRHeader.from_stream(data_stream) raw_vlr.header = header raw_vlr.record_data = data_stream.read(header.record_length_after_header) return raw_vlr
422,565
Unpack sub field using its mask Parameters: ---------- source_array : numpy.ndarray The source array mask : int Mask of the sub field (e.g. 0b00001111) to be extracted from the source array Returns ------- numpy.ndarray The sub field array
def unpack(source_array, mask, dtype=np.uint8): lsb = least_significant_bit(mask) return ((source_array & mask) >> lsb).astype(dtype)
422,697
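`least_significant_bit` is not shown in this section; below is a hedged minimal stand-in plus a round trip through `unpack`, pulling the low nibble out of packed LAS-style bytes.

# Hedged demo; this least_significant_bit is a minimal stand-in.
import numpy as np

def least_significant_bit(mask):
    return int(mask & -mask).bit_length() - 1  # index of the lowest set bit

packed = np.array([0b10110101, 0b01001111], dtype=np.uint8)
print(unpack(packed, 0b00001111))  # [ 5 15] - the low nibble of each byte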
Enables or disables BLE scanning Args: enable: boolean value to enable (True) or disable (False) scanner filter_duplicates: boolean value to enable/disable filter, that omits duplicated packets
def toggle_scan(self, enable, filter_duplicates=False): command = struct.pack(">BB", enable, filter_duplicates) self.bluez.hci_send_cmd(self.socket, OGF_LE_CTL, OCF_LE_SET_SCAN_ENABLE, command)
422,736
Create a new image by copying the image on a *color* background. Args: image (PIL.Image.Image): Image to copy color (tuple): Background color usually WHITE or BLACK Returns: PIL.Image.Image
def copy_image_on_background(image, color=WHITE): background = Image.new("RGB", image.size, color) background.paste(image, mask=image.split()[3]) return background
422,796
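The paste mask is band 3, so the input must be RGBA; a quick usage example, passing the background color explicitly since the `WHITE` constant is defined elsewhere:

# Usage example; requires an RGBA input since the mask is the alpha band.
from PIL import Image

rgba = Image.new("RGBA", (64, 64), (255, 0, 0, 128))  # semi-transparent red
flattened = copy_image_on_background(rgba, color=(255, 255, 255))
print(flattened.mode)  # RGB - alpha composited onto the white background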
Returns the value for the Authorization header. Args: user_payload (dict, required): A `dict` containing the required information to create the authentication token
def get_auth_header(self, user_payload): auth_token = self.get_auth_token(user_payload) return '{auth_header_prefix} {auth_token}'.format( auth_header_prefix=self.auth_header_prefix, auth_token=auth_token )
422,821
Create a JWT authentication token from ``user_payload`` Args: user_payload(dict, required): A `dict` containing required information to create authentication token
def get_auth_token(self, user_payload): now = datetime.utcnow() payload = { 'user': user_payload } if 'iat' in self.verify_claims: payload['iat'] = now if 'nbf' in self.verify_claims: payload['nbf'] = now + self.leeway if 'exp' in self.verify_claims: payload['exp'] = now + self.expiration_delta if self.audience is not None: payload['aud'] = self.audience if self.issuer is not None: payload['iss'] = self.issuer return jwt.encode( payload, self.secret_key, algorithm=self.algorithm, json_encoder=ExtendedJSONEncoder).decode('utf-8')
422,825
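A hedged sketch of the same claim set built with PyJWT directly; the secret, algorithm, and time deltas here are illustrative rather than the class defaults.

# Illustrative claim set; PyJWT >= 2 returns a str (no .decode needed).
from datetime import datetime, timedelta
import jwt

now = datetime.utcnow()
payload = {
    'user': {'id': 42},
    'iat': now,
    'nbf': now,
    'exp': now + timedelta(seconds=300),
}
token = jwt.encode(payload, 'secret-key', algorithm='HS256')
print(jwt.decode(token, 'secret-key', algorithms=['HS256'])['user'])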
Apply base theme to the application. Args: app (QApplication): QApplication instance.
def _apply_base_theme(app): if QT_VERSION < (5,): app.setStyle('plastique') else: app.setStyle('Fusion') with open(_STYLESHEET) as stylesheet: app.setStyleSheet(stylesheet.read())
422,848
Apply Dark Theme to the Qt application instance. Args: app (QApplication): QApplication instance.
def dark(app): _apply_base_theme(app) darkPalette = QPalette() # base darkPalette.setColor(QPalette.WindowText, QColor(180, 180, 180)) darkPalette.setColor(QPalette.Button, QColor(53, 53, 53)) darkPalette.setColor(QPalette.Light, QColor(180, 180, 180)) darkPalette.setColor(QPalette.Midlight, QColor(90, 90, 90)) darkPalette.setColor(QPalette.Dark, QColor(35, 35, 35)) darkPalette.setColor(QPalette.Text, QColor(180, 180, 180)) darkPalette.setColor(QPalette.BrightText, QColor(180, 180, 180)) darkPalette.setColor(QPalette.ButtonText, QColor(180, 180, 180)) darkPalette.setColor(QPalette.Base, QColor(42, 42, 42)) darkPalette.setColor(QPalette.Window, QColor(53, 53, 53)) darkPalette.setColor(QPalette.Shadow, QColor(20, 20, 20)) darkPalette.setColor(QPalette.Highlight, QColor(42, 130, 218)) darkPalette.setColor(QPalette.HighlightedText, QColor(180, 180, 180)) darkPalette.setColor(QPalette.Link, QColor(56, 252, 196)) darkPalette.setColor(QPalette.AlternateBase, QColor(66, 66, 66)) darkPalette.setColor(QPalette.ToolTipBase, QColor(53, 53, 53)) darkPalette.setColor(QPalette.ToolTipText, QColor(180, 180, 180)) # disabled darkPalette.setColor(QPalette.Disabled, QPalette.WindowText, QColor(127, 127, 127)) darkPalette.setColor(QPalette.Disabled, QPalette.Text, QColor(127, 127, 127)) darkPalette.setColor(QPalette.Disabled, QPalette.ButtonText, QColor(127, 127, 127)) darkPalette.setColor(QPalette.Disabled, QPalette.Highlight, QColor(80, 80, 80)) darkPalette.setColor(QPalette.Disabled, QPalette.HighlightedText, QColor(127, 127, 127)) app.setPalette(darkPalette)
422,849
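Usage sketch for the theme helpers above; only the call pattern is taken from the code, the Qt binding and `dark` being importable are assumptions.

# Hedged usage sketch; assumes dark() is importable and PyQt5 is installed.
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow

app = QApplication(sys.argv)
dark(app)                 # Fusion style + stylesheet + the palette above
window = QMainWindow()
window.show()
sys.exit(app.exec_())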
Adds appropriate sleep to avoid making too many calls. Args: num_calls (int): the number of calls which will be made
def make_calls(self, num_calls=1): self._cull() while self._outstanding_calls + num_calls > self._max_calls_per_second: time.sleep(0) # yield self._cull() self._call_times.append(self.CallRecord(time=time.time(), num_calls=num_calls)) self._outstanding_calls += num_calls
423,009
Issue multiple GET requests. Args: urls - A string URL or list of string URLs query_params - None, a dict, or a list of dicts representing the query params to_json - A boolean, should the responses be returned as JSON blobs Returns: a list of dicts if to_json is set, or of requests.response otherwise. Raises: InvalidRequestError - Can not decide how many requests to issue.
def multi_get(self, urls, query_params=None, to_json=True): return self._multi_request( MultiRequest._VERB_GET, urls, query_params, data=None, to_json=to_json, )
423,013
Helper method to create a single post/get request. Args: verb - MultiRequest._VERB_POST or MultiRequest._VERB_GET url - A string URL query_params - None or a dict data - None or a string or a dict send_as_file - A boolean, should the data be sent as a file. Returns: requests.PreparedRequest Raises: InvalidRequestError - if an invalid verb is passed in.
def _create_request(self, verb, url, query_params=None, data=None, send_as_file=False): # Prepare a set of kwargs to make it easier to avoid missing default params. kwargs = { 'headers': self._default_headers, 'params': query_params, 'timeout': self._req_timeout, } if MultiRequest._VERB_POST == verb: if send_as_file: kwargs['files'] = {'file': data} else: kwargs['data'] = data return PreparedRequest(partial(self._session.post, url, **kwargs), url) elif MultiRequest._VERB_GET == verb: return PreparedRequest(partial(self._session.get, url, **kwargs), url) else: raise InvalidRequestError('Invalid verb {0}'.format(verb))
423,015
Issues a batch of requests and waits for the responses. If some of the requests fail it will retry the failed ones up to `_max_retry` times. Args: requests - A list of requests Returns: A list of `requests.models.Response` objects Raises: InvalidRequestError - if any of the requests returns "403 Forbidden" response
def _wait_for_response(self, requests): failed_requests = [] responses_for_requests = OrderedDict.fromkeys(requests) for retry in range(self._max_retry): try: logging.debug('Try #{0}'.format(retry + 1)) self._availability_limiter.map_with_retries(requests, responses_for_requests) failed_requests = [] for request, response in responses_for_requests.items(): if self._drop_404s and response is not None and response.status_code == 404: logging.warning('Request to {0} failed with status code 404, dropping.'.format(request.url)) elif not response: failed_requests.append((request, response)) if not failed_requests: break logging.warning('Try #{0}. Expected {1} successful response(s) but only got {2}.'.format( retry + 1, len(requests), len(requests) - len(failed_requests), )) # retry only for the failed requests requests = [fr[0] for fr in failed_requests] except InvalidRequestError: raise except Exception as e: # log the exception for the informative purposes and pass to the next iteration logging.exception('Try #{0}. Exception occurred: {1}. Retrying.'.format(retry + 1, e)) pass if failed_requests: logging.warning('Still {0} failed request(s) after {1} retries:'.format( len(failed_requests), self._max_retry, )) for failed_request, failed_response in failed_requests: if failed_response is not None: # in case response text does contain some non-ascii characters failed_response_text = failed_response.text.encode('ascii', 'xmlcharrefreplace') logging.warning('Request to {0} failed with status code {1}. Response text: {2}'.format( failed_request.url, failed_response.status_code, failed_response_text, )) else: logging.warning('Request to {0} failed with None response.'.format(failed_request.url)) return list(responses_for_requests.values())
423,017
Converts response to JSON. If the response cannot be converted to JSON then `None` is returned. Args: response - An object of type `requests.models.Response` Returns: Response in JSON format if the response can be converted to JSON. `None` otherwise.
def _convert_to_json(self, response): try: return response.json() except ValueError: logging.warning('Expected response in JSON format from {0} but the actual response text is: {1}'.format( response.request.url, response.text, )) return None
423,018
Establishes basic HTTP params and loads a cache. Args: cache_file_name: String file name of cache. update_cache: Determines whether cache should be written out back to the disk when closing it. Default is `True`. req_timeout: Maximum number of seconds to wait without reading a response byte before deciding an error has occurred. Default is 90.0 seconds.
def __init__(self, cache_file_name=None, update_cache=True, req_timeout=90.0): # TODO - lookup request rate limit # By observation, ShadowServer can be quite slow, so give it 90 seconds before it times out. self._requests = MultiRequest(max_requests=2, req_timeout=req_timeout) # Create an ApiCache if instructed to self._cache = ApiCache(cache_file_name, update_cache) if cache_file_name else None
423,032
Test hashes against a list of known software applications. Known hashes will return a dictionary of information. Unknown hashes will return nothing. Args: hashes: list of string hashes. Returns: A dict with the hash as key and the shadowserver report as value.
def get_bin_test(self, hashes): all_responses = {} if self._cache: api_name = 'shadowserver-bin-test' all_responses = self._cache.bulk_lookup(api_name, hashes) hashes = [key for key in hashes if key not in all_responses.keys()] all_responses = dict([(key, val) for key, val in all_responses.items() if len(val) >= 2]) HASHES_PER_REQ = 25 hash_chunks = ['\n'.join(hashes[pos:pos + HASHES_PER_REQ]) for pos in range(0, len(hashes), HASHES_PER_REQ)] responses = self._requests.multi_post(self.BINTEST_URL, data=hash_chunks, to_json=False, send_as_file=True) for response in responses: if response is not None and 200 == response.status_code: response_lines = response.text.split('\n') for line in response_lines: # Set an initial val. val = {} # There is just a key, no value. This means the hash was unknown to ShadowServer. index_of_first_space = line.find(' ') if -1 == index_of_first_space: index_of_first_space = len(line) key = line[:index_of_first_space].lower() # The response only has a JSON body if the hash was known. json_text = line[index_of_first_space + 1:] if len(json_text): try: val = simplejson.loads(json_text) # A very short response indicates an error? if len(val.keys()) >= 2: all_responses[key] = val except ValueError: # Sometimes ShadowServer returns invalid data. Silently skip it. pass if self._cache: self._cache.cache_value(api_name, key, val) return all_responses
423,033
Retrieves the Alexa rankings for a set of domains. Args: domains: list of string domains. Returns: A dict with the domain as key and the Alexa ranking report as value.
def get_alexa_rankings(self, domains): api_name = 'alexa_rankings' (all_responses, domains) = self._bulk_cache_lookup(api_name, domains) responses = self._request_reports(domains) for domain, response in zip(domains, responses): xml_response = self._extract_response_xml(domain, response) if self._cache: self._cache.cache_value(api_name, domain, response) all_responses[domain] = xml_response return all_responses
423,035
Sends multiple requests for the domains to the Alexa ranking endpoint. Args: domains: list of string domains. Returns: A list of the responses.
def _request_reports(self, domains): params = [{'url': domain} for domain in domains] responses = self._requests.multi_get( self.BASE_URL, query_params=params, to_json=False) return responses
423,036
Extract XML content of an HTTP response into dictionary format. Args: domain: the domain the response belongs to. response: HTTP Response object. Returns: A dictionary: {alexa-ranking key : alexa-ranking value}.
def _extract_response_xml(self, domain, response): attributes = {} alexa_keys = {'POPULARITY': 'TEXT', 'REACH': 'RANK', 'RANK': 'DELTA'} try: xml_root = ET.fromstring(response._content) for xml_child in xml_root.findall('SD//'): if xml_child.tag in alexa_keys and \ alexa_keys[xml_child.tag] in xml_child.attrib: attributes[xml_child.tag.lower( )] = xml_child.attrib[alexa_keys[xml_child.tag]] except ParseError: # Skip ill-formatted XML and return no Alexa attributes pass attributes['domain'] = domain return {'attributes': attributes}
423,037
Performs a bulk cache lookup and returns a tuple with the results found and the keys missing in the cache. If the cache is not configured it will return an empty dictionary of found results and the initial list of keys. Args: api_name: a string name of the API. keys: an enumerable of string keys. Returns: A tuple: (responses found, missing keys).
def _bulk_cache_lookup(self, api_name, keys): if self._cache: responses = self._cache.bulk_lookup(api_name, keys) missing_keys = [key for key in keys if key not in responses.keys()] return (responses, missing_keys) return ({}, keys)
423,038
Opens the cache file and reads previous results. Args: cache_file_name: string file name update_cache: Specifies whether ApiCache should write out the cache file when closing it
def __init__(self, cache_file_name, update_cache=True): self._cache_file_name = cache_file_name self._cache = self._read_cache_from_file() self._update_cache = update_cache
423,039
Add the value of an API call to the cache. Args: api_name: a string name of the API. Keys and values are segmented by api_name. key: a string key for the specific call. value: the value of the call using the specific key
def cache_value(self, api_name, key, value): self._cache.setdefault(api_name, {}) self._cache[api_name][key] = value
423,043
Look up the value of an API call in the cache. Args: api_name: a string name of the API. Keys and values are segmented by api_name. key: a string key for the specific call. Returns: The cached value, or None if not present.
def lookup_value(self, api_name, key): if api_name in self._cache: return self._cache[api_name].get(key, None) return None
423,044
Perform lookup on an enumerable of keys. Args: api_name: a string name of the API. Keys and values are segmented by api_name. keys: an enumerable of string keys. Returns: A dict mapping each key found in the cache to its cached value; missing keys are omitted.
def bulk_lookup(self, api_name, keys): cached_data = {} for key in keys: value = self.lookup_value(api_name, key) if value is not None: cached_data[key] = value return cached_data
423,045
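A usage sketch tying the three cache methods together; the file name is illustrative.

# Usage sketch; file name is illustrative.
cache = ApiCache('/tmp/api_cache.json', update_cache=True)
cache.cache_value('virustotal-domain-reports', 'example.com', {'positives': 0})
print(cache.lookup_value('virustotal-domain-reports', 'example.com'))
# bulk_lookup returns only the keys that were found
print(cache.bulk_lookup('virustotal-domain-reports', ['example.com', 'missing.org']))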
Makes multiple GETs to an OpenDNS endpoint. Args: cache_api_name: string api_name for caching fmt_url_path: format string for building URL paths url_params: An enumerable of strings used in building URLs query_params - None / dict / list of dicts containing query params Returns: A dict of {url_param: api_result}
def _multi_get(self, cache_api_name, fmt_url_path, url_params, query_params=None): all_responses = {} if self._cache: all_responses = self._cache.bulk_lookup(cache_api_name, url_params) url_params = [key for key in url_params if key not in all_responses.keys()] if len(url_params): urls = self._to_urls(fmt_url_path, url_params) responses = self._requests.multi_get(urls, query_params) for url_param, response in zip(url_params, responses): if self._cache: self._cache.cache_value(cache_api_name, url_param, response) all_responses[url_param] = response return all_responses
423,052
Calls security end point and adds an 'is_suspicious' key to each response. Args: domains: An enumerable of strings Returns: A dict of {domain: security_result}
def security(self, domains): api_name = 'opendns-security' fmt_url_path = u'security/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
423,053
Calls WHOIS Email end point Args: emails: An enumerable of string Emails Returns: A dict of {email: domain_result}
def whois_emails(self, emails): api_name = 'opendns-whois-emails' fmt_url_path = u'whois/emails/{0}' return self._multi_get(api_name, fmt_url_path, emails)
423,054
Calls WHOIS Nameserver end point Args: nameservers: An enumerable of nameservers Returns: A dict of {nameserver: domain_result}
def whois_nameservers(self, nameservers): api_name = 'opendns-whois-nameservers' fmt_url_path = u'whois/nameservers/{0}' return self._multi_get(api_name, fmt_url_path, nameservers)
423,055
Calls WHOIS domain end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_result}
def whois_domains(self, domains): api_name = 'opendns-whois-domain' fmt_url_path = u'whois/{0}' return self._multi_get(api_name, fmt_url_path, domains)
423,056
Calls WHOIS domain history end point Args: domains: An enumerable of domains Returns: A dict of {domain: domain_history_result}
def whois_domains_history(self, domains): api_name = 'opendns-whois-domain-history' fmt_url_path = u'whois/{0}/history' return self._multi_get(api_name, fmt_url_path, domains)
423,057
Get the domains related to input domains. Args: domains: an enumerable of string domain names Returns: An enumerable of string domain names
def cooccurrences(self, domains): api_name = 'opendns-cooccurrences' fmt_url_path = u'recommendations/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
423,058
Get the date range when a domain was part of the OpenDNS block list. Args: domains: an enumerable of string domain names Returns: An enumerable of strings with period, category, and url
def domain_tag(self, domains): api_name = 'opendns-domain_tag' fmt_url_path = u'domains/{0}/latest_tags' return self._multi_get(api_name, fmt_url_path, domains)
423,059
Get the list of domain names that have been requested around the same time (up to 60 seconds before or after) as the given domain name. Args: domains: an enumerable of string domain names Returns: An enumerable of [domain name, scores]
def related_domains(self, domains): api_name = 'opendns-related_domains' fmt_url_path = u'links/name/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
423,060
Get the domains related to input ips. Args: ips: an enumerable of strings as ips Returns: An enumerable of resource records and features
def rr_history(self, ips): api_name = 'opendns-rr_history' fmt_url_path = u'dnsdb/ip/a/{0}.json' return self._multi_get(api_name, fmt_url_path, ips)
423,061
Get the domains related to input domains. Args: domains: an enumerable of strings as domains Returns: An enumerable of resource records and features
def dns_rr(self, domains): api_name = 'opendns-dns_rr' fmt_url_path = u'dnsdb/name/a/{0}.json' return self._multi_get(api_name, fmt_url_path, domains)
423,062
Get a list of malicious domains related to input ips. Args: ips: an enumerable of strings as ips Returns: An enumerable of strings for the malicious domains
def latest_malicious(self, ips): api_name = 'opendns-latest_malicious' fmt_url_path = u'ips/{0}/latest_domains' return self._multi_get(api_name, fmt_url_path, ips)
423,063
Get the information about a sample based on its hash. Args: hashes: an enumerable of strings as hashes Returns: An enumerable of arrays which contains the information about the original samples
def sample(self, hashes): api_name = 'opendns-sample' fmt_url_path = u'sample/{0}' return self._multi_get(api_name, fmt_url_path, hashes)
423,064
Performs pattern searches against the Investigate database. Args: patterns: An enumerable of RegEx domain patterns to search for start: How many days back results extend (max is 30) limit: Number of results to show (max is 1000) include_category: Include OpenDNS security categories Returns: An enumerable of matching domain strings
def search(self, patterns, start=30, limit=1000, include_category=False): api_name = 'opendns-patterns' fmt_url_path = u'search/{0}' start = '-{0}days'.format(start) include_category = str(include_category).lower() query_params = { 'start': start, 'limit': limit, 'includecategory': include_category, } return self._multi_get(api_name, fmt_url_path, patterns, query_params)
423,065
Performs Umbrella risk score analysis on the input domains Args: domains: an enumerable of domains Returns: An enumerable of associated domain risk scores
def risk_score(self, domains): api_name = 'opendns-risk_score' fmt_url_path = u'domains/risk-score/{0}' return self._multi_get(api_name, fmt_url_path, domains)
423,066
Aux function to extract all the API endpoint responses. Args: resources: list of string hashes. api_endpoint: endpoint path api_name: endpoint name Returns: A dict with the hash as key and the VT report as value.
def _extract_all_responses(self, resources, api_endpoint, api_name): all_responses, resources = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources) response_chunks = self._request_reports("resource", resource_chunks, api_endpoint) self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
423,067
Retrieves a report about the behaviour of a md5, sha1, and/or sha2 hash of a file when executed in a sandboxed environment (Cuckoo sandbox). Args: resources: list of string hashes.
def get_file_behaviour(self, resources): api_name = 'virustotal-file-behaviour' api_endpoint = 'file/behaviour' return self._extract_all_responses(resources, api_endpoint, api_name)
423,068
Retrieves a file from its md5, sha1, and/or sha2 hash. Args: resources: list of string hashes. Returns: a file download
def get_file_download(self, resources): api_name = 'virustotal-file-download' api_endpoint = 'file/download' return self._extract_all_responses(resources, api_endpoint, api_name)
423,069
Retrieves a report about the network traffic of a md5, sha1, and/or sha2 hash of file, when it is executed. Args: resources: list of string hashes.
def get_file_network_traffic(self, resources): api_name = 'virustotal-file-network-traffic' api_endpoint = 'file/network-traffic' return self._extract_all_responses(resources, api_endpoint, api_name)
423,070
Retrieves the most recent VT info for a set of domains. Args: domains: list of string domains. Returns: A dict with the domain as key and the VT report as value.
def get_domain_reports(self, domains): api_name = 'virustotal-domain-reports' (all_responses, domains) = self._bulk_cache_lookup(api_name, domains) responses = self._request_reports("domain", domains, 'domain/report') for domain, response in zip(domains, responses): if self._cache: self._cache.cache_value(api_name, domain, response) all_responses[domain] = response return all_responses
423,071
Retrieves a live feed with the latest URLs submitted to VT. Args: params: a dictionary with name and value for optional arguments Returns: A dict with the VT report.
def get_url_distribution(self, params=None): params = params or {} all_responses = {} api_name = 'virustotal-url-distribution' response_chunks = self._request_reports(list(params.keys()), list(params.values()), 'url/distribution') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
423,072
Retrieves a scan report on a given URL. Args: resources: list of URLs. Returns: A dict with the URL as key and the VT report as value.
def get_url_reports(self, resources): api_name = 'virustotal-url-reports' (all_responses, resources) = self._bulk_cache_lookup(api_name, resources) resource_chunks = self._prepare_resource_chunks(resources, '\n') response_chunks = self._request_reports("resource", resource_chunks, 'url/report') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
423,073
Retrieves the most recent VT info for a set of ips. Args: ips: list of IPs. Returns: A dict with the IP as key and the VT report as value.
def get_ip_reports(self, ips): api_name = 'virustotal-ip-address-reports' (all_responses, ips) = self._bulk_cache_lookup(api_name, ips) responses = self._request_reports("ip", ips, 'ip-address/report') for ip, response in zip(ips, responses): if self._cache: self._cache.cache_value(api_name, ip, response) all_responses[ip] = response return all_responses
423,074
Performs advanced search on samples, matching certain binary/metadata/detection criteria. Possible queries: file size, file type, first or last submission to VT, number of positives, binary content, etc. Args: query: dictionary with search arguments Example: 'query': 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"' Returns: A dict with the VT report.
def get_file_search(self, query): api_name = 'virustotal-file-search' (all_responses, query) = self._bulk_cache_lookup(api_name, query) response_chunks = self._request_reports("query", query, 'file/search') self._extract_response_chunks(all_responses, response_chunks, api_name) return all_responses
423,075
Retrieves file similarity clusters for a given time frame. Args: date: the specific date for which we want the clustering details. Example: 'date': '2013-09-10' Returns: A dict with the VT report.
def get_file_clusters(self, date): api_name = 'virustotal-file-clusters' (all_responses, resources) = self._bulk_cache_lookup(api_name, date) response = self._request_reports("date", date, 'file/clusters') self._extract_response_chunks(all_responses, response, api_name) return all_responses
423,076
Since some VirusTotal API methods accept multiple resources per call, this method prepares a list of concatenated resources according to the maximum number of resources per request. Args: resources: a list of the resources. resource_delim: a string used to separate the resources. Default value is a comma. Returns: A list of the concatenated resources.
def _prepare_resource_chunks(self, resources, resource_delim=','): return [self._prepare_resource_chunk(resources, resource_delim, pos) for pos in range(0, len(resources), self._resources_per_req)]
423,077
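`_prepare_resource_chunk` itself is not shown in this section; below is a hedged standalone equivalent of the chunking, using 25 resources per request as in `get_bin_test` earlier.

# Hedged standalone equivalent; the per-request limit of 25 mirrors
# HASHES_PER_REQ above, not necessarily self._resources_per_req.
def prepare_resource_chunks(resources, resource_delim=',', per_req=25):
    return [resource_delim.join(resources[pos:pos + per_req])
            for pos in range(0, len(resources), per_req)]

hashes = ['h%03d' % i for i in range(60)]
chunks = prepare_resource_chunks(hashes)
print(len(chunks))        # 3
print(chunks[0][:14])     # h000,h001,h002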
Sends multiple requests for the resources to a particular endpoint. Args: resource_param_name: a string name of the resource parameter. resources: list of the resources. endpoint_name: VirusTotal endpoint URL suffix. Returns: A list of the responses.
def _request_reports(self, resource_param_name, resources, endpoint_name): params = [{resource_param_name: resource, 'apikey': self._api_key} for resource in resources] return self._requests.multi_get(self.BASE_DOMAIN + endpoint_name, query_params=params)
423,079
Extracts and caches the responses from the response chunks, in the case of requests containing multiple concatenated resources. Extracted responses are added to the already cached responses passed in via the all_responses parameter. Args: all_responses: a dict of already cached responses. response_chunks: a list of response chunks. api_name: a string name of the API.
def _extract_response_chunks(self, all_responses, response_chunks, api_name): for response_chunk in response_chunks: if not isinstance(response_chunk, list): response_chunk = [response_chunk] for response in response_chunk: if not response: continue if self._cache: self._cache.cache_value(api_name, response['resource'], response) all_responses[response['resource']] = response
423,080
Configures the argument parser. Args: parser: The parser to configure
def get_parser(parser): parser.description = textwrap.dedent(__doc__).strip() # the original description literal was lost in extraction; the module docstring is assumed here parser.add_argument("locale", nargs="+", help="a locale to segment")
423,164
Create a new usage id from an XML dom node. Args: node (lxml.etree.Element): The DOM node to interpret. parent_id: The usage ID of the parent block id_generator (IdGenerator): The :class:`.IdGenerator` to use for creating ids
def _usage_id_from_node(self, node, parent_id, id_generator=None): if id_generator is not None: warnings.warn( "Passing an id_generator directly is deprecated " "in favor of constructing the Runtime with the id_generator", DeprecationWarning, stacklevel=3, ) id_generator = id_generator or self.id_generator block_type = node.tag # remove xblock-family from elements node.attrib.pop('xblock-family', None) # TODO: a way for this node to be a usage to an existing definition? def_id = id_generator.create_definition(block_type) usage_id = id_generator.create_usage(def_id) keys = ScopeIds(None, block_type, def_id, usage_id) block_class = self.mixologist.mix(self.load_block_type(block_type)) # pull the asides out of the xml payload aside_children = [] for child in node.iterchildren(): # get xblock-family from node xblock_family = child.attrib.pop('xblock-family', None) if xblock_family: xblock_family = self._family_id_to_superclass(xblock_family) if issubclass(xblock_family, XBlockAside): aside_children.append(child) # now process them & remove them from the xml payload for child in aside_children: self._aside_from_xml(child, def_id, usage_id, id_generator) node.remove(child) block = block_class.parse_xml(node, self, keys, id_generator) block.parent = parent_id block.save() return usage_id
424,085
Return instances for all of the asides that will decorate this `block`. Arguments: block (:class:`.XBlock`): The block to retrieve asides for. Returns: List of XBlockAside instances
def get_asides(self, block): aside_instances = [ self.get_aside_of_type(block, aside_type) for aside_type in self.applicable_aside_types(block) ] return [ aside_instance for aside_instance in aside_instances if aside_instance.should_apply_to_block(block) ]
424,097
Return the aside of the given aside_type which might be decorating this `block`. Arguments: block (:class:`.XBlock`): The block to retrieve asides for. aside_type (`str`): the type of the aside
def get_aside_of_type(self, block, aside_type): # TODO: This function will need to be extended if we want to allow: # a) XBlockAsides to statically indicated which types of blocks they can comment on # b) XBlockRuntimes to limit the selection of asides to a subset of the installed asides # c) Optimize by only loading asides that actually decorate a particular view if self.id_generator is None: raise Exception("Runtimes must be supplied with an IdGenerator to load XBlockAsides.") usage_id = block.scope_ids.usage_id aside_cls = self.load_aside_type(aside_type) definition_id = self.id_reader.get_definition_id(usage_id) aside_def_id, aside_usage_id = self.id_generator.create_aside(definition_id, usage_id, aside_type) scope_ids = ScopeIds(self.user_id, aside_type, aside_def_id, aside_usage_id) return aside_cls(runtime=self, scope_ids=scope_ids)
424,098
Emits completion event through Completion API. Unlike grading API, calling this method allows completion to go down - i.e. emitting a value of 0.0 on a previously completed block indicates that it is no longer considered complete. Arguments: completion_percent (float): Completion in range [0.0; 1.0] (inclusive), where 0.0 means the block is not completed, 1.0 means the block is fully completed. Returns: None
def emit_completion(self, completion_percent): completion_mode = XBlockCompletionMode.get_mode(self) if not self.has_custom_completion or completion_mode != XBlockCompletionMode.COMPLETABLE: raise AttributeError( "Using `emit_completion` requires `has_custom_completion == True` (was {}) " "and `completion_mode == 'completable'` (was {})".format( self.has_custom_completion, completion_mode, ) ) if completion_percent is None or not 0.0 <= completion_percent <= 1.0: raise ValueError("Completion percent must be in [0.0; 1.0] interval, {} given".format(completion_percent)) self.runtime.publish( self, 'completion', {'completion': completion_percent}, )
424,119
A view decorator to indicate that an xBlock view has support for the given functionalities. Arguments: functionalities: String identifiers for the functionalities of the view. For example: "multi_device".
def supports(cls, *functionalities): def _decorator(view): # pylint: disable=protected-access if not hasattr(view, "_supports"): view._supports = set() for functionality in functionalities: view._supports.add(functionality) return view return _decorator
424,152
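Usage sketch for the decorator, following the `@XBlock.supports(...)` idiom; the block and view names here are illustrative.

# Illustrative usage; assumes XBlock is importable from the xblock package.
from xblock.core import XBlock

class MyBlock(XBlock):
    @XBlock.supports("multi_device")
    def student_view(self, context=None):
        pass  # view body omitted

print(MyBlock.student_view._supports)  # {'multi_device'}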
Find and return a function object if one is an aside_view for the given view_name. Aside methods declare their view provision via @XBlockAside.aside_for(view_name). This function finds those declarations for a block. Arguments: view_name (string): the name of the view requested. Returns: either the function or None
def aside_view_declaration(self, view_name): if view_name in self._combined_asides: # pylint: disable=unsupported-membership-test return getattr(self, self._combined_asides[view_name]) # pylint: disable=unsubscriptable-object else: return None
424,200
Create a new message. Args: message_type (unicode): The type associated with this message. Must be included in `TYPES`. message_text (unicode): The textual message.
def __init__(self, message_type, message_text): if message_type not in self.TYPES: raise TypeError("Unknown message_type: " + message_type) if not isinstance(message_text, six.text_type): raise TypeError("Message text must be unicode") self.type = message_type self.text = message_text
424,202
Add a new validation message to this instance. Args: message (ValidationMessage): A validation message to add to this instance's list of messages.
def add(self, message): if not isinstance(message, ValidationMessage): raise TypeError("Argument must be of type ValidationMessage") self.messages.append(message)
424,203
Adds all the messages in the specified `Validation` object to this instance's messages array. Args: validation (Validation): An object containing the messages to add to this instance's messages.
def add_messages(self, validation): if not isinstance(validation, Validation): raise TypeError("Argument must be of type Validation") self.messages.extend(validation.messages)
424,204
Get entity corresponding to the specified name (looks for it in all types of entities). Args: name: Name of the entity. Raises: TypeError: if the specified entity does not exist. Returns: The AMPL entity with the specified name.
def getEntity(self, name): return lock_and_call( lambda: Entity(self._impl.getEntity(name)), self._lock )
424,229
Get the variable with the corresponding name. Args: name: Name of the variable to be found. Raises: TypeError: if the specified variable does not exist.
def getVariable(self, name): return lock_and_call( lambda: Variable(self._impl.getVariable(name)), self._lock )
424,230
Get the constraint with the corresponding name. Args: name: Name of the constraint to be found. Raises: TypeError: if the specified constraint does not exist.
def getConstraint(self, name): return lock_and_call( lambda: Constraint(self._impl.getConstraint(name)), self._lock )
424,231
Get the objective with the corresponding name. Args: name: Name of the objective to be found. Raises: TypeError: if the specified objective does not exist.
def getObjective(self, name): return lock_and_call( lambda: Objective(self._impl.getObjective(name)), self._lock )
424,232
Get the set with the corresponding name. Args: name: Name of the set to be found. Raises: TypeError: if the specified set does not exist.
def getSet(self, name): return lock_and_call( lambda: Set(self._impl.getSet(name)), self._lock )
424,233
Get the parameter with the corresponding name. Args: name: Name of the parameter to be found. Raises: TypeError: if the specified parameter does not exist.
def getParameter(self, name): return lock_and_call( lambda: Parameter(self._impl.getParameter(name)), self._lock )
424,234
Interpret the given AMPL statement asynchronously. Args: amplstatements: A collection of AMPL statements and declarations to be passed to the interpreter. callback: Callback to be executed when the statement has been interpreted. Raises: RuntimeError: if the input is not a complete AMPL statement (e.g. if it does not end with semicolon) or if the underlying interpreter is not running.
def evalAsync(self, amplstatements, callback, **kwargs): if self._langext is not None: amplstatements = self._langext.translate(amplstatements, **kwargs) def async_call(): self._lock.acquire() try: self._impl.eval(amplstatements) self._errorhandler_wrapper.check() except Exception: self._lock.release() raise else: self._lock.release() callback.run() Thread(target=async_call).start()
424,239
Solve the current model asynchronously. Args: callback: Callback to be executed when the solver is done.
def solveAsync(self, callback): def async_call(): self._lock.acquire() try: self._impl.solve() except Exception: self._lock.release() raise else: self._lock.release() callback.run() Thread(target=async_call).start()
424,240
Get or set the current working directory from the underlying interpreter (see https://en.wikipedia.org/wiki/Working_directory). Args: path: New working directory or None (to display the working directory). Returns: Current working directory.
def cd(self, path=None): if path is None: return lock_and_call( lambda: self._impl.cd(), self._lock ) else: return lock_and_call( lambda: self._impl.cd(path), self._lock )
424,241
Set an AMPL option to a specified value. Args: name: Name of the option to be set (alphanumeric without spaces). value: The value the option must be set to. Raises: InvalidArgument: if the option name is not valid. TypeError: if the value has an invalid type.
def setOption(self, name, value): if isinstance(value, bool): lock_and_call( lambda: self._impl.setBoolOption(name, value), self._lock ) elif isinstance(value, int): lock_and_call( lambda: self._impl.setIntOption(name, value), self._lock ) elif isinstance(value, float): lock_and_call( lambda: self._impl.setDblOption(name, value), self._lock ) elif isinstance(value, basestring): lock_and_call( lambda: self._impl.setOption(name, value), self._lock ) else: raise TypeError
424,242
Get the current value of the specified option. If the option does not exist, returns None. Args: name: Option name. Returns: Value of the option. Raises: InvalidArgument: if the option name is not valid.
def getOption(self, name): try: value = lock_and_call( lambda: self._impl.getOption(name).value(), self._lock ) except RuntimeError: return None else: try: return int(value) except ValueError: try: return float(value) except ValueError: return value
424,243
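A hedged round-trip through the two option methods above; assumes a local AMPL installation and the camelCase API shown in this section.

# Hedged sketch; requires a local AMPL installation.
from amplpy import AMPL

ampl = AMPL()
ampl.setOption('presolve', 10)           # routed to setIntOption
print(ampl.getOption('presolve'))        # -> 10, cast back via int()
print(ampl.getOption('no_such_option'))  # -> None (RuntimeError swallowed)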
Interprets the specified file (script or model or mixed). As a side effect, it invalidates all entities (as the passed file can contain any arbitrary command); the lists of entities will be re-populated lazily (at first access). Args: fileName: Full path to the file. Raises: RuntimeError: in case the file does not exist.
def read(self, fileName, **kwargs): if self._langext is not None: with open(fileName, 'r') as fin: newmodel = self._langext.translate(fin.read(), **kwargs) with open(fileName+'.translated', 'w') as fout: fout.write(newmodel) fileName += '.translated' lock_and_call( lambda: self._impl.read(fileName), self._lock ) self._errorhandler_wrapper.check()
424,244
Interprets the specified file as an AMPL data file. As a side effect, it invalidates all entities (as the passed file can contain any arbitrary command); the lists of entities will be re-populated lazily (at first access). After reading the file, the interpreter is put back to "model" mode. Args: fileName: Full path to the file. Raises: RuntimeError: in case the file does not exist.
def readData(self, fileName): lock_and_call( lambda: self._impl.readData(fileName), self._lock ) self._errorhandler_wrapper.check()
424,245
Get a scalar value from the underlying AMPL interpreter, as a double or a string. Args: scalarExpression: An AMPL expression which evaluates to a scalar value. Returns: The value of the expression.
def getValue(self, scalarExpression): return lock_and_call( lambda: Utils.castVariant(self._impl.getValue(scalarExpression)), self._lock )
424,246
Assign the data in the dataframe to the AMPL entities with the names corresponding to the column names. Args: data: The dataframe containing the data to be assigned. setName: The name of the set to which the indices values of the DataFrame are to be assigned. Raises: AMPLException: if the data assignment procedure was not successful.
def setData(self, data, setName=None): if not isinstance(data, DataFrame): if pd is not None and isinstance(data, pd.DataFrame): data = DataFrame.fromPandas(data) else: raise TypeError("data must be an amplpy.DataFrame or a pandas.DataFrame") # guard added: otherwise unsupported types fail below with AttributeError if setName is None: lock_and_call( lambda: self._impl.setData(data._impl), self._lock ) else: lock_and_call( lambda: self._impl.setData(data._impl, setName), self._lock )
424,247
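A hedged sketch of feeding a pandas DataFrame through `setData`; the model entities are illustrative and must be declared first, and a local AMPL installation is assumed.

# Hedged sketch; entity names are illustrative.
import pandas as pd
from amplpy import AMPL

ampl = AMPL()
ampl.eval('set ITEMS; param cost {ITEMS};')
df = pd.DataFrame({'cost': [10.0, 20.0]}, index=['a', 'b'])
ampl.setData(df, setName='ITEMS')        # index values populate ITEMS
print(ampl.getValue("cost['b']"))        # -> 20.0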
Read the table corresponding to the specified name, equivalent to the AMPL statement: .. code-block:: ampl read table tableName; Args: tableName: Name of the table to be read.
def readTable(self, tableName): lock_and_call( lambda: self._impl.readTable(tableName), self._lock )
424,248