function
stringlengths
11
56k
repo_name
stringlengths
5
60
features
list
def buildbounds(self, verbose = True): """ Build bounds for bok. By default I will make those bounds as wide as possible, still respecting epsilon. The parameter epsilon is the minimum distance two knots can have. If you give me a window size, I will not make the bounds as wide as possible, but only put them 0.5*window days around the current knots (still respecting all this epsilon stuff of course).
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def bok(self, bokmethod="BF", verbose=True, trace=False): """ We optimize the positions of knots by some various techniques. We use fixed bounds for the exploration, run buildbounds (with low epsilon) first. This means that I will not move my bounds.
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def score(intknots, index, value):
    # Objective for knot optimisation: return the splrep residual obtained
    # when the knot at `index` is moved to `value`, all other knots fixed.
    # Closure: `self` and `weights` come from the enclosing method.
    modifknots = intknots.copy()
    modifknots[index] = value
    # splrep with task=-1 fits with the given knot vector; [1] is the
    # weighted sum of squared residuals it reports.
    return si.splrep(self.datapoints.jds, self.datapoints.mags, w=weights,
                     xb=None, xe=None, k=self.k, task=-1, s=None,
                     t=modifknots, full_output=1, per=0, quiet=1)[1]
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def target(value):
    # 1-D objective in the single knot position `value`; `score`,
    # `intknots` and `i` are taken from the enclosing optimisation loop.
    return score(intknots, i, value)
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def target(modifknots):
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def getintt(self):
    """
    Return only the internal knots, excluding even the datapoint extrema.

    This is exactly the `t` argument expected by splrep; there are
    nint - 1 such knots.
    """
    cut = self.k + 1  # multiplicity of each outer (extremal) knot
    return self.t[cut:-cut].copy()
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def getinttex(self):
    """Return the internal knots plus one single copy of each extremal knot."""
    return self.t[self.k:-self.k].copy()
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def knotstats(self):
    """Return a one-line text summary of the knot spacings, sorted ascending."""
    knots = self.getinttex()
    gaps = sorted(knots[1:] - knots[:-1])
    return " ".join("%.1f" % gap for gap in gaps)
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def setintt(self, intt): """ Give me some internal knots (not even containing the datapoints extrema), and I build the correct total knot vector t for you. I add the extremas, with appropriate multiplicity.
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def setinttex(self, inttex): """ Including extremal knots """ #pro = self.datapoints.jds[0] * np.ones(self.k) #post = self.datapoints.jds[-1] * np.ones(self.k) pro = inttex[0] * np.ones(self.k) post = inttex[-1] * np.ones(self.k)
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def getnint(self):
    """Return the number of intervals between knots."""
    outer = 2 * (self.k + 1)  # total count of the repeated extremal knots
    return len(self.t) - outer + 1
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def getc(self, m=0):
    """
    Return the active spline coefficients, the ones worth optimising.

    For m=0 the length of the result is number of intervals - 2; a larger
    `m` trims `m` more coefficients at each end.
    """
    tail = self.k + 1 + m
    return self.c[m:-tail].copy()
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def setc(self, c, m=0):
    """Write coefficients obtained from getc back into their slots of self.c."""
    tail = self.k + 1 + m
    self.c[m:-tail] = c
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def getco(self, m=0):
    """
    Return getc's coefficients reordered — second half first, then the first
    half reversed — a layout better suited for nonlinear optimization.
    """
    c = self.getc(m=m)
    mid = len(c) // 2
    return np.concatenate([c[mid:], c[:mid][::-1]])
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def setco(self, c, m=0):
    """Inverse of getco: undo the reordering, then delegate to setc."""
    mid = len(c) // 2
    restored = np.concatenate([c[mid + 1:][::-1], c[:mid + 1]])
    self.setc(restored, m=m)
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def setcflat(self, c): """ Give me coeffs like those from getc(m=1), I will set the coeffs so that the spline extremas are flat (i.e. slope = 0). """
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def setcoflat(self, c):
    """Same as setcflat, but accepts coefficients in the reordered getco layout."""
    mid = len(c) // 2
    unshuffled = np.concatenate([c[mid:][::-1], c[:mid]])
    self.setcflat(unshuffled)
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def r2(self, nostab=True, nosquare=False): """ Evaluates the spline, compares it with the data points and returns a weighted sum of residuals r2.
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def tv(self): """ Returns the total variation of the spline. Simple ! http://en.wikipedia.org/wiki/Total_variation
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def optc(self): """ Optimize the coeffs, don't touch the knots This is the fast guy, one reason to use splines :-) Returns the chi2 in case you want it (including stabilization points) !
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def optcflat(self, verbose = False): """ Optimizes only the "border coeffs" so to get zero slope at the extrema Run optc() first ... This has to be done with an iterative optimizer """
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def setp(p):
    # Scatter the four free border coefficients into the full coefficient
    # vector (`full`, from the enclosing scope) and push it to the spline.
    full[[0, 1, -2, -1]] = p
    self.setcflat(full)
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def errorfct(p):
    # Objective for the border-coefficient optimisation: apply the candidate
    # border coeffs, then evaluate the full residual.
    setp(p)
    return self.r2(nostab=False)  # To get the same as optc would return !
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def eval(self, jds = None, nostab = True): """ Evaluates the spline at jds, and returns the corresponding mags-like vector. By default, we exclude the stabilization points ! If jds is not None, we use them instead of our own jds (in this case excludestab makes no sense) """ if jds is None: if nostab: jds = self.datapoints.jds[self.datapoints.mask] else: jds = self.datapoints.jds else: # A minimal check for non-extrapolation condition should go here ! pass
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def display(self, showbounds = True, showdatapoints = True, showerrorbars=True, figsize=(16,8)): """ A display of the spline object, with knots, jds, stab points, etc. For debugging and checks. """
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def fit(lcs, knotstep=20.0, n=None, knots=None, stab=True, stabext=300.0, stabgap=20.0, stabstep=5.0, stabmagerr=-2.0, stabrampsize=0, stabrampfact=1.0, bokit=1, bokeps=2.0, boktests=5, bokwindow=None, k=3, verbose=True): """ The highlevel function to make a spline fit.
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def seasonknots(lcs, knotstep, ingap, seasongap=60.0): """ A little helper to get some knot locations inside of seasons only
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def r2(lcs, spline, nosquare=False): """ I do not modify the spline (not even its datapoints) ! Just evaluate the quality of the match, returning an r2 (without any stab points, of course). This is used if you want to optimize something on the lightcurves without touching the spline.
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def mltv(lcs, spline, weight=True): """ Calculates the TV norm of the difference between a lightcurve (disregarding any microlensing !) and the spline. I return the sum over the curves in lcs.
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def optcmltv(lcs, spline, verbose=True): """ I will optimize the coefficients of the spline so to minimize the mltv. I do not use the microlensing of the lcs at all !
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def setc(c):
    # Closure helper: push candidate coefficients into the spline, leaving
    # the two outermost coefficients on each side untouched (m=2).
    spline.setc(c, m=2)
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def errorfct(c):
    """Objective: TV of (lightcurves - spline) plus a small smoothness penalty."""
    setc(c)
    (tv, dist) = mltv(lcs, spline, weight=False)
    # Bug fix: the Python 2 `print` statement was a SyntaxError on Python 3.
    # The message itself looks like a leftover reminder that weighting is
    # not implemented here — kept for behavioral parity.
    print("put weight")
    return tv + 0.1 * spline.tv()
COSMOGRAIL/PyCS
[ 4, 4, 4, 14, 1454429374 ]
def serve(procs, port=None, addr='tcp://*', context=None, debug=False):
    """Make some procedures available for remote calls via ØMQ."""
    # procs: mapping of procedure name -> callable.  Requests arrive as JSON
    # lists [name, arg1, ...] on a REP socket; the reply is JSON, either
    # {'result': ...} on success or {'error': ...} on failure.
    if context is None:
        context = zmq.Context.instance()
    with context.socket(zmq.REP) as socket:
        if port is None:
            # Let ZMQ pick a free port when none was requested.
            port = socket.bind_to_random_port(addr)
        else:
            socket.bind('{}:{}'.format(addr, port))
        print('Serving at {}:{}'.format(addr, port))
        print('sending and receiving JSON')
        for i in count(1):  # serve forever, numbering requests from 1
            idle = datetime.now()
            print('{}: waiting for request #{}...'.format(idle, i))
            # NOTE(review): poll()'s return value is never used and `message`
            # is immediately shadowed below — recv_json() is what actually
            # consumes the request.
            message = socket.poll()
            start = datetime.now()
            print('{}: received request #{} after {}'
                  .format(start, i, start - idle))
            try:
                request = socket.recv_json()
                name, *args = request
                result = procs[name](*args)
                reply = {'result': result}
                print(reply)
                socket.send_json(reply)
            except Exception as exc:
                # Any failure (bad JSON, unknown procedure, raising proc) is
                # reported back to the caller rather than killing the server.
                if debug:
                    traceback.print_exc()
                message = '{}: {}'.format(exc.__class__.__name__, exc)
                reply = {'error': message}
                print(reply)
                socket.send_json(reply)
            end = datetime.now()
            print('{}: replied to #{} after {}'
                  .format(end, i, end - start))
doctaphred/phredutils
[ 8, 1, 8, 12, 1453410109 ]
def __init__(self, file_loc, data_type='c', init_params=None,
             block_unit_size=0, disk_sector_size=0,
             open_c_reader=True, **kwargs):
    """
    :param file_loc: location of the file
    :param data_type: type of data, can be "l" for int/long, "c" for string
    :param init_params: the init_params for opening csv
    :param block_unit_size: block size for storage system, 0 when disabled
    :param disk_sector_size: size of disk sector
    :param open_c_reader: bool for whether open reader in C backend
    :param kwargs: not used now
    """
    super(CsvReader, self).__init__(file_loc, data_type, block_unit_size,
                                    disk_sector_size, open_c_reader,
                                    kwargs.get("lock", None))
    assert init_params is not None, "please provide init_param for csvReader"
    assert "label" in init_params, "please provide label for csv reader"
    # Opened in binary mode; lines are decoded on each read.
    self.trace_file = open(file_loc, 'rb')
    # self.trace_file = open(file_loc, 'r', encoding='utf-8', errors='ignore')
    self.init_params = init_params
    self.label_column = init_params['label']
    # NOTE(review): .get("real_time", ) defaults to None, and None != -1 is
    # True, so support_real_time/support_size become True even when those
    # columns are absent — confirm whether -1 was the intended sentinel.
    self.time_column = init_params.get("real_time", )
    self.size_column = init_params.get("size", )
    if self.time_column != -1:
        self.support_real_time = True
    if self.size_column != -1:
        self.support_size = True
    if block_unit_size != 0:
        assert "size" in init_params, "please provide size_column option to consider request size"
    self.header_bool = init_params.get('header', )
    self.delimiter = init_params.get('delimiter', ",")
    if "delimiter" not in init_params:
        INFO("open {} using default delimiter \",\" for CsvReader".format(file_loc))
    if self.header_bool:
        # Consume the header line and remember the stripped column names.
        self.headers = [i.strip(string.whitespace) for i in
                        self.trace_file.readline().decode().split(self.delimiter)]
        # self.trace_file.readline()
    if ALLOW_C_MIMIRCACHE and open_c_reader:
        # Mirror this reader in the C backend when it is available.
        self.c_reader = c_cacheReader.setup_reader(file_loc, 'c', data_type=data_type,
                                                   block_unit_size=block_unit_size,
                                                   disk_sector_size=disk_sector_size,
                                                   init_params=init_params)
1a1a11a/mimircache
[ 33, 14, 33, 5, 1460551932 ]
def read_complete_req(self):
    """
    read the complete line, including request and its all related info

    :return: a list of all info of the request, or None at end of file
    """
    super().read_one_req()
    line = self.trace_file.readline().decode()
    # Skip blank lines; an empty string (not just whitespace) means EOF.
    while line and len(line.strip()) == 0:
        line = self.trace_file.readline().decode()
    if line:
        line_split = line.strip().split(self.delimiter)
        if self.block_unit_size != 0 and self.disk_sector_size != 0:
            # NOTE(review): line_split entries are strings, so
            # str * int // int raises TypeError — an int() conversion of the
            # label seems to be missing here; confirm against read_one_req.
            # Also note label_column appears to be 1-based.
            line_split[self.label_column - 1] = line_split[self.label_column - 1] * \
                                                self.disk_sector_size // self.block_unit_size
        return line_split
    else:
        return None
1a1a11a/mimircache
[ 33, 14, 33, 5, 1460551932 ]
def lines(self):
    """
    Generator yielding, for each request/line of the trace, a tuple of all
    its fields (split on the reader's delimiter).

    :return: yields a tuple per line
    """
    line = self.trace_file.readline().decode()
    # Skip leading blank lines; an empty string means end of file.
    while line and len(line.strip()) == 0:
        line = self.trace_file.readline().decode()
    while line:
        line_split = tuple(line.split(self.delimiter))
        # Bug fix: the original read the next line without .decode(), so from
        # the second iteration on `line` was bytes and bytes.split(str)
        # raised TypeError.
        line = self.trace_file.readline().decode()
        yield line_split
1a1a11a/mimircache
[ 33, 14, 33, 5, 1460551932 ]
def skip_n_req(self, n):
    """
    Skip the next n requests, starting from the current position.

    :param n: the number of requests to skip
    """
    for _ in range(n):
        self.read_one_req()
1a1a11a/mimircache
[ 33, 14, 33, 5, 1460551932 ]
def copy(self, open_c_reader=False):
    """
    Return a fresh CsvReader equivalent to this one, reset to its initial
    state so it can be iterated without interfering with this reader.

    :param open_c_reader: whether to also open a C-backend reader, default False
    :return: the copied reader
    """
    return CsvReader(
        self.file_loc,
        self.data_type,
        self.init_params,
        self.block_unit_size,
        self.disk_sector_size,
        open_c_reader,
        lock=self.lock,
    )
1a1a11a/mimircache
[ 33, 14, 33, 5, 1460551932 ]
def __next__(self):  # Python 3 iteration protocol
    """Return the next request, raising StopIteration at end of trace."""
    super().__next__()
    element = self.read_one_req()
    if element is None:
        raise StopIteration
    return element
1a1a11a/mimircache
[ 33, 14, 33, 5, 1460551932 ]
def test_name(self): self.assertEqual(self.col.name, self.NAME, 'A Column does not store its name correctly')
TMiguelT/PandasSchema
[ 180, 36, 180, 37, 1480922541 ]
def test_outputs(self): results = self.col.validate(self.ser) # There should be 6 errors, 2 for each row self.assertEqual(len(results), 2 * len(self.ser), 'A Column produces the wrong number of errors') for i in range(2): in_row = [r for r in results if r.row == i] self.assertEqual(len(in_row), 2, 'A Column does not report both errors for every row')
TMiguelT/PandasSchema
[ 180, 36, 180, 37, 1480922541 ]
def ListAll_xx():
    """Return the ISO639 code of every known language."""
    return [lang['ISO639'] for lang in LANGUAGES]
matachi/subdownloader
[ 4, 1, 4, 1, 1433450532 ]
def ListAll_locale():
    """Return the locale of every known language."""
    return [lang['locale'] for lang in LANGUAGES]
matachi/subdownloader
[ 4, 1, 4, 1, 1433450532 ]
def xx2xxx(xx):
    """Map an ISO639 code to its SubLanguageID (None when unknown)."""
    match = next((lang for lang in LANGUAGES if lang['ISO639'] == xx), None)
    return match['SubLanguageID'] if match is not None else None
matachi/subdownloader
[ 4, 1, 4, 1, 1433450532 ]
def xxx2name(xxx):
    """Map a SubLanguageID to its LanguageName (None when unknown)."""
    match = next((lang for lang in LANGUAGES if lang['SubLanguageID'] == xxx), None)
    return match['LanguageName'] if match is not None else None
matachi/subdownloader
[ 4, 1, 4, 1, 1433450532 ]
def xx2name(xx):
    """Map an ISO639 code to its LanguageName (None when unknown)."""
    match = next((lang for lang in LANGUAGES if lang['ISO639'] == xx), None)
    return match['LanguageName'] if match is not None else None
matachi/subdownloader
[ 4, 1, 4, 1, 1433450532 ]
def name2xxx(name):
    """Map a language name (case-insensitive) to its SubLanguageID (None when unknown)."""
    wanted = name.lower()
    match = next((lang for lang in LANGUAGES
                  if lang['LanguageName'].lower() == wanted), None)
    return match['SubLanguageID'] if match is not None else None
matachi/subdownloader
[ 4, 1, 4, 1, 1433450532 ]
def _validate_template_filename(filename: Union[str, Path]) -> Path: if isinstance(filename, str): filename = Path(filename) suffix = filename.suffix if not suffix: suffix = '.crtx' if suffix != '.crtx': raise ValueError(f"Extension for the excel template file must be '.crtx' instead of {suffix}") return filename.with_suffix(suffix)
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __init__(self, template_dir=None, template=None, graphs_per_row=1):
    # Store the report defaults.  Order matters: template_dir must be set
    # before template, since the template setter resolves against it.
    self.template_dir = template_dir
    self.template = template
    # Copy so per-report tweaks don't mutate the module-wide defaults.
    self.default_items_size = _default_items_size.copy()
    self.graphs_per_row = graphs_per_row
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def template_dir(self):
    r"""
    Set the path to the directory containing the Excel template files (with '.crtx' extension).

    This method is mainly useful if your template files are located in several directories,
    otherwise pass the template directory directly the ExcelReport constructor.

    Parameters
    ----------
    template_dir : str or Path
        Path to the directory containing the Excel template files.

    See Also
    --------
    set_graph_template

    Examples
    --------
    >>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
    >>> # ... add some graphs using template files from 'C:\excel_templates_dir'
    >>> report.template_dir = r'C:\other_templates_dir' # doctest: +SKIP
    >>> # ... add some graphs using template files from 'C:\other_templates_dir'
    """
    # Property getter: the backing attribute is set by the matching setter.
    return self._template_dir
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def template_dir(self, template_dir):
    """Validate and store the directory holding the '.crtx' template files."""
    if template_dir is None:
        self._template_dir = None
        return
    if isinstance(template_dir, str):
        template_dir = Path(template_dir)
    if not isinstance(template_dir, Path):
        raise TypeError(f"Expected a string or a pathlib.Path object. "
                        f"Got an object of type {type(template_dir).__name__} instead.")
    if not template_dir.is_dir():
        raise ValueError(f"The directory {template_dir} could not be found.")
    self._template_dir = template_dir
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def template(self):
    r"""
    Set a default Excel template file.

    Parameters
    ----------
    template : str or Path
        Name of the template to be used as default template.
        The extension '.crtx' will be added if not given.
        The full path to the template file must be given if no template directory has been set.

    Examples
    --------
    >>> demo = load_example_data('demography_eurostat')

    Passing the name of the template (only if a template directory has been set)

    >>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
    >>> report.template = 'Line'
    >>> sheet_population = report.new_sheet('Population')
    >>> sheet_population.add_graph(demo.population['Belgium'],'Belgium')

    Passing the full path of the template file

    >>> # if no default template directory has been set
    >>> # or if the new template is located in another directory,
    >>> # you must provide the full path
    >>> sheet_population.template = r'C:\other_templates_dir\Line_Marker.crtx' # doctest: +SKIP
    >>> sheet_population.add_graph(demo.population['Germany'],'Germany') # doctest: +SKIP
    """
    # Property getter: the backing attribute is set by the matching setter.
    return self._template
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def template(self, template):
    """Resolve `template` against template_dir and store the full path."""
    if template is None:
        self._template = None
        return
    if self.template_dir is None:
        raise RuntimeError("Please set 'template_dir' first")
    filename = _validate_template_filename(template)
    self._template = self.template_dir / filename
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def graphs_per_row(self):
    r"""
    Default number of graphs per row.

    Parameters
    ----------
    graphs_per_row: int

    See Also
    --------
    ReportSheet.newline
    """
    # Property getter: the value is validated by the matching setter.
    return self._graphs_per_row
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def graphs_per_row(self, graphs_per_row):
    # Validate via helper before storing (expects a positive integer —
    # see _positive_integer).
    _positive_integer(graphs_per_row)
    self._graphs_per_row = graphs_per_row
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def add_title(self, title, width=None, height=None, fontsize=11):
    r"""
    Add a title item to the current sheet.
    Note that the current method only add a new item to the list of items to be generated.
    The report Excel file is generated only when the :py:obj:`~ExcelReport.to_excel` is called.

    Parameters
    ----------
    title : str
        Text to write in the title item.
    width : int, optional
        width of the title item. The current default value is used if None
        (see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
    height : int, optional
        height of the title item. The current default value is used if None
        (see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
    fontsize : int, optional
        fontsize of the displayed text. Defaults to 11.

    Examples
    --------
    >>> report = ExcelReport()
    >>> first_sheet = report.new_sheet('First_sheet')
    >>> first_sheet.add_title('Title banner with default width, height and fontsize')
    >>> first_sheet.add_title('Larger title banner', width=1200, height=100)
    >>> first_sheet.add_title('Bigger fontsize', fontsize=13)
    >>> # do not forget to call 'to_excel' to create the report file
    >>> report.to_excel('Report.xlsx')
    """
    # Abstract stub: documents the API; the concrete implementation lives
    # in a subclass.
    pass
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def add_graphs(self, array_per_title, axis_per_loop_variable, template=None, width=None, height=None,
               graphs_per_row=1, min_y=None, max_y=None, xticks_spacing=None, customize_func=None,
               customize_kwargs=None):
    r"""
    Add multiple graph items to the current sheet. This method is mainly useful
    when multiple graphs are generated by iterating over one or several axes of an array
    (see examples below). The report Excel file is generated only when the
    :py:obj:`~ExcelReport.to_excel` is called.

    Parameters
    ----------
    array_per_title: dict
        dictionary containing pairs (title template, array).
    axis_per_loop_variable: dict
        dictionary containing pairs (variable used in the title template, axis).
    template : str or Path, optional
        name of the template to be used to generate the graph.
        The full path to the template file must be provided if no template directory has not been set
        or if the template file belongs to another directory.
        Defaults to the defined template (see :py:obj:`~ExcelReport.set_graph_template`).
    width : int, optional
        width of the title item. The current default value is used if None
        (see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
    height : int, optional
        height of the title item. The current default value is used if None
        (see :py:obj:`~ExcelReport.set_item_default_size`). Defaults to None.
    graphs_per_row: int, optional
        Number of graphs per row. Defaults to 1.
    min_y: int, optional
        minimum value for the Y axis.
    max_y: int, optional
        maximum value for the Y axis.
    xticks_spacing: int, optional
        space interval between two ticks along the X axis.
    customize_func: function, optional
        user defined function to personalize the graph.
        The function must take the Chart object as first argument.
        All keyword arguments defined in customize_kwargs are passed to the function at call.
    customize_kwargs: dict, optional
        keywords arguments passed to the function `customize_func` at call.

    Examples
    --------
    >>> demo = load_example_data('demography_eurostat')
    >>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
    >>> sheet_population = report.new_sheet('Population')
    >>> population = demo.population

    Generate a new graph for each combination of gender and year

    >>> sheet_population.add_graphs(
    ...     {'Population of {gender} by country in {year}': population},
    ...     {'gender': population.gender, 'year': population.time},
    ...     template='line', width=450, height=250, graphs_per_row=2)

    Specify the mininum and maximum values for the Y axis

    >>> sheet_population.add_graphs({'Population of {gender} by country for the year {year}': population},
    ...     {'gender': population.gender, 'year': population.time},
    ...     template='line', width=450, height=250, graphs_per_row=2, min_y=0, max_y=50e6)

    Specify the interval between two ticks (X axis)

    >>> sheet_population.add_graphs({'Population of {gender} by country for the year {year}': population},
    ...     {'gender': population.gender, 'year': population.time},
    ...     template='line', width=450, height=250, graphs_per_row=2, xticks_spacing=2)

    >>> # do not forget to call 'to_excel' to create the report file
    >>> report.to_excel('Demography_Report.xlsx')
    """
    # Abstract stub: documents the API; the concrete implementation lives
    # in a subclass.
    pass
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def new_sheet(self, sheet_name):
    r"""
    Add a new empty output sheet.
    This sheet will contain only graphical elements, all data are exported
    to a dedicated separate sheet.

    Parameters
    ----------
    sheet_name : str
        name of the current sheet.

    Returns
    -------
    sheet: ReportSheet

    Examples
    --------
    >>> demo = load_example_data('demography_eurostat')
    >>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)

    >>> # prepare new output sheet named 'Belgium'
    >>> sheet_be = report.new_sheet('Belgium')

    >>> # add graph to the output sheet 'Belgium'
    >>> sheet_be.add_graph(demo.population['Belgium'], 'Population', template='Line')
    """
    # Abstract stub: documents the API; the concrete implementation lives
    # in a subclass.
    pass
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def to_excel(self, filepath, data_sheet_name='__data__', overwrite=True):
    r"""
    Generate the report Excel file.

    Parameters
    ----------
    filepath : str or Path
        Path of the report file for the dump.
    data_sheet_name : str, optional
        name of the Excel sheet where all data associated with items is dumped.
        Defaults to '__data__'.
    overwrite : bool, optional
        whether to overwrite an existing report file. Defaults to True.

    Examples
    --------
    >>> demo = load_example_data('demography_eurostat')
    >>> report = ExcelReport(EXAMPLE_EXCEL_TEMPLATES_DIR)
    >>> report.template = 'Line_Marker'

    >>> for c in demo.country:
    ...     sheet_country = report.new_sheet(c)
    ...     sheet_country.add_graph(demo.population[c], 'Population')
    ...     sheet_country.add_graph(demo.births[c], 'Births')
    ...     sheet_country.add_graph(demo.deaths[c], 'Deaths')

    Basic usage

    >>> report.to_excel('Demography_Report.xlsx')

    Alternative data sheet name

    >>> report.to_excel('Demography_Report.xlsx', data_sheet_name='Data Tables') # doctest: +SKIP

    Check if ouput file already exists

    >>> report.to_excel('Demography_Report.xlsx', overwrite=False) # doctest: +SKIP
    Traceback (most recent call last):
    ...
    ValueError: Sheet named 'Belgium' already present in workbook
    """
    # Abstract stub: documents the API; the concrete implementation lives
    # in a subclass.
    pass
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __init__(self, width, height):
    # Store the item's size.  Presumably these assignments go through the
    # width/height property setters, which validate the values — confirm
    # class wiring.
    self.width = width
    self.height = height
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def width(self):
    """Return the stored width (validated by the corresponding setter)."""
    return self._width
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def width(self, width):
    # Validate via helper before storing (expects a positive integer —
    # see _positive_integer).
    _positive_integer(width)
    self._width = width
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def height(self):
    """Return the stored height (validated by the corresponding setter)."""
    return self._height
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def height(self, height):
    # Validate via helper before storing (expects a positive integer —
    # see _positive_integer).
    _positive_integer(height)
    self._height = height
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __init__(self, text, fontsize, top, left, width, height):
    # Size is validated/stored by the ItemSize base class.
    ItemSize.__init__(self, width, height)
    # Position of the title item within the sheet.
    self.top = top
    self.left = left
    # Coerce so callers may pass any object as the title text.
    self.text = str(text)
    _positive_integer(fontsize)
    self.fontsize = fontsize
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __init__(self, data, title, template, top, left, width, height, min_y, max_y,
             xticks_spacing, customize_func, customize_kwargs):
    # Size is validated/stored by the ItemSize base class.
    ItemSize.__init__(self, width, height)
    # Position of the graph item within the sheet.
    self.top = top
    self.left = left
    # Title is optional; coerced to str when given.
    self.title = str(title) if title is not None else None
    data = asarray(data)
    # Only 1D or 2D data can be plotted as a chart.
    if not (1 <= data.ndim <= 2):
        raise ValueError(f"Expected 1D or 2D array for data argument. Got array of dimensions {data.ndim}")
    self.data = data
    # Template must point at an existing file when provided.
    if template is not None:
        template = Path(template)
        if not template.is_file():
            raise ValueError(f"Could not find template file {template}")
    self.template = template
    # Optional Y-axis bounds and X-tick interval (None means automatic).
    self.min_y = min_y
    self.max_y = max_y
    self.xticks_spacing = xticks_spacing
    # Optional user hook applied to the chart after creation.
    if customize_func is not None and not callable(customize_func):
        raise TypeError(f"Expected a function for the argument 'customize_func'. "
                        f"Got object of type {type(customize_func).__name__} instead.")
    self.customize_func = customize_func
    self.customize_kwargs = customize_kwargs
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __init__(self, excel_report, name, template_dir=None, template=None, graphs_per_row=1):
    # Sheet names go through a translation helper (presumably to satisfy
    # Excel's sheet-naming rules — confirm in _translate_sheet_name).
    name = _translate_sheet_name(name)
    self.excel_report = excel_report
    self.name = name
    self.items = []
    # Layout cursor: current offsets and position within the current row.
    self.top = 0
    self.left = 0
    self.position_in_row = 1
    self.curline_height = 0
    # Fall back to the parent report's defaults when not overridden.
    if template_dir is None:
        template_dir = excel_report.template_dir
    if template is None:
        template = excel_report.template
    AbstractReportSheet.__init__(self, template_dir, template, graphs_per_row)
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def add_graph(self, data, title=None, template=None, width=None, height=None, min_y=None,
              max_y=None, xticks_spacing=None, customize_func=None, customize_kwargs=None):
    # Fall back to the sheet-level default graph size.
    if width is None:
        width = self.default_items_size['graph'].width
    if height is None:
        height = self.default_items_size['graph'].height
    # Passing a template here also makes it this sheet's new default.
    if template is not None:
        self.template = template
    template = self.template
    # Wrap to the next row when the current one is full.
    if self.graphs_per_row is not None and self.position_in_row > self.graphs_per_row:
        self.newline()
    self.items.append(ExcelGraphItem(data, title, template, self.top, self.left,
                                     width, height, min_y, max_y, xticks_spacing,
                                     customize_func, customize_kwargs))
    # Advance the layout cursor past the graph just added.
    self.left += width
    self.curline_height = max(self.curline_height, height)
    self.position_in_row += 1
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def newline(self):
    """Start a new row of items: move the cursor down by the current row height."""
    self.left = 0
    self.position_in_row = 1
    self.top += self.curline_height
    self.curline_height = 0
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __init__(self, template_dir=None, template=None, graphs_per_row=1):
    AbstractExcelReport.__init__(self, template_dir, template, graphs_per_row)
    # Mapping of sheet name -> ReportSheet, in insertion order.
    self.sheets = {}
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __getitem__(self, key):
    # Dict-like access: return the report sheet registered under `key`.
    return self.sheets[key]
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __setitem__(self, key, value, warn_stacklevel=2):
    # Register `value` as sheet `key`; only ReportSheet objects are accepted.
    if not isinstance(value, ReportSheet):
        raise ValueError(f"Expected ReportSheet object. Got {type(value).__name__} object instead.")
    # Replacing an existing sheet is allowed but warned about;
    # warn_stacklevel lets internal callers point the warning at their caller.
    if key in self.sheet_names():
        warnings.warn(f"Sheet '{key}' already exists in the report and will be reset",
                      stacklevel=warn_stacklevel)
    self.sheets[key] = value
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __repr__(self):
    # Summarize the report by the names of its sheets.
    return f'sheets: {self.sheet_names()}'
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def to_excel(self, filepath, data_sheet_name='__data__', overwrite=True):
    # Generate the report workbook, then reset this report object.
    with open_excel(filepath, overwrite_file=overwrite) as wb:
        # from here on, we use pure win32com objects instead of
        # larray.excel or xlwings objects as this is faster
        xl_wb = wb.api
        # rename first sheet
        xl_wb.Worksheets(1).Name = data_sheet_name
        # dump items for each output sheet
        data_sheet_row = 1
        for sheet in self.sheets.values():
            # Each sheet returns the next free row of the shared data sheet.
            data_sheet_row = sheet._to_excel(xl_wb, data_sheet_row)
        wb.save()
    # reset
    self.sheets.clear()
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __init__(self):
    # Stub used when the optional xlwings dependency is absent.
    raise Exception("ReportSheet class cannot be instantiated because xlwings is not installed")
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def __init__(self):
    # Stub used when the optional xlwings dependency is absent.
    raise Exception("ExcelReport class cannot be instantiated because xlwings is not installed")
liam2/larray
[ 8, 5, 8, 228, 1392806323 ]
def my_add(a, b):
    """
    This is the function for addition of numbers and strings.

    Prints both operand values for tracing, then returns a + b.
    """
    # Bug fix: the original used Python 2 print statements, which are a
    # SyntaxError on Python 3; print() behaves identically here.
    print("value of a is {}".format(a))
    print("value of b is {}".format(b))
    return a + b
tuxfux-hlp-notes/python-batches
[ 5, 15, 5, 2, 1481601578 ]
def my_div(a, b):
    """
    This is the function for division.

    Raises ZeroDivisionError when b is zero.
    """
    return a / b
tuxfux-hlp-notes/python-batches
[ 5, 15, 5, 2, 1481601578 ]
def my_sub(a, b):
    """
    This is the function for substraction (absolute difference).

    Bug fix: the original returned None when a == b because neither branch
    matched; the correct difference in that case is 0.
    """
    if a > b:
        return a - b
    elif b > a:
        return b - a
    else:
        return 0
tuxfux-hlp-notes/python-batches
[ 5, 15, 5, 2, 1481601578 ]
def my_mul(a, b):
    """This is the function for multiplication."""
    return a * b
tuxfux-hlp-notes/python-batches
[ 5, 15, 5, 2, 1481601578 ]
def checkInputs(): #Check to be sure that SHGC and VT are between 0 and 1. checkData = True
mostaphaRoudsari/Honeybee
[ 118, 145, 118, 98, 1384492853 ]
def checkBtwZeroAndOne(variable, default, variableName): if variable == None: newVariable = default else: if variable <= 1 and variable >= 0: newVariable = variable else: newVariable = 0 checkData = False warning = variableName + " must be between 0 and 1." print warning ghenv.Component.AddRuntimeMessage(w, warning)
mostaphaRoudsari/Honeybee
[ 118, 145, 118, 98, 1384492853 ]
def main(name, roughness, R_Value, thermAbsp, solAbsp, visAbsp):
mostaphaRoudsari/Honeybee
[ 118, 145, 118, 98, 1384492853 ]
def __init__(self, blockname=None, **kwargs):
    ModTool.__init__(self, blockname, **kwargs)
    # The block name doubles as the search pattern for this tool.
    self.info['pattern'] = blockname
marcusmueller/gnuradio
[ 1, 1, 1, 1, 1369304746 ]
def setup():
    """Print a marker showing that setup ran."""
    # Bug fix: the original used a Python 2 print statement, which is a
    # SyntaxError on Python 3.
    print("SETUP!")
anaoprea/iDibo
[ 1, 1, 1, 8, 1416675207 ]
def __init__(self, filter_size, compiled=True):
    # A pooling layer has no trainable parameters and no nonlinearity.
    LayerBase.__init__(self, activation="linear", trainable=False)
    # Pick the compiled (llatomic) or plain (atomic) pooling op backend.
    if compiled:
        from ..llatomic.lltensor_op import MaxPoolOp
    else:
        from ..atomic import MaxPoolOp
    self.fdim = filter_size
    # Populated by the forward pass (second return value of op.forward).
    self.filter = None
    self.op = MaxPoolOp()
csxeba/brainforge
[ 113, 12, 113, 2, 1483437073 ]
def feedforward(self, questions):
    # The op returns the pooled output plus a second array that we cache as
    # self.filter — presumably the pooling switch mask needed by the
    # backward pass; confirm in MaxPoolOp.
    self.output, self.filter = self.op.forward(questions, self.fdim)
    return self.output
csxeba/brainforge
[ 113, 12, 113, 2, 1483437073 ]
def outshape(self):
    """Shape of the layer output, excluding the leading (batch) dimension."""
    return self.output.shape[-3:]
csxeba/brainforge
[ 113, 12, 113, 2, 1483437073 ]
def __init__(self, nfilters, filterx=3, filtery=3, compiled=True, **kw):
    super().__init__(compiled=compiled, **kw)
    # Filter bank geometry; depth/inshape/op are filled in when the layer
    # is connected to its input.
    self.nfilters = nfilters
    self.fx = filterx
    self.fy = filtery
    self.depth = 0
    self.stride = 1
    self.inshape = None
    self.op = None
csxeba/brainforge
[ 113, 12, 113, 2, 1483437073 ]
def feedforward(self, X):
    self.inputs = X
    # "valid" convolution, then the nonlinearity.
    self.output = self.activation.forward(self.op.forward(X, self.weights, "valid"))
    # NOTE(review): biases are added AFTER the activation, which is unusual
    # (normally activation(conv + bias)) — confirm this is intended.
    self.output += self.biases
    return self.output
csxeba/brainforge
[ 113, 12, 113, 2, 1483437073 ]
def outshape(self):
    # Valid-convolution output size: in - filter + 1 per spatial dim.
    # NOTE(review): zipping inshape[-2:] (y then x) against (self.fx,
    # self.fy) pairs fx with the y dimension — confirm the filterx/filtery
    # ordering is intended (harmless when fx == fy).
    oy, ox = tuple(ix - fx + 1 for ix, fx in zip(self.inshape[-2:], (self.fx, self.fy)))
    return self.nfilters, ox, oy
csxeba/brainforge
[ 113, 12, 113, 2, 1483437073 ]
def __init__(self):
    LayerBase.__init__(self)
    NoParamMixin.__init__(self)
    # Number of repetitions per element; set later, before backpropagate
    # divides by it.
    self.repeats = 0
csxeba/brainforge
[ 113, 12, 113, 2, 1483437073 ]
def backpropagate(self, delta):
    # Spread each upstream gradient evenly over the `repeats` positions it
    # came from, then restore the layer's input shape (batch first).
    m = len(delta)
    delta = np.repeat(delta / scalX(self.repeats), self.repeats)
    delta = delta.reshape((m,) + self.inshape)
    return delta
csxeba/brainforge
[ 113, 12, 113, 2, 1483437073 ]
def __init__( self, alphabet: Optional[ Union[TCounter[str], Sequence[str], Set[str], int] ] = None, tokenizer: Optional[_Tokenizer] = None, intersection_type: str = 'crisp', **kwargs: Any
chrislit/abydos
[ 154, 26, 154, 63, 1398235847 ]
def sim(self, src: str, tar: str) -> float:
    """Return the Tarantula similarity of two strings.

    Parameters
    ----------
    src : str
        Source string (or QGrams/Counter objects) for comparison
    tar : str
        Target string (or QGrams/Counter objects) for comparison

    Returns
    -------
    float
        Tarantula similarity

    Examples
    --------
    >>> cmp = Tarantula()
    >>> cmp.sim('cat', 'hat')
    0.9948979591836735
    >>> cmp.sim('Niall', 'Neil')
    0.98856416772554
    >>> cmp.sim('aluminum', 'Catalan')
    0.9249106078665077
    >>> cmp.sim('ATCG', 'TAGC')
    0.0

    .. versionadded:: 0.4.0
    """
    # Identical strings short-circuit to perfect similarity.
    if src == tar:
        return 1.0
    self._tokenize(src, tar)
    # 2x2 contingency table over the token sets:
    a = self._intersection_card()      # in both src and tar
    b = self._src_only_card()          # only in src
    c = self._tar_only_card()          # only in tar
    d = self._total_complement_card()  # in neither
    num = a * (c + d)
    if num:
        return num / (a * (2 * c + d) + b * c)
    # No overlap (a == 0, or c + d == 0): similarity is defined as 0.
    return 0.0
chrislit/abydos
[ 154, 26, 154, 63, 1398235847 ]
def __init__(self, config, emitter, name='wikidata'):
    self.config = config
    # Child process handle; created elsewhere, cleared by stop().
    self.process = None
    self.emitter = emitter
    self.name = name
    # Answer 'WikidataKnowledgeAdquire' messages from the bus.
    self.emitter.on('WikidataKnowledgeAdquire', self._adquire)
ElliotTheRobot/LILACS-mycroft-core
[ 4, 1, 4, 19, 1491303887 ]
def adquire(self, subject):
    # Broadcast a knowledge-acquisition request for `subject` on the bus;
    # the registered handler (see __init__) picks it up.
    logger.info('Call WikidataKnowledgeAdquire')
    self.emitter.emit(Message('WikidataKnowledgeAdquire', {"subject": subject}))
ElliotTheRobot/LILACS-mycroft-core
[ 4, 1, 4, 19, 1491303887 ]
def stop(self):
    # Terminate the helper process, if one is running, and drop the handle.
    logger.info('WikidataKnowledge_Stop')
    if self.process:
        self.process.terminate()
        self.process = None
ElliotTheRobot/LILACS-mycroft-core
[ 4, 1, 4, 19, 1491303887 ]
def test_get_subscriptions(self):
    # GET /ws/subscriptions should be parsed into one object per entry,
    # with short_name taken from the last path component.
    self.prepare_json_response("GET", "/ws/subscriptions", {
        "subscriptions": [
            "subscriptions/a",
            "subscriptions/b",
        ]
    })
    subs = self.wva.get_subscriptions()
    self.assertEqual(len(subs), 2)
    self.assertEqual(subs[0].short_name, "a")
    self.assertEqual(subs[1].short_name, "b")
digidotcom/python-wvalib
[ 5, 3, 5, 3, 1427831267 ]
def test_delete(self):
    # Deleting a subscription object must issue DELETE on its own URI.
    self.prepare_response("DELETE", "/ws/subscriptions/short-name", "")
    sub = self.wva.get_subscription("short-name")
    sub.delete()
    self.assertEqual(self._get_last_request().method, "DELETE")
    self.assertEqual(self._get_last_request().path, "/ws/subscriptions/short-name")
digidotcom/python-wvalib
[ 5, 3, 5, 3, 1427831267 ]