Dataset schema:
    _id: string (lengths 2 to 7)
    title: string (lengths 1 to 88)
    partition: string (3 distinct values)
    text: string (lengths 31 to 13.1k)
    language: string (1 distinct value)
    meta_information: dict
q21500
get_mapped_permissions_for
train
def get_mapped_permissions_for(brain_or_object):
    """Get the mapped permissions for the given object

    A mapped permission is one that is used in the object. Each permission
    string, e.g. "senaite.core: Field: Edit Analysis Remarks" is translated
    by the function `AccessControl.Permission.pname` to a valid attribute
    name:

    >>> from bika.lims.permissions import FieldEditAnalysisResult
    >>> from AccessControl.Permission import pname
    >>> pname(FieldEditAnalysisResult)
python
{ "resource": "" }
q21501
get_allowed_permissions_for
train
def get_allowed_permissions_for(brain_or_object, user=None):
    """Get the allowed permissions for the given object

    Code extracted from `IRoleManager.manage_getUserRolesAndPermissions`

    :param brain_or_object: Catalog brain or object
    :param user: A user ID, user object or None (for the current user)
    :returns: List of allowed permissions
    """
    allowed = []
python
{ "resource": "" }
q21502
get_disallowed_permissions_for
train
def get_disallowed_permissions_for(brain_or_object, user=None):
    """Get the disallowed permissions for the given object

    Code extracted from `IRoleManager.manage_getUserRolesAndPermissions`

    :param brain_or_object: Catalog brain or object
    :param user: A user ID, user object or None (for the current user)
    :returns: List of disallowed permissions
    """
    disallowed = []
python
{ "resource": "" }
q21503
check_permission
train
def check_permission(permission, brain_or_object):
    """Check whether the security context allows the given permission on
    the given brain or object.

    N.B.: This also includes acquired permissions

    :param permission: Permission name
    :param brain_or_object: Catalog brain or
python
{ "resource": "" }
q21504
get_permissions_for_role
train
def get_permissions_for_role(role, brain_or_object):
    """Return the permissions of the role which are granted on the object

    Code extracted from `IRoleManager.permissionsOfRole`

    :param role: The role to check the permission
    :param brain_or_object: Catalog brain or object
    :returns: List of permissions of the role
    """
    obj = api.get_object(brain_or_object)

    # Raise an error if the role is invalid
    valid_roles = get_valid_roles_for(obj)
    if role not in valid_roles:
python
{ "resource": "" }
q21505
get_roles_for_permission
train
def get_roles_for_permission(permission, brain_or_object):
    """Return the roles of the permission that is granted on the object

    Code extracted from `IRoleManager.rolesOfPermission`

    :param permission: The permission to get the roles
    :param brain_or_object: Catalog brain or object
    :returns: List of roles having the permission
    """
    obj = api.get_object(brain_or_object)
    valid_roles = get_valid_roles_for(obj)
    for item in obj.ac_inherited_permissions(1):
        name, value = item[:2]
        # found the requested permission
        if name == permission:
            # Permission maps a named permission to a set of attribute names
python
{ "resource": "" }
q21506
get_local_roles_for
train
def get_local_roles_for(brain_or_object, user=None):
    """Get the locally defined roles on the context

    Code extracted from `IRoleManager.get_local_roles_for_userid`

    :param brain_or_object: Catalog brain
python
{ "resource": "" }
q21507
grant_local_roles_for
train
def grant_local_roles_for(brain_or_object, roles, user=None):
    """Grant local roles for the object

    Code extracted from `IRoleManager.manage_addLocalRoles`

    :param brain_or_object: Catalog brain or object
    :param user: A user ID, user object or None (for the current user)
    :param roles: The local roles to grant for the current user
    """
    user_id = get_user_id(user)
python
{ "resource": "" }
q21508
revoke_local_roles_for
train
def revoke_local_roles_for(brain_or_object, roles, user=None):
    """Revoke local roles for the object

    Code extracted from `IRoleManager.manage_setLocalRoles`

    :param brain_or_object: Catalog brain or object
    :param roles: The local roles to revoke for the current user
    :param user: A user ID, user object or None (for the current user)
    """
    user_id = get_user_id(user)
    obj = api.get_object(brain_or_object)
    valid_roles = get_valid_roles_for(obj)
    to_grant = list(get_local_roles_for(obj))

    if isinstance(roles, basestring):
        roles = [roles]

    for role in roles:
        if role in to_grant:
            if role not in valid_roles:
python
{ "resource": "" }
q21509
grant_permission_for
train
def grant_permission_for(brain_or_object, permission, roles, acquire=0):
    """Grant the permission for the object to the defined roles

    Code extracted from `IRoleManager.manage_permission`

    :param brain_or_object: Catalog brain or object
    :param permission: The permission to be granted
    :param roles: The roles the permission to be granted to
    :param acquire: Flag to acquire the permission
    """
    obj = api.get_object(brain_or_object)
    valid_roles = get_valid_roles_for(obj)
    to_grant = list(get_roles_for_permission(permission, obj))
python
{ "resource": "" }
q21510
manage_permission_for
train
def manage_permission_for(brain_or_object, permission, roles, acquire=0):
    """Change the settings for the given permission.

    Code extracted from `IRoleManager.manage_permission`

    :param brain_or_object: Catalog brain or object
    :param permission: The permission to be granted
    :param roles: The roles the permission to be granted to
    :param acquire: Flag to acquire the permission
    """
    obj = api.get_object(brain_or_object)
    if isinstance(roles, basestring):
        roles = [roles]
    for item in obj.ac_inherited_permissions(1):
        name, value = item[:2]
        if name == permission:
python
{ "resource": "" }
q21511
PrintForm.getCSS
train
def getCSS(self):
    """Returns the css style to be used for the current template.
    If the selected template is 'default.pt', this method will return the
    content from 'default.css'. If no CSS file is found for the current
    template, returns an empty string.
    """
    template = self.request.get('template', self._DEFAULT_TEMPLATE)
    content = ''
    if template.find(':') >= 0:
        prefix, template = template.split(':')
        resource = queryResourceDirectory(
            self._TEMPLATES_ADDON_DIR, prefix)
        css = '{0}.css'.format(template[:-3])
python
{ "resource": "" }
q21512
PrintForm.pdfFromPOST
train
def pdfFromPOST(self):
    """Returns the PDF for the printed sampling rounds
    """
    html = self.request.form.get('html')
    style = self.request.form.get('style')
python
{ "resource": "" }
q21513
BikaSetup.getAnalysisServicesVocabulary
train
def getAnalysisServicesVocabulary(self):
    """Get all active Analysis Services from Bika Setup and return them
    as a Display List.
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    brains = bsc(portal_type='AnalysisService', is_active=True)
python
{ "resource": "" }
q21514
BikaSetup.getPrefixFor
train
def getPrefixFor(self, portal_type):
    """Return the prefix for a portal_type.
    If not found, simply uses the portal_type itself
    """
    prefix
python
{ "resource": "" }
q21515
BikaSetup.getRejectionReasonsItems
train
def getRejectionReasonsItems(self):
    """Return the list of predefined rejection reasons
    """
    reasons = self.getRejectionReasons()
    if not reasons:
        return []
    reasons = reasons[0]
python
{ "resource": "" }
q21516
AnalysisRequestRejectBase.get_rejection_reasons
train
def get_rejection_reasons(self, keyword=None):
    """Returns a list with the rejection reasons as strings

    :param keyword: set of rejection reasons to be retrieved.
        Possible values are:
        - 'selected': Get, amongst the set of predefined reasons, the ones
          selected
        - 'other': Get the user free-typed reason for rejection
        - None: Get all rejection reasons
    :return: list of rejection reasons as strings or an empty list
    """
    keys = ['selected', 'other']
    if keyword is None:
        return sum(map(self.get_rejection_reasons, keys), [])
python
{ "resource": "" }
q21517
BikaCatalogTool.softClearFindAndRebuild
train
def softClearFindAndRebuild(self):
    """Empties the catalog, then finds all contentish objects by querying
    the uid_catalog and reindexes them. This may take a long time and will
    not care about missing objects in uid_catalog.
    """
    logger.info('Soft cleaning and rebuilding %s...' % self.id)
    try:
        at = getToolByName(self, 'archetype_tool')
        types = [k for k, v in at.catalog_map.items() if self.id in v]
        self.counter = 0
        self.manage_catalogClear()
        # Getting UID catalog
        portal = getToolByName(self, 'portal_url').getPortalObject()
        uid_c = getToolByName(portal, 'uid_catalog')
        brains = uid_c(portal_type=types)
        self.total = len(brains)
python
{ "resource": "" }
q21518
ARResultsInterpretationView.get_text
train
def get_text(self, department, mode="raw"):
    """Returns the text saved for the selected department
    """
    row = self.context.getResultsInterpretationByDepartment(department)
    rt
python
{ "resource": "" }
q21519
Client.getContactUIDForUser
train
def getContactUIDForUser(self):
    """Get the UID of the contact associated with the authenticated user
    """
    membership_tool = api.get_tool("portal_membership")
python
{ "resource": "" }
q21520
Client.getAnalysisCategories
train
def getAnalysisCategories(self):
    """Return all available analysis categories
    """
    bsc = api.get_tool("bika_setup_catalog")
    cats = []
    for st in bsc(portal_type="AnalysisCategory",
python
{ "resource": "" }
q21521
Client.getContacts
train
def getContacts(self, only_active=True):
    """Return an array containing the contacts from this Client
    """
    contacts
python
{ "resource": "" }
q21522
Client.getDecimalMark
train
def getDecimalMark(self):
    """Return the decimal mark to be used on reports for this client

    If the client has DefaultDecimalMark selected, the Default value from
    the LIMS Setup will be returned. Otherwise, will return the value of
    DecimalMark.
python
{ "resource": "" }
q21523
Client.getCountry
train
def getCountry(self, default=None):
    """Return the Country from the Physical or Postal Address
    """
    physical_address = self.getPhysicalAddress().get("country", default)
python
{ "resource": "" }
q21524
WorksheetImporter.get_rows
train
def get_rows(self, startrow=3, worksheet=None):
    """Returns a generator for all rows in a sheet.
    Each row contains a dictionary where the key is the value of the
    first row of the sheet for each column. The data values are returned
    in utf-8 format. Starts to consume data from startrow.
    """
    headers = []
    row_nr = 0
    worksheet = worksheet if worksheet else self.worksheet
    for row in worksheet.rows:  # .iter_rows():
        row_nr += 1
        if row_nr == 1:
            # headers = [cell.internal_value for cell in row]
            headers = [cell.value for cell in row]
            continue
        if row_nr % 1000 == 0:
            transaction.savepoint()
        if row_nr <= startrow:
            continue
        # row = [_c(cell.internal_value).decode('utf-8') for cell in row]
        new_row = []
        for cell in row:
            value = cell.value
            if value is None:
python
{ "resource": "" }
q21525
WorksheetImporter.to_bool
train
def to_bool(self, value):
    """Converts a sheet string value to a boolean value.
    Needed because of utf-8 conversions.
    """
    try:
        value = value.lower()
    except:
        pass
    try:
        value = value.encode('utf-8')
    except:
        pass
python
{ "resource": "" }
q21526
WorksheetImporter.get_object
train
def get_object(self, catalog, portal_type, title=None, **kwargs):
    """This will return an object from the catalog.
    Logs a message and returns None if no object or multiple objects found.

    All keyword arguments are passed verbatim to the contentFilter
    """
    if not title and not kwargs:
        return None
    contentFilter = {"portal_type": portal_type}
    if title:
        contentFilter['title'] = to_unicode(title)
    contentFilter.update(kwargs)
    brains = catalog(contentFilter)
    if len(brains) > 1:
        logger.info("More than one object found for %s" % contentFilter)
python
{ "resource": "" }
q21527
Analysis_Services.get_relations
train
def get_relations(self, service_title, default_obj, obj_type,
                  catalog_name, sheet_name, column):
    """Return an array of objects of the specified type in accordance
    with the object titles defined in the sheet specified in 'sheet_name'
    and the service set in the parameter 'service_title'.
    If a default_obj is set, it will be included in the returned array.
    """
    out_objects = [default_obj] if default_obj else []
    cat = getToolByName(self.context, catalog_name)
    worksheet = self.workbook.get_sheet_by_name(sheet_name)
    if not worksheet:
        return out_objects
    for row in self.get_rows(3, worksheet=worksheet):
python
{ "resource": "" }
q21528
fix_workflow_transitions
train
def fix_workflow_transitions(portal):
    """Replace target states from some workflow statuses
    """
    logger.info("Fixing workflow transitions...")
    tochange = [
        {'wfid': 'bika_duplicateanalysis_workflow',
         'trid': 'submit',
         'changes': {
             'new_state_id': 'to_be_verified',
             'guard_expr': ''
         },
         'update': {
             'catalog': CATALOG_ANALYSIS_LISTING,
             'portal_type': 'DuplicateAnalysis',
             'status_from': 'attachment_due',
             'status_to': 'to_be_verified'
         }}
    ]

    wtool = api.get_tool('portal_workflow')
    for item in tochange:
        wfid = item['wfid']
        trid = item['trid']
        workflow = wtool.getWorkflowById(wfid)
        transitions = workflow.transitions
        transition = transitions[trid]
        changes = item.get('changes', {})
        if 'new_state_id' in changes:
            new_state_id = changes['new_state_id']
            oldstate = transition.new_state_id
            logger.info(
                "Replacing target state '{0}' from '{1}.{2}' to {3}"
                .format(oldstate, wfid, trid, new_state_id)
            )
            transition.new_state_id = new_state_id

        if 'guard_expr' in changes:
            new_guard = changes['guard_expr']
            if not new_guard:
                transition.guard = None
                logger.info(
                    "Removing guard expression from '{0}.{1}'"
                    .format(wfid, trid))
            else:
                guard = transition.getGuard()
                guard.expr = Expression(new_guard)
                transition.guard = guard
                logger.info(
python
{ "resource": "" }
q21529
GetSampleStickers.get_default_sticker_id
train
def get_default_sticker_id(self):
    """Gets the default sticker for that content type depending on the
    requested size.

    :return: A sticker ID as string
    """
    size = self.request.get('size', '')
    if size
python
{ "resource": "" }
q21530
IdentifiersIndexer
train
def IdentifiersIndexer(instance):
    """Return a list of unique Identifier strings

    This populates the Identifiers Keyword index, but with some
    replacements to prevent the word-splitter etc from taking effect.
python
{ "resource": "" }
q21531
IHaveIdentifiersSchemaExtender.getOrder
train
def getOrder(self, schematas):
    """Return modified order of field schematas.
    """
    schemata = self.context.schema['description'].schemata
    fields = schematas[schemata]
python
{ "resource": "" }
q21532
AbstractAnalysis.getVerificators
train
def getVerificators(self):
    """Returns the user ids of the users that verified this analysis
    """
    verifiers = list()
    actions = ["verify", "multi_verify"]
    for event in wf.getReviewHistory(self):
python
{ "resource": "" }
q21533
AbstractAnalysis.getDefaultUncertainty
train
def getDefaultUncertainty(self, result=None):
    """Return the uncertainty value, if the result falls within
    specified ranges for the service from which this analysis was derived.
    """
    if result is None:
        result = self.getResult()

    uncertainties = self.getUncertainties()
    if uncertainties:
        try:
            res = float(result)
        except (TypeError, ValueError):
            # if analysis result is not a number, then we assume in range
            return None

        for d in uncertainties:
            _min = float(d['intercept_min'])
            _max = float(d['intercept_max'])
            if _min <= res <= _max:
                if str(d['errorvalue']).strip().endswith('%'):
python
{ "resource": "" }
q21534
AbstractAnalysis.setUncertainty
train
def setUncertainty(self, unc):
    """Sets the uncertainty for this analysis.

    If the result is a Detection Limit or the value is below the LDL or
    above the UDL, sets the uncertainty value to 0.
    """
    # Uncertainty calculation on DL
    # https://jira.bikalabs.com/browse/LIMS-1808
    if self.isAboveUpperDetectionLimit() or \
python
{ "resource": "" }
q21535
AbstractAnalysis.isBelowLowerDetectionLimit
train
def isBelowLowerDetectionLimit(self):
    """Returns True if the result is below the Lower Detection Limit or
    if the Lower Detection Limit has been manually set
    """
    if self.isLowerDetectionLimit():
        return True

    result = self.getResult()
    if result
python
{ "resource": "" }
q21536
AbstractAnalysis.isAboveUpperDetectionLimit
train
def isAboveUpperDetectionLimit(self):
    """Returns True if the result is above the Upper Detection Limit or
    if the Upper Detection Limit has been manually set
    """
    if self.isUpperDetectionLimit():
        return True

    result = self.getResult()
    if result
python
{ "resource": "" }
q21537
AbstractAnalysis.getExponentialFormatPrecision
train
def getExponentialFormatPrecision(self, result=None):
    """Returns the precision for the Analysis Service and result provided.
    Results with a precision value above this exponential format
    precision should be formatted as scientific notation.

    If the "Calculate Precision according to Uncertainty" option is not
    set, the method will return the exponential precision value set in
    the Schema. Otherwise, it will calculate the precision value
    according to the Uncertainty and the result.

    If "Calculate Precision from the Uncertainty" is set but no result is
    provided and no uncertainty values are set, returns the fixed
    exponential precision.

    Will return positive values if the result is below 0 and will return
    0 or positive values if the result is above 0.

    Given an analysis service with fixed exponential format precision
python
{ "resource": "" }
q21538
AbstractAnalysis.getPrecision
train
def getPrecision(self, result=None):
    """Returns the precision for the Analysis.

    - If ManualUncertainty is set, calculates the precision of the
      result in accordance with the manual uncertainty set.

    - If Calculate Precision from Uncertainty is set in Analysis Service,
      calculates the precision in accordance with the uncertainty
      inferred from uncertainties ranges.

    - If neither Manual Uncertainty nor Calculate Precision from
      Uncertainty are set, returns the precision from the Analysis
      Service.

    - If you have a number with zero uncertainty: If you roll a pair of
      dice and observe five spots, the number of spots is 5. This is a raw
python
{ "resource": "" }
q21539
AbstractAnalysis.getAnalyst
train
def getAnalyst(self):
    """Returns the stored Analyst or the user who submitted the result
    """
    analyst = self.getField("Analyst").get(self)
python
{ "resource": "" }
q21540
AbstractAnalysis.getWorksheet
train
def getWorksheet(self):
    """Returns the Worksheet to which this analysis belongs, or None
    """
    worksheet = self.getBackReferences('WorksheetAnalysis')
    if not worksheet:
        return None
    if len(worksheet) >
python
{ "resource": "" }
q21541
AbstractAnalysis.getAttachmentUIDs
train
def getAttachmentUIDs(self):
    """Used to populate metadata, so that we don't need full objects of
    analyses when working with their attachments.
    """
python
{ "resource": "" }
q21542
AbstractAnalysis.remove_duplicates
train
def remove_duplicates(self, ws):
    """When this analysis is unassigned from a worksheet, this function
    is responsible for deleting DuplicateAnalysis objects from the ws.
    """
    for analysis in ws.objectValues():
python
{ "resource": "" }
q21543
AbstractAnalysis.getInterimValue
train
def getInterimValue(self, keyword):
    """Returns the value of an interim of this analysis
    """
    interims = filter(lambda item: item["keyword"] == keyword,
                      self.getInterimFields())
    if not interims:
        logger.warning("Interim '{}' for analysis '{}' not found"
                       .format(keyword, self.getKeyword()))
        return None
    if len(interims) > 1:
python
{ "resource": "" }
q21544
checkUserAccess
train
def checkUserAccess(worksheet, request, redirect=True):
    """Checks if the current user has been granted access to the
    worksheet. If the user is an analyst without the LabManager, LabClerk
    or RegulatoryInspector roles, and the option 'Allow analysts only to
    access to the Worksheets on which they are assigned' is ticked, the
    user will be redirected to the main Worksheets view.
    Returns False if the user has no access, otherwise returns True.
    """
    # Deny access to foreign analysts
    allowed = worksheet.checkUserAccess()
    if allowed == False and redirect == True:
        msg = _('You do not have sufficient privileges to view '
python
{ "resource": "" }
q21545
showRejectionMessage
train
def showRejectionMessage(worksheet):
    """Adds a portalMessage if a) the worksheet has been rejected and
    replaced by another or b) if the worksheet is the replacement of a
    rejected worksheet. Otherwise, does nothing.
    """
    if hasattr(worksheet, 'replaced_by'):
        uc = getToolByName(worksheet, 'uid_catalog')
        uid = getattr(worksheet, 'replaced_by')
        _ws = uc(UID=uid)[0].getObject()
python
{ "resource": "" }
q21546
get_date
train
def get_date(context, value):
    """Tries to return a DateTime.DateTime object
    """
    if not value:
        return None
    if isinstance(value, DateTime):
        return value
    if isinstance(value, datetime):
        return dt2DT(value)
    if not isinstance(value, basestring):
        return None

    def try_parse(date_string, format):
        if not format:
            return None
        try:
            struct_time = strptime(date_string, format)
            return datetime(*struct_time[:6])
        except ValueError:
            pass
        return None

    def get_locale_format(key, context):
        format = context.translate(key, domain="senaite.core", mapping={})
        # TODO: Is this replacement below strictly necessary?
        return format.replace(r"${", '%').replace('}', '')

    # Try with prioritized formats
    formats = [get_locale_format("date_format_long", context),
               get_locale_format("date_format_short", context),
python
{ "resource": "" }
q21547
ulocalized_time
train
def ulocalized_time(time, long_format=None, time_only=None, context=None,
                    request=None):
    """This function gets a string or a DateTime object as time and
    returns a string with the time formatted

    :param time: The time to process
    :type time: str/DateTime
    :param long_format: If True, return time in long format
    :type long_format: boolean/null
    :param time_only: If True, only returns time.
    :type time_only: boolean/null
    :param context: The current context
    :type context: ATContentType
    :param request: The current request
    :type request: HTTPRequest object
    :returns: The formatted date as string
    :rtype: string
    """
    # if time is a string, we'll try pass it through strptime with the
    # various formats defined.
    time = get_date(context, time)
    if not time
python
{ "resource": "" }
q21548
BrowserView.python_date_format
train
def python_date_format(self, long_format=None, time_only=False):
    """This converts bika domain date format msgstrs to Python strftime
    format strings, by the same rules as ulocalized_time.
    XXX i18nl10n.py may change, and that is where this code is taken from.
    """
    # get msgid
    msgid = long_format and 'date_format_long' or 'date_format_short'
    if time_only:
        msgid = 'time_format'
    # get the formatstring
    formatstring = translate(msgid, domain="senaite.core",
                             context=self.request)
python
{ "resource": "" }
q21549
AbstractBaseAnalysis.getVATAmount
train
def getVATAmount(self):
    """Compute VAT Amount from the Price and system configured VAT
    """
python
{ "resource": "" }
q21550
AbstractBaseAnalysis.getDiscountedPrice
train
def getDiscountedPrice(self):
    """Compute discounted price excl. VAT
    """
    price = self.getPrice()
    price = price and price or 0
    discount = self.bika_setup.getMemberDiscount()
python
{ "resource": "" }
q21551
AbstractBaseAnalysis.getDiscountedBulkPrice
train
def getDiscountedBulkPrice(self):
    """Compute discounted bulk price excl. VAT
    """
    price = self.getBulkPrice()
python
{ "resource": "" }
q21552
AbstractBaseAnalysis.getTotalPrice
train
def getTotalPrice(self):
    """Compute total price including VAT
    """
    price = self.getPrice()
python
{ "resource": "" }
q21553
AbstractBaseAnalysis.getTotalBulkPrice
train
def getTotalBulkPrice(self):
    """Compute total bulk price
    """
    price = self.getBulkPrice()
python
{ "resource": "" }
q21554
AbstractBaseAnalysis.getTotalDiscountedPrice
train
def getTotalDiscountedPrice(self):
    """Compute total discounted price
    """
    price = self.getDiscountedPrice()
python
{ "resource": "" }
q21555
AbstractBaseAnalysis.getTotalDiscountedBulkPrice
train
def getTotalDiscountedBulkPrice(self):
    """Compute total discounted corporate bulk price
    """
    price = self.getDiscountedCorporatePrice()
    vat = self.getVAT()
python
{ "resource": "" }
q21556
AbstractBaseAnalysis.getLowerDetectionLimit
train
def getLowerDetectionLimit(self):
    """Returns the Lower Detection Limit for this service as a floatable
    """
    ldl
python
{ "resource": "" }
q21557
AbstractBaseAnalysis.getUpperDetectionLimit
train
def getUpperDetectionLimit(self):
    """Returns the Upper Detection Limit for this service as a floatable
    """
    udl
python
{ "resource": "" }
q21558
WorkflowActionSubmitAdapter.get_interims_data
train
def get_interims_data(self):
    """Returns a dictionary with the interims data
    """
    form = self.request.form
    if 'item_data' not in form:
        return {}
    item_data = {}
    if type(form['item_data']) == list:
python
{ "resource": "" }
q21559
get_calculation_dependants_for
train
def get_calculation_dependants_for(service):
    """Collect all services which depend on this service

    :param service: Analysis Service Object/ZCatalog Brain
    :returns: List of services that depend on this service
    """

    def calc_dependants_gen(service, collector=None):
        """Generator for recursive resolution of dependant services.
        """
        # The UID of the service
        service_uid = api.get_uid(service)

        # maintain an internal dependency mapping
        if collector is None:
            collector = {}

        # Stop iteration if we processed this service already
        if service_uid in collector:
            raise StopIteration

        # Get the dependant calculations of the service
        # (calculations that use the service in their formula).
        calc_uids = get_backreferences(
            service, relationship="CalculationDependentServices")

        for calc_uid in calc_uids:
            # Get the calculation object
            calc = api.get_object_by_uid(calc_uid)

            # Get the Analysis Services which have this calculation assigned
            dep_service_uids = get_backreferences(
                calc, relationship='AnalysisServiceCalculation')

            for dep_service_uid in dep_service_uids:
python
{ "resource": "" }
q21560
get_service_dependencies_for
train
def get_service_dependencies_for(service):
    """Calculate the dependencies for the given service.
    """
    dependants = get_calculation_dependants_for(service)
    dependencies = get_calculation_dependencies_for(service)
python
{ "resource": "" }
q21561
InstrumentQCFailuresViewlet.get_failed_instruments
train
def get_failed_instruments(self):
    """Find invalid instruments

    - instruments that have failed QC tests
    - instruments whose certificate is out of date
    - instruments which are disposed until next calibration test

    Return a dictionary with all info about expired/invalid instruments
    """
    bsc = api.get_tool("bika_setup_catalog")
    insts = bsc(portal_type="Instrument", is_active=True)
    for i in insts:
        i = i.getObject()
        instr = {
            'uid': i.UID(),
            'title': i.Title(),
        }
        if i.isValidationInProgress():
            instr['link'] = '<a href="%s/validations">%s</a>' % (
                i.absolute_url(), i.Title()
            )
            self.nr_failed += 1
            self.failed['validation'].append(instr)
        elif i.isCalibrationInProgress():
            instr['link'] = '<a href="%s/calibrations">%s</a>' % (
                i.absolute_url(), i.Title()
            )
            self.nr_failed += 1
            self.failed['calibration'].append(instr)
python
{ "resource": "" }
q21562
InstrumentQCFailuresViewlet.available
train
def available(self):
    """Control availability of the viewlet
    """
    url = api.get_url(self.context)
    # render on the portal root
    if self.context == api.get_portal():
        return True
    # render on the front-page
python
{ "resource": "" }
q21563
InstrumentQCFailuresViewlet.render
train
def render(self):
    """Render the viewlet
    """
    if not self.available():
        return ""

    mtool = api.get_tool("portal_membership")
    member = mtool.getAuthenticatedMember()
    roles = member.getRoles()
    allowed = "LabManager" in roles or "Manager" in roles
python
{ "resource": "" }
q21564
AuditLogView.get_widget_for
train
def get_widget_for(self, fieldname):
    """Lookup the widget
    """
python
{ "resource": "" }
q21565
AuditLogView.get_widget_label_for
train
def get_widget_label_for(self, fieldname, default=None):
    """Lookup the widget of the field and return the label
    """
python
{ "resource": "" }
q21566
AuditLogView.translate_state
train
def translate_state(self, s):
    """Translate the given state string
    """
    if not isinstance(s, basestring):
python
{ "resource": "" }
q21567
AuditLogView.folderitems
train
def folderitems(self):
    """Generate folderitems for each version
    """
    items = []
    # get the snapshots
    snapshots = get_snapshots(self.context)
    # reverse the order to get the most recent change first
    snapshots = list(reversed(snapshots))
    # set the total number of items
    self.total = len(snapshots)
    # slice a batch
    batch = snapshots[self.limit_from:self.limit_from + self.pagesize]

    for snapshot in batch:
        item = self.make_empty_item(**snapshot)
        # get the version of the snapshot
        version = get_snapshot_version(self.context, snapshot)
        # Version
        item["version"] = version
        # get the metadata of the diff
        metadata = get_snapshot_metadata(snapshot)
        # Modification Date
        m_date = metadata.get("modified")
        item["modified"] = self.to_localized_time(m_date)
        # Actor
        actor = metadata.get("actor")
        item["actor"] = actor
python
{ "resource": "" }
q21568
InstrumentResultsFileParser._addRawResult
train
def _addRawResult(self, resid, values={}, override=False):
    """Adds a set of raw results for an object with id=resid
    resid is usually an Analysis Request ID or Worksheet's Reference
    Analysis ID. The values are a dictionary in which the keys are
    analysis service keywords and the values, another dictionary with
    the key,value results.
    The column 'DefaultResult' must be provided, because it is used to
    map to the column from which the default result must be retrieved.

    Example:
    resid  = 'DU13162-001-R1'
    values = {
        'D2': {'DefaultResult': 'Final Conc',
python
{ "resource": "" }
q21569
InstrumentResultsFileParser.getResultsTotalCount
train
def getResultsTotalCount(self):
    """The total number of analysis results parsed
python
{ "resource": "" }
q21570
InstrumentResultsFileParser.getAnalysisKeywords
train
def getAnalysisKeywords(self):
    """The analysis service keywords found
    """
    analyses = []
    for rows in self.getRawResults().values():
python
{ "resource": "" }
q21571
InstrumentTXTResultsFileParser.read_file
train
def read_file(self, infile):
    """Given an input file read its contents, strip whitespace from the
    beginning and end of each line and return a list of the preprocessed
    lines read.

    :param infile: file that contains the data to be read
    :return: list of the read lines with stripped whitespace
    """
    try:
        encoding = self._encoding if self._encoding else None
        mode = 'r' if self._encoding else 'rU'
python
{ "resource": "" }
q21572
AnalysisResultsImporter.attach_attachment
train
def attach_attachment(self, analysis, attachment):
    """Attach a file or a given set of files to an analysis

    :param analysis: analysis where the files are to be attached
    :param attachment: files to be attached. This can be either a single
        file or a list of files
    :return: None
    """
    if not attachment:
        return
    if isinstance(attachment, list):
        for attach in attachment:
            self.attach_attachment(analysis, attach)
        return
    # current attachments
    an_atts = analysis.getAttachment()
    atts_filenames = [att.getAttachmentFile().filename for att in an_atts]
python
{ "resource": "" }
q21573
format_numeric_result
train
def format_numeric_result(analysis, result, decimalmark='.', sciformat=1):
    """Returns the formatted number part of a results value. This is
    responsible for deciding the precision and notation of numeric values
    in accordance with the uncertainty. If a non-numeric result value is
    given, the value will be returned unchanged.

    The following rules apply:

    If the "Calculate precision from uncertainties" is enabled in the
    Analysis service, and

    a) If the non-decimal number of digits of the result is above the
       service's ExponentialFormatPrecision, the result will be
       formatted in scientific notation.

       Example:
       Given an Analysis with an uncertainty of 37 for a range of
       results between 30000 and 40000, with an
       ExponentialFormatPrecision equal to 4 and a result of 32092,
       this method will return 3.2092E+04

    b) If the number of digits of the integer part of the result is
       below the ExponentialFormatPrecision, the result will be
       formatted as decimal notation and the result will be rounded in
       accordance with the precision (calculated from the uncertainty)

       Example:
       Given an Analysis with an uncertainty of 0.22 for a range of
       results between 1 and 10, with an ExponentialFormatPrecision
       equal to 4 and a result of 5.234, this method will return 5.2

    If the "Calculate precision from Uncertainties" is disabled in the
    analysis service, the same rules described above apply, but the
    precision used for rounding the result is not calculated from the
    uncertainty. The fixed length precision is used instead.

    For further details, visit
    https://jira.bikalabs.com/browse/LIMS-1334

    The default decimal mark '.' will be replaced by the decimalmark
    specified.

    :param analysis: the analysis from which the uncertainty, precision
python
{ "resource": "" }
q21574
ReferenceResultsWidget._get_spec_value
train
def _get_spec_value(self, form, uid, key, default=''):
    """Returns the value assigned to the passed in key for the analysis
    service uid from the passed in form.

    If check_floatable is true, will return the passed in default if the
    obtained value is not floatable

    :param form: form being submitted
    :param uid: uid of the Analysis Service the specification relates
    :param key: id of the specs param to get (e.g. 'min')
    :param check_floatable: check if the value is floatable
    :param default: fallback value that will be returned by default
    :type default: str, None
python
{ "resource": "" }
q21575
ReferenceResultsWidget.ReferenceResults
train
def ReferenceResults(self, field, allow_edit=False):
    """Render Reference Results Table
    """
    instance = getattr(self, "instance", field.aq_parent)
    table = api.get_view("table_reference_results",
                         context=instance,
python
{ "resource": "" }
q21576
RemarksField.set
train
def set(self, instance, value, **kwargs):
    """Adds the value to the existing text stored in the field,
    along with a small divider showing username and date of this entry.
    """
    if not value:
        return

    value = value.strip()
    date = DateTime().rfc822()
    user = getSecurityManager().getUser()
    username = user.getUserName()
    divider = "=== {} ({})".format(date, username)

    existing_remarks = instance.getRawRemarks()
python
{ "resource": "" }
q21577
setup_handler
train
def setup_handler(context):
    """SENAITE setup handler
    """
    if context.readDataFile("bika.lims_various.txt") is None:
        return

    logger.info("SENAITE setup handler [BEGIN]")

    portal = context.getSite()

    # Run Installers
    remove_default_content(portal)
    hide_navbar_items(portal)
    reindex_content_structure(portal)
    setup_groups(portal)
    setup_catalog_mappings(portal)
    setup_core_catalogs(portal)

    # Setting up all LIMS catalogs
python
{ "resource": "" }
q21578
remove_default_content
train
def remove_default_content(portal):
    """Remove default Plone contents
    """
    logger.info("*** Delete Default Content ***")

    # Get the list of object ids for portal
    object_ids
python
{ "resource": "" }
q21579
hide_navbar_items
train
def hide_navbar_items(portal):
    """Hide root items in navigation
    """
    logger.info("*** Hide Navigation Items ***")

    # Get the list of object ids for portal
    object_ids = portal.objectIds()
    object_ids = filter(lambda id: id in object_ids,
python
{ "resource": "" }
q21580
reindex_content_structure
train
def reindex_content_structure(portal):
    """Reindex contents generated by Generic Setup
    """
    logger.info("*** Reindex content structure ***")

    def reindex(obj, recurse=False):
        # skip catalog tools etc.
        if api.is_object(obj):
            obj.reindexObject()
        if recurse and hasattr(aq_base(obj), "objectValues"):
python
{ "resource": "" }
q21581
setup_groups
train
def setup_groups(portal):
    """Setup roles and groups for BECHEM
    """
    logger.info("*** Setup Roles and Groups ***")

    portal_groups = api.get_tool("portal_groups")
    for gdata in GROUPS:
        group_id = gdata["id"]
        # create the group and grant the roles
        if group_id not in portal_groups.listGroupIds():
            logger.info("+++ Adding group {title} ({id})".format(**gdata))
            portal_groups.addGroup(group_id,
                                   title=gdata["title"],
python
{ "resource": "" }
q21582
setup_catalog_mappings
train
def setup_catalog_mappings(portal):
    """Setup portal_type -> catalog mappings
    """
    logger.info("*** Setup Catalog
python
{ "resource": "" }
q21583
setup_core_catalogs
train
def setup_core_catalogs(portal):
    """Setup core catalogs
    """
    logger.info("*** Setup Core Catalogs ***")

    to_reindex = []
    for catalog, name, attribute, meta_type in INDEXES:
        c = api.get_tool(catalog)
        indexes = c.indexes()
        if name in indexes:
            logger.info("*** Index '%s' already in Catalog [SKIP]" % name)
            continue

        logger.info("*** Adding Index '%s' for field '%s' to catalog ..."
                    % (meta_type, name))

        # do we still need ZCTextIndexes?
        if meta_type == "ZCTextIndex":
            addZCTextIndex(c, name)
        else:
            c.addIndex(name, meta_type)

        # get the new created index
        index = c._catalog.getIndex(name)

        # set the indexed attributes
        if hasattr(index, "indexed_attrs"):
            index.indexed_attrs = [attribute or name]

        to_reindex.append((c, name))

        logger.info("*** Added Index '%s' for field '%s' to catalog [DONE]"
                    % (meta_type, name))

    # catalog columns
    for catalog, name in COLUMNS:
        c = api.get_tool(catalog)
        if name not in c.schema():
python
{ "resource": "" }
q21584
setup_auditlog_catalog
train
def setup_auditlog_catalog(portal):
    """Setup auditlog catalog
    """
    logger.info("*** Setup Audit Log Catalog ***")

    catalog_id = auditlog_catalog.CATALOG_AUDITLOG
    catalog = api.get_tool(catalog_id)

    for name, meta_type in auditlog_catalog._indexes.iteritems():
        indexes = catalog.indexes()
        if name in indexes:
            logger.info("*** Index '%s' already in Catalog [SKIP]" % name)
            continue

        logger.info("*** Adding Index '%s' for field '%s' to catalog ..."
                    % (meta_type, name))
        catalog.addIndex(name, meta_type)

        # Setup TextIndexNG3 for listings
        # XXX is there another way to do this?
        if meta_type == "TextIndexNG3":
            index = catalog._catalog.getIndex(name)
            index.index.default_encoding = "utf-8"
            index.index.query_parser = "txng.parsers.en"
            index.index.autoexpand = "always"
            index.index.autoexpand_limit = 3

        logger.info("*** Added Index '%s' for field '%s' to catalog [DONE]"
                    % (meta_type, name))
python
{ "resource": "" }
q21585
_createWorksheet
train
def _createWorksheet(base, worksheettemplate, analyst):
    """This function creates a new worksheet, taking advantage of the
    analyst variable. If no analyst is defined, the system will pick the
    first one obtained in a query.
    """
    if not(analyst):
        # Get any analyst
        analyst = getUsers(base, ['Manager', 'LabManager', 'Analyst'])[1]
    folder = base.bika_setup.worksheets
    _id = folder.invokeFactory('Worksheet', id=tmpID())
python
{ "resource": "" }
q21586
doWorksheetLogic
train
def doWorksheetLogic(base, action, analysis):
    """This function checks whether the action contains worksheet actions.
    There is a selection list in each action section with the following
    options and consequences:

    1) "To the current worksheet" (selected by default)
    2) "To another worksheet"
    3) "Create another worksheet"
    4) "No worksheet"

    - If option 1) is selected, the Analyst selection list will not be
      displayed. Since the action doesn't require the new analysis to be
      added to another worksheet, the function will try to add the
      analysis to the same worksheet as the base analysis. If the base
      analysis is not assigned to a worksheet, no worksheet will be
      assigned to the new analysis.

    - If option 2) is selected, the Analyst selection list will be
      displayed.

    - If option 2) is selected and an analyst has also been selected,
      the system will search for the latest worksheet in status "open"
      for the selected analyst and will add the analysis to that
      worksheet (the system also searches for the worksheet template if
      defined). If the system doesn't find any match, another worksheet
      assigned to the selected analyst and containing the analysis must
      be automatically created.

    - If option 2) is selected but no analyst is selected, the system
      will search for the latest worksheet in the status "open"
      regardless of the analyst assigned and will add the analysis to
      that worksheet. If there isn't any open worksheet available, then
      go to option 3).

    - If option 3) is selected, a new worksheet with the defined analyst
      will be created. If no analyst is defined for the original
      analysis, the system will create a new worksheet and assign the
      same analyst as the original analysis to which the rule applies.
      If the original analysis doesn't have any analyst assigned, the
      system will assign the same analyst that was assigned to the
      latest worksheet available in the system. If there isn't any
      worksheet created yet, use the first active user with role
      "analyst" available.

    - If option 4) is selected, the Analyst selection list will not be
      displayed. The analysis (duplicate, repeat, whatever) will be
      created, but not assigned to any worksheet, so it will stay "on
      queue", assigned to the same Analysis Request as the original
      analysis for which the rule has been triggered.
    """
    otherWS = action.get('otherWS', False)
    worksheet_catalog
python
{ "resource": "" }
q21587
ARImport.workflow_before_validate
train
def workflow_before_validate(self):
    """This function transposes values from the provided file into the
    ARImport object's fields, and checks for invalid values.

    If errors are found:
    - Validation transition is aborted.
    - Errors are stored on object and displayed to user.
    """
    # Re-set the errors on this ARImport each time validation is
    # attempted. When errors are detected they are immediately appended
    # to this field.
    self.setErrors([])

    self.validate_headers()
    self.validate_samples()

    if self.getErrors():
        addStatusMessage(self.REQUEST, _p('Validation
python
{ "resource": "" }
q21588
ARImport.workflow_script_import
train
def workflow_script_import(self):
    """Create objects from valid ARImport
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    client = self.aq_parent

    title = _('Submitting Sample Import')
    description = _('Creating and initialising objects')
    bar = ProgressBar(self, self.REQUEST, title, description)
    notify(InitialiseProgressBar(bar))

    profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]

    gridrows = self.schema['SampleData'].get(self)
    row_cnt = 0
    for therow in gridrows:
        row = deepcopy(therow)
        row_cnt += 1

        # Profiles are titles, profile keys, or UIDS: convert them to UIDs.
        newprofiles = []
        for title in row['Profiles']:
            objects = [x for x in profiles
                       if title in (x.getProfileKey(), x.UID(), x.Title())]
            for obj in objects:
python
{ "resource": "" }
q21589
ARImport.get_header_values
train
def get_header_values(self):
    """Scrape the "Header" values from the original input file
    """
    lines = self.getOriginalFile().data.splitlines()
    reader = csv.reader(lines)
    header_fields = header_data = []
    for row in reader:
        if not any(row):
            continue
        if row[0].strip().lower() == 'header':
            header_fields = [x.strip() for x in row][1:]
            continue
        if row[0].strip().lower() == 'header data':
            header_data = [x.strip() for x in row][1:]
python
{ "resource": "" }
q21590
ARImport.save_header_data
train
def save_header_data(self):
    """Save values from the file's header row into their schema fields.
    """
    client = self.aq_parent

    headers = self.get_header_values()
    if not headers:
        return False

    # Plain header fields that can be set into plain schema fields:
    for h, f in [
            ('File name', 'Filename'),
            ('No of Samples', 'NrSamples'),
            ('Client name', 'ClientName'),
            ('Client ID', 'ClientID'),
            ('Client Order Number', 'ClientOrderNumber'),
            ('Client Reference', 'ClientReference')
    ]:
        v = headers.get(h, None)
        if v:
            field = self.schema[f]
            field.set(self, v)
        del (headers[h])

    # Primary Contact
    v = headers.get('Contact', None)
    contacts = [x for x in client.objectValues('Contact')]
    contact = [c for c in contacts if c.Title() == v]
    if contact:
        self.schema['Contact'].set(self, contact)
    else:
        self.error("Specified contact '%s' does not exist; using '%s'" %
                   (v, contacts[0].Title()))
        self.schema['Contact'].set(self, contacts[0])
    del (headers['Contact'])

    # CCContacts
python
{ "resource": "" }
q21591
ARImport.get_sample_values
train
def get_sample_values(self):
    """Read the rows specifying Samples and return a dictionary with
    related data.

    keys are:
        headers - row with "Samples" in column 0. These headers are
                  used as dictionary keys in the rows below.
        prices - Row with "Analysis Price" in column 0.
        total_analyses - Row with "Total analyses" in column 0.
        price_totals - Row with "Total price excl Tax" in column 0.
        samples - All other sample rows.
    """
    res = {'samples': []}
    lines = self.getOriginalFile().data.splitlines()
    reader = csv.reader(lines)
    next_rows_are_sample_rows = False
    for row in reader:
        if not any(row):
            continue
        if next_rows_are_sample_rows:
            vals = [x.strip() for x in row]
            if not any(vals):
                continue
            res['samples'].append(zip(res['headers'], vals))
python
{ "resource": "" }
q21592
ARImport.get_batch_header_values
train
def get_batch_header_values(self):
    """Scrape the "Batch Header" values from the original input file
    """
    lines = self.getOriginalFile().data.splitlines()
    reader = csv.reader(lines)
    batch_headers = batch_data = []
    for row in reader:
        if not any(row):
            continue
        if row[0].strip().lower() == 'batch header':
            batch_headers = [x.strip() for x in row][1:]
            continue
        if row[0].strip().lower() == 'batch data':
python
{ "resource": "" }
q21593
ARImport.create_or_reference_batch
train
def create_or_reference_batch(self):
    """Save reference to batch, if existing batch specified
    Create new batch, if possible with specified values
    """
    client = self.aq_parent
    batch_headers = self.get_batch_header_values()
    if not batch_headers:
        return False
    # if the Batch's Title is specified and exists, no further
    # action is required. We will just set the Batch field to
    # use the existing object.
    batch_title = batch_headers.get('title', False)
python
{ "resource": "" }
q21594
ARImport.validate_headers
train
def validate_headers(self):
    """Validate headers fields from schema
    """
    pc = getToolByName(self, 'portal_catalog')
    pu = getToolByName(self, "plone_utils")

    client = self.aq_parent

    # Verify Client Name
    if self.getClientName() != client.Title():
        self.error("%s: value is invalid (%s)." % (
            'Client name', self.getClientName()))

    # Verify Client ID
    if self.getClientID() != client.getClientID():
        self.error("%s: value is invalid (%s)." % (
            'Client ID', self.getClientID()))

    existing_arimports = pc(portal_type='ARImport',
                            review_state=['valid', 'imported'])

    # Verify Client Order Number
    for arimport in existing_arimports:
        if arimport.UID == self.UID() \
                or not arimport.getClientOrderNumber():
            continue
        arimport = arimport.getObject()

        if arimport.getClientOrderNumber() == self.getClientOrderNumber():
            self.error('%s: already used by existing ARImport.' %
                       'ClientOrderNumber')
            break

    # Verify Client Reference
    for arimport in existing_arimports:
        if arimport.UID == self.UID() \
                or not arimport.getClientReference():
            continue
        arimport = arimport.getObject()
        if arimport.getClientReference() == self.getClientReference():
            self.error('%s: already used by existing ARImport.' %
                       'ClientReference')
            break

    # getCCContacts has no value if object is not complete (eg during test)
    if self.getCCContacts():
        cc_contacts = self.getCCContacts()[0]
        contacts = [x for x in
python
{ "resource": "" }
q21595
ARImport.validate_samples
train
def validate_samples(self):
    """Scan through the SampleData values and make sure that each one
    is correct
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    keywords = bsc.uniqueValuesFor('getKeyword')
    profiles = []
    for p in bsc(portal_type='AnalysisProfile'):
        p = p.getObject()
        profiles.append(p.Title())
        profiles.append(p.getProfileKey())

    row_nr = 0
    for gridrow in self.getSampleData():
        row_nr += 1

        # validate against sample and ar schemas
        for k, v in gridrow.items():
            if k in ['Analysis', 'Profiles']:
                break
            if k in sample_schema:
                try:
                    self.validate_against_schema(
                        sample_schema, row_nr, k, v)
                    continue
                except ValueError as e:
                    self.error(e.message)
                    break
            if k in ar_schema:
                try:
                    self.validate_against_schema(
                        ar_schema, row_nr, k, v)
                except ValueError as e:
                    self.error(e.message)
python
{ "resource": "" }
q21596
ARImport.get_row_services
train
def get_row_services(self, row):
    """Return a list of services which are referenced in Analyses.
    values may be UID, Title or Keyword.
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    services = set()
    for val in row.get('Analyses', []):
        brains = bsc(portal_type='AnalysisService', getKeyword=val)
        if not brains:
            brains = bsc(portal_type='AnalysisService', title=val)
        if not brains:
python
{ "resource": "" }
q21597
ARImport.get_row_profile_services
train
def get_row_profile_services(self, row):
    """Return a list of services which are referenced in profiles
    values may be UID, Title or ProfileKey.
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    services = set()
    profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]
    for val in row.get('Profiles', []):
        objects = [x for x in profiles
                   if val in (x.getProfileKey(), x.UID(), x.Title())]
python
{ "resource": "" }
q21598
ARImport.get_row_container
train
def get_row_container(self, row):
    """Return a sample container
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    val = row.get('Container', False)
    if val:
        brains = bsc(portal_type='Container', UID=row['Container'])
        if brains:
            brains[0].getObject()
python
{ "resource": "" }
q21599
t
train
def t(i18n_msg):
    """Safely translate and convert to UTF8, any zope i18n msgid returned
    from a bikaMessageFactory _
    """
    text = to_unicode(i18n_msg)
    try:
        request = api.get_request()
python
{ "resource": "" }