_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def update_checklist(self, name):
    '''
    Rename this checklist via the API.

    Returns a new Checklist object built from the server reply.
    '''
    response = self.fetch_json(
        uri_path=self.base_uri,
        http_method='PUT',
        query_params={'name': name},
    )
    return self.create_checklist(response)
def add_item(self, query_params=None):
    '''
    Create a new item on this checklist.

    Returns the new item's values as a dict.
    '''
    params = query_params or {}
    return self.fetch_json(
        uri_path=self.base_uri + '/checkItems',
        http_method='POST',
        query_params=params,
    )
def remove_item(self, item_id):
    '''
    Delete the item with `item_id` from this checklist.
    '''
    uri = self.base_uri + '/checkItems/' + item_id
    return self.fetch_json(uri_path=uri, http_method='DELETE')
def update_name(self, name):
    """
    Rename the current checklist item.

    Returns a new ChecklistItem object built from the server reply.
    """
    response = self.fetch_json(
        uri_path=self.base_uri + '/name',
        http_method='PUT',
        query_params={'value': name},
    )
    return self.create_checklist_item(
        self.idCard, self.idChecklist, response)
def update_state(self, state):
    """
    Mark this checklist item complete (truthy state) or incomplete.

    Returns a new ChecklistItem object built from the server reply.
    """
    new_value = 'complete' if state else 'incomplete'
    response = self.fetch_json(
        uri_path=self.base_uri + '/state',
        http_method='PUT',
        query_params={'value': new_value},
    )
    return self.create_checklist_item(
        self.idCard, self.idChecklist, response)
def add_authorisation(self, query_params):
    '''
    Inject the API key (and the user auth token, when present) into
    the given query-parameter dict.

    The dict is modified in place and also returned.
    '''
    query_params['key'] = self.api_key
    token = self.user_auth_token
    if token:
        query_params['token'] = token
    return query_params
def check_errors(self, uri, response):
    '''
    Raise on known HTTP error statuses.

    Raises trolly.Unauthorised for a 401 response and
    trolly.ResourceUnavailable for any other non-200 status;
    returns None on success.
    '''
    status = response.status
    if status == 401:
        raise trolly.Unauthorised(uri, response)
    if status != 200:
        raise trolly.ResourceUnavailable(uri, response)
def build_uri(self, path, query_params):
    '''
    Construct the full Trello API URL for `path`, with the given
    query parameters URL-encoded.
    '''
    base = 'https://api.trello.com/1' + self.clean_path(path)
    return base + '?' + urlencode(query_params)
def fetch_json(self, uri_path, http_method='GET', query_params=None,
               body=None, headers=None):
    '''
    Make a call to Trello API and capture JSON response. Raises an error
    when it fails.
    Returns:
        dict: Dictionary with the JSON data
    '''
    query_params = query_params or {}
    headers = headers or {}
    # Every request carries the API key / user token.
    query_params = self.add_authorisation(query_params)
    uri = self.build_uri(uri_path, query_params)
    # Methods that may carry a body default to a JSON content type.
    allowed_methods = ("POST", "PUT", "DELETE")
    if http_method in allowed_methods and 'Content-Type' not in headers:
        headers['Content-Type'] = 'application/json'
    headers['Accept'] = 'application/json'
    response, content = self.client.request(
        uri=uri,
        method=http_method,
        body=body,
        headers=headers
    )
    # Raises trolly errors for 401 / non-200 statuses.
    self.check_errors(uri, response)
    return json.loads(content.decode('utf-8'))
def create_organisation(self, organisation_json):
    '''
    Wrap a JSON payload in an Organisation object.
    Returns:
        Organisation: The organisation described by `organisation_json`.
    '''
    org = organisation_json
    return trolly.organisation.Organisation(
        trello_client=self,
        organisation_id=org['id'],
        name=org['name'],
        data=org,
    )
def create_board(self, board_json):
    '''
    Wrap a JSON payload in a Board object.
    Returns:
        Board: The board described by `board_json`.
    '''
    payload = board_json
    return trolly.board.Board(
        trello_client=self,
        board_id=payload['id'],
        name=payload['name'],
        data=payload,
    )
def create_label(self, label_json):
    '''
    Wrap a JSON payload in a Label object.
    Returns:
        Label: The label described by `label_json`.
    '''
    payload = label_json
    return trolly.label.Label(
        trello_client=self,
        label_id=payload['id'],
        name=payload['name'],
        data=payload,
    )
def create_list(self, list_json):
    '''
    Wrap a JSON payload in a List object.
    Returns:
        List: The list described by `list_json`.
    '''
    payload = list_json
    return trolly.list.List(
        trello_client=self,
        list_id=payload['id'],
        name=payload['name'],
        data=payload,
    )
def create_card(self, card_json):
    '''
    Wrap a JSON payload in a Card object.
    Returns:
        Card: The card described by `card_json`.
    '''
    payload = card_json
    return trolly.card.Card(
        trello_client=self,
        card_id=payload['id'],
        name=payload['name'],
        data=payload,
    )
def create_checklist(self, checklist_json):
    '''
    Wrap a JSON payload in a Checklist object.
    Returns:
        Checklist: The checklist described by `checklist_json`.
    '''
    payload = checklist_json
    return trolly.checklist.Checklist(
        trello_client=self,
        checklist_id=payload['id'],
        name=payload['name'],
        data=payload,
    )
def create_member(self, member_json):
    '''
    Wrap a JSON payload in a Member object.

    Note: unlike the other factories, the display name comes from the
    `fullName` key.
    Returns:
        Member: The member described by `member_json`.
    '''
    payload = member_json
    return trolly.member.Member(
        trello_client=self,
        member_id=payload['id'],
        name=payload['fullName'],
        data=payload,
    )
def get_organisation(self, id, name=None):
    '''
    Build a lightweight Organisation handle for `id`.
    Returns:
        Organisation: The organisation with the given `id`
    '''
    return self.create_organisation({'id': id, 'name': name})
def get_board(self, id, name=None):
    '''
    Build a lightweight Board handle for `id`.
    Returns:
        Board: The board with the given `id`
    '''
    return self.create_board({'id': id, 'name': name})
def get_list(self, id, name=None):
    '''
    Build a lightweight List handle for `id`.
    Returns:
        List: The list with the given `id`
    '''
    return self.create_list({'id': id, 'name': name})
def get_card(self, id, name=None):
    '''
    Build a lightweight Card handle for `id`.
    Returns:
        Card: The card with the given `id`
    '''
    return self.create_card({'id': id, 'name': name})
def get_checklist(self, id, name=None):
    '''
    Build a lightweight Checklist handle for `id`.
    Returns:
        Checklist: The checklist with the given `id`
    '''
    return self.create_checklist({'id': id, 'name': name})
def get_member(self, id='me', name=None):
    '''
    Build a lightweight Member handle; defaults to the logged-in
    member (`id='me'`).
    Returns:
        Member: The member with the given `id`.
    '''
    return self.create_member({'id': id, 'fullName': name})
def domain_from_url(url):
    """
    Return the registered root domain for `url`.

    Protocol prefix, sub-domains, paths and query strings are all
    pruned away. Raises InvalidURLException when tldextract finds no
    valid public suffix.
    """
    parts = tldextract.extract(url)
    if not parts.suffix:
        raise InvalidURLException()
    return parts.domain + "." + parts.suffix
def to_raw_text_markupless(text, keep_whitespace=False, normalize_ascii=True):
    """
    Tokenize markup-free raw text into sentences of words.

    Urls are stripped and dates normalized to `7777` before sentence
    tokenization.

    Arguments
    ---------
    text: str, input text to tokenize.
    keep_whitespace : bool, retain the input's whitespace so that char
        offsets in the output correspond to those in the input.

    Returns
    -------
    generator<list<list<str>>>: sentences, each a list of words.
    """
    stripped = _remove_urls(text)
    normalized = remove_dates(stripped)
    return sent_tokenize(normalized, keep_whitespace, normalize_ascii)
def to_raw_text(text, keep_whitespace=False, normalize_ascii=True):
    """
    A generator to convert raw text segments, with xml, and other
    non-textual content to a list of words without any markup.
    Additionally dates are replaced by `7777` for normalization.
    Arguments
    ---------
    text: str, input text to tokenize, strip of markup.
    keep_whitespace : bool, should the output retain the
        whitespace of the input (so that char offsets in the
        output correspond to those in the input).
    Returns
    -------
    generator<list<list<str>>>, a generator for sentences, with
    within each sentence a list of the words separated.
    """
    # Cleaning pipeline, applied in the original order.
    # Fix: dropped a dead `out = text` assignment that was immediately
    # overwritten by the _remove_urls call.
    out = _remove_urls(text)
    out = _remove_mvar(out)
    out = _remove_squiggly_bracket(out)
    out = _remove_table(out)
    out = _remove_brackets(out)
    out = remove_remaining_double_brackets(out)
    out = remove_markup(out)
    out = remove_wikipedia_link.sub(anchor_replacer, out)
    out = remove_bullets_nbsps.sub(empty_space, out)
    out = remove_dates(out)
    out = remove_math_sections(out)
    out = remove_html(out)
    return sent_tokenize(out, keep_whitespace, normalize_ascii)
def to_raw_text_pairings(text, keep_whitespace=False, normalize_ascii=True):
    """
    A generator converting raw text with xml and other non-textual
    content into lists of words, keeping wikipedia anchors and
    normalizing dates to `7777`.

    Arguments
    ---------
    text: str, input text to tokenize, strip of markup.
    keep_whitespace : bool, retain the input's whitespace so that char
        offsets in the output correspond to those in the input.

    Returns
    -------
    generator<list<list<str>>>: sentences, each a list of words.
    """
    # NOTE: unlike to_raw_text, urls are deliberately not removed here.
    cleaned = _remove_mvar(text)
    cleaned = _remove_squiggly_bracket(cleaned)
    cleaned = _remove_table(cleaned)
    cleaned = remove_markup(cleaned)
    cleaned = remove_wikipedia_link.sub(anchor_replacer, cleaned)
    cleaned = remove_bullets_nbsps.sub(empty_space, cleaned)
    cleaned = remove_math_sections(cleaned)
    cleaned = remove_html(cleaned)
    for tokenized_sentence in sent_tokenize(cleaned, keep_whitespace, normalize_ascii):
        yield tokenized_sentence
def set_password(self, service, username, password):
    """Encrypt and store a password in the config file."""
    assoc = self._generate_assoc(service, username)
    # encrypt the password
    ciphertext = self.encrypt(password.encode('utf-8'), assoc)
    # base64-encode; the leading newline keeps the multi-line value
    # from tangling the config file
    value = '\n' + encodebytes(ciphertext).decode()
    self._write_config_value(service, username, value)
def split_with_locations(text, locations):
    """
    Split `text` according to a parallel list of split decisions.

    Arguments:
    ----------
    text : str, same length as locations.
    locations : list<int>, containing 'SHOULD_SPLIT', 'UNDECIDED' and
        'SHOULD_NOT_SPLIT' values; a new substring starts at every
        'SHOULD_SPLIT' position.

    Returns:
    --------
    Generator<str> : the substrings of text delimited by the
        'SHOULD_SPLIT' positions.
    """
    begin = 0
    for idx, decision in enumerate(locations):
        if decision == SHOULD_SPLIT:
            if begin != idx:
                yield text[begin:idx]
            begin = idx
    # trailing piece after the final split point
    if begin != len(text):
        yield text[begin:]
def mark_regex(regex, text, split_locations):
    """
    Mark a 'SHOULD_SPLIT' at the end position of every match of
    `regex` in `text`.

    Arguments
    ---------
    regex : re.Expression
    text : str, same length as split_locations
    split_locations : list<int>, split decisions (mutated in place).
    """
    limit = len(split_locations)
    for match in regex.finditer(text):
        stop = match.end()
        if stop < limit:
            split_locations[stop] = SHOULD_SPLIT
def mark_begin_end_regex(regex, text, split_locations):
    """
    Regex that adds a 'SHOULD_SPLIT' marker at the end
    location of each matching group of the given regex,
    and adds a 'SHOULD_SPLIT' at the beginning of the
    matching group. Each character within the matching
    group will be marked as 'SHOULD_NOT_SPLIT'.
    Arguments
    ---------
    regex : re.Expression
    text : str, same length as split_locations
    split_locations : list<int>, split decisions.
    """
    for match in regex.finditer(text):
        end_match = match.end()
        begin_match = match.start()
        # interior of the match must never be split
        for i in range(begin_match+1, end_match):
            split_locations[i] = SHOULD_NOT_SPLIT
        # boundaries become split points, but only if still UNDECIDED —
        # earlier, stronger decisions are not overridden
        if end_match < len(split_locations):
            if split_locations[end_match] == UNDECIDED:
                split_locations[end_match] = SHOULD_SPLIT
        if split_locations[begin_match] == UNDECIDED:
            split_locations[begin_match] = SHOULD_SPLIT
def main(argv=None):
    """Main command line interface.

    Returns the tool's exit status, or 3 when interrupted by Ctrl-C.
    """
    args = sys.argv[1:] if argv is None else argv
    tool = CommandLineTool()
    try:
        return tool.run(args)
    except KeyboardInterrupt:
        print('Canceled')
        return 3
def _create_cipher(self, password, salt, nonce = None):
    """
    Create the cipher object to encrypt or decrypt a payload.

    The AES key is derived from `password` and `salt` via Argon2id,
    using this instance's time/memory/parallelism cost settings.
    """
    from argon2.low_level import hash_secret_raw, Type
    from Crypto.Cipher import AES
    aesmode = self._get_mode(self.aesmode)
    if aesmode is None: # pragma: no cover
        raise ValueError('invalid AES mode: %s' % self.aesmode)
    # Stretch the password into a 128-bit (hash_len=16) AES key.
    key = hash_secret_raw(
        secret = password.encode(self.password_encoding),
        salt = salt,
        time_cost = self.time_cost,
        memory_cost = self.memory_cost,
        parallelism = self.parallelism,
        hash_len = 16,
        type = Type.ID)
    # nonce=None lets PyCryptodome generate one for encryption.
    return AES.new(key, aesmode, nonce)
def _get_mode(mode = None):
    """
    Map an AES mode name to the PyCryptodome mode constant.

    Returns the constant for `mode`, None for an unknown name, or —
    when called with mode=None — the list of valid AES mode names.
    """
    from Crypto.Cipher import AES
    AESModeMap = {
        'CCM': AES.MODE_CCM,
        'EAX': AES.MODE_EAX,
        'GCM': AES.MODE_GCM,
        'OCB': AES.MODE_OCB,
    }
    if mode is None:
        # Fix: return a real list as the docstring promises, instead of
        # a dict keys view (membership tests behave the same either way).
        return list(AESModeMap)
    return AESModeMap.get(mode)
def priority(self):
    """
    Applicable for all platforms, where the schemes, that are integrated
    with your environment, does not fit.

    Raises RuntimeError when a required dependency (argon2_cffi,
    PyCryptodome or a JSON implementation) is missing; otherwise
    returns the backend priority 2.5.
    """
    # Probe imports only — the modules are actually used elsewhere.
    try:
        __import__('argon2.low_level')
    except ImportError: # pragma: no cover
        raise RuntimeError("argon2_cffi package required")
    try:
        __import__('Crypto.Cipher.AES')
    except ImportError: # pragma: no cover
        raise RuntimeError("PyCryptodome package required")
    if not json: # pragma: no cover
        raise RuntimeError("JSON implementation such as simplejson "
                           "required.")
    return 2.5
def _check_scheme(self, config):
    """
    check for a valid scheme
    raise AttributeError if missing
    raise ValueError if not valid
    """
    try:
        scheme = config.get(
            escape_for_ini('keyring-setting'),
            escape_for_ini('scheme'),
        )
    except (configparser.NoSectionError, configparser.NoOptionError):
        raise AttributeError("Encryption scheme missing")
    # extract AES mode (the last three characters of the scheme name)
    aesmode = scheme[-3:]
    if aesmode not in self._get_mode():
        raise ValueError("Encryption scheme invalid: %s" % (aesmode))
    # setup AES mode
    self.aesmode = aesmode
    # remove pointless crypto module name
    if scheme.startswith('PyCryptodome '):
        scheme = scheme[13:]
    # check other scheme properties
    if scheme != self.scheme:
        raise ValueError("Encryption scheme mismatch "
                         "(exp.: %s, found: %s)" % (self.scheme, scheme))
def onPublish(self, topic, payload, qos, dup, retain, msgId):
    '''
    Callback Receiving messages from publisher

    Currently only logs the payload at debug level; the other packet
    fields (topic, qos, dup, retain, msgId) are ignored here.
    '''
    log.debug("msg={payload}", payload=payload)
def makeId(self):
    '''Produce packet ids in the range 1..65535, wrapping around and
    never yielding the reserved id 0.'''
    candidate = (self.id + 1) % 65536
    self.id = candidate if candidate else 1
    return self.id
def connect(self, request):
    '''
    Send a CONNECT control packet.

    In the base state this operation is invalid, so the returned
    Deferred immediately fails with an MQTTStateError naming the
    current state.
    '''
    state = self.__class__.__name__
    return defer.fail(MQTTStateError("Unexpected connect() operation", state))
def handleCONNACK(self, response):
    '''
    Handles CONNACK packet from the server

    A CONNACK is never expected in this state, so the packet is only
    reported as an error ({log_source} is presumably filled in by the
    logging system -- confirm against the logger setup).
    '''
    # Fix: dropped an unused `state = self.__class__.__name__` local
    # that was computed but never referenced.
    log.error("Unexpected {packet:7} packet received in {log_source}", packet="CONNACK")
def encodeString(string):
    '''
    Encode a text string into the MQTT UTF-8 format:
    a 2-byte big-endian length prefix followed by the UTF-8 bytes.
    Returns a bytearray; raises StringValueError when the encoded
    payload exceeds 65535 bytes.
    '''
    payload = bytearray(string, encoding='utf-8')
    length = len(payload)
    if length > 65535:
        raise StringValueError(length)
    result = bytearray(2)
    result[0] = length >> 8
    result[1] = length & 0xFF
    result.extend(payload)
    return result
def decodeString(encoded):
    '''
    Decode an MQTT UTF-8 string (2-byte big-endian length prefix plus
    payload) from `encoded`.
    Returns (decoded_string, remaining_bytes_to_parse).
    '''
    length = (encoded[0] << 8) + encoded[1]
    head = encoded[2:2 + length]
    tail = encoded[2 + length:]
    return (head.decode('utf-8'), tail)
def encode16Int(value):
    '''
    Encode an unsigned 16-bit integer as a 2-byte big-endian bytearray.
    (Values above 65535 raise ValueError from the bytearray assignment.)
    '''
    number = int(value)
    return bytearray([number >> 8, number & 0xFF])
def encodeLength(value):
    '''
    Encode `value` with MQTT's variable-length scheme: 7 data bits per
    byte, high bit set on every byte except the last.
    '''
    remaining = value
    out = bytearray()
    while True:
        remaining, septet = divmod(remaining, 128)
        if remaining > 0:
            septet |= 0x80  # continuation bit
        out.append(septet)
        if remaining <= 0:
            break
    return out
def decodeLength(encoded):
    '''
    Decode an MQTT variable-length integer from the start of `encoded`;
    bytes after the first one without the 0x80 continuation bit are
    ignored.
    '''
    total = 0
    weight = 1
    for byte in encoded:
        total += (byte & 0x7F) * weight
        weight <<= 7
        if not (byte & 0x80):
            break
    return total
def encode(self):
    '''
    Encode and store a DISCONNECT control packet
    (fixed header 0xE0 with a zero remaining length).
    '''
    packet = bytearray(2)
    packet[0] = 0xE0
    self.encoded = packet
    return str(packet) if PY2 else bytes(packet)
def encode(self):
    '''
    Encode and store a CONNECT control packet.

    Builds the fixed header (0x10), variable header (protocol
    name/level, connect flags, keepalive) and payload (client id,
    optional will topic/message, optional username/password).

    @raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes.
    @raise e: C{ValueError} if encoded username string exceeds 65535 bytes.
    '''
    header = bytearray(1)
    varHeader = bytearray()
    payload = bytearray()
    header[0] = 0x10 # packet code
    # ---- Variable header encoding section -----
    varHeader.extend(encodeString(self.version['tag']))
    varHeader.append(self.version['level']) # protocol Level
    # Connect flags: bit1 clean session, bit2 will flag, bits3-4 will
    # QoS, bit5 will retain, bit6 password, bit7 username.
    flags = (self.cleanStart << 1)
    if self.willTopic is not None and self.willMessage is not None:
        flags |= 0x04 | (self.willRetain << 5) | (self.willQoS << 3)
    if self.username is not None:
        flags |= 0x80
    if self.password is not None:
        flags |= 0x40
    varHeader.append(flags)
    varHeader.extend(encode16Int(self.keepalive))
    # ------ Payload encoding section ----
    payload.extend(encodeString(self.clientId))
    if self.willTopic is not None and self.willMessage is not None:
        payload.extend(encodeString(self.willTopic))
        payload.extend(encodeString(self.willMessage))
    if self.username is not None:
        payload.extend(encodeString(self.username))
    if self.password is not None:
        # Password is length-prefixed raw bytes, not an MQTT UTF-8 string.
        payload.extend(encode16Int(len(self.password)))
        payload.extend(bytearray(self.password, encoding='ascii', errors='ignore'))
    # ---- Build the packet once all lengths are known ----
    header.extend(encodeLength(len(varHeader) + len(payload)))
    header.extend(varHeader)
    header.extend(payload)
    self.encoded = header
    return str(header) if PY2 else bytes(header)
def decode(self, packet):
    '''
    Decode a CONNECT control packet.

    Populates version, cleanStart, keepalive and clientId, plus the
    will/username/password attributes when the corresponding connect
    flags are set.
    '''
    self.encoded = packet
    # Strip the fixed header plus variable length field
    lenLen = 1
    while packet[lenLen] & 0x80:
        lenLen += 1
    packet_remaining = packet[lenLen+1:]
    # Variable Header: protocol name string (value itself unused), then
    # the protocol level byte that selects v3.1 vs v3.1.1.
    version_str, packet_remaining = decodeString(packet_remaining)
    version_id = int(packet_remaining[0])
    if version_id == v31['level']:
        self.version = v31
    else:
        self.version = v311
    # Connect flags byte: bit1 clean session, bit2 will, bits3-4 will
    # QoS, bit5 will retain, bit6 password, bit7 username.
    flags = packet_remaining[1]
    self.cleanStart = (flags & 0x02) != 0
    willFlag = (flags & 0x04) != 0
    willQoS = (flags >> 3) & 0x03
    willRetain = (flags & 0x20) != 0
    userFlag = (flags & 0x80) != 0
    passFlag = (flags & 0x40) != 0
    packet_remaining = packet_remaining[2:]
    self.keepalive = decode16Int(packet_remaining)
    # Payload
    packet_remaining = packet_remaining[2:]
    self.clientId, packet_remaining = decodeString(packet_remaining)
    if willFlag:
        self.willRetain = willRetain
        self.willQoS = willQoS
        self.willTopic, packet_remaining = decodeString(packet_remaining)
        self.willMessage, packet_remaining = decodeString(packet_remaining)
    if userFlag:
        self.username, packet_remaining = decodeString(packet_remaining)
    if passFlag:
        # Password is raw length-prefixed bytes, not a UTF-8 string.
        l = decode16Int(packet_remaining)
        self.password = packet_remaining[2:2+l]
def encode(self):
    '''
    Encode and store a CONNACK control packet: fixed header 0x20,
    then session-present flag and the connect return code.
    '''
    varHeader = bytearray([self.session, self.resultCode])
    header = bytearray([0x20])
    header.extend(encodeLength(len(varHeader)))
    header.extend(varHeader)
    self.encoded = header
    return str(header) if PY2 else bytes(header)
def decode(self, packet):
    '''
    Decode a CONNACK control packet, extracting the session-present
    flag and the connect return code.
    '''
    self.encoded = packet
    # Skip the fixed header byte plus the variable-length
    # "remaining length" field (continuation bit 0x80).
    offset = 1
    while packet[offset] & 0x80:
        offset += 1
    remaining = packet[offset + 1:]
    self.session = (remaining[0] & 0x01) == 0x01
    self.resultCode = int(remaining[1])
def decode(self, packet):
    '''
    Decode a SUBSCRIBE control packet.

    Fills in `msgId` and `topics`, a list of (topic, qos) tuples.
    '''
    self.encoded = packet
    # Skip the fixed header byte plus the variable-length
    # "remaining length" field (continuation bit 0x80).
    lenLen = 1
    while packet[lenLen] & 0x80:
        lenLen += 1
    packet_remaining = packet[lenLen+1:]
    self.msgId = decode16Int(packet_remaining[0:2])
    self.topics = []
    packet_remaining = packet_remaining[2:]
    # Payload: repeated UTF-8 topic filter + one requested-QoS byte.
    while len(packet_remaining):
        topic, packet_remaining = decodeString(packet_remaining)
        qos = int (packet_remaining[0]) & 0x03
        self.topics.append((topic,qos))
        packet_remaining = packet_remaining[1:]
def encode(self):
    '''
    Encode and store a SUBACK control packet.

    `granted` appears to be a sequence of (code, flag) pairs where a
    true flag sets the 0x80 bit on the return code — presumably the
    MQTT "failure" marker; confirm against the SUBACK producers.
    '''
    header = bytearray(1)
    payload = bytearray()
    varHeader = encode16Int(self.msgId)
    header[0] = 0x90
    for code in self.granted:
        payload.append(code[0] | (0x80 if code[1] == True else 0x00))
    header.extend(encodeLength(len(varHeader) + len(payload)))
    header.extend(varHeader)
    header.extend(payload)
    self.encoded = header
    return str(header) if PY2 else bytes(header)
def encode(self):
    '''
    Encode and store an UNSUBCRIBE control packet
    @raise e: C{ValueError} if any encoded topic string exceeds 65535 bytes
    '''
    varHeader = encode16Int(self.msgId)
    payload = bytearray()
    for topic in self.topics:
        payload.extend(encodeString(topic))  # topic name
    header = bytearray([0xA2])  # packet with QoS=1
    header.extend(encodeLength(len(varHeader) + len(payload)))
    header.extend(varHeader)
    header.extend(payload)
    self.encoded = header
    return str(header) if PY2 else bytes(header)
def decode(self, packet):
    '''
    Decode an UNSUBSCRIBE control packet.
    (The original docstring said UNSUBACK; this parses msgId plus a
    list of topic filters, which matches UNSUBSCRIBE.)
    '''
    self.encoded = packet
    # Skip the fixed header byte plus the variable-length
    # "remaining length" field (continuation bit 0x80).
    lenLen = 1
    while packet[lenLen] & 0x80:
        lenLen += 1
    packet_remaining = packet[lenLen+1:]
    self.msgId = decode16Int(packet_remaining[0:2])
    self.topics = []
    packet_remaining = packet_remaining[2:]
    # Payload: repeated 2-byte length prefix + UTF-8 topic filter.
    while len(packet_remaining):
        l = decode16Int(packet_remaining[0:2])
        topic = packet_remaining[2:2+l].decode(encoding='utf-8')
        self.topics.append(topic)
        packet_remaining = packet_remaining[2+l:]
def encode(self):
    '''
    Encode and store an UNSUBACK control packet
    (fixed header 0xB0 followed by the 16-bit message id).
    '''
    varHeader = encode16Int(self.msgId)
    header = bytearray([0xB0])
    header.extend(encodeLength(len(varHeader)))
    header.extend(varHeader)
    self.encoded = header
    return str(header) if PY2 else bytes(header)
def encode(self):
    '''
    Encode and store a PUBLISH control packet.
    @raise e: C{ValueError} if encoded topic string exceeds 65535 bytes.
    @raise e: C{ValueError} if encoded packet size exceeds 268435455 bytes.
    @raise e: C{PayloadTypeError} if C{payload} is not a string or a bytearray.
    '''
    header = bytearray(1)
    varHeader = bytearray()
    payload = bytearray()
    # QoS>0 publishes carry a packet id plus the DUP/QoS flag bits.
    if self.qos:
        header[0] = 0x30 | self.retain | (self.qos << 1) | (self.dup << 3)
        varHeader.extend(encodeString(self.topic)) # topic name
        varHeader.extend(encode16Int(self.msgId)) # msgId should not be None
    else:
        header[0] = 0x30 | self.retain
        varHeader.extend(encodeString(self.topic)) # topic name
    if isinstance(self.payload, bytearray):
        payload.extend(self.payload)
    elif isinstance(self.payload, str):
        payload.extend(bytearray(self.payload, encoding='utf-8'))
    else:
        raise PayloadTypeError(type(self.payload))
    # The remaining-length field caps the packet at 268435455 bytes.
    totalLen = len(varHeader) + len(payload)
    if totalLen > 268435455:
        raise PayloadValueError(totalLen)
    header.extend(encodeLength(totalLen))
    header.extend(varHeader)
    header.extend(payload)
    self.encoded = header
    return str(header) if PY2 else bytes(header)
def decode(self, packet):
    '''
    Decode a PUBLISH control packet.

    Extracts dup/qos/retain from the fixed-header flag bits, then the
    topic, the msgId (QoS>0 only) and the payload.
    '''
    self.encoded = packet
    lenLen = 1
    while packet[lenLen] & 0x80:
        lenLen += 1
    packet_remaining = packet[lenLen+1:]
    # Fixed-header flag bits.
    self.dup = (packet[0] & 0x08) == 0x08
    self.qos = (packet[0] & 0x06) >> 1
    self.retain = (packet[0] & 0x01) == 0x01
    self.topic, _ = decodeString(packet_remaining)
    # Re-read the 2-byte topic length to know where the payload starts.
    topicLen = decode16Int(packet_remaining)
    if self.qos:
        self.msgId = decode16Int( packet_remaining[topicLen+2:topicLen+4] )
        self.payload = packet_remaining[topicLen+4:]
    else:
        self.msgId = None
        self.payload = packet_remaining[topicLen+2:]
def decode(self, packet):
    '''
    Decode a PUBREL control packet, extracting the message id and the
    DUP flag.
    '''
    self.encoded = packet
    # Skip fixed header byte plus the variable-length length field.
    offset = 1
    while packet[offset] & 0x80:
        offset += 1
    remaining = packet[offset + 1:]
    self.msgId = decode16Int(remaining)
    self.dup = (packet[0] & 0x08) == 0x08
def get_url(self, method=None, **kwargs):
    """Return url for call method.
    :param method (optional): `str` method name; falls back to the
        method stored on this API instance.
    :param kwargs: query parameters; `v` (API version) and
        `access_token` are filled in from the instance when absent.
    :returns: `str` URL.
    """
    # __version/__token/__method are name-mangled private attributes
    # of the API class.
    kwargs.setdefault('v', self.__version)
    if self.__token is not None:
        kwargs.setdefault('access_token', self.__token)
    return 'https://api.vk.com/method/{}?{}'.format(
        method or self.__method, urlencode(kwargs)
    )
def request(self, method, **kwargs):
    """
    Send request to API.
    :param method: `str` method name.
    :param kwargs: query parameters; the version/token defaults are
        applied here and again inside `get_url` (harmless overlap).
    :returns: `dict` response (parsed JSON body).
    """
    kwargs.setdefault('v', self.__version)
    if self.__token is not None:
        kwargs.setdefault('access_token', self.__token)
    # Synchronous HTTP GET; raises whatever `requests` raises on
    # network failure or non-JSON bodies.
    return requests.get(self.get_url(method, **kwargs)).json()
def refresh(self):
    """ Refresh the list of blocks to the disk, collectively """
    # Only rank 0 touches the filesystem; the result is then broadcast
    # so every rank ends up with an identical block list. All ranks
    # must call this together (collective operation).
    if self.comm.rank == 0:
        self._blocks = self.list_blocks()
    else:
        self._blocks = None
    self._blocks = self.comm.bcast(self._blocks)
def format_data(self, data, scale=True):
    """
    Function for converting a dict to an array suitable for sklearn.
    Parameters
    ----------
    data : dict
        A dict of data, containing all elements of
        `analytes` as items.
    scale : bool
        Whether or not to scale the data. Should always be
        `True`, unless used by `classifier.fitting_data`
        where a scaler hasn't been created yet.
    Returns
    -------
    A data array suitable for use with `sklearn.cluster`, and an index
    array recording which rows of the original data survived the
    nan-filtering.
    """
    if len(self.analytes) == 1:
        # if single analyte: pad a dummy zero column so sklearn
        # receives a 2-D array
        d = nominal_values(data[self.analytes[0]])
        ds = np.array(list(zip(d, np.zeros(len(d)))))
    else:
        # package multiple analytes
        d = [nominal_values(data[a]) for a in self.analytes]
        ds = np.vstack(d).T
    # identify all nan values
    finite = np.isfinite(ds).sum(1) == ds.shape[1]
    # remember which values are sampled
    sampled = np.arange(data[self.analytes[0]].size)[finite]
    # remove all nan values
    ds = ds[finite]
    if scale:
        ds = self.scaler.transform(ds)
    return ds, sampled
def fitting_data(self, data):
    """
    Function to format data for cluster fitting.
    Parameters
    ----------
    data : dict
        A dict of data, containing all elements of
        `analytes` as items.
    Returns
    -------
    A data array for initial cluster fitting.
    """
    # unscaled, nan-free array — the scaler does not exist yet
    ds_fit, _ = self.format_data(data, scale=False)
    # define scaler (stored for later use by format_data/predict)
    self.scaler = preprocessing.StandardScaler().fit(ds_fit)
    # scale data and return
    return self.scaler.transform(ds_fit)
def fit_kmeans(self, data, n_clusters, **kwargs):
    """
    Fit a K-Means clustering model to the data.
    Parameters
    ----------
    data : array-like
        A dataset formatted by `classifier.fitting_data`.
    n_clusters : int
        The number of clusters in the data.
    **kwargs
        passed to `sklearn.cluster.KMeans`.
    Returns
    -------
    Fitted `sklearn.cluster.KMeans` object.
    """
    model = cl.KMeans(n_clusters=n_clusters, **kwargs)
    model.fit(data)
    return model
def fit_meanshift(self, data, bandwidth=None, bin_seeding=False, **kwargs):
    """
    Fit MeanShift clustering algorithm to data.
    Parameters
    ----------
    data : array-like
        A dataset formatted by `classifier.fitting_data`.
    bandwidth : float
        The bandwidth value used during clustering.
        If none, determined automatically. Note:
        the data are scaled before clutering, so
        this is not in the same units as the data.
    bin_seeding : bool
        Whether or not to use 'bin_seeding'. See
        documentation for `sklearn.cluster.MeanShift`.
    **kwargs
        passed to `sklearn.cluster.MeanShift`.
    Returns
    -------
    Fitted `sklearn.cluster.MeanShift` object.
    """
    if bandwidth is None:
        bandwidth = cl.estimate_bandwidth(data)
    # Fix: **kwargs were documented as forwarded to MeanShift but were
    # silently dropped; they are now passed through.
    ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding, **kwargs)
    ms.fit(data)
    return ms
def fit(self, data, method='kmeans', **kwargs):
    """
    fit classifiers from large dataset.
    Parameters
    ----------
    data : dict
        A dict of data for clustering. Must contain
        items with the same name as analytes used for
        clustering.
    method : str
        A string defining the clustering method used. Can be:
        * 'kmeans' : K-Means clustering algorithm
        * 'meanshift' : Meanshift algorithm
    n_clusters : int
        *K-Means only*. The numebr of clusters to identify
    bandwidth : float
        *Meanshift only.*
        The bandwidth value used during clustering.
        If none, determined automatically. Note:
        the data are scaled before clutering, so
        this is not in the same units as the data.
    bin_seeding : bool
        *Meanshift only.*
        Whether or not to use 'bin_seeding'. See
        documentation for `sklearn.cluster.MeanShift`.
    **kwargs :
        passed to `sklearn.cluster.MeanShift`.
    Returns
    -------
    None. The fitted classifier is stored on `self.classifier`.
    """
    self.method = method
    ds_fit = self.fitting_data(data)
    # dispatch table: clustering method name -> fitting function
    mdict = {'kmeans': self.fit_kmeans,
             'meanshift': self.fit_meanshift}
    clust = mdict[method]
    self.classifier = clust(data=ds_fit, **kwargs)
    # sort cluster centers by value of first column, to avoid random variation.
    c0 = self.classifier.cluster_centers_.T[self.sort_by]
    self.classifier.cluster_centers_ = self.classifier.cluster_centers_[np.argsort(c0)]
    # recalculate the labels, so it's consistent with cluster centers
    self.classifier.labels_ = self.classifier.predict(ds_fit)
    self.classifier.ulabels_ = np.unique(self.classifier.labels_)
    return
q275665 | classifier.predict | test | def predict(self, data):
"""
Label new data with cluster identities.
Parameters
----------
data : dict
A data dict containing the same analytes used to
fit the classifier.
sort_by : str
The name of an analyte used to sort the resulting
clusters. If None, defaults to the first analyte
used in fitting.
Returns
-------
array of clusters the same length as the data.
"""
size = data[self.analytes[0]].size
ds, sampled = self.format_data(data)
# predict clusters
cs = self.classifier.predict(ds)
# map clusters to original index
clusters = self.map_clusters(size, sampled, cs)
return clusters | python | {
"resource": ""
} |
q275666 | classifier.map_clusters | test | def map_clusters(self, size, sampled, clusters):
"""
Translate cluster identity back to original data size.
Parameters
----------
size : int
size of original dataset
sampled : array-like
integer array describing location of finite values
in original data.
clusters : array-like
integer array of cluster identities
Returns
-------
list of cluster identities the same length as original
data. Where original data are non-finite, returns -2.
"""
ids = np.zeros(size, dtype=int)
ids[:] = -2
ids[sampled] = clusters
return ids | python | {
"resource": ""
} |
q275667 | classifier.sort_clusters | test | def sort_clusters(self, data, cs, sort_by):
"""
Sort clusters by the concentration of a particular analyte.
Parameters
----------
data : dict
A dataset containing sort_by as a key.
cs : array-like
An array of clusters, the same length as values of data.
sort_by : str
analyte to sort the clusters by
Returns
-------
array of clusters, sorted by mean value of sort_by analyte.
"""
# label the clusters according to their contents
sdat = data[sort_by]
means = []
nclusts = np.arange(cs.max() + 1)
for c in nclusts:
means.append(np.nanmean(sdat[cs == c]))
# create ranks
means = np.array(means)
rank = np.zeros(means.size)
rank[np.argsort(means)] = np.arange(means.size)
csn = cs.copy()
for c, o in zip(nclusts, rank):
csn[cs == c] = o
return csn | python | {
"resource": ""
} |
q275668 | get_date | test | def get_date(datetime, time_format=None):
"""
Return a datetime oject from a string, with optional time format.
Parameters
----------
datetime : str
Date-time as string in any sensible format.
time_format : datetime str (optional)
String describing the datetime format. If missing uses
dateutil.parser to guess time format.
"""
if time_format is None:
t = du.parser.parse(datetime)
else:
t = dt.datetime.strftime(datetime, time_format)
return t | python | {
"resource": ""
} |
q275669 | get_total_n_points | test | def get_total_n_points(d):
"""
Returns the total number of data points in values of dict.
Paramters
---------
d : dict
"""
n = 0
for di in d.values():
n += len(di)
return n | python | {
"resource": ""
} |
q275670 | get_total_time_span | test | def get_total_time_span(d):
"""
Returns total length of analysis.
"""
tmax = 0
for di in d.values():
if di.uTime.max() > tmax:
tmax = di.uTime.max()
return tmax | python | {
"resource": ""
} |
q275671 | unitpicker | test | def unitpicker(a, llim=0.1, denominator=None, focus_stage=None):
"""
Determines the most appropriate plotting unit for data.
Parameters
----------
a : float or array-like
number to optimise. If array like, the 25% quantile is optimised.
llim : float
minimum allowable value in scaled data.
Returns
-------
(float, str)
(multiplier, unit)
"""
if not isinstance(a, (int, float)):
a = nominal_values(a)
a = np.percentile(a[~np.isnan(a)], 25)
if denominator is not None:
pd = pretty_element(denominator)
else:
pd = ''
if focus_stage == 'calibrated':
udict = {0: 'mol/mol ' + pd,
1: 'mmol/mol ' + pd,
2: '$\mu$mol/mol ' + pd,
3: 'nmol/mol ' + pd,
4: 'pmol/mol ' + pd,
5: 'fmol/mol ' + pd}
elif focus_stage == 'ratios':
udict = {0: 'counts/count ' + pd,
1: '$10^{-3}$ counts/count ' + pd,
2: '$10^{-6}$ counts/count ' + pd,
3: '$10^{-9}$ counts/count ' + pd,
4: '$10^{-12}$ counts/count ' + pd,
5: '$10^{-15}$ counts/count ' + pd}
elif focus_stage in ('rawdata', 'despiked', 'bkgsub'):
udict = udict = {0: 'counts',
1: '$10^{-3}$ counts',
2: '$10^{-6}$ counts',
3: '$10^{-9}$ counts',
4: '$10^{-12}$ counts',
5: '$10^{-15}$ counts'}
else:
udict = {0: '', 1: '', 2: '', 3: '', 4: '', 5: ''}
a = abs(a)
n = 0
if a < llim:
while a < llim:
a *= 1000
n += 1
return float(1000**n), udict[n] | python | {
"resource": ""
} |
q275672 | pretty_element | test | def pretty_element(s):
"""
Returns formatted element name.
Parameters
----------
s : str
of format [A-Z][a-z]?[0-9]+
Returns
-------
str
LaTeX formatted string with superscript numbers.
"""
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
return '$^{' + m + '}$' + el | python | {
"resource": ""
} |
q275673 | analyte_2_namemass | test | def analyte_2_namemass(s):
"""
Converts analytes in format '27Al' to 'Al27'.
Parameters
----------
s : str
of format [A-z]{1,3}[0-9]{1,3}
Returns
-------
str
Name in format [0-9]{1,3}[A-z]{1,3}
"""
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
return el + m | python | {
"resource": ""
} |
q275674 | analyte_2_massname | test | def analyte_2_massname(s):
"""
Converts analytes in format 'Al27' to '27Al'.
Parameters
----------
s : str
of format [0-9]{1,3}[A-z]{1,3}
Returns
-------
str
Name in format [A-z]{1,3}[0-9]{1,3}
"""
el = re.match('.*?([A-z]{1,3}).*?', s).groups()[0]
m = re.match('.*?([0-9]{1,3}).*?', s).groups()[0]
return m + el | python | {
"resource": ""
} |
q275675 | collate_data | test | def collate_data(in_dir, extension='.csv', out_dir=None):
"""
Copy all csvs in nested directroy to single directory.
Function to copy all csvs from a directory, and place
them in a new directory.
Parameters
----------
in_dir : str
Input directory containing csv files in subfolders
extension : str
The extension that identifies your data files.
Defaults to '.csv'.
out_dir : str
Destination directory
Returns
-------
None
"""
if out_dir is None:
out_dir = './' + re.search('^\.(.*)', extension).groups(0)[0]
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
for p, d, fs in os.walk(in_dir):
for f in fs:
if extension in f:
shutil.copy(p + '/' + f, out_dir + '/' + f)
return | python | {
"resource": ""
} |
q275676 | enumerate_bool | test | def enumerate_bool(bool_array, nstart=0):
"""
Consecutively numbers contiguous booleans in array.
i.e. a boolean sequence, and resulting numbering
T F T T T F T F F F T T F
0-1 1 1 - 2 ---3 3 -
where ' - '
Parameters
----------
bool_array : array_like
Array of booleans.
nstart : int
The number of the first boolean group.
"""
ind = bool_2_indices(bool_array)
ns = np.full(bool_array.size, nstart, dtype=int)
for n, lims in enumerate(ind):
ns[lims[0]:lims[-1] + 1] = nstart + n + 1
return ns | python | {
"resource": ""
} |
q275677 | tuples_2_bool | test | def tuples_2_bool(tuples, x):
"""
Generate boolean array from list of limit tuples.
Parameters
----------
tuples : array_like
[2, n] array of (start, end) values
x : array_like
x scale the tuples are mapped to
Returns
-------
array_like
boolean array, True where x is between each pair of tuples.
"""
if np.ndim(tuples) == 1:
tuples = [tuples]
out = np.zeros(x.size, dtype=bool)
for l, u in tuples:
out[(x > l) & (x < u)] = True
return out | python | {
"resource": ""
} |
q275678 | fastsmooth | test | def fastsmooth(a, win=11):
"""
Returns rolling - window smooth of a.
Function to efficiently calculate the rolling mean of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
Gradient of a, assuming as constant integer x - scale.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
win += 1 # add 1 to window if it is even.
kernel = np.ones(win) / win
npad = int((win - 1) / 2)
spad = np.full(npad + 1, np.mean(a[:(npad + 1)]))
epad = np.full(npad - 1, np.mean(a[-(npad - 1):]))
return np.concatenate([spad, np.convolve(a, kernel, 'valid'), epad]) | python | {
"resource": ""
} |
q275679 | fastgrad | test | def fastgrad(a, win=11):
"""
Returns rolling - window gradient of a.
Function to efficiently calculate the rolling gradient of a numpy
array using 'stride_tricks' to split up a 1D array into an ndarray of
sub - sections of the original array, of dimensions [len(a) - win, win].
Parameters
----------
a : array_like
The 1D array to calculate the rolling gradient of.
win : int
The width of the rolling window.
Returns
-------
array_like
Gradient of a, assuming as constant integer x - scale.
"""
# check to see if 'window' is odd (even does not work)
if win % 2 == 0:
win += 1 # subtract 1 from window if it is even.
# trick for efficient 'rolling' computation in numpy
# shape = a.shape[:-1] + (a.shape[-1] - win + 1, win)
# strides = a.strides + (a.strides[-1], )
# wins = np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
wins = rolling_window(a, win, 'ends')
# apply rolling gradient to data
a = map(lambda x: np.polyfit(np.arange(win), x, 1)[0], wins)
return np.array(list(a)) | python | {
"resource": ""
} |
q275680 | findmins | test | def findmins(x, y):
""" Function to find local minima.
Parameters
----------
x, y : array_like
1D arrays of the independent (x) and dependent (y) variables.
Returns
-------
array_like
Array of points in x where y has a local minimum.
"""
return x[np.r_[False, y[1:] < y[:-1]] & np.r_[y[:-1] < y[1:], False]] | python | {
"resource": ""
} |
q275681 | cluster_meanshift | test | def cluster_meanshift(data, bandwidth=None, bin_seeding=False, **kwargs):
"""
Identify clusters using Meanshift algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
bandwidth : float or None
If None, bandwidth is estimated automatically using
sklean.cluster.estimate_bandwidth
bin_seeding : bool
Setting this option to True will speed up the algorithm.
See sklearn documentation for full description.
Returns
-------
dict
boolean array for each identified cluster.
"""
if bandwidth is None:
bandwidth = cl.estimate_bandwidth(data)
ms = cl.MeanShift(bandwidth=bandwidth, bin_seeding=bin_seeding, **kwargs)
ms.fit(data)
labels = ms.labels_
return labels, [np.nan] | python | {
"resource": ""
} |
q275682 | cluster_kmeans | test | def cluster_kmeans(data, n_clusters, **kwargs):
"""
Identify clusters using K - Means algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
n_clusters : int
The number of clusters expected in the data.
Returns
-------
dict
boolean array for each identified cluster.
"""
km = cl.KMeans(n_clusters, **kwargs)
kmf = km.fit(data)
labels = kmf.labels_
return labels, [np.nan] | python | {
"resource": ""
} |
q275683 | cluster_DBSCAN | test | def cluster_DBSCAN(data, eps=None, min_samples=None,
n_clusters=None, maxiter=200, **kwargs):
"""
Identify clusters using DBSCAN algorithm.
Parameters
----------
data : array_like
array of size [n_samples, n_features].
eps : float
The minimum 'distance' points must be apart for them to be in the
same cluster. Defaults to 0.3. Note: If the data are normalised
(they should be for DBSCAN) this is in terms of total sample
variance. Normalised data have a mean of 0 and a variance of 1.
min_samples : int
The minimum number of samples within distance `eps` required
to be considered as an independent cluster.
n_clusters : int
The number of clusters expected. If specified, `eps` will be
incrementally reduced until the expected number of clusters is
found.
maxiter : int
The maximum number of iterations DBSCAN will run.
Returns
-------
dict
boolean array for each identified cluster and core samples.
"""
if n_clusters is None:
if eps is None:
eps = 0.3
db = cl.DBSCAN(eps=eps, min_samples=min_samples, **kwargs).fit(data)
else:
clusters = 0
eps_temp = 1 / .95
niter = 0
while clusters < n_clusters:
clusters_last = clusters
eps_temp *= 0.95
db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
clusters = (len(set(db.labels_)) -
(1 if -1 in db.labels_ else 0))
if clusters < clusters_last:
eps_temp *= 1 / 0.95
db = cl.DBSCAN(eps=eps_temp, min_samples=min_samples, **kwargs).fit(data)
clusters = (len(set(db.labels_)) -
(1 if -1 in db.labels_ else 0))
warnings.warn(('\n\n***Unable to find {:.0f} clusters in '
'data. Found {:.0f} with an eps of {:.2e}'
'').format(n_clusters, clusters, eps_temp))
break
niter += 1
if niter == maxiter:
warnings.warn(('\n\n***Maximum iterations ({:.0f}) reached'
', {:.0f} clusters not found.\nDeacrease '
'min_samples or n_clusters (or increase '
'maxiter).').format(maxiter, n_clusters))
break
labels = db.labels_
core_samples_mask = np.zeros_like(labels)
core_samples_mask[db.core_sample_indices_] = True
return labels, core_samples_mask | python | {
"resource": ""
} |
q275684 | get_defined_srms | test | def get_defined_srms(srm_file):
"""
Returns list of SRMS defined in the SRM database
"""
srms = read_table(srm_file)
return np.asanyarray(srms.index.unique()) | python | {
"resource": ""
} |
q275685 | read_configuration | test | def read_configuration(config='DEFAULT'):
"""
Read LAtools configuration file, and return parameters as dict.
"""
# read configuration file
_, conf = read_latoolscfg()
# if 'DEFAULT', check which is the default configuration
if config == 'DEFAULT':
config = conf['DEFAULT']['config']
# grab the chosen configuration
conf = dict(conf[config])
# update config name with chosen
conf['config'] = config
return conf | python | {
"resource": ""
} |
q275686 | read_latoolscfg | test | def read_latoolscfg():
"""
Reads configuration, returns a ConfigParser object.
Distinct from read_configuration, which returns a dict.
"""
config_file = pkgrs.resource_filename('latools', 'latools.cfg')
cf = configparser.ConfigParser()
cf.read(config_file)
return config_file, cf | python | {
"resource": ""
} |
q275687 | print_all | test | def print_all():
"""
Prints all currently defined configurations.
"""
# read configuration file
_, conf = read_latoolscfg()
default = conf['DEFAULT']['config']
pstr = '\nCurrently defined LAtools configurations:\n\n'
for s in conf.sections():
if s == default:
pstr += s + ' [DEFAULT]\n'
elif s == 'REPRODUCE':
pstr += s + ' [DO NOT ALTER]\n'
else:
pstr += s + '\n'
for k, v in conf[s].items():
if k != 'config':
if v[:9] == 'resources':
v = pkgrs.resource_filename('latools', v)
pstr += ' ' + k + ': ' + v + '\n'
pstr += '\n'
print(pstr)
return | python | {
"resource": ""
} |
q275688 | copy_SRM_file | test | def copy_SRM_file(destination=None, config='DEFAULT'):
"""
Creates a copy of the default SRM table at the specified location.
Parameters
----------
destination : str
The save location for the SRM file. If no location specified,
saves it as 'LAtools_[config]_SRMTable.csv' in the current working
directory.
config : str
It's possible to set up different configurations with different
SRM files. This specifies the name of the configuration that you
want to copy the SRM file from. If not specified, the 'DEFAULT'
configuration is used.
"""
# find SRM file from configuration
conf = read_configuration()
src = pkgrs.resource_filename('latools', conf['srmfile'])
# work out destination path (if not given)
if destination is None:
destination = './LAtools_' + conf['config'] + '_SRMTable.csv'
if os.path.isdir(destination):
destination += 'LAtools_' + conf['config'] + '_SRMTable.csv'
copyfile(src, destination)
print(src + ' \n copied to:\n ' + destination)
return | python | {
"resource": ""
} |
q275689 | create | test | def create(config_name, srmfile=None, dataformat=None, base_on='DEFAULT', make_default=False):
"""
Adds a new configuration to latools.cfg.
Parameters
----------
config_name : str
The name of the new configuration. This should be descriptive
(e.g. UC Davis Foram Group)
srmfile : str (optional)
The location of the srm file used for calibration.
dataformat : str (optional)
The location of the dataformat definition to use.
base_on : str
The name of the existing configuration to base the new one on.
If either srm_file or dataformat are not specified, the new
config will copy this information from the base_on config.
make_default : bool
Whether or not to make the new configuration the default
for future analyses. Default = False.
Returns
-------
None
"""
base_config = read_configuration(base_on)
# read config file
config_file, cf = read_latoolscfg()
# if config doesn't already exist, create it.
if config_name not in cf.sections():
cf.add_section(config_name)
# set parameter values
if dataformat is None:
dataformat = base_config['dataformat']
cf.set(config_name, 'dataformat', dataformat)
if srmfile is None:
srmfile = base_config['srmfile']
cf.set(config_name, 'srmfile', srmfile)
# make the parameter set default, if requested
if make_default:
cf.set('DEFAULT', 'config', config_name)
with open(config_file, 'w') as f:
cf.write(f)
return | python | {
"resource": ""
} |
q275690 | change_default | test | def change_default(config):
"""
Change the default configuration.
"""
config_file, cf = read_latoolscfg()
if config not in cf.sections():
raise ValueError("\n'{:s}' is not a defined configuration.".format(config))
if config == 'REPRODUCE':
pstr = ('Are you SURE you want to set REPRODUCE as your default configuration?\n' +
' ... this is an odd thing to be doing.')
else:
pstr = ('Are you sure you want to change the default configuration from {:s}'.format(cf['DEFAULT']['config']) +
'to {:s}?'.format(config))
response = input(pstr + '\n> [N/y]: ')
if response.lower() == 'y':
cf.set('DEFAULT', 'config', config)
with open(config_file, 'w') as f:
cf.write(f)
print(' Default changed!')
else:
print(' Done nothing.') | python | {
"resource": ""
} |
q275691 | exclude_downhole | test | def exclude_downhole(filt, threshold=2):
"""
Exclude all data after the first excluded portion.
This makes sense for spot measurements where, because
of the signal mixing inherent in LA-ICPMS, once a
contaminant is ablated, it will always be present to
some degree in signals from further down the ablation
pit.
Parameters
----------
filt : boolean array
threshold : int
Returns
-------
filter : boolean array
"""
cfilt = filt.copy()
inds = bool_2_indices(~filt)
rem = (np.diff(inds) >= threshold)[:, 0]
if any(rem):
if inds[rem].shape[0] > 1:
limit = inds[rem][1, 0]
cfilt[limit:] = False
return cfilt | python | {
"resource": ""
} |
q275692 | defrag | test | def defrag(filt, threshold=3, mode='include'):
"""
'Defragment' a filter.
Parameters
----------
filt : boolean array
A filter
threshold : int
Consecutive values equal to or below this threshold
length are considered fragments, and will be removed.
mode : str
Wheter to change False fragments to True ('include')
or True fragments to False ('exclude')
Returns
-------
defragmented filter : boolean array
"""
if bool_2_indices(filt) is None:
return filt
if mode == 'include':
inds = bool_2_indices(~filt) + 1
rep = True
if mode == 'exclude':
inds = bool_2_indices(filt) + 1
rep = False
rem = (np.diff(inds) <= threshold)[:, 0]
cfilt = filt.copy()
if any(rem):
for lo, hi in inds[rem]:
cfilt[lo:hi] = rep
return cfilt | python | {
"resource": ""
} |
q275693 | D.despike | test | def despike(self, expdecay_despiker=True, exponent=None,
noise_despiker=True, win=3, nlim=12., maxiter=3):
"""
Applies expdecay_despiker and noise_despiker to data.
Parameters
----------
expdecay_despiker : bool
Whether or not to apply the exponential decay filter.
exponent : None or float
The exponent for the exponential decay filter. If None,
it is determined automatically using `find_expocoef`.
noise_despiker : bool
Whether or not to apply the standard deviation spike filter.
win : int
The rolling window over which the spike filter calculates
the trace statistics.
nlim : float
The number of standard deviations above the rolling mean
that data are excluded.
maxiter : int
The max number of times that the fitler is applied.
Returns
-------
None
"""
if not hasattr(self, 'despiked'):
self.data['despiked'] = Bunch()
out = {}
for a, v in self.focus.items():
if 'time' not in a.lower():
sig = v.copy() # copy data
if expdecay_despiker:
if exponent is not None:
sig = proc.expdecay_despike(sig, exponent, self.tstep, maxiter)
else:
warnings.warn('exponent is None - either provide exponent, or run at `analyse`\nlevel to automatically calculate it.')
if noise_despiker:
sig = proc.noise_despike(sig, int(win), nlim, maxiter)
out[a] = sig
self.data['despiked'].update(out)
# recalculate total counts
self.data['total_counts'] = sum(self.data['despiked'].values())
self.setfocus('despiked')
return | python | {
"resource": ""
} |
q275694 | D.autorange_plot | test | def autorange_plot(self, analyte='total_counts', gwin=7, swin=None, win=20,
on_mult=[1.5, 1.], off_mult=[1., 1.5],
transform='log'):
"""
Plot a detailed autorange report for this sample.
"""
if analyte is None:
# sig = self.focus[self.internal_standard]
sig = self.data['total_counts']
elif analyte == 'total_counts':
sig = self.data['total_counts']
elif analyte in self.analytes:
sig = self.focus[analyte]
else:
raise ValueError('Invalid analyte.')
if transform == 'log':
sig = np.log10(sig)
fig, axs = plot.autorange_plot(t=self.Time, sig=sig, gwin=gwin,
swin=swin, win=win, on_mult=on_mult,
off_mult=off_mult)
return fig, axs | python | {
"resource": ""
} |
q275695 | D.mkrngs | test | def mkrngs(self):
"""
Transform boolean arrays into list of limit pairs.
Gets Time limits of signal/background boolean arrays and stores them as
sigrng and bkgrng arrays. These arrays can be saved by 'save_ranges' in
the analyse object.
"""
bbool = bool_2_indices(self.bkg)
if bbool is not None:
self.bkgrng = self.Time[bbool]
else:
self.bkgrng = [[np.nan, np.nan]]
sbool = bool_2_indices(self.sig)
if sbool is not None:
self.sigrng = self.Time[sbool]
else:
self.sigrng = [[np.nan, np.nan]]
tbool = bool_2_indices(self.trn)
if tbool is not None:
self.trnrng = self.Time[tbool]
else:
self.trnrng = [[np.nan, np.nan]]
self.ns = np.zeros(self.Time.size)
n = 1
for i in range(len(self.sig) - 1):
if self.sig[i]:
self.ns[i] = n
if self.sig[i] and ~self.sig[i + 1]:
n += 1
self.n = int(max(self.ns)) # record number of traces
return | python | {
"resource": ""
} |
q275696 | D.ratio | test | def ratio(self, internal_standard=None):
"""
Divide all analytes by a specified internal_standard analyte.
Parameters
----------
internal_standard : str
The analyte used as the internal_standard.
Returns
-------
None
"""
if internal_standard is not None:
self.internal_standard = internal_standard
self.data['ratios'] = Bunch()
for a in self.analytes:
self.data['ratios'][a] = (self.data['bkgsub'][a] /
self.data['bkgsub'][self.internal_standard])
self.setfocus('ratios')
return | python | {
"resource": ""
} |
q275697 | D.calibrate | test | def calibrate(self, calib_ps, analytes=None):
"""
Apply calibration to data.
The `calib_dict` must be calculated at the `analyse` level,
and passed to this calibrate function.
Parameters
----------
calib_dict : dict
A dict of calibration values to apply to each analyte.
Returns
-------
None
"""
# can have calibration function stored in self and pass *coefs?
if analytes is None:
analytes = self.analytes
if 'calibrated' not in self.data.keys():
self.data['calibrated'] = Bunch()
for a in analytes:
m = calib_ps[a]['m'].new(self.uTime)
if 'c' in calib_ps[a]:
c = calib_ps[a]['c'].new(self.uTime)
else:
c = 0
self.data['calibrated'][a] = self.data['ratios'][a] * m + c
if self.internal_standard not in analytes:
self.data['calibrated'][self.internal_standard] = \
np.empty(len(self.data['ratios'][self.internal_standard]))
self.setfocus('calibrated')
return | python | {
"resource": ""
} |
q275698 | D.sample_stats | test | def sample_stats(self, analytes=None, filt=True,
stat_fns={},
eachtrace=True):
"""
Calculate sample statistics
Returns samples, analytes, and arrays of statistics
of shape (samples, analytes). Statistics are calculated
from the 'focus' data variable, so output depends on how
the data have been processed.
Parameters
----------
analytes : array_like
List of analytes to calculate the statistic on
filt : bool or str
The filter to apply to the data when calculating sample statistics.
bool: True applies filter specified in filt.switches.
str: logical string specifying a partucular filter
stat_fns : dict
Dict of {name: function} pairs. Functions that take a single
array_like input, and return a single statistic. Function should
be able to cope with NaN values.
eachtrace : bool
True: per - ablation statistics
False: whole sample statistics
Returns
-------
None
"""
if analytes is None:
analytes = self.analytes
elif isinstance(analytes, str):
analytes = [analytes]
self.stats = Bunch()
self.stats['analytes'] = analytes
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
for n, f in stat_fns.items():
self.stats[n] = []
for a in analytes:
ind = self.filt.grab_filt(filt, a)
dat = nominal_values(self.focus[a])
if eachtrace:
sts = []
for t in np.arange(self.n) + 1:
sts.append(f(dat[ind & (self.ns == t)]))
self.stats[n].append(sts)
else:
self.stats[n].append(f(dat[ind]))
self.stats[n] = np.array(self.stats[n])
return | python | {
"resource": ""
} |
q275699 | D.ablation_times | test | def ablation_times(self):
"""
Function for calculating the ablation time for each
ablation.
Returns
-------
dict of times for each ablation.
"""
ats = {}
for n in np.arange(self.n) + 1:
t = self.Time[self.ns == n]
ats[n - 1] = t.max() - t.min()
return ats | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.