_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q270900 | SecurityLevel | test | def SecurityLevel():
"""
Generates a filter chain for validating a security level.
"""
return (
f.Type(int) |
f.Min(1) |
f.Max(3) |
f.Optional(default=AddressGenerator.DEFAULT_SECURITY_LEVEL)
) | python | {
"resource": ""
} |
q270901 | ProposedTransaction.increment_legacy_tag | test | def increment_legacy_tag(self):
"""
Increments the transaction's legacy tag, used to fix insecure
bundle hashes when finalizing a bundle.
References:
- https://github.com/iotaledger/iota.lib.py/issues/84
"""
self._legacy_tag = (
Tag.from_trits(add_trits(self.legacy_tag.as_trits(), [1]))
) | python | {
"resource": ""
} |
q270902 | ProposedBundle.tag | test | def tag(self):
# type: () -> Tag
"""
Determines the most relevant tag for the bundle.
"""
for txn in reversed(self): # type: ProposedTransaction
if txn.tag:
return txn.tag
return Tag(b'') | python | {
"resource": ""
} |
q270903 | ProposedBundle.add_transaction | test | def add_transaction(self, transaction):
# type: (ProposedTransaction) -> None
"""
Adds a transaction to the bundle.
If the transaction message is too long, it will be split
automatically into multiple transactions.
"""
if self.hash:
raise RuntimeError('Bundle is already finalized.')
if transaction.value < 0:
raise ValueError('Use ``add_inputs`` to add inputs to the bundle.')
self._transactions.append(ProposedTransaction(
address=transaction.address,
value=transaction.value,
tag=transaction.tag,
message=transaction.message[:Fragment.LEN],
timestamp=transaction.timestamp,
))
# If the message is too long to fit in a single transactions,
# it must be split up into multiple transactions so that it will
# fit.
fragment = transaction.message[Fragment.LEN:]
while fragment:
self._transactions.append(ProposedTransaction(
address=transaction.address,
value=0,
tag=transaction.tag,
message=fragment[:Fragment.LEN],
timestamp=transaction.timestamp,
))
fragment = fragment[Fragment.LEN:] | python | {
"resource": ""
} |
q270904 | ProposedBundle.finalize | test | def finalize(self):
# type: () -> None
"""
Finalizes the bundle, preparing it to be attached to the Tangle.
"""
if self.hash:
raise RuntimeError('Bundle is already finalized.')
if not self:
raise ValueError('Bundle has no transactions.')
# Quick validation.
balance = self.balance
if balance < 0:
if self.change_address:
self.add_transaction(ProposedTransaction(
address=self.change_address,
value=-balance,
tag=self.tag,
))
else:
raise ValueError(
'Bundle has unspent inputs (balance: {balance}); '
'use ``send_unspent_inputs_to`` to create '
'change transaction.'.format(
balance=balance,
),
)
elif balance > 0:
raise ValueError(
'Inputs are insufficient to cover bundle spend '
'(balance: {balance}).'.format(
balance=balance,
),
)
# Generate bundle hash.
while True:
sponge = Kerl()
last_index = len(self) - 1
for i, txn in enumerate(self):
txn.current_index = i
txn.last_index = last_index
sponge.absorb(txn.get_signature_validation_trytes().as_trits())
bundle_hash_trits = [0] * HASH_LENGTH
sponge.squeeze(bundle_hash_trits)
bundle_hash = BundleHash.from_trits(bundle_hash_trits)
# Check that we generated a secure bundle hash.
# https://github.com/iotaledger/iota.lib.py/issues/84
if any(13 in part for part in normalize(bundle_hash)):
# Increment the legacy tag and try again.
tail_transaction = (
self.tail_transaction
) # type: ProposedTransaction
tail_transaction.increment_legacy_tag()
else:
break
# Copy bundle hash to individual transactions.
for txn in self:
txn.bundle_hash = bundle_hash
# Initialize signature/message fragment.
txn.signature_message_fragment = Fragment(txn.message or b'') | python | {
"resource": ""
} |
q270905 | ProposedBundle.sign_inputs | test | def sign_inputs(self, key_generator):
# type: (KeyGenerator) -> None
"""
Sign inputs in a finalized bundle.
"""
if not self.hash:
raise RuntimeError('Cannot sign inputs until bundle is finalized.')
# Use a counter for the loop so that we can skip ahead as we go.
i = 0
while i < len(self):
txn = self[i]
if txn.value < 0:
# In order to sign the input, we need to know the index
# of the private key used to generate it.
if txn.address.key_index is None:
raise with_context(
exc=ValueError(
'Unable to sign input {input}; '
'``key_index`` is None '
'(``exc.context`` has more info).'.format(
input=txn.address,
),
),
context={
'transaction': txn,
},
)
if txn.address.security_level is None:
raise with_context(
exc=ValueError(
'Unable to sign input {input}; '
'``security_level`` is None '
'(``exc.context`` has more info).'.format(
input=txn.address,
),
),
context={
'transaction': txn,
},
)
self.sign_input_at(i, key_generator.get_key_for(txn.address))
i += txn.address.security_level
else:
# No signature needed (nor even possible, in some
# cases); skip this transaction.
i += 1 | python | {
"resource": ""
} |
q270906 | ProposedBundle.sign_input_at | test | def sign_input_at(self, start_index, private_key):
# type: (int, PrivateKey) -> None
"""
Signs the input at the specified index.
:param start_index:
The index of the first input transaction.
If necessary, the resulting signature will be split across
multiple transactions automatically (i.e., if an input has
``security_level=2``, you still only need to call
:py:meth:`sign_input_at` once).
:param private_key:
The private key that will be used to generate the signature.
.. important::
Be sure that the private key was generated using the
correct seed, or the resulting signature will be
invalid!
"""
if not self.hash:
raise RuntimeError('Cannot sign inputs until bundle is finalized.')
private_key.sign_input_transactions(self, start_index) | python | {
"resource": ""
} |
q270907 | ProposedBundle._create_input_transactions | test | def _create_input_transactions(self, addy):
# type: (Address) -> None
"""
Creates transactions for the specified input address.
"""
self._transactions.append(ProposedTransaction(
address=addy,
tag=self.tag,
# Spend the entire address balance; if necessary, we will
# add a change transaction to the bundle.
value=-addy.balance,
))
# Signatures require additional transactions to store, due to
# transaction length limit.
# Subtract 1 to account for the transaction we just added.
for _ in range(addy.security_level - 1):
self._transactions.append(ProposedTransaction(
address=addy,
tag=self.tag,
# Note zero value; this is a meta transaction.
value=0,
)) | python | {
"resource": ""
} |
q270908 | convert_value_to_standard_unit | test | def convert_value_to_standard_unit(value, symbol='i'):
# type: (Text, Text) -> float
"""
Converts between any two standard units of iota.
:param value:
Value (affixed) to convert. For example: '1.618 Mi'.
:param symbol:
Unit symbol of iota to convert to. For example: 'Gi'.
:return:
Float as units of given symbol to convert to.
"""
try:
# Get input value
value_tuple = value.split()
amount = float(value_tuple[0])
except (ValueError, IndexError, AttributeError):
raise with_context(
ValueError('Value to convert is not valid.'),
context={
'value': value,
},
)
try:
# Set unit symbols and find factor/multiplier.
unit_symbol_from = value_tuple[1]
unit_factor_from = float(STANDARD_UNITS[unit_symbol_from])
unit_factor_to = float(STANDARD_UNITS[symbol])
except (KeyError, IndexError):
# Invalid symbol or no factor
raise with_context(
ValueError('Invalid IOTA unit.'),
context={
'value': value,
'symbol': symbol,
},
)
return amount * (unit_factor_from / unit_factor_to) | python | {
"resource": ""
} |
q270909 | decompress_G1 | test | def decompress_G1(z: G1Compressed) -> G1Uncompressed:
"""
Recovers x and y coordinates from the compressed point.
"""
# b_flag == 1 indicates the infinity point
b_flag = (z % POW_2_383) // POW_2_382
if b_flag == 1:
return Z1
x = z % POW_2_381
# Try solving y coordinate from the equation Y^2 = X^3 + b
# using quadratic residue
y = pow((x**3 + b.n) % q, (q + 1) // 4, q)
if pow(y, 2, q) != (x**3 + b.n) % q:
raise ValueError(
"The given point is not on G1: y**2 = x**3 + b"
)
# Choose the y whose leftmost bit is equal to the a_flag
a_flag = (z % POW_2_382) // POW_2_381
if (y * 2) // q != a_flag:
y = q - y
return (FQ(x), FQ(y), FQ(1)) | python | {
"resource": ""
} |
q270910 | prime_field_inv | test | def prime_field_inv(a: int, n: int) -> int:
"""
Extended euclidean algorithm to find modular inverses for integers
"""
if a == 0:
return 0
lm, hm = 1, 0
low, high = a % n, n
while low > 1:
r = high // low
nm, new = hm - lm * r, high - low * r
lm, low, hm, high = nm, new, lm, low
return lm % n | python | {
"resource": ""
} |
q270911 | Lexicon.from_json_file | test | def from_json_file(cls, filename):
"""
Load a lexicon from a JSON file.
Args:
filename (str): The path to a JSON dump.
"""
with open(filename, 'r') as fp:
return cls(json.load(fp)) | python | {
"resource": ""
} |
q270912 | Lexicon.find_word_groups | test | def find_word_groups(self, text, category, proximity=2):
"""
Given a string and a category, finds and combines words into
groups based on their proximity.
Args:
text (str): Some text.
tokens (list): A list of regex strings.
Returns:
list. The combined strings it found.
Example:
COLOURS = [r"red(?:dish)?", r"grey(?:ish)?", r"green(?:ish)?"]
s = 'GREYISH-GREEN limestone with RED or GREY sandstone.'
find_word_groups(s, COLOURS) --> ['greyish green', 'red', 'grey']
"""
f = re.IGNORECASE
words = getattr(self, category)
regex = re.compile(r'(\b' + r'\b|\b'.join(words) + r'\b)', flags=f)
candidates = regex.finditer(text)
starts, ends = [], []
groups = []
for item in candidates:
starts.append(item.span()[0])
ends.append(item.span()[1])
groups.append(item.group().lower())
new_starts = [] # As a check only.
new_groups = [] # This is what I want.
skip = False
for i, g in enumerate(groups):
if skip:
skip = False
continue
if (i < len(groups)-1) and (starts[i+1]-ends[i] <= proximity):
if g[-1] == '-':
sep = '' # Don't insert spaces after hyphens.
else:
sep = ' '
new_groups.append(g + sep + groups[i+1])
new_starts.append(starts[i])
skip = True
else:
if g not in new_groups:
new_groups.append(g)
new_starts.append(starts[i])
skip = False
return new_groups | python | {
"resource": ""
} |
q270913 | Lexicon.find_synonym | test | def find_synonym(self, word):
"""
Given a string and a dict of synonyms, returns the 'preferred'
word. Case insensitive.
Args:
word (str): A word.
Returns:
str: The preferred word, or the input word if not found.
Example:
>>> syn = {'snake': ['python', 'adder']}
>>> find_synonym('adder', syn)
'snake'
>>> find_synonym('rattler', syn)
'rattler'
TODO:
Make it handle case, returning the same case it received.
"""
if word and self.synonyms:
# Make the reverse look-up table.
reverse_lookup = {}
for k, v in self.synonyms.items():
for i in v:
reverse_lookup[i.lower()] = k.lower()
# Now check words against this table.
if word.lower() in reverse_lookup:
return reverse_lookup[word.lower()]
return word | python | {
"resource": ""
} |
q270914 | Lexicon.expand_abbreviations | test | def expand_abbreviations(self, text):
"""
Parse a piece of text and replace any abbreviations with their full
word equivalents. Uses the lexicon.abbreviations dictionary to find
abbreviations.
Args:
text (str): The text to parse.
Returns:
str: The text with abbreviations replaced.
"""
if not self.abbreviations:
raise LexiconError("No abbreviations in lexicon.")
def chunks(data, SIZE=25):
"""
Regex only supports 100 groups for munging callbacks. So we have to
chunk the abbreviation dicitonary.
"""
it = iter(data)
for i in range(0, len(data), SIZE):
yield {k: data[k] for k in islice(it, SIZE)}
def cb(g):
"""Regex callback"""
return self.abbreviations.get(g.group(0)) or g.group(0)
# Special cases.
# TODO: We should handle these with a special set of
# replacements that are made before the others.
text = re.sub(r'w/', r'wi', text)
# Main loop.
for subdict in chunks(self.abbreviations):
regex = r'(\b' + r'\b)|(\b'.join(subdict.keys()) + r'\b)'
text = re.sub(regex, cb, text)
return text | python | {
"resource": ""
} |
q270915 | Lexicon.split_description | test | def split_description(self, text):
"""
Split a description into parts, each of which can be turned into
a single component.
"""
# Protect some special sequences.
t = re.sub(r'(\d) ?in\. ', r'\1 inch ', text) # Protect.
t = re.sub(r'(\d) ?ft\. ', r'\1 feet ', t) # Protect.
# Transform all part delimiters to first splitter.
words = getattr(self, 'splitters')
try:
splitter = words[0].strip()
except:
splitter = 'with'
t = re.sub(r'\,?\;?\.? ?((under)?(less than)? \d+%) (?=\w)', r' '+splitter+' \1 ', t)
# Split.
f = re.IGNORECASE
pattern = re.compile(r'(?:' + r'|'.join(words) + r')', flags=f)
parts = filter(None, pattern.split(t))
return [i.strip() for i in parts] | python | {
"resource": ""
} |
q270916 | Lexicon.categories | test | def categories(self):
"""
Lists the categories in the lexicon, except the
optional categories.
Returns:
list: A list of strings of category names.
"""
keys = [k for k in self.__dict__.keys() if k not in SPECIAL]
return keys | python | {
"resource": ""
} |
q270917 | Decor.random | test | def random(cls, component):
"""
Returns a minimal Decor with a random colour.
"""
colour = random.sample([i for i in range(256)], 3)
return cls({'colour': colour, 'component': component, 'width': 1.0}) | python | {
"resource": ""
} |
q270918 | Decor.plot | test | def plot(self, fmt=None, fig=None, ax=None):
"""
Make a simple plot of the Decor.
Args:
fmt (str): A Python format string for the component summaries.
fig (Pyplot figure): A figure, optional. Use either fig or ax, not
both.
ax (Pyplot axis): An axis, optional. Use either fig or ax, not
both.
Returns:
fig or ax or None. If you pass in an ax, you get it back. If you pass
in a fig, you get it. If you pass nothing, the function creates a
plot object as a side-effect.
"""
u = 4 # aspect ratio of decor plot
v = 0.25 # ratio of decor tile width
r = None
if (fig is None) and (ax is None):
fig = plt.figure(figsize=(u, 1))
else:
r = fig
if ax is None:
ax = fig.add_axes([0.1*v, 0.1, 0.8*v, 0.8])
else:
r = ax
rect1 = patches.Rectangle((0, 0),
u*v, u*v,
color=self.colour,
lw=1,
hatch=self.hatch,
ec='k')
ax.add_patch(rect1)
ax.text(1.0+0.1*v*u, u*v*0.5,
self.component.summary(fmt=fmt),
fontsize=max(u, 15),
verticalalignment='center',
horizontalalignment='left')
ax.set_xlim([0, u*v])
ax.set_ylim([0, u*v])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax.invert_yaxis()
return r | python | {
"resource": ""
} |
q270919 | Legend.builtin | test | def builtin(cls, name):
"""
Generate a default legend.
Args:
name (str): The name of the legend you want. Not case sensitive.
'nsdoe': Nova Scotia Dept. of Energy
'canstrat': Canstrat
'nagmdm__6_2': USGS N. Am. Geol. Map Data Model 6.2
'nagmdm__6_1': USGS N. Am. Geol. Map Data Model 6.1
'nagmdm__4_3': USGS N. Am. Geol. Map Data Model 4.3
'sgmc': USGS State Geologic Map Compilation
Default 'nagmdm__6_2'.
Returns:
Legend: The legend stored in `defaults.py`.
"""
names = {
'nsdoe': LEGEND__NSDOE,
'canstrat': LEGEND__Canstrat,
'nagmdm__6_2': LEGEND__NAGMDM__6_2,
'nagmdm__6_1': LEGEND__NAGMDM__6_1,
'nagmdm__4_3': LEGEND__NAGMDM__4_3,
'sgmc': LEGEND__SGMC,
}
return cls.from_csv(text=names[name.lower()]) | python | {
"resource": ""
} |
q270920 | Legend.builtin_timescale | test | def builtin_timescale(cls, name):
"""
Generate a default timescale legend. No arguments.
Returns:
Legend: The timescale stored in `defaults.py`.
"""
names = {
'isc': TIMESCALE__ISC,
'usgs_isc': TIMESCALE__USGS_ISC,
'dnag': TIMESCALE__DNAG,
}
return cls.from_csv(text=names[name.lower()]) | python | {
"resource": ""
} |
q270921 | Legend.random | test | def random(cls, components, width=False, colour=None):
"""
Generate a random legend for a given list of components.
Args:
components (list or Striplog): A list of components. If you pass
a Striplog, it will use the primary components. If you pass a
component on its own, you will get a random Decor.
width (bool): Also generate widths for the components, based on the
order in which they are encountered.
colour (str): If you want to give the Decors all the same colour,
provide a hex string.
Returns:
Legend or Decor: A legend (or Decor) with random colours.
TODO:
It might be convenient to have a partial method to generate an
'empty' legend. Might be an easy way for someone to start with a
template, since it'll have the components in it already.
"""
try: # Treating as a Striplog.
list_of_Decors = [Decor.random(c)
for c
in [i[0] for i in components.unique if i[0]]
]
except:
try:
list_of_Decors = [Decor.random(c) for c in components.copy()]
except:
# It's a single component.
list_of_Decors = [Decor.random(components)]
if colour is not None:
for d in list_of_Decors:
d.colour = colour
if width:
for i, d in enumerate(list_of_Decors):
d.width = i + 1
return cls(list_of_Decors) | python | {
"resource": ""
} |
q270922 | Legend.from_image | test | def from_image(cls, filename, components,
ignore=None,
col_offset=0.1,
row_offset=2):
"""
A slightly easier way to make legends from images.
Args:
filename (str)
components (list)
ignore (list): Colours to ignore, e.g. "#FFFFFF" to ignore white.
col_offset (Number): If < 1, interpreted as proportion of way
across the image. If > 1, interpreted as pixels from left.
row_offset (int): Number of pixels to skip at the top of each
interval.
"""
if ignore is None:
ignore = []
rgb = utils.loglike_from_image(filename, offset=col_offset)
loglike = np.array([utils.rgb_to_hex(t) for t in rgb])
# Get the pixels and colour values at 'tops' (i.e. changes).
_, hexes = utils.tops_from_loglike(loglike, offset=row_offset)
# Reduce to unique colours.
hexes_reduced = []
for h in hexes:
if h not in hexes_reduced:
if h not in ignore:
hexes_reduced.append(h)
list_of_Decors = []
for i, c in enumerate(components):
d = Decor({'colour': hexes_reduced[i], 'component': c})
list_of_Decors.append(d)
return cls(list_of_Decors) | python | {
"resource": ""
} |
q270923 | Legend.from_csv | test | def from_csv(cls, filename=None, text=None):
"""
Read CSV text and generate a Legend.
Args:
string (str): The CSV string.
In the first row, list the properties. Precede the properties of the
component with 'comp ' or 'component '. For example:
colour, width, comp lithology, comp colour
#FFFFFF, 0, ,
#F7E9A6, 3, Sandstone, Grey
#FF99CC, 2, Anhydrite,
... etc
Note:
To edit a legend, the easiest thing to do is probably this:
- `legend.to_csv()`
- Edit the legend, call it `new_legend`.
- `legend = Legend.from_csv(text=new_legend)`
"""
if (filename is None) and (text is None):
raise LegendError("You must provide a filename or CSV text.")
if (filename is not None):
with open(filename, 'r') as f:
text = f.read()
try:
f = StringIO(text) # Python 3
except TypeError:
f = StringIO(unicode(text)) # Python 2
r = csv.DictReader(f, skipinitialspace=True)
list_of_Decors, components = [], []
kind = 'component'
for row in r:
d, component = {}, {}
for (k, v) in row.items():
if (k in [None, '']):
continue
if (v in [None, '']):
if k.lower() not in ['color', 'colour']:
continue
if k[:4].lower() == 'comp':
prop = ' '.join(k.split()[1:])
if v.lower() == 'true':
component[prop] = True
elif v.lower() == 'false':
component[prop] = False
else:
try:
component[prop] = float(v)
except ValueError:
component[prop] = v.lower()
elif k[:5].lower() == 'curve':
prop = ' '.join(k.split()[1:])
component[prop] = v.lower()
kind = 'curve'
else:
try:
d[k] = float(v)
except ValueError:
d[k] = v.lower()
this_component = Component(component)
d[kind] = this_component
# Check for duplicates and warn.
if this_component in components:
with warnings.catch_warnings():
warnings.simplefilter("always")
w = "This legend contains duplicate components."
warnings.warn(w)
components.append(this_component)
# Append to the master list and continue.
list_of_Decors.append(Decor(d))
return cls(list_of_Decors) | python | {
"resource": ""
} |
q270924 | Legend.to_csv | test | def to_csv(self):
"""
Renders a legend as a CSV string.
No arguments.
Returns:
str: The legend as a CSV.
"""
# We can't delegate this to Decor because we need to know the superset
# of all Decor properties. There may be lots of blanks.
header = []
component_header = []
for row in self:
for j in row.__dict__.keys():
if j == '_colour':
j = 'colour'
header.append(j)
for k in row.component.__dict__.keys():
component_header.append(k)
header = set(header)
component_header = set(component_header)
header.remove('component')
header_row = ''
if 'colour' in header:
header_row += 'colour,'
header.remove('colour')
has_colour = True
for item in header:
header_row += item + ','
for item in component_header:
header_row += 'component ' + item + ','
# Now we have a header row! Phew.
# Next we'll go back over the legend and collect everything.
result = header_row.strip(',') + '\n'
for row in self:
if has_colour:
result += row.__dict__.get('_colour', '') + ','
for item in header:
result += str(row.__dict__.get(item, '')) + ','
for item in component_header:
result += str(row.component.__dict__.get(item, '')) + ','
result += '\n'
return result | python | {
"resource": ""
} |
q270925 | Legend.max_width | test | def max_width(self):
"""
The maximum width of all the Decors in the Legend. This is needed
to scale a Legend or Striplog when plotting with widths turned on.
"""
try:
maximum = max([row.width for row in self.__list if row.width is not None])
return maximum
except:
return 0 | python | {
"resource": ""
} |
q270926 | Legend.get_decor | test | def get_decor(self, c, match_only=None):
"""
Get the decor for a component.
Args:
c (component): The component to look up.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
Decor. The matching Decor from the Legend, or None if not found.
"""
if isinstance(c, Component):
if c:
if match_only:
# Filter the component only those attributes
c = Component({k: getattr(c, k, None) for k in match_only})
for decor in self.__list:
try:
if c == decor.component:
return decor
except AttributeError:
continue
else:
for decor in self.__list:
try:
if getattr(c, 'mnemonic').lower() == decor.curve.mnemonic:
return decor
except AttributeError:
continue
return Decor({'colour': '#eeeeee', 'component': Component()}) | python | {
"resource": ""
} |
q270927 | Legend.getattr | test | def getattr(self, c, attr, default=None, match_only=None):
"""
Get the attribute of a component.
Args:
c (component): The component to look up.
attr (str): The attribute to get.
default (str): What to return in the event of no match.
match_only (list of str): The component attributes to include in the
comparison. Default: All of them.
Returns:
obj. The specified attribute of the matching Decor in the Legend.
"""
matching_decor = self.get_decor(c, match_only=match_only)
try:
return getattr(matching_decor, attr)
except AttributeError:
return default | python | {
"resource": ""
} |
q270928 | Legend.get_component | test | def get_component(self, colour, tolerance=0, default=None):
"""
Get the component corresponding to a display colour. This is for
generating a Striplog object from a colour image of a striplog.
Args:
colour (str): The hex colour string to look up.
tolerance (float): The colourspace distance within which to match.
default (component or None): The component to return in the event
of no match.
Returns:
component. The component best matching the provided colour.
"""
if not (0 <= tolerance <= np.sqrt(195075)):
raise LegendError('Tolerance must be between 0 and 441.67')
for decor in self.__list:
if colour.lower() == decor.colour:
return decor.component
# If we're here, we didn't find one yet.
r1, g1, b1 = utils.hex_to_rgb(colour)
# Start with a best match of black.
best_match = '#000000'
best_match_dist = np.sqrt(r1**2. + g1**2. + b1**2.)
# Now compare to each colour in the legend.
for decor in self.__list:
r2, g2, b2 = decor.rgb
distance = np.sqrt((r2-r1)**2. + (g2-g1)**2. + (b2-b1)**2.)
if distance < best_match_dist:
best_match = decor.component
best_match_dist = distance
best_match_colour = decor.colour
if best_match_dist <= tolerance:
return best_match
else:
with warnings.catch_warnings():
warnings.simplefilter("always")
w = "No match found for {0} ".format(colour.lower())
w += "with tolerance of {0}. Best match is ".format(tolerance)
w += "{0}, {1}".format(best_match.summary(), best_match_colour)
w += ", d={0}".format(best_match_dist)
warnings.warn(w)
return default | python | {
"resource": ""
} |
q270929 | Legend.plot | test | def plot(self, fmt=None):
"""
Make a simple plot of the legend.
Simply calls Decor.plot() on all of its members.
TODO: Build a more attractive plot.
"""
for d in self.__list:
d.plot(fmt=fmt)
return None | python | {
"resource": ""
} |
q270930 | Component.from_text | test | def from_text(cls, text, lexicon, required=None, first_only=True):
"""
Generate a Component from a text string, using a Lexicon.
Args:
text (str): The text string to parse.
lexicon (Lexicon): The dictionary to use for the
categories and lexemes.
required (str): An attribute that we must have. If a required
attribute is missing from the component, then None is returned.
first_only (bool): Whether to only take the first
match of a lexeme against the text string.
Returns:
Component: A Component object, or None if there was no
must-have field.
"""
component = lexicon.get_component(text, first_only=first_only)
if required and (required not in component):
return None
else:
return cls(component) | python | {
"resource": ""
} |
q270931 | Component.summary | test | def summary(self, fmt=None, initial=True, default=''):
"""
Given a format string, return a summary description of a component.
Args:
component (dict): A component dictionary.
fmt (str): Describes the format with a string. If no format is
given, you will just get a list of attributes. If you give the
empty string (''), you'll get `default` back. By default this
gives you the empty string, effectively suppressing the
summary.
initial (bool): Whether to capitialize the first letter. Default is
True.
default (str): What to give if there's no component defined.
Returns:
str: A summary string.
Example:
r = Component({'colour': 'Red',
'grainsize': 'VF-F',
'lithology': 'Sandstone'})
r.summary() --> 'Red, vf-f, sandstone'
"""
if default and not self.__dict__:
return default
if fmt == '':
return default
keys = [k for k, v in self.__dict__.items() if v is not '']
f = fmt or '{' + '}, {'.join(keys) + '}'
try:
summary = CustomFormatter().format(f, **self.__dict__)
except KeyError as e:
raise ComponentError("Error building summary, "+str(e))
if summary and initial and not fmt:
summary = summary[0].upper() + summary[1:]
return summary | python | {
"resource": ""
} |
q270932 | Rock | test | def Rock(*args, **kwargs):
"""
Graceful deprecation for old class name.
"""
with warnings.catch_warnings():
warnings.simplefilter("always")
w = "The 'Rock' class was renamed 'Component'. "
w += "Please update your code."
warnings.warn(w, DeprecationWarning, stacklevel=2)
return Component(*args, **kwargs) | python | {
"resource": ""
} |
q270933 | _process_row | test | def _process_row(text, columns):
"""
Processes a single row from the file.
"""
if not text:
return
# Construct the column dictionary that maps each field to
# its start, its length, and its read and write functions.
coldict = {k: {'start': s,
'len': l,
'read': r,
'write': w} for k, (s, l, r, w) in columns.items()}
# Now collect the item
item = {}
for field in coldict:
value = _get_field(text, coldict, field)
if value is not None:
item[field] = value
return item | python | {
"resource": ""
} |
q270934 | parse_canstrat | test | def parse_canstrat(text):
"""
Read all the rows and return a dict of the results.
"""
result = {}
for row in text.split('\n'):
if not row:
continue
if len(row) < 8: # Not a real record.
continue
# Read the metadata for this row/
row_header = _process_row(row, columns_) or {'card': None}
card = row_header['card']
# Now we know the card type for this row, we can process it.
if card is not None:
item = _process_row(row, columns[card])
this_list = result.get(card, [])
this_list.append(item)
result[card] = this_list
# Flatten if possible.
for c, d in result.items():
if len(d) == 1:
result[c] = d[0]
return result | python | {
"resource": ""
} |
q270935 | Striplog.__strict | test | def __strict(self):
"""
Private method. Checks if striplog is monotonically increasing in
depth.
Returns:
Bool.
"""
def conc(a, b):
return a + b
# Check boundaries, b
b = np.array(reduce(conc, [[i.top.z, i.base.z] for i in self]))
return all(np.diff(b) >= 0) | python | {
"resource": ""
} |
q270936 | Striplog.unique | test | def unique(self):
"""
Property. Summarize a Striplog with some statistics.
Returns:
List. A list of (Component, total thickness thickness) tuples.
"""
all_rx = set([iv.primary for iv in self])
table = {r: 0 for r in all_rx}
for iv in self:
table[iv.primary] += iv.thickness
return sorted(table.items(), key=operator.itemgetter(1), reverse=True) | python | {
"resource": ""
} |
q270937 | Striplog.__intervals_from_tops | test | def __intervals_from_tops(self,
tops,
values,
basis,
components,
field=None,
ignore_nan=True):
"""
Private method. Take a sequence of tops in an arbitrary dimension,
and provide a list of intervals from which a striplog can be made.
This is only intended to be used by ``from_image()``.
Args:
tops (iterable). A list of floats.
values (iterable). A list of values to look up.
basis (iterable). A list of components.
components (iterable). A list of Components.
Returns:
List. A list of Intervals.
"""
# Scale tops to actual depths.
length = float(basis.size)
start, stop = basis[0], basis[-1]
tops = [start + (p/(length-1)) * (stop-start) for p in tops]
bases = tops[1:] + [stop]
list_of_Intervals = []
for i, t in enumerate(tops):
v, c, d = values[i], [], {}
if ignore_nan and np.isnan(v):
continue
if (field is not None):
d = {field: v}
if components is not None:
try:
c = [deepcopy(components[int(v)])]
except IndexError:
c = []
if c and (c[0] is None):
c = []
interval = Interval(t, bases[i], data=d, components=c)
list_of_Intervals.append(interval)
return list_of_Intervals | python | {
"resource": ""
} |
q270938 | Striplog._clean_longitudinal_data | test | def _clean_longitudinal_data(cls, data, null=None):
"""
Private function. Make sure we have what we need to make a striplog.
"""
# Rename 'depth' or 'MD'
if ('top' not in data.keys()):
data['top'] = data.pop('depth', data.pop('MD', None))
# Sort everything
idx = list(data.keys()).index('top')
values = sorted(zip(*data.values()), key=lambda x: x[idx])
data = {k: list(v) for k, v in zip(data.keys(), zip(*values))}
if data['top'] is None:
raise StriplogError('Could not get tops.')
# Get rid of null-like values if specified.
if null is not None:
for k, v in data.items():
data[k] = [i if i != null else None for i in v]
return data | python | {
"resource": ""
} |
q270939 | Striplog.from_petrel | test | def from_petrel(cls, filename,
stop=None,
points=False,
null=None,
function=None,
include=None,
exclude=None,
remap=None,
ignore=None):
"""
Makes a striplog from a Petrel text file.
Returns:
striplog.
"""
result = utils.read_petrel(filename,
function=function,
remap=remap,
)
data = cls._clean_longitudinal_data(result,
null=null
)
list_of_Intervals = cls._build_list_of_Intervals(data,
stop=stop,
points=points,
include=include,
exclude=exclude,
ignore=ignore
)
if list_of_Intervals:
return cls(list_of_Intervals)
return None | python | {
"resource": ""
} |
q270940 | Striplog._build_list_of_Intervals | test | def _build_list_of_Intervals(cls,
data_dict,
stop=None,
points=False,
include=None,
exclude=None,
ignore=None,
lexicon=None):
"""
Private function. Takes a data dictionary and reconstructs a list
of Intervals from it.
Args:
data_dict (dict)
stop (float): Where to end the last interval.
points (bool)
include (dict)
exclude (dict)
ignore (list)
lexicon (Lexicon)
Returns:
list.
"""
include = include or {}
exclude = exclude or {}
ignore = ignore or []
# Reassemble as list of dicts
all_data = []
for data in zip(*data_dict.values()):
all_data.append({k: v for k, v in zip(data_dict.keys(), data)})
# Sort
all_data = sorted(all_data, key=lambda x: x['top'])
# Filter down:
wanted_data = []
for dictionary in all_data:
keep = True
delete = []
for k, v in dictionary.items():
incl = include.get(k, utils.null_default(True))
excl = exclude.get(k, utils.null_default(False))
if k in ignore:
delete.append(k)
if not incl(v):
keep = False
if excl(v):
keep = False
if delete:
for key in delete:
_ = dictionary.pop(key, None)
if keep:
wanted_data.append(dictionary)
# Fill in
if not points:
for i, iv in enumerate(wanted_data):
if iv.get('base', None) is None:
try: # To set from next interval
iv['base'] = wanted_data[i+1]['top']
except (IndexError, KeyError):
# It's the last interval
if stop is not None:
thick = stop - iv['top']
else:
thick = 1
iv['base'] = iv['top'] + thick
# Build the list of intervals to pass to __init__()
list_of_Intervals = []
for iv in wanted_data:
top = iv.pop('top')
base = iv.pop('base', None)
descr = iv.pop('description', '')
if iv:
c, d = {}, {}
for k, v in iv.items():
if (k[:5].lower() == 'comp ') or (k[:9].lower() == 'component'):
k = re.sub(r'comp(?:onent)? ', '', k, flags=re.I)
c[k] = v # It's a component
else:
if v is not None:
d[k] = v # It's data
comp = [Component(c)] if c else None
this = Interval(**{'top': top,
'base': base,
'description': descr,
'data': d,
'components': comp})
else:
this = Interval(**{'top': top,
'base': base,
'description': descr,
'lexicon': lexicon})
list_of_Intervals.append(this)
return list_of_Intervals | python | {
"resource": ""
} |
q270941 | Striplog.from_csv | test | def from_csv(cls, filename=None,
text=None,
dlm=',',
lexicon=None,
points=False,
include=None,
exclude=None,
remap=None,
function=None,
null=None,
ignore=None,
source=None,
stop=None,
fieldnames=None):
"""
Load from a CSV file or text.
"""
if (filename is None) and (text is None):
raise StriplogError("You must provide a filename or CSV text.")
if (filename is not None):
if source is None:
source = filename
with open(filename, 'r') as f:
text = f.read()
source = source or 'CSV'
# Deal with multiple spaces in space delimited file.
if dlm == ' ':
text = re.sub(r'[ \t]+', ' ', text)
if fieldnames is not None:
text = dlm.join(fieldnames) + '\n' + text
try:
f = StringIO(text) # Python 3
except TypeError:
f = StringIO(unicode(text)) # Python 2
reader = csv.DictReader(f, delimiter=dlm)
# Reorganize the data to make fixing it easier.
reorg = {k.strip().lower(): [] for k in reader.fieldnames if k is not None}
t = f.tell()
for key in reorg:
f.seek(t)
for r in reader:
s = {k.strip().lower(): v.strip() for k, v in r.items()}
try:
reorg[key].append(float(s[key]))
except ValueError:
reorg[key].append(s[key])
f.close()
remap = remap or {}
for k, v in remap.items():
reorg[v] = reorg.pop(k)
data = cls._clean_longitudinal_data(reorg, null=null)
list_of_Intervals = cls._build_list_of_Intervals(data,
points=points,
lexicon=lexicon,
include=include,
exclude=exclude,
ignore=ignore,
stop=stop)
return cls(list_of_Intervals, source=source) | python | {
"resource": ""
} |
q270942 | Striplog.from_image | test | def from_image(cls, filename, start, stop, legend,
source="Image",
col_offset=0.1,
row_offset=2,
tolerance=0):
"""
Read an image and generate Striplog.
Args:
filename (str): An image file, preferably high-res PNG.
start (float or int): The depth at the top of the image.
stop (float or int): The depth at the bottom of the image.
legend (Legend): A legend to look up the components in.
source (str): A source for the data. Default: 'Image'.
col_offset (Number): The proportion of the way across the image
from which to extract the pixel column. Default: 0.1 (ie 10%).
row_offset (int): The number of pixels to skip at the top of
each change in colour. Default: 2.
tolerance (float): The Euclidean distance between hex colours,
which has a maximum (black to white) of 441.67 in base 10.
Default: 0.
Returns:
Striplog: The ``striplog`` object.
"""
rgb = utils.loglike_from_image(filename, col_offset)
loglike = np.array([utils.rgb_to_hex(t) for t in rgb])
# Get the pixels and colour values at 'tops' (i.e. changes).
tops, hexes = utils.tops_from_loglike(loglike, offset=row_offset)
# If there are consecutive tops, we assume it's because there is a
# single-pixel row that we don't want. So take the second one only.
# We used to do this reduction in ``utils.tops_from_loglike()`` but
# it was prventing us from making intervals only one sample thick.
nonconsecutive = np.append(np.diff(tops), 2)
tops = tops[nonconsecutive > 1]
hexes = hexes[nonconsecutive > 1]
# Get the set of unique colours.
hexes_reduced = list(set(hexes))
# Get the components corresponding to the colours.
components = [legend.get_component(h, tolerance=tolerance)
for h in hexes_reduced]
# Turn them into integers.
values = [hexes_reduced.index(i) for i in hexes]
basis = np.linspace(start, stop, loglike.size)
list_of_Intervals = cls.__intervals_from_tops(tops,
values,
basis,
components)
return cls(list_of_Intervals, source="Image") | python | {
"resource": ""
} |
q270943 | Striplog.from_log | test | def from_log(cls, log,
cutoff=None,
components=None,
legend=None,
legend_field=None,
field=None,
right=False,
basis=None,
source='Log'):
"""
Turn a 1D array into a striplog, given a cutoff.
Args:
log (array-like): A 1D array or a list of integers.
cutoff (number or array-like): The log value(s) at which to bin
the log. Optional.
components (array-like): A list of components. Use this or
``legend``.
legend (``Legend``): A legend object. Use this or ``components``.
legend_field ('str'): If you're not trying to match against
components, then you can match the log values to this field in
the Decors.
field (str): The field in the Interval's ``data`` to store the log
values as.
right (bool): Which side of the cutoff to send things that are
equal to, i.e. right on, the cutoff.
basis (array-like): A depth basis for the log, so striplog knows
where to put the boundaries.
source (str): The source of the data. Default 'Log'.
Returns:
Striplog: The ``striplog`` object.
"""
if (components is None) and (legend is None) and (field is None):
m = 'You must provide a list of components, and legend, or a field.'
raise StriplogError(m)
if (legend is not None) and (legend_field is None):
try: # To treat it like a legend.
components = [deepcopy(decor.component) for decor in legend]
except AttributeError: # It's just a list of components.
pass
if legend_field is not None:
field_values = [getattr(d, legend_field, 0) for d in legend]
components = [Component() for i in range(int(max(field_values)+1))]
for i, decor in enumerate(legend):
components[i] = deepcopy(decor.component)
if cutoff is not None:
# First make sure we have enough components.
try:
n = len(cutoff)
except TypeError:
n = 1
if len(components) < n+1:
m = 'For n cutoffs, you need to provide at least'
m += 'n+1 components.'
raise StriplogError(m)
# Digitize.
try: # To use cutoff as a list.
a = np.digitize(log, cutoff, right)
except ValueError: # It's just a number.
a = np.digitize(log, [cutoff], right)
else:
a = np.copy(log)
tops, values = utils.tops_from_loglike(a)
if basis is None:
m = 'You must provide a depth or elevation basis.'
raise StriplogError(m)
list_of_Intervals = cls.__intervals_from_tops(tops,
values,
basis,
components,
field=field
)
return cls(list_of_Intervals, source=source) | python | {
"resource": ""
} |
q270944 | Striplog.from_las3 | test | def from_las3(cls, string, lexicon=None,
source="LAS",
dlm=',',
abbreviations=False):
"""
Turn LAS3 'lithology' section into a Striplog.
Args:
string (str): A section from an LAS3 file.
lexicon (Lexicon): The language for conversion to components.
source (str): A source for the data.
dlm (str): The delimiter.
abbreviations (bool): Whether to expand abbreviations.
Returns:
Striplog: The ``striplog`` object.
Note:
Handles multiple 'Data' sections. It would be smarter for it
to handle one at a time, and to deal with parsing the multiple
sections in the Well object.
Does not read an actual LAS file. Use the Well object for that.
"""
f = re.DOTALL | re.IGNORECASE
regex = r'\~\w+?_Data.+?\n(.+?)(?:\n\n+|\n*\~|\n*$)'
pattern = re.compile(regex, flags=f)
text = pattern.search(string).group(1)
s = re.search(r'\.(.+?)\: ?.+?source', string)
if s:
source = s.group(1).strip()
return cls.from_descriptions(text, lexicon,
source=source,
dlm=dlm,
abbreviations=abbreviations) | python | {
"resource": ""
} |
q270945 | Striplog.from_canstrat | test | def from_canstrat(cls, filename, source='canstrat'):
"""
Eat a Canstrat DAT file and make a striplog.
"""
with open(filename) as f:
dat = f.read()
data = parse_canstrat(dat)
list_of_Intervals = []
for d in data[7]: # 7 is the 'card type' for lithology info.
if d.pop('skip'):
continue
top = d.pop('top')
base = d.pop('base')
comps = [Component({'lithology': d['rtc'],
'colour': d['colour_name']
})]
iv = Interval(top=top, base=base, components=comps, data=d)
list_of_Intervals.append(iv)
return cls(list_of_Intervals, source=source) | python | {
"resource": ""
} |
q270946 | Striplog.copy | test | def copy(self):
"""Returns a shallow copy."""
return Striplog([i.copy() for i in self],
order=self.order,
source=self.source) | python | {
"resource": ""
} |
q270947 | Striplog.to_csv | test | def to_csv(self,
filename=None,
as_text=True,
use_descriptions=False,
dlm=",",
header=True):
"""
Returns a CSV string built from the summaries of the Intervals.
Args:
use_descriptions (bool): Whether to use descriptions instead
of summaries, if available.
dlm (str): The delimiter.
header (bool): Whether to form a header row.
Returns:
str: A string of comma-separated values.
"""
if (filename is None):
if (not as_text):
raise StriplogError("You must provide a filename or set as_text to True.")
else:
as_text = False
if as_text:
output = StringIO()
else:
output = open(filename, 'w')
fieldnames = ['Top', 'Base', 'Component']
writer = csv.DictWriter(output,
delimiter=dlm,
fieldnames=fieldnames,
quoting=csv.QUOTE_MINIMAL)
if header:
writer.writeheader()
for i in self.__list:
if use_descriptions and i.description:
text = i.description
elif i.primary:
text = i.primary.summary()
else:
text = ''
data = {j: k for j, k in zip(fieldnames, [i.top.z, i.base.z, text])}
writer.writerow(data)
if as_text:
return output.getvalue()
#return output
else:
output.close
return None | python | {
"resource": ""
} |
q270948 | Striplog.to_las3 | test | def to_las3(self, use_descriptions=False, dlm=",", source="Striplog"):
"""
Returns an LAS 3.0 section string.
Args:
use_descriptions (bool): Whether to use descriptions instead
of summaries, if available.
dlm (str): The delimiter.
source (str): The sourse of the data.
Returns:
str: A string forming Lithology section of an LAS3 file.
"""
data = self.to_csv(use_descriptions=use_descriptions,
dlm=dlm,
header=False)
return templates.section.format(name='Lithology',
short="LITH",
source=source,
data=data) | python | {
"resource": ""
} |
q270949 | Striplog.plot_axis | test | def plot_axis(self,
ax,
legend,
ladder=False,
default_width=1,
match_only=None,
colour=None,
colour_function=None,
cmap=None,
default=None,
width_field=None,
**kwargs
):
"""
Plotting, but only the Rectangles. You have to set up the figure.
Returns a matplotlib axis object.
Args:
ax (axis): The matplotlib axis to plot into.
legend (Legend): The Legend to use for colours, etc.
ladder (bool): Whether to use widths or not. Default False.
default_width (int): A width for the plot if not using widths.
Default 1.
match_only (list): A list of strings matching the attributes you
want to compare when plotting.
colour (str): Which data field to use for colours.
cmap (cmap): Matplotlib colourmap. Default ``viridis``.
default (float): The default (null) value.
width_field (str): The field to use for the width of the patches.
**kwargs are passed through to matplotlib's ``patches.Rectangle``.
Returns:
axis: The matplotlib.pyplot axis.
"""
default_c = None
patches = []
for iv in self.__list:
origin = (0, iv.top.z)
d = legend.get_decor(iv.primary, match_only=match_only)
thick = iv.base.z - iv.top.z
if ladder:
if width_field is not None:
w = iv.data.get(width_field, 1)
w = default_width * w/self.max_field(width_field)
default_c = 'gray'
elif legend is not None:
w = d.width or default_width
try:
w = default_width * w/legend.max_width
except:
w = default_width
else:
w = default_width
# Allow override of lw
this_patch_kwargs = kwargs.copy()
lw = this_patch_kwargs.pop('lw', 0)
ec = this_patch_kwargs.pop('ec', 'k')
fc = this_patch_kwargs.pop('fc', None) or default_c or d.colour
if colour is None:
rect = mpl.patches.Rectangle(origin,
w,
thick,
fc=fc,
lw=lw,
hatch=d.hatch,
ec=ec, # edgecolour for hatching
**this_patch_kwargs)
ax.add_patch(rect)
else:
rect = mpl.patches.Rectangle(origin,
w,
thick,
lw=lw,
ec=ec, # edgecolour for hatching
**this_patch_kwargs)
patches.append(rect)
if colour is not None:
cmap = cmap or 'viridis'
p = mpl.collections.PatchCollection(patches, cmap=cmap, lw=lw)
p.set_array(self.get_data(colour, colour_function, default=default))
ax.add_collection(p)
cb = plt.colorbar(p) # orientation='horizontal' only really works with ticks=[0, 0.1, 0.2] say
cb.outline.set_linewidth(0)
return ax | python | {
"resource": ""
} |
q270950 | Striplog.get_data | test | def get_data(self, field, function=None, default=None):
"""
Get data from the striplog.
"""
f = function or utils.null
data = []
for iv in self:
d = iv.data.get(field)
if d is None:
if default is not None:
d = default
else:
d = np.nan
data.append(f(d))
return np.array(data) | python | {
"resource": ""
} |
q270951 | Striplog.extract | test | def extract(self, log, basis, name, function=None):
"""
'Extract' a log into the components of a striplog.
Args:
log (array_like). A log or other 1D data.
basis (array_like). The depths or elevations of the log samples.
name (str). The name of the attribute to store in the components.
function (function). A function that takes an array as the only
input, and returns whatever you want to store in the 'name'
attribute of the primary component.
Returns:
None. The function works on the striplog in place.
"""
# Build a dict of {index: [log values]} to keep track.
intervals = {}
previous_ix = -1
for i, z in enumerate(basis):
ix = self.read_at(z, index=True)
if ix is None:
continue
if ix == previous_ix:
intervals[ix].append(log[i])
else:
intervals[ix] = [log[i]]
previous_ix = ix
# Set the requested attribute in the primary comp of each interval.
for ix, data in intervals.items():
f = function or utils.null
d = f(np.array(data))
self[ix].data[name] = d
return None | python | {
"resource": ""
} |
q270952 | Striplog.find | test | def find(self, search_term, index=False):
"""
Look for a regex expression in the descriptions of the striplog.
If there's no description, it looks in the summaries.
If you pass a Component, then it will search the components, not the
descriptions or summaries.
Case insensitive.
Args:
search_term (string or Component): The thing you want to search
for. Strings are treated as regular expressions.
index (bool): Whether to return the index instead of the interval.
Returns:
Striplog: A striplog that contains only the 'hit' Intervals.
However, if ``index`` was ``True``, then that's what you get.
"""
hits = []
for i, iv in enumerate(self):
try:
search_text = iv.description or iv.primary.summary()
pattern = re.compile(search_term, flags=re.IGNORECASE)
if pattern.search(search_text):
hits.append(i)
except TypeError:
if search_term in iv.components:
hits.append(i)
if hits and index:
return hits
elif hits:
return self[hits]
else:
return | python | {
"resource": ""
} |
q270953 | Striplog.find_overlaps | test | def find_overlaps(self, index=False):
"""
Find overlaps in a striplog.
Args:
index (bool): If True, returns indices of intervals with
gaps after them.
Returns:
Striplog: A striplog of all the overlaps as intervals.
"""
return self.__find_incongruities(op=operator.gt, index=index) | python | {
"resource": ""
} |
q270954 | Striplog.find_gaps | test | def find_gaps(self, index=False):
"""
Finds gaps in a striplog.
Args:
index (bool): If True, returns indices of intervals with
gaps after them.
Returns:
Striplog: A striplog of all the gaps. A sort of anti-striplog.
"""
return self.__find_incongruities(op=operator.lt, index=index) | python | {
"resource": ""
} |
q270955 | Striplog.prune | test | def prune(self, limit=None, n=None, percentile=None, keep_ends=False):
"""
Remove intervals below a certain limit thickness. In place.
Args:
limit (float): Anything thinner than this will be pruned.
n (int): The n thinnest beds will be pruned.
percentile (float): The thinnest specified percentile will be
pruned.
keep_ends (bool): Whether to keep the first and last, regardless
of whether they meet the pruning criteria.
"""
strip = self.copy()
if not (limit or n or percentile):
m = "You must provide a limit or n or percentile for pruning."
raise StriplogError(m)
if limit:
prune = [i for i, iv in enumerate(strip) if iv.thickness < limit]
if n:
prune = strip.thinnest(n=n, index=True)
if percentile:
n = np.floor(len(strip)*percentile/100)
prune = strip.thinnest(n=n, index=True)
if keep_ends:
first, last = 0, len(strip) - 1
if first in prune:
prune.remove(first)
if last in prune:
prune.remove(last)
del strip[prune]
return strip | python | {
"resource": ""
} |
q270956 | Striplog.anneal | test | def anneal(self):
"""
Fill in empty intervals by growing from top and base.
Note that this operation happens in-place and destroys any information
about the ``Position`` (e.g. metadata associated with the top or base).
See GitHub issue #54.
"""
strip = self.copy()
gaps = strip.find_gaps(index=True)
if not gaps:
return
for gap in gaps:
before = strip[gap]
after = strip[gap + 1]
if strip.order == 'depth':
t = (after.top.z-before.base.z)/2
before.base = before.base.z + t
after.top = after.top.z - t
else:
t = (after.base-before.top)/2
before.top = before.top.z + t
after.base = after.base.z - t
return strip | python | {
"resource": ""
} |
q270957 | Striplog.fill | test | def fill(self, component=None):
"""
Fill gaps with the component provided.
Example
t = s.fill(Component({'lithology': 'cheese'}))
"""
c = [component] if component is not None else []
# Make the intervals to go in the gaps.
gaps = self.find_gaps()
if not gaps:
return self
for iv in gaps:
iv.components = c
return deepcopy(self) + gaps | python | {
"resource": ""
} |
q270958 | Striplog.union | test | def union(self, other):
"""
Makes a striplog of all unions.
Args:
Striplog. The striplog instance to union with.
Returns:
Striplog. The result of the union.
"""
if not isinstance(other, self.__class__):
m = "You can only union striplogs with each other."
raise StriplogError(m)
result = []
for iv in deepcopy(self):
for jv in other:
if iv.any_overlaps(jv):
iv = iv.union(jv)
result.append(iv)
return Striplog(result) | python | {
"resource": ""
} |
q270959 | Striplog.intersect | test | def intersect(self, other):
"""
Makes a striplog of all intersections.
Args:
Striplog. The striplog instance to intersect with.
Returns:
Striplog. The result of the intersection.
"""
if not isinstance(other, self.__class__):
m = "You can only intersect striplogs with each other."
raise StriplogError(m)
result = []
for iv in self:
for jv in other:
try:
result.append(iv.intersect(jv))
except IntervalError:
# The intervals don't overlap
pass
return Striplog(result) | python | {
"resource": ""
} |
q270960 | Striplog.merge_overlaps | test | def merge_overlaps(self):
"""
Merges overlaps by merging overlapping Intervals.
The function takes no arguments and returns ``None``. It operates on
the striplog 'in place'
TODO: This function will not work if any interval overlaps more than
one other intervals at either its base or top.
"""
overlaps = np.array(self.find_overlaps(index=True))
if not overlaps.any():
return
for overlap in overlaps:
before = self[overlap].copy()
after = self[overlap + 1].copy()
# Get rid of the before and after pieces.
del self[overlap]
del self[overlap]
# Make the new piece.
new_segment = before.merge(after)
# Insert it.
self.__insert(overlap, new_segment)
overlaps += 1
return | python | {
"resource": ""
} |
q270961 | Striplog.hist | test | def hist(self,
lumping=None,
summary=False,
sort=True,
plot=True,
legend=None,
ax=None
):
"""
Plots a histogram and returns the data for it.
Args:
lumping (str): If given, the bins will be lumped based on this
attribute of the primary components of the intervals
encountered.
summary (bool): If True, the summaries of the components are
returned as the bins. Otherwise, the default behaviour is to
return the Components themselves.
sort (bool): If True (default), the histogram is sorted by value,
starting with the largest.
plot (bool): If True (default), produce a bar plot.
legend (Legend): The legend with which to colour the bars.
ax (axis): An axis object, which will be returned if provided.
If you don't provide one, it will be created but not returned.
Returns:
Tuple: A tuple of tuples of entities and counts.
TODO:
Deal with numeric properties, so I can histogram 'Vp' values, say.
"""
# This seems like overkill, but collecting all this stuff gives
# the user some choice about what they get back.
comps = []
labels = []
entries = defaultdict(int)
for i in self:
if lumping:
k = i.primary[lumping]
else:
if summary:
k = i.primary.summary()
else:
k = i.primary
comps.append(i.primary)
labels.append(i.primary.summary())
entries[k] += i.thickness
if sort:
allitems = sorted(entries.items(), key=lambda i: i[1], reverse=True)
ents, counts = zip(*allitems)
else:
ents, counts = tuple(entries.keys()), tuple(entries.values())
# Make plot.
if plot:
if ax is None:
fig, ax = plt.subplots()
return_ax = False
else:
return_ax = True
ind = np.arange(len(ents))
bars = ax.bar(ind, counts, align='center')
ax.set_xticks(ind)
ax.set_xticklabels(labels)
if legend:
colours = [legend.get_colour(c) for c in comps]
for b, c in zip(bars, colours):
b.set_color(c)
ax.set_ylabel('Thickness [m]')
else:
bars = []
if plot and return_ax:
return counts, ents, ax
return counts, ents, bars | python | {
"resource": ""
} |
q270962 | Striplog.invert | test | def invert(self, copy=False):
"""
Inverts the striplog, changing its order and the order of its contents.
Operates in place by default.
Args:
copy (bool): Whether to operate in place or make a copy.
Returns:
None if operating in-place, or an inverted copy of the striplog
if not.
"""
if copy:
return Striplog([i.invert(copy=True) for i in self])
else:
for i in self:
i.invert()
self.__sort()
o = self.order
self.order = {'depth': 'elevation', 'elevation': 'depth'}[o]
return | python | {
"resource": ""
} |
q270963 | Striplog.crop | test | def crop(self, extent, copy=False):
"""
Crop to a new depth range.
Args:
extent (tuple): The new start and stop depth. Must be 'inside'
existing striplog.
copy (bool): Whether to operate in place or make a copy.
Returns:
Operates in place by deault; if copy is True, returns a striplog.
"""
try:
if extent[0] is None:
extent = (self.start.z, extent[1])
if extent[1] is None:
extent = (extent[0], self.stop.z)
except:
m = "You must provide a 2-tuple for the new extents. Use None for"
m += " the existing start or stop."
raise StriplogError(m)
first_ix = self.read_at(extent[0], index=True)
last_ix = self.read_at(extent[1], index=True)
first = self[first_ix].split_at(extent[0])[1]
last = self[last_ix].split_at(extent[1])[0]
new_list = self.__list[first_ix:last_ix+1].copy()
new_list[0] = first
new_list[-1] = last
if copy:
return Striplog(new_list)
else:
self.__list = new_list
return | python | {
"resource": ""
} |
q270964 | Striplog.quality | test | def quality(self, tests, alias=None):
"""
Run a series of tests and return the corresponding results.
Based on curve testing for ``welly``.
Args:
tests (list): a list of functions.
Returns:
list. The results. Stick to booleans (True = pass) or ints.
"""
# This is hacky... striplog should probably merge with welly...
# Ignore aliases
alias = alias or {}
alias = alias.get('striplog', alias.get('Striplog', []))
# Gather the tests.
# First, anything called 'all', 'All', or 'ALL'.
# Second, anything with the name of the curve we're in now.
# Third, anything that the alias list has for this curve.
# (This requires a reverse look-up so it's a bit messy.)
this_tests =\
tests.get('all', [])+tests.get('All', [])+tests.get('ALL', [])\
+ tests.get('striplog', tests.get('Striplog', []))\
+ utils.flatten_list([tests.get(a) for a in alias])
this_tests = filter(None, this_tests)
# If we explicitly set zero tests for a particular key, then this
# overrides the 'all' tests.
if not tests.get('striplog', tests.get('Striplog', 1)):
this_tests = []
return {test.__name__: test(self) for test in this_tests} | python | {
"resource": ""
} |
q270965 | hex_to_name | test | def hex_to_name(hexx):
"""
Convert hex to a color name, using matplotlib's colour names.
Args:
hexx (str): A hexadecimal colour, starting with '#'.
Returns:
str: The name of the colour, or None if not found.
"""
for n, h in defaults.COLOURS.items():
if (len(n) > 1) and (h == hexx.upper()):
return n.lower()
return None | python | {
"resource": ""
} |
q270966 | loglike_from_image | test | def loglike_from_image(filename, offset):
"""
Get a log-like stream of RGB values from an image.
Args:
filename (str): The filename of a PNG image.
offset (Number): If < 1, interpreted as proportion of way across
the image. If > 1, interpreted as pixels from left.
Returns:
ndarray: A 2d array (a column of RGB triples) at the specified
offset.
TODO:
Generalize this to extract 'logs' from images in other ways, such
as giving the mean of a range of pixel columns, or an array of
columns. See also a similar routine in pythonanywhere/freqbot.
"""
im = plt.imread(filename)
if offset < 1:
col = int(im.shape[1] * offset)
else:
col = offset
return im[:, col, :3] | python | {
"resource": ""
} |
q270967 | CustomFormatter.get_field | test | def get_field(self, field_name, args, kwargs):
"""
Return an underscore if the attribute is absent.
Not all components have the same attributes.
"""
try:
s = super(CustomFormatter, self)
return s.get_field(field_name, args, kwargs)
except KeyError: # Key is missing
return ("_", field_name)
except IndexError: # Value is missing
return ("_", field_name) | python | {
"resource": ""
} |
q270968 | Jobs.get_jobs | test | def get_jobs(self, prefix=None):
""" Lists all the jobs registered with Nomad.
https://www.nomadproject.io/docs/http/jobs.html
arguments:
- prefix :(str) optional, specifies a string to filter jobs on based on an prefix.
This is specified as a querystring parameter.
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"prefix": prefix}
return self.request(method="get", params=params).json() | python | {
"resource": ""
} |
q270969 | Jobs.parse | test | def parse(self, hcl, canonicalize=False):
""" Parse a HCL Job file. Returns a dict with the JSON formatted job.
This API endpoint is only supported from Nomad version 0.8.3.
https://www.nomadproject.io/api/jobs.html#parse-job
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("parse", json={"JobHCL": hcl, "Canonicalize": canonicalize}, method="post", allow_redirects=True).json() | python | {
"resource": ""
} |
q270970 | Acl.update_token | test | def update_token(self, id, token):
""" Update token.
https://www.nomadproject.io/api/acl-tokens.html
arguments:
- AccdesorID
- token
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request("token", id, json=token, method="post").json() | python | {
"resource": ""
} |
q270971 | Allocations.get_allocations | test | def get_allocations(self, prefix=None):
""" Lists all the allocations.
https://www.nomadproject.io/docs/http/allocs.html
arguments:
- prefix :(str) optional, specifies a string to filter allocations on based on an prefix.
This is specified as a querystring parameter.
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"prefix": prefix}
return self.request(method="get", params=params).json() | python | {
"resource": ""
} |
q270972 | Deployment.fail_deployment | test | def fail_deployment(self, id):
""" This endpoint is used to mark a deployment as failed. This should be done to force the scheduler to stop
creating allocations as part of the deployment or to cause a rollback to a previous job version.
https://www.nomadproject.io/docs/http/deployments.html
arguments:
- id
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
fail_json = {"DeploymentID": id}
return self.request("fail", id, json=fail_json, method="post").json() | python | {
"resource": ""
} |
q270973 | Deployment.pause_deployment | test | def pause_deployment(self, id, pause):
""" This endpoint is used to pause or unpause a deployment.
This is done to pause a rolling upgrade or resume it.
https://www.nomadproject.io/docs/http/deployments.html
arguments:
- id
- pause, Specifies whether to pause or resume the deployment.
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
pause_json = {"Pause": pause,
"DeploymentID": id}
return self.request("pause", id, json=pause_json, method="post").json() | python | {
"resource": ""
} |
q270974 | Deployment.deployment_allocation_health | test | def deployment_allocation_health(self, id, healthy_allocations=list(), unhealthy_allocations=list()):
""" This endpoint is used to set the health of an allocation that is in the deployment manually. In some use
cases, automatic detection of allocation health may not be desired. As such those task groups can be marked
with an upgrade policy that uses health_check = "manual". Those allocations must have their health marked
manually using this endpoint. Marking an allocation as healthy will allow the rolling upgrade to proceed.
Marking it as failed will cause the deployment to fail.
https://www.nomadproject.io/docs/http/deployments.html
arguments:
- id
- healthy_allocations, Specifies the set of allocation that should be marked as healthy.
- unhealthy_allocations, Specifies the set of allocation that should be marked as unhealthy.
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
allocations = {"HealthyAllocationIDs": healthy_allocations,
"UnHealthyAllocationIDs": unhealthy_allocations,
"DeploymentID": id}
return self.request("allocation-health", id, json=allocations, method="post").json() | python | {
"resource": ""
} |
q270975 | Node.drain_node | test | def drain_node(self, id, enable=False):
""" Toggle the drain mode of the node.
When enabled, no further allocations will be
assigned and existing allocations will be migrated.
https://www.nomadproject.io/docs/http/node.html
arguments:
- id (str uuid): node id
- enable (bool): enable node drain or not to enable node drain
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request(id, "drain", params={"enable": enable}, method="post").json() | python | {
"resource": ""
} |
q270976 | Node.drain_node_with_spec | test | def drain_node_with_spec(self, id, drain_spec, mark_eligible=None):
""" This endpoint toggles the drain mode of the node. When draining is enabled,
no further allocations will be assigned to this node, and existing allocations
will be migrated to new nodes.
If an empty dictionary is given as drain_spec this will disable/toggle the drain.
https://www.nomadproject.io/docs/http/node.html
arguments:
- id (str uuid): node id
- drain_spec (dict): https://www.nomadproject.io/api/nodes.html#drainspec
- mark_eligible (bool): https://www.nomadproject.io/api/nodes.html#markeligible
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
payload = {}
if drain_spec and mark_eligible is not None:
payload = {
"NodeID": id,
"DrainSpec": drain_spec,
"MarkEligible": mark_eligible
}
elif drain_spec and mark_eligible is None:
payload = {
"NodeID": id,
"DrainSpec": drain_spec
}
elif not drain_spec and mark_eligible is not None:
payload = {
"NodeID": id,
"DrainSpec": None,
"MarkEligible": mark_eligible
}
elif not drain_spec and mark_eligible is None:
payload = {
"NodeID": id,
"DrainSpec": None,
}
return self.request(id, "drain", json=payload, method="post").json() | python | {
"resource": ""
} |
q270977 | Node.eligible_node | test | def eligible_node(self, id, eligible=None, ineligible=None):
""" Toggle the eligibility of the node.
https://www.nomadproject.io/docs/http/node.html
arguments:
- id (str uuid): node id
- eligible (bool): Set to True to mark node eligible
- ineligible (bool): Set to True to mark node ineligible
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
payload = {}
if eligible is not None and ineligible is not None:
raise nomad.api.exceptions.InvalidParameters
if eligible is None and ineligible is None:
raise nomad.api.exceptions.InvalidParameters
if eligible is not None and eligible:
payload = {"Eligibility": "eligible", "NodeID": id}
elif eligible is not None and not eligible:
payload = {"Eligibility": "ineligible", "NodeID": id}
elif ineligible is not None:
payload = {"Eligibility": "ineligible", "NodeID": id}
elif ineligible is not None and not ineligible:
payload = {"Eligibility": "eligible", "NodeID": id}
return self.request(id, "eligibility", json=payload, method="post").json() | python | {
"resource": ""
} |
q270978 | ls.list_files | test | def list_files(self, id=None, path="/"):
""" List files in an allocation directory.
https://www.nomadproject.io/docs/http/client-fs-ls.html
arguments:
- id
- path
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
if id:
return self.request(id, params={"path": path}, method="get").json()
else:
return self.request(params={"path": path}, method="get").json() | python | {
"resource": ""
} |
q270979 | stream_file.stream | test | def stream(self, id, offset, origin, path="/"):
""" This endpoint streams the contents of a file in an allocation directory.
https://www.nomadproject.io/api/client.html#stream-file
arguments:
- id: (str) allocation_id required
- offset: (int) required
- origin: (str) either start|end
- path: (str) optional
returns: (str) text
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.BadRequestNomadException
"""
params = {
"path": path,
"offset": offset,
"origin": origin
}
return self.request(id, params=params, method="get").text | python | {
"resource": ""
} |
q270980 | stat.stat_file | test | def stat_file(self, id=None, path="/"):
""" Stat a file in an allocation directory.
https://www.nomadproject.io/docs/http/client-fs-stat.html
arguments:
- id
- path
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
if id:
return self.request(id, params={"path": path}, method="get").json()
else:
return self.request(params={"path": path}, method="get").json() | python | {
"resource": ""
} |
q270981 | Agent.join_agent | test | def join_agent(self, addresses):
"""Initiate a join between the agent and target peers.
https://www.nomadproject.io/docs/http/agent-join.html
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"address": addresses}
return self.request("join", params=params, method="post").json() | python | {
"resource": ""
} |
q270982 | Agent.update_servers | test | def update_servers(self, addresses):
"""Updates the list of known servers to the provided list.
Replaces all previous server addresses with the new list.
https://www.nomadproject.io/docs/http/agent-servers.html
returns: 200 status code
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"address": addresses}
return self.request("servers", params=params, method="post").status_code | python | {
"resource": ""
} |
q270983 | Agent.force_leave | test | def force_leave(self, node):
"""Force a failed gossip member into the left state.
https://www.nomadproject.io/docs/http/agent-force-leave.html
returns: 200 status code
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"node": node}
return self.request("force-leave", params=params, method="post").status_code | python | {
"resource": ""
} |
q270984 | Nodes.get_nodes | test | def get_nodes(self, prefix=None):
""" Lists all the client nodes registered with Nomad.
https://www.nomadproject.io/docs/http/nodes.html
arguments:
- prefix :(str) optional, specifies a string to filter nodes on based on an prefix.
This is specified as a querystring parameter.
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"prefix": prefix}
return self.request(method="get", params=params).json() | python | {
"resource": ""
} |
q270985 | Evaluations.get_evaluations | test | def get_evaluations(self, prefix=None):
""" Lists all the evaluations.
https://www.nomadproject.io/docs/http/evals.html
arguments:
- prefix :(str) optional, specifies a string to filter evaluations on based on an prefix.
This is specified as a querystring parameter.
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"prefix": prefix}
return self.request(method="get", params=params).json() | python | {
"resource": ""
} |
q270986 | Namespaces.get_namespaces | test | def get_namespaces(self, prefix=None):
""" Lists all the namespaces registered with Nomad.
https://www.nomadproject.io/docs/enterprise/namespaces/index.html
arguments:
- prefix :(str) optional, specifies a string to filter namespaces on based on an prefix.
This is specified as a querystring parameter.
returns: list
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"prefix": prefix}
return self.request(method="get", params=params).json() | python | {
"resource": ""
} |
q270987 | Job.register_job | test | def register_job(self, id, job):
""" Registers a new job or updates an existing job
https://www.nomadproject.io/docs/http/job.html
arguments:
- id
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
return self.request(id, json=job, method="post").json() | python | {
"resource": ""
} |
q270988 | Job.plan_job | test | def plan_job(self, id, job, diff=False, policy_override=False):
""" Invoke a dry-run of the scheduler for the job.
https://www.nomadproject.io/docs/http/job.html
arguments:
- id
- job, dict
- diff, boolean
- policy_override, boolean
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
json_dict = {}
json_dict.update(job)
json_dict.setdefault('Diff', diff)
json_dict.setdefault('PolicyOverride', policy_override)
return self.request(id, "plan", json=json_dict, method="post").json() | python | {
"resource": ""
} |
q270989 | Job.dispatch_job | test | def dispatch_job(self, id, payload=None, meta=None):
""" Dispatches a new instance of a parameterized job.
https://www.nomadproject.io/docs/http/job.html
arguments:
- id
- payload
- meta
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
dispatch_json = {"Meta": meta, "Payload": payload}
return self.request(id, "dispatch", json=dispatch_json, method="post").json() | python | {
"resource": ""
} |
q270990 | Job.revert_job | test | def revert_job(self, id, version, enforce_prior_version=None):
""" This endpoint reverts the job to an older version.
https://www.nomadproject.io/docs/http/job.html
arguments:
- id
- version, Specifies the job version to revert to.
optional_arguments:
- enforce_prior_version, Optional value specifying the current job's version.
This is checked and acts as a check-and-set value before reverting to the
specified job.
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
revert_json = {"JobID": id,
"JobVersion": version,
"EnforcePriorVersion": enforce_prior_version}
return self.request(id, "revert", json=revert_json, method="post").json() | python | {
"resource": ""
} |
q270991 | Job.stable_job | test | def stable_job(self, id, version, stable):
""" This endpoint sets the job's stability.
https://www.nomadproject.io/docs/http/job.html
arguments:
- id
- version, Specifies the job version to revert to.
- stable, Specifies whether the job should be marked as stable or not.
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
revert_json = {"JobID": id,
"JobVersion": version,
"Stable": stable}
return self.request(id, "stable", json=revert_json, method="post").json() | python | {
"resource": ""
} |
q270992 | Job.deregister_job | test | def deregister_job(self, id, purge=None):
""" Deregisters a job, and stops all allocations part of it.
https://www.nomadproject.io/docs/http/job.html
arguments:
- id
- purge (bool), optionally specifies whether the job should be
stopped and purged immediately (`purge=True`) or deferred to the
Nomad garbage collector (`purge=False`).
returns: dict
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
- nomad.api.exceptions.InvalidParameters
"""
params = None
if purge is not None:
if not isinstance(purge, bool):
raise nomad.api.exceptions.InvalidParameters("purge is invalid "
"(expected type %s but got %s)"%(type(bool()), type(purge)))
params = {"purge": purge}
return self.request(id, params=params, method="delete").json() | python | {
"resource": ""
} |
q270993 | Operator.get_configuration | test | def get_configuration(self, stale=False):
""" Query the status of a client node registered with Nomad.
https://www.nomadproject.io/docs/http/operator.html
returns: dict
optional arguments:
- stale, (defaults to False), Specifies if the cluster should respond without an active leader.
This is specified as a querystring parameter.
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"stale": stale}
return self.request("raft", "configuration", params=params, method="get").json() | python | {
"resource": ""
} |
q270994 | Operator.delete_peer | test | def delete_peer(self, peer_address, stale=False):
""" Remove the Nomad server with given address from the Raft configuration.
The return code signifies success or failure.
https://www.nomadproject.io/docs/http/operator.html
arguments:
- peer_address, The address specifies the server to remove and is given as an IP:port
optional arguments:
- stale, (defaults to False), Specifies if the cluster should respond without an active leader.
This is specified as a querystring parameter.
returns: Boolean
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"address": peer_address, "stale": stale}
return self.request("raft", "peer", params=params, method="delete").ok | python | {
"resource": ""
} |
q270995 | Deployments.get_deployments | test | def get_deployments(self, prefix=""):
""" This endpoint lists all deployments.
https://www.nomadproject.io/docs/http/deployments.html
optional_arguments:
- prefix, (default "") Specifies a string to filter deployments on based on an index prefix.
This is specified as a querystring parameter.
returns: list of dicts
raises:
- nomad.api.exceptions.BaseNomadException
- nomad.api.exceptions.URLNotFoundNomadException
"""
params = {"prefix": prefix}
return self.request(params=params, method="get").json() | python | {
"resource": ""
} |
q270996 | PJFMutators._get_random | test | def _get_random(self, obj_type):
"""
Get a random mutator from a list of mutators
"""
return self.mutator[obj_type][random.randint(0, self.config.level)] | python | {
"resource": ""
} |
q270997 | PJFMutators.get_mutator | test | def get_mutator(self, obj, obj_type):
"""
Get a random mutator for the given type
"""
if obj_type == unicode:
obj_type = str
obj = str(obj)
return self._get_random(obj_type)(obj) | python | {
"resource": ""
} |
q270998 | PJFMutators.get_string_polyglot_attack | test | def get_string_polyglot_attack(self, obj):
"""
Return a polyglot attack containing the original object
"""
return self.polyglot_attacks[random.choice(self.config.techniques)] % obj | python | {
"resource": ""
} |
q270999 | PJFMutators.fuzz | test | def fuzz(self, obj):
"""
Perform the fuzzing
"""
buf = list(obj)
FuzzFactor = random.randrange(1, len(buf))
numwrites=random.randrange(math.ceil((float(len(buf)) / FuzzFactor)))+1
for j in range(numwrites):
self.random_action(buf)
return self.safe_unicode(buf) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.