content stringlengths 35 416k | sha1 stringlengths 40 40 | id int64 0 710k |
|---|---|---|
def get_visited_coordinates_steps(path: str) ->dict:
    """Map each visited (x, y) coordinate to the step count of its first visit.

    Used for part 2 of the puzzle.

    Args:
        path: comma-separated movements such as "R8,U5,L5,D3".

    Returns:
        dict mapping (x, y) tuples to the total number of steps taken
        when that coordinate was first reached.

    Raises:
        RuntimeError: if a movement with a nonzero step count uses an
            unknown direction letter.
    """
    deltas = {"R": (1, 0), "L": (-1, 0), "U": (0, 1), "D": (0, -1)}
    x = y = total_steps = 0  # start at the origin
    visited_coordinates = {}
    for movement in path.split(","):
        direction = movement[0]
        for _ in range(int(movement[1:])):
            total_steps += 1
            if direction not in deltas:
                raise RuntimeError(f"Unknown direction {direction}")
            dx, dy = deltas[direction]
            x += dx
            y += dy
            # record only the first time a coordinate is reached
            visited_coordinates.setdefault((x, y), total_steps)
    return visited_coordinates
import os
def to_api_path(os_path, root=""):
    """Convert a filesystem path to an API path.

    If given, ``root`` is stripped from the front of the path; ``root``
    must already be a filesystem path.
    """
    if os_path.startswith(root):
        os_path = os_path[len(root) :]
    segments = os_path.strip(os.path.sep).split(os.path.sep)
    # drop empty segments produced by duplicate separators
    cleaned = [segment for segment in segments if segment]
    return "/".join(cleaned)
def upstream_ids(id, fromtoseries, maxcycle=1e6):
    """Return all ids upstream of id given a from (index) to (values) map.

    Performs a breadth-first walk: starting from ``id``, repeatedly finds
    every index whose value equals a node in the current frontier.

    Raises:
        RuntimeError: if more than ``maxcycle`` expansion rounds occur
            (guards against circular from/to maps).
    """
    frontier = [id]
    found = []
    rounds = 0
    while frontier:
        upstream = []
        for node in frontier:
            upstream.extend(list(fromtoseries[fromtoseries == node].index))
        found.extend(upstream)
        frontier = upstream
        rounds += 1
        if rounds > maxcycle:
            raise RuntimeError('maxcycles reached. Circular fromto?')
    return found
def _get_reference_header(header_element):
    """Parse a ReferenceInput ``Header`` XML element.

    Args:
        header_element: an element carrying ``key`` and ``value`` attributes.

    Returns:
        dict with ``'key'`` and ``'value'`` entries taken from the
        element's attributes (None when an attribute is missing).
    """
    # ``attrib`` is a plain dict, not callable; the original called
    # ``header_element.attrib('key')``, which raises TypeError.
    header = {}
    header['key'] = header_element.get('key')
    header['value'] = header_element.get('value')
    return header
def document_order(node):
    """Compute a document order value for the node.
    cmp(document_order(a), document_order(b)) will return -1, 0, or 1 if
    a is before, identical to, or after b in the document respectively.
    We represent document order as a list of sibling indexes. That is,
    the third child of the document node has an order of [2]. The first
    child of that node has an order of [2,0].
    Attributes have a sibling index of -1 (coming before all children of
    their node) and are further ordered by name--e.g., [2,0,-1,'href'].

    Returns:
        list: sibling-index path from the document root down to ``node``.
    """
    # Attributes: parent-order + [-1, attribute-name]
    if node.nodeType == node.ATTRIBUTE_NODE:
        order = document_order(node.ownerElement)
        order.extend((-1, node.name))
        return order
    # The document root (hopefully): []
    if node.parentNode is None:
        return []
    # Determine which child this is of its parent.
    # (walks left through previousSibling links to count the position)
    sibpos = 0
    sib = node
    while sib.previousSibling is not None:
        sibpos += 1
        sib = sib.previousSibling
    # Order: parent-order + [sibling-position]
    order = document_order(node.parentNode)
    order.append(sibpos)
    return order
def tei_to_omeka_header(csv_header_info):
    """Translate XML-TEI header paths into Omeka (semantic-web) headers.

    Keys found in the correspondence table are renamed; unknown keys are
    kept as-is but any key still starting with '#' is dropped from the
    result.  A non-normalized author field is used as a fallback for
    ``dc:creator`` when no normalized creator was produced.
    """
    # XML-TEI header elements to Linked Data correspondences
    xml_tag_to_voc = {
        "#fileDesc#titleStmt_title": "dc:title",
        "#fileDesc#titleStmt_author_key": "dc:creator",
        "#fileDesc#sourceDesc#bibl_publisher": "dc:publisher",
        "#fileDesc#sourceDesc#bibl_pubPlace": "pubPlace",
        "#fileDesc#sourceDesc#bibl_date": "dc:date",
        "#profileDesc#langUsage_language_ident": "dc:language",
        "#fileDesc#sourceDesc#bibl_ref_target": "dc:relation",
        "#fileDesc#publicationStmt_idno": "dc:identifier",  # Obligatoire
        "#fileDesc#publicationStmt#availability_licence_target": "dc:rights",
    }
    renamed = {}
    for key, value in csv_header_info.items():
        renamed[xml_tag_to_voc.get(key, key)] = value
    if "dc:creator" not in renamed:
        fallback = renamed.get("#fileDesc#titleStmt_author", False)
        if fallback:
            renamed["dc:creator"] = fallback
    return {key: value for (key, value) in renamed.items() if not key.startswith('#')}
def create_mail_body(messages):
    """Join status messages into a mail body, skipping the first entry.

    Args:
        messages (list): status messages; messages[0] is excluded.

    Returns:
        str: concatenation of messages[1:].
    """
    return "".join(messages[1:])
def multiplexer(a, bits, aut):
    """Return BDD node for selection of elements from `a`.

    Builds, via repeated if-then-else nodes:

    ```
    ite(bits[-1], a[-1],
        ite(bits[-2], a[-2],
            ...
                ite(bits[1], a[1],
                    ite(bits[0], a[0], FALSE)
    ```

    so for i < j, bits[i] has lower priority than bits[j] -- a priority
    selector rather than a true multiplexer.
    """
    assert len(a) == len(bits), (a, bits)
    bdd = aut.bdd
    selected = bdd.false
    for bit_expr, node in zip(bits, a):
        guard = aut.add_expr(bit_expr)
        selected = bdd.ite(guard, node, selected)
    return selected
def chained(fn):
    """Decorator that makes an instance method return ``self`` for chaining.

    Enables call chains such as ``user.unfollow().follow().unfollow()``.
    """
    def wrapper(self, *args, **kwargs):
        fn(self, *args, **kwargs)
        return self
    return wrapper
def _copy_jar_to_srcjar(ctx, jar):
    """Copy .jar to .srcjar
    Args:
        ctx: the <ctx> object
        jar: the <Generated File> of a jar containing source files.
    Returns:
        <Generated File> for the renamed file
    """
    # NOTE: this is Bazel Starlark (ctx.actions API), not plain Python.
    srcjar = ctx.actions.declare_file("%s/%s.srcjar" % (ctx.label.name, ctx.label.name))
    ctx.actions.run_shell(
        mnemonic = "CopySrcjar",
        inputs = [jar],
        outputs = [srcjar],
        # NOTE(review): the shell command uses `mv` on an action *input*
        # despite the "Copy" name -- presumably relies on the sandbox
        # providing a fresh copy; confirm before reusing this pattern.
        command = "mv %s %s" % (jar.path, srcjar.path),
    )
    return srcjar
def save_list(list_text, path_folder, filename:str):
    """
    Save list.
    Save the entries of a list at the given path, one entry per line.
    Parameters
    ----------
    list_text : iterable of str
        Entries to save.
    path_folder : path
        Pathlib path of the folder the file is created in.
    filename : str
        Name of the file (without extension; ".txt" is appended).
    Returns
    -------
    path_file : path
        Path to file.
    """
    # The original ignored ``filename`` and wrote a literal "(unknown).txt".
    path_file = path_folder.joinpath(f'{filename}.txt')
    # context manager guarantees the file is flushed and closed
    with open(path_file, "w") as outfile:
        for entry in list_text:
            outfile.write(entry)
            outfile.write("\n")
    return path_file
def __hash_byte_str_iter(bytes_iter, hasher, as_hex_str=False):
    """
    Feed every block from ``bytes_iter`` into ``hasher`` and return the digest.

    :param bytes_iter: iterable yielding byte blocks to hash
    :param hasher: a hashlib-style object exposing update()/digest()/hexdigest()
    :param as_hex_str: when True return hexdigest() instead of the raw digest()
    :return: final digest as bytes, or as a hex string when ``as_hex_str``
    """
    for block in bytes_iter:
        hasher.update(block)
    return hasher.hexdigest() if as_hex_str else hasher.digest()
def create_positional_array(cigar_tuples):
    """Map read positions to relative reference positions from CIGAR tuples.

    Each tuple is ``(operation, length)`` with operations 0=match,
    1=insert, 2=delete.  A match appends the current relative reference
    position for each base and advances it; an insert repeats the last
    (left-anchored) reference position without advancing; a delete
    advances the reference position without appending anything.

    Example:
        ``[(0, 4), (2, 1), (1, 2)]`` -> ``[0, 1, 2, 3, 4, 4]``
        (4 matched bases, 1 deleted reference base, 2 inserted bases
        anchored at position 4).

    Args:
        cigar_tuples: list of (operation, length) CIGAR tuples.

    Returns:
        list[int]: relative reference position for each read base,
        starting at 0.
    """
    MATCH, INSERT, DELETE = 0, 1, 2
    positions = []
    reference_position = 0  # 0-based
    for operation, length in cigar_tuples:
        if operation == MATCH:
            # consume read and reference together
            positions.extend(range(reference_position, reference_position + length))
            reference_position += length
        elif operation == INSERT:
            # consume read only; anchor at the previous reference position
            positions.extend([reference_position - 1] * length)
        elif operation == DELETE:
            # consume reference only
            reference_position += length
    return positions
def portfolio_returns(df_long, df_short, lookahead_returns, n_stocks):
    """
    Compute expected returns for the portfolio, assuming equal investment in each long/short stock.
    Parameters
    ----------
    df_long : DataFrame
        Top stocks for each ticker and date marked with a 1
    df_short : DataFrame
        Bottom stocks for each ticker and date marked with a 1
    lookahead_returns : DataFrame
        Lookahead returns for each ticker and date
    n_stocks: int
        The number number of stocks chosen for each month
    Returns
    -------
    portfolio_returns : DataFrame
        Expected portfolio returns for each ticker and date
    """
    # long positions contribute +1, short positions -1
    position = df_long - df_short
    return lookahead_returns * position / n_stocks
def calculate_pot_temp(pressure, temperature):
    """calculate_pot_temp
    Description:
        Return potential temperature: Temp * (1000/Pressure)^0.286.
    """
    ratio = 1000 / pressure
    return temperature * ratio ** 0.286
from dateutil import tz
from datetime import datetime
def convertTimeToLocal(time: str):
    """Converts an ISO format UTC time to a local time.
    Args:
        time: UTC time to be converted in ISO format.
    Returns:
        The time in local time based on POSIX TZ variable.
    """
    # stdlib replaces dateutil here: timezone.utc == tz.gettz('UTC') and
    # astimezone() with no argument converts to the system local zone.
    from datetime import timezone
    utc = datetime.fromisoformat(time)
    utc = utc.replace(tzinfo=timezone.utc)
    return utc.astimezone()
from typing import Optional
def _to_volts(value: Optional[float]):
    """Convert a value in microvolts (µV) to volts (V); pass None through."""
    return None if value is None else float(value) / 1e6
def file_list(filename):
    """Read a '.txt' file and return its lines with blank lines removed.

    Each non-blank line is one book entry, returned with its trailing
    newline intact.  Prints an error message and returns None when the
    file is missing or does not end in '.txt'.
    """
    if not filename.endswith(".txt"):
        print("\nError:Not correct file type. Please insert a '.txt' file")
        return None
    try:
        # context manager closes the handle (the original leaked it)
        with open(filename, "r", encoding="utf-8") as file1:
            lines = file1.readlines()
    except FileNotFoundError:
        print("Error:No such file or directory:", filename)
        return None
    # filter instead of remove-while-iterating, which skipped every
    # second of two adjacent blank lines in the original
    return [line for line in lines if line != "\n"]
def activation(active_name='rrelu'):
    """Activation functions.
    Parameters
    ----------
    active_name : str, optional
        The name of activation function, which can be 'relu', 'leakyrelu', 'prelu', 'rrelu',
        'relu6', 'elu', 'selu', 'sigmoid', 'tanh', or 'softsign'. Default: 'rrelu'
    Returns
    -------
    object
        Activation functions.
    Note
    ----
    Although many activation functions are available, the recommended activation function is 'rrelu'.
    """
    # SECURITY NOTE(review): eval() builds "<active_name>()" and evaluates it
    # in this module's namespace -- the named activation class must be in
    # scope here, and passing untrusted input as active_name would execute
    # arbitrary code. Consider a dict-based dispatch instead.
    return eval('%s()'%active_name)
from bs4 import BeautifulSoup
import re
def get_last_episode(html_content):
    """Return the highest episode number available (int).

    Scans every anchor tag whose text contains "Episode" and returns the
    largest integer token found in those link texts, or 0 when none exist.
    """
    soup = BeautifulSoup(html_content)
    highest = 0  # renamed from ``max`` to stop shadowing the builtin
    for link in soup.find_all('a', text=re.compile('Episode')):
        for token in link.string.split():
            if token.isdigit():
                number = int(token)
                if number > highest:
                    highest = number
    return highest
import re
def _project_is_apache():
    """Determine if a project is Apache.

    Looks for a key string in a set of possible license files to figure
    out if a project looks to be Apache. This is used as a precondition
    for enforcing license headers.
    """
    license_files = ["LICENSE"]
    for filename in license_files:
        try:
            with open(filename, "r") as handle:
                if any(re.search('Apache License', line) for line in handle):
                    return True
        except IOError:
            pass
    return False
def isPhysicalUnit(x):
    """
    @param x: an object
    @type x: any
    @returns: C{True} if x looks like a L{PhysicalUnit}, i.e. it has both
        a 'factor' and a 'powers' attribute
    @rtype: C{bool}
    """
    required = ('factor', 'powers')
    return all(hasattr(x, attr) for attr in required)
import hashlib
def sha_sum(fname):
    """Returns the sha256 checksum of a file.
    Args:
        fname (str): Path to a file.
    Returns:
        str: The sha256 checksum as a hex string.
    """
    digest = hashlib.sha256()
    with open(fname, 'rb') as stream:
        while True:
            # read in 64 KiB chunks to bound memory use
            chunk = stream.read(65536)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def get_labels ():
    """
    Generates the labels.
    Returns
    -------
    list : the list of labels
    """
    shape_labels = [
        "rectangle_mean", "ellipse_mean", "aspect_ratio", "area",
        "area_hull", "solidity", "extent", "perimiter", "perimeter_hull",
        "circularity", "heywood_circularity", "waddel_circularity",
        "rectangularity", "eccentricity", "ellipseArea",
        "convexity2", "convexity3",
    ]
    hu_labels = [f"hu{i}" for i in range(7)]          # Hu moments
    haralick_labels = [f"har{i}" for i in range(13)]  # Haralick features
    return shape_labels + hu_labels + haralick_labels
def torrentfile_create(args, creator, name):  # pragma: no cover
    """Create new .torrent file in a seperate thread.
    Parameters
    ----------
    args : `dict`
        keyword arguements for the torrent creator.
    creator : `type`
        The procedure class for creating file.
    name : `list`
        container to add return values to.
    Returns
    -------
    `list`
        container for path to created torrent file.
    """
    outfile, _ = creator(**args).write()
    name.append(outfile)
    return name
import re
def _extract_asm_operands(asm):
    """Extract upper-cased operand tokens from an assembly instruction.

    :param asm: instruction text, e.g. ``"mov eax, ebx"``
    :type asm: str
    :return: list of operand tokens (the mnemonic is excluded)
    """
    operands = " ".join(asm.strip().upper().split()[1:])
    operands = operands.replace(",", " ")
    # The original class contained the unescaped range ``+-@``
    # (0x2B-0x40), which also matched unintended characters such as
    # '/', ';' and '<'.  '-' is placed last so it is literal.
    return re.findall(r"[.a-zA-Z0-9_+@-]+", operands)
def get_sorted(dictionary):
    """
    Return the keys of ``dictionary`` sorted by their values, descending.
    """
    return sorted(dictionary, key=dictionary.get, reverse=True)
import re
def isCnpjValid(cnpj):
""" If cnpf in the Brazilian format is valid, it returns True, otherwise, it returns False. """
# Check if type is str
if not isinstance(cnpj,str):
return "CPF e invalido"
# Remove some unwanted characters
cpf = re.sub("[^0-9]",'',cnpj)
# Checks if string has 11 characters
if len(cpf) != 14:
return "CPF e invalido"
sum = 0
weight = [5,4,3,2,9,8,7,6,5,4,3,2]
""" Calculating the first cpf check digit. """
for n in range(12):
value = int(cpf[n]) * weight[n]
sum = sum + value
verifyingDigit = sum % 11
if verifyingDigit < 2 :
firstVerifyingDigit = 0
else:
firstVerifyingDigit = 11 - verifyingDigit
""" Calculating the second check digit of cpf. """
sum = 0
weight = [6,5,4,3,2,9,8,7,6,5,4,3,2]
for n in range(13):
sum = sum + int(cpf[n]) * weight[n]
verifyingDigit = sum % 11
if verifyingDigit < 2 :
secondVerifyingDigit = 0
else:
secondVerifyingDigit = 11 - verifyingDigit
if cpf[-2:] == "%s%s" % (firstVerifyingDigit,secondVerifyingDigit):
return "CPF e Valido"
return "CPF e invalido" | 5de9cd60066878cde20d1819a696c9525ba1e956 | 691,981 |
def ignore_command(command, ignore):
    """Check whether the command contains any word from the ignore list.

    command -- string to inspect
    ignore  -- list of words

    Returns True if any word from ``ignore`` occurs in the command,
    False otherwise.
    """
    for word in ignore:
        if word in command:
            return True
    return False
def dec2bin(decvalue, init_length):
    """Convert a decimal value to a zero-padded binary string of the given length."""
    binary = bin(int(decvalue))[2:]
    return binary.rjust(init_length, "0")
def mk_int(s):
    """
    Function to change a string to int or 0 if None.
    :param s: String to change to int (numbers are returned unchanged).
    :return: The int value; 0 when the input is None, empty, or not
        numeric; the value itself when it is already a number.
    """
    if s is None:
        return 0
    try:
        s = s.strip()
        return int(s) if s else 0
    except AttributeError:
        # already a number (no .strip): return it as-is
        return s
    except ValueError:
        # non-numeric string; the docstring promises an int or 0
        # (the original bare except returned the string unchanged)
        return 0
def unwrapMethod(obj):
    """obj -> (unwrapped, wrapperCount)
    Unwrap 'obj' until we get to a real function, counting the number of
    unwrappings.
    Bail if we find a class or something we can't identify as callable.

    Raises:
        TypeError: if ``obj`` is a class, is not callable, or is wrapped
            more than 10 levels deep.
    """
    wrapperCount = 0
    unwrapped = obj
    for i in range(10):  # bounded so a pathological wrapper chain can't loop forever
        # classes have __bases__; refuse to call constructors
        bases = getattr(unwrapped, '__bases__', None)
        if bases is not None:
            raise TypeError("mapply() can not call class constructors")
        im_func = getattr(unwrapped, '__func__', None)
        if im_func is None:
            # Backwards compatibility with objects aimed at Python 2
            im_func = getattr(unwrapped, 'im_func', None)
        if im_func is not None:
            # bound/unbound method: peel off one wrapper layer
            unwrapped = im_func
            wrapperCount += 1
        elif getattr(unwrapped, '__code__', None) is not None:
            break
        elif getattr(unwrapped, 'func_code', None) is not None:
            break
        else:
            # last resort: fall through to the object's __call__
            unwrapped = getattr(unwrapped, '__call__', None)
            if unwrapped is None:
                raise TypeError("mapply() can not call %s" % repr(obj))
    else:
        # for/else: loop exhausted without hitting a real code object
        raise TypeError("couldn't find callable metadata, mapply() error on %s"
                        % repr(obj))
    return unwrapped, wrapperCount
import re
def find_file(string, extension):
    """
    Find the ``.extension`` file names inside a raw command output.

    ============== ========== =======================================================
    **Parameters** **Type**   **Description**
    *string*       string     raw splitted command result containing the file names
    *extension*    string     file extension (without the dot)
    ============== ========== =======================================================

    Returns
    -------
    string list
        The file list found in the splitted os command result, e.g.
        ``find_file(dir_listing, 'py')`` returns every ``*.py`` name.
    """
    # raw string avoids the invalid-escape DeprecationWarning of "\.";
    # re.escape keeps regex metacharacters in the extension literal
    pattern = re.compile(r"([a-zA-Z0-9-_]*\." + re.escape(extension) + r")",
                         re.MULTILINE)
    return pattern.findall(string)
def kortti_välissä(kortti1, kortti2, kortti3):
    """Check whether the first card's rank is strictly between the other two.

    Parameters
    ----------
    kortti1, kortti2, kortti3 : tuple(int, str)
        Cards to compare as (rank, suit) pairs.

    Returns
    -------
    True
        when kortti1's rank lies strictly between those of kortti2 and
        kortti3.
    """
    # The suit plays no role in the comparison.
    rank1, rank2, rank3 = kortti1[0], kortti2[0], kortti3[0]
    low, high = min(rank2, rank3), max(rank2, rank3)
    return low < rank1 < high
def trySurroundSelection(editor, char, map_table):
    """Try to do char-surrounding using a mapping table.
    Will not do any surrounding if a keyboard modifier key (e.g. Ctrl) is in pressed state.
    If the editor has multiple selections, each selection will be surrounded separately.
    :param editor: editor where to try to do surrounding
    :type editor: :any:`eye.widgets.editor.Editor`
    :param char: the character to do the surrounding
    :type char: str
    :param map_table: mapping table listing chars and their replacement
    :type map_table: dict[str, str]
    :returns: True if a char surrounding was performed, else False. The value can be used for
              returning from an event filter function.
    :rtype: bool
    """
    # NOTE(review): the docstring mentions modifier keys but no modifier
    # check is visible in this function -- presumably handled by the
    # caller/event filter; confirm.
    if char not in map_table:
        return False
    # when a surrounding is done, it will shift (invalidate) all line-indexes after it
    # doing in reverse order avoids having to compute shifting
    sels = reversed([editor.getSelectionN(n) for n in range(editor.selectionsCount())])
    with editor.undoGroup(True):
        for sel in sels:
            editor.setSelection(*sel)
            s = editor.selectedText()
            # map_table values are %-format templates wrapping the selection
            editor.replaceSelectedText(map_table[char] % s)
    return True
def squared_loss(y_hat, y):  #@save
    """Return the elementwise squared loss (y_hat - y)**2 / 2.

    ``y`` is reshaped to match ``y_hat`` before subtraction.
    """
    diff = y_hat - y.reshape(y_hat.shape)
    return diff ** 2 / 2
def add(arg1, arg2):
    """
    Return the sum of the two arguments.
    """
    result = arg1 + arg2
    return result
def get_pointer_parent(pointer):
    """Given a `Pointer` object, return its parent node.
    """
    # The `parent_node` property of the `Pointer` schema refers to the parents
    # of the pointed-at `Node`, not the parents of the `Pointer`; the
    # back-reference syntax yields the parents of the `Pointer` itself.
    parent_refs = pointer.node__parent
    assert len(parent_refs) == 1, 'Pointer must have exactly one parent'
    (parent,) = parent_refs
    return parent
def read_file(file):
    """ Open given file and returns the content as a string. """
    with open(file) as handle:
        return handle.read()
def linear_function(x, a, b):
    """ Equation for a line: f(x) = a + b*x.
    Parameters:
        x: array
            The independent variable where the data is measured.
        a: float
            The linear coefficient (intercept).
        b: float
            The angular coefficient (slope).
    Returns:
        f: array
            The linear function evaluated at x.
    """
    return a + b * x
def rk4_ode(df_ds, x0, s, ds, *f_args, f1=None):
    """Advance one classical Runge-Kutta (RK4) step for dx/ds = df_ds(x, s).

    ``f1`` may be supplied to reuse an already-evaluated first slope.
    See https://en.wikipedia.org/wiki/Runge%E2%80%93Kutta_methods
    """
    half = 0.5 * ds
    if f1 is None:
        f1 = df_ds(x0, s, *f_args)
    f2 = df_ds(x0 + half * f1, s + half, *f_args)
    f3 = df_ds(x0 + half * f2, s + half, *f_args)
    f4 = df_ds(x0 + ds * f3, s + ds, *f_args)
    # weighted average of the four slopes
    return x0 + (ds / 6.0) * (f1 + 2 * f2 + 2 * f3 + f4)
import re
def remove_hashtags(text):
    """Remove '#' characters from hashtags; the hashtag text itself is kept,
    as it is an organic part of the tweets."""
    return re.sub('#', '', text)
def recolour(shape, source_colours, target_colour):
    """ Recolours a shape from source_colours to target_colour.
    >>> recolour([(0, 0, 1), (0, 1, 1), (0, 2, 1), (0, 3, 5)], [1], 4)
    [(0, 0, 4), (0, 1, 4), (0, 2, 4), (0, 3, 5)]
    >>> recolour([(0, 0, 1), (0, 1, 1), (0, 2, 2), (0, 3, 5)], [1, 2], 4)
    [(0, 0, 4), (0, 1, 4), (0, 2, 4), (0, 3, 5)]
    """
    return [
        (y, x, target_colour if colour in source_colours else colour)
        for (y, x, colour) in shape
    ]
import torch
def gaussian_stein_kernel(
    x, y, scores_x, scores_y, sigma, return_kernel=False
):
    """Compute the Gaussian Stein kernel between x and y
    Parameters
    ----------
    x : torch.tensor, shape (n, p)
        Input particles
    y : torch.tensor, shape (n, p)
        Input particles
    score_x : torch.tensor, shape (n, p)
        The score of x
    score_y : torch.tensor, shape (n, p)
        The score of y
    sigma : float
        Bandwidth
    return_kernel : bool
        whether the original kernel k(xi, yj) should also be returned
    Return
    ------
    stein_kernel : torch.tensor, shape (n, n)
        The Gaussian Stein kernel
    kernel : torch.tensor, shape (n, n)
        The base kernel, only returned id return_kernel is True
    """
    _, p = x.shape
    # pairwise differences via broadcasting: shape (n, n, p)
    d = x[:, None, :] - y[None, :, :]
    # squared Euclidean distances, shape (n, n)
    dists = (d ** 2).sum(axis=-1)
    # Gaussian (RBF) base kernel
    k = torch.exp(-dists / sigma / 2)
    # inner products <score_x_i, score_y_j>
    scalars = scores_x.mm(scores_y.T)
    scores_diffs = scores_x[:, None, :] - scores_y[None, :, :]
    # <d_ij, score_x_i - score_y_j>
    diffs = (d * scores_diffs).sum(axis=-1)
    der2 = p - dists / sigma
    stein_kernel = k * (scalars + diffs / sigma + der2 / sigma)
    if return_kernel:
        return stein_kernel, k
    return stein_kernel
import argparse
def arguments() -> dict:
    """
    Parse console arguments.

    Returns:
        dict: mapping of option names to their parsed values; options the
        user did not supply default to None (or to the store_true flags'
        None default).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--subreddits",
        type=str,
        default=None,
        nargs="+",
        help="Images would be parsed from these."
    )
    parser.add_argument(
        "-st",
        "--sort-type",
        metavar="TYPE",
        type=str,
        default=None,
        nargs='?',
        help="Sort type. Can be hot, new, top, rising."
    )
    parser.add_argument(
        "-l",
        "--limit",
        type=int,
        default=None,
        nargs='?',
        help="How many submissions would be parsed."
    )
    parser.add_argument(
        "-tf",
        "--time-filter",
        type=str,
        default=None,
        nargs='?',
        help="Only with sort-type top. Top from day, week, month, year or all."
    )
    parser.add_argument(
        "-rd",
        "--remove-duplicates",
        action="store_true",
        default=None,
        dest="remove_duplicates",
        help="If present script would not save duplicates of images in save-folder."
    )
    parser.add_argument(
        "-nt",
        "--number-of-threads",
        type=int,
        dest="number_of_threads",
        default=None,
        nargs='?',
        help="Number of threads to use for loading images."
    )
    parser.add_argument(
        "-ua",
        "--use-api",
        action="store_true",
        default=None,
        dest="use_api",
        help="If present script would connect to reddit api. Needs 'credentials.json' to be present."
    )
    parser.add_argument(
        "-ae",
        "--allowed-extensions",
        type=str,
        default=None,
        nargs="+",
        help="Images with only this extensions are allowed."
    )
    parser.add_argument(
        "--credentials",
        metavar="PATH",
        dest="credentials_path",
        type=str,
        default=None,
        nargs='?',
        help="Folder with credentials.json."
    )
    parser.add_argument(
        "--save-folder",
        dest="save_folder_path",
        metavar="PATH",
        type=str,
        default=None,
        nargs='?',
        help="Folder where images would be saved."
    )
    parser.add_argument(
        "--temp-folder",
        dest="temp_folder_path",
        metavar="PATH",
        type=str,
        default=None,
        nargs='?',
        help="Temporary folder to save images. "
             "WARNING: After finishing loaded pictures in this folder would be removed"
    )
    parser.add_argument(
        "-v",
        "--verbose",
        action="store_true",
        default=None,
        dest="verbose",
        help="Makes everything more verbose."
    )
    args = parser.parse_args()
    # vars() turns the Namespace into a plain dict
    return vars(args)
def queue_text(state):
    """Return a block of text describing a given song queue.

    The first line summarizes the queue length and the currently playing
    index; each following line describes one queued song.  Returns a
    placeholder message when the playlist is empty.
    """
    # (the original initialized an unused ``index = 0`` here)
    if len(state.playlist) > 0:
        message = [f"{len(state.playlist)} songs in queue. Currently playing {state.current_index}"]
        message += [
            f"  {index}. **{song.title}** (requested by **{song.requested_by.name}**)"
            for (index, song) in state.playlist.items()
        ]  # add individual songs
        return "\n".join(message)
    else:
        return "The play queue is empty."
def get_digit_num(num: int) -> int:
    """
    Return the sum of the decimal digits of a number.
    """
    total = 0
    while num:
        num, digit = divmod(num, 10)
        total += digit
    return total
def to_ini(settings=None):
    """
    Custom Ansible filter to print out a YAML dictionary in the INI file format.
    Similar to the built-in to_yaml/to_json filters.

    :param settings: mapping of section name -> {option: value}; list
        values are joined with spaces.
    :return: the INI-formatted string (no trailing newline).
    """
    # avoid the mutable-default-argument pitfall
    if settings is None:
        settings = {}
    s = ''
    # loop through each section
    for section in settings:
        # print the section header
        s += '[%s]\n' % section
        # loop through each option in the section
        for option in settings[section]:
            value = settings[section][option]
            # if the value is a list, join it with spaces
            if isinstance(value, list):
                value = ' '.join(value)
            # print the option and value
            s += '%s = %s\n' % (option, value)
        # add some separation between sections
        # (the original also had a no-op bare ``s.rstrip()`` here)
        s += '\n'
    return s.rstrip()
def from_phase_to_jd(x, t0, p):
    """
    :param x: phases
    :param t0: ephemeris
    :param p: period
    :return: x in jd
    """
    converted = []
    for phase in x:
        converted.append((phase * p) + t0)
    return converted
def write_script(p_pth, p_contents):
    """Write a script to disk.

    Parameters
    ----------
    p_pth
        Path to the script (any object with a ``write_text`` method).
    p_contents
        Lines of the script; items that are lists are joined with
        spaces into a single line.

    Returns
    -------
    str
        The exact text that was written.
    """
    rendered = []
    for item in p_contents:
        if isinstance(item, list):
            rendered.append(' '.join(item))
        else:
            rendered.append('{}'.format(item))
    contents = '\n'.join(rendered) + '\n' if rendered else ''
    p_pth.write_text(contents)
    return contents
def rk2(y, f, t, h):
    """Runge-Kutta RK2 midpoint method: advance y by one step of size h."""
    slope_start = f(t, y)
    slope_mid = f(t + 0.5*h, y + 0.5*h*slope_start)
    return y + h*slope_mid
import requests
def url_ok(url):
    """
    Check if server at remote URL answers ok

    Sends an HTTP HEAD request (no response body is transferred) and
    returns True only for an exact 200 status code.

    :param url: URL to probe
    :return: bool
    """
    req = requests.head(url)
    return req.status_code == 200
import re
def make_name_safe(project_name):
    """
    Converts a name with underscores into a valid class name (PascalCase).
    E.g. test_project will become TestProject
    :param project_name: the name we want to convert
    :type project_name: str
    """
    parts = re.split('_', project_name)
    return ''.join(part.capitalize() for part in parts)
from typing import Dict
from typing import List
def _cast_strings_keys_to_int(obj: Dict) -> Dict:
    """Casts string to int keys in dictionary when '__int_keys__' flag is set
    Args:
        obj: dictionary
    Returns:
        obj with string keys cast to int keys and '__int_keys__' flags removed
    """
    if isinstance(obj, dict):
        int_keys: List[int] = []
        # snapshot items() because the dict is mutated below
        for k, v in list(obj.items()):
            # only collect convertible keys when this dict carries the flag
            if "__int_keys__" in obj:
                try:
                    int_keys.append(int(k))
                except ValueError:
                    pass
            _cast_strings_keys_to_int(v)  # recurse into nested values
        while len(int_keys) > 0:
            # re-insert each value under its int key, drop the str key
            key = int_keys.pop()
            obj[key] = obj[str(key)]
            obj.pop(str(key))
        if "__int_keys__" in obj:
            del obj["__int_keys__"]  # strip the marker flag
    return obj
def A1ToXy(space):
    """Convert user-friendly coordinates (like A1 or B5) to (x, y)
    coordinates (like 0,0 or 1,4)."""
    column, row = space[0], space[1:]
    # 'A' maps to column 0, and rows are 1-based in the A1 notation
    return (ord(column) - ord('A'), int(row) - 1)
def mm__to__km():
    """Return the expression template for converting millimeter to kilometer."""
    template = '1.0E-6{kind}*{var}'
    return template
import os
def get_file_names_for_dataset(name='train',
                               path='/Users/zeynep068/efficientdet/voc_data/'):
    """ Get file names.
    Args:
        name: Train or validation dataset.
        path: Path to dataset.
    Returns: List of file names corresponding to dataset type.
    """
    path = os.path.join(path, 'ImageSets/Main/')
    file_names = []
    for entry in os.listdir(path):
        # pick up per-class listings ending in e.g. "_train.txt"
        if entry.endswith(str(name + ".txt")):
            for line in open(os.path.join(path, entry)).readlines():
                # lines look like "<file id> <label>\n"; " 1" before the
                # newline presumably marks a positive sample in the VOC
                # ImageSets format -- confirm.
                if line[-3:-1] == " 1":
                    file_names.append(line[:-3])
    # de-duplicate ids appearing in several per-class files
    return list(set(file_names))
def emission_objective(model):
    """
    The emission objective calculates the total emissions.
    the total emissions are equal to the amount of energy bought from which supplier * load hours * carbon emissions per MWh
    :param model: model reference
    :return: total emissions
    """
    total_emissions = []
    # iterate every (year, supplier, block) combination of the model
    for year in model.YEARS:
        for supplier in model.SUPPLIER:
            for block in model.BLOCKS:
                # per docstring: bought energy * carbon emissions per MWh * load hours
                emission = model.buy[year, supplier, block] * model.carbon_emissions[supplier] * model.load_hours[block]
                total_emissions.append(emission)
    return sum(total_emissions)
def common_cols(df1, df2):
    """ Return the intersection of commun columns name """
    shared = set(df1.columns).intersection(df2.columns)
    return list(shared)
def date_remove_dashes(std_date):
    """STD_DATE is a "YYYY-MM-DD" string; strip the dashes for storage in JSON."""
    year, month, day = std_date[0:4], std_date[5:7], std_date[8:]
    return f"{year}{month}{day}"
def frequencies_library(peptides):
    """
    Calculate the frequency of each amino acid, per position, in a peptide library.

    Arguments:
    peptides -- List of peptides in the library

    Return:
    count_dict -- Dictionary mapping position (1-based) -> {amino acid: count}
    """
    # The 20 natural amino acids.
    amino_acids = "ARNDCQEGHILKMFPSTWYV"
    count_dict = {
        position: {aa: 0 for aa in amino_acids}
        for position in range(1, len(peptides[0]) + 1)
    }
    for sequence in peptides:
        for position, residue in enumerate(sequence, start=1):
            count_dict[position][residue] += 1
    return count_dict
def to_interval(points: list):
    """ Transform a set of points into its orthogonal hull: one [min, max]
    interval per dimension.

    Args:
        points (list of tuples): the points

    Examples:
        [(0, 2), (1, 3)]                  --> [[0, 1], [2, 3]]
        [(0, 2), (1, 5), (4, 3)]          --> [[0, 4], [2, 5]]
        [(0, 2, 9), (1, 5, 0), (4, 3, 6)] --> [[0, 4], [2, 5], [0, 9]]
    """
    dimensions = len(points[0])
    return [
        [min(point[d] for point in points), max(point[d] for point in points)]
        for d in range(dimensions)
    ]
import pkg_resources
def goodstuff():
    """Shortcut for specifying path to goodstuff.j2k.

    Returns
    -------
    file : str
        Platform-independent path to goodstuff.j2k.
    """
    return pkg_resources.resource_filename(__name__, "goodstuff.j2k")
def setup_method_options(method, tuning_options):
    """ Prepare the minimizer options specific to *method*.

    :param method: Name of the scipy-style minimizer (e.g. "Nelder-Mead").
    :param tuning_options: Object exposing ``strategy_options`` (supports ``in``
        and attribute access), ``eps`` and ``verbose``.
    :return: dict of keyword options to pass to the minimizer.
    """
    kwargs = {}
    # Note that not all methods interpret maxiter in the same manner
    if "maxiter" in tuning_options.strategy_options:
        maxiter = tuning_options.strategy_options.maxiter
    else:
        maxiter = 100
    kwargs['maxiter'] = maxiter
    if method in ["Nelder-Mead", "Powell"]:
        kwargs['maxfev'] = maxiter
    elif method == "L-BFGS-B":
        kwargs['maxfun'] = maxiter

    # pass eps to methods that support it
    if method in ["CG", "BFGS", "L-BFGS-B", "TNC", "SLSQP"]:
        kwargs['eps'] = tuning_options.eps
    elif method == "COBYLA":
        kwargs['rhobeg'] = tuning_options.eps

    # not all methods support the 'disp' option
    if method not in ['TNC']:
        kwargs['disp'] = tuning_options.verbose
    return kwargs
def local_time(time):
    """Convert *time* (e.g. a UTC datetime) to the local system time zone.

    Args:
        time: A `datetime.datetime` object.

    Returns:
        A `datetime.datetime` object expressed in the local (system) time zone.
    """
    localized = time.astimezone()
    return localized
def search(state, path):
    """Get value in `state` at the specified path, returning {} if the key is absent"""
    trimmed = path.strip("/")
    if not trimmed:
        return state
    for segment in trimmed.split("/"):
        if segment not in state:
            return {}
        state = state[segment]
    return state
import configparser
def convert_ini2json(ini_file_name):
    """ Convert INI parms to JSON for use in the Docker containers"""
    cfgp = configparser.ConfigParser()
    cfgp.read(ini_file_name)
    # One dict per section: {section_name: {option: value, ...}}.
    return {section: dict(cfgp.items(section)) for section in cfgp.sections()}
def has_el(el, el_name):
    """Return True if an element with a given name exists in the branch rooted at el"""
    matches = el.xpath(f'.//{el_name}')
    return bool(matches)
def metadata_cid(nft_project, artwork_path):
    """NOTE: This actually makes HTTP calls"""
    # Pin each artwork file and collect its IPFS URI.
    pinned = nft_project.pin_artwork(artwork_path)
    uris = [f"ipfs://{cid}" for cid in pinned.values()]
    # Build the NFT metadata from the URIs, pin it, and return the folder URI.
    metadata = nft_project.create_nft_data(uris)
    root_cid = nft_project.pin_metadata(metadata)
    return f"ipfs://{root_cid}/"
def make_tree(words):
    """
    Transform parser data into a trie (nested dicts): each key is a letter
    and a terminal "$<word>" key stores the word's number.

    time complexity : O(M*logN), where M - max. string length, N - number of items,
    search - O(M)
    penalty - storage requirements
    """
    trie = {}
    for word, number in words.items():
        node = trie
        for letter in word:
            # Descend, creating the child dict on first visit.
            if letter not in node:
                node[letter] = {}
            node = node[letter]
        # Terminal marker: "$<word>" -> number signals a complete word.
        node[f"${word}"] = number
    return trie
import json
def _get_layerconfig_from_json(file):
""" JSONファイルの読み込み
"""
config = None
with open(file,'r') as stream:
config = json.load(stream)
return config | 951bb5fdd9e3a74e623b168daf8ced750ff091dc | 692,030 |
import torch
def optimizer_creator(model, config):
    """Return an SGD optimizer over the model's parameters.

    The learning rate comes from config["lr"], defaulting to 0.1.
    """
    learning_rate = config.get("lr", 0.1)
    return torch.optim.SGD(model.parameters(), lr=learning_rate)
def parser_html(json_data, content=''):
    """Parse pre-extracted HTML data and return the accumulated text.

    Expected structure: a list of dicts mapping tag name -> text
    (e.g. [{"h1": ..., "p": ...}, ...]); returns a str.
    """
    parts = [content]
    for record in json_data:
        for tag, text in record.items():
            # print(tag, '-->', text)
            if tag == 'h1':
                parts.append(text + '\r\n')
            elif tag == 'h2':
                parts.append('##' + text + '\n')
            elif tag == 'p' and text is not None:
                parts.append(text + '\n')
            elif tag == 'ol' and text is not None:
                parts.append('* ' + text + '\n')
            elif tag == 'li' and text is not None:
                parts.append('+ ' + text + '\n')
            elif text is not None:
                parts.append(text + '\n')
    return ''.join(parts)
def get_dict_list(data_array):
    """Return a list of dictionaries keyed by the column headers
    (the 0th line of data_array).

    Each subsequent row is mapped against the header row by position;
    values are kept as-is (no numeric conversion). Raises IndexError if a
    row is shorter than the header, matching the original behavior.
    """
    key_list = data_array[0]
    # Dict-comprehension replaces the old index loop; the unused enumerate
    # index and dead commented-out float-conversion code were removed.
    return [
        {key: row[i] for i, key in enumerate(key_list)}
        for row in data_array[1:]
    ]
from datetime import datetime
def convert_str_to_date(date: str):
    """
    Convert a date string into a ``datetime.date``.

    Accepts either "YYYY/MM/DD" or "YYYY-MM-DD".

    :param date: (str) Date in a string format
    :return: (datetime.date) the parsed date (time information is dropped).
    :raises ValueError: if neither format matches.
    """
    last_error = None
    for fmt in ("%Y/%m/%d", "%Y-%m-%d"):
        try:
            return datetime.strptime(date, fmt).date()
        except ValueError as err:
            last_error = err
    raise last_error
def _get_date_fields(date):
""" This function converts a datetime object to a map object contaning the date.
Args:
date: Type datetime.
Returns:
map object in the format {'year':int, 'month':int, 'day':int}.
"""
return {'year': date.year, 'month': date.month, 'day': date.day} | 47887e57a610ae94a8b7618e53c477357bcffe36 | 692,036 |
from typing import Any
def maybelen(value: Any) -> int:
    """
    A "maybified" version of the len() function.
    """
    length = len(value)
    return length
import re
def pulse_factory(cls, name=None, **kwargs):
    """Return a zero-argument factory that instantiates *cls*.

    Keyword arguments are passed to ``cls.__init__()``.

    Args:
        cls (type): Subclass of Pulse of which to create an instance.
        name (optional, str): Name of the resulting pulse. If None,
            a snake-case version of the class name is used,
            e.g. 'GaussianPulse' -> 'gaussian_pulse'. Default: None.

    Returns:
        callable: A function that takes no arguments and returns
        an instance of ``cls``.
    """
    if name is None:
        # Split 'GaussianPulse' into ['Gaussian', 'Pulse'], then snake-case.
        words = re.findall("[a-zA-Z][^A-Z]*", cls.__name__)
        name = "_".join(words).lower()

    def factory():
        return cls(name=name, **kwargs)

    return factory
def _in_directories(filename, dirs):
"""Tests whether `filename` is anywhere in any of the given dirs."""
for dirname in dirs:
if (filename.startswith(dirname)
and (len(filename) == len(dirname) or filename[len(dirname)] == '/')):
return True
return False | fc93e4bef45a4364446c0daa3e5969f143fbacc4 | 692,039 |
import random
def gen_sequence(length):
    """
    Generates a test sequence to hash of size length
    :param length: size of sequence
    :return: bytes sequence of length length
    """
    alphabet = ['a', 'b', 'c', 'd', 'e', 'f']
    chars = []
    for _ in range(length):
        chars.append(random.choice(alphabet))
    return ''.join(chars).encode()
def is_sha256(content):
    """Make sure this is actually an valid SHA256 hash."""
    # NOTE(review): despite the name, the allowed alphabet below is
    # base58-like (letters beyond hex; no 'I', 'O' or 'l'), not hexadecimal.
    # A real SHA-256 digest is 64 hex characters -- confirm intent.
    allowed = ('0123456789ABCDEFGHJKLMNPQRSTUVWXYZ'
               'abcdefghijkmnopqrstuvwxyz')
    if any(ch not in allowed for ch in content):
        return False
    return len(content) == 64
def _float_parameter(level: float, maxval: float):
"""Helper function to scale a value between ``0`` and ``maxval`` and return as a float.
Args:
level (float): Level of the operation that will be between [0, 10].
maxval (float): Maximum value that the operation can have. This will be scaled to
level/10.
Returns:
float: The result from scaling ``maxval`` according to ``level``.
"""
return float(level) * maxval / 10. | b13f0fe99f921997f4b54b93f3d2649c2ea8253a | 692,043 |
import string
def _get_symbol(i):
"""Finds the i-th ASCII symbol. Works for lowercase and uppercase letters, allowing i up to
51."""
if i >= len(string.ascii_letters):
raise ValueError(
"Set the use_opt_einsum argument to True when applying more than "
f"{len(string.ascii_letters)} wire cuts to a circuit"
)
return string.ascii_letters[i] | 65197c4d43792e52db9c596f40fe3802291e256b | 692,044 |
def num_from_bins(bins, cls, reg):
    """
    Recover continuous values from (bin index, residual) pairs.

    :param bins: b x 2 tensors of [low, high] bin edges
    :param cls: b long tensors (bin indices)
    :param reg: b tensors (residuals in units of bin width)
    :return: bin_center + reg * bin_width, b tensors
    """
    width = bins[0][1] - bins[0][0]
    centers = (bins[cls, 0] + bins[cls, 1]) / 2
    return centers + reg * width
def monthlyCorr(xlabel):
    """Per-cluster likelihood of observations falling in each calendar month.

    NOTE(review): assumes ``xlabel`` is a pandas DataFrame whose 'k' column
    holds cluster labels and whose index, once reset, yields a 'ProfileID'
    column and a datetime 'date' column -- confirm against callers.
    """
    df = xlabel['k'].reset_index()
    # Number of observations per cluster.
    cluster_size = df.groupby('k')['ProfileID'].count().rename('cluster_size')
    # Observation counts as a month (rows) x cluster (columns) table.
    month_lbls = df.groupby(['k',df.date.dt.month])['ProfileID'].count().unstack(level=0)
    # month_lbls = clusterColNames(month_lbls)
    # Normalise each cluster's column so counts become per-month likelihoods.
    month_likelihood = month_lbls.divide(month_lbls.sum(axis=0), axis=1)
    # Append the cluster sizes as an extra row for downstream reporting.
    month_likelihood.loc['cluster_size'] = cluster_size
    return month_likelihood
def per_iter(per_iter, n_iter, result, header):
    """ Optionally convert a speed measurement into a per-iteration figure.

    Args:
        per_iter: Whether or not to do the conversion.
        n_iter: The number of iterations (0 is treated as 1).
        result: The speed estimate.
        header: The unmodified header.

    Returns:
        result: result / n_iter
        header: With "per iteration" appended.
    """
    divisor = 1 if n_iter == 0 else n_iter
    if per_iter:
        result = result / divisor
        header = header + " per iteration"
    return result, header
def get_filenames(i):
    """Returns the filepaths for the output MusicXML and .png files.

    Parameters:
    - i: unique identifier for the score

    Returns:
    - (sheet_png_filepath, musicxml_out_filepath)
    """
    prefix = "dataset/"
    sheet_png_path = f"{prefix}{i}-sheet.png"
    musicxml_path = f"{prefix}{i}-musicxml.xml"
    return (sheet_png_path, musicxml_path)
def hass_client(hass, aiohttp_client, hass_access_token):
    """Return a factory coroutine producing an authenticated HTTP client."""
    async def auth_client():
        """Return an authenticated client."""
        headers = {'Authorization': "Bearer {}".format(hass_access_token)}
        return await aiohttp_client(hass.http.app, headers=headers)
    return auth_client
def code(visitor, block):
    """
    Format:

        {% code %}
        code
        {% endcode %}
    """
    body = block['body']
    return body
def retry_once(self):
    """Task that fails and is retried. Returns the number of retries."""
    retries = self.request.retries
    if not retries:
        # First attempt: schedule a retry almost immediately.
        raise self.retry(countdown=0.1)
    return retries
def get_value(df, n_intervals):
    """Return the tweet text at the given interval index for live updates."""
    tweets = df['tweet']
    return tweets[n_intervals]
def add(vector1, vector2):
    """
    Element-wise sum of vector1 and vector2.

    :param vector1: list of numerical values
    :param vector2: list of numerical values (at least as long as vector1)
    :return: list with the pairwise sums
    """
    return [value + vector2[index] for index, value in enumerate(vector1)]
def intersect_paths(paths1, paths2):
    """Intersect graph paths."""
    # Materialise inputs so they can be iterated more than once.
    paths1 = paths1 if isinstance(paths1, list) else list(paths1)
    paths2 = paths2 if isinstance(paths2, list) else list(paths2)
    seqs_in_first = {seq for seq, _ in paths1}
    shared = {seq for seq, _ in paths2 if seq in seqs_in_first}
    filtered1 = (p for p in paths1 if p[0] in shared)
    filtered2 = (p for p in paths2 if p[0] in shared)
    return filtered1, filtered2
def split_list_by(lst, key):
    """
    Split a list by the callable *key*: items for which ``key(item)`` is
    falsy go into the first list, truthy items into the second.
    """
    falsy, truthy = [], []
    for item in lst:
        (truthy if key(item) else falsy).append(item)
    return (falsy, truthy)
def create_padding_block(sizeOfPaddingAndHeaderInBytes):
    """
    Creates an analog UXG binary padding block with header. The padding block
    is used to align binary blocks as needed so each block starts on a 16 byte
    boundary (and to align PDW streaming data on 4096 byte boundaries).

    Args:
        sizeOfPaddingAndHeaderInBytes (int): Total size of the resulting
            padding binary block and header combined.

    Returns:
        list containing the padding header fields and the padded data.
    """
    HEADER_SIZE = 16  # padding header is always 16 bytes
    filler_size = sizeOfPaddingAndHeaderInBytes - HEADER_SIZE
    block_id = (1).to_bytes(4, byteorder='little')
    reserved = (0).to_bytes(4, byteorder='little')
    size_field = filler_size.to_bytes(8, byteorder='little')
    # filler_size zero bytes of padding so PDW stream contents (not the PDW
    # header) start at byte 4097 or (multiple of 4096)+1.
    filler = (0).to_bytes(filler_size, byteorder='little')
    return [block_id, reserved, size_field, filler]
import json
def load_json_hyper_params(file):
    """
    Loads hyper_parameters dictionary from .json file
    :param file: string, path to .json file
    :return: dict, hyper-paramters.
    """
    with open(file, mode='r') as stream:
        params = json.load(stream)
    print('Read %d hyperparameters from %s' % (len(params), file))
    return params
def peso_ideal():
    """
    Read a person's height from stdin and print their ideal weight,
    computed with the formula (72.7 * height) - 58.

    Steps: ask for the height, apply the formula, print the result.
    Always returns None (the result is only printed).
    """
    altura = float(input("Digite sua altura: "))
    peso = (72.7 * altura) - 58
    print(f"O seu peso ideal é {peso:.2f} quilos")
    return None
def truncate_string(s, limit=140):
    """Truncate a string to *limit* chars, replacing the overflow with "...".

    Prefers to cut at a space found in the window [limit-20, limit-3);
    otherwise cuts hard so the result is exactly *limit* characters.
    """
    if len(s) <= limit:
        return s
    cut = s.rfind(' ', limit - 20, limit - 3)
    if cut >= 0:
        return s[:cut] + "..."
    return s[:limit - 3] + "..."
def p1_rec(input_list, out=0):
    """Recursively compute the sum of the numbers in *input_list*.

    Args:
        input_list: list of numbers to sum.
        out: running accumulator (used internally by the recursion).

    Returns:
        The sum of ``out`` and all elements of ``input_list``.
    """
    if len(input_list) > 0:
        # BUG FIX: the recursive result must be propagated back up; the
        # original dropped it and returned None for any non-empty list.
        return p1_rec(input_list[1:], out + input_list[0])
    return out
import logging
def judge_payload(response, use_json):
    """
    Pull out the request payload, provided it's either text or json.

    :param response: a requests-style response object (or None if the HTTP
        call itself failed).
    :param use_json: if True, decode the body as JSON, otherwise return text.
    :return: the decoded payload, or None on any failure.
    """
    # Guard against a missing/invalid response object: accessing
    # .status_code on e.g. None raises, which we treat as "no response".
    try:
        response.status_code
    except Exception:
        logging.error('No response to HTTP query. Returning -None-.')
        return None
    if response.status_code == 200:
        if use_json is True:
            try:
                return response.json()
            except Exception as exc:
                # logging.warn is a deprecated alias; use warning().
                logging.warning('Unable to decode JSON: %s', exc)
        else:
            try:
                return response.text
            except Exception as exc:
                logging.error('Unable to decode response text: %s', exc)
        return None
    logging.error('Response from server was not OK: %s', response.status_code)
    return None
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.