| content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M) |
|---|---|---|
def pass_through_formatter(value):
"""No op update function."""
return value
|
202ea761db9e1fa858718c61df3a7fd18f02826c
| 3,639,031
|
# `utils` and `U` are assumed project utility modules providing `to_list` and
# `TupleSequential`; the snippet's original `from re import U` imported a regex flag by accident.
def instantiate(decoder, model=None, dataset=None):
    """ Instantiate a full decoder config, e.g. handle a list of configs
    Note that arguments are added in reverse order compared to encoder (model first, then dataset)
    """
decoder = utils.to_list(decoder)
return U.TupleSequential(*[_instantiate(d, model=model, dataset=dataset) for d in decoder])
|
238b97eab9a653200d0f82b92342a64bbbbc6336
| 3,639,032
|
def un_normalize(stdevs, arrList):
"""
Return an arrayList with ith column multiplied by scalar stdevs[i] if stdevs[i] is not zero,
and unmodified if it is zero.
Args:
stdevs: A list of numbers (should be the list output by normalize).
arrList: A list of list of numbers that is the (normalized) data.
Returns:
        list: A list of list of numbers of the same dimensions as arrList.
>>> un_normalize([0.5, 2],[[1, 2], [3,4]])
[[0.5, 4], [1.5, 8]]
>>> un_normalize([0.0, 2],[[1, 2], [3,4]])
[[1, 4], [3, 8]]
"""
stdevs = list(map(lambda x: x if x != 0.0 else 1, stdevs))
return scalarMultCols(stdevs, arrList)
|
a87aa89b2f591d46b077ea26d877f1d3459df2b3
| 3,639,033
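A minimal sketch of the scalarMultCols helper that un_normalize above assumes; the name and column-wise contract are inferred from the doctests, so treat this as an illustration rather than the original implementation.

def scalarMultCols(scalars, arrList):
    """Multiply the ith entry of each row of arrList by scalars[i]."""
    return [[scalars[i] * val for i, val in enumerate(row)] for row in arrList]

# Reproduces the doctest above: un_normalize([0.5, 2], [[1, 2], [3, 4]]) -> [[0.5, 4], [1.5, 8]]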
|
import json
from igraph import Graph
def convert_graph(input_path):
"""
Converts a CRED-like graph into a graph format supported by the igraph library. The input graph must have been
generated by cli2 CRED command (look for credResult.json)
:param input_path: The path to the CRED graph to convert (credResult.json)
"""
with open(input_path, encoding="utf8") as f:
cred_file = json.load(f)
# Locating important elements in the graph
cred_data = cred_file[1]['credData']
graph = cred_file[1]['weightedGraph'][1]['graphJSON'][1]
cred_node_addresses = graph['sortedNodeAddresses']
# Summary of edges/nodes and also a reminder about dangling edges
print(f'Found cred summary data for {len(cred_data["nodeSummaries"])} nodes and {len(cred_data["edgeSummaries"])} edges')
print(f'The graph has {len(graph["nodes"])} nodes, {len(graph["edges"])} edges and {len(graph["sortedNodeAddresses"])} node addresses')
print(f'Dangling edges expected: {len(graph["edges"]) - len(cred_data["edgeSummaries"])}')
g = Graph(directed=True)
# Collecting nodes
for i, cred_node in enumerate(graph['nodes']):
cred_node_address = cred_node_addresses[cred_node['index']]
igraph_node_atts = {'label': cred_node_address[2]+'-'+cred_node_address[-1][:7],
'type': cred_node_address[2],
'timestamp': cred_node['timestampMs'] if cred_node['timestampMs'] is not None else 0,
'totalCred': cred_data['nodeSummaries'][i]['cred'],
'index': cred_node['index'],
}
g.add_vertex(name=str(cred_node['index']), **igraph_node_atts)
# Collecting edges
dangling_edges = []
idx = 0
for cred_edge in graph['edges']:
        # Checking if the edge is a dangling one. If so, we skip it.
if len(g.vs.select(name_eq=str(cred_edge['srcIndex']))) + len(g.vs.select(name_eq=str(cred_edge['dstIndex']))) < 2:
dangling_edges.append({"srcIndex": cred_edge['srcIndex'], "dstIndex": cred_edge['dstIndex']})
continue
igraph_edge_atts = {'address': '-'.join(cred_edge['address']),
'timestamp': cred_edge['timestampMs'],
'backwardFlow': cred_data['edgeSummaries'][idx]['backwardFlow'],
'forwardFlow': cred_data['edgeSummaries'][idx]['forwardFlow'],
}
g.add_edge(str(cred_edge['srcIndex']), str(cred_edge['dstIndex']), **igraph_edge_atts)
idx += 1
# Reporting the number of dangling edges found
print(f"Dangling edges found: {len(dangling_edges)}")
return g
|
57037a662d033a422965f3efe13f321a5bf7f128
| 3,639,034
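A hypothetical usage sketch for convert_graph above; the file path is illustrative, and the export simply uses igraph's standard GraphML writer.

g = convert_graph("output/credResult.json")  # hypothetical path to a cli2 CRED result
g.write_graphml("cred.graphml")              # igraph's built-in GraphML export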
|
from typing import List
def get_valid_classes_from_class_input(
class_graph: class_dependency.JavaClassDependencyGraph,
class_names_input: str) -> List[str]:
"""Parses classes given as input into fully qualified, valid classes.
Input is a comma-separated list of classes."""
class_names = class_names_input.split(',')
return get_valid_classes_from_class_list(class_graph, class_names)
|
e93edea9692ab9c461ed744a8727effbf705fdea
| 3,639,035
|
def us_ppop(ppop):
""" Determines if the ppop is in a valid format to be in the US """
    # return False if it's null or not 7 characters long
if not ppop or len(ppop) != 7:
return False
ppop = ppop.upper()
if ppop[:2] in g_state_by_code or ppop[:2] in g_state_code_by_fips:
return True
return False
|
afef4e7634034709f870379cd684a37a793c7ec5
| 3,639,036
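us_ppop relies on two module-level lookup tables; below is a minimal sketch with illustrative entries only (the real tables cover all states and FIPS codes).

g_state_by_code = {"NY": "New York", "CA": "California"}   # state abbreviation -> name
g_state_code_by_fips = {"36": "NY", "06": "CA"}            # FIPS prefix -> abbreviation

assert us_ppop("NY**123") is True    # 7 chars, valid state prefix
assert us_ppop("ZZ00000") is False   # unknown prefix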
|
from urllib.robotparser import RobotFileParser

# `urlopen`, `MaxContentBytes`, and the `session` argument are assumed helpers
# from the surrounding project (a requests-based wrapper, not urllib's urlopen).
def get_robotstxt_parser(url, session=None):
"""Get a RobotFileParser for the given robots.txt URL."""
rp = RobotFileParser()
try:
req = urlopen(url, session, max_content_bytes=MaxContentBytes,
allow_errors=range(600))
except Exception:
# connect or timeout errors are treated as an absent robots.txt
rp.allow_all = True
else:
if req.status_code >= 400:
rp.allow_all = True
elif req.status_code == 200:
rp.parse(req.text.splitlines())
return rp
|
f838f8284b250133a1c5f0ca5d514756ff4f1eb0
| 3,639,038
|
# `mmcv`, `build_architecture`, `build_backbone`, and `load_checkpoint` are
# assumed imports from the surrounding mmcv-based codebase.
def init_model(config, checkpoint=None, device='cuda:0'):
"""Initialize a model from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
    Returns:
        tuple(nn.Module, nn.Module or None): The constructed model and, for
            ``VideoBodyModelEstimator`` configs, the constructed extractor
            model (None otherwise).
    """
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.data.test.test_mode = True
model = build_architecture(config.model)
if checkpoint is not None:
# load model checkpoint
load_checkpoint(model, checkpoint, map_location=device)
# save the config in the model for convenience
model.cfg = config
model.to(device)
model.eval()
extractor = None
if config.model.type == 'VideoBodyModelEstimator':
extractor = build_backbone(config.extractor.backbone)
if config.extractor.checkpoint is not None:
# load model checkpoint
load_checkpoint(extractor, config.extractor.checkpoint)
extractor.cfg = config
extractor.to(device)
extractor.eval()
return model, extractor
|
494cbcb012978d49905318d92c136bc7c6241a79
| 3,639,039
|
def estimate_period(time, y, y_err, clip=True, plot=True, **kwargs):
"""
Run a Lomb-Scargle Periodogram to find periodic signals. It's recommended
to use the allesfitter.time_series functions sigma_clip and slide_clip beforehand.
Parameters
----------
time : array of float
e.g. time array (usually in days)
y : array of float
e.g. flux or RV array (usually as normalized flux or RV in km/s)
    y_err : array of float
        e.g. flux or RV error array (usually as normalized flux or RV in km/s)
    clip : bool, optional
        Automatically clip the input data with sigma_clip(low=4, high=4)
        and slide_clip(window_length=1, low=4, high=4). The default is True.
    plot : bool, optional
        To plot or not, that is the question. The default is True.
**kwargs : collection of keyword arguments
Any keyword arguments will be passed onto the astropy periodogram class.
Returns
-------
best_period : float
The best period found.
FAP : float
The false alarm probability for the best period.
fig : matplotlib.figure object, optional
The summary figure. Only returned if plot is True.
"""
#==========================================================================
#::: clean the inputs
#==========================================================================
time, y, y_err = clean(time, y, y_err)
plot_bool = plot
if clip:
y = sigma_clip(time, y, low=4, high=4)
y = slide_clip(time, y, window_length=1, low=4, high=4)
time, y, y_err = clean(time, y, y_err)
#==========================================================================
#::: handle inputs
#==========================================================================
cadence = np.nanmedian(np.diff(time))
if kwargs is None: kwargs = {}
if 'minperiod' not in kwargs: kwargs['minperiod'] = 10. * cadence
if 'maxperiod' not in kwargs: kwargs['maxperiod'] = time[-1]-time[0]
minfreq = 1./kwargs['maxperiod']
maxfreq = 1./kwargs['minperiod']
#==========================================================================
#::: now do the periodogram
#==========================================================================
ls = LombScargle(time, y) #Analyze our dates and s-index data using the AstroPy Lomb Scargle module
frequency, power = ls.autopower(minimum_frequency=minfreq, maximum_frequency=maxfreq) #Determine the LS periodogram
best_power = np.nanmax(power)
best_frequency = frequency[np.argmax(power)]
best_period = 1./best_frequency
FAP=ls.false_alarm_probability(best_power) #Calculate the FAP for the highest peak in the power array
#==========================================================================
#::: plot
#==========================================================================
def plot():
peak_loc=round(float(1./best_frequency),2)
FAP_probabilities = [0.5, 0.1, 0.01] #Enter FAP values you want to determine
FAP_levels=ls.false_alarm_level(FAP_probabilities) #Get corresponding LS Power values
fig, axes = plt.subplots(4, 1, figsize=[10,15], tight_layout=True)
#::: plot the periodogram
ax = axes[0]
ax.semilogx(1./frequency,power,color='b')
ax.plot(peak_loc, best_power, marker='d', markersize=12, color='r')
ax.text(peak_loc*1.2,best_power*0.95,'Peak Period: '+str(peak_loc)+' days')
ax.text(peak_loc*1.2,best_power*0.85,'FAP: '+str(FAP))
ax.hlines(FAP_levels, kwargs['minperiod'], kwargs['maxperiod'], color='grey', lw=1)
        ax.text(kwargs['maxperiod'], FAP_levels[0],'50% FAP ', ha='right')
        ax.text(kwargs['maxperiod'], FAP_levels[1],'10% FAP ', ha='right')
        ax.text(kwargs['maxperiod'], FAP_levels[2],'1% FAP ', ha='right')
ax.set(xlabel='Period (days)', ylabel='L-S power')
ax.tick_params(axis='both',which='major')
#::: plot the phase-folded data
ax = axes[1]
plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
ax.set(ylim=[np.nanmin(y), np.nanmax(y)], ylabel='Data (clipped; phased)')
#::: plot the phase-folded data, zoomed
ax = axes[2]
plot_phase_folded_lightcurve(time, y, period=1./best_frequency, epoch=0, ax=ax)
ax.set(ylabel='Data (clipped; phased; y-zoom)')
#::: plot the autocorrelation of the data
ax = axes[3]
plot_acf(pd.Series(y, index=time), ax=ax, lags=np.linspace(start=1,stop=2*best_period/cadence,num=100,dtype=int))
ax.set(xlabel='Lag', ylabel='Autocorrelation', title='')
return fig
#==========================================================================
#::: return
#==========================================================================
if plot_bool:
fig = plot()
return best_period, FAP, fig
else:
return best_period, FAP
|
23cc58d910ff5541847fa4d5892979aa312d1609
| 3,639,040
|
def get_test_packages():
"""Get a list of packages which need tests run.
Filters the package list in the following order:
* Check command line for packages passed in as positional arguments
    * Check if the local remote and local branch environment variables
have been set to specify a remote branch to diff against.
* Check if in Travis, then limit the subset based on changes
in a Pull Request ("push" builds to branches may not have
any filtering)
* Just use all packages
An additional check is done for the cases when a diff is computed (i.e.
using local remote and local branch environment variables, and on Travis).
Once the filtered list of **changed** packages is found, the package
dependency graph is used to add any additional packages which depend on
the changed packages.
:rtype: list
    :returns: A list of all package directories where tests
        need to be run.
"""
all_packages = get_package_directories()
local_diff = local_diff_branch()
parser = get_parser()
args = parser.parse_args()
if args.packages is not UNSET_SENTINEL:
verify_packages(args.packages, all_packages)
return sorted(args.packages)
elif local_diff is not None:
changed_packages = get_changed_packages(
'HEAD', local_diff, all_packages)
return follow_dependencies(changed_packages, all_packages)
elif in_travis():
changed_packages = get_travis_directories(all_packages)
return follow_dependencies(changed_packages, all_packages)
else:
return all_packages
|
302a3136ec84e81a68348e5ff1bffa9c916f36a1
| 3,639,041
|
def decoding_character(morse_character):
"""
    Input:
        - morse_character : a Morse code string; the input is guaranteed to be
          convertible to an alphabet letter via the get_morse_code_dict function
    Output:
        - the alphabet letter obtained by decoding the Morse code
Examples:
>>> import morsecode as mc
>>> mc.decoding_character("-")
'T'
>>> mc.decoding_character(".")
'E'
>>> mc.decoding_character(".-")
'A'
>>> mc.decoding_character("...")
'S'
>>> mc.decoding_character("....")
'H'
>>> mc.decoding_character("-.-")
'K'
"""
# ===Modify codes below=============
    # Assign the result to be returned (per the conditions) to the `result` variable, or restructure freely as needed
#morse_code_dict = get_morse_code_dict()
char_dict = get_char_code_dict()
result = char_dict.get(morse_character)
return result
|
29c3f99da372a713d349a0c7640403ae32c08aba
| 3,639,042
|
def SparsityParametersAddDimMetadata(builder, dimMetadata):
"""This method is deprecated. Please switch to AddDimMetadata."""
return AddDimMetadata(builder, dimMetadata)
|
5a7604ca44fbf3f2a1d520018269c472340511e5
| 3,639,043
|
def check_branch(payload, branch):
"""
Check if a push was on configured branch.
:param payload: Payload from web hook.
:param branch: Name of branch to trigger action on.
:return: True if push was on configured branch, False otherwise.
"""
if "ref" in payload:
if payload["ref"] == branch:
return True
return False
|
88bd0ebae330ee169e97a40aee208b2f92ee4a32
| 3,639,044
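A quick illustration of check_branch with a GitHub-style push payload; the ref value is the usual fully qualified branch name.

payload = {"ref": "refs/heads/main"}
assert check_branch(payload, "refs/heads/main") is True
assert check_branch(payload, "refs/heads/dev") is False
assert check_branch({}, "refs/heads/main") is False  # no "ref" key at all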
|
from typing import Union

# `u` is assumed to be astropy.units, with Quantity/Unit its quantity and unit
# types; InvalidUnit and InvalidUnitConversion are assumed project exceptions.
def convert(q: Quantity, new_unit: Union[str, Unit], equivalencies=None) -> Quantity:
"""Convert quantity to a new unit.
:raises InvalidUnit: When target unit does not exist.
:raises InvalidUnitConversion: If the conversion is invalid.
Customized to be a bit more universal than the original quantities.
"""
try:
return q.to(new_unit, equivalencies or [])
except u.UnitConversionError:
if q.unit.physical_type == "temperature":
return q.to(new_unit, u.temperature())
else:
raise InvalidUnitConversion(
f"Cannot convert unit '{q.unit}' to '{new_unit}'."
) from None
except ValueError as err:
raise InvalidUnit(f"Unit '{new_unit}' does not exist.") from None
|
7d28a40d3da4a6189aeb9efb252f50088838a1f3
| 3,639,045
|
from random import choice

def randomized_pairwise_t_test(arr1, arr2, output=True):
"""
Perform a randomized pairwise t-test on two arrays
of values of equal size.
see Cohen, P.R., Empirical Methods for Artificial Intelligence, p. 168
"""
# Make sure both arrays are the same length
assert len(arr1) == len(arr2)
    # Cast them to floats (list() is needed in Python 3, where map() is lazy)
    arr1 = list(map(float, arr1))
    arr2 = list(map(float, arr2))
# Calculate the absolute diffs
diffs = [(arr1[i] - arr2[i]) for i in range(len(arr1))]
# Calculate the original mean
originalMean = sum(diffs) / float(len(diffs))
numLess = 0
# Do 10000 trials to test
for i in range(10000):
running_sum = 0.
for j in range(len(diffs)):
if choice([True,False]):
running_sum += diffs[j]
else:
running_sum -= diffs[j]
mean = running_sum / float(len(diffs))
if mean <= originalMean:
numLess += 1
# Finally output / return the stats
ratio = float(numLess + 1) / float(10001)
ratio = min(ratio, 1-ratio)
if output:
        print("mean difference: %f\nsignificant at p <= %f" % (originalMean, ratio))
return originalMean, ratio
|
92ceb071fcc03dd952a15ffe08f2bd305c603a39
| 3,639,046
|
from typing import Dict
import datetime
import uuid
import cftime
import numpy as np
def update_metadata(radar, longitude: np.ndarray, latitude: np.ndarray) -> Dict:
"""
Update metadata of the gridded products.
    Parameters:
    ===========
    radar: pyart.core.Grid
        Radar data.
    longitude: np.ndarray
        Grid longitudes (degrees east).
    latitude: np.ndarray
        Grid latitudes (degrees north).
    Returns:
    ========
    metadata: dict
        Output metadata dictionary.
"""
today = datetime.datetime.utcnow()
dtime = cftime.num2pydate(radar.time["data"], radar.time["units"])
maxlon = longitude.max()
minlon = longitude.min()
maxlat = latitude.max()
minlat = latitude.min()
metadata = {
"comment": "Gridded radar volume using Barnes et al. ROI",
"field_names": ", ".join([k for k in radar.fields.keys()]),
"geospatial_bounds": f"POLYGON(({minlon:0.6} {minlat:0.6},{minlon:0.6} {maxlat:0.6},{maxlon:0.6} {maxlat:0.6},{maxlon:0.6} {minlat:0.6},{minlon:0.6} {minlat:0.6}))",
"geospatial_lat_max": f"{maxlat:0.6}",
"geospatial_lat_min": f"{minlat:0.6}",
"geospatial_lat_units": "degrees_north",
"geospatial_lon_max": f"{maxlon:0.6}",
"geospatial_lon_min": f"{minlon:0.6}",
"geospatial_lon_units": "degrees_east",
"geospatial_vertical_min": np.int32(radar.origin_altitude["data"][0]),
"geospatial_vertical_max": np.int32(20000),
"geospatial_vertical_positive": "up",
"history": f"created by Valentin Louf on gadi.nci.org.au at {today.isoformat()} using Py-ART",
"processing_level": "b2",
"time_coverage_start": dtime[0].isoformat(),
"time_coverage_end": dtime[-1].isoformat(),
"uuid": str(uuid.uuid4()),
}
return metadata
|
ae4b26372221262426803f40394caa06245d5afb
| 3,639,047
|
import copy
import numpy as N

# `idxSelect`, `xSelect`, and `is_scalar` are assumed types/helpers from the
# surrounding selection module.
def idxsel2xsel(file, isel, dimensions, order):
""" convert a index space selection object to an xSelect object
"""
if not isinstance(isel, idxSelect):
raise TypeError('wrong argument type')
xsel = {}
xsel_size = {}
xsel_dims = {}
isarray = False
interp = False
masked = False
multidim = False
i = 0
for axis in dimensions:
inc_i = True
try:
idx = isel[axis]
if idx.interp: interp = True
if idx.isarray:
isarray = True
if idx.dims is not None: multidim = True
if isinstance(idx.v, N.ma.MaskedArray): masked = True
xsel_dims[axis] = idx.dims
idx = idx.v
if isinstance(idx, slice):
dimsize = file.cf_dimensions[axis]
res = [idx.start, idx.stop, idx.step]
if (idx.step is not None and idx.step < 0):
if idx.start is None: res[0] = dimsize - 1
if idx.stop is None: res[1] = None
else:
if idx.start is None: res[0] = 0
if idx.stop is None: res[1] = dimsize
if idx.step is None: res[2] = 1
xsel[axis] = slice(res[0], res[1], res[2])
elif N.isscalar(idx):
xsel[axis] = idx
if len(order) > 0:
order.remove(i)
for val in order:
if val > i:
order[order.index(val)] = val - 1
inc_i = False
else:
#xsel[axis] = idx.copy()
xsel[axis] = copy.copy(idx)
                if len(idx.shape) == 0 or idx.shape == (1,):  # scalar or single-element index
if len(order) > 0:
order.remove(i)
for val in order:
if val > i:
order[order.index(val)] = val - 1
inc_i = False
except KeyError:
dimsize = file.cf_dimensions[axis]
xsel[axis] = (slice(0, dimsize, 1))
xsel_dims[axis] = None
if inc_i:
i += 1
if isarray:
# convert slices to 1d-arrays and determine result size
for axis in dimensions:
idx = xsel[axis]
if isinstance(idx, slice):
xsel[axis] = N.arange(idx.start, idx.stop, idx.step)
if xsel_dims[axis] is None:
if is_scalar(xsel[axis]):
xsel_size[axis] = 0
else:
xsel_size[axis] = len(xsel[axis])
else:
xsel_size[axis] = isel[axis].axlen
# determine shape of xsel
dim_ret = []
for axis in dimensions:
if xsel_size[axis] != 0:
dim_ret.append(xsel_size[axis])
ndim_ret = len(dim_ret)
# all 1d arrays
if not multidim:
i = 0
for axis in dimensions:
if xsel_size[axis] != 0:
idx_shape = N.ones(ndim_ret,dtype="int32")
idx_shape[i] = dim_ret[i]
xsel[axis].shape = idx_shape
i += 1
# at least one multidimensional coordinate
else:
i = 0
for axis in dimensions:
if xsel_dims[axis] is None:
if xsel_size[axis] != 0:
idx_shape = N.ones(ndim_ret,dtype="int32")
idx_shape[i] = dim_ret[i]
xsel[axis].shape = idx_shape
i += 1
else:
idx_shape2 = {}
for axis2 in dimensions:
if xsel_size[axis2] != 0:
if axis2 in xsel_dims[axis]:
idx_shape2[axis2] = isel[axis].dimsize(axis2)
else:
idx_shape2[axis2] = 1
                    idx_shape = []
                    for axis2 in dimensions:
                        if axis2 in idx_shape2:
                            idx_shape.append(idx_shape2[axis2])
                    # mirror of the 1-D branch's reshape above; this assignment
                    # appears to be missing from the original snippet (assumption)
                    xsel[axis].shape = idx_shape
if isel[axis].type != 'scalar':
i += 1
# check if we only need basic slicing
if not isarray and not interp:
isbasic = True
else:
isbasic = False
ret = []
for axis in dimensions:
ret.append(xsel[axis])
ret = xSelect(ret)
ret.isbasic = isbasic
ret.interp = interp
ret.masked = masked
ret.order = order
return ret
|
ff00a7705a9ae1f633e7ec19682367ccfea2b7bf
| 3,639,048
|
def _retrieve_max_kb_s_sent_state(status: FritzStatus, last_value: str) -> float:
"""Return upload max transmission rate."""
return round(status.max_bit_rate[0] / 1000, 1)
|
e1c0a710131289e457f3c15da411a7f8d17fdfc7
| 3,639,049
|
def user_detail(request, id, format=None):
"""
    Retrieve, update or delete a user instance.
"""
try:
snippet = User.objects.get(id=id)
except User.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == 'GET':
serializer = UserSerializer(snippet)
return Response(serializer.data)
elif request.method == 'PUT':
serializer = UserSerializer(snippet, data=request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
elif request.method == 'DELETE':
if not request.user.has_perm('ops.delete_user'):
return Response(status=status.HTTP_403_FORBIDDEN)
snippet.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
|
9339c85cec0b271d5eeb8a1caec976992869174a
| 3,639,050
|
from astropy.convolution import convolve, convolve_fft

def _gsmooth_img(args):
"""
HELPER FUNCTION: private!
Smooth an image with a gaussian in 2d
"""
img,kernel,use_fft,kwargs = args
if use_fft:
return convolve_fft(img, kernel, normalize_kernel=True, **kwargs)
else:
return convolve(img, kernel, normalize_kernel=True, **kwargs)
|
313a0c4475935665cb0e4c55bea343adf3a9fab4
| 3,639,051
|
def __logs_by_scan_id(scan_id, language):
"""
select all events by scan id hash
Args:
scan_id: scan id hash
language: language
Returns:
an array with JSON events or an empty array
"""
try:
logs = []
for log in send_read_query(
"select host,username,password,port,type,date,description from hosts_log where scan_id=\"{0}\"".format(
scan_id), language):
data = {
"SCAN_ID": scan_id,
"HOST": log[0],
"USERNAME": log[1],
"PASSWORD": log[2],
"PORT": log[3],
"TYPE": log[4],
"TIME": log[5],
"DESCRIPTION": log[6]
}
logs.append(data)
return logs
    except Exception:
return []
|
26ef72dd2e0ed974a84f2ddc67e61fd90f769f17
| 3,639,053
|
from flask import redirect  # assuming a Flask app

def docs():
"""Redirect to documentation on Github
Route: /docs
Methods: GET
Return: redirect to webpage
"""
return redirect("https://kinsaurralde.github.io/ws_281x-lights/#/")
|
18fbbf2e4d53c66545bdf1129de5d1d4ac5944fd
| 3,639,054
|
import numpy as np

# `mean` is assumed to be this module's companion weighted-mean function.
def std(a, weights=None, axis=None, dtype=None, ddof=0, keepdims=False):
"""
Compute the weighted standard deviation along the specified axis.
:param a: Array containing numbers whose standard deviation is desired. If `a` is not an
array, a conversion is attempted.
:param weights: Array containing weights for the elements of `a`. If `weights` is not an
array, a conversion is attempted.
:param axis: Axis or axes along which the means are computed. The default is to
compute the mean of the flattened array. Type is None or int or tuple of ints, optional.
:param dtype: data type to use in computing the mean.
:param int ddof: Delta Degrees of Freedom. The divisor used in calculations
is ``W - ddof``, where ``W`` is the sum of weights (or number of elements
if `weights` is None). By default `ddof` is zero
:param bool keepdims: If this is set to True, the axes which are reduced are left
in the result as dimensions with size one.
:return: np.ndarray
"""
if weights is None:
return np.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims)
else:
w = np.array(weights)
m = mean(a, weights=w, axis=axis, keepdims=True)
return np.sqrt(
np.sum(
w * (np.array(a) - m) ** 2, axis=axis, dtype=dtype, keepdims=keepdims
)
/ ( # noqa: W504
np.sum(w, axis=axis, dtype=dtype, keepdims=keepdims) - ddof
)
)
|
758421b85657197413ab4fe2713bf18da2ac184a
| 3,639,055
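A quick sanity check for the weighted std above, assuming the module's weighted mean helper is available: with uniform weights and ddof=0 it reduces to plain np.std, since the weighted mean equals the ordinary mean and sum(w) equals the element count.

import numpy as np

a = np.array([1.0, 2.0, 3.0, 4.0])
w = np.ones_like(a)
assert np.isclose(std(a, weights=w), np.std(a))  # uniform weights -> ordinary std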
|
import logging
import pymysql
def createOneHourCandles(markets, database):
    """
    Function that creates tables for one hour candles.
    :param database:
    :param markets:
    :return:
    """
conn = pymysql.connect(host='localhost',
user='jan',
password='17051982',
database=database)
func_logging = logging.getLogger("bittrex_database." + str(__name__) + ".createOneHourCandles()")
for ix in markets:
with conn.cursor() as cur:
comm = f'CREATE OR REPLACE TABLE `{ix}` (' \
f'symbol CHAR(20) NOT NULL, ' \
f'ttime FLOAT NOT NULL, ' \
f'oopen DOUBLE NOT NULL, ' \
f'hhigh DOUBLE NOT NULL, ' \
f'llow DOUBLE NOT NULL, ' \
f'cclose DOUBLE NOT NULL, ' \
f'base_vol DOUBLE NOT NULL, ' \
f'quote_vol DOUBLE NOT NULL, ' \
f'usd_vol DOUBLE NOT NULL, ' \
f'PRIMARY KEY (symbol, ttime)' \
f') ENGINE=Maria'
cur.execute(comm)
conn.commit()
    func_logging.info("One hour candle tables have been created (empty).")
return True
|
c800570a4dd17acb0b588064938ccf098e6c53bb
| 3,639,056
|
import numpy as np

# `coadd1d` (e.g. PypeIt's 1-D coadding module) and `xspec` (e.g. linetools'
# XSpectrum1D module) are assumed imports from the surrounding module.
def coaddspectra(splist, plotsp=True, outf=None, sn_smooth_npix=10):
""" Coadd spectra
Parameters
----------
splist : list of XSpectrum1D objects
List of spectra to coadd
plotsp : bool
If True, plot the coadded spectrum
outf : str
Output file
sn_smooth_npix : float
Parameter in coadd1d.combspec function that defines
number of pixels to median filter by when computing S/N used to decide how to scale and weight spectra
Returns
-------
sp : XSpectrum1D
A spectrum that represents coadded spectra from the splist list
"""
waves = []
fluxes = []
ivars = []
masks = []
for isp in splist:
waves.append(isp.wavelength)
fluxes.append(isp.flux)
ivars.append(1. / (isp.sig) ** 2.)
imask = np.repeat(True, len(isp.flux))
j = np.where((isp.flux == 0) & (isp.sig == 0))[0]
imask[j] = False
masks.append(imask)
waves = np.ndarray.transpose(np.asarray(waves))
fluxes = np.ndarray.transpose(np.asarray(fluxes))
ivars = np.ndarray.transpose(np.asarray(ivars))
masks = np.ndarray.transpose(np.asarray(masks))
wave_stack, flux_stack, ivar_stack, mask_stack = coadd1d.combspec(
waves, fluxes, ivars, masks, sn_smooth_npix, show=plotsp)
ii = np.where(wave_stack > 0)[0]
coadded_waves = wave_stack[ii]
coadded_fluxes = flux_stack[ii]
coadded_sigs = 1 / (np.sqrt(ivar_stack[ii]))
# write and return the spectrum
sp = xspec.XSpectrum1D(coadded_waves, coadded_fluxes, coadded_sigs)
if outf is not None:
sp.write_to_fits(outf)
return sp
|
1e0c312389f566a34cca878251b7d808968e175c
| 3,639,057
|
def get_rel_sim(relation, question, dataset):
"""
Get max cosine distance for relations
:param relation:
:param question:
:return:
"""
query_ngrams = generate_ngrams(question)
query_ngrams_vec = [get_avg_word2vec(phr, dataset) for phr in query_ngrams]
relation_ngram = get_avg_word2vec(relation, dataset)
similarities = [cosine_similarity(relation_ngram, q)[0][0] for q in query_ngrams_vec]
if similarities and np.max(similarities) > 0.5:
return np.max(similarities)
else:
return 0.0
|
63c313fac32ec2483979585c60cea916979aaf5d
| 3,639,058
|
from M2Crypto import EVP, RSA, X509

# `config` is assumed to be the surrounding project's ConfigParser instance.
def mk_request(bits, cn):
"""
    Create an X509 request with the given number of bits in the key.
Args:
bits -- number of RSA key bits
cn -- common name in the request
Returns a X509 request and the private key (EVP)
"""
pk = EVP.PKey()
x = X509.Request()
rsa = RSA.gen_key(bits, 65537, lambda: None)
pk.assign_rsa(rsa)
x.set_pubkey(pk)
name = x.get_subject()
name.C = config.get('ca', 'cert_country')
name.CN = cn
name.ST = config.get('ca', 'cert_state')
name.L = config.get('ca', 'cert_locality')
name.O = config.get('ca', 'cert_organization')
name.OU = config.get('ca', 'cert_org_unit')
x.sign(pk, 'sha256')
return x, pk
|
f6ac4fe385caba149b85599fa6f48fc3d0dc7ccf
| 3,639,059
|
import re
def nice(name):
"""Generate a nice name based on the given string.
Examples:
>>> names = [
... "simple_command",
... "simpleCommand",
... "SimpleCommand",
... "Simple command",
... ]
>>> for name in names:
... nice(name)
'Simple Command'
'Simple Command'
'Simple Command'
'Simple Command'
Arguments:
name (str): The string from which generate the nice name.
Returns:
str: The generated nice name.
"""
# The regular expression will match all upper case characters except the
# one that starts the string and insert a space before it.
return re.sub(r"(?<!^)([A-Z])", r" \1", name).replace("_", " ").title()
|
ab96675423812a85744bb76e7f62d08bbbac2eea
| 3,639,060
|
def get_outputs():
"""Get the available outputs, excluding outputs in the EXCLUDED_OUTPUTS variable."""
outputs = []
tree = connection.get_tree()
for node in filter(
lambda node: node.type == "output" and node.name not in EXCLUDED_OUTPUTS, tree
):
workspaces = node.nodes[1].nodes
if workspaces:
outputs.append((node, workspaces))
return outputs
|
6db1ea83252a7a6f4fd7f731c206c5d4a738a282
| 3,639,061
|
def get_user_owner_mailboxes_tuples(user):
"""
    Return the owned mailboxes of a user as (id, email_address) tuples (a generator)
"""
return ((owned_mailbox.id, owned_mailbox.email_address) for owned_mailbox in get_user_owner_mailboxes_query(user))
|
e7db6658497678387f4a93237b686d29bc27d91f
| 3,639,062
|
def modinv(a, m):
"""Modular Multiplicative Inverse"""
a = a % m
g, x, y = egcd(a, m)
if g != 1:
raise Exception('modular inverse does not exist')
else:
return x % m
|
9ddea93398f8c96f828a8efaea36f21f6b8dd13e
| 3,639,063
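modinv assumes an egcd helper (the extended Euclidean algorithm). A standard sketch that satisfies the (g, x, y) contract used above:

def egcd(a, b):
    """Extended Euclid: returns (g, x, y) with a*x + b*y == g == gcd(a, b)."""
    if a == 0:
        return b, 0, 1
    g, x, y = egcd(b % a, a)
    return g, y - (b // a) * x, x

# Example: modinv(3, 7) == 5, since 3 * 5 % 7 == 1.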
|
import socket
def get_ip():
"""Get the ip of the host computer"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('1.1.1.1', 1))
IP = s.getsockname()[0]
except Exception:
IP = '127.0.0.1'
finally:
s.close()
return IP
|
fb4f79eaa25573d7078f69c5d5ad71c51c9d1c44
| 3,639,064
|
def available_structure_info():
""" Lists available attributes for :func:`abagen.mouse.get_structure_info`
"""
return _STRUCTURE_ATTRIBUTES.copy()
|
14591c89c9f7212440da282f50408459692d1fc4
| 3,639,065
|
def centos(function):
"""Decorator to set the Linux distribution to CentOS 7"""
def wrapper(*args, **kwargs):
hpccm.config.g_linux_distro = linux_distro.CENTOS
hpccm.config.g_linux_version = StrictVersion('7.0')
return function(*args, **kwargs)
return wrapper
|
9c54a7aac46bd30d490c625afa4392d5127a2be7
| 3,639,066
|
from math import sqrt

def Normalized2(p):
    """Return vector p normalized by dividing by its length.
    Return (0.0, 1.0) if the result is undefined."""
(x, y) = p
sqrlen = x * x + y * y
if sqrlen < 1e-100:
return (0.0, 1.0)
else:
try:
d = sqrt(sqrlen)
return (x / d, y / d)
        except Exception:
return (0.0, 1.0)
|
42cc78350f264226c624a81ca5b0bd6457d353b0
| 3,639,067
|
def findwskeyword(keyword, sol):
"""Find and return a value for a keyword in the list of the wavelength solution"""
i = sol.index(keyword)
j = sol[i:].index('\n')
return sol[i:i + j].split('=')[1].strip()
|
b3cc028415d74ecfd7ec3868ae591d7b4d3b8860
| 3,639,068
|
import numpy
from math import inf
from queue import PriorityQueue
from typing import Callable, List, Tuple, Union

# `array_to_class`, `manhattan`, `reconstruct_path`, `get_neighbors`, and the
# Node type are assumed helpers from the surrounding module.
def algorithm(array: numpy.ndarray, start: Tuple[int, int], end: Tuple[int, int],
              heuristic: Callable = manhattan) -> Union[List, None]:
"""
Returns a list of all points, for the path between `start` and `end`
:param array: a numpy array of Node instances
:param start: a tuple (or list) of points corresponding to where to start on array
:param end: like start, but for the end
:param heuristic: a function that represents the heuristic (default: manhattan heuristic)
Example:
>>> test = numpy.array(
[[0, 0, 0, 0, 0, 1],
[0, 1, 1, 1, 0, 1],
[0, 1, 0, 0, 0, 1],
[0, 1, 0, 1, 1, 1],
[0, 0, 0, 0, 1, 0],
[1, 1, 1, 0, 0, 0]]
)
>>> print(algorithm(test, (0, 0), (5, 5)))
"""
array = array_to_class(array)
actual_start = array[start[0], start[1]]
actual_end = array[end[0], end[1]]
count = 0
open_set = PriorityQueue()
open_set.put((0, count, actual_start))
came_from = {}
g_score = {node: inf for row in array for node in row}
f_score = {node: inf for row in array for node in row}
g_score[actual_start] = 0
f_score[actual_start] = heuristic(start, end)
open_set_hash = {actual_start}
while not open_set.empty():
current = open_set.get()[2]
current_pos = current.pos
open_set_hash.remove(current)
if current == actual_end:
return reconstruct_path(came_from, start, end)
for neighbor in get_neighbors(array, current_pos):
neighbor_instance = array[neighbor[0], neighbor[1]]
temp_g_score = g_score[current] + 1
if temp_g_score < g_score[neighbor_instance]:
came_from[neighbor_instance] = current
g_score[neighbor_instance] = temp_g_score
f_score[neighbor_instance] = temp_g_score + heuristic(neighbor, end)
if neighbor_instance not in open_set_hash:
count += 1
open_set.put((f_score[neighbor_instance], count, neighbor_instance))
open_set_hash.add(neighbor_instance)
return None
|
9a91e27bc0dbe78f7b2801dbdfbef6747a845aa7
| 3,639,069
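The default heuristic referenced above is assumed to be the usual Manhattan distance on a grid; a minimal sketch:

def manhattan(p, q):
    """Manhattan (L1) distance between two grid coordinates."""
    return abs(p[0] - q[0]) + abs(p[1] - q[1])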
|
from . import logger
def MFString(string_list):
"""
input a list of unicode strings
output: a unicode string formed by encoding, enclosing each
item in double quotes, and concatenating
    27 Nov 2016: The complete case is as yet unimplemented;
    to avoid sending bad X3D into the world, this will instead fail with
    an Exception if any element of the list contains an XML special character in '"&<>
"""
special_characters = u"\'\"&<>"
assert( len(special_characters) == 5)
# check
unicode_type = type(u"")
for item in string_list:
if not type(item) is unicode_type:
            logger.warning("Non unicode entry for MFString: %s" % (repr(item),))
for c in special_characters:
if c in item:
raise ValueError("Unimplemented case: special character in MFString item: %s" % (repr(item),))
return " ".join([u'"%s"' % item for item in string_list])
|
a068c25ab157b5537ea47e625ca8ed9aecd0f4e5
| 3,639,070
|
# `VoidPtr`, `_rl`, and `_to_int` are assumed to come from the surrounding raylib-bindings module.
def re_allocate_memory(ptr: VoidPtr, size: int) -> VoidPtr:
    """
    Internal memory reallocation
ptr: The pointer which is pointing the previously allocated memory block by allocate_memory.
size: The new size of memory block.
"""
return _rl.MemRealloc(
ptr,
_to_int(size)
)
|
806c17a6863db3af8c5b42474fe05c624685757c
| 3,639,071
|
import json
def get_task_manager(setup_file, **kwargs):
""" Create a task manager of a correct type.
Parameters
----------
setup_file : string
File name of the setup file.
kwargs : dict
Additional kwargs.
Returns
-------
manager : TaskManager
Created task manager.
"""
    with open(setup_file) as f:
        setup = json.load(f)
manager = setup['manager'].lower()
if manager == 'slurm':
return SlurmTaskManager(setup_file, **kwargs)
elif manager == 'sge':
return SgeTaskManager(setup_file, **kwargs)
elif manager == 'local':
return LocalTaskManager(setup_file, **kwargs)
else:
        raise ValueError('Unknown task manager: %s' % manager)
|
a297937fd4520549df034a22739250461cbf2c0e
| 3,639,072
|
def format_time(time):
""" Converts datetimes to the format expected in SAML2 XMLs. """
return time.strftime("%Y-%m-%dT%H:%M:%SZ")
|
03651b72aa0b177ac1ac3f1ccafdba6fe967a11a
| 3,639,073
|
def get_delivery_voucher_discount(voucher, total_price, delivery_price):
"""Calculate discount value for a voucher of delivery type."""
voucher.validate_min_amount_spent(total_price)
return voucher.get_discount_amount_for(delivery_price)
|
8ede095730c1d29d01949dff47b4a2893d29720c
| 3,639,074
|
def has_admin_access(request):
# type: (Request) -> bool
"""
Verifies if the authenticated user doing the request has administrative access.
.. note::
Any request view that does not explicitly override ``permission`` by another value than the default
:envvar:`MAGPIE_ADMIN_PERMISSION` will already automatically guarantee that the request user is an
        administrator since HTTP [403] Forbidden would have been otherwise replied. This method is intended
for operations that are more permissive and require conditional validation of administrator access.
.. seealso::
Definitions in :class:`magpie.models.RootFactory` and :class:`magpie.models.UserFactory` define
conditional principals and :term:`ACL` based on the request.
"""
admin_perm = get_constant("MAGPIE_ADMIN_PERMISSION", request)
authz_policy = request.registry.queryUtility(IAuthorizationPolicy) # noqa
principals = get_principals(request)
result = authz_policy.permits(models.RootFactory(request), principals, admin_perm)
return isinstance(result, ACLAllowed)
|
54a109375c60354759d98177a2db275f627034b2
| 3,639,075
|
def model_selection(modelname, num_out_classes=2, pretrain_path=None):
"""
    :param modelname: model architecture to transfer from
    :param num_out_classes: number of output classes
    :param pretrain_path: optional path to pretrained weights
    :return: the constructed TransferModel
"""
return TransferModel(modelchoice=modelname,
num_out_classes=num_out_classes,
pretrain_path=pretrain_path)
|
ef80dd1c5c52bc0d090801ebb1d5e17f303e48ad
| 3,639,077
|
def stack1(x, filters, blocks, stride1=2, dilation=1, name=None):
"""A set of stacked residual blocks.
# Arguments
x: input tensor.
filters: integer, filters of the bottleneck layer in a block.
blocks: integer, blocks in the stacked blocks.
        stride1: default 2, stride of the first layer in the first block.
        dilation: default 1, dilation rate for the stacked blocks.
        name: string, stack label.
# Returns
Output tensor for the stacked blocks.
"""
x = block1(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = block1(x, filters, conv_shortcut=False, dilation=dilation,
name=name + '_block' + str(i))
return x
|
43103a2bcad203b1b32f33e352960bdea8d526c9
| 3,639,078
|
def _CreateDynamicDisplayAdSettings(media_service, opener):
"""Creates settings for dynamic display ad.
Args:
media_service: a SudsServiceProxy instance for AdWords's MediaService.
opener: an OpenerDirector instance.
Returns:
The dynamic display ad settings.
"""
image = _CreateImage(media_service, opener, 'https://goo.gl/dEvQeF')
logo = {
'type': 'IMAGE',
'mediaId': image['mediaId'],
'xsi_type': 'Image'
}
dynamic_settings = {
'landscapeLogoImage': logo,
'pricePrefix': 'as low as',
'promoText': 'Free shipping!',
'xsi_type': 'DynamicSettings',
}
return dynamic_settings
|
c79145ec39a7aed97eea7efe9145eab5c706b146
| 3,639,079
|
def contacts_per_person_normal_00x30():
"""
    Real Name: contacts per person normal 00x30
    Original Eqn: 10
    Units: contact/Day
    Limits: (None, None)
    Type: constant
"""
return 10
|
1d0f7caaa4cceafbc34045b2983e388cd1169f8b
| 3,639,080
|
def _get_scripts_shell(script_file): # type: (pathlib.Path) -> str
"""
    Returns the shell used in the passed script file. If no shell is recognized, an exception is raised.
    Detection depends on the presence of a shebang.
Supported shells: Bash, Fish, Zsh
:param script_file:
:return:
:raises exceptions.UnknownShell: If no shell is recognized
"""
with script_file.open('r') as f:
shebang = f.readline().lower()
for shell in SUPPORTED_SHELLS:
if shell in shebang:
return shell
raise exceptions.UnknownShell('It seems that the currently used post-commit '
'hook uses shebang that is not known to Gitrack: ' + shebang)
|
74332334d9b3caf1be720d656ca6e64f4971e35e
| 3,639,081
|
from shutil import which
def is_cmd_tool(name):
"""
Check whether `name` is on PATH and marked as executable.
From: https://stackoverflow.com/a/34177358
"""
return which(name) is not None
|
a35f84f1bf46aedac488a31402996f075fbe80e2
| 3,639,082
|
import pickle
from collections import defaultdict

# `log` and the `Model` type are assumed to come from the surrounding module.
def load_model(model: Model, language=()):
"""Load geo model and return as dict."""
log.info("Reading geomodel: %s", model)
with open(model.path, "rb") as infile:
m = pickle.load(infile)
result = defaultdict(set)
for _geonameid, l in list(m.items()):
result[l["name"].lower()].add((l["name"], l["latitude"], l["longitude"], l["country"], l["population"]))
for lang in l["alternative_names"]:
if lang in language or not language:
for altname in l["alternative_names"][lang]:
result[altname.lower()].add(
(l["name"], l["latitude"], l["longitude"], l["country"], l["population"]))
log.info("Read %d geographical names", len(result))
return result
|
af77d0e0835b8be6b7b87b142141f4c50082a0ae
| 3,639,083
|
from lxml import etree

def saml_metadata_generator(sp, validated=True, privacypolicy=False, tree=None, disable_entity_extensions=False):
    """
    Generates metadata for a single SP.
    sp: ServiceProvider object
    validated: if False, use unvalidated metadata
    privacypolicy: fill empty privacy policy URLs with a default value
    tree: use as root if given, generate a new root if not
    return: tree if given, otherwise the new EntityDescriptor element
    """
entity, history, validation_date = get_entity(sp, validated)
if not entity:
return tree
if tree is not None:
entity_descriptor = etree.SubElement(tree, "EntityDescriptor", entityID=entity.entity_id)
else:
entity_descriptor = etree.Element("EntityDescriptor",
entityID=entity.entity_id,
nsmap={"ds": 'http://www.w3.org/2000/09/xmldsig#',
"mdattr": 'urn:oasis:names:tc:SAML:metadata:attribute',
"mdui": 'urn:oasis:names:tc:SAML:metadata:ui',
"saml": 'urn:oasis:names:tc:SAML:2.0:assertion',
"xmlns": 'urn:oasis:names:tc:SAML:2.0:metadata',
"xsd": 'http://www.w3.org/2001/XMLSchema',
"xsi": 'http://www.w3.org/2001/XMLSchema-instance',
})
if not disable_entity_extensions:
if history:
metadata_entity_extensions(entity_descriptor, history)
else:
metadata_entity_extensions(entity_descriptor, sp)
metadata_spssodescriptor(entity_descriptor, sp, history, validation_date, privacypolicy)
metadata_contact(entity_descriptor, sp, validation_date)
if history:
metadata_organization(entity_descriptor, history)
else:
metadata_organization(entity_descriptor, sp)
if tree is not None:
return tree
else:
return entity_descriptor
|
78f065fe7962e7221626c41b81b550ceaa9e7370
| 3,639,084
|
def normalize_not_found(wrapped):
"""View decorator to make 404 error messages more readable"""
def wrapper(context, request):
# Replace incoming 404 with one that has a sensible message
response = wrapped(_standard_not_found(), request)
return response
return wrapper
|
2a9a696c98b777e4f7295015840fbff6235092e7
| 3,639,087
|
def process_results(unprocessed, P, R, G):
"""Process the results returned by the worker pool, sorting them by
policy and run e.g. results[i][j][k] are the results from policy i
on run j on graph k. Parameters:
- unprocessed: Unprocessed results (as returned by the worker pool)
- P: number of policies
- R: number of runs
- G: number of graphs/SCMs/test cases
"""
results = []
for i in range(P):
policy_results = []
for r in range(R):
run_results = unprocessed[(i*G*R + G*r):(i*G*R + G*(r+1))]
policy_results.append(run_results)
results.append(policy_results)
return results
|
24c2854723b3fc33c3fee58595f84d789e861fbc
| 3,639,089
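A quick shape check for process_results above, using a flat range so each element equals its own index; this confirms the policy-major, then run, then graph ordering the docstring describes.

P, R, G = 2, 3, 4
flat = list(range(P * R * G))
results = process_results(flat, P, R, G)
assert len(results) == P and len(results[0]) == R and len(results[0][0]) == G
assert results[1][2][0] == 1 * G * R + G * 2  # first graph of policy 1, run 2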
|
def make_inline_table(data):
"""Create an inline table from the given data."""
table = tomlkit.inline_table()
table.update(data)
return table
|
c70352de9a716ad5d3f1f33b33ea65c10ebc8f98
| 3,639,090
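Typical usage of make_inline_table with tomlkit's document API, producing an inline table on one line:

import tomlkit

doc = tomlkit.document()
doc["point"] = make_inline_table({"x": 1, "y": 2})
print(tomlkit.dumps(doc))  # point = {x = 1, y = 2}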
|
import numpy as np
from scipy.special import psi
from sklearn.neighbors import NearestNeighbors

def _mi_dc(x, y, k):
    """
    Calculates the mutual information between a continuous vector x and a
    discrete class vector y.
This implementation can calculate the MI between the joint distribution of
one or more continuous variables (X[:, 1:3]) with a discrete variable (y).
Thanks to Adam Pocock, the author of the FEAST package for the idea.
Brian C. Ross, 2014, PLOS ONE
Mutual Information between Discrete and Continuous Data Sets
"""
y = y.flatten()
n = x.shape[0]
classes = np.unique(y)
knn = NearestNeighbors(n_neighbors=k)
# distance to kth in-class neighbour
d2k = np.empty(n)
# number of points within each point's class
Nx = []
for yi in y:
Nx.append(np.sum(y == yi))
# find the distance of the kth in-class point
for c in classes:
mask = np.where(y == c)[0]
knn.fit(x[mask, :])
d2k[mask] = knn.kneighbors()[0][:, -1]
# find the number of points within the distance of the kth in-class point
knn.fit(x)
m = knn.radius_neighbors(radius=d2k, return_distance=False)
m = [i.shape[0] for i in m]
# calculate MI based on Equation 2 in Ross 2014
MI = psi(n) - np.mean(psi(Nx)) + psi(k) - np.mean(psi(m))
return MI
|
35b1295739d9df390980db11b7f03976c5ada3de
| 3,639,091
|
def get_new_deals_intent_handler(handler_input):
"""
Purpose:
Handler for getting new deals
Args:
handler_input (Dict): Input data from the Alexa Skill
Return:
        alexa_response (Dict): Response for Alexa Skill to handle
"""
feed = get_slickdeals_feed(SLICKDEALS_URL)
deals = get_top_slickdeals(feed)
speech_text = "There are {0} deals. The first deal is {1}".format(
len(deals), deals[0]
)
return (
handler_input.response_builder.speak(speech_text)
.set_card(SimpleCard("Slick Deals", speech_text))
.set_should_end_session(True)
.response
)
|
7c30af6414a99193d5a7f97f58285b06571c85fa
| 3,639,092
|
import numpy as np

def analyse_dataset(imgs, lbls, name=None):
    """Analyse labelled dataset
    # Arguments:
        imgs: ndarray, a set of images
        lbls: ndarray, labels for a set of images
        name: str, optional dataset name used in the printed report
    """
if name is not None:
print('Dataset: {}'.format(name))
unique_lbl, counts = np.unique(lbls, return_counts=True)
min_samples = min(counts)
max_samples = max(counts)
avr_samples = np.mean(counts)
std_dev = np.std(counts)
imgs_dict = dict()
imgs_dict['name'] = name
imgs_dict['n_samples'] = imgs.shape[0]
imgs_dict['samples_shape'] = imgs.shape[1:]
imgs_dict['n_unique_labels'] = len(counts)
imgs_dict['unique_labels'] = unique_lbl
imgs_dict['min_samples'] = min_samples
imgs_dict['max_samples'] = max_samples
imgs_dict['average_samples'] = round(avr_samples, 0)
imgs_dict['std_dev'] = round(std_dev, 2)
for k, v in imgs_dict.items():
print('{}: {}'.format(k, v))
return imgs_dict
|
a6eabfab49b4bdc8590b64275ee2d0bcd19b9a0b
| 3,639,094
|
def transform(doc, *, sort_keys=False):
    """Reorder top-level keys so the heavy definitions come last."""
heavy_defs = ["definitions", "schemas", "responses", "parameters", "paths"]
r = make_dict()
for k, v in doc.items():
if k in heavy_defs:
continue
r[k] = v
for k in heavy_defs:
if k in doc:
r[k] = doc[k]
if sort_keys:
r = str_dict(r) # side effect
return r
|
3b939ac3185cdae147709bab1709dd1a39d426c9
| 3,639,095
|
from typing import Optional
from typing import List
def plot_card(
box: str,
title: str,
data: PackedRecord,
plot: Plot,
events: Optional[List[str]] = None,
commands: Optional[List[Command]] = None,
) -> PlotCard:
"""Create a card displaying a plot.
Args:
box: A string indicating how to place this component on the page.
title: The title for this card.
data: Data for this card.
plot: The plot to be displayed in this card.
events: The events to capture on this card.
commands: Contextual menu commands for this component.
Returns:
A `h2o_wave.types.PlotCard` instance.
"""
return PlotCard(
box,
title,
data,
plot,
events,
commands,
)
|
fe1816d045bcf59cb28e29c90e517c79df82c621
| 3,639,096
|
from urllib.parse import urlparse, parse_qs

def get_cluster_id(url):
"""
Google assign a cluster identifier to a group of web documents
that appear to be the same publication in different places on the web.
How they do this is a bit of a mystery, but this identifier is
important since it uniquely identifies the publication.
"""
vals = parse_qs(urlparse(url).query).get("cluster", [])
if len(vals) == 1:
return vals[0]
else:
        vals = parse_qs(urlparse(url).query).get("cites", [])
if len(vals) == 1:
return vals[0]
return None
|
95a5f554560fd219cd07cbd8c8e251e9c8bd4d5e
| 3,639,097
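An illustrative call to get_cluster_id; the cluster value below is made up.

url = "https://scholar.google.com/scholar?cluster=12345678901234567890"
assert get_cluster_id(url) == "12345678901234567890"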
|
from typing import List
from itertools import zip_longest

# `normalize`, `vol_portfolio_variance`, and `diagonal_allocation_factory` are
# assumed helpers from the surrounding module.
def vol_allocation_factory(covs: List, pres: List = None) -> List[float]:
    """ Allocate capital between portfolios using either cov or pre matrices
    :param covs: List of covariance matrices
    :param pres: List of precision matrices
    :return: Capital allocation vector
    """
if pres is None:
pres = []
try:
return normalize([ 1/vol_portfolio_variance(cov=cov, pre=pre) for cov, pre in zip_longest(covs, pres, fillvalue=None) ])
except Exception as e:
        print('vol allocation failed:', e)
return diagonal_allocation_factory(covs=covs, pres=pres)
|
82e707f6d79e0c2b02c5c6f5acb4c6cce130bd4c
| 3,639,098
|
import requests
def get_inspection_page(**kwargs):
"""Fetch inspection data."""
url = KING_COUNTY_DOMAIN + DATA_PATH
params = INSPECTION_PARAMS.copy()
for key, val in kwargs.items():
if key in INSPECTION_PARAMS:
params[key] = val
resp = requests.get(url, params=params)
resp.raise_for_status()
return resp.content, resp.encoding
|
70c2b95ea6e829f4231c887a59f717a68ede9327
| 3,639,099
|
import requests
import json
def get_table_count(url, table_id):
"""
    Count the number of rows in an ActivityTable
:param url:
:param table_id: The ActivityTable ID to update count from and return
:return: count : count of rows from ActivityTable
"""
token = ActivitySites.objects.get(site_id=1)
if token.activity_tables_token:
headers = {'content-type': 'application/json',
'Authorization': 'Token ' + token.activity_tables_token}
else:
headers = {'content-type': 'application/json'}
print("Token Not Found")
response = requests.get(url, headers=headers, verify=True)
data = json.loads(response.content)
count = None
try:
count = data['data_count']
ActivityTable.objects.filter(table_id=table_id)\
.update(unique_count=count)
except KeyError:
pass
return count
|
d7243c202317f0302fb2515f09aa096f0c275619
| 3,639,100
|
import numpy as np

# `audiofile_to_input_vector` and `trans_text_ch_to_vector` are assumed helpers
# from the surrounding speech-recognition module.
def get_audio_mfcc_features(txt_files, wav_files, n_input, n_context, word_num_map, txt_labels=None):
    """
    Extract MFCC features from the audio data.
    :param txt_files: transcript files (or None if txt_labels is given)
    :param wav_files: audio files
    :param n_input: number of MFCC features per frame
    :param n_context: number of context frames
    :param word_num_map: character-to-index mapping
    :param txt_labels: transcript labels, used when txt_files is None
    :return: audio features, their lengths, text vectors, and their lengths
    """
    audio_features = []
    audio_features_len = []
    text_vector = []
    text_vector_len = []
    if txt_files is not None:
        txt_labels = txt_files
    for txt_obj, wav_file in zip(txt_labels, wav_files):
        # Load the audio data and convert it to feature values
        audio_data = audiofile_to_input_vector(wav_file, n_input, n_context)
        audio_data = audio_data.astype('float32')
        audio_features.append(audio_data)
        audio_features_len.append(np.int32(len(audio_data)))
        # Load the text corresponding to the audio
        target = []
        if txt_files is not None:  # txt_obj is a file
            target = trans_text_ch_to_vector(txt_obj, word_num_map)
        else:
            target = trans_text_ch_to_vector(None, word_num_map, txt_obj)  # txt_obj is a label
        # target = text_to_char_array(target)
        text_vector.append(target)
        text_vector_len.append(len(target))
    audio_features = np.asarray(audio_features)
    audio_features_len = np.asarray(audio_features_len)
    text_vector = np.asarray(text_vector)
    text_vector_len = np.asarray(text_vector_len)
    return audio_features, audio_features_len, text_vector, text_vector_len
|
bed03fb10944d00e27af400776a8efc894770e46
| 3,639,101
|
import numpy as np

def getOffsetsFromPixelFractions(col, row):
"""
Determine just the fractional part (the intra-pixel part) of the col,row position.
For example, if (col, row) = (123.4, 987.6), then
(colFrac, rowFrac) = (.4, .6).
Function then returns the offset necessary for addressing the interleaved PRF array.
to ensure you get the location appropriate for your sub-pixel values.
Inputs
------
col
(float) Column position
row
(float) Row position.
    Returns
    ------
    (colOffset, rowOffset)
        (int, int) offsets necessary for addressing the interleaved PRF array.
"""
gridSize = 9
colFrac = np.remainder(float(col), 1)
rowFrac = np.remainder(float(row), 1)
colOffset = gridSize - np.round(gridSize * colFrac) - 1
rowOffset = gridSize - np.round(gridSize * rowFrac) - 1
return int(colOffset), int(rowOffset)
|
4f5945f4e3e6e2dd71056b615dc3571f6ece42c6
| 3,639,102
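Working through the docstring's own example, (col, row) = (123.4, 987.6): colFrac = 0.4 gives 9 - round(3.6) - 1 = 4, and rowFrac = 0.6 gives 9 - round(5.4) - 1 = 3.

assert getOffsetsFromPixelFractions(123.4, 987.6) == (4, 3)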
|
def all_index(request):
"""
Inventory Index View
"""
# build changelist
item_changelist = HTSChangeList(request, Item,
list_filter=[],
search_fields=[],
list_per_page=200,
model_admin=ItemAdmin(Item, None)
)
context_dict = {
'item_changelist': item_changelist,
'page_name': 'Inventory Index'
}
context_dict.update(INVENTORY_CONTEXT_DEFAULTS)
return render(request,
'inventory/inventory_all_index.html',
context_dict)
|
7fdd0b5f278b55767a7918e2977315312e823e93
| 3,639,103
|
def calcSeason(ra, time):
"""Calculate the 'season' in the survey for a series of ra/dec/time values of an observation.
Based only on the RA of the point on the sky, it calculates the 'season' based on when this
point would be overhead. To convert to an integer season label, take np.floor of the returned
float season values.
Note that seasons should be calculated for a fixed point on the sky, not for each pointing that
overlaps a point on the sky. For example, bad things might happen if you compute the season
for observations that overlap RA=0, but were centered on RA=359.
Parameters
----------
ra : float
The RA (in degrees) of the point on the sky
time : np.ndarray
The times of the observations, in MJD
Returns
-------
np.ndarray
The season values
"""
# A reference RA and equinox to anchor ra/season calculation - RA = 0 is overhead at this (local) time.
# This time was chosen as it is close to the expected start of the survey.
# Generally speaking, this is the equinox (RA=0 is overhead at midnight)
Equinox = 60208.00106863426
# convert ra into 'days'
dayRA = ra / 360 * 365.25
firstSeasonBegan = Equinox + dayRA - 0.5 * 365.25
seasons = (time - firstSeasonBegan) / 365.25
# Set first season to 0
seasons = seasons - np.floor(np.min(seasons))
return seasons
# The value for the equinox above was calculated as follows:
#from astropy.time import Time
#from astropy.coordinates import EarthLocation
#loc = EarthLocation.of_site('Cerro Pachon')
#t = Time('2023-09-21T00:01:32.33', format='isot', scale='utc', location=loc)
#print(t.sidereal_time('apparent') - loc.lon, t.utc.mjd)
|
1309a302fac9d01d7b5567d5722bf8f04dc9b88e
| 3,639,104
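A short usage sketch for calcSeason; the RA and MJD values are illustrative.

import numpy as np

times = np.arange(60300.0, 61030.0, 30.0)       # ~2 years of visits, in MJD
seasons = calcSeason(30.0, times)               # float season for a point at RA = 30 deg
season_labels = np.floor(seasons).astype(int)   # integer season per visit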
|
def set_node_event_info(info: NodeEventInfo) -> Item:
    """Builds a cache item from node event information.
    :param info: Node event information.
    :returns: Item to be cached.
    """
if info.event_type in (
EventType.MONIT_CONSENSUS_FINALITY_SIGNATURE,
EventType.MONIT_BLOCK_FINALIZED,
EventType.MONIT_BLOCK_ADDED,
):
names = [
info.block_hash,
]
elif info.event_type == EventType.MONIT_DEPLOY_PROCESSED:
names = [
info.block_hash,
info.deploy_hash,
]
else:
        names = []
return Item(
item_key=ItemKey(
paths=[
info.network,
COL_EVENT,
info.event_type.name[6:],
],
names=names,
),
data=info,
expiration=EXPIRATION_COL_EVENT
)
|
9ee50e73b1c50172ada1b6040b675cbda5aede44
| 3,639,105
|
def check_hashtarget(bible_hash, target):
    """ Tests whether the biblepay hash is valid for the hash target,
    i.e. numerically lower. Returns True if it is lower and all is fine. """
    rs = False
    try:
        rs = int(bible_hash, 16) < int(target, 16)
    except (TypeError, ValueError):
        pass
    return rs
|
a0041d8834b2a0af0a08c2562ffed599925ed5a8
| 3,639,106
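Two illustrative calls: the hash passes only when it is numerically below the target.

assert check_hashtarget("00ff", "0fff") is True   # 255 < 4095
assert check_hashtarget("ffff", "0fff") is False  # 65535 >= 4095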
|
def assert_and_infer_cfg_fl(cfg_fl, args, make_immutable=True, train_mode=True):
"""
Calls /semantic-segmentation/config.assert_and_infer_cfg and adds additional assertions
"""
if args.manual_client_setup:
cfg_fl.CLIENT.MANUAL = args.manual_client_setup
if cfg_fl.CLIENT.MANUAL:
print('-------------------------')
print('> Clients manual settings')
print('-------------------------')
for i in cfg_fl.CLIENT.POPULATION:
print(i)
if args.replicate:
cfg_fl.REPLICATE = args.replicate
if args.seed:
cfg_fl.SEED = args.seed
cfg_fl.TORCH_SEED = args.seed
if args.task:
cfg_fl.TASK = args.task
if args.dataset:
cfg_fl.DATASET.DATASET_NAME = args.dataset
if args.clients_per_dist:
cfg_fl.FEDERATION.CLIENTS_PER_DIST = args.clients_per_dist
if cfg_fl.FEDERATION.CLIENTS_PER_DIST is not None and cfg_fl.FEDERATION.NUM_CLIENTS is None:
cfg_fl.FEDERATION.NUM_CLIENTS = cfg_fl.FEDERATION.CLIENTS_PER_DIST * cfg_fl.FEDERATION.NUM_DISTRIBUTIONS
if args.num_clients:
cfg_fl.FEDERATION.NUM_CLIENTS = args.num_clients
if args.print_logx:
cfg_fl.LOGX_STDOUT = True
if args.num_distributions:
cfg_fl.FEDERATION.NUM_DISTRIBUTIONS = args.num_distributions
assertion_num_clients = "Either 'clients_per_dist' or 'num_clients' needs to be specified"
assert cfg_fl.FEDERATION.CLIENTS_PER_DIST or cfg_fl.FEDERATION.NUM_CLIENTS, assertion_num_clients
# if args.dist_type:
# cfg.FEDERATION.DIST_TYPE = args.dist_type
    if args.clustering_method:
        cfg_fl.FEDERATION.CLUSTERING_METHOD = args.clustering_method
if args.federation_method:
assert args.federation_method in ['fomo', 'embeddings', 'local', 'fedavg']
cfg_fl.FEDERATION.METHOD = args.federation_method
if args.federation_method == 'fedavg':
cfg_fl.FEDERATION.FED_AVERAGING = True
if args.random_distributions:
cfg_fl.FEDERATION.RANDOM_DISTS = args.random_distributions # True
if args.federated_averaging:
cfg_fl.FEDERATION.FED_AVERAGING = True
cfg_fl.FEDERATION.METHOD = 'fedavg'
if args.local_train_val_size:
cfg_fl.FEDERATION.LOCAL_TRAIN_VAL_SIZE = args.local_train_val_size
if args.federation_epoch:
cfg_fl.FEDERATION.EPOCH = args.federation_epoch
if args.num_update_clients:
cfg_fl.CLIENT_WEIGHT.NUM_UPDATE_CLIENTS = args.num_update_clients
if args.model_weight_delta:
cfg_fl.CLIENT_WEIGHT.WEIGHT_DELTA = args.model_weight_delta
if args.explicit_weight_delta:
cfg_fl.CLIENT_WEIGHT.WEIGHT_DELTA = args.explicit_weight_delta
cfg_fl.CLIENT_WEIGHT.LEAVE_ONE_OUT = False
if args.client_weight_epsilon:
cfg_fl.CLIENT_WEIGHT.EPSILON = args.client_weight_epsilon
if args.client_weight_epsilon_decay:
cfg_fl.CLIENT_WEIGHT.EPSILON_DECAY = args.client_weight_epsilon_decay
if args.client_weight_method:
cfg_fl.CLIENT_WEIGHT.METHOD = args.client_weight_method
if args.update_positive_delta_only:
cfg_fl.MODEL_WEIGHT.UPDATE_POSITIVE_ONLY = args.update_positive_delta_only
if args.leave_one_out:
cfg_fl.CLIENT_WEIGHT.LEAVE_ONE_OUT = args.leave_one_out
if args.baseline_model:
cfg_fl.CLIENT_WEIGHT.BASELINE = args.baseline_model
if args.train_split:
cfg_fl.CLIENT.TRAIN_SPLIT = args.train_split
cfg_fl.CLIENT.VAL_SPLIT = 1 - args.train_split
if args.dataset == 'cifar100':
args.num_classes = 100
elif args.dataset == 'cifar10':
args.num_classes = 10
elif args.dataset == 'mnist':
args.num_classes = 10
return cfg_fl
|
b779e0a5f06d06b9ebc542f3cd7c190efb70bca5
| 3,639,107
|
def replace_service(name,
metadata,
spec,
source,
template,
old_service,
saltenv,
namespace="default",
**kwargs):
"""
Replaces an existing service with a new one defined by name and namespace,
having the specificed metadata and spec.
"""
body = __create_object_body(
kind="Service",
obj_class=kubernetes.client.V1Service,
spec_creator=__dict_to_service_spec,
name=name,
namespace=namespace,
metadata=metadata,
spec=spec,
source=source,
template=template,
saltenv=saltenv,
)
# Some attributes have to be preserved
# otherwise exceptions will be thrown
body.spec.cluster_ip = old_service["spec"]["cluster_ip"]
body.metadata.resource_version = old_service["metadata"][
"resource_version"]
cfg = _setup_conn(**kwargs)
try:
api_instance = kubernetes.client.CoreV1Api()
api_response = api_instance.replace_namespaced_service(
name, namespace, body)
return api_response.to_dict()
except (ApiException, HTTPError) as exc:
if isinstance(exc, ApiException) and exc.status == 404:
return None
else:
log.exception("Exception when calling "
"CoreV1Api->replace_namespaced_service")
raise CommandExecutionError(exc)
finally:
_cleanup(**cfg)
|
e363ed9d9233ff6455963edba5bfa8109f6c7260
| 3,639,108
|
import datetime
import logging

from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Text

# TimeElements comes from the dfdatetime package used by the surrounding module.
from dfdatetime import time_elements
def timesketch_add_manual_event(
data: Text, timestamp: Optional[int] = 0,
date_string: Optional[Text] = '',
timestamp_desc: Optional[Text] = '',
attributes: Optional[Dict[str, Any]] = None,
tags: Optional[List[str]] = None) -> Dict[str, str]:
"""Add a manually generated event to the sketch.
Args:
        data (str): The message string for the event to be generated.
timestamp (int): Optional timestamp in either seconds since Epoch or
microseconds since Epoch.
date_string (str): An optional date time as a human readable string. If
neither date_string nor timestamp is provided then the current timestamp
will be used as the time of the event.
timestamp_desc (str): Optional timestamp description field.
attributes (dict): Optional dict which contains extra attributes to add
to the manual event.
tags (list): Optional list of tags to add to the manual event.
Returns:
Dictionary with query results.
"""
connect()
state_obj = state.state()
sketch = state_obj.get_from_cache('timesketch_sketch')
if not sketch:
print('Not able to connect to a sketch.')
return {}
# Default timestamp.
date_obj = datetime.datetime.now(datetime.timezone.utc)
date = date_obj.isoformat()
if timestamp:
try:
date_obj = datetime.datetime.fromtimestamp(
timestamp, datetime.timezone.utc)
except ValueError:
date_obj = datetime.datetime.fromtimestamp(
timestamp / 1e6, datetime.timezone.utc)
date = date_obj.isoformat()
elif date_string:
elements = time_elements.TimeElements()
        if 'T' in date_string:
            try:
                elements.CopyFromStringISO8601(date_string)
            except ValueError:
                logging.error(
                    'Unable to convert date string, is it really in ISO 8601 format?')
                return {}
        else:
            try:
                elements.CopyFromString(date_string)
            except ValueError:
                try:
                    elements.CopyFromStringRFC1123(date_string)
                except ValueError:
                    logging.error(
                        'Unable to convert date string, needs to be in ISO 8601, 1123 or '
                        'in the format YYYY-MM-DD hh:mm:ss.######[+-]##:##')
                    return {}
        date = elements.CopyToDateTimeStringISO8601()
if not timestamp_desc:
timestamp_desc = 'Event Logged'
if not isinstance(tags, (tuple, list)):
tags = []
if not isinstance(attributes, dict):
attributes = {}
if not date:
logging.error('Unable to convert date string, please check it.')
return {}
return sketch.add_event(
data, date, timestamp_desc, attributes=attributes, tags=tags)
|
c84f04bbd3a9344c5797e6d79be141b05f6edae0
| 3,639,109
|
def filter_vcf_by_sex(vcf_file, data):
"""Post-filter a single sample VCF, handling sex chromosomes.
Handles sex chromosomes and mitochondrial. Does not try to resolve called
hets into potential homozygotes when converting diploid to haploid.
    Filtering of pooled samples is skipped; support still needs to be implemented.
"""
if len(vcfutils.get_samples(vcf_file)) > 1:
return vcf_file
_, sexes = _configured_ploidy_sex([data])
sex = sexes.pop()
out_file = "%s-ploidyfix%s" % utils.splitext_plus(vcf_file)
if not utils.file_exists(out_file):
orig_out_file = out_file
out_file = orig_out_file.replace(".vcf.gz", ".vcf")
with file_transaction(out_file) as tx_out_file:
with open(tx_out_file, "w") as out_handle:
with utils.open_gzipsafe(vcf_file) as in_handle:
for line in in_handle:
if line.startswith("#"):
out_handle.write(line)
else:
line = _fix_line_ploidy(line, sex)
if line:
out_handle.write(line)
if orig_out_file.endswith(".gz"):
out_file = vcfutils.bgzip_and_index(out_file, data["config"])
return out_file
|
6eb6528ce4deb86b8c8ecd8746143cb0f6c82fde
| 3,639,110
|
def gen_spacer(spacer_char="-", nl=2):
"""
Returns a spacer string with 60 of designated character, "-" is default
It will generate two lines of 60 characters
"""
spacer = ""
for i in range(nl):
spacer += spacer_char * 60
spacer += "\n"
return spacer
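# Quick usage check (illustrative): two default "-" lines, then one "=" line.
print(gen_spacer())
print(gen_spacer("=", nl=1))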
|
7434f191dafdf500c2fc3e67373afc664e543ce0
| 3,639,111
|
def repo_config_factory(repo_type, repo_id, repo_label, **kwargs):
"""
Constructs a repository configuration in form of a
TTL structure utilizing the TTL templates from
./repo_types_template.
"""
# Check if the repo_type is a known template
if repo_type not in REPO_TYPES:
raise RepositoryTypeUnknown
# Get the path to the template
template_path = TEMPLATE_FOLDER / '{}{}'.format(repo_type, '.ttl')
# Open the template file and read it
with open(template_path) as template_file:
template = template_file.read()
    # Get the default values for the template (copied, so the module-level
    # DEFAULTS dict is not mutated by the update below)
    params = dict(DEFAULTS)
    # Overwrite them with the given kwargs
    params.update(kwargs)
# Fill the params in the template
ttl = template.format(repo_id=repo_id.replace('-', '_'), repo_label=repo_label, **params)
# return the final TTL
return ttl
|
3840d698691f226d56d25233c3fc00db23abd5d9
| 3,639,112
|
from typing import Union

import numpy as np

# Type alias reconstructed from the signature below (an assumption).
NDArrayOrFloat = Union[np.ndarray, float]

def oil_isothermal_density(rho: NDArrayOrFloat, p: NDArrayOrFloat) -> NDArrayOrFloat:
"""Calculates the oil density for a given pressure at 15.6 degC
B&W 1992 Equation 18
Args:
rho: The oil reference density (g/cc) at 15.6 degC
            can be compensated for dissolved gases by running `oil_rho_sat` first.
p: Pressure (MPa)
Returns:
The oil density (g/cc) at pressure p
"""
return (
rho
+ (0.00277 * p - 1.71e-7 * np.power(p, 3)) * np.power(rho - 1.15, 2)
+ 3.49e-4 * p
)
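# Worked example with made-up values: a 0.85 g/cc reference oil at 30 MPa.
# Term by term: 0.85 + (0.0831 - 0.004617) * 0.09 + 0.01047 ~= 0.868 g/cc
print(oil_isothermal_density(0.85, 30.0))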
|
f8184f4820b5a19525b47f357b92ea7059e2bd74
| 3,639,113
|
import numpy as np

def get_waveform_dataset(path):
"""Loads the waveform dataset from a given path.
Args:
path: The path to the .npz file containing the waveform data set.
Returns:
An array of waveform chunks loaded from the given path.
"""
dataset = np.load(path)['arr_0']
return dataset
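# Round-trip sketch (illustrative path and data): np.savez stores a positional
# array under 'arr_0', which is the key the loader expects.
np.savez('/tmp/waveforms.npz', np.zeros((4, 256)))
print(get_waveform_dataset('/tmp/waveforms.npz').shape)  # (4, 256)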
|
3d8e13cddd7abdb3bc459b68761e4a6385208c77
| 3,639,114
|
def untranslate_module_name(module):
"""Rename module names mention in JSON to names that we can import
This reverses the translation applied by translate_module_name() to
a module name available to the current version of Python.
"""
if PY3:
# remap `__builtin__` and `exceptions` to the `builtins` module
if module == '__builtin__':
module = 'builtins'
elif module == 'exceptions':
module = 'builtins'
return module
|
fae87c9fb852ff1b6b82e4ebccf9c058fb4a313f
| 3,639,117
|
def RGBRamp(size=256, upperValue=.6666666666666667):
"""Generate an RGB color ramp, values range from 0.0 to 1.0"""
assert size > 0
hsv = HSVRamp(size, upperValue)
rgb = Numeric.zeros( (hsv.shape[0], 3), viewerConst.FPRECISION )
for i in xrange(hsv.shape[0]):
rgb[i] = ToRGB(hsv[i])
return rgb
|
10be72b654ac9e36610bc4c08fd05edbba45de8a
| 3,639,118
|
def find_poly_ras_intersect(shape, raster_dir, extension='.tif'):
""" Finds all the tiles falling within raster object
the get shape geometry should be seperated from the intesect check,
currently causes a exit code 139 on unix box
:param polygon:
:param extension:
:param raster_dir:
"""
print 'starting shape: {}'.format(shape)
# get vector geometry
if not os.path.isfile(shape):
raise NotImplementedError('Shapefile not found')
polygon = ogr.Open(shape)
layer = polygon.GetLayer()
feature = layer.GetFeature(0)
vector_geo = feature.GetGeometryRef()
# print 'vector geometry: {}'.format(vector_geo)
tiles = [os.path.join(raster_dir, x) for x in
os.listdir(os.path.join(raster_dir)) if x.endswith(extension)]
raster_list = []
for tile in tiles:
print tile, srt.tif_proj4_spatial_reference(tile)
if srt.check_same_reference_system(shape, tile):
raster_geo = get_polygon_from_raster(tile)
if raster_geo.Intersect(vector_geo):
print 'tile: {} intersects {}'.format(os.path.basename(tile), os.path.basename(shape))
raster_list.append(tile)
return raster_list
|
8f7ae23a2c442ff5b61bde46d8b42ac4c2c8eade
| 3,639,119
|
from typing import Iterable

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
def Session(
retries: int = 10,
backoff_factor: float = 0.3,
allowed_methods: Iterable[str] = ('HEAD', 'TRACE', 'GET', 'POST', 'PUT', 'OPTIONS', 'DELETE'),
status_forcelist: Iterable[int] = (408, 429, 500, 502, 503, 504),
) -> requests.Session:
"""Return a Session object with full retry capabilities.
Args:
retries (int): number of retries
backoff_factor (float): speed factor for retries (in seconds)
allowed_methods (iterable): http methods to retry on
status_forcelist (iterable): http status codes to retry on
Returns:
:py:class:`requests.Session`: session object
"""
session = requests.Session()
retry = Retry(
total=retries,
connect=retries,
read=retries,
redirect=retries,
# status=retries,
allowed_methods=allowed_methods,
status_forcelist=status_forcelist,
backoff_factor=backoff_factor,
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
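# Typical use (example.com stands in for a real endpoint): transient 5xx/429
# responses are retried with exponential backoff before an error surfaces.
session = Session(retries=3, backoff_factor=0.5)
resp = session.get('https://example.com/')
print(resp.status_code)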
|
ca7d5f4d3f34e24c67eae47c01a6bd63796b03be
| 3,639,120
|
def tp53():
"""Create a TP53 gene fixture."""
params = {
'label': 'tumor protein p53',
'concept_id': 'hgnc:11998',
'symbol': 'TP53',
'location_annotations': [],
'strand': None,
'locations': [
{
'_id': 'ga4gh:VCL._Cl_XG2bfBUVG6uwi-jHtCHavOAyfPXN',
'chr': '17',
'interval': {
'end': 'p13.1',
'start': 'p13.1',
'type': 'CytobandInterval'
},
'species_id': 'taxonomy:9606',
'type': 'ChromosomeLocation'
}
],
'previous_symbols': [],
'aliases': [
'p53',
'LFS1'
],
'symbol_status': 'approved',
'associated_with': [
'vega:OTTHUMG00000162125',
'refseq:NM_000546',
'cosmic:TP53',
'omim:191170',
'ucsc:uc060aur.1',
'uniprot:P04637',
'orphanet:120204',
'ccds:CCDS73968',
'ccds:CCDS73971',
'ccds:CCDS73970',
'ccds:CCDS73969',
'ccds:CCDS73967',
'ccds:CCDS73966',
'ccds:CCDS73965',
'ccds:CCDS73964',
'ccds:CCDS73963',
'ccds:CCDS11118',
'ccds:CCDS45605',
'ccds:CCDS45606',
'ena.embl:AF307851',
'pubmed:6396087',
'pubmed:3456488',
'pubmed:2047879'
],
'xrefs': [
'ensembl:ENSG00000141510',
'ncbigene:7157'
]
}
return Gene(**params)
|
d1c41af9dce6b5eee3aa475c207a669529001b7d
| 3,639,121
|
def factorOrder(factors, varOrder):
"""Return an order of factors for sampling given a variable order for sampling"""
pri = [0 for x in varOrder]
for i,x in enumerate(varOrder): # first, find position of each var in sampling order
pri[x]=i
    factorOrder = [ Factor() for x in varOrder ] # fill order with blanks initially
    ent = [-1.0 for x in varOrder]               # conditional entropy cache; -1 = not computed yet
for f in factors:
f_pri = max([pri[x] for x in f.vars]) # get last-sampled variable for this factor
if factorOrder[f_pri].nvar == 0:
factorOrder[f_pri] = f # if first factor for this variable, save it
else: # o.w. take one with the lowest conditional entropy:
if ent[f_pri] < 0: # (compute previous' if necessary)
ent[f_pri] = factorOrder[f_pri].entropy() - factorOrder[f_pri].sum([f_pri]).entropy()
ent_new = f.entropy() - f.sum([f_pri]).entropy() # (and this factor's)
if ent_new < ent[f_pri]: # (keep whichever is lower)
factorOrder[f_pri] = f
ent[f_pri] = ent_new
return factorOrder
|
98ec337ab126d77b854be28f937eef392b9c8144
| 3,639,122
|
def home(request):
"""
rendering ui by template for homepage
this view never cache for delivering correct translation inside template
"""
template = loader.get_template('weather/home.html')
return HttpResponse(template.render({}, request))
|
b2fdf6facd633441da9d11a53a781e9e418b42de
| 3,639,124
|
def plot_histogram(df, path, col_x, ax=None, size=None, save=True, suffix=None,
show=False, **kwargs):
"""Geneate a histogram plot.
Args:
df (:class:`pandas.DataFrame`): Data frame to plot.
path (str): Path to data frame to use if ``df`` is None, also used
as the basis for output path.
col_x (str): Name of column with values to plot.
ax (:class:`matplotlib.axes.Axes`): Matplotlib axes; defaults to
None to generate a new figure with axes.
size (Sequence[float]): Sequence of ``width, height`` to size the
figure; defaults to None.
save (bool): True to save the plot; defaults to True.
suffix: String to append to output path before extension;
defaults to None to ignore.
        show: True to display the image; otherwise, the figure will only
            be saved to file, if :attr:`config.savefig` is set.
            Defaults to False.
kwargs (Any): Extra arguments to :meth:`decorate_plot`.
Returns:
:class:`matplotlib.axes.Axes`: Matplotlib axes.
"""
# load data frame from CSV unless already given and set up figure
if df is None:
df = pd.read_csv(path)
if ax is None:
fig, gs = plot_support.setup_fig(1, 1, size)
ax = plt.subplot(gs[0, 0])
# generate histogram
n, bins, patches = ax.hist(df[col_x])
decorate_plot(ax, **kwargs)
# save and display plot if indicated
if save:
out_path = libmag.make_out_path(path, suffix=suffix)
plot_support.save_fig(out_path, config.savefig)
if show: plt.show()
return ax
|
ec97358a9b7f8c3d20dd7a15d77b588fc2bffbe0
| 3,639,125
|
def __check_interface_state(duthost, interface, state='up'):
"""
Check interface status
Args:
duthost: DUT host object
interface: Interface of DUT
state: state of DUT's interface
Returns:
        Bool confirming whether the interface is in the given state
"""
ports_down = duthost.interface_facts(up_ports=[interface])['ansible_facts']['ansible_interface_link_down_ports']
if 'down' in state:
return interface in ports_down
return interface not in ports_down
|
bc17d489064e9a81ec77dad5ab3682c9a96fa88d
| 3,639,126
|
from re import findall

def find_dateTime_in_html(text):
    """
    Find all dateTime attribute values of <time> tags in an HTML string.
    Returns an empty list when there is no match (findall already does this).
    """
    return findall('<time dateTime="(.*?)">', text)
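# Example on a made-up HTML snippet:
html = '<article><time dateTime="2021-05-01T12:00">May 1</time></article>'
print(find_dateTime_in_html(html))  # ['2021-05-01T12:00']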
|
0ba36b69a52f421e303da4c10b70362d6d724c96
| 3,639,127
|
import torch
def get_number_of_voxels_per_class(labels: torch.Tensor) -> torch.Tensor:
"""
Computes the number of voxels for each class in a one-hot label map.
:param labels: one-hot label map in shape Batches x Classes x Z x Y x X or Classes x Z x Y x X
:return: A tensor of shape [Batches x Classes] containing the number of non-zero voxels along Z, Y, X
"""
    if len(labels.shape) not in (4, 5):
raise Exception("labels must have either 4 (Classes x Z x Y x X) "
"or 5 dimensions (Batches x Classes x Z x Y x X), found:{}"
.format(len(labels.shape)))
if len(labels.shape) == 4:
labels = labels[None, ...]
return torch.count_nonzero(labels, dim=(2, 3, 4))
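# Shape check on synthetic labels: 2 batches, 3 classes, a 4x4x4 volume.
dense = torch.randint(0, 3, (2, 4, 4, 4))
one_hot = torch.nn.functional.one_hot(dense, num_classes=3).permute(0, 4, 1, 2, 3)
print(get_number_of_voxels_per_class(one_hot))  # shape [2, 3], each row sums to 64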
|
568a91639a42cf3cd3debe365c5a963512d95dfc
| 3,639,128
|
import logging

_LOGGER = logging.getLogger(__name__)

def get_columns_width(user_width):
    """Define the width of the report columns, enforcing a minimum of 7."""
default_width = [30, 7, 60]
if not user_width:
return default_width
try:
return [7 if user_width[i] < 7 else user_width[i] for i in range(3)]
except (TypeError, IndexError):
_LOGGER.error(
"Invalid configuration for table column widths, default values" " used %s",
default_width,
)
return default_width
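# Behavior check (illustrative inputs): widths below 7 are clamped, and bad
# input falls back to the defaults with an error logged.
print(get_columns_width(None))         # [30, 7, 60]
print(get_columns_width([5, 20, 40]))  # [7, 20, 40]
print(get_columns_width("oops"))       # [30, 7, 60]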
|
96901c79ac7ba2cf6d5dc56fe26d63e81a2437d4
| 3,639,129
|
from flask import make_response, render_template, request

def tx_failure():
    """
    Render the response for a failed `tx` request.
    """
message = request.args.get('m')
protocol = request.args.get('p')
address = request.args.get('a')
command = request.args.get('c')
repeats = request.args.get('r')
bits = request.args.get('b')
response = make_response(
render_template(
"tx.html",
success=False,
message=message,
protocol=protocol,
address=address,
command=command,
repeats=repeats,
bits=bits
)
)
response.headers.set('Irbox-Success', 'false')
return response
|
f5938cf59207125030502113ce3b541301279b98
| 3,639,130
|
import pydoc
def read_docstring(object_):
"""
Returns object docstring without the FILE information.
"""
fmt = "```\n{}\n```\n"
docs = pydoc.plain(pydoc.render_doc(object_)).split("FILE")[0].rstrip()
return fmt.format(docs)
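# Example: render the documentation of a builtin as a fenced code block.
print(read_docstring(len))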
|
5c21f6eadf400ac9316e3f44d98464536b9b7536
| 3,639,131
|
import numpy as np

def _bernoulli_spiral(theta, theta_offset=0., *args, **kwargs):
"""Return Equiangular (Bernoulli's) spiral
Args:
theta: array-like, angles from polar coordinates to be converted
theta_offset: float, angle offset in radians (2*pi = 0)
Kwargs:
exp_scale: growth rate of the exponential
"""
exp_scale = kwargs.pop('exp_scale', 0.1)
x, y = np.exp(exp_scale * theta) * np.cos(theta + theta_offset), np.exp(
exp_scale * theta) * np.sin(theta + theta_offset)
x_norm = np.max(np.abs(x))
y_norm = np.max(np.abs(y))
x, y = x / x_norm, y / y_norm
return x, y
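# Sample the spiral with made-up parameters; outputs are normalized to [-1, 1].
theta = np.linspace(0, 6 * np.pi, 200)
x, y = _bernoulli_spiral(theta, theta_offset=np.pi / 4, exp_scale=0.15)
print(x.min(), x.max(), y.min(), y.max())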
|
3e889bc61ab8e93daefc2feeaad40ae86c167627
| 3,639,132
|
import copy

import curlify2
import httpx
def _redacted_to_curl(request: httpx.Request) -> str:
"""Pass through to curlify2.to_curl that redacts the authorization in the headers
"""
if (auth_header := request.headers.get('authorization')) is None:
return curlify2.to_curl(request)
req_copy = copy.copy(request)
req_copy.headers = copy.deepcopy(request.headers)
if "Bearer" in auth_header:
req_copy.headers['authorization'] = "Bearer [REDACTED]"
else:
req_copy.headers['authorization'] = "[REDACTED]"
return curlify2.to_curl(req_copy)
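# Example against a hypothetical endpoint: the Bearer token is redacted.
req = httpx.Request('GET', 'https://api.example.com/v1/items',
                    headers={'authorization': 'Bearer s3cr3t'})
print(_redacted_to_curl(req))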
|
e3a713c3fcf6c875af4cae6ab4c5e696eb0bd432
| 3,639,133
|
def parse_scales_line(line):
    """
    Args:
        - line: string of the form 'Scales: name1:scale1, name2:scale2, ...'
    Returns:
        - scales_dict: mapping from each name to its scale as a float
    """
    def advance_past_token(s, token):
        # avoid shadowing the builtin `str`
        return s[s.find(token) + len(token):]
    scales_dict = {}
    line = advance_past_token(line, 'Scales:')
    pair_strs = line.split(',')
    for pair_str in pair_strs:
        dname, scale = pair_str.split(':')
        scales_dict[dname.strip()] = float(scale)
    return scales_dict
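# Example input (format inferred from the parser above):
print(parse_scales_line('Scales: depth:0.5, rgb:1.0'))
# {'depth': 0.5, 'rgb': 1.0}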
|
b16e1f431b878aa6418beaed3f141fe928a229e1
| 3,639,135
|
import collections
def parse_remove_configuration(configuration):
"""
Turns the configuration line of splitting into a name and a set of params.
"""
if configuration is None:
return "None", None
print('conf', configuration)
conf_dict = collections.OrderedDict(configuration)
name = 'remove'
for key in conf_dict.keys():
if key != 'weights' and key != 'boost':
name += '_'
name += key
return name, conf_dict
|
40bf749c2e142cef534f945179b987fd3c7ba6d8
| 3,639,136
|
import numpy as np

def _calc_cost_grad_first(data_input, w, label, features):
"""Calculate the partial cost and gradient."""
train_data = read_stage_file(data_input, features + [label])
size_train = train_data.shape[0]
labels = train_data[label].values
train_data = train_data[features].values
if size_train > 0:
dim = train_data.shape[1]
if dim != len(w):
            w = np.zeros(dim, dtype=float)  # (re)initialize weights to match the feature dimension
prediction = (labels * np.dot(train_data, w))
# hinge loss (select negative values)
idx = np.nonzero((prediction - 1) < 0)
loss = np.sum(1 - prediction[idx])
# -y * x for all values lesser than 1
grad = - np.dot(labels[idx], train_data[idx])
return [loss, grad, size_train], [labels, train_data]
else:
return [0, 0, size_train], [labels, train_data]
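# Illustrative only: the same hinge-loss/gradient step on a toy dataset,
# bypassing the read_stage_file dependency (all values are made up).
X = np.array([[1.0, 2.0], [2.0, 1.0], [-1.0, -1.0]])
y = np.array([1.0, 1.0, -1.0])
w0 = np.zeros(X.shape[1])
margins = y * X.dot(w0)
violated = np.nonzero(margins - 1 < 0)
print(np.sum(1 - margins[violated]))      # loss: 3.0
print(-np.dot(y[violated], X[violated]))  # grad: [-4. -4.]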
|
d7b62ac39f824f7598cc83a078bc0f5e4e49c4ea
| 3,639,137
|
def subtract_dbm(dbm1: float, dbm2: float):
"""Adds two decibel values"""
watt1 = dbm_to_watt(dbm1)
watt2 = dbm_to_watt(dbm2)
return watt_to_dbm(watt1 - watt2)
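# Minimal sketch of the helper conversions assumed above (standard formulas,
# names matching the calls in subtract_dbm):
import math

def dbm_to_watt(dbm: float) -> float:
    """Convert a power in dBm to watts."""
    return 10 ** ((dbm - 30) / 10)

def watt_to_dbm(watt: float) -> float:
    """Convert a power in watts to dBm."""
    return 10 * math.log10(watt) + 30

print(subtract_dbm(30.0, 27.0))  # ~27.0 dBm (1 W - ~0.5 W ~= 0.5 W)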
|
ea7c6f9372182a6a39d72265428e86b26b4da765
| 3,639,138
|
def focused_evaluate(board):
"""
Given a board, return a numeric rating of how good
that board is for the current player.
A return value >= 1000 means that the current player has won;
a return value <= -1000 means that the current player has lost
"""
score = board.longest_chain(board.get_current_player_id()) * 10
# Prefer having your pieces in the center of the board.
for row in range(6):
for col in range(7):
if board.get_cell(row, col) == board.get_current_player_id():
score -= abs(3-col)
elif board.get_cell(row, col) == board.get_other_player_id():
score += abs(3-col)
if board.is_game_over():
if int(board.is_win()) == int(board.get_current_player_id()):
            score = +1000
score -= board.num_tokens_on_board()
elif int(board.is_win()) == int(board.get_other_player_id()):
score = -1000
return score
|
b2cbb91cdb048ef41a13532e400173daa05af4b8
| 3,639,140
|
def tanh(x, name=None):
"""
sparse tanh activation, requiring x to be a sparse coo or sparse csr tensor.
.. math::
out = tanh(x)
Parameters:
x (Tensor): The input Sparse Tensor with data type float32, float64.
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Returns:
A Sparse Tensor with the same data type and shape as ``x`` .
Examples:
.. code-block:: python
import paddle
from paddle.fluid.framework import _test_eager_guard
with _test_eager_guard():
dense_x = paddle.to_tensor([-2, 0, 1], dtype='float32')
sparse_x = dense_x.to_sparse_coo(1)
out = paddle.sparse.tanh(sparse_x)
"""
assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
if x.is_sparse_coo():
return _C_ops.final_state_sparse_coo_tanh(x)
elif x.is_sparse_csr():
return _C_ops.final_state_sparse_csr_tanh(x)
else:
raise ValueError(
"Currently, sparse.tanh only support the input of SparseCooTensor or SparseCsrTensor"
)
|
24bf0889c2e1ba642442e0d8f6b11eeeaf94bf6c
| 3,639,141
|
from unittest.mock import MagicMock

def collector(monkeypatch):
    """
    Pytest fixture: base case with connect/disconnect/info_metrics mocked out.
    """
col = SunPowerPVSupervisorCollector(use_device_data_timestamp=False)
attrs = [
'connect',
'disconnect',
'info_metrics',
]
mocked = MagicMock()
mocked.connect.return_value = []
mocked.disconnect.return_value = []
mocked.info_metrics.return_value = []
for attr in attrs:
monkeypatch.setattr(col, attr, getattr(mocked, attr))
return col
|
f9e99071b2dde231b4a3fc7c89e00846d26efb12
| 3,639,143
|