_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 31 13.1k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
q258900 | tic_objectsearch | validation | def tic_objectsearch(
objectid,
idcol_to_use="ID",
apiversion='v0',
forcefetch=False,
cachedir='~/.astrobase/mast-cache',
verbose=True,
timeout=90.0,
refresh=5.0,
maxtimeout=180.0,
maxtries=3,
jitter=5.0,
raiseonfail=False
):
'''
This runs a TIC search for a specified TIC ID.
Parameters
----------
objectid : str
The object ID to look up information for.
idcol_to_use : str
This is the name of the object ID column to use when looking up the
provided `objectid`. This is one of {'ID', 'HIP', 'TYC', 'UCAC',
'TWOMASS', 'ALLWISE', 'SDSS', 'GAIA', 'APASS', 'KIC'}.
apiversion : str
The API version of the MAST service to use. This sets the URL that this
function will call, using `apiversion` as key into the `MAST_URLS` dict
above.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
| python | {
"resource": ""
} |
q258901 | send_email | validation | def send_email(sender,
subject,
content,
email_recipient_list,
email_address_list,
email_user=None,
email_pass=None,
email_server=None):
'''This sends an email to addresses, informing them about events.
The email account settings are retrieved from the settings file as described
above.
Parameters
----------
sender : str
The name of the sender to use in the email header.
subject : str
Subject of the email.
content : str
Content of the email.
email_recipient_list : list of str
This is a list of email recipient names of the form:
`['Example Person 1', 'Example Person 2', ...]`
email_address_list : list of str
This is a list of email recipient addresses of the form:
`['example1@example.com', 'example2@example.org', ...]`
email_user : str
The username of the email server account that will send the emails. If
this is None, the value of EMAIL_USER from the
~/.astrobase/.emailsettings file will be used. If that is None as well,
this function won't work.
email_pass : str
The password of the email server account that will send the emails. If
this is None, the value of EMAIL_PASS from the
~/.astrobase/.emailsettings file will be used. If that is None as well,
this function won't work.
email_server : str
The address of the email server that will send the emails. If this is
None, the value of EMAIL_SERVER from the ~/.astrobase/.emailsettings file
will be used. If that is None as well, this function won't work.
Returns
-------
bool
True if email sending succeeded. False if email sending failed.
'''
if not email_user:
email_user = EMAIL_USER
if not email_pass:
email_pass = EMAIL_PASSWORD
if not email_server:
email_server = EMAIL_SERVER
if not email_server and email_user and email_pass:
raise ValueError("no email server address and "
"credentials available, can't continue")
msg_text = EMAIL_TEMPLATE.format(
sender=sender,
hostname=socket.gethostname(),
activity_time='%sZ' % datetime.utcnow().isoformat(),
activity_report=content
)
email_sender = '%s <%s>' % (sender, EMAIL_USER)
# put together the recipient and email lists
email_recipients = [('%s <%s>' % (x,y))
for (x,y) in zip(email_recipient_list,
email_address_list)]
| python | {
"resource": ""
} |
q258902 | fourier_sinusoidal_func | validation | def fourier_sinusoidal_func(fourierparams, times, mags, errs):
'''This generates a sinusoidal light curve using a Fourier cosine series.
Parameters
----------
fourierparams : list
This MUST be a list of the following form like so::
[period,
epoch,
[amplitude_1, amplitude_2, amplitude_3, ..., amplitude_X],
[phase_1, phase_2, phase_3, ..., phase_X]]
where X is the Fourier order.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the model will be generated. The times will be used to generate model
mags, and the input `times`, `mags`, and `errs` will be resorted by
model phase and returned.
Returns
-------
(modelmags, phase, ptimes, pmags, perrs) : tuple
Returns the model mags and phase values. Also returns the input `times`,
`mags`, and `errs` sorted by the model's phase.
'''
period, epoch, famps, fphases = fourierparams
# figure out the order from the length of the Fourier param list
forder = len(famps)
# phase the times with this period
iphase = (times - epoch)/period
iphase = iphase - np.floor(iphase)
| python | {
"resource": ""
} |
q258903 | fourier_sinusoidal_residual | validation | def fourier_sinusoidal_residual(fourierparams, times, mags, errs):
'''
This returns the residual between the model mags and the actual mags.
Parameters
----------
fourierparams : list
This MUST be a list of the following form like so::
[period,
epoch,
[amplitude_1, amplitude_2, amplitude_3, ..., amplitude_X],
[phase_1, phase_2, phase_3, ..., phase_X]]
where X is the Fourier order.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the model will be generated. The times will be used to generate model
mags, and the input `times`, `mags`, and `errs` will be resorted by
| python | {
"resource": ""
} |
q258904 | _make_magseries_plot | validation | def _make_magseries_plot(axes,
stimes,
smags,
serrs,
magsarefluxes=False,
ms=2.0):
'''Makes the mag-series plot tile for `checkplot_png` and
`twolsp_checkplot_png`.
Parameters
----------
axes : matplotlib.axes.Axes object
The Axes object where the generated plot will go.
stimes,smags,serrs : np.array
The mag/flux time-series arrays along with associated errors. These
should all have been run through nan-stripping and sigma-clipping
beforehand.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
ms : float
The `markersize` kwarg to use when making the mag-series plot.
Returns
-------
Does not return anything, works on the input Axes object directly.
'''
scaledplottime = stimes - npmin(stimes)
axes.plot(scaledplottime,
smags,
marker='o',
ms=ms, ls='None',mew=0,
color='green',
rasterized=True)
# flip y axis for mags
if not | python | {
"resource": ""
} |
q258905 | precess_coordinates | validation | def precess_coordinates(ra, dec,
epoch_one, epoch_two,
jd=None,
mu_ra=0.0,
mu_dec=0.0,
outscalar=False):
'''Precesses target coordinates `ra`, `dec` from `epoch_one` to `epoch_two`.
This takes into account the jd of the observations, as well as the proper
motion of the target mu_ra, mu_dec. Adapted from J. D. Hartman's
VARTOOLS/converttime.c [coordprecess].
Parameters
----------
ra,dec : float
The equatorial coordinates of the object at `epoch_one` to precess in
decimal degrees.
epoch_one : float
Origin epoch to precess from to target epoch. This is a float, like:
1985.0, 2000.0, etc.
epoch_two : float
Target epoch to precess from origin epoch. This is a float, like:
2000.0, 2018.0, etc.
jd : float
The full Julian date to use along with the propermotions in `mu_ra`, and
`mu_dec` to handle proper motion along with the coordinate frame
precession. If one of `jd`, `mu_ra`, or `mu_dec` is missing, the proper
motion will not be used to calculate the final precessed coordinates.
mu_ra,mu_dec : float
The proper motion in mas/yr in right ascension and declination. If these
are provided along with `jd`, the total proper motion of the object will
be taken into account to calculate the final precessed coordinates.
outscalar : bool
If True, converts the output coordinates from one-element np.arrays to
scalars.
Returns
-------
precessed_ra, precessed_dec : float
A tuple of precessed equatorial coordinates in decimal degrees at
`epoch_two` taking into account proper motion if `jd`, `mu_ra`, and
`mu_dec` are provided.
'''
raproc, decproc = np.radians(ra), np.radians(dec)
if ((mu_ra != 0.0) and (mu_dec != 0.0) and jd):
jd_epoch_one = JD2000 + (epoch_one - epoch_two)*365.25
raproc = (
raproc +
(jd - jd_epoch_one)*mu_ra*MAS_P_YR_TO_RAD_P_DAY/np.cos(decproc)
)
decproc = decproc + (jd - jd_epoch_one)*mu_dec*MAS_P_YR_TO_RAD_P_DAY
ca = np.cos(raproc)
cd = np.cos(decproc)
sa = np.sin(raproc)
sd = np.sin(decproc)
if epoch_one != epoch_two:
t1 = 1.0e-3 * (epoch_two - epoch_one)
t2 = 1.0e-3 * (epoch_one - 2000.0)
a = ( t1*ARCSEC_TO_RADIANS * (23062.181 + t2*(139.656 + 0.0139*t2) +
t1*(30.188 - 0.344*t2+17.998*t1)) )
b = t1*t1*ARCSEC_TO_RADIANS*(79.280 + 0.410*t2 + 0.205*t1) + | python | {
"resource": ""
} |
q258906 | _single_true | validation | def _single_true(iterable):
'''This returns True if only one True-ish element exists in `iterable`.
Parameters
----------
iterable : iterable
Returns
-------
bool
True if only one True-ish element exists in `iterable`. False otherwise.
'''
# return True if exactly one true found
iterator = iter(iterable)
# | python | {
"resource": ""
} |
q258907 | get_epochs_given_midtimes_and_period | validation | def get_epochs_given_midtimes_and_period(
t_mid,
period,
err_t_mid=None,
t0_fixed=None,
t0_percentile=None,
verbose=False
):
'''This calculates the future epochs for a transit, given a period and a
starting epoch
The equation used is::
t_mid = period*epoch + t0
Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.
Only one of `err_t_mid` or `t0_fixed` should be passed.
Parameters
----------
t_mid : np.array
A np.array of transit mid-time measurements
period : float
The period used to calculate epochs, per the equation above. For typical
use cases, a period precise to ~1e-5 days is sufficient to get correct
epochs.
err_t_mid : None or np.array
If provided, contains the errors of the transit mid-time
measurements. The zero-point epoch is then set equal to the average of
the transit times, weighted as `1/err_t_mid^2` . This minimizes the
covariance between the transit epoch and the period (e.g., Gibson et
al. 2013). For standard O-C analysis this is the best method.
t0_fixed : None or float:
If provided, use this t0 as the starting epoch. (Overrides all others).
t0_percentile : None or float
| python | {
"resource": ""
} |
q258908 | jd_to_datetime | validation | def jd_to_datetime(jd, returniso=False):
'''This converts a UTC JD to a Python `datetime` object or ISO date string.
Parameters
----------
jd : float
The Julian date measured at UTC.
returniso : bool
If False, returns a naive Python `datetime` object corresponding to
`jd`. If True, returns the ISO format string corresponding to the date
and time at | python | {
"resource": ""
} |
q258909 | jd_corr | validation | def jd_corr(jd,
ra, dec,
obslon=None,
obslat=None,
obsalt=None,
jd_type='bjd'):
'''Returns BJD_TDB or HJD_TDB for input JD_UTC.
The equation used is::
BJD_TDB = JD_UTC + JD_to_TDB_corr + romer_delay
where:
- JD_to_TDB_corr is the difference between UTC and TDB JDs
- romer_delay is the delay caused by finite speed of light from Earth-Sun
This is based on the code at:
https://mail.scipy.org/pipermail/astropy/2014-April/003148.html
Note that this does not correct for:
1. precession of coordinates if the epoch is not 2000.0
2. precession of coordinates if the target has a proper motion
3. Shapiro delay
4. Einstein delay
Parameters
----------
jd : float or array-like
The Julian date(s) measured at UTC.
ra,dec : float
The equatorial coordinates of the object in decimal degrees.
obslon,obslat,obsalt : float or None
The longitude, latitude of the observatory in decimal degrees and
altitude of the observatory in meters. If these are not provided, the
corrected JD will be calculated with respect to the center of the Earth.
jd_type : {'bjd','hjd'}
Conversion type to perform, either to Barycentric Julian Date ('bjd')
or to Heliocentric Julian Date ('hjd').
Returns
-------
float or np.array
The converted BJD or HJD.
'''
if not HAVEKERNEL:
LOGERROR('no JPL kernel available, can\'t continue!')
return
# Source unit-vector
## Assume coordinates in ICRS
## Set distance to unit (kilometers)
# convert the angles to degrees
rarad = np.radians(ra)
decrad = np.radians(dec)
cosra = np.cos(rarad)
sinra = np.sin(rarad)
cosdec = np.cos(decrad)
sindec = np.sin(decrad)
# this assumes that the target is very far away
src_unitvector = np.array([cosdec*cosra,cosdec*sinra,sindec])
# Convert epochs to astropy.time.Time
## Assume JD(UTC)
if (obslon is None) or (obslat is None) or (obsalt is None):
t = astime.Time(jd, scale='utc', format='jd')
else:
t = astime.Time(jd, scale='utc', format='jd',
location=('%.5fd' % obslon,
'%.5fd' % obslat,
obsalt))
# Get Earth-Moon barycenter position
## NB: jplephem uses Barycentric Dynamical Time, e.g. JD(TDB)
## and gives positions relative to solar system barycenter
barycenter_earthmoon = jplkernel[0,3].compute(t.tdb.jd)
# Get Moon position vectors from the center of Earth to the Moon
# this means we get the following vectors from the ephemerides
| python | {
"resource": ""
} |
q258910 | _lclist_parallel_worker | validation | def _lclist_parallel_worker(task):
'''This is a parallel worker for makelclist.
Parameters
----------
task : tuple
This is a tuple containing the following items:
task[0] = lcf
task[1] = columns
task[2] = lcformat
task[3] = lcformatdir
task[4] = lcndetkey
Returns
-------
dict or None
This contains all of the info for the object processed in this LC read
operation. If this fails, returns None
'''
lcf, columns, lcformat, lcformatdir, lcndetkey = task
# get the bits needed for lcformat handling
# NOTE: we re-import things in this worker function because sometimes
# functions can't be pickled correctly for passing them to worker functions
# in a processing pool
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# we store the full path of the light curve
lcobjdict = {'lcfname':os.path.abspath(lcf)}
try:
# read the light curve in
lcdict = readerfunc(lcf)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
# insert all of the columns
for colkey in columns:
if '.' in colkey:
getkey = colkey.split('.')
else:
getkey = [colkey]
try:
thiscolval = _dict_get(lcdict, getkey)
except Exception as e:
| python | {
"resource": ""
} |
q258911 | _cpinfo_key_worker | validation | def _cpinfo_key_worker(task):
'''This wraps `checkplotlist.checkplot_infokey_worker`.
This is used to get the correct dtype for each element in retrieved results.
Parameters
----------
task : tuple
task[0] = cpfile
task[1] = keyspeclist (infokeys kwarg from `add_cpinfo_to_lclist`)
Returns
-------
dict
All of the requested keys from the checkplot are returned along with
their values in a dict.
'''
cpfile, keyspeclist = task
keystoget = [x[0] for x in keyspeclist]
nonesubs = [x[-2] for x in keyspeclist]
nansubs = [x[-1] for x in keyspeclist]
# reform the keystoget into a list of lists
for i, k in enumerate(keystoget):
thisk = k.split('.')
if sys.version_info[:2] < (3,4):
thisk = [(int(x) if x.isdigit() else x) for x in thisk]
else:
thisk = [(int(x) if x.isdecimal() else x) for x in thisk]
keystoget[i] = thisk
# add in the objectid as well to match to the object catalog later
keystoget.insert(0,['objectid'])
nonesubs.insert(0, '')
nansubs.insert(0,'')
# get all the keys we need
vals | python | {
"resource": ""
} |
q258912 | LatLngList.handle_change | validation | def handle_change(self, change):
""" Handle changes from atom ContainerLists """
op = change['operation']
if op in 'append':
self.add(len(change['value']), LatLng(*change['item']))
elif op == 'insert':
self.add(change['index'], LatLng(*change['item']))
| python | {
"resource": ""
} |
q258913 | AndroidMapView.create_widget | validation | def create_widget(self):
""" Create the underlying widget.
"""
self.init_options()
#: Retrieve the actual map
MapFragment.newInstance(self.options).then(
self.on_map_fragment_created)
# Holder for the fragment
self.widget = FrameLayout(self.get_context())
# I wrote this a few days ago and already forget how this hack works...
# lol We can't simply get a map reference using getMapAsync in the
# return value like we normally do with a normal call function return
# value. The bridge design was modified to store an object that cannot
# be decoded normally (via a standard Bridge.Packer) by saving the new
# object | python | {
"resource": ""
} |
q258914 | AndroidMapView.init_options | validation | def init_options(self):
""" Initialize the underlying map options.
"""
self.options = GoogleMapOptions()
d = self.declaration
self.set_map_type(d.map_type)
if d.ambient_mode:
self.set_ambient_mode(d.ambient_mode)
if (d.camera_position or d.camera_zoom or
d.camera_tilt or d.camera_bearing):
self.update_camera()
if d.map_bounds:
self.set_map_bounds(d.map_bounds)
if not d.show_compass:
self.set_show_compass(d.show_compass)
if not d.show_zoom_controls:
self.set_show_zoom_controls(d.show_zoom_controls)
if not d.show_toolbar:
self.set_show_toolbar(d.show_toolbar)
if d.lite_mode:
| python | {
"resource": ""
} |
q258915 | AndroidMapView.init_map | validation | def init_map(self):
""" Add markers, polys, callouts, etc.."""
d = self.declaration
if d.show_location:
self.set_show_location(d.show_location)
if d.show_traffic:
self.set_show_traffic(d.show_traffic)
if d.show_indoors:
self.set_show_indoors(d.show_indoors)
if d.show_buildings:
self.set_show_buildings(d.show_buildings)
#: Local ref access is faster
mapview = self.map
mid = mapview.getId()
#: Connect signals
#: Camera
mapview.onCameraChange.connect(self.on_camera_changed)
mapview.onCameraMoveStarted.connect(self.on_camera_move_started)
mapview.onCameraMoveCanceled.connect(self.on_camera_move_stopped)
mapview.onCameraIdle.connect(self.on_camera_move_stopped)
mapview.setOnCameraChangeListener(mid)
mapview.setOnCameraMoveStartedListener(mid)
mapview.setOnCameraMoveCanceledListener(mid)
| python | {
"resource": ""
} |
q258916 | AndroidMapView.init_info_window_adapter | validation | def init_info_window_adapter(self):
""" Initialize the info window adapter. Should only be done if one of
the markers defines a custom view.
"""
adapter = self.adapter
if adapter:
return #: Already initialized
adapter = GoogleMap.InfoWindowAdapter() | python | {
"resource": ""
} |
q258917 | AndroidMapView.on_map_fragment_created | validation | def on_map_fragment_created(self, obj_id):
""" Create the fragment and pull the map reference when it's loaded.
"""
self.fragment = MapFragment(__id__=obj_id)
#: Setup callback so we know when the map is ready
self.map.onMapReady.connect(self.on_map_ready)
self.fragment.getMapAsync(self.map.getId())
context = self.get_context()
def on_transaction(id):
trans | python | {
"resource": ""
} |
q258918 | AndroidMapItemBase.destroy | validation | def destroy(self):
""" Remove the marker if it was added to the map when destroying"""
marker = self.marker
parent = self.parent()
if marker:
if parent:
| python | {
"resource": ""
} |
q258919 | AndroidMapMarker.child_added | validation | def child_added(self, child):
""" If a child is added we have to make sure the map adapter exists """
if child.widget:
# TODO: Should we keep count and remove the adapter if not all
# markers request it?
| python | {
"resource": ""
} |
q258920 | AndroidMapMarker.on_marker | validation | def on_marker(self, marker):
""" Convert our options into the actual marker object"""
mid, pos = marker
self.marker = Marker(__id__=mid)
mapview = self.parent()
# Save ref
mapview.markers[mid] = self
# Required so the packer can pass the id | python | {
"resource": ""
} |
q258921 | AndroidMapCircle.on_marker | validation | def on_marker(self, mid):
""" Convert our options into the actual circle object"""
self.marker = Circle(__id__=mid)
self.parent().markers[mid] = self
#: Required so the packer can pass the id
self.marker.setTag(mid)
| python | {
"resource": ""
} |
q258922 | CountVectorizer.fit_transform | validation | def fit_transform(self, raw_documents, y=None):
""" Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
documents = super(CountVectorizer, | python | {
"resource": ""
} |
q258923 | Flow.data | validation | def data(self, X=None, y=None, sentences=None):
"""
Add data to flow
"""
| python | {
"resource": ""
} |
q258924 | Flow.transform | validation | def transform(self, transformer):
"""
Add transformer to flow and apply transformer to data in flow
Parameters
----------
transformer : Transformer
a transformer to transform data
"""
self.transformers.append(transformer)
from languageflow.transformer.tagged import TaggedTransformer
if isinstance(transformer, TaggedTransformer):
self.X, self.y = transformer.transform(self.sentences)
if isinstance(transformer, TfidfVectorizer):
| python | {
"resource": ""
} |
q258925 | Flow.train | validation | def train(self):
"""
Train model with transformed data
"""
for i, model in enumerate(self.models):
N = [int(i * len(self.y)) for i in self.lc_range]
for n in N:
X = self.X[:n]
y = self.y[:n]
e = Experiment(X, | python | {
"resource": ""
} |
q258926 | Flow.export | validation | def export(self, model_name, export_folder):
"""
Export model and transformers to export_folder
Parameters
----------
model_name: string
name of model to export
export_folder: string
folder to store exported model and transformers
"""
for transformer in self.transformers:
if isinstance(transformer, MultiLabelBinarizer):
joblib.dump(transformer,
join(export_folder, "label.transformer.bin"),
protocol=2)
if isinstance(transformer, TfidfVectorizer):
joblib.dump(transformer,
join(export_folder, "tfidf.transformer.bin"),
protocol=2)
if isinstance(transformer, CountVectorizer):
joblib.dump(transformer,
| python | {
"resource": ""
} |
q258927 | SGDClassifier.fit | validation | def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
| python | {
"resource": ""
} |
q258928 | print_cm | validation | def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
# Print header
print(" " + empty_cell, end=" ")
for label in labels:
print("%{0}s".format(columnwidth) % label, end=" ")
print()
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=" ")
for j in range(len(labels)):
cell = "%{0}.1f".format(columnwidth) % cm[i, j]
| python | {
"resource": ""
} |
q258929 | get_from_cache | validation | def get_from_cache(url: str, cache_dir: Path = None) -> Path:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
cache_dir.mkdir(parents=True, exist_ok=True)
filename = re.sub(r'.+/', '', url)
# get cache path to put the file
cache_path = cache_dir / filename
if cache_path.exists():
return cache_path
# make HEAD request to check ETag
response = requests.head(url)
if response.status_code != 200:
if "www.dropbox.com" in url:
# dropbox return code 301, so we ignore this error
pass
else:
raise IOError("HEAD request failed for url {}".format(url))
# add ETag to filename if it exists
# etag = response.headers.get("ETag")
if not cache_path.exists():
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
fd, temp_filename = tempfile.mkstemp()
logger.info("%s not found in cache, downloading to %s", url, temp_filename)
# GET file object
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
| python | {
"resource": ""
} |
q258930 | CRF.fit | validation | def fit(self, X, y):
"""Fit CRF according to X, y
Parameters
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem)
| python | {
"resource": ""
} |
q258931 | CRF.predict | validation | def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
"""
if | python | {
"resource": ""
} |
q258932 | Board.serve | validation | def serve(self, port=62000):
""" Start LanguageBoard web application
Parameters
----------
port: int
port to serve web application
"""
from http.server import HTTPServer, | python | {
"resource": ""
} |
q258933 | FastTextClassifier.predict | validation | def predict(self, X):
""" In order to obtain the most likely label for a list of text
Parameters
----------
X : list of string
Raw texts
Returns
-------
C : list of string
List labels
"""
x = X
if not isinstance(X, list):
x = [X]
| python | {
"resource": ""
} |
q258934 | KimCNNClassifier.fit | validation | def fit(self, X, y):
"""Fit KimCNNClassifier according to X, y
Parameters
----------
X : list of string
each item is a raw text
y : list of string
each item is a label
"""
####################
# Data Loader
####################
word_vector_transformer = WordVectorTransformer(padding='max')
X = word_vector_transformer.fit_transform(X)
X = LongTensor(X)
self.word_vector_transformer = word_vector_transformer
y_transformer = LabelEncoder()
y = y_transformer.fit_transform(y)
y = torch.from_numpy(y)
self.y_transformer = y_transformer
dataset = CategorizedDataset(X, y)
dataloader = DataLoader(dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=4)
####################
# Model
####################
KERNEL_SIZES = self.kernel_sizes
NUM_KERNEL = self.num_kernel
EMBEDDING_DIM = self.embedding_dim
model = TextCNN(
vocab_size=word_vector_transformer.get_vocab_size(),
embedding_dim=EMBEDDING_DIM,
output_size=len(self.y_transformer.classes_),
kernel_sizes=KERNEL_SIZES,
num_kernel=NUM_KERNEL)
if | python | {
"resource": ""
} |
q258935 | config_sources | validation | def config_sources(app, environment, cluster, configs_dirs, app_dir,
local=False, build=False):
"""Return the config files for an environment & cluster specific app."""
sources = [
# Machine-specific
(configs_dirs, 'hostname'),
(configs_dirs, 'hostname-local'),
(configs_dirs, 'hostname-build'),
# Global
(configs_dirs, 'common'),
# Environment + Cluster
(configs_dirs, 'common-%s' % environment),
(configs_dirs, 'common-%s-%s' % (environment, cluster)),
(configs_dirs, 'common-local'),
(configs_dirs, 'common-build'),
# Machine-specific overrides
(configs_dirs, 'common-overrides'),
| python | {
"resource": ""
} |
q258936 | available_sources | validation | def available_sources(sources):
"""Yield the sources that are present."""
for dirs, name in sources:
for directory in dirs:
| python | {
"resource": ""
} |
q258937 | smush_config | validation | def smush_config(sources, initial=None):
"""Merge the configuration sources and return the resulting DotDict."""
if initial is None:
initial = {}
config = DotDict(initial)
for fn in sources:
log.debug('Merging %s', fn)
mod = get_config_module(fn)
config | python | {
"resource": ""
} |
q258938 | merge_dicts | validation | def merge_dicts(d1, d2, _path=None):
"""
Merge dictionary d2 into d1, overriding entries in d1 with values from d2.
d1 is mutated.
_path is for internal, recursive use.
"""
if _path is None:
_path = ()
if isinstance(d1, dict) and isinstance(d2, dict):
for k, v in d2.items():
if isinstance(v, MissingValue) and v.name is None:
v.name = '.'.join(_path + (k,))
if isinstance(v, DeletedValue):
d1.pop(k, None)
elif k not in d1:
if isinstance(v, dict):
d1[k] = merge_dicts({}, v, _path + (k,))
else:
d1[k] = v
else:
if isinstance(d1[k], dict) and isinstance(v, dict):
d1[k] = merge_dicts(d1[k], v, _path + (k,))
elif isinstance(d1[k], list) and isinstance(v, list):
| python | {
"resource": ""
} |
q258939 | filter_dict | validation | def filter_dict(unfiltered, filter_keys):
"""Return a subset of a dictionary using the | python | {
"resource": ""
} |
q258940 | DotDict._convert_item | validation | def _convert_item(self, obj):
"""
Convert obj into a DotDict, or list of DotDict.
Directly nested lists aren't supported.
Returns the result
"""
if isinstance(obj, dict) and not isinstance(obj, DotDict):
| python | {
"resource": ""
} |
q258941 | filter_config | validation | def filter_config(config, deploy_config):
"""Return a config subset using the filter defined in the deploy config."""
if not os.path.isfile(deploy_config):
return DotDict()
| python | {
"resource": ""
} |
q258942 | seeded_auth_token | validation | def seeded_auth_token(client, service, seed):
"""Return an auth token based on the client+service+seed tuple."""
| python | {
"resource": ""
} |
q258943 | write_config | validation | def write_config(config, app_dir, filename='configuration.json'):
"""Write configuration to the applicaiton directory."""
path = os.path.join(app_dir, filename)
with open(path, 'w') as f:
| python | {
"resource": ""
} |
q258944 | validate_date | validation | def validate_date(date_text):
"""Return True if valid, raise ValueError if not"""
try:
if int(date_text) < 0:
return True
except ValueError:
pass
try:
datetime.strptime(date_text, '%Y-%m-%d')
return | python | {
"resource": ""
} |
q258945 | get_download_total | validation | def get_download_total(rows):
"""Return the total downloads, and the downloads column"""
headers = rows.pop(0)
index = headers.index('download_count')
| python | {
"resource": ""
} |
q258946 | add_download_total | validation | def add_download_total(rows):
"""Add a final row to rows showing the total downloads"""
total_row = [""] * len(rows[0])
total_row[0] = "Total"
total_downloads, downloads_column = get_download_total(rows)
| python | {
"resource": ""
} |
q258947 | find_and_patch_entry | validation | def find_and_patch_entry(soup, entry):
"""
Modify soup so Dash.app can generate TOCs on the fly.
"""
link = soup.find("a", {"class": "headerlink"}, href="#" + entry.anchor)
tag = soup.new_tag("a") | python | {
"resource": ""
} |
q258948 | inv_entry_to_path | validation | def inv_entry_to_path(data):
"""
Determine the path from the intersphinx inventory entry
Discard the anchors between head and tail to make it
compatible with situations where extra meta information is encoded.
"""
path_tuple | python | {
"resource": ""
} |
q258949 | main | validation | def main(
source,
force,
name,
quiet,
verbose,
destination,
add_to_dash,
add_to_global,
icon,
index_page,
enable_js,
online_redirect_url,
parser,
):
"""
Convert docs from SOURCE to Dash.app's docset format.
"""
try:
logging.config.dictConfig(
create_log_config(verbose=verbose, quiet=quiet)
)
except ValueError as e:
click.secho(e.args[0], fg="red")
raise SystemExit(1)
if icon:
icon_data = icon.read()
if not icon_data.startswith(PNG_HEADER):
log.error(
'"{}" is not a valid PNG image.'.format(
click.format_filename(icon.name)
)
)
raise SystemExit(1)
else:
icon_data = None
source, dest, name = setup_paths(
source,
destination,
name=name,
add_to_global=add_to_global,
force=force,
)
if parser is None:
parser = parsers.get_doctype(source)
if parser is None:
log.error(
| python | {
"resource": ""
} |
q258950 | create_log_config | validation | def create_log_config(verbose, quiet):
"""
We use logging's levels as an easy-to-use verbosity controller.
"""
if verbose and quiet:
raise ValueError(
"Supplying both --quiet and --verbose makes no sense."
)
elif verbose:
level = logging.DEBUG
elif quiet:
level = logging.ERROR
else:
level = logging.INFO
logger_cfg = {"handlers": ["click_handler"], "level": level}
return {
"version": 1,
"formatters": {"click_formatter": {"format": "%(message)s"}},
"handlers": {
| python | {
"resource": ""
} |
q258951 | setup_paths | validation | def setup_paths(source, destination, name, add_to_global, force):
"""
Determine source and destination using the options.
"""
if source[-1] == "/":
source = source[:-1]
if not name:
name = os.path.split(source)[-1]
elif name.endswith(".docset"):
name = name.replace(".docset", "")
if add_to_global:
destination = DEFAULT_DOCSET_PATH
dest = os.path.join(destination or "", name + ".docset")
dst_exists = | python | {
"resource": ""
} |
q258952 | prepare_docset | validation | def prepare_docset(
source, dest, name, index_page, enable_js, online_redirect_url
):
"""
Create boilerplate files & directories and copy vanilla docs inside.
Return a tuple of path to resources and connection to sqlite db.
"""
resources = os.path.join(dest, "Contents", "Resources")
docs = os.path.join(resources, "Documents")
os.makedirs(resources)
db_conn = sqlite3.connect(os.path.join(resources, "docSet.dsidx"))
db_conn.row_factory = sqlite3.Row
db_conn.execute(
"CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, "
"type TEXT, path TEXT)"
)
db_conn.commit()
plist_path = os.path.join(dest, "Contents", "Info.plist")
plist_cfg = {
"CFBundleIdentifier": name,
"CFBundleName": | python | {
"resource": ""
} |
q258953 | add_icon | validation | def add_icon(icon_data, dest):
"""
Add icon to docset
"""
| python | {
"resource": ""
} |
q258954 | Bdb.run_cell | validation | def run_cell(self, cell):
"""Run the Cell code using the IPython globals and locals
Args:
cell (str): Python code to be executed
"""
globals = self.ipy_shell.user_global_ns
locals = self.ipy_shell.user_ns
| python | {
"resource": ""
} |
q258955 | filter_dict | validation | def filter_dict(d, exclude):
"""Return a new dict with specified keys excluded from the origional dict
Args:
d (dict): origional dict | python | {
"resource": ""
} |
q258956 | redirect_stdout | validation | def redirect_stdout(new_stdout):
"""Redirect the stdout
Args:
new_stdout (io.StringIO): New stdout | python | {
"resource": ""
} |
q258957 | format | validation | def format(obj, options):
"""Return a string representation of the Python object
Args:
obj: The Python object
options: Format options
"""
formatters = {
float_types: lambda x: '{:.{}g}'.format(x, options.digits),
}
for _types, fmtr in formatters.items():
if isinstance(obj, _types):
| python | {
"resource": ""
} |
q258958 | get_type_info | validation | def get_type_info(obj):
"""Get type information for a Python object
Args:
obj: The Python object
Returns:
tuple: (object type "catagory", object type name)
"""
if isinstance(obj, primitive_types):
return ('primitive', type(obj).__name__)
if isinstance(obj, sequence_types):
return ('sequence', type(obj).__name__)
if isinstance(obj, array_types):
return ('array', type(obj).__name__)
if isinstance(obj, key_value_types):
return ('key-value', type(obj).__name__)
if isinstance(obj, types.ModuleType):
return ('module', type(obj).__name__)
if isinstance(obj, (types.FunctionType, types.MethodType)):
return ('function', type(obj).__name__)
if isinstance(obj, type):
if hasattr(obj, '__dict__'):
return ('class', obj.__name__)
| python | {
"resource": ""
} |
q258959 | Wallet.spend_key | validation | def spend_key(self):
"""
Returns private spend key. None if wallet is view-only.
:rtype: str or None
"""
| python | {
"resource": ""
} |
q258960 | Wallet.transfer | validation | def transfer(self, address, amount,
priority=prio.NORMAL, payment_id=None, unlock_time=0,
relay=True):
"""
Sends a transfer from the default account. Returns a list of resulting transactions.
:param address: destination :class:`Address <monero.address.Address>` or subtype
:param amount: amount to send
:param priority: transaction priority, implies fee. The priority can be a number
from 1 to 4 (unimportant, normal, elevated, priority) or a constant
from `monero.prio`.
| python | {
"resource": ""
} |
q258961 | Wallet.transfer_multiple | validation | def transfer_multiple(self, destinations,
priority=prio.NORMAL, payment_id=None, unlock_time=0,
relay=True):
"""
Sends a batch of transfers from the default account. Returns a list of resulting
transactions.
:param destinations: a list of destination and amount pairs: [(address, amount), ...]
:param priority: transaction priority, implies fee. The priority can be a number
from 1 to 4 (unimportant, normal, elevated, priority) or a constant
from `monero.prio`.
| python | {
"resource": ""
} |
q258962 | Account.balance | validation | def balance(self, unlocked=False):
"""
Returns specified balance.
:param unlocked: if `True`, return the unlocked balance, otherwise return total balance
:rtype: Decimal | python | {
"resource": ""
} |
q258963 | Account.new_address | validation | def new_address(self, label=None):
"""
Creates a new address.
:param label: address label as | python | {
"resource": ""
} |
q258964 | Account.transfer | validation | def transfer(self, address, amount,
priority=prio.NORMAL, payment_id=None, unlock_time=0,
relay=True):
"""
Sends a transfer. Returns a list of resulting transactions.
:param address: destination :class:`Address <monero.address.Address>` or subtype
:param amount: amount to send
:param priority: transaction priority, implies fee. The priority can be a number
from 1 to 4 (unimportant, normal, elevated, priority) or a constant
from `monero.prio`.
| python | {
"resource": ""
} |
q258965 | Account.transfer_multiple | validation | def transfer_multiple(self, destinations,
priority=prio.NORMAL, payment_id=None, unlock_time=0,
relay=True):
"""
Sends a batch of transfers. Returns a list of resulting transactions.
:param destinations: a list of destination and amount pairs:
[(:class:`Address <monero.address.Address>`, `Decimal`), ...]
:param priority: transaction priority, implies fee. The priority can be a number
from 1 to 4 (unimportant, normal, elevated, priority) or a constant
from `monero.prio`.
:param payment_id: ID for the payment (must be None if
:class:`IntegratedAddress <monero.address.IntegratedAddress>`
is used as the destination)
:param unlock_time: the extra unlock delay
:param relay: if `True`, the wallet will relay the transaction(s) to the network
| python | {
"resource": ""
} |
q258966 | to_atomic | validation | def to_atomic(amount):
"""Convert Monero decimal to atomic integer of piconero."""
if not isinstance(amount, (Decimal, float) + _integer_types):
raise ValueError("Amount '{}' doesn't have numeric type. Only Decimal, int, | python | {
"resource": ""
} |
q258967 | address | validation | def address(addr, label=None):
"""Discover the proper class and return instance for a given Monero address.
:param addr: the address as a string-like object
:param label: a label for the address (defaults to `None`)
:rtype: :class:`Address`, :class:`SubAddress` or :class:`IntegratedAddress`
"""
addr = str(addr)
if _ADDR_REGEX.match(addr):
netbyte = bytearray(unhexlify(base58.decode(addr)))[0]
if netbyte in Address._valid_netbytes:
return Address(addr, label=label)
elif netbyte in SubAddress._valid_netbytes:
return SubAddress(addr, label=label)
raise ValueError("Invalid address netbyte {nb:x}. Allowed values are: {allowed}".format(
nb=netbyte,
| python | {
"resource": ""
} |
q258968 | Address.with_payment_id | validation | def with_payment_id(self, payment_id=0):
"""Integrates payment id into the address.
:param payment_id: int, hexadecimal string or :class:`PaymentID <monero.numbers.PaymentID>`
(max 64-bit long)
:rtype: `IntegratedAddress`
:raises: `TypeError` if the payment id is too long
"""
| python | {
"resource": ""
} |
q258969 | Wordlist.encode | validation | def encode(cls, hex):
"""Convert hexadecimal string to mnemonic word representation with checksum.
"""
out = []
for i in range(len(hex) // 8):
word = endian_swap(hex[8*i:8*i+8])
x = int(word, 16)
w1 = x % cls.n
w2 = (x // cls.n + w1) % cls.n
w3 = (x // cls.n // cls.n + w2) | python | {
"resource": ""
} |
q258970 | Wordlist.decode | validation | def decode(cls, phrase):
"""Calculate hexadecimal representation of the phrase.
"""
phrase = phrase.split(" ")
out = ""
for i in range(len(phrase) // 3):
word1, word2, word3 = phrase[3*i:3*i+3]
w1 = cls.word_list.index(word1)
w2 = cls.word_list.index(word2) % cls.n
| python | {
"resource": ""
} |
q258971 | Wordlist.get_checksum | validation | def get_checksum(cls, phrase):
"""Given a mnemonic word string, return a string of the computed checksum.
:rtype: str
"""
phrase_split = phrase.split(" ")
if len(phrase_split) < 12:
raise ValueError("Invalid mnemonic phrase")
if len(phrase_split) > 13:
# Standard format
phrase = phrase_split[:24]
else:
# MyMonero format
phrase = phrase_split[:12]
| python | {
"resource": ""
} |
q258972 | one | validation | def one(prompt, *args, **kwargs):
"""Instantiates a picker, registers custom handlers for going back,
and starts the picker.
"""
indicator = '‣'
if sys.version_info < (3, 0):
indicator = '>'
def go_back(picker):
return None, -1
options, verbose_options = prepare_options(args)
idx = kwargs.get('idx', 0)
picker = Picker(verbose_options, title=prompt, indicator=indicator, default_index=idx)
picker.register_custom_handler(ord('h'), go_back)
picker.register_custom_handler(curses.KEY_LEFT, go_back)
with stdout_redirected(sys.stderr):
| python | {
"resource": ""
} |
q258973 | many | validation | def many(prompt, *args, **kwargs):
"""Calls `pick` in a while loop to allow user to pick many
options. Returns a list of chosen options.
"""
def get_options(options, chosen):
return [options[i] for i, c in enumerate(chosen) if c]
def get_verbose_options(verbose_options, chosen):
no, yes = ' ', '✔'
if sys.version_info < (3, 3):
no, yes = ' ', '@'
opts = ['{} {}'.format(yes if c else no, verbose_options[i]) for i, c in enumerate(chosen)]
return opts + ['{}{}'.format(' ', kwargs.get('done', 'done...'))]
options, verbose_options = prepare_options(args)
chosen = [False] * len(options)
index = kwargs.get('idx', 0)
default = kwargs.get('default', None)
if isinstance(default, list):
for idx in default:
chosen[idx] = True | python | {
"resource": ""
} |
q258974 | prepare_options | validation | def prepare_options(options):
"""Create options and verbose options from strings and non-string iterables in
`options` array.
"""
options_, verbose_options = [], []
for option in options:
if is_string(option):
options_.append(option)
| python | {
"resource": ""
} |
q258975 | raw | validation | def raw(prompt, *args, **kwargs):
"""Calls input to allow user to input an arbitrary string. User can go
back by entering the `go_back` string. Works in both Python 2 and 3.
"""
go_back = kwargs.get('go_back', '<')
type_ = kwargs.get('type', str)
default = kwargs.get('default', '')
with stdout_redirected(sys.stderr):
while True:
try:
| python | {
"resource": ""
} |
q258976 | Condition.get_operator | validation | def get_operator(self, op):
"""Assigns function to the operators property of the instance.
"""
if op in self.OPERATORS:
return self.OPERATORS.get(op)
| python | {
"resource": ""
} |
q258977 | Question.assign_prompter | validation | def assign_prompter(self, prompter):
"""If you want to change the core prompters registry, you can
override this method in a Question subclass.
"""
if is_string(prompter):
if prompter not in prompters:
eprint("Error: | python | {
"resource": ""
} |
q258978 | Questionnaire.add | validation | def add(self, *args, **kwargs):
"""Add a Question instance to the questions dict. Each key points
to a list of Question instances with that key. Use the `question`
kwarg to pass a Question instance if you want, or pass in the same
args you would pass to instantiate a question.
"""
| python | {
"resource": ""
} |
q258979 | Questionnaire.ask | validation | def ask(self, error=None):
"""Asks the next question in the questionnaire and returns the answer,
unless user goes back.
"""
q = self.next_question
if q is None:
return
try:
answer = q.prompter(self.get_prompt(q, error), *q.prompter_args, **q.prompter_kwargs)
except QuestionnaireGoBack as e:
steps = e.args[0] if e.args else 1
if steps == 0:
self.ask() # user can redo current question even if `can_go_back` is `False`
| python | {
"resource": ""
} |
q258980 | Questionnaire.next_question | validation | def next_question(self):
"""Returns the next `Question` in the questionnaire, or `None` if there
are no questions left. Returns first question for whose key there is no
answer and for which condition is satisfied, or for which | python | {
"resource": ""
} |
q258981 | Questionnaire.go_back | validation | def go_back(self, n=1):
"""Move `n` questions back in the questionnaire by removing the last `n`
answers.
"""
if not self.can_go_back:
return
| python | {
"resource": ""
} |
q258982 | Questionnaire.format_answers | validation | def format_answers(self, fmt='obj'):
"""Formats answers depending on `fmt`.
"""
fmts = ('obj', 'array', 'plain')
if fmt not in fmts:
eprint("Error: '{}' not in {}".format(fmt, fmts))
return
def stringify(val):
if type(val) in (list, tuple):
return ', '.join(str(e) for e in val)
return val
if fmt == 'obj':
return json.dumps(self.answers)
| python | {
"resource": ""
} |
q258983 | Questionnaire.answer_display | validation | def answer_display(self, s=''):
"""Helper method for displaying the answers so far.
"""
padding = len(max(self.questions.keys(), | python | {
"resource": ""
} |
q258984 | IntentContainer.add_intent | validation | def add_intent(self, name, lines, reload_cache=False):
"""
Creates a new intent, optionally checking the cache first
Args:
name (str): The associated name of the intent
lines (list<str>): All the sentences that should activate the intent
reload_cache: | python | {
"resource": ""
} |
q258985 | IntentContainer.add_entity | validation | def add_entity(self, name, lines, reload_cache=False):
"""
Adds an entity that matches the given lines.
Example:
self.add_intent('weather', ['will it rain on {weekday}?'])
self.add_entity('{weekday}', ['monday', 'tuesday', 'wednesday']) # ... | python | {
"resource": ""
} |
q258986 | IntentContainer.load_entity | validation | def load_entity(self, name, file_name, reload_cache=False):
"""
Loads an entity, optionally checking the cache first
Args:
name (str): The associated name of the entity
file_name (str): The location of the entity file
reload_cache (bool): Whether to refresh all of cache
"""
| python | {
"resource": ""
} |
q258987 | IntentContainer.load_intent | validation | def load_intent(self, name, file_name, reload_cache=False):
"""
Loads an intent, optionally checking the cache first
Args:
name (str): The associated name of the intent
file_name (str): The location of the intent file
reload_cache (bool): Whether to refresh all of cache
| python | {
"resource": ""
} |
q258988 | IntentContainer.remove_intent | validation | def remove_intent(self, name):
"""Unload an intent"""
self.intents.remove(name)
| python | {
"resource": ""
} |
q258989 | IntentContainer.remove_entity | validation | def remove_entity(self, name):
"""Unload an entity""" | python | {
"resource": ""
} |
q258990 | IntentContainer.train | validation | def train(self, debug=True, force=False, single_thread=False, timeout=20):
"""
Trains all the loaded intents that need to be updated
If a cache file exists with the same hash as the intent file,
the intent will not be trained and just loaded from file
Args:
debug (bool): Whether to print a message to stdout each time a new intent is trained
force (bool): Whether to force training if already finished
single_thread (bool): Whether to force running in a single thread
timeout (float): Seconds before cancelling training
Returns:
bool: True if training succeeded without timeout
| python | {
"resource": ""
} |
q258991 | IntentContainer.train_subprocess | validation | def train_subprocess(self, *args, **kwargs):
"""
Trains in a subprocess which provides a timeout guarantees everything shuts down properly
Args:
See <train>
Returns:
bool: True for success, False if timed out
"""
ret = call([
sys.executable, '-m', 'padatious', 'train', self.cache_dir,
'-d', json.dumps(self.serialized_args),
'-a', json.dumps(args),
'-k', json.dumps(kwargs),
])
if ret == 2:
raise TypeError('Invalid train arguments: {} {}'.format(args, kwargs))
| python | {
"resource": ""
} |
q258992 | IntentContainer.calc_intents | validation | def calc_intents(self, query):
"""
Tests all the intents against the query and returns
data on how well each one matched against the query
Args:
query (str): Input sentence to test against intents
Returns:
list<MatchData>: List of intent matches
See calc_intent() for a description of the returned MatchData
"""
if self.must_train:
self.train()
intents = {} if self.train_thread and self.train_thread.is_alive() else {
i.name: i for i in self.intents.calc_intents(query, self.entities)
| python | {
"resource": ""
} |
q258993 | IntentContainer.calc_intent | validation | def calc_intent(self, query):
"""
Tests all the intents against the query and returns
match data of the best intent
Args:
query (str): Input sentence to test against intents
Returns:
MatchData: Best intent match
"""
matches = self.calc_intents(query)
if len(matches) == 0:
| python | {
"resource": ""
} |
q258994 | _train_and_save | validation | def _train_and_save(obj, cache, data, print_updates):
"""Internal pickleable function used to train | python | {
"resource": ""
} |
q258995 | main | validation | def main(src, pyi_dir, target_dir, incremental, quiet, replace_any, hg, traceback):
"""Re-apply type annotations from .pyi stubs to your codebase."""
Config.incremental = incremental
Config.replace_any = replace_any
returncode = 0
for src_entry in src:
for file, error, exc_type, tb in retype_path(
Path(src_entry),
pyi_dir=Path(pyi_dir),
targets=Path(target_dir),
src_explicitly_given=True,
| python | {
"resource": ""
} |
q258996 | retype_path | validation | def retype_path(
src, pyi_dir, targets, *, src_explicitly_given=False, quiet=False, hg=False
):
"""Recursively retype files or directories given. Generate errors."""
if src.is_dir():
for child in src.iterdir():
if child == pyi_dir or child == targets:
continue
yield from retype_path(
child, pyi_dir / src.name, targets / src.name, quiet=quiet, hg=hg,
)
elif src.suffix == '.py' or src_explicitly_given:
try:
| python | {
"resource": ""
} |
q258997 | retype_file | validation | def retype_file(src, pyi_dir, targets, *, quiet=False, hg=False):
"""Retype `src`, finding types in `pyi_dir`. Save in `targets`.
The file should remain formatted exactly as it was before, save for:
- annotations
- additional imports needed to satisfy annotations
- additional module-level names needed to satisfy annotations
Type comments in sources are normalized to type annotations.
"""
with tokenize.open(src) as src_buffer:
src_encoding = src_buffer.encoding
src_node = | python | {
"resource": ""
} |
q258998 | lib2to3_parse | validation | def lib2to3_parse(src_txt):
"""Given a string with source, return the lib2to3 Node."""
grammar = pygram.python_grammar_no_print_statement
drv = driver.Driver(grammar, pytree.convert)
if src_txt[-1] != '\n':
nl = '\r\n' if '\r\n' in src_txt[:1024] else '\n'
src_txt += nl
try:
result = drv.parse_string(src_txt, True)
except ParseError as pe:
lineno, column = pe.context[1]
lines = src_txt.splitlines()
try:
faulty_line = lines[lineno - 1]
except IndexError:
| python | {
"resource": ""
} |
q258999 | lib2to3_unparse | validation | def lib2to3_unparse(node, *, hg=False):
"""Given a lib2to3 node, return its string representation."""
code = str(node)
if hg:
| python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.