repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
listlengths
20
707
docstring
stringlengths
3
17.3k
docstring_tokens
listlengths
3
222
sha
stringlengths
40
40
url
stringlengths
87
242
partition
stringclasses
1 value
idx
int64
0
252k
pytroll/pyorbital
pyorbital/orbital.py
Orbital.get_position
def get_position(self, utc_time, normalize=True): """Get the cartesian position and velocity from the satellite. """ kep = self._sgdp4.propagate(utc_time) pos, vel = kep2xyz(kep) if normalize: pos /= XKMPER vel /= XKMPER * XMNPDA / SECDAY return pos, vel
python
def get_position(self, utc_time, normalize=True):
    """Get the cartesian position and velocity from the satellite.

    The orbital elements are propagated to *utc_time* with SGP4/SDP4 and
    converted to cartesian coordinates.  When *normalize* is true the
    position is expressed in earth radii and the velocity in earth radii
    per time unit; otherwise km and km/s are returned.
    """
    kep_elements = self._sgdp4.propagate(utc_time)
    position, velocity = kep2xyz(kep_elements)

    if normalize:
        position /= XKMPER
        velocity /= XKMPER * XMNPDA / SECDAY

    return position, velocity
[ "def", "get_position", "(", "self", ",", "utc_time", ",", "normalize", "=", "True", ")", ":", "kep", "=", "self", ".", "_sgdp4", ".", "propagate", "(", "utc_time", ")", "pos", ",", "vel", "=", "kep2xyz", "(", "kep", ")", "if", "normalize", ":", "pos"...
Get the cartesian position and velocity from the satellite.
[ "Get", "the", "cartesian", "position", "and", "velocity", "from", "the", "satellite", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/orbital.py#L208-L219
train
54,000
pytroll/pyorbital
pyorbital/geoloc.py
compute_pixels
def compute_pixels(orb, sgeom, times, rpy=(0.0, 0.0, 0.0)): """Compute cartesian coordinates of the pixels in instrument scan.""" if isinstance(orb, (list, tuple)): tle1, tle2 = orb orb = Orbital("mysatellite", line1=tle1, line2=tle2) # get position and velocity for each time of each pixel pos, vel = orb.get_position(times, normalize=False) # now, get the vectors pointing to each pixel vectors = sgeom.vectors(pos, vel, *rpy) # compute intersection of lines (directed by vectors and passing through # (0, 0, 0)) and ellipsoid. Derived from: # http://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection # do the computation between line and ellipsoid (WGS 84) # NB: AAPP uses GRS 80... centre = -pos a__ = 6378.137 # km # b__ = 6356.75231414 # km, GRS80 b__ = 6356.752314245 # km, WGS84 radius = np.array([[1 / a__, 1 / a__, 1 / b__]]).T shape = vectors.shape xr_ = vectors.reshape([3, -1]) * radius cr_ = centre.reshape([3, -1]) * radius ldotc = np.einsum("ij,ij->j", xr_, cr_) lsq = np.einsum("ij,ij->j", xr_, xr_) csq = np.einsum("ij,ij->j", cr_, cr_) d1_ = (ldotc - np.sqrt(ldotc ** 2 - csq * lsq + lsq)) / lsq # return the actual pixel positions return vectors * d1_.reshape(shape[1:]) - centre
python
def compute_pixels(orb, sgeom, times, rpy=(0.0, 0.0, 0.0)):
    """Compute cartesian coordinates of the pixels in instrument scan."""
    if isinstance(orb, (list, tuple)):
        line1, line2 = orb
        orb = Orbital("mysatellite", line1=line1, line2=line2)

    # Platform position and velocity at the time of each pixel.
    positions, velocities = orb.get_position(times, normalize=False)

    # Line-of-sight vectors for every pixel.
    look = sgeom.vectors(positions, velocities, *rpy)

    # Intersect each viewing line (through the satellite, directed by the
    # look vector) with the ellipsoid.  Derived from:
    # http://en.wikipedia.org/wiki/Line%E2%80%93sphere_intersection
    # The ellipsoid is WGS 84; NB: AAPP uses GRS 80...
    centre = -positions
    semi_major = 6378.137  # km
    # semi_minor = 6356.75231414  # km, GRS80
    semi_minor = 6356.752314245  # km, WGS84
    # Scaling the axes maps the ellipsoid onto the unit sphere so the
    # simple line-sphere formula applies.
    scale = np.array([[1 / semi_major, 1 / semi_major, 1 / semi_minor]]).T

    orig_shape = look.shape
    look_scaled = look.reshape([3, -1]) * scale
    centre_scaled = centre.reshape([3, -1]) * scale

    l_dot_c = np.einsum("ij,ij->j", look_scaled, centre_scaled)
    l_sq = np.einsum("ij,ij->j", look_scaled, look_scaled)
    c_sq = np.einsum("ij,ij->j", centre_scaled, centre_scaled)

    # Nearer of the two intersection distances along each look vector.
    dist = (l_dot_c - np.sqrt(l_dot_c ** 2 - c_sq * l_sq + l_sq)) / l_sq

    # Actual pixel positions in cartesian space.
    return look * dist.reshape(orig_shape[1:]) - centre
[ "def", "compute_pixels", "(", "orb", ",", "sgeom", ",", "times", ",", "rpy", "=", "(", "0.0", ",", "0.0", ",", "0.0", ")", ")", ":", "if", "isinstance", "(", "orb", ",", "(", "list", ",", "tuple", ")", ")", ":", "tle1", ",", "tle2", "=", "orb",...
Compute cartesian coordinates of the pixels in instrument scan.
[ "Compute", "cartesian", "coordinates", "of", "the", "pixels", "in", "instrument", "scan", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc.py#L204-L238
train
54,001
pytroll/pyorbital
pyorbital/geoloc.py
ScanGeometry.vectors
def vectors(self, pos, vel, roll=0.0, pitch=0.0, yaw=0.0): """Get unit vectors pointing to the different pixels. *pos* and *vel* are column vectors, or matrices of column vectors. Returns vectors as stacked rows. """ # TODO: yaw steering mode ! # Fake nadir: This is the intersection point between the satellite # looking down at the centre of the ellipsoid and the surface of the # ellipsoid. Nadir on the other hand is the point which vertical goes # through the satellite... # nadir = -pos / vnorm(pos) nadir = subpoint(-pos) nadir /= vnorm(nadir) # x is along track (roll) x = vel / vnorm(vel) # y is cross track (pitch) y = np.cross(nadir, vel, 0, 0, 0) y /= vnorm(y) # rotate first around x x_rotated = qrotate(nadir, x, self.fovs[0] + roll) # then around y xy_rotated = qrotate(x_rotated, y, self.fovs[1] + pitch) # then around z return qrotate(xy_rotated, nadir, yaw)
python
def vectors(self, pos, vel, roll=0.0, pitch=0.0, yaw=0.0):
    """Get unit vectors pointing to the different pixels.

    *pos* and *vel* are column vectors, or matrices of column vectors.
    Returns vectors as stacked rows.
    """
    # TODO: yaw steering mode !

    # Fake nadir: the intersection point between the line from the
    # satellite to the centre of the ellipsoid and the ellipsoid surface.
    # True nadir would instead be the point whose vertical passes through
    # the satellite...
    # nadir = -pos / vnorm(pos)
    fake_nadir = subpoint(-pos)
    fake_nadir /= vnorm(fake_nadir)

    # Along-track axis (roll rotations happen about it).
    along = vel / vnorm(vel)
    # Cross-track axis (pitch rotations happen about it).
    cross = np.cross(fake_nadir, vel, 0, 0, 0)
    cross /= vnorm(cross)

    # Apply the field-of-view angles plus attitude: roll about the
    # along-track axis, then pitch about the cross-track axis, then yaw
    # about the nadir direction.
    after_roll = qrotate(fake_nadir, along, self.fovs[0] + roll)
    after_pitch = qrotate(after_roll, cross, self.fovs[1] + pitch)
    return qrotate(after_pitch, fake_nadir, yaw)
[ "def", "vectors", "(", "self", ",", "pos", ",", "vel", ",", "roll", "=", "0.0", ",", "pitch", "=", "0.0", ",", "yaw", "=", "0.0", ")", ":", "# TODO: yaw steering mode !", "# Fake nadir: This is the intersection point between the satellite", "# looking down at the cent...
Get unit vectors pointing to the different pixels. *pos* and *vel* are column vectors, or matrices of column vectors. Returns vectors as stacked rows.
[ "Get", "unit", "vectors", "pointing", "to", "the", "different", "pixels", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc.py#L90-L119
train
54,002
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
avhrr
def avhrr(scans_nb, scan_points, scan_angle=55.37, frequency=1 / 6.0, apply_offset=True): """Definition of the avhrr instrument. Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm """ # build the avhrr instrument (scan angles) avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle), np.zeros((len(scan_points),)))) avhrr_inst = np.tile( avhrr_inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array # times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1]) # + np.expand_dims(offset, 1)) times = np.tile(scan_points * 0.000025, [np.int(scans_nb), 1]) if apply_offset: offset = np.arange(np.int(scans_nb)) * frequency times += np.expand_dims(offset, 1) return ScanGeometry(avhrr_inst, times)
python
def avhrr(scans_nb, scan_points,
          scan_angle=55.37, frequency=1 / 6.0, apply_offset=True):
    """Definition of the avhrr instrument.

    Source: NOAA KLM User's Guide, Appendix J
    http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm

    Args:
        scans_nb: number of scan lines.
        scan_points: across-track sample indices (on the 0..1023 grid).
        scan_angle: maximum scan angle from nadir, degrees.
        frequency: scan line period, seconds.
        apply_offset: when True, shift each line's sample times by the
            line's start time.

    Returns:
        A ScanGeometry with the per-pixel scan angles and sample times.
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; the builtin
    # int does the same job here.
    scans_nb = int(scans_nb)

    # build the avhrr instrument (scan angles)
    avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle),
                            np.zeros((len(scan_points),))))
    avhrr_inst = np.tile(avhrr_inst[:, np.newaxis, :], [1, scans_nb, 1])

    # building the corresponding times array (25 us per sample)
    times = np.tile(scan_points * 0.000025, [scans_nb, 1])
    if apply_offset:
        offset = np.arange(scans_nb) * frequency
        times += np.expand_dims(offset, 1)

    return ScanGeometry(avhrr_inst, times)
[ "def", "avhrr", "(", "scans_nb", ",", "scan_points", ",", "scan_angle", "=", "55.37", ",", "frequency", "=", "1", "/", "6.0", ",", "apply_offset", "=", "True", ")", ":", "# build the avhrr instrument (scan angles)", "avhrr_inst", "=", "np", ".", "vstack", "(",...
Definition of the avhrr instrument. Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm
[ "Definition", "of", "the", "avhrr", "instrument", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L52-L76
train
54,003
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
avhrr_gac
def avhrr_gac(scan_times, scan_points, scan_angle=55.37, frequency=0.5): """Definition of the avhrr instrument, gac version Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm """ try: offset = np.array([(t - scan_times[0]).seconds + (t - scan_times[0]).microseconds / 1000000.0 for t in scan_times]) except TypeError: offset = np.arange(scan_times) * frequency scans_nb = len(offset) avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle), np.zeros((len(scan_points),)))) avhrr_inst = np.tile( avhrr_inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array times = (np.tile(scan_points * 0.000025, [scans_nb, 1]) + np.expand_dims(offset, 1)) return ScanGeometry(avhrr_inst, times)
python
def avhrr_gac(scan_times, scan_points,
              scan_angle=55.37, frequency=0.5):
    """Definition of the avhrr instrument, gac version.

    Source: NOAA KLM User's Guide, Appendix J
    http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm

    *scan_times* is either a sequence of datetimes (one per scan line) or
    an integer line count; in the latter case lines are spaced *frequency*
    seconds apart.
    """
    try:
        # Per-line offsets in seconds relative to the first scan time.
        offset = np.array([(t - scan_times[0]).seconds +
                           (t - scan_times[0]).microseconds / 1000000.0
                           for t in scan_times])
    except TypeError:
        # scan_times is a line count, not a sequence of datetimes.
        offset = np.arange(scan_times) * frequency
    scans_nb = len(offset)

    avhrr_inst = np.vstack(((scan_points / 1023.5 - 1) * np.deg2rad(-scan_angle),
                            np.zeros((len(scan_points),))))
    # np.int was removed in NumPy 1.24; the builtin int is equivalent here.
    avhrr_inst = np.tile(avhrr_inst[:, np.newaxis, :], [1, int(scans_nb), 1])

    # building the corresponding times array (25 us per sample)
    times = (np.tile(scan_points * 0.000025, [scans_nb, 1]) +
             np.expand_dims(offset, 1))

    return ScanGeometry(avhrr_inst, times)
[ "def", "avhrr_gac", "(", "scan_times", ",", "scan_points", ",", "scan_angle", "=", "55.37", ",", "frequency", "=", "0.5", ")", ":", "try", ":", "offset", "=", "np", ".", "array", "(", "[", "(", "t", "-", "scan_times", "[", "0", "]", ")", ".", "seco...
Definition of the avhrr instrument, gac version Source: NOAA KLM User's Guide, Appendix J http://www.ncdc.noaa.gov/oa/pod-guide/ncdc/docs/klm/html/j/app-j.htm
[ "Definition", "of", "the", "avhrr", "instrument", "gac", "version" ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L79-L102
train
54,004
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
olci
def olci(scans_nb, scan_points=None): """Definition of the OLCI instrument. Source: Sentinel-3 OLCI Coverage https://sentinel.esa.int/web/sentinel/user-guides/sentinel-3-olci/coverage """ if scan_points is None: scan_len = 4000 # samples per scan scan_points = np.arange(4000) else: scan_len = len(scan_points) # scan_rate = 0.044 # single scan, seconds scan_angle_west = 46.5 # swath, degrees scan_angle_east = -22.1 # swath, degrees # sampling_interval = 18e-3 # single view, seconds # build the olci instrument scan line angles scanline_angles = np.linspace(np.deg2rad(scan_angle_west), np.deg2rad(scan_angle_east), scan_len) inst = np.vstack((scanline_angles, np.zeros(scan_len,))) inst = np.tile(inst[:, np.newaxis, :], [1, np.int(scans_nb), 1]) # building the corresponding times array # times = (np.tile(scan_points * 0.000025 + 0.0025415, [scans_nb, 1]) # + np.expand_dims(offset, 1)) times = np.tile(np.zeros_like(scanline_angles), [np.int(scans_nb), 1]) # if apply_offset: # offset = np.arange(np.int(scans_nb)) * frequency # times += np.expand_dims(offset, 1) return ScanGeometry(inst, times)
python
def olci(scans_nb, scan_points=None):
    """Definition of the OLCI instrument.

    Source: Sentinel-3 OLCI Coverage
    https://sentinel.esa.int/web/sentinel/user-guides/sentinel-3-olci/coverage

    Args:
        scans_nb: number of scan lines.
        scan_points: optional across-track sample indices; defaults to the
            full 4000-sample scan.

    Returns:
        A ScanGeometry with the per-pixel scan angles; all sample times
        are zero (no per-sample time offsets are modelled).
    """
    if scan_points is None:
        scan_len = 4000  # samples per scan
        scan_points = np.arange(4000)
    else:
        scan_len = len(scan_points)

    # np.int was removed in NumPy 1.24; use the builtin int instead.
    scans_nb = int(scans_nb)

    # scan_rate = 0.044  # single scan, seconds
    scan_angle_west = 46.5  # swath, degrees
    scan_angle_east = -22.1  # swath, degrees
    # sampling_interval = 18e-3  # single view, seconds

    # build the olci instrument scan line angles
    scanline_angles = np.linspace(np.deg2rad(scan_angle_west),
                                  np.deg2rad(scan_angle_east), scan_len)
    inst = np.vstack((scanline_angles, np.zeros(scan_len,)))
    inst = np.tile(inst[:, np.newaxis, :], [1, scans_nb, 1])

    # building the corresponding times array
    times = np.tile(np.zeros_like(scanline_angles), [scans_nb, 1])

    return ScanGeometry(inst, times)
[ "def", "olci", "(", "scans_nb", ",", "scan_points", "=", "None", ")", ":", "if", "scan_points", "is", "None", ":", "scan_len", "=", "4000", "# samples per scan", "scan_points", "=", "np", ".", "arange", "(", "4000", ")", "else", ":", "scan_len", "=", "le...
Definition of the OLCI instrument. Source: Sentinel-3 OLCI Coverage https://sentinel.esa.int/web/sentinel/user-guides/sentinel-3-olci/coverage
[ "Definition", "of", "the", "OLCI", "instrument", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L434-L466
train
54,005
pytroll/pyorbital
pyorbital/geoloc_instrument_definitions.py
ascat
def ascat(scan_nb, scan_points=None): """ASCAT make two scans one to the left and one to the right of the sub-satellite track. """ if scan_points is None: scan_len = 42 # samples per scan scan_points = np.arange(42) else: scan_len = len(scan_points) scan_angle_inner = -25.0 # swath, degrees scan_angle_outer = -53.0 # swath, degrees scan_rate = 3.74747474747 # single scan, seconds if scan_len < 2: raise ValueError("Need at least two scan points!") sampling_interval = scan_rate / float(np.max(scan_points) + 1) # build the Metop/ascat instrument scan line angles scanline_angles_one = np.linspace(-np.deg2rad(scan_angle_outer), -np.deg2rad(scan_angle_inner), 21) scanline_angles_two = np.linspace(np.deg2rad(scan_angle_inner), np.deg2rad(scan_angle_outer), 21) scan_angles = np.concatenate( [scanline_angles_one, scanline_angles_two])[scan_points] inst = np.vstack((scan_angles, np.zeros(scan_len * 1,))) inst = np.tile(inst[:, np.newaxis, :], [1, np.int(scan_nb), 1]) # building the corresponding times array offset = np.arange(scan_nb) * scan_rate times = (np.tile(scan_points * sampling_interval, [np.int(scan_nb), 1]) + np.expand_dims(offset, 1)) return ScanGeometry(inst, times)
python
def ascat(scan_nb, scan_points=None):
    """ASCAT make two scans one to the left and one to the right of the
    sub-satellite track.

    Args:
        scan_nb: number of scan lines.
        scan_points: optional sample indices into the 42-point scan
            (21 left + 21 right of the track); defaults to all 42.

    Returns:
        A ScanGeometry with the per-pixel scan angles and sample times.

    Raises:
        ValueError: if fewer than two scan points are requested.
    """
    if scan_points is None:
        scan_len = 42  # samples per scan
        scan_points = np.arange(42)
    else:
        scan_len = len(scan_points)

    # Guard early: the geometry needs at least two samples per line.
    if scan_len < 2:
        raise ValueError("Need at least two scan points!")

    # np.int was removed in NumPy 1.24; use the builtin int instead.
    scan_nb = int(scan_nb)

    scan_angle_inner = -25.0  # swath, degrees
    scan_angle_outer = -53.0  # swath, degrees
    scan_rate = 3.74747474747  # single scan, seconds

    sampling_interval = scan_rate / float(np.max(scan_points) + 1)

    # build the Metop/ascat instrument scan line angles: the left swath
    # runs outer->inner, the right swath inner->outer.
    scanline_angles_one = np.linspace(-np.deg2rad(scan_angle_outer),
                                      -np.deg2rad(scan_angle_inner), 21)
    scanline_angles_two = np.linspace(np.deg2rad(scan_angle_inner),
                                      np.deg2rad(scan_angle_outer), 21)
    scan_angles = np.concatenate(
        [scanline_angles_one, scanline_angles_two])[scan_points]

    inst = np.vstack((scan_angles, np.zeros(scan_len,)))
    inst = np.tile(inst[:, np.newaxis, :], [1, scan_nb, 1])

    # building the corresponding times array
    offset = np.arange(scan_nb) * scan_rate
    times = (np.tile(scan_points * sampling_interval, [scan_nb, 1]) +
             np.expand_dims(offset, 1))

    return ScanGeometry(inst, times)
[ "def", "ascat", "(", "scan_nb", ",", "scan_points", "=", "None", ")", ":", "if", "scan_points", "is", "None", ":", "scan_len", "=", "42", "# samples per scan", "scan_points", "=", "np", ".", "arange", "(", "42", ")", "else", ":", "scan_len", "=", "len", ...
ASCAT make two scans one to the left and one to the right of the sub-satellite track.
[ "ASCAT", "make", "two", "scans", "one", "to", "the", "left", "and", "one", "to", "the", "right", "of", "the", "sub", "-", "satellite", "track", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/geoloc_instrument_definitions.py#L469-L507
train
54,006
pytroll/pyorbital
pyorbital/tlefile.py
read
def read(platform, tle_file=None, line1=None, line2=None): """Read TLE for `platform` from `tle_file` File is read from `line1` to `line2`, from the newest file provided in the TLES pattern, or from internet if none is provided. """ return Tle(platform, tle_file=tle_file, line1=line1, line2=line2)
python
def read(platform, tle_file=None, line1=None, line2=None):
    """Read TLE for `platform`.

    The two lines are taken from `line1`/`line2` when given, otherwise
    from `tle_file`, otherwise from the newest file matching the TLES
    pattern, and finally from the internet if none of those is provided.
    """
    sources = {"tle_file": tle_file, "line1": line1, "line2": line2}
    return Tle(platform, **sources)
[ "def", "read", "(", "platform", ",", "tle_file", "=", "None", ",", "line1", "=", "None", ",", "line2", "=", "None", ")", ":", "return", "Tle", "(", "platform", ",", "tle_file", "=", "tle_file", ",", "line1", "=", "line1", ",", "line2", "=", "line2", ...
Read TLE for `platform` from `tle_file` File is read from `line1` to `line2`, from the newest file provided in the TLES pattern, or from internet if none is provided.
[ "Read", "TLE", "for", "platform", "from", "tle_file" ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L91-L97
train
54,007
pytroll/pyorbital
pyorbital/tlefile.py
fetch
def fetch(destination): """Fetch TLE from internet and save it to `destination`.""" with io.open(destination, mode="w", encoding="utf-8") as dest: for url in TLE_URLS: response = urlopen(url) dest.write(response.read().decode("utf-8"))
python
def fetch(destination):
    """Fetch TLE from internet and save it to `destination`.

    All TLE_URLS are downloaded and their contents concatenated into the
    single `destination` file (UTF-8 text).
    """
    with io.open(destination, mode="w", encoding="utf-8") as dest:
        for url in TLE_URLS:
            response = urlopen(url)
            # Close the HTTP response explicitly: the original left the
            # connection open, leaking a socket per URL.
            try:
                dest.write(response.read().decode("utf-8"))
            finally:
                response.close()
[ "def", "fetch", "(", "destination", ")", ":", "with", "io", ".", "open", "(", "destination", ",", "mode", "=", "\"w\"", ",", "encoding", "=", "\"utf-8\"", ")", "as", "dest", ":", "for", "url", "in", "TLE_URLS", ":", "response", "=", "urlopen", "(", "...
Fetch TLE from internet and save it to `destination`.
[ "Fetch", "TLE", "from", "internet", "and", "save", "it", "to", "destination", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L100-L105
train
54,008
pytroll/pyorbital
pyorbital/tlefile.py
Tle._checksum
def _checksum(self): """Performs the checksum for the current TLE.""" for line in [self._line1, self._line2]: check = 0 for char in line[:-1]: if char.isdigit(): check += int(char) if char == "-": check += 1 if (check % 10) != int(line[-1]): raise ChecksumError(self._platform + " " + line)
python
def _checksum(self):
    """Performs the checksum for the current TLE.

    Per the TLE format, the checksum of a line is the sum of its digits
    plus one for every minus sign, modulo 10, and must equal the line's
    final character.  Raises ChecksumError on the first mismatch.
    """
    for line in [self._line1, self._line2]:
        body = line[:-1]
        computed = sum(int(char) for char in body if char.isdigit())
        computed += body.count("-")
        if computed % 10 != int(line[-1]):
            raise ChecksumError(self._platform + " " + line)
[ "def", "_checksum", "(", "self", ")", ":", "for", "line", "in", "[", "self", ".", "_line1", ",", "self", ".", "_line2", "]", ":", "check", "=", "0", "for", "char", "in", "line", "[", ":", "-", "1", "]", ":", "if", "char", ".", "isdigit", "(", ...
Performs the checksum for the current TLE.
[ "Performs", "the", "checksum", "for", "the", "current", "TLE", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L162-L173
train
54,009
pytroll/pyorbital
pyorbital/tlefile.py
Tle._read_tle
def _read_tle(self): """Read TLE data.""" if self._line1 is not None and self._line2 is not None: tle = self._line1.strip() + "\n" + self._line2.strip() else: def _open(filename): return io.open(filename, 'rb') if self._tle_file: urls = (self._tle_file,) open_func = _open elif "TLES" in os.environ: # TODO: get the TLE file closest in time to the actual satellite # overpass, NOT the latest! urls = (max(glob.glob(os.environ["TLES"]), key=os.path.getctime), ) LOGGER.debug("Reading TLE from %s", urls[0]) open_func = _open else: LOGGER.debug("Fetch TLE from the internet.") urls = TLE_URLS open_func = urlopen tle = "" designator = "1 " + SATELLITES.get(self._platform, '') for url in urls: fid = open_func(url) for l_0 in fid: l_0 = l_0.decode('utf-8') if l_0.strip() == self._platform: l_1 = next(fid).decode('utf-8') l_2 = next(fid).decode('utf-8') tle = l_1.strip() + "\n" + l_2.strip() break if(self._platform in SATELLITES and l_0.strip().startswith(designator)): l_1 = l_0 l_2 = next(fid).decode('utf-8') tle = l_1.strip() + "\n" + l_2.strip() LOGGER.debug("Found platform %s, ID: %s", self._platform, SATELLITES[self._platform]) break fid.close() if tle: break if not tle: raise KeyError("Found no TLE entry for '%s'" % self._platform) self._line1, self._line2 = tle.split('\n')
python
def _read_tle(self):
    """Read TLE data.

    Populates self._line1/self._line2.  If both lines were supplied at
    construction they are used directly; otherwise the TLE is looked up
    in self._tle_file, then in the newest file matching the TLES
    environment glob, and finally fetched from the internet (TLE_URLS).

    Raises:
        KeyError: if no entry for the platform is found in any source.
    """
    if self._line1 is not None and self._line2 is not None:
        tle = self._line1.strip() + "\n" + self._line2.strip()
    else:
        # Local files are opened in binary mode so that both branches
        # (file and urlopen) yield bytes to decode uniformly below.
        def _open(filename):
            return io.open(filename, 'rb')

        if self._tle_file:
            urls = (self._tle_file,)
            open_func = _open
        elif "TLES" in os.environ:
            # TODO: get the TLE file closest in time to the actual satellite
            # overpass, NOT the latest!
            urls = (max(glob.glob(os.environ["TLES"]),
                        key=os.path.getctime), )
            LOGGER.debug("Reading TLE from %s", urls[0])
            open_func = _open
        else:
            LOGGER.debug("Fetch TLE from the internet.")
            urls = TLE_URLS
            open_func = urlopen

        tle = ""
        # Line-1 prefix for catalogue-number matching, e.g. "1 25544".
        designator = "1 " + SATELLITES.get(self._platform, '')
        for url in urls:
            fid = open_func(url)
            for l_0 in fid:
                l_0 = l_0.decode('utf-8')
                if l_0.strip() == self._platform:
                    # Platform name line found: the next two lines are
                    # the TLE itself.
                    l_1 = next(fid).decode('utf-8')
                    l_2 = next(fid).decode('utf-8')
                    tle = l_1.strip() + "\n" + l_2.strip()
                    break
                # Fallback match on the satellite catalogue number when
                # the file has no name lines.
                if(self._platform in SATELLITES and
                   l_0.strip().startswith(designator)):
                    l_1 = l_0
                    l_2 = next(fid).decode('utf-8')
                    tle = l_1.strip() + "\n" + l_2.strip()
                    LOGGER.debug("Found platform %s, ID: %s",
                                 self._platform,
                                 SATELLITES[self._platform])
                    break
            fid.close()
            if tle:
                break

        if not tle:
            raise KeyError("Found no TLE entry for '%s'" % self._platform)

    self._line1, self._line2 = tle.split('\n')
[ "def", "_read_tle", "(", "self", ")", ":", "if", "self", ".", "_line1", "is", "not", "None", "and", "self", ".", "_line2", "is", "not", "None", ":", "tle", "=", "self", ".", "_line1", ".", "strip", "(", ")", "+", "\"\\n\"", "+", "self", ".", "_li...
Read TLE data.
[ "Read", "TLE", "data", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L175-L225
train
54,010
pytroll/pyorbital
pyorbital/tlefile.py
Tle._parse_tle
def _parse_tle(self): """Parse values from TLE data.""" def _read_tle_decimal(rep): """Convert *rep* to decimal value.""" if rep[0] in ["-", " ", "+"]: digits = rep[1:-2].strip() val = rep[0] + "." + digits + "e" + rep[-2:] else: digits = rep[:-2].strip() val = "." + digits + "e" + rep[-2:] return float(val) self.satnumber = self._line1[2:7] self.classification = self._line1[7] self.id_launch_year = self._line1[9:11] self.id_launch_number = self._line1[11:14] self.id_launch_piece = self._line1[14:17] self.epoch_year = self._line1[18:20] self.epoch_day = float(self._line1[20:32]) self.epoch = \ np.datetime64(datetime.datetime.strptime(self.epoch_year, "%y") + datetime.timedelta(days=self.epoch_day - 1), 'us') self.mean_motion_derivative = float(self._line1[33:43]) self.mean_motion_sec_derivative = _read_tle_decimal(self._line1[44:52]) self.bstar = _read_tle_decimal(self._line1[53:61]) try: self.ephemeris_type = int(self._line1[62]) except ValueError: self.ephemeris_type = 0 self.element_number = int(self._line1[64:68]) self.inclination = float(self._line2[8:16]) self.right_ascension = float(self._line2[17:25]) self.excentricity = int(self._line2[26:33]) * 10 ** -7 self.arg_perigee = float(self._line2[34:42]) self.mean_anomaly = float(self._line2[43:51]) self.mean_motion = float(self._line2[52:63]) self.orbit = int(self._line2[63:68])
python
def _parse_tle(self):
    """Parse values from TLE data.

    Slices follow the fixed column layout of the standard NORAD
    two-line element format and set the orbital-element attributes
    on self.
    """
    def _read_tle_decimal(rep):
        """Convert *rep* to decimal value.

        TLE floats carry an implied leading decimal point and a
        two-character signed exponent, e.g. "-11606-4" -> -0.11606e-4.
        """
        if rep[0] in ["-", " ", "+"]:
            digits = rep[1:-2].strip()
            val = rep[0] + "." + digits + "e" + rep[-2:]
        else:
            digits = rep[:-2].strip()
            val = "." + digits + "e" + rep[-2:]
        return float(val)

    # --- Line 1 fields ---
    self.satnumber = self._line1[2:7]
    self.classification = self._line1[7]
    self.id_launch_year = self._line1[9:11]
    self.id_launch_number = self._line1[11:14]
    self.id_launch_piece = self._line1[14:17]
    self.epoch_year = self._line1[18:20]
    self.epoch_day = float(self._line1[20:32])
    # Epoch day 1.0 corresponds to midnight, Jan 1 of the epoch year.
    self.epoch = \
        np.datetime64(datetime.datetime.strptime(self.epoch_year, "%y") +
                      datetime.timedelta(days=self.epoch_day - 1), 'us')
    self.mean_motion_derivative = float(self._line1[33:43])
    self.mean_motion_sec_derivative = _read_tle_decimal(self._line1[44:52])
    self.bstar = _read_tle_decimal(self._line1[53:61])
    try:
        self.ephemeris_type = int(self._line1[62])
    except ValueError:
        # Field may be blank in some sources; default to 0.
        self.ephemeris_type = 0
    self.element_number = int(self._line1[64:68])

    # --- Line 2 fields ---
    self.inclination = float(self._line2[8:16])
    self.right_ascension = float(self._line2[17:25])
    # Eccentricity is stored as 7 digits with an implied leading "0.".
    self.excentricity = int(self._line2[26:33]) * 10 ** -7
    self.arg_perigee = float(self._line2[34:42])
    self.mean_anomaly = float(self._line2[43:51])
    self.mean_motion = float(self._line2[52:63])
    self.orbit = int(self._line2[63:68])
[ "def", "_parse_tle", "(", "self", ")", ":", "def", "_read_tle_decimal", "(", "rep", ")", ":", "\"\"\"Convert *rep* to decimal value.\"\"\"", "if", "rep", "[", "0", "]", "in", "[", "\"-\"", ",", "\" \"", ",", "\"+\"", "]", ":", "digits", "=", "rep", "[", ...
Parse values from TLE data.
[ "Parse", "values", "from", "TLE", "data", "." ]
647007934dc827a4c698629cf32a84a5167844b2
https://github.com/pytroll/pyorbital/blob/647007934dc827a4c698629cf32a84a5167844b2/pyorbital/tlefile.py#L227-L266
train
54,011
lorien/user_agent
user_agent/base.py
build_system_components
def build_system_components(device_type, os_id, navigator_id): """ For given os_id build random platform and oscpu components Returns dict {platform_version, platform, ua_platform, oscpu} platform_version is OS name used in different places ua_platform goes to navigator.platform platform is used in building navigator.userAgent oscpu goes to navigator.oscpu """ if os_id == 'win': platform_version = choice(OS_PLATFORM['win']) cpu = choice(OS_CPU['win']) if cpu: platform = '%s; %s' % (platform_version, cpu) else: platform = platform_version res = { 'platform_version': platform_version, 'platform': platform, 'ua_platform': platform, 'oscpu': platform, } elif os_id == 'linux': cpu = choice(OS_CPU['linux']) platform_version = choice(OS_PLATFORM['linux']) platform = '%s %s' % (platform_version, cpu) res = { 'platform_version': platform_version, 'platform': platform, 'ua_platform': platform, 'oscpu': 'Linux %s' % cpu, } elif os_id == 'mac': cpu = choice(OS_CPU['mac']) platform_version = choice(OS_PLATFORM['mac']) platform = platform_version if navigator_id == 'chrome': platform = fix_chrome_mac_platform(platform) res = { 'platform_version': platform_version, 'platform': 'MacIntel', 'ua_platform': platform, 'oscpu': 'Intel Mac OS X %s' % platform.split(' ')[-1], } elif os_id == 'android': assert navigator_id in ('firefox', 'chrome') assert device_type in ('smartphone', 'tablet') platform_version = choice(OS_PLATFORM['android']) if navigator_id == 'firefox': if device_type == 'smartphone': ua_platform = '%s; Mobile' % platform_version elif device_type == 'tablet': ua_platform = '%s; Tablet' % platform_version elif navigator_id == 'chrome': device_id = choice(SMARTPHONE_DEV_IDS) ua_platform = 'Linux; %s; %s' % (platform_version, device_id) oscpu = 'Linux %s' % choice(OS_CPU['android']) res = { 'platform_version': platform_version, 'ua_platform': ua_platform, 'platform': oscpu, 'oscpu': oscpu, } return res
python
def build_system_components(device_type, os_id, navigator_id): """ For given os_id build random platform and oscpu components Returns dict {platform_version, platform, ua_platform, oscpu} platform_version is OS name used in different places ua_platform goes to navigator.platform platform is used in building navigator.userAgent oscpu goes to navigator.oscpu """ if os_id == 'win': platform_version = choice(OS_PLATFORM['win']) cpu = choice(OS_CPU['win']) if cpu: platform = '%s; %s' % (platform_version, cpu) else: platform = platform_version res = { 'platform_version': platform_version, 'platform': platform, 'ua_platform': platform, 'oscpu': platform, } elif os_id == 'linux': cpu = choice(OS_CPU['linux']) platform_version = choice(OS_PLATFORM['linux']) platform = '%s %s' % (platform_version, cpu) res = { 'platform_version': platform_version, 'platform': platform, 'ua_platform': platform, 'oscpu': 'Linux %s' % cpu, } elif os_id == 'mac': cpu = choice(OS_CPU['mac']) platform_version = choice(OS_PLATFORM['mac']) platform = platform_version if navigator_id == 'chrome': platform = fix_chrome_mac_platform(platform) res = { 'platform_version': platform_version, 'platform': 'MacIntel', 'ua_platform': platform, 'oscpu': 'Intel Mac OS X %s' % platform.split(' ')[-1], } elif os_id == 'android': assert navigator_id in ('firefox', 'chrome') assert device_type in ('smartphone', 'tablet') platform_version = choice(OS_PLATFORM['android']) if navigator_id == 'firefox': if device_type == 'smartphone': ua_platform = '%s; Mobile' % platform_version elif device_type == 'tablet': ua_platform = '%s; Tablet' % platform_version elif navigator_id == 'chrome': device_id = choice(SMARTPHONE_DEV_IDS) ua_platform = 'Linux; %s; %s' % (platform_version, device_id) oscpu = 'Linux %s' % choice(OS_CPU['android']) res = { 'platform_version': platform_version, 'ua_platform': ua_platform, 'platform': oscpu, 'oscpu': oscpu, } return res
[ "def", "build_system_components", "(", "device_type", ",", "os_id", ",", "navigator_id", ")", ":", "if", "os_id", "==", "'win'", ":", "platform_version", "=", "choice", "(", "OS_PLATFORM", "[", "'win'", "]", ")", "cpu", "=", "choice", "(", "OS_CPU", "[", "...
For given os_id build random platform and oscpu components Returns dict {platform_version, platform, ua_platform, oscpu} platform_version is OS name used in different places ua_platform goes to navigator.platform platform is used in building navigator.userAgent oscpu goes to navigator.oscpu
[ "For", "given", "os_id", "build", "random", "platform", "and", "oscpu", "components" ]
f37f45d9cf914dd1e535b272ba120626d1f5682d
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L266-L333
train
54,012
lorien/user_agent
user_agent/base.py
build_app_components
def build_app_components(os_id, navigator_id): """ For given navigator_id build app features Returns dict {name, product_sub, vendor, build_version, build_id} """ if navigator_id == 'firefox': build_version, build_id = get_firefox_build() if os_id in ('win', 'linux', 'mac'): geckotrail = '20100101' else: geckotrail = build_version res = { 'name': 'Netscape', 'product_sub': '20100101', 'vendor': '', 'build_version': build_version, 'build_id': build_id, 'geckotrail': geckotrail, } elif navigator_id == 'chrome': res = { 'name': 'Netscape', 'product_sub': '20030107', 'vendor': 'Google Inc.', 'build_version': get_chrome_build(), 'build_id': None, } elif navigator_id == 'ie': num_ver, build_version, trident_version = get_ie_build() if num_ver >= 11: app_name = 'Netscape' else: app_name = 'Microsoft Internet Explorer' res = { 'name': app_name, 'product_sub': None, 'vendor': '', 'build_version': build_version, 'build_id': None, 'trident_version': trident_version, } return res
python
def build_app_components(os_id, navigator_id): """ For given navigator_id build app features Returns dict {name, product_sub, vendor, build_version, build_id} """ if navigator_id == 'firefox': build_version, build_id = get_firefox_build() if os_id in ('win', 'linux', 'mac'): geckotrail = '20100101' else: geckotrail = build_version res = { 'name': 'Netscape', 'product_sub': '20100101', 'vendor': '', 'build_version': build_version, 'build_id': build_id, 'geckotrail': geckotrail, } elif navigator_id == 'chrome': res = { 'name': 'Netscape', 'product_sub': '20030107', 'vendor': 'Google Inc.', 'build_version': get_chrome_build(), 'build_id': None, } elif navigator_id == 'ie': num_ver, build_version, trident_version = get_ie_build() if num_ver >= 11: app_name = 'Netscape' else: app_name = 'Microsoft Internet Explorer' res = { 'name': app_name, 'product_sub': None, 'vendor': '', 'build_version': build_version, 'build_id': None, 'trident_version': trident_version, } return res
[ "def", "build_app_components", "(", "os_id", ",", "navigator_id", ")", ":", "if", "navigator_id", "==", "'firefox'", ":", "build_version", ",", "build_id", "=", "get_firefox_build", "(", ")", "if", "os_id", "in", "(", "'win'", ",", "'linux'", ",", "'mac'", "...
For given navigator_id build app features Returns dict {name, product_sub, vendor, build_version, build_id}
[ "For", "given", "navigator_id", "build", "app", "features" ]
f37f45d9cf914dd1e535b272ba120626d1f5682d
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L336-L379
train
54,013
lorien/user_agent
user_agent/base.py
get_option_choices
def get_option_choices(opt_name, opt_value, default_value, all_choices): """ Generate possible choices for the option `opt_name` limited to `opt_value` value with default value as `default_value` """ choices = [] if isinstance(opt_value, six.string_types): choices = [opt_value] elif isinstance(opt_value, (list, tuple)): choices = list(opt_value) elif opt_value is None: choices = default_value else: raise InvalidOption('Option %s has invalid' ' value: %s' % (opt_name, opt_value)) if 'all' in choices: choices = all_choices for item in choices: if item not in all_choices: raise InvalidOption('Choices of option %s contains invalid' ' item: %s' % (opt_name, item)) return choices
python
def get_option_choices(opt_name, opt_value, default_value, all_choices): """ Generate possible choices for the option `opt_name` limited to `opt_value` value with default value as `default_value` """ choices = [] if isinstance(opt_value, six.string_types): choices = [opt_value] elif isinstance(opt_value, (list, tuple)): choices = list(opt_value) elif opt_value is None: choices = default_value else: raise InvalidOption('Option %s has invalid' ' value: %s' % (opt_name, opt_value)) if 'all' in choices: choices = all_choices for item in choices: if item not in all_choices: raise InvalidOption('Choices of option %s contains invalid' ' item: %s' % (opt_name, item)) return choices
[ "def", "get_option_choices", "(", "opt_name", ",", "opt_value", ",", "default_value", ",", "all_choices", ")", ":", "choices", "=", "[", "]", "if", "isinstance", "(", "opt_value", ",", "six", ".", "string_types", ")", ":", "choices", "=", "[", "opt_value", ...
Generate possible choices for the option `opt_name` limited to `opt_value` value with default value as `default_value`
[ "Generate", "possible", "choices", "for", "the", "option", "opt_name", "limited", "to", "opt_value", "value", "with", "default", "value", "as", "default_value" ]
f37f45d9cf914dd1e535b272ba120626d1f5682d
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L382-L405
train
54,014
lorien/user_agent
user_agent/base.py
generate_navigator
def generate_navigator(os=None, navigator=None, platform=None, device_type=None): """ Generates web navigator's config :param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent config :rtype: dict with keys (os, name, platform, oscpu, build_version, build_id, app_version, app_name, app_code_name, product, product_sub, vendor, vendor_sub, user_agent) :raises InvalidOption: if could not generate user-agent for any combination of allowed platforms and navigators :raise InvalidOption: if any of passed options is invalid """ if platform is not None: os = platform warn('The `platform` option is deprecated.' ' Use `os` option instead.', stacklevel=3) device_type, os_id, navigator_id = ( pick_config_ids(device_type, os, navigator) ) system = build_system_components( device_type, os_id, navigator_id) app = build_app_components(os_id, navigator_id) ua_template = choose_ua_template( device_type, navigator_id, app) user_agent = ua_template.format(system=system, app=app) app_version = build_navigator_app_version( os_id, navigator_id, system['platform_version'], user_agent) return { # ids 'os_id': os_id, 'navigator_id': navigator_id, # system components 'platform': system['platform'], 'oscpu': system['oscpu'], # app components 'build_version': app['build_version'], 'build_id': app['build_id'], 'app_version': app_version, 'app_name': app['name'], 'app_code_name': 'Mozilla', 'product': 'Gecko', 'product_sub': app['product_sub'], 'vendor': app['vendor'], 'vendor_sub': '', # compiled user agent 'user_agent': user_agent, }
python
def generate_navigator(os=None, navigator=None, platform=None, device_type=None): """ Generates web navigator's config :param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent config :rtype: dict with keys (os, name, platform, oscpu, build_version, build_id, app_version, app_name, app_code_name, product, product_sub, vendor, vendor_sub, user_agent) :raises InvalidOption: if could not generate user-agent for any combination of allowed platforms and navigators :raise InvalidOption: if any of passed options is invalid """ if platform is not None: os = platform warn('The `platform` option is deprecated.' ' Use `os` option instead.', stacklevel=3) device_type, os_id, navigator_id = ( pick_config_ids(device_type, os, navigator) ) system = build_system_components( device_type, os_id, navigator_id) app = build_app_components(os_id, navigator_id) ua_template = choose_ua_template( device_type, navigator_id, app) user_agent = ua_template.format(system=system, app=app) app_version = build_navigator_app_version( os_id, navigator_id, system['platform_version'], user_agent) return { # ids 'os_id': os_id, 'navigator_id': navigator_id, # system components 'platform': system['platform'], 'oscpu': system['oscpu'], # app components 'build_version': app['build_version'], 'build_id': app['build_id'], 'app_version': app_version, 'app_name': app['name'], 'app_code_name': 'Mozilla', 'product': 'Gecko', 'product_sub': app['product_sub'], 'vendor': app['vendor'], 'vendor_sub': '', # compiled user agent 'user_agent': user_agent, }
[ "def", "generate_navigator", "(", "os", "=", "None", ",", "navigator", "=", "None", ",", "platform", "=", "None", ",", "device_type", "=", "None", ")", ":", "if", "platform", "is", "not", "None", ":", "os", "=", "platform", "warn", "(", "'The `platform` ...
Generates web navigator's config :param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent config :rtype: dict with keys (os, name, platform, oscpu, build_version, build_id, app_version, app_name, app_code_name, product, product_sub, vendor, vendor_sub, user_agent) :raises InvalidOption: if could not generate user-agent for any combination of allowed platforms and navigators :raise InvalidOption: if any of passed options is invalid
[ "Generates", "web", "navigator", "s", "config" ]
f37f45d9cf914dd1e535b272ba120626d1f5682d
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L489-L546
train
54,015
lorien/user_agent
user_agent/base.py
generate_user_agent
def generate_user_agent(os=None, navigator=None, platform=None, device_type=None): """ Generates HTTP User-Agent header :param os: limit list of os for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent string :rtype: string :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid """ return generate_navigator(os=os, navigator=navigator, platform=platform, device_type=device_type)['user_agent']
python
def generate_user_agent(os=None, navigator=None, platform=None, device_type=None): """ Generates HTTP User-Agent header :param os: limit list of os for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent string :rtype: string :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid """ return generate_navigator(os=os, navigator=navigator, platform=platform, device_type=device_type)['user_agent']
[ "def", "generate_user_agent", "(", "os", "=", "None", ",", "navigator", "=", "None", ",", "platform", "=", "None", ",", "device_type", "=", "None", ")", ":", "return", "generate_navigator", "(", "os", "=", "os", ",", "navigator", "=", "navigator", ",", "...
Generates HTTP User-Agent header :param os: limit list of os for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent string :rtype: string :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid
[ "Generates", "HTTP", "User", "-", "Agent", "header" ]
f37f45d9cf914dd1e535b272ba120626d1f5682d
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L549-L569
train
54,016
lorien/user_agent
user_agent/base.py
generate_navigator_js
def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None): """ Generates web navigator's config with keys corresponding to keys of `windows.navigator` JavaScript object. :param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent config :rtype: dict with keys (TODO) :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid """ config = generate_navigator(os=os, navigator=navigator, platform=platform, device_type=device_type) return { 'appCodeName': config['app_code_name'], 'appName': config['app_name'], 'appVersion': config['app_version'], 'platform': config['platform'], 'userAgent': config['user_agent'], 'oscpu': config['oscpu'], 'product': config['product'], 'productSub': config['product_sub'], 'vendor': config['vendor'], 'vendorSub': config['vendor_sub'], 'buildID': config['build_id'], }
python
def generate_navigator_js(os=None, navigator=None, platform=None, device_type=None): """ Generates web navigator's config with keys corresponding to keys of `windows.navigator` JavaScript object. :param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent config :rtype: dict with keys (TODO) :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid """ config = generate_navigator(os=os, navigator=navigator, platform=platform, device_type=device_type) return { 'appCodeName': config['app_code_name'], 'appName': config['app_name'], 'appVersion': config['app_version'], 'platform': config['platform'], 'userAgent': config['user_agent'], 'oscpu': config['oscpu'], 'product': config['product'], 'productSub': config['product_sub'], 'vendor': config['vendor'], 'vendorSub': config['vendor_sub'], 'buildID': config['build_id'], }
[ "def", "generate_navigator_js", "(", "os", "=", "None", ",", "navigator", "=", "None", ",", "platform", "=", "None", ",", "device_type", "=", "None", ")", ":", "config", "=", "generate_navigator", "(", "os", "=", "os", ",", "navigator", "=", "navigator", ...
Generates web navigator's config with keys corresponding to keys of `windows.navigator` JavaScript object. :param os: limit list of oses for generation :type os: string or list/tuple or None :param navigator: limit list of browser engines for generation :type navigator: string or list/tuple or None :param device_type: limit possible oses by device type :type device_type: list/tuple or None, possible values: "desktop", "smartphone", "tablet", "all" :return: User-Agent config :rtype: dict with keys (TODO) :raises InvalidOption: if could not generate user-agent for any combination of allowed oses and navigators :raise InvalidOption: if any of passed options is invalid
[ "Generates", "web", "navigator", "s", "config", "with", "keys", "corresponding", "to", "keys", "of", "windows", ".", "navigator", "JavaScript", "object", "." ]
f37f45d9cf914dd1e535b272ba120626d1f5682d
https://github.com/lorien/user_agent/blob/f37f45d9cf914dd1e535b272ba120626d1f5682d/user_agent/base.py#L572-L607
train
54,017
thiezn/iperf3-python
iperf3/iperf3.py
more_data
def more_data(pipe_out): """Check if there is more data left on the pipe :param pipe_out: The os pipe_out :rtype: bool """ r, _, _ = select.select([pipe_out], [], [], 0) return bool(r)
python
def more_data(pipe_out): """Check if there is more data left on the pipe :param pipe_out: The os pipe_out :rtype: bool """ r, _, _ = select.select([pipe_out], [], [], 0) return bool(r)
[ "def", "more_data", "(", "pipe_out", ")", ":", "r", ",", "_", ",", "_", "=", "select", ".", "select", "(", "[", "pipe_out", "]", ",", "[", "]", ",", "[", "]", ",", "0", ")", "return", "bool", "(", "r", ")" ]
Check if there is more data left on the pipe :param pipe_out: The os pipe_out :rtype: bool
[ "Check", "if", "there", "is", "more", "data", "left", "on", "the", "pipe" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L40-L47
train
54,018
thiezn/iperf3-python
iperf3/iperf3.py
read_pipe
def read_pipe(pipe_out): """Read data on a pipe Used to capture stdout data produced by libiperf :param pipe_out: The os pipe_out :rtype: unicode string """ out = b'' while more_data(pipe_out): out += os.read(pipe_out, 1024) return out.decode('utf-8')
python
def read_pipe(pipe_out): """Read data on a pipe Used to capture stdout data produced by libiperf :param pipe_out: The os pipe_out :rtype: unicode string """ out = b'' while more_data(pipe_out): out += os.read(pipe_out, 1024) return out.decode('utf-8')
[ "def", "read_pipe", "(", "pipe_out", ")", ":", "out", "=", "b''", "while", "more_data", "(", "pipe_out", ")", ":", "out", "+=", "os", ".", "read", "(", "pipe_out", ",", "1024", ")", "return", "out", ".", "decode", "(", "'utf-8'", ")" ]
Read data on a pipe Used to capture stdout data produced by libiperf :param pipe_out: The os pipe_out :rtype: unicode string
[ "Read", "data", "on", "a", "pipe" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L50-L62
train
54,019
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.role
def role(self): """The iperf3 instance role valid roles are 'c'=client and 's'=server :rtype: 'c' or 's' """ try: self._role = c_char( self.lib.iperf_get_test_role(self._test) ).value.decode('utf-8') except TypeError: self._role = c_char( chr(self.lib.iperf_get_test_role(self._test)) ).value.decode('utf-8') return self._role
python
def role(self): """The iperf3 instance role valid roles are 'c'=client and 's'=server :rtype: 'c' or 's' """ try: self._role = c_char( self.lib.iperf_get_test_role(self._test) ).value.decode('utf-8') except TypeError: self._role = c_char( chr(self.lib.iperf_get_test_role(self._test)) ).value.decode('utf-8') return self._role
[ "def", "role", "(", "self", ")", ":", "try", ":", "self", ".", "_role", "=", "c_char", "(", "self", ".", "lib", ".", "iperf_get_test_role", "(", "self", ".", "_test", ")", ")", ".", "value", ".", "decode", "(", "'utf-8'", ")", "except", "TypeError", ...
The iperf3 instance role valid roles are 'c'=client and 's'=server :rtype: 'c' or 's'
[ "The", "iperf3", "instance", "role" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L246-L261
train
54,020
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.bind_address
def bind_address(self): """The bind address the iperf3 instance will listen on use * to listen on all available IPs :rtype: string """ result = c_char_p( self.lib.iperf_get_test_bind_address(self._test) ).value if result: self._bind_address = result.decode('utf-8') else: self._bind_address = '*' return self._bind_address
python
def bind_address(self): """The bind address the iperf3 instance will listen on use * to listen on all available IPs :rtype: string """ result = c_char_p( self.lib.iperf_get_test_bind_address(self._test) ).value if result: self._bind_address = result.decode('utf-8') else: self._bind_address = '*' return self._bind_address
[ "def", "bind_address", "(", "self", ")", ":", "result", "=", "c_char_p", "(", "self", ".", "lib", ".", "iperf_get_test_bind_address", "(", "self", ".", "_test", ")", ")", ".", "value", "if", "result", ":", "self", ".", "_bind_address", "=", "result", "."...
The bind address the iperf3 instance will listen on use * to listen on all available IPs :rtype: string
[ "The", "bind", "address", "the", "iperf3", "instance", "will", "listen", "on" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L275-L289
train
54,021
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.port
def port(self): """The port the iperf3 server is listening on""" self._port = self.lib.iperf_get_test_server_port(self._test) return self._port
python
def port(self): """The port the iperf3 server is listening on""" self._port = self.lib.iperf_get_test_server_port(self._test) return self._port
[ "def", "port", "(", "self", ")", ":", "self", ".", "_port", "=", "self", ".", "lib", ".", "iperf_get_test_server_port", "(", "self", ".", "_test", ")", "return", "self", ".", "_port" ]
The port the iperf3 server is listening on
[ "The", "port", "the", "iperf3", "server", "is", "listening", "on" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L300-L303
train
54,022
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.json_output
def json_output(self): """Toggles json output of libiperf Turning this off will output the iperf3 instance results to stdout/stderr :rtype: bool """ enabled = self.lib.iperf_get_test_json_output(self._test) if enabled: self._json_output = True else: self._json_output = False return self._json_output
python
def json_output(self): """Toggles json output of libiperf Turning this off will output the iperf3 instance results to stdout/stderr :rtype: bool """ enabled = self.lib.iperf_get_test_json_output(self._test) if enabled: self._json_output = True else: self._json_output = False return self._json_output
[ "def", "json_output", "(", "self", ")", ":", "enabled", "=", "self", ".", "lib", ".", "iperf_get_test_json_output", "(", "self", ".", "_test", ")", "if", "enabled", ":", "self", ".", "_json_output", "=", "True", "else", ":", "self", ".", "_json_output", ...
Toggles json output of libiperf Turning this off will output the iperf3 instance results to stdout/stderr :rtype: bool
[ "Toggles", "json", "output", "of", "libiperf" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L311-L326
train
54,023
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.verbose
def verbose(self): """Toggles verbose output for the iperf3 instance :rtype: bool """ enabled = self.lib.iperf_get_verbose(self._test) if enabled: self._verbose = True else: self._verbose = False return self._verbose
python
def verbose(self): """Toggles verbose output for the iperf3 instance :rtype: bool """ enabled = self.lib.iperf_get_verbose(self._test) if enabled: self._verbose = True else: self._verbose = False return self._verbose
[ "def", "verbose", "(", "self", ")", ":", "enabled", "=", "self", ".", "lib", ".", "iperf_get_verbose", "(", "self", ".", "_test", ")", "if", "enabled", ":", "self", ".", "_verbose", "=", "True", "else", ":", "self", ".", "_verbose", "=", "False", "re...
Toggles verbose output for the iperf3 instance :rtype: bool
[ "Toggles", "verbose", "output", "for", "the", "iperf3", "instance" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L338-L350
train
54,024
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3.iperf_version
def iperf_version(self): """Returns the version of the libiperf library :rtype: string """ # TODO: Is there a better way to get the const char than allocating 30? VersionType = c_char * 30 return VersionType.in_dll(self.lib, "version").value.decode('utf-8')
python
def iperf_version(self): """Returns the version of the libiperf library :rtype: string """ # TODO: Is there a better way to get the const char than allocating 30? VersionType = c_char * 30 return VersionType.in_dll(self.lib, "version").value.decode('utf-8')
[ "def", "iperf_version", "(", "self", ")", ":", "# TODO: Is there a better way to get the const char than allocating 30?", "VersionType", "=", "c_char", "*", "30", "return", "VersionType", ".", "in_dll", "(", "self", ".", "lib", ",", "\"version\"", ")", ".", "value", ...
Returns the version of the libiperf library :rtype: string
[ "Returns", "the", "version", "of", "the", "libiperf", "library" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L369-L376
train
54,025
thiezn/iperf3-python
iperf3/iperf3.py
IPerf3._error_to_string
def _error_to_string(self, error_id): """Returns an error string from libiperf :param error_id: The error_id produced by libiperf :rtype: string """ strerror = self.lib.iperf_strerror strerror.restype = c_char_p return strerror(error_id).decode('utf-8')
python
def _error_to_string(self, error_id): """Returns an error string from libiperf :param error_id: The error_id produced by libiperf :rtype: string """ strerror = self.lib.iperf_strerror strerror.restype = c_char_p return strerror(error_id).decode('utf-8')
[ "def", "_error_to_string", "(", "self", ",", "error_id", ")", ":", "strerror", "=", "self", ".", "lib", ".", "iperf_strerror", "strerror", ".", "restype", "=", "c_char_p", "return", "strerror", "(", "error_id", ")", ".", "decode", "(", "'utf-8'", ")" ]
Returns an error string from libiperf :param error_id: The error_id produced by libiperf :rtype: string
[ "Returns", "an", "error", "string", "from", "libiperf" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L378-L386
train
54,026
thiezn/iperf3-python
iperf3/iperf3.py
Client.server_hostname
def server_hostname(self): """The server hostname to connect to. Accepts DNS entries or IP addresses. :rtype: string """ result = c_char_p( self.lib.iperf_get_test_server_hostname(self._test) ).value if result: self._server_hostname = result.decode('utf-8') else: self._server_hostname = None return self._server_hostname
python
def server_hostname(self): """The server hostname to connect to. Accepts DNS entries or IP addresses. :rtype: string """ result = c_char_p( self.lib.iperf_get_test_server_hostname(self._test) ).value if result: self._server_hostname = result.decode('utf-8') else: self._server_hostname = None return self._server_hostname
[ "def", "server_hostname", "(", "self", ")", ":", "result", "=", "c_char_p", "(", "self", ".", "lib", ".", "iperf_get_test_server_hostname", "(", "self", ".", "_test", ")", ")", ".", "value", "if", "result", ":", "self", ".", "_server_hostname", "=", "resul...
The server hostname to connect to. Accepts DNS entries or IP addresses. :rtype: string
[ "The", "server", "hostname", "to", "connect", "to", "." ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L432-L446
train
54,027
thiezn/iperf3-python
iperf3/iperf3.py
Client.protocol
def protocol(self): """The iperf3 instance protocol valid protocols are 'tcp' and 'udp' :rtype: str """ proto_id = self.lib.iperf_get_test_protocol_id(self._test) if proto_id == SOCK_STREAM: self._protocol = 'tcp' elif proto_id == SOCK_DGRAM: self._protocol = 'udp' return self._protocol
python
def protocol(self): """The iperf3 instance protocol valid protocols are 'tcp' and 'udp' :rtype: str """ proto_id = self.lib.iperf_get_test_protocol_id(self._test) if proto_id == SOCK_STREAM: self._protocol = 'tcp' elif proto_id == SOCK_DGRAM: self._protocol = 'udp' return self._protocol
[ "def", "protocol", "(", "self", ")", ":", "proto_id", "=", "self", ".", "lib", ".", "iperf_get_test_protocol_id", "(", "self", ".", "_test", ")", "if", "proto_id", "==", "SOCK_STREAM", ":", "self", ".", "_protocol", "=", "'tcp'", "elif", "proto_id", "==", ...
The iperf3 instance protocol valid protocols are 'tcp' and 'udp' :rtype: str
[ "The", "iperf3", "instance", "protocol" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L457-L471
train
54,028
thiezn/iperf3-python
iperf3/iperf3.py
Client.omit
def omit(self): """The test startup duration to omit in seconds.""" self._omit = self.lib.iperf_get_test_omit(self._test) return self._omit
python
def omit(self): """The test startup duration to omit in seconds.""" self._omit = self.lib.iperf_get_test_omit(self._test) return self._omit
[ "def", "omit", "(", "self", ")", ":", "self", ".", "_omit", "=", "self", ".", "lib", ".", "iperf_get_test_omit", "(", "self", ".", "_test", ")", "return", "self", ".", "_omit" ]
The test startup duration to omit in seconds.
[ "The", "test", "startup", "duration", "to", "omit", "in", "seconds", "." ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L486-L489
train
54,029
thiezn/iperf3-python
iperf3/iperf3.py
Client.duration
def duration(self): """The test duration in seconds.""" self._duration = self.lib.iperf_get_test_duration(self._test) return self._duration
python
def duration(self): """The test duration in seconds.""" self._duration = self.lib.iperf_get_test_duration(self._test) return self._duration
[ "def", "duration", "(", "self", ")", ":", "self", ".", "_duration", "=", "self", ".", "lib", ".", "iperf_get_test_duration", "(", "self", ".", "_test", ")", "return", "self", ".", "_duration" ]
The test duration in seconds.
[ "The", "test", "duration", "in", "seconds", "." ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L497-L500
train
54,030
thiezn/iperf3-python
iperf3/iperf3.py
Client.blksize
def blksize(self): """The test blksize.""" self._blksize = self.lib.iperf_get_test_blksize(self._test) return self._blksize
python
def blksize(self): """The test blksize.""" self._blksize = self.lib.iperf_get_test_blksize(self._test) return self._blksize
[ "def", "blksize", "(", "self", ")", ":", "self", ".", "_blksize", "=", "self", ".", "lib", ".", "iperf_get_test_blksize", "(", "self", ".", "_test", ")", "return", "self", ".", "_blksize" ]
The test blksize.
[ "The", "test", "blksize", "." ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L519-L522
train
54,031
thiezn/iperf3-python
iperf3/iperf3.py
Client.num_streams
def num_streams(self): """The number of streams to use.""" self._num_streams = self.lib.iperf_get_test_num_streams(self._test) return self._num_streams
python
def num_streams(self): """The number of streams to use.""" self._num_streams = self.lib.iperf_get_test_num_streams(self._test) return self._num_streams
[ "def", "num_streams", "(", "self", ")", ":", "self", ".", "_num_streams", "=", "self", ".", "lib", ".", "iperf_get_test_num_streams", "(", "self", ".", "_test", ")", "return", "self", ".", "_num_streams" ]
The number of streams to use.
[ "The", "number", "of", "streams", "to", "use", "." ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L552-L555
train
54,032
thiezn/iperf3-python
iperf3/iperf3.py
Client.reverse
def reverse(self): """Toggles direction of test :rtype: bool """ enabled = self.lib.iperf_get_test_reverse(self._test) if enabled: self._reverse = True else: self._reverse = False return self._reverse
python
def reverse(self): """Toggles direction of test :rtype: bool """ enabled = self.lib.iperf_get_test_reverse(self._test) if enabled: self._reverse = True else: self._reverse = False return self._reverse
[ "def", "reverse", "(", "self", ")", ":", "enabled", "=", "self", ".", "lib", ".", "iperf_get_test_reverse", "(", "self", ".", "_test", ")", "if", "enabled", ":", "self", ".", "_reverse", "=", "True", "else", ":", "self", ".", "_reverse", "=", "False", ...
Toggles direction of test :rtype: bool
[ "Toggles", "direction", "of", "test" ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L586-L598
train
54,033
thiezn/iperf3-python
iperf3/iperf3.py
Client.run
def run(self): """Run the current test client. :rtype: instance of :class:`TestResult` """ if self.json_output: output_to_pipe(self._pipe_in) # Disable stdout error = self.lib.iperf_run_client(self._test) if not self.iperf_version.startswith('iperf 3.1'): data = read_pipe(self._pipe_out) if data.startswith('Control connection'): data = '{' + data.split('{', 1)[1] else: data = c_char_p( self.lib.iperf_get_test_json_output_string(self._test) ).value if data: data = data.decode('utf-8') output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout if not data or error: data = '{"error": "%s"}' % self._error_to_string(self._errno) return TestResult(data)
python
def run(self): """Run the current test client. :rtype: instance of :class:`TestResult` """ if self.json_output: output_to_pipe(self._pipe_in) # Disable stdout error = self.lib.iperf_run_client(self._test) if not self.iperf_version.startswith('iperf 3.1'): data = read_pipe(self._pipe_out) if data.startswith('Control connection'): data = '{' + data.split('{', 1)[1] else: data = c_char_p( self.lib.iperf_get_test_json_output_string(self._test) ).value if data: data = data.decode('utf-8') output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout if not data or error: data = '{"error": "%s"}' % self._error_to_string(self._errno) return TestResult(data)
[ "def", "run", "(", "self", ")", ":", "if", "self", ".", "json_output", ":", "output_to_pipe", "(", "self", ".", "_pipe_in", ")", "# Disable stdout", "error", "=", "self", ".", "lib", ".", "iperf_run_client", "(", "self", ".", "_test", ")", "if", "not", ...
Run the current test client. :rtype: instance of :class:`TestResult`
[ "Run", "the", "current", "test", "client", "." ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L609-L634
train
54,034
thiezn/iperf3-python
iperf3/iperf3.py
Server.run
def run(self): """Run the iperf3 server instance. :rtype: instance of :class:`TestResult` """ def _run_in_thread(self, data_queue): """Runs the iperf_run_server :param data_queue: thread-safe queue """ output_to_pipe(self._pipe_in) # disable stdout error = self.lib.iperf_run_server(self._test) output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout # TODO json_output_string not available on earlier iperf3 builds # have to build in a version check using self.iperf_version # The following line should work on later versions: # data = c_char_p( # self.lib.iperf_get_test_json_output_string(self._test) # ).value data = read_pipe(self._pipe_out) if not data or error: data = '{"error": "%s"}' % self._error_to_string(self._errno) self.lib.iperf_reset_test(self._test) data_queue.put(data) if self.json_output: data_queue = Queue() t = threading.Thread( target=_run_in_thread, args=[self, data_queue] ) t.daemon = True t.start() while t.is_alive(): t.join(.1) return TestResult(data_queue.get()) else: # setting json_output to False will output test to screen only self.lib.iperf_run_server(self._test) self.lib.iperf_reset_test(self._test) return None
python
def run(self): """Run the iperf3 server instance. :rtype: instance of :class:`TestResult` """ def _run_in_thread(self, data_queue): """Runs the iperf_run_server :param data_queue: thread-safe queue """ output_to_pipe(self._pipe_in) # disable stdout error = self.lib.iperf_run_server(self._test) output_to_screen(self._stdout_fd, self._stderr_fd) # enable stdout # TODO json_output_string not available on earlier iperf3 builds # have to build in a version check using self.iperf_version # The following line should work on later versions: # data = c_char_p( # self.lib.iperf_get_test_json_output_string(self._test) # ).value data = read_pipe(self._pipe_out) if not data or error: data = '{"error": "%s"}' % self._error_to_string(self._errno) self.lib.iperf_reset_test(self._test) data_queue.put(data) if self.json_output: data_queue = Queue() t = threading.Thread( target=_run_in_thread, args=[self, data_queue] ) t.daemon = True t.start() while t.is_alive(): t.join(.1) return TestResult(data_queue.get()) else: # setting json_output to False will output test to screen only self.lib.iperf_run_server(self._test) self.lib.iperf_reset_test(self._test) return None
[ "def", "run", "(", "self", ")", ":", "def", "_run_in_thread", "(", "self", ",", "data_queue", ")", ":", "\"\"\"Runs the iperf_run_server\n\n :param data_queue: thread-safe queue\n \"\"\"", "output_to_pipe", "(", "self", ".", "_pipe_in", ")", "# disable...
Run the iperf3 server instance. :rtype: instance of :class:`TestResult`
[ "Run", "the", "iperf3", "server", "instance", "." ]
094a6e043f44fb154988348603661b1473c23a50
https://github.com/thiezn/iperf3-python/blob/094a6e043f44fb154988348603661b1473c23a50/iperf3/iperf3.py#L660-L707
train
54,035
joferkington/mplstereonet
mplstereonet/convenience_functions.py
subplots
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, subplot_kw=None, hemisphere='lower', projection='equal_area', **fig_kw): """ Identical to matplotlib.pyplot.subplots, except that this will default to producing equal-area stereonet axes. This prevents constantly doing: >>> fig, ax = plt.subplot(subplot_kw=dict(projection='stereonet')) or >>> fig = plt.figure() >>> ax = fig.add_subplot(111, projection='stereonet') Using this function also avoids having ``mplstereonet`` continually appear to be an unused import when one of the above methods are used. Parameters ----------- nrows : int Number of rows of the subplot grid. Defaults to 1. ncols : int Number of columns of the subplot grid. Defaults to 1. hemisphere : string Currently this has no effect. When upper hemisphere and dual hemisphere plots are implemented, this will control which hemisphere is displayed. projection : string The projection for the axes. Defaults to 'equal_area'--an equal-area (a.k.a. "Schmidtt") stereonet. May also be 'equal_angle' for an equal-angle (a.k.a. "Wulff") stereonet or any other valid matplotlib projection (e.g. 'polar' or 'rectilinear' for a "normal" axes). The following parameters are identical to matplotlib.pyplot.subplots: sharex : string or bool If *True*, the X axis will be shared amongst all subplots. If *True* and you have multiple rows, the x tick labels on all but the last row of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a X axis. If "col", each subplot column will share a X axis and the x tick labels on all but the last row will have visible set to *False*. sharey : string or bool If *True*, the Y axis will be shared amongst all subplots. 
If *True* and you have multiple columns, the y tick labels on all but the first column of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a Y axis. If "col", each subplot column will share a Y axis and the y tick labels on all but the last row will have visible set to *False*. *squeeze* : bool If *True*, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects are returned as numpy 1-d arrays. - for NxM subplots with N>1 and M>1 are returned as a 2d array. If *False*, no squeezing at all is done: the returned axis object is always a 2-d array contaning Axis instances, even if it ends up being 1x1. *subplot_kw* : dict Dict with keywords passed to the :meth:`~matplotlib.figure.Figure.add_subplot` call used to create each subplots. *fig_kw* : dict Dict with keywords passed to the :func:`figure` call. Note that all keywords not recognized above will be automatically included here. Returns -------- fig, ax : tuple - *fig* is the :class:`matplotlib.figure.Figure` object - *ax* can be either a single axis object or an array of axis objects if more than one supblot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above. """ import matplotlib.pyplot as plt if projection in ['equal_area', 'equal_angle']: projection += '_stereonet' if subplot_kw == None: subplot_kw = {} subplot_kw['projection'] = projection return plt.subplots(nrows, ncols, sharex=sharex, sharey=sharey, squeeze=squeeze, subplot_kw=subplot_kw, **fig_kw)
python
def subplots(nrows=1, ncols=1, sharex=False, sharey=False, squeeze=True, subplot_kw=None, hemisphere='lower', projection='equal_area', **fig_kw): """ Identical to matplotlib.pyplot.subplots, except that this will default to producing equal-area stereonet axes. This prevents constantly doing: >>> fig, ax = plt.subplot(subplot_kw=dict(projection='stereonet')) or >>> fig = plt.figure() >>> ax = fig.add_subplot(111, projection='stereonet') Using this function also avoids having ``mplstereonet`` continually appear to be an unused import when one of the above methods are used. Parameters ----------- nrows : int Number of rows of the subplot grid. Defaults to 1. ncols : int Number of columns of the subplot grid. Defaults to 1. hemisphere : string Currently this has no effect. When upper hemisphere and dual hemisphere plots are implemented, this will control which hemisphere is displayed. projection : string The projection for the axes. Defaults to 'equal_area'--an equal-area (a.k.a. "Schmidtt") stereonet. May also be 'equal_angle' for an equal-angle (a.k.a. "Wulff") stereonet or any other valid matplotlib projection (e.g. 'polar' or 'rectilinear' for a "normal" axes). The following parameters are identical to matplotlib.pyplot.subplots: sharex : string or bool If *True*, the X axis will be shared amongst all subplots. If *True* and you have multiple rows, the x tick labels on all but the last row of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a X axis. If "col", each subplot column will share a X axis and the x tick labels on all but the last row will have visible set to *False*. sharey : string or bool If *True*, the Y axis will be shared amongst all subplots. 
If *True* and you have multiple columns, the y tick labels on all but the first column of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a Y axis. If "col", each subplot column will share a Y axis and the y tick labels on all but the last row will have visible set to *False*. *squeeze* : bool If *True*, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects are returned as numpy 1-d arrays. - for NxM subplots with N>1 and M>1 are returned as a 2d array. If *False*, no squeezing at all is done: the returned axis object is always a 2-d array contaning Axis instances, even if it ends up being 1x1. *subplot_kw* : dict Dict with keywords passed to the :meth:`~matplotlib.figure.Figure.add_subplot` call used to create each subplots. *fig_kw* : dict Dict with keywords passed to the :func:`figure` call. Note that all keywords not recognized above will be automatically included here. Returns -------- fig, ax : tuple - *fig* is the :class:`matplotlib.figure.Figure` object - *ax* can be either a single axis object or an array of axis objects if more than one supblot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above. """ import matplotlib.pyplot as plt if projection in ['equal_area', 'equal_angle']: projection += '_stereonet' if subplot_kw == None: subplot_kw = {} subplot_kw['projection'] = projection return plt.subplots(nrows, ncols, sharex=sharex, sharey=sharey, squeeze=squeeze, subplot_kw=subplot_kw, **fig_kw)
[ "def", "subplots", "(", "nrows", "=", "1", ",", "ncols", "=", "1", ",", "sharex", "=", "False", ",", "sharey", "=", "False", ",", "squeeze", "=", "True", ",", "subplot_kw", "=", "None", ",", "hemisphere", "=", "'lower'", ",", "projection", "=", "'equ...
Identical to matplotlib.pyplot.subplots, except that this will default to producing equal-area stereonet axes. This prevents constantly doing: >>> fig, ax = plt.subplot(subplot_kw=dict(projection='stereonet')) or >>> fig = plt.figure() >>> ax = fig.add_subplot(111, projection='stereonet') Using this function also avoids having ``mplstereonet`` continually appear to be an unused import when one of the above methods are used. Parameters ----------- nrows : int Number of rows of the subplot grid. Defaults to 1. ncols : int Number of columns of the subplot grid. Defaults to 1. hemisphere : string Currently this has no effect. When upper hemisphere and dual hemisphere plots are implemented, this will control which hemisphere is displayed. projection : string The projection for the axes. Defaults to 'equal_area'--an equal-area (a.k.a. "Schmidtt") stereonet. May also be 'equal_angle' for an equal-angle (a.k.a. "Wulff") stereonet or any other valid matplotlib projection (e.g. 'polar' or 'rectilinear' for a "normal" axes). The following parameters are identical to matplotlib.pyplot.subplots: sharex : string or bool If *True*, the X axis will be shared amongst all subplots. If *True* and you have multiple rows, the x tick labels on all but the last row of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. If "row", each subplot row will share a X axis. If "col", each subplot column will share a X axis and the x tick labels on all but the last row will have visible set to *False*. sharey : string or bool If *True*, the Y axis will be shared amongst all subplots. If *True* and you have multiple columns, the y tick labels on all but the first column of plots will have visible set to *False* If a string must be one of "row", "col", "all", or "none". "all" has the same effect as *True*, "none" has the same effect as *False*. 
If "row", each subplot row will share a Y axis. If "col", each subplot column will share a Y axis and the y tick labels on all but the last row will have visible set to *False*. *squeeze* : bool If *True*, extra dimensions are squeezed out from the returned axis object: - if only one subplot is constructed (nrows=ncols=1), the resulting single Axis object is returned as a scalar. - for Nx1 or 1xN subplots, the returned object is a 1-d numpy object array of Axis objects are returned as numpy 1-d arrays. - for NxM subplots with N>1 and M>1 are returned as a 2d array. If *False*, no squeezing at all is done: the returned axis object is always a 2-d array contaning Axis instances, even if it ends up being 1x1. *subplot_kw* : dict Dict with keywords passed to the :meth:`~matplotlib.figure.Figure.add_subplot` call used to create each subplots. *fig_kw* : dict Dict with keywords passed to the :func:`figure` call. Note that all keywords not recognized above will be automatically included here. Returns -------- fig, ax : tuple - *fig* is the :class:`matplotlib.figure.Figure` object - *ax* can be either a single axis object or an array of axis objects if more than one supblot was created. The dimensions of the resulting array can be controlled with the squeeze keyword, see above.
[ "Identical", "to", "matplotlib", ".", "pyplot", ".", "subplots", "except", "that", "this", "will", "default", "to", "producing", "equal", "-", "area", "stereonet", "axes", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/convenience_functions.py#L1-L108
train
54,036
joferkington/mplstereonet
examples/fault_slip_plot.py
tangent_lineation_plot
def tangent_lineation_plot(ax, strikes, dips, rakes): """Makes a tangent lineation plot for normal faults with the given strikes, dips, and rakes.""" # Calculate the position of the rake of the lineations, but don't plot yet rake_x, rake_y = mplstereonet.rake(strikes, dips, rakes) # Calculate the direction the arrows should point # These are all normal faults, so the arrows point away from the center # Because we're plotting at the pole location, however, we need to flip this # from what we plotted with the "ball of string" plot. mag = np.hypot(rake_x, rake_y) u, v = -rake_x / mag, -rake_y / mag # Calculate the position of the poles pole_x, pole_y = mplstereonet.pole(strikes, dips) # Plot the arrows centered on the pole locations... arrows = ax.quiver(pole_x, pole_y, u, v, width=1, headwidth=4, units='dots', pivot='middle') return arrows
python
def tangent_lineation_plot(ax, strikes, dips, rakes): """Makes a tangent lineation plot for normal faults with the given strikes, dips, and rakes.""" # Calculate the position of the rake of the lineations, but don't plot yet rake_x, rake_y = mplstereonet.rake(strikes, dips, rakes) # Calculate the direction the arrows should point # These are all normal faults, so the arrows point away from the center # Because we're plotting at the pole location, however, we need to flip this # from what we plotted with the "ball of string" plot. mag = np.hypot(rake_x, rake_y) u, v = -rake_x / mag, -rake_y / mag # Calculate the position of the poles pole_x, pole_y = mplstereonet.pole(strikes, dips) # Plot the arrows centered on the pole locations... arrows = ax.quiver(pole_x, pole_y, u, v, width=1, headwidth=4, units='dots', pivot='middle') return arrows
[ "def", "tangent_lineation_plot", "(", "ax", ",", "strikes", ",", "dips", ",", "rakes", ")", ":", "# Calculate the position of the rake of the lineations, but don't plot yet", "rake_x", ",", "rake_y", "=", "mplstereonet", ".", "rake", "(", "strikes", ",", "dips", ",", ...
Makes a tangent lineation plot for normal faults with the given strikes, dips, and rakes.
[ "Makes", "a", "tangent", "lineation", "plot", "for", "normal", "faults", "with", "the", "given", "strikes", "dips", "and", "rakes", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/fault_slip_plot.py#L54-L73
train
54,037
joferkington/mplstereonet
examples/parse_angelier_data.py
load
def load(): """Read data from a text file on disk.""" # Get the data file relative to this file's location... datadir = os.path.dirname(__file__) filename = os.path.join(datadir, 'angelier_data.txt') data = [] with open(filename, 'r') as infile: for line in infile: # Skip comments if line.startswith('#'): continue # First column: strike, second: dip, third: rake. strike, dip, rake = line.strip().split() if rake[-1].isalpha(): # If there's a directional letter on the rake column, parse it # normally. strike, dip, rake = mplstereonet.parse_rake(strike, dip, rake) else: # Otherwise, it's actually an azimuthal measurement of the # slickenslide directions, so we need to convert it to a rake. strike, dip = mplstereonet.parse_strike_dip(strike, dip) azimuth = float(rake) rake = mplstereonet.azimuth2rake(strike, dip, azimuth) data.append([strike, dip, rake]) # Separate the columns back out strike, dip, rake = zip(*data) return strike, dip, rake
python
def load(): """Read data from a text file on disk.""" # Get the data file relative to this file's location... datadir = os.path.dirname(__file__) filename = os.path.join(datadir, 'angelier_data.txt') data = [] with open(filename, 'r') as infile: for line in infile: # Skip comments if line.startswith('#'): continue # First column: strike, second: dip, third: rake. strike, dip, rake = line.strip().split() if rake[-1].isalpha(): # If there's a directional letter on the rake column, parse it # normally. strike, dip, rake = mplstereonet.parse_rake(strike, dip, rake) else: # Otherwise, it's actually an azimuthal measurement of the # slickenslide directions, so we need to convert it to a rake. strike, dip = mplstereonet.parse_strike_dip(strike, dip) azimuth = float(rake) rake = mplstereonet.azimuth2rake(strike, dip, azimuth) data.append([strike, dip, rake]) # Separate the columns back out strike, dip, rake = zip(*data) return strike, dip, rake
[ "def", "load", "(", ")", ":", "# Get the data file relative to this file's location...", "datadir", "=", "os", ".", "path", ".", "dirname", "(", "__file__", ")", "filename", "=", "os", ".", "path", ".", "join", "(", "datadir", ",", "'angelier_data.txt'", ")", ...
Read data from a text file on disk.
[ "Read", "data", "from", "a", "text", "file", "on", "disk", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/parse_angelier_data.py#L32-L63
train
54,038
joferkington/mplstereonet
mplstereonet/contouring.py
_exponential_kamb
def _exponential_kamb(cos_dist, sigma=3): """Kernel function from Vollmer for exponential smoothing.""" n = float(cos_dist.size) f = 2 * (1.0 + n / sigma**2) count = np.exp(f * (cos_dist - 1)) units = np.sqrt(n * (f/2.0 - 1) / f**2) return count, units
python
def _exponential_kamb(cos_dist, sigma=3): """Kernel function from Vollmer for exponential smoothing.""" n = float(cos_dist.size) f = 2 * (1.0 + n / sigma**2) count = np.exp(f * (cos_dist - 1)) units = np.sqrt(n * (f/2.0 - 1) / f**2) return count, units
[ "def", "_exponential_kamb", "(", "cos_dist", ",", "sigma", "=", "3", ")", ":", "n", "=", "float", "(", "cos_dist", ".", "size", ")", "f", "=", "2", "*", "(", "1.0", "+", "n", "/", "sigma", "**", "2", ")", "count", "=", "np", ".", "exp", "(", ...
Kernel function from Vollmer for exponential smoothing.
[ "Kernel", "function", "from", "Vollmer", "for", "exponential", "smoothing", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/contouring.py#L183-L189
train
54,039
joferkington/mplstereonet
mplstereonet/contouring.py
_linear_inverse_kamb
def _linear_inverse_kamb(cos_dist, sigma=3): """Kernel function from Vollmer for linear smoothing.""" n = float(cos_dist.size) radius = _kamb_radius(n, sigma) f = 2 / (1 - radius) cos_dist = cos_dist[cos_dist >= radius] count = (f * (cos_dist - radius)) return count, _kamb_units(n, radius)
python
def _linear_inverse_kamb(cos_dist, sigma=3): """Kernel function from Vollmer for linear smoothing.""" n = float(cos_dist.size) radius = _kamb_radius(n, sigma) f = 2 / (1 - radius) cos_dist = cos_dist[cos_dist >= radius] count = (f * (cos_dist - radius)) return count, _kamb_units(n, radius)
[ "def", "_linear_inverse_kamb", "(", "cos_dist", ",", "sigma", "=", "3", ")", ":", "n", "=", "float", "(", "cos_dist", ".", "size", ")", "radius", "=", "_kamb_radius", "(", "n", ",", "sigma", ")", "f", "=", "2", "/", "(", "1", "-", "radius", ")", ...
Kernel function from Vollmer for linear smoothing.
[ "Kernel", "function", "from", "Vollmer", "for", "linear", "smoothing", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/contouring.py#L191-L198
train
54,040
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._get_core_transform
def _get_core_transform(self, resolution): """The projection for the stereonet as a matplotlib transform. This is primarily called by LambertAxes._set_lim_and_transforms.""" return self._base_transform(self._center_longitude, self._center_latitude, resolution)
python
def _get_core_transform(self, resolution): """The projection for the stereonet as a matplotlib transform. This is primarily called by LambertAxes._set_lim_and_transforms.""" return self._base_transform(self._center_longitude, self._center_latitude, resolution)
[ "def", "_get_core_transform", "(", "self", ",", "resolution", ")", ":", "return", "self", ".", "_base_transform", "(", "self", ".", "_center_longitude", ",", "self", ".", "_center_latitude", ",", "resolution", ")" ]
The projection for the stereonet as a matplotlib transform. This is primarily called by LambertAxes._set_lim_and_transforms.
[ "The", "projection", "for", "the", "stereonet", "as", "a", "matplotlib", "transform", ".", "This", "is", "primarily", "called", "by", "LambertAxes", ".", "_set_lim_and_transforms", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L55-L60
train
54,041
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._get_affine_transform
def _get_affine_transform(self): """The affine portion of the base transform. This is called by LambertAxes._set_lim_and_transforms.""" # How big is the projected globe? # In the case of a stereonet, it's actually constant. xscale = yscale = self._scale # Create an affine transform to stretch the projection from 0-1 return Affine2D() \ .rotate(np.radians(self.rotation)) \ .scale(0.5 / xscale, 0.5 / yscale) \ .translate(0.5, 0.5)
python
def _get_affine_transform(self): """The affine portion of the base transform. This is called by LambertAxes._set_lim_and_transforms.""" # How big is the projected globe? # In the case of a stereonet, it's actually constant. xscale = yscale = self._scale # Create an affine transform to stretch the projection from 0-1 return Affine2D() \ .rotate(np.radians(self.rotation)) \ .scale(0.5 / xscale, 0.5 / yscale) \ .translate(0.5, 0.5)
[ "def", "_get_affine_transform", "(", "self", ")", ":", "# How big is the projected globe?", "# In the case of a stereonet, it's actually constant.", "xscale", "=", "yscale", "=", "self", ".", "_scale", "# Create an affine transform to stretch the projection from 0-1", "return", "Af...
The affine portion of the base transform. This is called by LambertAxes._set_lim_and_transforms.
[ "The", "affine", "portion", "of", "the", "base", "transform", ".", "This", "is", "called", "by", "LambertAxes", ".", "_set_lim_and_transforms", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L62-L72
train
54,042
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._set_lim_and_transforms
def _set_lim_and_transforms(self): """Setup the key transforms for the axes.""" # Most of the transforms are set up correctly by LambertAxes LambertAxes._set_lim_and_transforms(self) # Transform for latitude ticks. These are typically unused, but just # in case we need them... yaxis_stretch = Affine2D().scale(4 * self.horizon, 1.0) yaxis_stretch = yaxis_stretch.translate(-self.horizon, 0.0) # These are identical to LambertAxes._set_lim_and_transforms, but we # need to update things to reflect the new "yaxis_stretch" yaxis_space = Affine2D().scale(1.0, 1.1) self._yaxis_transform = \ yaxis_stretch + \ self.transData yaxis_text_base = \ yaxis_stretch + \ self.transProjection + \ (yaxis_space + \ self.transAffine + \ self.transAxes) self._yaxis_text1_transform = \ yaxis_text_base + \ Affine2D().translate(-8.0, 0.0) self._yaxis_text2_transform = \ yaxis_text_base + \ Affine2D().translate(8.0, 0.0)
python
def _set_lim_and_transforms(self): """Setup the key transforms for the axes.""" # Most of the transforms are set up correctly by LambertAxes LambertAxes._set_lim_and_transforms(self) # Transform for latitude ticks. These are typically unused, but just # in case we need them... yaxis_stretch = Affine2D().scale(4 * self.horizon, 1.0) yaxis_stretch = yaxis_stretch.translate(-self.horizon, 0.0) # These are identical to LambertAxes._set_lim_and_transforms, but we # need to update things to reflect the new "yaxis_stretch" yaxis_space = Affine2D().scale(1.0, 1.1) self._yaxis_transform = \ yaxis_stretch + \ self.transData yaxis_text_base = \ yaxis_stretch + \ self.transProjection + \ (yaxis_space + \ self.transAffine + \ self.transAxes) self._yaxis_text1_transform = \ yaxis_text_base + \ Affine2D().translate(-8.0, 0.0) self._yaxis_text2_transform = \ yaxis_text_base + \ Affine2D().translate(8.0, 0.0)
[ "def", "_set_lim_and_transforms", "(", "self", ")", ":", "# Most of the transforms are set up correctly by LambertAxes", "LambertAxes", ".", "_set_lim_and_transforms", "(", "self", ")", "# Transform for latitude ticks. These are typically unused, but just", "# in case we need them...", ...
Setup the key transforms for the axes.
[ "Setup", "the", "key", "transforms", "for", "the", "axes", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L74-L101
train
54,043
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.set_longitude_grid
def set_longitude_grid(self, degrees): """ Set the number of degrees between each longitude grid. """ number = (360.0 / degrees) + 1 locs = np.linspace(-np.pi, np.pi, number, True)[1:] locs[-1] -= 0.01 # Workaround for "back" gridlines showing. self.xaxis.set_major_locator(FixedLocator(locs)) self._logitude_degrees = degrees self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
python
def set_longitude_grid(self, degrees): """ Set the number of degrees between each longitude grid. """ number = (360.0 / degrees) + 1 locs = np.linspace(-np.pi, np.pi, number, True)[1:] locs[-1] -= 0.01 # Workaround for "back" gridlines showing. self.xaxis.set_major_locator(FixedLocator(locs)) self._logitude_degrees = degrees self.xaxis.set_major_formatter(self.ThetaFormatter(degrees))
[ "def", "set_longitude_grid", "(", "self", ",", "degrees", ")", ":", "number", "=", "(", "360.0", "/", "degrees", ")", "+", "1", "locs", "=", "np", ".", "linspace", "(", "-", "np", ".", "pi", ",", "np", ".", "pi", ",", "number", ",", "True", ")", ...
Set the number of degrees between each longitude grid.
[ "Set", "the", "number", "of", "degrees", "between", "each", "longitude", "grid", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L103-L112
train
54,044
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.set_rotation
def set_rotation(self, rotation): """Set the rotation of the stereonet in degrees clockwise from North.""" self._rotation = np.radians(rotation) self._polar.set_theta_offset(self._rotation + np.pi / 2.0) self.transData.invalidate() self.transAxes.invalidate() self._set_lim_and_transforms()
python
def set_rotation(self, rotation): """Set the rotation of the stereonet in degrees clockwise from North.""" self._rotation = np.radians(rotation) self._polar.set_theta_offset(self._rotation + np.pi / 2.0) self.transData.invalidate() self.transAxes.invalidate() self._set_lim_and_transforms()
[ "def", "set_rotation", "(", "self", ",", "rotation", ")", ":", "self", ".", "_rotation", "=", "np", ".", "radians", "(", "rotation", ")", "self", ".", "_polar", ".", "set_theta_offset", "(", "self", ".", "_rotation", "+", "np", ".", "pi", "/", "2.0", ...
Set the rotation of the stereonet in degrees clockwise from North.
[ "Set", "the", "rotation", "of", "the", "stereonet", "in", "degrees", "clockwise", "from", "North", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L124-L130
train
54,045
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.format_coord
def format_coord(self, x, y): """Format displayed coordinates during mouseover of axes.""" p, b = stereonet_math.geographic2plunge_bearing(x, y) s, d = stereonet_math.geographic2pole(x, y) pb = u'P/B={:0.0f}\u00b0/{:03.0f}\u00b0'.format(p[0], b[0]) sd = u'S/D={:03.0f}\u00b0/{:0.0f}\u00b0'.format(s[0], d[0]) return u'{}, {}'.format(pb, sd)
python
def format_coord(self, x, y): """Format displayed coordinates during mouseover of axes.""" p, b = stereonet_math.geographic2plunge_bearing(x, y) s, d = stereonet_math.geographic2pole(x, y) pb = u'P/B={:0.0f}\u00b0/{:03.0f}\u00b0'.format(p[0], b[0]) sd = u'S/D={:03.0f}\u00b0/{:0.0f}\u00b0'.format(s[0], d[0]) return u'{}, {}'.format(pb, sd)
[ "def", "format_coord", "(", "self", ",", "x", ",", "y", ")", ":", "p", ",", "b", "=", "stereonet_math", ".", "geographic2plunge_bearing", "(", "x", ",", "y", ")", "s", ",", "d", "=", "stereonet_math", ".", "geographic2pole", "(", "x", ",", "y", ")", ...
Format displayed coordinates during mouseover of axes.
[ "Format", "displayed", "coordinates", "during", "mouseover", "of", "axes", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L174-L180
train
54,046
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.grid
def grid(self, b=None, which='major', axis='both', kind='arbitrary', center=None, **kwargs): """ Usage is identical to a normal axes grid except for the ``kind`` and ``center`` kwargs. ``kind="polar"`` will add a polar overlay. The ``center`` and ``kind`` arguments allow you to add a grid from a differently-centered stereonet. This is useful for making "polar stereonets" that still use the same coordinate system as a standard stereonet. (i.e. a plane/line/whatever will have the same representation on both, but the grid is displayed differently.) To display a polar grid on a stereonet, use ``kind="polar"``. It is also often useful to display a grid relative to an arbitrary measurement (e.g. a lineation axis). In that case, use the ``lon_center`` and ``lat_center`` arguments. Note that these are in radians in "stereonet coordinates". Therefore, you'll often want to use one of the functions in ``stereonet_math`` to convert a line/plane/rake into the longitude and latitude you'd input here. For example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``. If no parameters are specified, this is equivalent to turning on the standard grid. """ grid_on = self._gridOn Axes.grid(self, False) if kind == 'polar': center = 0, 0 if self._overlay_axes is not None: self._overlay_axes.remove() self._overlay_axes = None if not b and b is not None: return if b is None: if grid_on: return if center is None or np.allclose(center, (np.pi/2, 0)): return Axes.grid(self, b, which, axis, **kwargs) self._add_overlay(center) self._overlay_axes.grid(True, which, axis, **kwargs) self._gridOn = True
python
def grid(self, b=None, which='major', axis='both', kind='arbitrary', center=None, **kwargs): """ Usage is identical to a normal axes grid except for the ``kind`` and ``center`` kwargs. ``kind="polar"`` will add a polar overlay. The ``center`` and ``kind`` arguments allow you to add a grid from a differently-centered stereonet. This is useful for making "polar stereonets" that still use the same coordinate system as a standard stereonet. (i.e. a plane/line/whatever will have the same representation on both, but the grid is displayed differently.) To display a polar grid on a stereonet, use ``kind="polar"``. It is also often useful to display a grid relative to an arbitrary measurement (e.g. a lineation axis). In that case, use the ``lon_center`` and ``lat_center`` arguments. Note that these are in radians in "stereonet coordinates". Therefore, you'll often want to use one of the functions in ``stereonet_math`` to convert a line/plane/rake into the longitude and latitude you'd input here. For example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``. If no parameters are specified, this is equivalent to turning on the standard grid. """ grid_on = self._gridOn Axes.grid(self, False) if kind == 'polar': center = 0, 0 if self._overlay_axes is not None: self._overlay_axes.remove() self._overlay_axes = None if not b and b is not None: return if b is None: if grid_on: return if center is None or np.allclose(center, (np.pi/2, 0)): return Axes.grid(self, b, which, axis, **kwargs) self._add_overlay(center) self._overlay_axes.grid(True, which, axis, **kwargs) self._gridOn = True
[ "def", "grid", "(", "self", ",", "b", "=", "None", ",", "which", "=", "'major'", ",", "axis", "=", "'both'", ",", "kind", "=", "'arbitrary'", ",", "center", "=", "None", ",", "*", "*", "kwargs", ")", ":", "grid_on", "=", "self", ".", "_gridOn", "...
Usage is identical to a normal axes grid except for the ``kind`` and ``center`` kwargs. ``kind="polar"`` will add a polar overlay. The ``center`` and ``kind`` arguments allow you to add a grid from a differently-centered stereonet. This is useful for making "polar stereonets" that still use the same coordinate system as a standard stereonet. (i.e. a plane/line/whatever will have the same representation on both, but the grid is displayed differently.) To display a polar grid on a stereonet, use ``kind="polar"``. It is also often useful to display a grid relative to an arbitrary measurement (e.g. a lineation axis). In that case, use the ``lon_center`` and ``lat_center`` arguments. Note that these are in radians in "stereonet coordinates". Therefore, you'll often want to use one of the functions in ``stereonet_math`` to convert a line/plane/rake into the longitude and latitude you'd input here. For example: ``add_overlay(center=stereonet_math.line(plunge, bearing))``. If no parameters are specified, this is equivalent to turning on the standard grid.
[ "Usage", "is", "identical", "to", "a", "normal", "axes", "grid", "except", "for", "the", "kind", "and", "center", "kwargs", ".", "kind", "=", "polar", "will", "add", "a", "polar", "overlay", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L182-L229
train
54,047
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._polar
def _polar(self): """The "hidden" polar axis used for azimuth labels.""" # This will be called inside LambertAxes.__init__ as well as every # time the axis is cleared, so we need the try/except to avoid having # multiple hidden axes when `cla` is _manually_ called. try: return self._hidden_polar_axes except AttributeError: fig = self.get_figure() self._hidden_polar_axes = fig.add_axes(self.get_position(True), frameon=False, projection='polar') self._hidden_polar_axes.format_coord = self._polar_format_coord return self._hidden_polar_axes
python
def _polar(self): """The "hidden" polar axis used for azimuth labels.""" # This will be called inside LambertAxes.__init__ as well as every # time the axis is cleared, so we need the try/except to avoid having # multiple hidden axes when `cla` is _manually_ called. try: return self._hidden_polar_axes except AttributeError: fig = self.get_figure() self._hidden_polar_axes = fig.add_axes(self.get_position(True), frameon=False, projection='polar') self._hidden_polar_axes.format_coord = self._polar_format_coord return self._hidden_polar_axes
[ "def", "_polar", "(", "self", ")", ":", "# This will be called inside LambertAxes.__init__ as well as every", "# time the axis is cleared, so we need the try/except to avoid having", "# multiple hidden axes when `cla` is _manually_ called.", "try", ":", "return", "self", ".", "_hidden_po...
The "hidden" polar axis used for azimuth labels.
[ "The", "hidden", "polar", "axis", "used", "for", "azimuth", "labels", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L282-L294
train
54,048
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.set_azimuth_ticklabels
def set_azimuth_ticklabels(self, labels, fontdict=None, **kwargs): """ Sets the labels for the azimuthal ticks. Parameters ---------- labels : A sequence of strings Azimuth tick labels **kwargs Additional parameters are text properties for the labels. """ return self._polar.set_xticklabels(labels, fontdict, **kwargs)
python
def set_azimuth_ticklabels(self, labels, fontdict=None, **kwargs): """ Sets the labels for the azimuthal ticks. Parameters ---------- labels : A sequence of strings Azimuth tick labels **kwargs Additional parameters are text properties for the labels. """ return self._polar.set_xticklabels(labels, fontdict, **kwargs)
[ "def", "set_azimuth_ticklabels", "(", "self", ",", "labels", ",", "fontdict", "=", "None", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_polar", ".", "set_xticklabels", "(", "labels", ",", "fontdict", ",", "*", "*", "kwargs", ")" ]
Sets the labels for the azimuthal ticks. Parameters ---------- labels : A sequence of strings Azimuth tick labels **kwargs Additional parameters are text properties for the labels.
[ "Sets", "the", "labels", "for", "the", "azimuthal", "ticks", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L330-L341
train
54,049
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.plane
def plane(self, strike, dip, *args, **kwargs): """ Plot lines representing planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". segments : int, optional The number of vertices to use for the line. Defaults to 100. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the lines specified by `strike` and `dip`. """ segments = kwargs.pop('segments', 100) center = self._center_latitude, self._center_longitude lon, lat = stereonet_math.plane(strike, dip, segments, center) return self.plot(lon, lat, *args, **kwargs)
python
def plane(self, strike, dip, *args, **kwargs): """ Plot lines representing planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". segments : int, optional The number of vertices to use for the line. Defaults to 100. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the lines specified by `strike` and `dip`. """ segments = kwargs.pop('segments', 100) center = self._center_latitude, self._center_longitude lon, lat = stereonet_math.plane(strike, dip, segments, center) return self.plot(lon, lat, *args, **kwargs)
[ "def", "plane", "(", "self", ",", "strike", ",", "dip", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "segments", "=", "kwargs", ".", "pop", "(", "'segments'", ",", "100", ")", "center", "=", "self", ".", "_center_latitude", ",", "self", "."...
Plot lines representing planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". segments : int, optional The number of vertices to use for the line. Defaults to 100. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the lines specified by `strike` and `dip`.
[ "Plot", "lines", "representing", "planes", "on", "the", "axes", ".", "Additional", "arguments", "and", "keyword", "arguments", "are", "passed", "on", "to", "ax", ".", "plot", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L403-L426
train
54,050
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.pole
def pole(self, strike, dip, *args, **kwargs): """ Plot points representing poles to planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : numbers or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.pole(strike, dip) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot(lon, lat, *args, **kwargs)
python
def pole(self, strike, dip, *args, **kwargs): """ Plot points representing poles to planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : numbers or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.pole(strike, dip) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot(lon, lat, *args, **kwargs)
[ "def", "pole", "(", "self", ",", "strike", ",", "dip", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", "=", "stereonet_math", ".", "pole", "(", "strike", ",", "dip", ")", "args", ",", "kwargs", "=", "self", ".", "_point_pl...
Plot points representing poles to planes on the axes. Additional arguments and keyword arguments are passed on to `ax.plot`. Parameters ---------- strike, dip : numbers or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`.
[ "Plot", "points", "representing", "poles", "to", "planes", "on", "the", "axes", ".", "Additional", "arguments", "and", "keyword", "arguments", "are", "passed", "on", "to", "ax", ".", "plot", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L428-L448
train
54,051
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.rake
def rake(self, strike, dip, rake_angle, *args, **kwargs): """ Plot points representing lineations along planes on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". rake_angle : number or sequences of numbers The angle of the lineation(s) on the plane(s) measured in degrees downward from horizontal. Zero degrees corresponds to the "right hand" direction indicated by the strike, while negative angles are measured downward from the opposite strike direction. **kwargs Additional arguments are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.rake(strike, dip, rake_angle) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot(lon, lat, *args, **kwargs)
python
def rake(self, strike, dip, rake_angle, *args, **kwargs): """ Plot points representing lineations along planes on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". rake_angle : number or sequences of numbers The angle of the lineation(s) on the plane(s) measured in degrees downward from horizontal. Zero degrees corresponds to the "right hand" direction indicated by the strike, while negative angles are measured downward from the opposite strike direction. **kwargs Additional arguments are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.rake(strike, dip, rake_angle) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot(lon, lat, *args, **kwargs)
[ "def", "rake", "(", "self", ",", "strike", ",", "dip", ",", "rake_angle", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", "=", "stereonet_math", ".", "rake", "(", "strike", ",", "dip", ",", "rake_angle", ")", "args", ",", ...
Plot points representing lineations along planes on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- strike, dip : number or sequences of numbers The strike and dip of the plane(s) in degrees. The dip direction is defined by the strike following the "right-hand rule". rake_angle : number or sequences of numbers The angle of the lineation(s) on the plane(s) measured in degrees downward from horizontal. Zero degrees corresponds to the "right hand" direction indicated by the strike, while negative angles are measured downward from the opposite strike direction. **kwargs Additional arguments are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`.
[ "Plot", "points", "representing", "lineations", "along", "planes", "on", "the", "axes", ".", "Additional", "arguments", "and", "keyword", "arguments", "are", "passed", "on", "to", "plot", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L450-L475
train
54,052
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes.line
def line(self, plunge, bearing, *args, **kwargs): """ Plot points representing linear features on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- plunge, bearing : number or sequence of numbers The plunge and bearing of the line(s) in degrees. The plunge is measured in degrees downward from the end of the feature specified by the bearing. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.line(plunge, bearing) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot([lon], [lat], *args, **kwargs)
python
def line(self, plunge, bearing, *args, **kwargs): """ Plot points representing linear features on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- plunge, bearing : number or sequence of numbers The plunge and bearing of the line(s) in degrees. The plunge is measured in degrees downward from the end of the feature specified by the bearing. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`. """ lon, lat = stereonet_math.line(plunge, bearing) args, kwargs = self._point_plot_defaults(args, kwargs) return self.plot([lon], [lat], *args, **kwargs)
[ "def", "line", "(", "self", ",", "plunge", ",", "bearing", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", "=", "stereonet_math", ".", "line", "(", "plunge", ",", "bearing", ")", "args", ",", "kwargs", "=", "self", ".", "_...
Plot points representing linear features on the axes. Additional arguments and keyword arguments are passed on to `plot`. Parameters ---------- plunge, bearing : number or sequence of numbers The plunge and bearing of the line(s) in degrees. The plunge is measured in degrees downward from the end of the feature specified by the bearing. **kwargs Additional parameters are passed on to `plot`. Returns ------- A sequence of Line2D artists representing the point(s) specified by `strike` and `dip`.
[ "Plot", "points", "representing", "linear", "features", "on", "the", "axes", ".", "Additional", "arguments", "and", "keyword", "arguments", "are", "passed", "on", "to", "plot", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L477-L498
train
54,053
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._point_plot_defaults
def _point_plot_defaults(self, args, kwargs): """To avoid confusion for new users, this ensures that "scattered" points are plotted by by `plot` instead of points joined by a line. Parameters ---------- args : tuple Arguments representing additional parameters to be passed to `self.plot`. kwargs : dict Keyword arguments representing additional parameters to be passed to `self.plot`. Returns ------- Modified versions of `args` and `kwargs`. """ if args: return args, kwargs if 'ls' not in kwargs and 'linestyle' not in kwargs: kwargs['linestyle'] = 'none' if 'marker' not in kwargs: kwargs['marker'] = 'o' return args, kwargs
python
def _point_plot_defaults(self, args, kwargs): """To avoid confusion for new users, this ensures that "scattered" points are plotted by by `plot` instead of points joined by a line. Parameters ---------- args : tuple Arguments representing additional parameters to be passed to `self.plot`. kwargs : dict Keyword arguments representing additional parameters to be passed to `self.plot`. Returns ------- Modified versions of `args` and `kwargs`. """ if args: return args, kwargs if 'ls' not in kwargs and 'linestyle' not in kwargs: kwargs['linestyle'] = 'none' if 'marker' not in kwargs: kwargs['marker'] = 'o' return args, kwargs
[ "def", "_point_plot_defaults", "(", "self", ",", "args", ",", "kwargs", ")", ":", "if", "args", ":", "return", "args", ",", "kwargs", "if", "'ls'", "not", "in", "kwargs", "and", "'linestyle'", "not", "in", "kwargs", ":", "kwargs", "[", "'linestyle'", "]"...
To avoid confusion for new users, this ensures that "scattered" points are plotted by by `plot` instead of points joined by a line. Parameters ---------- args : tuple Arguments representing additional parameters to be passed to `self.plot`. kwargs : dict Keyword arguments representing additional parameters to be passed to `self.plot`. Returns ------- Modified versions of `args` and `kwargs`.
[ "To", "avoid", "confusion", "for", "new", "users", "this", "ensures", "that", "scattered", "points", "are", "plotted", "by", "by", "plot", "instead", "of", "points", "joined", "by", "a", "line", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L500-L524
train
54,054
joferkington/mplstereonet
mplstereonet/stereonet_axes.py
StereonetAxes._contour_helper
def _contour_helper(self, args, kwargs): """Unify defaults and common functionality of ``density_contour`` and ``density_contourf``.""" contour_kwargs = {} contour_kwargs['measurement'] = kwargs.pop('measurement', 'poles') contour_kwargs['method'] = kwargs.pop('method', 'exponential_kamb') contour_kwargs['sigma'] = kwargs.pop('sigma', 3) contour_kwargs['gridsize'] = kwargs.pop('gridsize', 100) contour_kwargs['weights'] = kwargs.pop('weights', None) lon, lat, totals = contouring.density_grid(*args, **contour_kwargs) return lon, lat, totals, kwargs
python
def _contour_helper(self, args, kwargs): """Unify defaults and common functionality of ``density_contour`` and ``density_contourf``.""" contour_kwargs = {} contour_kwargs['measurement'] = kwargs.pop('measurement', 'poles') contour_kwargs['method'] = kwargs.pop('method', 'exponential_kamb') contour_kwargs['sigma'] = kwargs.pop('sigma', 3) contour_kwargs['gridsize'] = kwargs.pop('gridsize', 100) contour_kwargs['weights'] = kwargs.pop('weights', None) lon, lat, totals = contouring.density_grid(*args, **contour_kwargs) return lon, lat, totals, kwargs
[ "def", "_contour_helper", "(", "self", ",", "args", ",", "kwargs", ")", ":", "contour_kwargs", "=", "{", "}", "contour_kwargs", "[", "'measurement'", "]", "=", "kwargs", ".", "pop", "(", "'measurement'", ",", "'poles'", ")", "contour_kwargs", "[", "'method'"...
Unify defaults and common functionality of ``density_contour`` and ``density_contourf``.
[ "Unify", "defaults", "and", "common", "functionality", "of", "density_contour", "and", "density_contourf", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_axes.py#L526-L536
train
54,055
joferkington/mplstereonet
examples/polar_overlay.py
basic
def basic(): """Set up a basic stereonet and plot the same data each time.""" fig, ax = mplstereonet.subplots() strike, dip = 315, 30 ax.plane(strike, dip, color='lightblue') ax.pole(strike, dip, color='green', markersize=15) ax.rake(strike, dip, 40, marker='*', markersize=20, color='green') # Make a bit of room for the title... fig.subplots_adjust(top=0.8) return ax
python
def basic(): """Set up a basic stereonet and plot the same data each time.""" fig, ax = mplstereonet.subplots() strike, dip = 315, 30 ax.plane(strike, dip, color='lightblue') ax.pole(strike, dip, color='green', markersize=15) ax.rake(strike, dip, 40, marker='*', markersize=20, color='green') # Make a bit of room for the title... fig.subplots_adjust(top=0.8) return ax
[ "def", "basic", "(", ")", ":", "fig", ",", "ax", "=", "mplstereonet", ".", "subplots", "(", ")", "strike", ",", "dip", "=", "315", ",", "30", "ax", ".", "plane", "(", "strike", ",", "dip", ",", "color", "=", "'lightblue'", ")", "ax", ".", "pole",...
Set up a basic stereonet and plot the same data each time.
[ "Set", "up", "a", "basic", "stereonet", "and", "plot", "the", "same", "data", "each", "time", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/polar_overlay.py#L23-L35
train
54,056
joferkington/mplstereonet
examples/stereonet_explanation.py
setup_figure
def setup_figure(): """Setup the figure and axes""" fig, axes = mplstereonet.subplots(ncols=2, figsize=(20,10)) for ax in axes: # Make the grid lines solid. ax.grid(ls='-') # Make the longitude grids continue all the way to the poles ax.set_longitude_grid_ends(90) return fig, axes
python
def setup_figure(): """Setup the figure and axes""" fig, axes = mplstereonet.subplots(ncols=2, figsize=(20,10)) for ax in axes: # Make the grid lines solid. ax.grid(ls='-') # Make the longitude grids continue all the way to the poles ax.set_longitude_grid_ends(90) return fig, axes
[ "def", "setup_figure", "(", ")", ":", "fig", ",", "axes", "=", "mplstereonet", ".", "subplots", "(", "ncols", "=", "2", ",", "figsize", "=", "(", "20", ",", "10", ")", ")", "for", "ax", "in", "axes", ":", "# Make the grid lines solid.", "ax", ".", "g...
Setup the figure and axes
[ "Setup", "the", "figure", "and", "axes" ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/stereonet_explanation.py#L17-L25
train
54,057
joferkington/mplstereonet
examples/stereonet_explanation.py
stereonet_projection_explanation
def stereonet_projection_explanation(ax): """Example to explain azimuth and dip on a lower-hemisphere stereonet.""" ax.set_title('Dip and Azimuth', y=1.1, size=18) # Set the azimuth ticks to be just "N", "E", etc. ax.set_azimuth_ticks(range(0, 360, 10)) # Hackishly set some of the azimuth labels to North, East, etc... fmt = ax.yaxis.get_major_formatter() labels = [fmt(item) for item in ax.get_azimuth_ticks()] labels[0] = 'North' labels[9] = 'East' labels[18] = 'South' labels[27] = 'West' ax.set_azimuth_ticklabels(labels) # Unhide the xticklabels and use them for dip labels ax.xaxis.set_tick_params(label1On=True) labels = list(range(10, 100, 10)) + list(range(80, 0, -10)) ax.set_xticks(np.radians(np.arange(-80, 90, 10))) ax.set_xticklabels([fmt(np.radians(item)) for item in labels]) ax.set_xlabel('Dip or Plunge') xlabel_halo(ax) return ax
python
def stereonet_projection_explanation(ax): """Example to explain azimuth and dip on a lower-hemisphere stereonet.""" ax.set_title('Dip and Azimuth', y=1.1, size=18) # Set the azimuth ticks to be just "N", "E", etc. ax.set_azimuth_ticks(range(0, 360, 10)) # Hackishly set some of the azimuth labels to North, East, etc... fmt = ax.yaxis.get_major_formatter() labels = [fmt(item) for item in ax.get_azimuth_ticks()] labels[0] = 'North' labels[9] = 'East' labels[18] = 'South' labels[27] = 'West' ax.set_azimuth_ticklabels(labels) # Unhide the xticklabels and use them for dip labels ax.xaxis.set_tick_params(label1On=True) labels = list(range(10, 100, 10)) + list(range(80, 0, -10)) ax.set_xticks(np.radians(np.arange(-80, 90, 10))) ax.set_xticklabels([fmt(np.radians(item)) for item in labels]) ax.set_xlabel('Dip or Plunge') xlabel_halo(ax) return ax
[ "def", "stereonet_projection_explanation", "(", "ax", ")", ":", "ax", ".", "set_title", "(", "'Dip and Azimuth'", ",", "y", "=", "1.1", ",", "size", "=", "18", ")", "# Set the azimuth ticks to be just \"N\", \"E\", etc.", "ax", ".", "set_azimuth_ticks", "(", "range"...
Example to explain azimuth and dip on a lower-hemisphere stereonet.
[ "Example", "to", "explain", "azimuth", "and", "dip", "on", "a", "lower", "-", "hemisphere", "stereonet", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/stereonet_explanation.py#L27-L52
train
54,058
joferkington/mplstereonet
examples/stereonet_explanation.py
native_projection_explanation
def native_projection_explanation(ax): """Example showing how the "native" longitude and latitude relate to the stereonet projection.""" ax.set_title('Longitude and Latitude', size=18, y=1.1) # Hide the azimuth labels ax.set_azimuth_ticklabels([]) # Make the axis tick labels visible: ax.set_xticks(np.radians(np.arange(-80, 90, 10))) ax.tick_params(label1On=True) ax.set_xlabel('Longitude') xlabel_halo(ax) return ax
python
def native_projection_explanation(ax): """Example showing how the "native" longitude and latitude relate to the stereonet projection.""" ax.set_title('Longitude and Latitude', size=18, y=1.1) # Hide the azimuth labels ax.set_azimuth_ticklabels([]) # Make the axis tick labels visible: ax.set_xticks(np.radians(np.arange(-80, 90, 10))) ax.tick_params(label1On=True) ax.set_xlabel('Longitude') xlabel_halo(ax) return ax
[ "def", "native_projection_explanation", "(", "ax", ")", ":", "ax", ".", "set_title", "(", "'Longitude and Latitude'", ",", "size", "=", "18", ",", "y", "=", "1.1", ")", "# Hide the azimuth labels", "ax", ".", "set_azimuth_ticklabels", "(", "[", "]", ")", "# Ma...
Example showing how the "native" longitude and latitude relate to the stereonet projection.
[ "Example", "showing", "how", "the", "native", "longitude", "and", "latitude", "relate", "to", "the", "stereonet", "projection", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/stereonet_explanation.py#L54-L69
train
54,059
joferkington/mplstereonet
examples/stereonet_explanation.py
xlabel_halo
def xlabel_halo(ax): """Add a white "halo" around the xlabels.""" import matplotlib.patheffects as effects for tick in ax.get_xticklabels() + [ax.xaxis.label]: tick.set_path_effects([effects.withStroke(linewidth=4, foreground='w')])
python
def xlabel_halo(ax): """Add a white "halo" around the xlabels.""" import matplotlib.patheffects as effects for tick in ax.get_xticklabels() + [ax.xaxis.label]: tick.set_path_effects([effects.withStroke(linewidth=4, foreground='w')])
[ "def", "xlabel_halo", "(", "ax", ")", ":", "import", "matplotlib", ".", "patheffects", "as", "effects", "for", "tick", "in", "ax", ".", "get_xticklabels", "(", ")", "+", "[", "ax", ".", "xaxis", ".", "label", "]", ":", "tick", ".", "set_path_effects", ...
Add a white "halo" around the xlabels.
[ "Add", "a", "white", "halo", "around", "the", "xlabels", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/examples/stereonet_explanation.py#L71-L75
train
54,060
joferkington/mplstereonet
mplstereonet/utilities.py
parse_strike_dip
def parse_strike_dip(strike, dip): """ Parses strings of strike and dip and returns strike and dip measurements following the right-hand-rule. Dip directions are parsed, and if the measurement does not follow the right-hand-rule, the opposite end of the strike measurement is returned. Accepts either quadrant-formatted or azimuth-formatted strikes. For example, this would convert a strike of "N30E" and a dip of "45NW" to a strike of 210 and a dip of 45. Parameters ---------- strike : string A strike measurement. May be in azimuth or quadrant format. dip : string The dip angle and direction of a plane. Returns ------- azi : float Azimuth in degrees of the strike of the plane with dip direction indicated following the right-hand-rule. dip : float Dip of the plane in degrees. """ strike = parse_azimuth(strike) dip, direction = split_trailing_letters(dip) if direction is not None: expected_direc = strike + 90 if opposite_end(expected_direc, direction): strike += 180 if strike > 360: strike -= 360 return strike, dip
python
def parse_strike_dip(strike, dip): """ Parses strings of strike and dip and returns strike and dip measurements following the right-hand-rule. Dip directions are parsed, and if the measurement does not follow the right-hand-rule, the opposite end of the strike measurement is returned. Accepts either quadrant-formatted or azimuth-formatted strikes. For example, this would convert a strike of "N30E" and a dip of "45NW" to a strike of 210 and a dip of 45. Parameters ---------- strike : string A strike measurement. May be in azimuth or quadrant format. dip : string The dip angle and direction of a plane. Returns ------- azi : float Azimuth in degrees of the strike of the plane with dip direction indicated following the right-hand-rule. dip : float Dip of the plane in degrees. """ strike = parse_azimuth(strike) dip, direction = split_trailing_letters(dip) if direction is not None: expected_direc = strike + 90 if opposite_end(expected_direc, direction): strike += 180 if strike > 360: strike -= 360 return strike, dip
[ "def", "parse_strike_dip", "(", "strike", ",", "dip", ")", ":", "strike", "=", "parse_azimuth", "(", "strike", ")", "dip", ",", "direction", "=", "split_trailing_letters", "(", "dip", ")", "if", "direction", "is", "not", "None", ":", "expected_direc", "=", ...
Parses strings of strike and dip and returns strike and dip measurements following the right-hand-rule. Dip directions are parsed, and if the measurement does not follow the right-hand-rule, the opposite end of the strike measurement is returned. Accepts either quadrant-formatted or azimuth-formatted strikes. For example, this would convert a strike of "N30E" and a dip of "45NW" to a strike of 210 and a dip of 45. Parameters ---------- strike : string A strike measurement. May be in azimuth or quadrant format. dip : string The dip angle and direction of a plane. Returns ------- azi : float Azimuth in degrees of the strike of the plane with dip direction indicated following the right-hand-rule. dip : float Dip of the plane in degrees.
[ "Parses", "strings", "of", "strike", "and", "dip", "and", "returns", "strike", "and", "dip", "measurements", "following", "the", "right", "-", "hand", "-", "rule", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/utilities.py#L5-L44
train
54,061
joferkington/mplstereonet
mplstereonet/utilities.py
parse_plunge_bearing
def parse_plunge_bearing(plunge, bearing): """ Parses strings of plunge and bearing and returns a consistent plunge and bearing measurement as floats. Plunge angles returned by this function will always be between 0 and 90. If no direction letter(s) is present, the plunge is assumed to be measured from the end specified by the bearing. If a direction letter(s) is present, the bearing will be switched to the opposite (180 degrees) end if the specified direction corresponds to the opposite end specified by the bearing. Parameters ---------- plunge : string A plunge measurement. bearing : string A bearing measurement. May be in azimuth or quadrant format. Returns ------- plunge, bearing: floats The plunge and bearing following the conventions outlined above. Examples --------- >>> parse_plunge_bearing("30NW", 160) ... (30, 340) """ bearing = parse_azimuth(bearing) plunge, direction = split_trailing_letters(plunge) if direction is not None: if opposite_end(bearing, direction): bearing +=180 if plunge < 0: bearing += 180 plunge = -plunge if plunge > 90: bearing += 180 plunge = 180 - plunge if bearing > 360: bearing -= 360 return plunge, bearing
python
def parse_plunge_bearing(plunge, bearing): """ Parses strings of plunge and bearing and returns a consistent plunge and bearing measurement as floats. Plunge angles returned by this function will always be between 0 and 90. If no direction letter(s) is present, the plunge is assumed to be measured from the end specified by the bearing. If a direction letter(s) is present, the bearing will be switched to the opposite (180 degrees) end if the specified direction corresponds to the opposite end specified by the bearing. Parameters ---------- plunge : string A plunge measurement. bearing : string A bearing measurement. May be in azimuth or quadrant format. Returns ------- plunge, bearing: floats The plunge and bearing following the conventions outlined above. Examples --------- >>> parse_plunge_bearing("30NW", 160) ... (30, 340) """ bearing = parse_azimuth(bearing) plunge, direction = split_trailing_letters(plunge) if direction is not None: if opposite_end(bearing, direction): bearing +=180 if plunge < 0: bearing += 180 plunge = -plunge if plunge > 90: bearing += 180 plunge = 180 - plunge if bearing > 360: bearing -= 360 return plunge, bearing
[ "def", "parse_plunge_bearing", "(", "plunge", ",", "bearing", ")", ":", "bearing", "=", "parse_azimuth", "(", "bearing", ")", "plunge", ",", "direction", "=", "split_trailing_letters", "(", "plunge", ")", "if", "direction", "is", "not", "None", ":", "if", "o...
Parses strings of plunge and bearing and returns a consistent plunge and bearing measurement as floats. Plunge angles returned by this function will always be between 0 and 90. If no direction letter(s) is present, the plunge is assumed to be measured from the end specified by the bearing. If a direction letter(s) is present, the bearing will be switched to the opposite (180 degrees) end if the specified direction corresponds to the opposite end specified by the bearing. Parameters ---------- plunge : string A plunge measurement. bearing : string A bearing measurement. May be in azimuth or quadrant format. Returns ------- plunge, bearing: floats The plunge and bearing following the conventions outlined above. Examples --------- >>> parse_plunge_bearing("30NW", 160) ... (30, 340)
[ "Parses", "strings", "of", "plunge", "and", "bearing", "and", "returns", "a", "consistent", "plunge", "and", "bearing", "measurement", "as", "floats", ".", "Plunge", "angles", "returned", "by", "this", "function", "will", "always", "be", "between", "0", "and",...
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/utilities.py#L93-L141
train
54,062
joferkington/mplstereonet
mplstereonet/utilities.py
dip_direction2strike
def dip_direction2strike(azimuth): """ Converts a planar measurment of dip direction using the dip-azimuth convention into a strike using the right-hand-rule. Parameters ---------- azimuth : number or string The dip direction of the plane in degrees. This can be either a numerical azimuth in the 0-360 range or a string representing a quadrant measurement (e.g. N30W). Returns ------- strike : number The strike of the plane in degrees following the right-hand-rule. """ azimuth = parse_azimuth(azimuth) strike = azimuth - 90 if strike < 0: strike += 360 return strike
python
def dip_direction2strike(azimuth): """ Converts a planar measurment of dip direction using the dip-azimuth convention into a strike using the right-hand-rule. Parameters ---------- azimuth : number or string The dip direction of the plane in degrees. This can be either a numerical azimuth in the 0-360 range or a string representing a quadrant measurement (e.g. N30W). Returns ------- strike : number The strike of the plane in degrees following the right-hand-rule. """ azimuth = parse_azimuth(azimuth) strike = azimuth - 90 if strike < 0: strike += 360 return strike
[ "def", "dip_direction2strike", "(", "azimuth", ")", ":", "azimuth", "=", "parse_azimuth", "(", "azimuth", ")", "strike", "=", "azimuth", "-", "90", "if", "strike", "<", "0", ":", "strike", "+=", "360", "return", "strike" ]
Converts a planar measurment of dip direction using the dip-azimuth convention into a strike using the right-hand-rule. Parameters ---------- azimuth : number or string The dip direction of the plane in degrees. This can be either a numerical azimuth in the 0-360 range or a string representing a quadrant measurement (e.g. N30W). Returns ------- strike : number The strike of the plane in degrees following the right-hand-rule.
[ "Converts", "a", "planar", "measurment", "of", "dip", "direction", "using", "the", "dip", "-", "azimuth", "convention", "into", "a", "strike", "using", "the", "right", "-", "hand", "-", "rule", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/utilities.py#L143-L164
train
54,063
joferkington/mplstereonet
mplstereonet/utilities.py
parse_azimuth
def parse_azimuth(azimuth): """ Parses an azimuth measurement in azimuth or quadrant format. Parameters ----------- azimuth : string or number An azimuth measurement in degrees or a quadrant measurement of azimuth. Returns ------- azi : float The azimuth in degrees clockwise from north (range: 0-360) See Also -------- parse_quadrant_measurement parse_strike_dip parse_plunge_bearing """ try: azimuth = float(azimuth) except ValueError: if not azimuth[0].isalpha(): raise ValueError('Ambiguous azimuth: {}'.format(azimuth)) azimuth = parse_quadrant_measurement(azimuth) return azimuth
python
def parse_azimuth(azimuth): """ Parses an azimuth measurement in azimuth or quadrant format. Parameters ----------- azimuth : string or number An azimuth measurement in degrees or a quadrant measurement of azimuth. Returns ------- azi : float The azimuth in degrees clockwise from north (range: 0-360) See Also -------- parse_quadrant_measurement parse_strike_dip parse_plunge_bearing """ try: azimuth = float(azimuth) except ValueError: if not azimuth[0].isalpha(): raise ValueError('Ambiguous azimuth: {}'.format(azimuth)) azimuth = parse_quadrant_measurement(azimuth) return azimuth
[ "def", "parse_azimuth", "(", "azimuth", ")", ":", "try", ":", "azimuth", "=", "float", "(", "azimuth", ")", "except", "ValueError", ":", "if", "not", "azimuth", "[", "0", "]", ".", "isalpha", "(", ")", ":", "raise", "ValueError", "(", "'Ambiguous azimuth...
Parses an azimuth measurement in azimuth or quadrant format. Parameters ----------- azimuth : string or number An azimuth measurement in degrees or a quadrant measurement of azimuth. Returns ------- azi : float The azimuth in degrees clockwise from north (range: 0-360) See Also -------- parse_quadrant_measurement parse_strike_dip parse_plunge_bearing
[ "Parses", "an", "azimuth", "measurement", "in", "azimuth", "or", "quadrant", "format", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/utilities.py#L223-L249
train
54,064
joferkington/mplstereonet
mplstereonet/utilities.py
parse_quadrant_measurement
def parse_quadrant_measurement(quad_azimuth): """ Parses a quadrant measurement of the form "AxxB", where A and B are cardinal directions and xx is an angle measured relative to those directions. In other words, it converts a measurement such as E30N into an azimuth of 60 degrees, or W10S into an azimuth of 260 degrees. For ambiguous quadrant measurements such as "N30S", a ValueError is raised. Parameters ----------- quad_azimuth : string An azimuth measurement in quadrant form. Returns ------- azi : float An azimuth in degrees clockwise from north. See Also -------- parse_azimuth """ def rotation_direction(first, second): return np.cross(_azimuth2vec(first), _azimuth2vec(second)) # Parse measurement quad_azimuth = quad_azimuth.strip() try: first_dir = quadrantletter_to_azimuth(quad_azimuth[0].upper()) sec_dir = quadrantletter_to_azimuth(quad_azimuth[-1].upper()) except KeyError: raise ValueError('{} is not a valid azimuth'.format(quad_azimuth)) angle = float(quad_azimuth[1:-1]) # Convert quadrant measurement into an azimuth direc = rotation_direction(first_dir, sec_dir) azi = first_dir + direc * angle # Catch ambiguous measurements such as N10S and raise an error if abs(direc) < 0.9: raise ValueError('{} is not a valid azimuth'.format(quad_azimuth)) # Ensure that 0 <= azi <= 360 if azi < 0: azi += 360 elif azi > 360: azi -= 360 return azi
python
def parse_quadrant_measurement(quad_azimuth): """ Parses a quadrant measurement of the form "AxxB", where A and B are cardinal directions and xx is an angle measured relative to those directions. In other words, it converts a measurement such as E30N into an azimuth of 60 degrees, or W10S into an azimuth of 260 degrees. For ambiguous quadrant measurements such as "N30S", a ValueError is raised. Parameters ----------- quad_azimuth : string An azimuth measurement in quadrant form. Returns ------- azi : float An azimuth in degrees clockwise from north. See Also -------- parse_azimuth """ def rotation_direction(first, second): return np.cross(_azimuth2vec(first), _azimuth2vec(second)) # Parse measurement quad_azimuth = quad_azimuth.strip() try: first_dir = quadrantletter_to_azimuth(quad_azimuth[0].upper()) sec_dir = quadrantletter_to_azimuth(quad_azimuth[-1].upper()) except KeyError: raise ValueError('{} is not a valid azimuth'.format(quad_azimuth)) angle = float(quad_azimuth[1:-1]) # Convert quadrant measurement into an azimuth direc = rotation_direction(first_dir, sec_dir) azi = first_dir + direc * angle # Catch ambiguous measurements such as N10S and raise an error if abs(direc) < 0.9: raise ValueError('{} is not a valid azimuth'.format(quad_azimuth)) # Ensure that 0 <= azi <= 360 if azi < 0: azi += 360 elif azi > 360: azi -= 360 return azi
[ "def", "parse_quadrant_measurement", "(", "quad_azimuth", ")", ":", "def", "rotation_direction", "(", "first", ",", "second", ")", ":", "return", "np", ".", "cross", "(", "_azimuth2vec", "(", "first", ")", ",", "_azimuth2vec", "(", "second", ")", ")", "# Par...
Parses a quadrant measurement of the form "AxxB", where A and B are cardinal directions and xx is an angle measured relative to those directions. In other words, it converts a measurement such as E30N into an azimuth of 60 degrees, or W10S into an azimuth of 260 degrees. For ambiguous quadrant measurements such as "N30S", a ValueError is raised. Parameters ----------- quad_azimuth : string An azimuth measurement in quadrant form. Returns ------- azi : float An azimuth in degrees clockwise from north. See Also -------- parse_azimuth
[ "Parses", "a", "quadrant", "measurement", "of", "the", "form", "AxxB", "where", "A", "and", "B", "are", "cardinal", "directions", "and", "xx", "is", "an", "angle", "measured", "relative", "to", "those", "directions", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/utilities.py#L251-L302
train
54,065
joferkington/mplstereonet
mplstereonet/stereonet_transforms.py
BaseStereonetTransform.inverted
def inverted(self): """Return the inverse of the transform.""" # This is a bit of hackery so that we can put a single "inverse" # function here. If we just made "self._inverse_type" point to the class # in question, it wouldn't be defined yet. This way, it's done at # at runtime and we avoid the definition problem. Hackish, but better # than repeating code everywhere or making a relatively complex # metaclass. inverse_type = globals()[self._inverse_type] return inverse_type(self._center_longitude, self._center_latitude, self._resolution)
python
def inverted(self): """Return the inverse of the transform.""" # This is a bit of hackery so that we can put a single "inverse" # function here. If we just made "self._inverse_type" point to the class # in question, it wouldn't be defined yet. This way, it's done at # at runtime and we avoid the definition problem. Hackish, but better # than repeating code everywhere or making a relatively complex # metaclass. inverse_type = globals()[self._inverse_type] return inverse_type(self._center_longitude, self._center_latitude, self._resolution)
[ "def", "inverted", "(", "self", ")", ":", "# This is a bit of hackery so that we can put a single \"inverse\"", "# function here. If we just made \"self._inverse_type\" point to the class", "# in question, it wouldn't be defined yet. This way, it's done at", "# at runtime and we avoid the definiti...
Return the inverse of the transform.
[ "Return", "the", "inverse", "of", "the", "transform", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_transforms.py#L54-L64
train
54,066
joferkington/mplstereonet
mplstereonet/stereonet_math.py
cart2sph
def cart2sph(x, y, z): """ Converts cartesian coordinates `x`, `y`, `z` into a longitude and latitude. x=0, y=0, z=0 is assumed to correspond to the center of the globe. Returns lon and lat in radians. Parameters ---------- `x`, `y`, `z` : Arrays of cartesian coordinates Returns ------- lon : Longitude in radians lat : Latitude in radians """ r = np.sqrt(x**2 + y**2 + z**2) lat = np.arcsin(z/r) lon = np.arctan2(y, x) return lon, lat
python
def cart2sph(x, y, z): """ Converts cartesian coordinates `x`, `y`, `z` into a longitude and latitude. x=0, y=0, z=0 is assumed to correspond to the center of the globe. Returns lon and lat in radians. Parameters ---------- `x`, `y`, `z` : Arrays of cartesian coordinates Returns ------- lon : Longitude in radians lat : Latitude in radians """ r = np.sqrt(x**2 + y**2 + z**2) lat = np.arcsin(z/r) lon = np.arctan2(y, x) return lon, lat
[ "def", "cart2sph", "(", "x", ",", "y", ",", "z", ")", ":", "r", "=", "np", ".", "sqrt", "(", "x", "**", "2", "+", "y", "**", "2", "+", "z", "**", "2", ")", "lat", "=", "np", ".", "arcsin", "(", "z", "/", "r", ")", "lon", "=", "np", "....
Converts cartesian coordinates `x`, `y`, `z` into a longitude and latitude. x=0, y=0, z=0 is assumed to correspond to the center of the globe. Returns lon and lat in radians. Parameters ---------- `x`, `y`, `z` : Arrays of cartesian coordinates Returns ------- lon : Longitude in radians lat : Latitude in radians
[ "Converts", "cartesian", "coordinates", "x", "y", "z", "into", "a", "longitude", "and", "latitude", ".", "x", "=", "0", "y", "=", "0", "z", "=", "0", "is", "assumed", "to", "correspond", "to", "the", "center", "of", "the", "globe", ".", "Returns", "l...
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_math.py#L50-L68
train
54,067
joferkington/mplstereonet
mplstereonet/stereonet_math.py
plane
def plane(strike, dip, segments=100, center=(0, 0)): """ Calculates the longitude and latitude of `segments` points along the stereonet projection of each plane with a given `strike` and `dip` in degrees. Returns points for one hemisphere only. Parameters ---------- strike : number or sequence of numbers The strike of the plane(s) in degrees, with dip direction indicated by the azimuth (e.g. 315 vs. 135) specified following the "right hand rule". dip : number or sequence of numbers The dip of the plane(s) in degrees. segments : number or sequence of numbers The number of points in the returned `lon` and `lat` arrays. Defaults to 100 segments. center : sequence of two numbers (lon, lat) The longitude and latitude of the center of the hemisphere that the returned points will be in. Defaults to 0,0 (approriate for a typical stereonet). Returns ------- lon, lat : arrays `num_segments` x `num_strikes` arrays of longitude and latitude in radians. """ lon0, lat0 = center strikes, dips = np.atleast_1d(strike, dip) lons = np.zeros((segments, strikes.size), dtype=np.float) lats = lons.copy() for i, (strike, dip) in enumerate(zip(strikes, dips)): # We just plot a line of constant longitude and rotate it by the strike. dip = 90 - dip lon = dip * np.ones(segments) lat = np.linspace(-90, 90, segments) lon, lat = _rotate(lon, lat, strike) if lat0 != 0 or lon0 != 0: dist = angular_distance([lon, lat], [lon0, lat0], False) mask = dist > (np.pi / 2) lon[mask], lat[mask] = antipode(lon[mask], lat[mask]) change = np.diff(mask.astype(int)) ind = np.flatnonzero(change) + 1 lat = np.hstack(np.split(lat, ind)[::-1]) lon = np.hstack(np.split(lon, ind)[::-1]) lons[:,i] = lon lats[:,i] = lat return lons, lats
python
def plane(strike, dip, segments=100, center=(0, 0)): """ Calculates the longitude and latitude of `segments` points along the stereonet projection of each plane with a given `strike` and `dip` in degrees. Returns points for one hemisphere only. Parameters ---------- strike : number or sequence of numbers The strike of the plane(s) in degrees, with dip direction indicated by the azimuth (e.g. 315 vs. 135) specified following the "right hand rule". dip : number or sequence of numbers The dip of the plane(s) in degrees. segments : number or sequence of numbers The number of points in the returned `lon` and `lat` arrays. Defaults to 100 segments. center : sequence of two numbers (lon, lat) The longitude and latitude of the center of the hemisphere that the returned points will be in. Defaults to 0,0 (approriate for a typical stereonet). Returns ------- lon, lat : arrays `num_segments` x `num_strikes` arrays of longitude and latitude in radians. """ lon0, lat0 = center strikes, dips = np.atleast_1d(strike, dip) lons = np.zeros((segments, strikes.size), dtype=np.float) lats = lons.copy() for i, (strike, dip) in enumerate(zip(strikes, dips)): # We just plot a line of constant longitude and rotate it by the strike. dip = 90 - dip lon = dip * np.ones(segments) lat = np.linspace(-90, 90, segments) lon, lat = _rotate(lon, lat, strike) if lat0 != 0 or lon0 != 0: dist = angular_distance([lon, lat], [lon0, lat0], False) mask = dist > (np.pi / 2) lon[mask], lat[mask] = antipode(lon[mask], lat[mask]) change = np.diff(mask.astype(int)) ind = np.flatnonzero(change) + 1 lat = np.hstack(np.split(lat, ind)[::-1]) lon = np.hstack(np.split(lon, ind)[::-1]) lons[:,i] = lon lats[:,i] = lat return lons, lats
[ "def", "plane", "(", "strike", ",", "dip", ",", "segments", "=", "100", ",", "center", "=", "(", "0", ",", "0", ")", ")", ":", "lon0", ",", "lat0", "=", "center", "strikes", ",", "dips", "=", "np", ".", "atleast_1d", "(", "strike", ",", "dip", ...
Calculates the longitude and latitude of `segments` points along the stereonet projection of each plane with a given `strike` and `dip` in degrees. Returns points for one hemisphere only. Parameters ---------- strike : number or sequence of numbers The strike of the plane(s) in degrees, with dip direction indicated by the azimuth (e.g. 315 vs. 135) specified following the "right hand rule". dip : number or sequence of numbers The dip of the plane(s) in degrees. segments : number or sequence of numbers The number of points in the returned `lon` and `lat` arrays. Defaults to 100 segments. center : sequence of two numbers (lon, lat) The longitude and latitude of the center of the hemisphere that the returned points will be in. Defaults to 0,0 (approriate for a typical stereonet). Returns ------- lon, lat : arrays `num_segments` x `num_strikes` arrays of longitude and latitude in radians.
[ "Calculates", "the", "longitude", "and", "latitude", "of", "segments", "points", "along", "the", "stereonet", "projection", "of", "each", "plane", "with", "a", "given", "strike", "and", "dip", "in", "degrees", ".", "Returns", "points", "for", "one", "hemispher...
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_math.py#L130-L181
train
54,068
joferkington/mplstereonet
mplstereonet/stereonet_math.py
mean_vector
def mean_vector(lons, lats): """ Returns the resultant vector from a series of longitudes and latitudes Parameters ---------- lons : array-like A sequence of longitudes (in radians) lats : array-like A sequence of latitudes (in radians) Returns ------- mean_vec : tuple (lon, lat) in radians r_value : number The magnitude of the resultant vector (between 0 and 1) This represents the degree of clustering in the data. """ xyz = sph2cart(lons, lats) xyz = np.vstack(xyz).T mean_vec = xyz.mean(axis=0) r_value = np.linalg.norm(mean_vec) mean_vec = cart2sph(*mean_vec) return mean_vec, r_value
python
def mean_vector(lons, lats): """ Returns the resultant vector from a series of longitudes and latitudes Parameters ---------- lons : array-like A sequence of longitudes (in radians) lats : array-like A sequence of latitudes (in radians) Returns ------- mean_vec : tuple (lon, lat) in radians r_value : number The magnitude of the resultant vector (between 0 and 1) This represents the degree of clustering in the data. """ xyz = sph2cart(lons, lats) xyz = np.vstack(xyz).T mean_vec = xyz.mean(axis=0) r_value = np.linalg.norm(mean_vec) mean_vec = cart2sph(*mean_vec) return mean_vec, r_value
[ "def", "mean_vector", "(", "lons", ",", "lats", ")", ":", "xyz", "=", "sph2cart", "(", "lons", ",", "lats", ")", "xyz", "=", "np", ".", "vstack", "(", "xyz", ")", ".", "T", "mean_vec", "=", "xyz", ".", "mean", "(", "axis", "=", "0", ")", "r_val...
Returns the resultant vector from a series of longitudes and latitudes Parameters ---------- lons : array-like A sequence of longitudes (in radians) lats : array-like A sequence of latitudes (in radians) Returns ------- mean_vec : tuple (lon, lat) in radians r_value : number The magnitude of the resultant vector (between 0 and 1) This represents the degree of clustering in the data.
[ "Returns", "the", "resultant", "vector", "from", "a", "series", "of", "longitudes", "and", "latitudes" ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_math.py#L356-L380
train
54,069
joferkington/mplstereonet
mplstereonet/stereonet_math.py
azimuth2rake
def azimuth2rake(strike, dip, azimuth): """ Projects an azimuth of a linear feature onto a plane as a rake angle. Parameters ---------- strike, dip : numbers The strike and dip of the plane in degrees following the right-hand-rule. azimuth : numbers The azimuth of the linear feature in degrees clockwise from north (i.e. a 0-360 azimuth). Returns ------- rake : number A rake angle in degrees measured downwards from horizontal. Negative values correspond to the opposite end of the strike. """ plunge, bearing = plane_intersection(strike, dip, azimuth, 90) rake = project_onto_plane(strike, dip, plunge, bearing) return rake
python
def azimuth2rake(strike, dip, azimuth): """ Projects an azimuth of a linear feature onto a plane as a rake angle. Parameters ---------- strike, dip : numbers The strike and dip of the plane in degrees following the right-hand-rule. azimuth : numbers The azimuth of the linear feature in degrees clockwise from north (i.e. a 0-360 azimuth). Returns ------- rake : number A rake angle in degrees measured downwards from horizontal. Negative values correspond to the opposite end of the strike. """ plunge, bearing = plane_intersection(strike, dip, azimuth, 90) rake = project_onto_plane(strike, dip, plunge, bearing) return rake
[ "def", "azimuth2rake", "(", "strike", ",", "dip", ",", "azimuth", ")", ":", "plunge", ",", "bearing", "=", "plane_intersection", "(", "strike", ",", "dip", ",", "azimuth", ",", "90", ")", "rake", "=", "project_onto_plane", "(", "strike", ",", "dip", ",",...
Projects an azimuth of a linear feature onto a plane as a rake angle. Parameters ---------- strike, dip : numbers The strike and dip of the plane in degrees following the right-hand-rule. azimuth : numbers The azimuth of the linear feature in degrees clockwise from north (i.e. a 0-360 azimuth). Returns ------- rake : number A rake angle in degrees measured downwards from horizontal. Negative values correspond to the opposite end of the strike.
[ "Projects", "an", "azimuth", "of", "a", "linear", "feature", "onto", "a", "plane", "as", "a", "rake", "angle", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_math.py#L580-L601
train
54,070
joferkington/mplstereonet
mplstereonet/stereonet_math.py
xyz2stereonet
def xyz2stereonet(x, y, z): """ Converts x, y, z in _world_ cartesian coordinates into lower-hemisphere stereonet coordinates. Parameters ---------- x, y, z : array-likes Sequences of world coordinates Returns ------- lon, lat : arrays Sequences of longitudes and latitudes (in radians) """ x, y, z = np.atleast_1d(x, y, z) return cart2sph(-z, x, y)
python
def xyz2stereonet(x, y, z): """ Converts x, y, z in _world_ cartesian coordinates into lower-hemisphere stereonet coordinates. Parameters ---------- x, y, z : array-likes Sequences of world coordinates Returns ------- lon, lat : arrays Sequences of longitudes and latitudes (in radians) """ x, y, z = np.atleast_1d(x, y, z) return cart2sph(-z, x, y)
[ "def", "xyz2stereonet", "(", "x", ",", "y", ",", "z", ")", ":", "x", ",", "y", ",", "z", "=", "np", ".", "atleast_1d", "(", "x", ",", "y", ",", "z", ")", "return", "cart2sph", "(", "-", "z", ",", "x", ",", "y", ")" ]
Converts x, y, z in _world_ cartesian coordinates into lower-hemisphere stereonet coordinates. Parameters ---------- x, y, z : array-likes Sequences of world coordinates Returns ------- lon, lat : arrays Sequences of longitudes and latitudes (in radians)
[ "Converts", "x", "y", "z", "in", "_world_", "cartesian", "coordinates", "into", "lower", "-", "hemisphere", "stereonet", "coordinates", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_math.py#L603-L619
train
54,071
joferkington/mplstereonet
mplstereonet/stereonet_math.py
stereonet2xyz
def stereonet2xyz(lon, lat): """ Converts a sequence of longitudes and latitudes from a lower-hemisphere stereonet into _world_ x,y,z coordinates. Parameters ---------- lon, lat : array-likes Sequences of longitudes and latitudes (in radians) from a lower-hemisphere stereonet Returns ------- x, y, z : arrays The world x,y,z components of the vectors represented by the lon, lat coordinates on the stereonet. """ lon, lat = np.atleast_1d(lon, lat) x, y, z = sph2cart(lon, lat) return y, z, -x
python
def stereonet2xyz(lon, lat): """ Converts a sequence of longitudes and latitudes from a lower-hemisphere stereonet into _world_ x,y,z coordinates. Parameters ---------- lon, lat : array-likes Sequences of longitudes and latitudes (in radians) from a lower-hemisphere stereonet Returns ------- x, y, z : arrays The world x,y,z components of the vectors represented by the lon, lat coordinates on the stereonet. """ lon, lat = np.atleast_1d(lon, lat) x, y, z = sph2cart(lon, lat) return y, z, -x
[ "def", "stereonet2xyz", "(", "lon", ",", "lat", ")", ":", "lon", ",", "lat", "=", "np", ".", "atleast_1d", "(", "lon", ",", "lat", ")", "x", ",", "y", ",", "z", "=", "sph2cart", "(", "lon", ",", "lat", ")", "return", "y", ",", "z", ",", "-", ...
Converts a sequence of longitudes and latitudes from a lower-hemisphere stereonet into _world_ x,y,z coordinates. Parameters ---------- lon, lat : array-likes Sequences of longitudes and latitudes (in radians) from a lower-hemisphere stereonet Returns ------- x, y, z : arrays The world x,y,z components of the vectors represented by the lon, lat coordinates on the stereonet.
[ "Converts", "a", "sequence", "of", "longitudes", "and", "latitudes", "from", "a", "lower", "-", "hemisphere", "stereonet", "into", "_world_", "x", "y", "z", "coordinates", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_math.py#L621-L640
train
54,072
joferkington/mplstereonet
mplstereonet/stereonet_math.py
_repole
def _repole(lon, lat, center): """ Reproject data such that ``center`` is the north pole. Returns lon, lat in the new, rotated reference frame. This is currently a sketch for a later function. Do not assume it works correctly. """ vec3 = sph2cart(*center) vec3 = np.squeeze(vec3) if not np.allclose(vec3, [0, 0, 1]): vec1 = np.cross(vec3, [0, 0, 1]) else: vec1 = np.cross(vec3, [1, 0, 0]) vec2 = np.cross(vec3, vec1) vecs = [item / np.linalg.norm(item) for item in [vec1, vec2, vec3]] basis = np.column_stack(vecs) xyz = sph2cart(lon, lat) xyz = np.column_stack(xyz) prime = xyz.dot(np.linalg.inv(basis)) lon, lat = cart2sph(*prime.T) return lon[:,None], lat[:,None]
python
def _repole(lon, lat, center): """ Reproject data such that ``center`` is the north pole. Returns lon, lat in the new, rotated reference frame. This is currently a sketch for a later function. Do not assume it works correctly. """ vec3 = sph2cart(*center) vec3 = np.squeeze(vec3) if not np.allclose(vec3, [0, 0, 1]): vec1 = np.cross(vec3, [0, 0, 1]) else: vec1 = np.cross(vec3, [1, 0, 0]) vec2 = np.cross(vec3, vec1) vecs = [item / np.linalg.norm(item) for item in [vec1, vec2, vec3]] basis = np.column_stack(vecs) xyz = sph2cart(lon, lat) xyz = np.column_stack(xyz) prime = xyz.dot(np.linalg.inv(basis)) lon, lat = cart2sph(*prime.T) return lon[:,None], lat[:,None]
[ "def", "_repole", "(", "lon", ",", "lat", ",", "center", ")", ":", "vec3", "=", "sph2cart", "(", "*", "center", ")", "vec3", "=", "np", ".", "squeeze", "(", "vec3", ")", "if", "not", "np", ".", "allclose", "(", "vec3", ",", "[", "0", ",", "0", ...
Reproject data such that ``center`` is the north pole. Returns lon, lat in the new, rotated reference frame. This is currently a sketch for a later function. Do not assume it works correctly.
[ "Reproject", "data", "such", "that", "center", "is", "the", "north", "pole", ".", "Returns", "lon", "lat", "in", "the", "new", "rotated", "reference", "frame", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/stereonet_math.py#L765-L787
train
54,073
joferkington/mplstereonet
mplstereonet/analysis.py
_sd_of_eigenvector
def _sd_of_eigenvector(data, vec, measurement='poles', bidirectional=True): """Unifies ``fit_pole`` and ``fit_girdle``.""" lon, lat = _convert_measurements(data, measurement) vals, vecs = cov_eig(lon, lat, bidirectional) x, y, z = vecs[:, vec] s, d = stereonet_math.geographic2pole(*stereonet_math.cart2sph(x, y, z)) return s[0], d[0]
python
def _sd_of_eigenvector(data, vec, measurement='poles', bidirectional=True): """Unifies ``fit_pole`` and ``fit_girdle``.""" lon, lat = _convert_measurements(data, measurement) vals, vecs = cov_eig(lon, lat, bidirectional) x, y, z = vecs[:, vec] s, d = stereonet_math.geographic2pole(*stereonet_math.cart2sph(x, y, z)) return s[0], d[0]
[ "def", "_sd_of_eigenvector", "(", "data", ",", "vec", ",", "measurement", "=", "'poles'", ",", "bidirectional", "=", "True", ")", ":", "lon", ",", "lat", "=", "_convert_measurements", "(", "data", ",", "measurement", ")", "vals", ",", "vecs", "=", "cov_eig...
Unifies ``fit_pole`` and ``fit_girdle``.
[ "Unifies", "fit_pole", "and", "fit_girdle", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/analysis.py#L126-L132
train
54,074
joferkington/mplstereonet
mplstereonet/analysis.py
find_mean_vector
def find_mean_vector(*args, **kwargs): """ Returns the mean vector for a set of measurments. By default, this expects the input to be plunges and bearings, but the type of input can be controlled through the ``measurement`` kwarg. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``plunge`` & ``bearing``, both array-like sequences representing linear features. (Rake measurements require three parameters, thus the variable number of arguments.) The *measurement* kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"lines"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- mean_vector : tuple of two floats The plunge and bearing of the mean vector (in degrees). r_value : float The length of the mean vector (a value between 0 and 1). """ lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines')) vector, r_value = stereonet_math.mean_vector(lon, lat) plunge, bearing = stereonet_math.geographic2plunge_bearing(*vector) return (plunge[0], bearing[0]), r_value
python
def find_mean_vector(*args, **kwargs): """ Returns the mean vector for a set of measurments. By default, this expects the input to be plunges and bearings, but the type of input can be controlled through the ``measurement`` kwarg. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``plunge`` & ``bearing``, both array-like sequences representing linear features. (Rake measurements require three parameters, thus the variable number of arguments.) The *measurement* kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"lines"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- mean_vector : tuple of two floats The plunge and bearing of the mean vector (in degrees). r_value : float The length of the mean vector (a value between 0 and 1). """ lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines')) vector, r_value = stereonet_math.mean_vector(lon, lat) plunge, bearing = stereonet_math.geographic2plunge_bearing(*vector) return (plunge[0], bearing[0]), r_value
[ "def", "find_mean_vector", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", "=", "_convert_measurements", "(", "args", ",", "kwargs", ".", "get", "(", "'measurement'", ",", "'lines'", ")", ")", "vector", ",", "r_value", "=", "ste...
Returns the mean vector for a set of measurments. By default, this expects the input to be plunges and bearings, but the type of input can be controlled through the ``measurement`` kwarg. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``plunge`` & ``bearing``, both array-like sequences representing linear features. (Rake measurements require three parameters, thus the variable number of arguments.) The *measurement* kwarg controls how these arguments are interpreted. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"lines"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- mean_vector : tuple of two floats The plunge and bearing of the mean vector (in degrees). r_value : float The length of the mean vector (a value between 0 and 1).
[ "Returns", "the", "mean", "vector", "for", "a", "set", "of", "measurments", ".", "By", "default", "this", "expects", "the", "input", "to", "be", "plunges", "and", "bearings", "but", "the", "type", "of", "input", "can", "be", "controlled", "through", "the",...
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/analysis.py#L219-L264
train
54,075
joferkington/mplstereonet
mplstereonet/analysis.py
find_fisher_stats
def find_fisher_stats(*args, **kwargs): """ Returns the mean vector and summary statistics for a set of measurements. By default, this expects the input to be plunges and bearings, but the type of input can be controlled through the ``measurement`` kwarg. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``plunge`` & ``bearing``, both array-like sequences representing linear features. (Rake measurements require three parameters, thus the variable number of arguments.) The *measurement* kwarg controls how these arguments are interpreted. conf : number The confidence level (0-100). Defaults to 95%, similar to 2 sigma. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"lines"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- mean_vector: tuple of two floats A set consisting of the plunge and bearing of the mean vector (in degrees). stats : tuple of three floats ``(r_value, confidence, kappa)`` The ``r_value`` is the magnitude of the mean vector as a number between 0 and 1. The ``confidence`` radius is the opening angle of a small circle that corresponds to the confidence in the calculated direction, and is dependent on the input ``conf``. The ``kappa`` value is the dispersion factor that quantifies the amount of dispersion of the given vectors, analgous to a variance/stddev. """ # How the heck did this wind up as a separate function? 
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines')) conf = kwargs.get('conf', 95) center, stats = stereonet_math.fisher_stats(lon, lat, conf) plunge, bearing = stereonet_math.geographic2plunge_bearing(*center) mean_vector = (plunge[0], bearing[0]) return mean_vector, stats
python
def find_fisher_stats(*args, **kwargs): """ Returns the mean vector and summary statistics for a set of measurements. By default, this expects the input to be plunges and bearings, but the type of input can be controlled through the ``measurement`` kwarg. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``plunge`` & ``bearing``, both array-like sequences representing linear features. (Rake measurements require three parameters, thus the variable number of arguments.) The *measurement* kwarg controls how these arguments are interpreted. conf : number The confidence level (0-100). Defaults to 95%, similar to 2 sigma. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"lines"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- mean_vector: tuple of two floats A set consisting of the plunge and bearing of the mean vector (in degrees). stats : tuple of three floats ``(r_value, confidence, kappa)`` The ``r_value`` is the magnitude of the mean vector as a number between 0 and 1. The ``confidence`` radius is the opening angle of a small circle that corresponds to the confidence in the calculated direction, and is dependent on the input ``conf``. The ``kappa`` value is the dispersion factor that quantifies the amount of dispersion of the given vectors, analgous to a variance/stddev. """ # How the heck did this wind up as a separate function? 
lon, lat = _convert_measurements(args, kwargs.get('measurement', 'lines')) conf = kwargs.get('conf', 95) center, stats = stereonet_math.fisher_stats(lon, lat, conf) plunge, bearing = stereonet_math.geographic2plunge_bearing(*center) mean_vector = (plunge[0], bearing[0]) return mean_vector, stats
[ "def", "find_fisher_stats", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# How the heck did this wind up as a separate function?", "lon", ",", "lat", "=", "_convert_measurements", "(", "args", ",", "kwargs", ".", "get", "(", "'measurement'", ",", "'lines'...
Returns the mean vector and summary statistics for a set of measurements. By default, this expects the input to be plunges and bearings, but the type of input can be controlled through the ``measurement`` kwarg. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``plunge`` & ``bearing``, both array-like sequences representing linear features. (Rake measurements require three parameters, thus the variable number of arguments.) The *measurement* kwarg controls how these arguments are interpreted. conf : number The confidence level (0-100). Defaults to 95%, similar to 2 sigma. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"lines"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- mean_vector: tuple of two floats A set consisting of the plunge and bearing of the mean vector (in degrees). stats : tuple of three floats ``(r_value, confidence, kappa)`` The ``r_value`` is the magnitude of the mean vector as a number between 0 and 1. The ``confidence`` radius is the opening angle of a small circle that corresponds to the confidence in the calculated direction, and is dependent on the input ``conf``. The ``kappa`` value is the dispersion factor that quantifies the amount of dispersion of the given vectors, analgous to a variance/stddev.
[ "Returns", "the", "mean", "vector", "and", "summary", "statistics", "for", "a", "set", "of", "measurements", ".", "By", "default", "this", "expects", "the", "input", "to", "be", "plunges", "and", "bearings", "but", "the", "type", "of", "input", "can", "be"...
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/analysis.py#L266-L323
train
54,076
joferkington/mplstereonet
mplstereonet/analysis.py
kmeans
def kmeans(*args, **kwargs): """ Find centers of multi-modal clusters of data using a kmeans approach modified for spherical measurements. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. num : int The number of clusters to find. Defaults to 2. bidirectional : bool Whether or not the measurements are bi-directional linear/planar features or directed vectors. Defaults to True. tolerance : float Iteration will continue until the centers have not changed by more than this amount. Defaults to 1e-5. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- centers : An Nx2 array-like Longitude and latitude in radians of the centers of each cluster. 
""" lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles')) num = kwargs.get('num', 2) bidirectional = kwargs.get('bidirectional', True) tolerance = kwargs.get('tolerance', 1e-5) points = lon, lat dist = lambda x: stereonet_math.angular_distance(x, points, bidirectional) center_lon = np.random.choice(lon, num) center_lat = np.random.choice(lat, num) centers = np.column_stack([center_lon, center_lat]) while True: dists = np.array([dist(item) for item in centers]).T closest = dists.argmin(axis=1) new_centers = [] for i in range(num): mask = mask = closest == i _, vecs = cov_eig(lon[mask], lat[mask], bidirectional) new_centers.append(stereonet_math.cart2sph(*vecs[:,-1])) if np.allclose(centers, new_centers, atol=tolerance): break else: centers = new_centers return centers
python
def kmeans(*args, **kwargs): """ Find centers of multi-modal clusters of data using a kmeans approach modified for spherical measurements. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. num : int The number of clusters to find. Defaults to 2. bidirectional : bool Whether or not the measurements are bi-directional linear/planar features or directed vectors. Defaults to True. tolerance : float Iteration will continue until the centers have not changed by more than this amount. Defaults to 1e-5. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- centers : An Nx2 array-like Longitude and latitude in radians of the centers of each cluster. 
""" lon, lat = _convert_measurements(args, kwargs.get('measurement', 'poles')) num = kwargs.get('num', 2) bidirectional = kwargs.get('bidirectional', True) tolerance = kwargs.get('tolerance', 1e-5) points = lon, lat dist = lambda x: stereonet_math.angular_distance(x, points, bidirectional) center_lon = np.random.choice(lon, num) center_lat = np.random.choice(lat, num) centers = np.column_stack([center_lon, center_lat]) while True: dists = np.array([dist(item) for item in centers]).T closest = dists.argmin(axis=1) new_centers = [] for i in range(num): mask = mask = closest == i _, vecs = cov_eig(lon[mask], lat[mask], bidirectional) new_centers.append(stereonet_math.cart2sph(*vecs[:,-1])) if np.allclose(centers, new_centers, atol=tolerance): break else: centers = new_centers return centers
[ "def", "kmeans", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "lon", ",", "lat", "=", "_convert_measurements", "(", "args", ",", "kwargs", ".", "get", "(", "'measurement'", ",", "'poles'", ")", ")", "num", "=", "kwargs", ".", "get", "(", "'...
Find centers of multi-modal clusters of data using a kmeans approach modified for spherical measurements. Parameters ---------- *args : 2 or 3 sequences of measurements By default, this will be expected to be ``strike`` & ``dip``, both array-like sequences representing poles to planes. (Rake measurements require three parameters, thus the variable number of arguments.) The ``measurement`` kwarg controls how these arguments are interpreted. num : int The number of clusters to find. Defaults to 2. bidirectional : bool Whether or not the measurements are bi-directional linear/planar features or directed vectors. Defaults to True. tolerance : float Iteration will continue until the centers have not changed by more than this amount. Defaults to 1e-5. measurement : string, optional Controls how the input arguments are interpreted. Defaults to ``"poles"``. May be one of the following: ``"poles"`` : strikes, dips Arguments are assumed to be sequences of strikes and dips of planes. Poles to these planes are used for analysis. ``"lines"`` : plunges, bearings Arguments are assumed to be sequences of plunges and bearings of linear features. ``"rakes"`` : strikes, dips, rakes Arguments are assumed to be sequences of strikes, dips, and rakes along the plane. ``"radians"`` : lon, lat Arguments are assumed to be "raw" longitudes and latitudes in the stereonet's underlying coordinate system. Returns ------- centers : An Nx2 array-like Longitude and latitude in radians of the centers of each cluster.
[ "Find", "centers", "of", "multi", "-", "modal", "clusters", "of", "data", "using", "a", "kmeans", "approach", "modified", "for", "spherical", "measurements", "." ]
f6d78ca49807915d4223e864e12bb24d497cc2d6
https://github.com/joferkington/mplstereonet/blob/f6d78ca49807915d4223e864e12bb24d497cc2d6/mplstereonet/analysis.py#L325-L400
train
54,077
heroku-python/django-postgrespool
django_postgrespool/base.py
DatabaseWrapper._dispose
def _dispose(self): """Dispose of the pool for this instance, closing all connections.""" self.close() # _DBProxy.dispose doesn't actually call dispose on the pool conn_params = self.get_connection_params() key = db_pool._serialize(**conn_params) try: pool = db_pool.pools[key] except KeyError: pass else: pool.dispose() del db_pool.pools[key]
python
def _dispose(self): """Dispose of the pool for this instance, closing all connections.""" self.close() # _DBProxy.dispose doesn't actually call dispose on the pool conn_params = self.get_connection_params() key = db_pool._serialize(**conn_params) try: pool = db_pool.pools[key] except KeyError: pass else: pool.dispose() del db_pool.pools[key]
[ "def", "_dispose", "(", "self", ")", ":", "self", ".", "close", "(", ")", "# _DBProxy.dispose doesn't actually call dispose on the pool", "conn_params", "=", "self", ".", "get_connection_params", "(", ")", "key", "=", "db_pool", ".", "_serialize", "(", "*", "*", ...
Dispose of the pool for this instance, closing all connections.
[ "Dispose", "of", "the", "pool", "for", "this", "instance", "closing", "all", "connections", "." ]
ce83a4d49c19eded86d86d5fcfa8daaeea5ef662
https://github.com/heroku-python/django-postgrespool/blob/ce83a4d49c19eded86d86d5fcfa8daaeea5ef662/django_postgrespool/base.py#L91-L103
train
54,078
stitchfix/fauxtograph
fauxtograph/vaegan.py
calc_fc_size
def calc_fc_size(img_height, img_width): '''Calculates shape of data after encoding. Parameters ---------- img_height : int Height of input image. img_width : int Width of input image. Returns ------- encoded_shape : tuple(int) Gives back 3-tuple with new dims. ''' height, width = img_height, img_width for _ in range(5): height, width = _get_conv_outsize( (height, width), 4, 2, 1) conv_out_layers = 512 return conv_out_layers, height, width
python
def calc_fc_size(img_height, img_width): '''Calculates shape of data after encoding. Parameters ---------- img_height : int Height of input image. img_width : int Width of input image. Returns ------- encoded_shape : tuple(int) Gives back 3-tuple with new dims. ''' height, width = img_height, img_width for _ in range(5): height, width = _get_conv_outsize( (height, width), 4, 2, 1) conv_out_layers = 512 return conv_out_layers, height, width
[ "def", "calc_fc_size", "(", "img_height", ",", "img_width", ")", ":", "height", ",", "width", "=", "img_height", ",", "img_width", "for", "_", "in", "range", "(", "5", ")", ":", "height", ",", "width", "=", "_get_conv_outsize", "(", "(", "height", ",", ...
Calculates shape of data after encoding. Parameters ---------- img_height : int Height of input image. img_width : int Width of input image. Returns ------- encoded_shape : tuple(int) Gives back 3-tuple with new dims.
[ "Calculates", "shape", "of", "data", "after", "encoding", "." ]
393f402151126991dac1f2ee4cdd4c6aba817a5d
https://github.com/stitchfix/fauxtograph/blob/393f402151126991dac1f2ee4cdd4c6aba817a5d/fauxtograph/vaegan.py#L540-L562
train
54,079
stitchfix/fauxtograph
fauxtograph/vaegan.py
calc_im_size
def calc_im_size(img_height, img_width): '''Calculates shape of data after decoding. Parameters ---------- img_height : int Height of encoded data. img_width : int Width of encoded data. Returns ------- encoded_shape : tuple(int) Gives back 2-tuple with decoded image dimensions. ''' height, width = img_height, img_width for _ in range(5): height, width = _get_deconv_outsize((height, width), 4, 2, 1) return height, width
python
def calc_im_size(img_height, img_width): '''Calculates shape of data after decoding. Parameters ---------- img_height : int Height of encoded data. img_width : int Width of encoded data. Returns ------- encoded_shape : tuple(int) Gives back 2-tuple with decoded image dimensions. ''' height, width = img_height, img_width for _ in range(5): height, width = _get_deconv_outsize((height, width), 4, 2, 1) return height, width
[ "def", "calc_im_size", "(", "img_height", ",", "img_width", ")", ":", "height", ",", "width", "=", "img_height", ",", "img_width", "for", "_", "in", "range", "(", "5", ")", ":", "height", ",", "width", "=", "_get_deconv_outsize", "(", "(", "height", ",",...
Calculates shape of data after decoding. Parameters ---------- img_height : int Height of encoded data. img_width : int Width of encoded data. Returns ------- encoded_shape : tuple(int) Gives back 2-tuple with decoded image dimensions.
[ "Calculates", "shape", "of", "data", "after", "decoding", "." ]
393f402151126991dac1f2ee4cdd4c6aba817a5d
https://github.com/stitchfix/fauxtograph/blob/393f402151126991dac1f2ee4cdd4c6aba817a5d/fauxtograph/vaegan.py#L565-L585
train
54,080
stitchfix/fauxtograph
fauxtograph/fauxtograph.py
get_paths
def get_paths(directory): '''Gets all the paths of non-hidden files in a directory and returns a list of those paths. Parameters ---------- directory : str The directory whose contents you wish to grab. Returns ------- paths : List[str] ''' fnames = [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and not f.startswith('.')] return fnames
python
def get_paths(directory): '''Gets all the paths of non-hidden files in a directory and returns a list of those paths. Parameters ---------- directory : str The directory whose contents you wish to grab. Returns ------- paths : List[str] ''' fnames = [os.path.join(directory, f) for f in os.listdir(directory) if os.path.isfile(os.path.join(directory, f)) and not f.startswith('.')] return fnames
[ "def", "get_paths", "(", "directory", ")", ":", "fnames", "=", "[", "os", ".", "path", ".", "join", "(", "directory", ",", "f", ")", "for", "f", "in", "os", ".", "listdir", "(", "directory", ")", "if", "os", ".", "path", ".", "isfile", "(", "os",...
Gets all the paths of non-hidden files in a directory and returns a list of those paths. Parameters ---------- directory : str The directory whose contents you wish to grab. Returns ------- paths : List[str]
[ "Gets", "all", "the", "paths", "of", "non", "-", "hidden", "files", "in", "a", "directory", "and", "returns", "a", "list", "of", "those", "paths", "." ]
393f402151126991dac1f2ee4cdd4c6aba817a5d
https://github.com/stitchfix/fauxtograph/blob/393f402151126991dac1f2ee4cdd4c6aba817a5d/fauxtograph/fauxtograph.py#L1570-L1587
train
54,081
stitchfix/fauxtograph
fauxtograph/fauxtograph.py
VAE.inverse_transform
def inverse_transform(self, data, test=False): '''Transform latent vectors into images. Parameters ---------- data : array-like shape (n_images, latent_width) Input numpy array of images. test [optional] : bool Controls the test boolean for batch normalization. Returns ------- images : array-like shape (n_images, image_width, image_height, n_colors) ''' if not type(data) == Variable: if len(data.shape) < 2: data = data[np.newaxis] if len(data.shape) != 2: raise TypeError("Invalid dimensions for latent data. Dim = %s.\ Must be a 2d array." % str(data.shape)) data = Variable(data) else: if len(data.data.shape) < 2: data.data = data.data[np.newaxis] if len(data.data.shape) != 2: raise TypeError("Invalid dimensions for latent data. Dim = %s.\ Must be a 2d array." % str(data.data.shape)) assert data.data.shape[-1] == self.latent_width,\ "Latent shape %d != %d" % (data.data.shape[-1], self.latent_width) if self.flag_gpu: data.to_gpu() out = self.model.decode(data, test=test) out.to_cpu() if self.mode == 'linear': final = out.data else: final = out.data.transpose(0, 2, 3, 1) return final
python
def inverse_transform(self, data, test=False): '''Transform latent vectors into images. Parameters ---------- data : array-like shape (n_images, latent_width) Input numpy array of images. test [optional] : bool Controls the test boolean for batch normalization. Returns ------- images : array-like shape (n_images, image_width, image_height, n_colors) ''' if not type(data) == Variable: if len(data.shape) < 2: data = data[np.newaxis] if len(data.shape) != 2: raise TypeError("Invalid dimensions for latent data. Dim = %s.\ Must be a 2d array." % str(data.shape)) data = Variable(data) else: if len(data.data.shape) < 2: data.data = data.data[np.newaxis] if len(data.data.shape) != 2: raise TypeError("Invalid dimensions for latent data. Dim = %s.\ Must be a 2d array." % str(data.data.shape)) assert data.data.shape[-1] == self.latent_width,\ "Latent shape %d != %d" % (data.data.shape[-1], self.latent_width) if self.flag_gpu: data.to_gpu() out = self.model.decode(data, test=test) out.to_cpu() if self.mode == 'linear': final = out.data else: final = out.data.transpose(0, 2, 3, 1) return final
[ "def", "inverse_transform", "(", "self", ",", "data", ",", "test", "=", "False", ")", ":", "if", "not", "type", "(", "data", ")", "==", "Variable", ":", "if", "len", "(", "data", ".", "shape", ")", "<", "2", ":", "data", "=", "data", "[", "np", ...
Transform latent vectors into images. Parameters ---------- data : array-like shape (n_images, latent_width) Input numpy array of images. test [optional] : bool Controls the test boolean for batch normalization. Returns ------- images : array-like shape (n_images, image_width, image_height, n_colors)
[ "Transform", "latent", "vectors", "into", "images", "." ]
393f402151126991dac1f2ee4cdd4c6aba817a5d
https://github.com/stitchfix/fauxtograph/blob/393f402151126991dac1f2ee4cdd4c6aba817a5d/fauxtograph/fauxtograph.py#L175-L218
train
54,082
stitchfix/fauxtograph
fauxtograph/fauxtograph.py
VAE.load_images
def load_images(self, filepaths): '''Load in image files from list of paths. Parameters ---------- filepaths : List[str] List of file paths of images to be loaded. Returns ------- images : array-like shape (n_images, n_colors, image_width, image_height) Images normalized to have pixel data range [0,1]. ''' def read(fname): im = Image.open(fname) im = np.float32(im) return im/255. x_all = np.array([read(fname) for fname in tqdm.tqdm(filepaths)]) x_all = x_all.astype('float32') if self.mode == 'convolution': x_all = x_all.transpose(0, 3, 1, 2) print("Image Files Loaded!") return x_all
python
def load_images(self, filepaths): '''Load in image files from list of paths. Parameters ---------- filepaths : List[str] List of file paths of images to be loaded. Returns ------- images : array-like shape (n_images, n_colors, image_width, image_height) Images normalized to have pixel data range [0,1]. ''' def read(fname): im = Image.open(fname) im = np.float32(im) return im/255. x_all = np.array([read(fname) for fname in tqdm.tqdm(filepaths)]) x_all = x_all.astype('float32') if self.mode == 'convolution': x_all = x_all.transpose(0, 3, 1, 2) print("Image Files Loaded!") return x_all
[ "def", "load_images", "(", "self", ",", "filepaths", ")", ":", "def", "read", "(", "fname", ")", ":", "im", "=", "Image", ".", "open", "(", "fname", ")", "im", "=", "np", ".", "float32", "(", "im", ")", "return", "im", "/", "255.", "x_all", "=", ...
Load in image files from list of paths. Parameters ---------- filepaths : List[str] List of file paths of images to be loaded. Returns ------- images : array-like shape (n_images, n_colors, image_width, image_height) Images normalized to have pixel data range [0,1].
[ "Load", "in", "image", "files", "from", "list", "of", "paths", "." ]
393f402151126991dac1f2ee4cdd4c6aba817a5d
https://github.com/stitchfix/fauxtograph/blob/393f402151126991dac1f2ee4cdd4c6aba817a5d/fauxtograph/fauxtograph.py#L220-L244
train
54,083
stitchfix/fauxtograph
fauxtograph/fauxtograph.py
VAE.fit
def fit( self, img_data, save_freq=-1, pic_freq=-1, n_epochs=100, batch_size=50, weight_decay=True, model_path='./VAE_training_model/', img_path='./VAE_training_images/', img_out_width=10 ): '''Fit the VAE model to the image data. Parameters ---------- img_data : array-like shape (n_images, n_colors, image_width, image_height) Images used to fit VAE model. save_freq [optional] : int Sets the number of epochs to wait before saving the model and optimizer states. Also saves image files of randomly generated images using those states in a separate directory. Does not save if negative valued. pic_freq [optional] : int Sets the number of batches to wait before displaying a picture or randomly generated images using the current model state. Does not display if negative valued. n_epochs [optional] : int Gives the number of training epochs to run through for the fitting process. batch_size [optional] : int The size of the batch to use when training. Note: generally larger batch sizes will result in fater epoch iteration, but at the const of lower granulatity when updating the layer weights. weight_decay [optional] : bool Flag that controls adding weight decay hooks to the optimizer. model_path [optional] : str Directory where the model and optimizer state files will be saved. img_path [optional] : str Directory where the end of epoch training image files will be saved. img_out_width : int Controls the number of randomly genreated images per row in the output saved imags. ''' width = img_out_width self.opt.setup(self.model) if weight_decay: self.opt.add_hook(chainer.optimizer.WeightDecay(0.00001)) n_data = img_data.shape[0] batch_iter = list(range(0, n_data, batch_size)) n_batches = len(batch_iter) save_counter = 0 for epoch in range(1, n_epochs + 1): print('epoch: %i' % epoch) t1 = time.time() indexes = np.random.permutation(n_data) last_loss_kl = 0. last_loss_rec = 0. 
count = 0 for i in tqdm.tqdm(batch_iter): x_batch = Variable(img_data[indexes[i: i + batch_size]]) if self.flag_gpu: x_batch.to_gpu() out, kl_loss, rec_loss = self.model.forward(x_batch) total_loss = rec_loss + kl_loss*self.kl_ratio self.opt.zero_grads() total_loss.backward() self.opt.update() last_loss_kl += kl_loss.data last_loss_rec += rec_loss.data plot_pics = Variable(img_data[indexes[:width]]) count += 1 if pic_freq > 0: assert type(pic_freq) == int, "pic_freq must be an integer." if count % pic_freq == 0: fig = self._plot_img( plot_pics, img_path=img_path, epoch=epoch ) display(fig) if save_freq > 0: save_counter += 1 assert type(save_freq) == int, "save_freq must be an integer." if epoch % save_freq == 0: name = "vae_epoch%s" % str(epoch) if save_counter == 1: save_meta = True else: save_meta = False self.save(model_path, name, save_meta=save_meta) fig = self._plot_img( plot_pics, img_path=img_path, epoch=epoch, batch=n_batches, save=True ) msg = "rec_loss = {0} , kl_loss = {1}" print(msg.format(last_loss_rec/n_batches, last_loss_kl/n_batches)) t_diff = time.time()-t1 print("time: %f\n\n" % t_diff)
python
def fit( self, img_data, save_freq=-1, pic_freq=-1, n_epochs=100, batch_size=50, weight_decay=True, model_path='./VAE_training_model/', img_path='./VAE_training_images/', img_out_width=10 ): '''Fit the VAE model to the image data. Parameters ---------- img_data : array-like shape (n_images, n_colors, image_width, image_height) Images used to fit VAE model. save_freq [optional] : int Sets the number of epochs to wait before saving the model and optimizer states. Also saves image files of randomly generated images using those states in a separate directory. Does not save if negative valued. pic_freq [optional] : int Sets the number of batches to wait before displaying a picture or randomly generated images using the current model state. Does not display if negative valued. n_epochs [optional] : int Gives the number of training epochs to run through for the fitting process. batch_size [optional] : int The size of the batch to use when training. Note: generally larger batch sizes will result in fater epoch iteration, but at the const of lower granulatity when updating the layer weights. weight_decay [optional] : bool Flag that controls adding weight decay hooks to the optimizer. model_path [optional] : str Directory where the model and optimizer state files will be saved. img_path [optional] : str Directory where the end of epoch training image files will be saved. img_out_width : int Controls the number of randomly genreated images per row in the output saved imags. ''' width = img_out_width self.opt.setup(self.model) if weight_decay: self.opt.add_hook(chainer.optimizer.WeightDecay(0.00001)) n_data = img_data.shape[0] batch_iter = list(range(0, n_data, batch_size)) n_batches = len(batch_iter) save_counter = 0 for epoch in range(1, n_epochs + 1): print('epoch: %i' % epoch) t1 = time.time() indexes = np.random.permutation(n_data) last_loss_kl = 0. last_loss_rec = 0. 
count = 0 for i in tqdm.tqdm(batch_iter): x_batch = Variable(img_data[indexes[i: i + batch_size]]) if self.flag_gpu: x_batch.to_gpu() out, kl_loss, rec_loss = self.model.forward(x_batch) total_loss = rec_loss + kl_loss*self.kl_ratio self.opt.zero_grads() total_loss.backward() self.opt.update() last_loss_kl += kl_loss.data last_loss_rec += rec_loss.data plot_pics = Variable(img_data[indexes[:width]]) count += 1 if pic_freq > 0: assert type(pic_freq) == int, "pic_freq must be an integer." if count % pic_freq == 0: fig = self._plot_img( plot_pics, img_path=img_path, epoch=epoch ) display(fig) if save_freq > 0: save_counter += 1 assert type(save_freq) == int, "save_freq must be an integer." if epoch % save_freq == 0: name = "vae_epoch%s" % str(epoch) if save_counter == 1: save_meta = True else: save_meta = False self.save(model_path, name, save_meta=save_meta) fig = self._plot_img( plot_pics, img_path=img_path, epoch=epoch, batch=n_batches, save=True ) msg = "rec_loss = {0} , kl_loss = {1}" print(msg.format(last_loss_rec/n_batches, last_loss_kl/n_batches)) t_diff = time.time()-t1 print("time: %f\n\n" % t_diff)
[ "def", "fit", "(", "self", ",", "img_data", ",", "save_freq", "=", "-", "1", ",", "pic_freq", "=", "-", "1", ",", "n_epochs", "=", "100", ",", "batch_size", "=", "50", ",", "weight_decay", "=", "True", ",", "model_path", "=", "'./VAE_training_model/'", ...
Fit the VAE model to the image data. Parameters ---------- img_data : array-like shape (n_images, n_colors, image_width, image_height) Images used to fit VAE model. save_freq [optional] : int Sets the number of epochs to wait before saving the model and optimizer states. Also saves image files of randomly generated images using those states in a separate directory. Does not save if negative valued. pic_freq [optional] : int Sets the number of batches to wait before displaying a picture or randomly generated images using the current model state. Does not display if negative valued. n_epochs [optional] : int Gives the number of training epochs to run through for the fitting process. batch_size [optional] : int The size of the batch to use when training. Note: generally larger batch sizes will result in fater epoch iteration, but at the const of lower granulatity when updating the layer weights. weight_decay [optional] : bool Flag that controls adding weight decay hooks to the optimizer. model_path [optional] : str Directory where the model and optimizer state files will be saved. img_path [optional] : str Directory where the end of epoch training image files will be saved. img_out_width : int Controls the number of randomly genreated images per row in the output saved imags.
[ "Fit", "the", "VAE", "model", "to", "the", "image", "data", "." ]
393f402151126991dac1f2ee4cdd4c6aba817a5d
https://github.com/stitchfix/fauxtograph/blob/393f402151126991dac1f2ee4cdd4c6aba817a5d/fauxtograph/fauxtograph.py#L246-L359
train
54,084
stitchfix/fauxtograph
fauxtograph/fauxtograph.py
VAEGAN.transform
def transform(self, data, test=False): '''Transform image data to latent space. Parameters ---------- data : array-like shape (n_images, image_width, image_height, n_colors) Input numpy array of images. test [optional] : bool Controls the test boolean for batch normalization. Returns ------- latent_vec : array-like shape (n_images, latent_width) ''' #make sure that data has the right shape. if not type(data) == Variable: if len(data.shape) < 4: data = data[np.newaxis] if len(data.shape) != 4: raise TypeError("Invalid dimensions for image data. Dim = %s.\ Must be 4d array." % str(data.shape)) if data.shape[1] != self.color_channels: if data.shape[-1] == self.color_channels: data = data.transpose(0, 3, 1, 2) else: raise TypeError("Invalid dimensions for image data. Dim = %s" % str(data.shape)) data = Variable(data) else: if len(data.data.shape) < 4: data.data = data.data[np.newaxis] if len(data.data.shape) != 4: raise TypeError("Invalid dimensions for image data. Dim = %s.\ Must be 4d array." % str(data.data.shape)) if data.data.shape[1] != self.color_channels: if data.data.shape[-1] == self.color_channels: data.data = data.data.transpose(0, 3, 1, 2) else: raise TypeError("Invalid dimensions for image data. Dim = %s" % str(data.shape)) # Actual transformation. if self.flag_gpu: data.to_gpu() z = self._encode(data, test=test)[0] z.to_cpu() return z.data
python
def transform(self, data, test=False): '''Transform image data to latent space. Parameters ---------- data : array-like shape (n_images, image_width, image_height, n_colors) Input numpy array of images. test [optional] : bool Controls the test boolean for batch normalization. Returns ------- latent_vec : array-like shape (n_images, latent_width) ''' #make sure that data has the right shape. if not type(data) == Variable: if len(data.shape) < 4: data = data[np.newaxis] if len(data.shape) != 4: raise TypeError("Invalid dimensions for image data. Dim = %s.\ Must be 4d array." % str(data.shape)) if data.shape[1] != self.color_channels: if data.shape[-1] == self.color_channels: data = data.transpose(0, 3, 1, 2) else: raise TypeError("Invalid dimensions for image data. Dim = %s" % str(data.shape)) data = Variable(data) else: if len(data.data.shape) < 4: data.data = data.data[np.newaxis] if len(data.data.shape) != 4: raise TypeError("Invalid dimensions for image data. Dim = %s.\ Must be 4d array." % str(data.data.shape)) if data.data.shape[1] != self.color_channels: if data.data.shape[-1] == self.color_channels: data.data = data.data.transpose(0, 3, 1, 2) else: raise TypeError("Invalid dimensions for image data. Dim = %s" % str(data.shape)) # Actual transformation. if self.flag_gpu: data.to_gpu() z = self._encode(data, test=test)[0] z.to_cpu() return z.data
[ "def", "transform", "(", "self", ",", "data", ",", "test", "=", "False", ")", ":", "#make sure that data has the right shape.", "if", "not", "type", "(", "data", ")", "==", "Variable", ":", "if", "len", "(", "data", ".", "shape", ")", "<", "4", ":", "d...
Transform image data to latent space. Parameters ---------- data : array-like shape (n_images, image_width, image_height, n_colors) Input numpy array of images. test [optional] : bool Controls the test boolean for batch normalization. Returns ------- latent_vec : array-like shape (n_images, latent_width)
[ "Transform", "image", "data", "to", "latent", "space", "." ]
393f402151126991dac1f2ee4cdd4c6aba817a5d
https://github.com/stitchfix/fauxtograph/blob/393f402151126991dac1f2ee4cdd4c6aba817a5d/fauxtograph/fauxtograph.py#L1122-L1171
train
54,085
dhermes/bezier
src/bezier/_surface_helpers.py
polynomial_sign
def polynomial_sign(poly_surface, degree): r"""Determine the "sign" of a polynomial on the reference triangle. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Checks if a polynomial :math:`p(s, t)` is positive, negative or mixed sign on the reference triangle. Does this by utilizing the B |eacute| zier form of :math:`p`: it is a convex combination of the Bernstein basis (real numbers) hence if the Bernstein basis is all positive, the polynomial must be. If the values are mixed, then we can recursively subdivide until we are in a region where the coefficients are all one sign. Args: poly_surface (numpy.ndarray): 2D array (with 1 row) of control points for a "surface", i.e. a bivariate polynomial. degree (int): The degree of the surface / polynomial given by ``poly_surface``. Returns: int: The sign of the polynomial. Will be one of ``-1``, ``1`` or ``0``. A value of ``0`` indicates a mixed sign or the zero polynomial. Raises: ValueError: If no conclusion is reached after the maximum number of subdivisions. """ # The indices where the corner nodes in a surface are. corner_indices = (0, degree, -1) sub_polys = [poly_surface] signs = set() for _ in six.moves.xrange(_MAX_POLY_SUBDIVISIONS): undecided = [] for poly in sub_polys: # First add all the signs of the corner nodes. signs.update(_SIGN(poly[0, corner_indices]).astype(int)) # Then check if the ``poly`` nodes are **uniformly** one sign. if np.all(poly == 0.0): signs.add(0) elif np.all(poly > 0.0): signs.add(1) elif np.all(poly < 0.0): signs.add(-1) else: undecided.append(poly) if len(signs) > 1: return 0 sub_polys = functools.reduce( operator.add, [subdivide_nodes(poly, degree) for poly in undecided], (), ) if not sub_polys: break if sub_polys: raise ValueError( "Did not reach a conclusion after max subdivisions", _MAX_POLY_SUBDIVISIONS, ) else: # NOTE: We are guaranteed that ``len(signs) <= 1``. return signs.pop()
python
def polynomial_sign(poly_surface, degree): r"""Determine the "sign" of a polynomial on the reference triangle. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Checks if a polynomial :math:`p(s, t)` is positive, negative or mixed sign on the reference triangle. Does this by utilizing the B |eacute| zier form of :math:`p`: it is a convex combination of the Bernstein basis (real numbers) hence if the Bernstein basis is all positive, the polynomial must be. If the values are mixed, then we can recursively subdivide until we are in a region where the coefficients are all one sign. Args: poly_surface (numpy.ndarray): 2D array (with 1 row) of control points for a "surface", i.e. a bivariate polynomial. degree (int): The degree of the surface / polynomial given by ``poly_surface``. Returns: int: The sign of the polynomial. Will be one of ``-1``, ``1`` or ``0``. A value of ``0`` indicates a mixed sign or the zero polynomial. Raises: ValueError: If no conclusion is reached after the maximum number of subdivisions. """ # The indices where the corner nodes in a surface are. corner_indices = (0, degree, -1) sub_polys = [poly_surface] signs = set() for _ in six.moves.xrange(_MAX_POLY_SUBDIVISIONS): undecided = [] for poly in sub_polys: # First add all the signs of the corner nodes. signs.update(_SIGN(poly[0, corner_indices]).astype(int)) # Then check if the ``poly`` nodes are **uniformly** one sign. if np.all(poly == 0.0): signs.add(0) elif np.all(poly > 0.0): signs.add(1) elif np.all(poly < 0.0): signs.add(-1) else: undecided.append(poly) if len(signs) > 1: return 0 sub_polys = functools.reduce( operator.add, [subdivide_nodes(poly, degree) for poly in undecided], (), ) if not sub_polys: break if sub_polys: raise ValueError( "Did not reach a conclusion after max subdivisions", _MAX_POLY_SUBDIVISIONS, ) else: # NOTE: We are guaranteed that ``len(signs) <= 1``. return signs.pop()
[ "def", "polynomial_sign", "(", "poly_surface", ",", "degree", ")", ":", "# The indices where the corner nodes in a surface are.", "corner_indices", "=", "(", "0", ",", "degree", ",", "-", "1", ")", "sub_polys", "=", "[", "poly_surface", "]", "signs", "=", "set", ...
r"""Determine the "sign" of a polynomial on the reference triangle. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Checks if a polynomial :math:`p(s, t)` is positive, negative or mixed sign on the reference triangle. Does this by utilizing the B |eacute| zier form of :math:`p`: it is a convex combination of the Bernstein basis (real numbers) hence if the Bernstein basis is all positive, the polynomial must be. If the values are mixed, then we can recursively subdivide until we are in a region where the coefficients are all one sign. Args: poly_surface (numpy.ndarray): 2D array (with 1 row) of control points for a "surface", i.e. a bivariate polynomial. degree (int): The degree of the surface / polynomial given by ``poly_surface``. Returns: int: The sign of the polynomial. Will be one of ``-1``, ``1`` or ``0``. A value of ``0`` indicates a mixed sign or the zero polynomial. Raises: ValueError: If no conclusion is reached after the maximum number of subdivisions.
[ "r", "Determine", "the", "sign", "of", "a", "polynomial", "on", "the", "reference", "triangle", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L719-L791
train
54,086
dhermes/bezier
src/bezier/_surface_helpers.py
quadratic_jacobian_polynomial
def quadratic_jacobian_polynomial(nodes): r"""Compute the Jacobian determinant of a quadratic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this. (However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface. Returns: numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis. """ # First evaluate the Jacobian at each of the 6 nodes. jac_parts = _helpers.matrix_product(nodes, _QUADRATIC_JACOBIAN_HELPER) jac_at_nodes = np.empty((1, 6), order="F") jac_at_nodes[0, 0] = two_by_two_det(jac_parts[:, :2]) jac_at_nodes[0, 1] = two_by_two_det(jac_parts[:, 2:4]) jac_at_nodes[0, 2] = two_by_two_det(jac_parts[:, 4:6]) jac_at_nodes[0, 3] = two_by_two_det(jac_parts[:, 6:8]) jac_at_nodes[0, 4] = two_by_two_det(jac_parts[:, 8:10]) jac_at_nodes[0, 5] = two_by_two_det(jac_parts[:, 10:]) # Convert the nodal values to the Bernstein basis... bernstein = _helpers.matrix_product(jac_at_nodes, _QUADRATIC_TO_BERNSTEIN) return bernstein
python
def quadratic_jacobian_polynomial(nodes): r"""Compute the Jacobian determinant of a quadratic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this. (However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface. Returns: numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis. """ # First evaluate the Jacobian at each of the 6 nodes. jac_parts = _helpers.matrix_product(nodes, _QUADRATIC_JACOBIAN_HELPER) jac_at_nodes = np.empty((1, 6), order="F") jac_at_nodes[0, 0] = two_by_two_det(jac_parts[:, :2]) jac_at_nodes[0, 1] = two_by_two_det(jac_parts[:, 2:4]) jac_at_nodes[0, 2] = two_by_two_det(jac_parts[:, 4:6]) jac_at_nodes[0, 3] = two_by_two_det(jac_parts[:, 6:8]) jac_at_nodes[0, 4] = two_by_two_det(jac_parts[:, 8:10]) jac_at_nodes[0, 5] = two_by_two_det(jac_parts[:, 10:]) # Convert the nodal values to the Bernstein basis... bernstein = _helpers.matrix_product(jac_at_nodes, _QUADRATIC_TO_BERNSTEIN) return bernstein
[ "def", "quadratic_jacobian_polynomial", "(", "nodes", ")", ":", "# First evaluate the Jacobian at each of the 6 nodes.", "jac_parts", "=", "_helpers", ".", "matrix_product", "(", "nodes", ",", "_QUADRATIC_JACOBIAN_HELPER", ")", "jac_at_nodes", "=", "np", ".", "empty", "("...
r"""Compute the Jacobian determinant of a quadratic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 6`` but doesn't verify this. (However, the right multiplication by ``_QUADRATIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 6`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 6 array of nodes in a surface. Returns: numpy.ndarray: 1 x 6 array, coefficients in Bernstein basis.
[ "r", "Compute", "the", "Jacobian", "determinant", "of", "a", "quadratic", "surface", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L815-L851
train
54,087
dhermes/bezier
src/bezier/_surface_helpers.py
cubic_jacobian_polynomial
def cubic_jacobian_polynomial(nodes): r"""Compute the Jacobian determinant of a cubic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 10`` but doesn't verify this. (However, the right multiplication by ``_CUBIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 10`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 10 array of nodes in a surface. Returns: numpy.ndarray: 1 x 15 array, coefficients in Bernstein basis. """ # First evaluate the Jacobian at each of the 15 nodes # in the quartic triangle. jac_parts = _helpers.matrix_product(nodes, _CUBIC_JACOBIAN_HELPER) jac_at_nodes = np.empty((1, 15), order="F") jac_at_nodes[0, 0] = two_by_two_det(jac_parts[:, :2]) jac_at_nodes[0, 1] = two_by_two_det(jac_parts[:, 2:4]) jac_at_nodes[0, 2] = two_by_two_det(jac_parts[:, 4:6]) jac_at_nodes[0, 3] = two_by_two_det(jac_parts[:, 6:8]) jac_at_nodes[0, 4] = two_by_two_det(jac_parts[:, 8:10]) jac_at_nodes[0, 5] = two_by_two_det(jac_parts[:, 10:12]) jac_at_nodes[0, 6] = two_by_two_det(jac_parts[:, 12:14]) jac_at_nodes[0, 7] = two_by_two_det(jac_parts[:, 14:16]) jac_at_nodes[0, 8] = two_by_two_det(jac_parts[:, 16:18]) jac_at_nodes[0, 9] = two_by_two_det(jac_parts[:, 18:20]) jac_at_nodes[0, 10] = two_by_two_det(jac_parts[:, 20:22]) jac_at_nodes[0, 11] = two_by_two_det(jac_parts[:, 22:24]) jac_at_nodes[0, 12] = two_by_two_det(jac_parts[:, 24:26]) jac_at_nodes[0, 13] = two_by_two_det(jac_parts[:, 26:28]) jac_at_nodes[0, 14] = two_by_two_det(jac_parts[:, 28:]) # Convert the nodal values to the Bernstein basis... bernstein = _helpers.matrix_product(jac_at_nodes, _QUARTIC_TO_BERNSTEIN) bernstein /= _QUARTIC_BERNSTEIN_FACTOR return bernstein
python
def cubic_jacobian_polynomial(nodes): r"""Compute the Jacobian determinant of a cubic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 10`` but doesn't verify this. (However, the right multiplication by ``_CUBIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 10`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 10 array of nodes in a surface. Returns: numpy.ndarray: 1 x 15 array, coefficients in Bernstein basis. """ # First evaluate the Jacobian at each of the 15 nodes # in the quartic triangle. jac_parts = _helpers.matrix_product(nodes, _CUBIC_JACOBIAN_HELPER) jac_at_nodes = np.empty((1, 15), order="F") jac_at_nodes[0, 0] = two_by_two_det(jac_parts[:, :2]) jac_at_nodes[0, 1] = two_by_two_det(jac_parts[:, 2:4]) jac_at_nodes[0, 2] = two_by_two_det(jac_parts[:, 4:6]) jac_at_nodes[0, 3] = two_by_two_det(jac_parts[:, 6:8]) jac_at_nodes[0, 4] = two_by_two_det(jac_parts[:, 8:10]) jac_at_nodes[0, 5] = two_by_two_det(jac_parts[:, 10:12]) jac_at_nodes[0, 6] = two_by_two_det(jac_parts[:, 12:14]) jac_at_nodes[0, 7] = two_by_two_det(jac_parts[:, 14:16]) jac_at_nodes[0, 8] = two_by_two_det(jac_parts[:, 16:18]) jac_at_nodes[0, 9] = two_by_two_det(jac_parts[:, 18:20]) jac_at_nodes[0, 10] = two_by_two_det(jac_parts[:, 20:22]) jac_at_nodes[0, 11] = two_by_two_det(jac_parts[:, 22:24]) jac_at_nodes[0, 12] = two_by_two_det(jac_parts[:, 24:26]) jac_at_nodes[0, 13] = two_by_two_det(jac_parts[:, 26:28]) jac_at_nodes[0, 14] = two_by_two_det(jac_parts[:, 28:]) # Convert the nodal values to the Bernstein basis... bernstein = _helpers.matrix_product(jac_at_nodes, _QUARTIC_TO_BERNSTEIN) bernstein /= _QUARTIC_BERNSTEIN_FACTOR return bernstein
[ "def", "cubic_jacobian_polynomial", "(", "nodes", ")", ":", "# First evaluate the Jacobian at each of the 15 nodes", "# in the quartic triangle.", "jac_parts", "=", "_helpers", ".", "matrix_product", "(", "nodes", ",", "_CUBIC_JACOBIAN_HELPER", ")", "jac_at_nodes", "=", "np",...
r"""Compute the Jacobian determinant of a cubic surface. .. note:: This is used **only** by :meth:`Surface._compute_valid` (which is in turn used to compute / cache the :attr:`Surface.is_valid` property). Converts :math:`\det(J(s, t))` to a polynomial on the reference triangle and represents it as a surface object. .. note:: This assumes that ``nodes`` is ``2 x 10`` but doesn't verify this. (However, the right multiplication by ``_CUBIC_JACOBIAN_HELPER`` would fail if ``nodes`` wasn't ``R x 10`` and then the ensuing determinants would fail if there weren't 2 rows.) Args: nodes (numpy.ndarray): A 2 x 10 array of nodes in a surface. Returns: numpy.ndarray: 1 x 15 array, coefficients in Bernstein basis.
[ "r", "Compute", "the", "Jacobian", "determinant", "of", "a", "cubic", "surface", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L854-L901
train
54,088
dhermes/bezier
src/bezier/_surface_helpers.py
_de_casteljau_one_round
def _de_casteljau_one_round(nodes, degree, lambda1, lambda2, lambda3): r"""Performs one "round" of the de Casteljau algorithm for surfaces. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. .. note:: This is a helper function, used by :func:`make_transform` and :func:`_specialize_surface` (and :func:`make_transform` is **only** used by :func:`_specialize_surface`). Converts the ``nodes`` into a basis for a surface one degree smaller by using the barycentric weights: .. math:: q_{i, j, k} = \lambda_1 \cdot p_{i + 1, j, k} + \lambda_2 \cdot p_{i, j + 1, k} + \lambda_2 \cdot p_{i, j, k + 1} .. note: For degree :math:`d`, the number of nodes should be :math:`(d + 1)(d + 2)/2`, but we don't verify this. Args: nodes (numpy.ndarray): The nodes to reduce. degree (int): The degree of the surface. lambda1 (float): Parameter along the reference triangle. lambda2 (float): Parameter along the reference triangle. lambda3 (float): Parameter along the reference triangle. Returns: numpy.ndarray: The converted nodes. """ dimension, num_nodes = nodes.shape num_new_nodes = num_nodes - degree - 1 new_nodes = np.empty((dimension, num_new_nodes), order="F") index = 0 # parent_i1 = index + k # parent_i2 = index + k + 1 # parent_i3 = index + degree + 1 parent_i1 = 0 parent_i2 = 1 parent_i3 = degree + 1 for k in six.moves.xrange(degree): for unused_j in six.moves.xrange(degree - k): # NOTE: i = (degree - 1) - j - k new_nodes[:, index] = ( lambda1 * nodes[:, parent_i1] + lambda2 * nodes[:, parent_i2] + lambda3 * nodes[:, parent_i3] ) # Update all the indices. parent_i1 += 1 parent_i2 += 1 parent_i3 += 1 index += 1 # Update the indices that depend on k. parent_i1 += 1 parent_i2 += 1 return new_nodes
python
def _de_casteljau_one_round(nodes, degree, lambda1, lambda2, lambda3): r"""Performs one "round" of the de Casteljau algorithm for surfaces. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. .. note:: This is a helper function, used by :func:`make_transform` and :func:`_specialize_surface` (and :func:`make_transform` is **only** used by :func:`_specialize_surface`). Converts the ``nodes`` into a basis for a surface one degree smaller by using the barycentric weights: .. math:: q_{i, j, k} = \lambda_1 \cdot p_{i + 1, j, k} + \lambda_2 \cdot p_{i, j + 1, k} + \lambda_2 \cdot p_{i, j, k + 1} .. note: For degree :math:`d`, the number of nodes should be :math:`(d + 1)(d + 2)/2`, but we don't verify this. Args: nodes (numpy.ndarray): The nodes to reduce. degree (int): The degree of the surface. lambda1 (float): Parameter along the reference triangle. lambda2 (float): Parameter along the reference triangle. lambda3 (float): Parameter along the reference triangle. Returns: numpy.ndarray: The converted nodes. """ dimension, num_nodes = nodes.shape num_new_nodes = num_nodes - degree - 1 new_nodes = np.empty((dimension, num_new_nodes), order="F") index = 0 # parent_i1 = index + k # parent_i2 = index + k + 1 # parent_i3 = index + degree + 1 parent_i1 = 0 parent_i2 = 1 parent_i3 = degree + 1 for k in six.moves.xrange(degree): for unused_j in six.moves.xrange(degree - k): # NOTE: i = (degree - 1) - j - k new_nodes[:, index] = ( lambda1 * nodes[:, parent_i1] + lambda2 * nodes[:, parent_i2] + lambda3 * nodes[:, parent_i3] ) # Update all the indices. parent_i1 += 1 parent_i2 += 1 parent_i3 += 1 index += 1 # Update the indices that depend on k. parent_i1 += 1 parent_i2 += 1 return new_nodes
[ "def", "_de_casteljau_one_round", "(", "nodes", ",", "degree", ",", "lambda1", ",", "lambda2", ",", "lambda3", ")", ":", "dimension", ",", "num_nodes", "=", "nodes", ".", "shape", "num_new_nodes", "=", "num_nodes", "-", "degree", "-", "1", "new_nodes", "=", ...
r"""Performs one "round" of the de Casteljau algorithm for surfaces. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. .. note:: This is a helper function, used by :func:`make_transform` and :func:`_specialize_surface` (and :func:`make_transform` is **only** used by :func:`_specialize_surface`). Converts the ``nodes`` into a basis for a surface one degree smaller by using the barycentric weights: .. math:: q_{i, j, k} = \lambda_1 \cdot p_{i + 1, j, k} + \lambda_2 \cdot p_{i, j + 1, k} + \lambda_2 \cdot p_{i, j, k + 1} .. note: For degree :math:`d`, the number of nodes should be :math:`(d + 1)(d + 2)/2`, but we don't verify this. Args: nodes (numpy.ndarray): The nodes to reduce. degree (int): The degree of the surface. lambda1 (float): Parameter along the reference triangle. lambda2 (float): Parameter along the reference triangle. lambda3 (float): Parameter along the reference triangle. Returns: numpy.ndarray: The converted nodes.
[ "r", "Performs", "one", "round", "of", "the", "de", "Casteljau", "algorithm", "for", "surfaces", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L904-L967
train
54,089
dhermes/bezier
src/bezier/_surface_helpers.py
make_transform
def make_transform(degree, weights_a, weights_b, weights_c): """Compute matrices corresponding to the de Casteljau algorithm. .. note:: This is a helper used only by :func:`_specialize_surface`. Applies the de Casteljau to the identity matrix, thus effectively caching the algorithm in a transformation matrix. .. note:: This is premature optimization. It's unclear if the time saved from "caching" one round of de Casteljau is cancelled out by the extra storage required for the 3 matrices. Args: degree (int): The degree of a candidate surface. weights_a (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_b (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_c (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle Returns: Mapping[int, numpy.ndarray]: Mapping from keys to the de Casteljau transformation mappings. The keys are ``0`` corresponding to ``weights_a``, ``1`` to ``weights_b`` and ``2`` to ``weights_c``. """ num_nodes = ((degree + 1) * (degree + 2)) // 2 id_mat = np.eye(num_nodes, order="F") # Pre-compute the matrices that do the reduction so we don't # have to **actually** perform the de Casteljau algorithm # every time. transform = { 0: de_casteljau_one_round(id_mat, degree, *weights_a), 1: de_casteljau_one_round(id_mat, degree, *weights_b), 2: de_casteljau_one_round(id_mat, degree, *weights_c), } return transform
python
def make_transform(degree, weights_a, weights_b, weights_c): """Compute matrices corresponding to the de Casteljau algorithm. .. note:: This is a helper used only by :func:`_specialize_surface`. Applies the de Casteljau to the identity matrix, thus effectively caching the algorithm in a transformation matrix. .. note:: This is premature optimization. It's unclear if the time saved from "caching" one round of de Casteljau is cancelled out by the extra storage required for the 3 matrices. Args: degree (int): The degree of a candidate surface. weights_a (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_b (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_c (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle Returns: Mapping[int, numpy.ndarray]: Mapping from keys to the de Casteljau transformation mappings. The keys are ``0`` corresponding to ``weights_a``, ``1`` to ``weights_b`` and ``2`` to ``weights_c``. """ num_nodes = ((degree + 1) * (degree + 2)) // 2 id_mat = np.eye(num_nodes, order="F") # Pre-compute the matrices that do the reduction so we don't # have to **actually** perform the de Casteljau algorithm # every time. transform = { 0: de_casteljau_one_round(id_mat, degree, *weights_a), 1: de_casteljau_one_round(id_mat, degree, *weights_b), 2: de_casteljau_one_round(id_mat, degree, *weights_c), } return transform
[ "def", "make_transform", "(", "degree", ",", "weights_a", ",", "weights_b", ",", "weights_c", ")", ":", "num_nodes", "=", "(", "(", "degree", "+", "1", ")", "*", "(", "degree", "+", "2", ")", ")", "//", "2", "id_mat", "=", "np", ".", "eye", "(", ...
Compute matrices corresponding to the de Casteljau algorithm. .. note:: This is a helper used only by :func:`_specialize_surface`. Applies the de Casteljau to the identity matrix, thus effectively caching the algorithm in a transformation matrix. .. note:: This is premature optimization. It's unclear if the time saved from "caching" one round of de Casteljau is cancelled out by the extra storage required for the 3 matrices. Args: degree (int): The degree of a candidate surface. weights_a (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_b (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_c (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle Returns: Mapping[int, numpy.ndarray]: Mapping from keys to the de Casteljau transformation mappings. The keys are ``0`` corresponding to ``weights_a``, ``1`` to ``weights_b`` and ``2`` to ``weights_c``.
[ "Compute", "matrices", "corresponding", "to", "the", "de", "Casteljau", "algorithm", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L970-L1010
train
54,090
dhermes/bezier
src/bezier/_surface_helpers.py
reduced_to_matrix
def reduced_to_matrix(shape, degree, vals_by_weight): r"""Converts a reduced values dictionary into a matrix. .. note:: This is a helper used only by :func:`_specialize_surface`. The ``vals_by_weight`` mapping has keys of the form: ``(0, ..., 1, ..., 2, ...)`` where the ``0`` corresponds to the number of times the first set of barycentric weights was used in the reduction process, and similarly for ``1`` and ``2``. These points correspond to barycentric weights in their own right. For example ``(0, 0, 0, 1, 2, 2)`` corresponds to the barycentric weight :math:`\left(\frac{3}{6}, \frac{1}{6}, \frac{2}{6}\right)`. Once the keys in ``vals_by_weight`` have been converted to barycentric coordinates, we order them according to our rule (bottom to top, left to right) and then return them in a single matrix. Args: shape (tuple): The shape of the result matrix. degree (int): The degree of the surface. vals_by_weight (Mapping[tuple, numpy.ndarray]): Dictionary of reduced nodes according to blending of each of the three sets of weights in a reduction. Returns: numpy.ndarray: The newly created reduced control points. """ result = np.empty(shape, order="F") index = 0 for k in six.moves.xrange(degree + 1): for j in six.moves.xrange(degree + 1 - k): i = degree - j - k key = (0,) * i + (1,) * j + (2,) * k result[:, index] = vals_by_weight[key][:, 0] index += 1 return result
python
def reduced_to_matrix(shape, degree, vals_by_weight): r"""Converts a reduced values dictionary into a matrix. .. note:: This is a helper used only by :func:`_specialize_surface`. The ``vals_by_weight`` mapping has keys of the form: ``(0, ..., 1, ..., 2, ...)`` where the ``0`` corresponds to the number of times the first set of barycentric weights was used in the reduction process, and similarly for ``1`` and ``2``. These points correspond to barycentric weights in their own right. For example ``(0, 0, 0, 1, 2, 2)`` corresponds to the barycentric weight :math:`\left(\frac{3}{6}, \frac{1}{6}, \frac{2}{6}\right)`. Once the keys in ``vals_by_weight`` have been converted to barycentric coordinates, we order them according to our rule (bottom to top, left to right) and then return them in a single matrix. Args: shape (tuple): The shape of the result matrix. degree (int): The degree of the surface. vals_by_weight (Mapping[tuple, numpy.ndarray]): Dictionary of reduced nodes according to blending of each of the three sets of weights in a reduction. Returns: numpy.ndarray: The newly created reduced control points. """ result = np.empty(shape, order="F") index = 0 for k in six.moves.xrange(degree + 1): for j in six.moves.xrange(degree + 1 - k): i = degree - j - k key = (0,) * i + (1,) * j + (2,) * k result[:, index] = vals_by_weight[key][:, 0] index += 1 return result
[ "def", "reduced_to_matrix", "(", "shape", ",", "degree", ",", "vals_by_weight", ")", ":", "result", "=", "np", ".", "empty", "(", "shape", ",", "order", "=", "\"F\"", ")", "index", "=", "0", "for", "k", "in", "six", ".", "moves", ".", "xrange", "(", ...
r"""Converts a reduced values dictionary into a matrix. .. note:: This is a helper used only by :func:`_specialize_surface`. The ``vals_by_weight`` mapping has keys of the form: ``(0, ..., 1, ..., 2, ...)`` where the ``0`` corresponds to the number of times the first set of barycentric weights was used in the reduction process, and similarly for ``1`` and ``2``. These points correspond to barycentric weights in their own right. For example ``(0, 0, 0, 1, 2, 2)`` corresponds to the barycentric weight :math:`\left(\frac{3}{6}, \frac{1}{6}, \frac{2}{6}\right)`. Once the keys in ``vals_by_weight`` have been converted to barycentric coordinates, we order them according to our rule (bottom to top, left to right) and then return them in a single matrix. Args: shape (tuple): The shape of the result matrix. degree (int): The degree of the surface. vals_by_weight (Mapping[tuple, numpy.ndarray]): Dictionary of reduced nodes according to blending of each of the three sets of weights in a reduction. Returns: numpy.ndarray: The newly created reduced control points.
[ "r", "Converts", "a", "reduced", "values", "dictionary", "into", "a", "matrix", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L1013-L1054
train
54,091
dhermes/bezier
src/bezier/_surface_helpers.py
_specialize_surface
def _specialize_surface(nodes, degree, weights_a, weights_b, weights_c): """Specialize a surface to a reparameterization .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Does so by taking three points (in barycentric form) within the reference triangle and then reparameterizing the surface onto the triangle formed by those three points. .. note:: This assumes the surface is degree 1 or greater but doesn't check. .. note:: This is used **only** as a helper for :func:`_subdivide_nodes`, however it may be worth adding this to :class:`Surface` as an analogue to :meth:`Curve.specialize`. Args: nodes (numpy.ndarray): Control points for a surface. degree (int): The degree of the surface. weights_a (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_b (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_c (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle Returns: numpy.ndarray: The control points for the specialized surface. """ # Uses A-->0, B-->1, C-->2 to represent the specialization used. partial_vals = { (0,): de_casteljau_one_round(nodes, degree, *weights_a), (1,): de_casteljau_one_round(nodes, degree, *weights_b), (2,): de_casteljau_one_round(nodes, degree, *weights_c), } for reduced_deg in six.moves.xrange(degree - 1, 0, -1): new_partial = {} transform = make_transform( reduced_deg, weights_a, weights_b, weights_c ) for key, sub_nodes in six.iteritems(partial_vals): # Our keys are ascending so we increment from the last value. for next_id in six.moves.xrange(key[-1], 2 + 1): new_key = key + (next_id,) new_partial[new_key] = _helpers.matrix_product( sub_nodes, transform[next_id] ) partial_vals = new_partial return reduced_to_matrix(nodes.shape, degree, partial_vals)
python
def _specialize_surface(nodes, degree, weights_a, weights_b, weights_c): """Specialize a surface to a reparameterization .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Does so by taking three points (in barycentric form) within the reference triangle and then reparameterizing the surface onto the triangle formed by those three points. .. note:: This assumes the surface is degree 1 or greater but doesn't check. .. note:: This is used **only** as a helper for :func:`_subdivide_nodes`, however it may be worth adding this to :class:`Surface` as an analogue to :meth:`Curve.specialize`. Args: nodes (numpy.ndarray): Control points for a surface. degree (int): The degree of the surface. weights_a (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_b (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_c (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle Returns: numpy.ndarray: The control points for the specialized surface. """ # Uses A-->0, B-->1, C-->2 to represent the specialization used. partial_vals = { (0,): de_casteljau_one_round(nodes, degree, *weights_a), (1,): de_casteljau_one_round(nodes, degree, *weights_b), (2,): de_casteljau_one_round(nodes, degree, *weights_c), } for reduced_deg in six.moves.xrange(degree - 1, 0, -1): new_partial = {} transform = make_transform( reduced_deg, weights_a, weights_b, weights_c ) for key, sub_nodes in six.iteritems(partial_vals): # Our keys are ascending so we increment from the last value. for next_id in six.moves.xrange(key[-1], 2 + 1): new_key = key + (next_id,) new_partial[new_key] = _helpers.matrix_product( sub_nodes, transform[next_id] ) partial_vals = new_partial return reduced_to_matrix(nodes.shape, degree, partial_vals)
[ "def", "_specialize_surface", "(", "nodes", ",", "degree", ",", "weights_a", ",", "weights_b", ",", "weights_c", ")", ":", "# Uses A-->0, B-->1, C-->2 to represent the specialization used.", "partial_vals", "=", "{", "(", "0", ",", ")", ":", "de_casteljau_one_round", ...
Specialize a surface to a reparameterization .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Does so by taking three points (in barycentric form) within the reference triangle and then reparameterizing the surface onto the triangle formed by those three points. .. note:: This assumes the surface is degree 1 or greater but doesn't check. .. note:: This is used **only** as a helper for :func:`_subdivide_nodes`, however it may be worth adding this to :class:`Surface` as an analogue to :meth:`Curve.specialize`. Args: nodes (numpy.ndarray): Control points for a surface. degree (int): The degree of the surface. weights_a (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_b (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle weights_c (numpy.ndarray): Triple (1D array) of barycentric weights for a point in the reference triangle Returns: numpy.ndarray: The control points for the specialized surface.
[ "Specialize", "a", "surface", "to", "a", "reparameterization" ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L1057-L1111
train
54,092
dhermes/bezier
src/bezier/_surface_helpers.py
_subdivide_nodes
def _subdivide_nodes(nodes, degree): """Subdivide a surface into four sub-surfaces. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Does so by taking the unit triangle (i.e. the domain of the surface) and splitting it into four sub-triangles by connecting the midpoints of each side. Args: nodes (numpy.ndarray): Control points for a surface. degree (int): The degree of the surface. Returns: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]: The nodes for the four sub-surfaces. """ if degree == 1: nodes_a = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_A) nodes_b = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_B) nodes_c = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_C) nodes_d = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_D) elif degree == 2: nodes_a = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_A) nodes_b = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_B) nodes_c = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_C) nodes_d = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_D) elif degree == 3: nodes_a = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_A) nodes_b = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_B) nodes_c = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_C) nodes_d = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_D) elif degree == 4: nodes_a = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_A) nodes_b = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_B) nodes_c = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_C) nodes_d = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_D) else: nodes_a = specialize_surface( nodes, degree, _WEIGHTS_SUBDIVIDE0, _WEIGHTS_SUBDIVIDE1, _WEIGHTS_SUBDIVIDE2, ) nodes_b = specialize_surface( nodes, degree, _WEIGHTS_SUBDIVIDE3, _WEIGHTS_SUBDIVIDE2, _WEIGHTS_SUBDIVIDE1, ) nodes_c = specialize_surface( nodes, degree, _WEIGHTS_SUBDIVIDE1, _WEIGHTS_SUBDIVIDE4, _WEIGHTS_SUBDIVIDE3, ) nodes_d = specialize_surface( nodes, degree, 
_WEIGHTS_SUBDIVIDE2, _WEIGHTS_SUBDIVIDE3, _WEIGHTS_SUBDIVIDE5, ) return nodes_a, nodes_b, nodes_c, nodes_d
python
def _subdivide_nodes(nodes, degree): """Subdivide a surface into four sub-surfaces. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Does so by taking the unit triangle (i.e. the domain of the surface) and splitting it into four sub-triangles by connecting the midpoints of each side. Args: nodes (numpy.ndarray): Control points for a surface. degree (int): The degree of the surface. Returns: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]: The nodes for the four sub-surfaces. """ if degree == 1: nodes_a = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_A) nodes_b = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_B) nodes_c = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_C) nodes_d = _helpers.matrix_product(nodes, LINEAR_SUBDIVIDE_D) elif degree == 2: nodes_a = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_A) nodes_b = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_B) nodes_c = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_C) nodes_d = _helpers.matrix_product(nodes, QUADRATIC_SUBDIVIDE_D) elif degree == 3: nodes_a = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_A) nodes_b = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_B) nodes_c = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_C) nodes_d = _helpers.matrix_product(nodes, CUBIC_SUBDIVIDE_D) elif degree == 4: nodes_a = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_A) nodes_b = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_B) nodes_c = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_C) nodes_d = _helpers.matrix_product(nodes, QUARTIC_SUBDIVIDE_D) else: nodes_a = specialize_surface( nodes, degree, _WEIGHTS_SUBDIVIDE0, _WEIGHTS_SUBDIVIDE1, _WEIGHTS_SUBDIVIDE2, ) nodes_b = specialize_surface( nodes, degree, _WEIGHTS_SUBDIVIDE3, _WEIGHTS_SUBDIVIDE2, _WEIGHTS_SUBDIVIDE1, ) nodes_c = specialize_surface( nodes, degree, _WEIGHTS_SUBDIVIDE1, _WEIGHTS_SUBDIVIDE4, _WEIGHTS_SUBDIVIDE3, ) nodes_d = specialize_surface( nodes, degree, 
_WEIGHTS_SUBDIVIDE2, _WEIGHTS_SUBDIVIDE3, _WEIGHTS_SUBDIVIDE5, ) return nodes_a, nodes_b, nodes_c, nodes_d
[ "def", "_subdivide_nodes", "(", "nodes", ",", "degree", ")", ":", "if", "degree", "==", "1", ":", "nodes_a", "=", "_helpers", ".", "matrix_product", "(", "nodes", ",", "LINEAR_SUBDIVIDE_A", ")", "nodes_b", "=", "_helpers", ".", "matrix_product", "(", "nodes"...
Subdivide a surface into four sub-surfaces. .. note:: There is also a Fortran implementation of this function, which will be used if it can be built. Does so by taking the unit triangle (i.e. the domain of the surface) and splitting it into four sub-triangles by connecting the midpoints of each side. Args: nodes (numpy.ndarray): Control points for a surface. degree (int): The degree of the surface. Returns: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]: The nodes for the four sub-surfaces.
[ "Subdivide", "a", "surface", "into", "four", "sub", "-", "surfaces", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L1114-L1183
train
54,093
dhermes/bezier
src/bezier/_surface_helpers.py
ignored_double_corner
def ignored_double_corner( intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2 ): """Check if an intersection is an "ignored" double corner. .. note:: This is a helper used only by :func:`ignored_corner`, which in turn is only used by :func:`classify_intersection`. Helper for :func:`ignored_corner` where both ``s`` and ``t`` are ``0``. Does so by checking if either edge through the ``t`` corner goes through the interior of the other surface. An interior check is done by checking that a few cross products are positive. Args: intersection (.Intersection): An intersection to "diagnose". tangent_s (numpy.ndarray): The tangent vector (``2 x 1`` array) to the first curve at the intersection. tangent_t (numpy.ndarray): The tangent vector (``2 x 1`` array) to the second curve at the intersection. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: bool: Indicates if the corner is to be ignored. """ # Compute the other edge for the ``s`` surface. prev_index = (intersection.index_first - 1) % 3 prev_edge = edge_nodes1[prev_index] alt_tangent_s = _curve_helpers.evaluate_hodograph(1.0, prev_edge) # First check if ``tangent_t`` is interior to the ``s`` surface. cross_prod1 = _helpers.cross_product( tangent_s.ravel(order="F"), tangent_t.ravel(order="F") ) # A positive cross product indicates that ``tangent_t`` is # interior to ``tangent_s``. Similar for ``alt_tangent_s``. # If ``tangent_t`` is interior to both, then the surfaces # do more than just "kiss" at the corner, so the corner should # not be ignored. if cross_prod1 >= 0.0: # Only compute ``cross_prod2`` if we need to. 
cross_prod2 = _helpers.cross_product( alt_tangent_s.ravel(order="F"), tangent_t.ravel(order="F") ) if cross_prod2 >= 0.0: return False # If ``tangent_t`` is not interior, we check the other ``t`` # edge that ends at the corner. prev_index = (intersection.index_second - 1) % 3 prev_edge = edge_nodes2[prev_index] alt_tangent_t = _curve_helpers.evaluate_hodograph(1.0, prev_edge) # Change the direction of the "in" tangent so that it points "out". alt_tangent_t *= -1.0 cross_prod3 = _helpers.cross_product( tangent_s.ravel(order="F"), alt_tangent_t.ravel(order="F") ) if cross_prod3 >= 0.0: # Only compute ``cross_prod4`` if we need to. cross_prod4 = _helpers.cross_product( alt_tangent_s.ravel(order="F"), alt_tangent_t.ravel(order="F") ) if cross_prod4 >= 0.0: return False # If neither of ``tangent_t`` or ``alt_tangent_t`` are interior # to the ``s`` surface, one of two things is true. Either # the two surfaces have no interior intersection (1) or the # ``s`` surface is bounded by both edges of the ``t`` surface # at the corner intersection (2). To detect (2), we only need # check if ``tangent_s`` is interior to both ``tangent_t`` # and ``alt_tangent_t``. ``cross_prod1`` contains # (tangent_s) x (tangent_t), so it's negative will tell if # ``tangent_s`` is interior. Similarly, ``cross_prod3`` # contains (tangent_s) x (alt_tangent_t), but we also reversed # the sign on ``alt_tangent_t`` so switching the sign back # and reversing the arguments in the cross product cancel out. return cross_prod1 > 0.0 or cross_prod3 < 0.0
python
def ignored_double_corner( intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2 ): """Check if an intersection is an "ignored" double corner. .. note:: This is a helper used only by :func:`ignored_corner`, which in turn is only used by :func:`classify_intersection`. Helper for :func:`ignored_corner` where both ``s`` and ``t`` are ``0``. Does so by checking if either edge through the ``t`` corner goes through the interior of the other surface. An interior check is done by checking that a few cross products are positive. Args: intersection (.Intersection): An intersection to "diagnose". tangent_s (numpy.ndarray): The tangent vector (``2 x 1`` array) to the first curve at the intersection. tangent_t (numpy.ndarray): The tangent vector (``2 x 1`` array) to the second curve at the intersection. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: bool: Indicates if the corner is to be ignored. """ # Compute the other edge for the ``s`` surface. prev_index = (intersection.index_first - 1) % 3 prev_edge = edge_nodes1[prev_index] alt_tangent_s = _curve_helpers.evaluate_hodograph(1.0, prev_edge) # First check if ``tangent_t`` is interior to the ``s`` surface. cross_prod1 = _helpers.cross_product( tangent_s.ravel(order="F"), tangent_t.ravel(order="F") ) # A positive cross product indicates that ``tangent_t`` is # interior to ``tangent_s``. Similar for ``alt_tangent_s``. # If ``tangent_t`` is interior to both, then the surfaces # do more than just "kiss" at the corner, so the corner should # not be ignored. if cross_prod1 >= 0.0: # Only compute ``cross_prod2`` if we need to. 
cross_prod2 = _helpers.cross_product( alt_tangent_s.ravel(order="F"), tangent_t.ravel(order="F") ) if cross_prod2 >= 0.0: return False # If ``tangent_t`` is not interior, we check the other ``t`` # edge that ends at the corner. prev_index = (intersection.index_second - 1) % 3 prev_edge = edge_nodes2[prev_index] alt_tangent_t = _curve_helpers.evaluate_hodograph(1.0, prev_edge) # Change the direction of the "in" tangent so that it points "out". alt_tangent_t *= -1.0 cross_prod3 = _helpers.cross_product( tangent_s.ravel(order="F"), alt_tangent_t.ravel(order="F") ) if cross_prod3 >= 0.0: # Only compute ``cross_prod4`` if we need to. cross_prod4 = _helpers.cross_product( alt_tangent_s.ravel(order="F"), alt_tangent_t.ravel(order="F") ) if cross_prod4 >= 0.0: return False # If neither of ``tangent_t`` or ``alt_tangent_t`` are interior # to the ``s`` surface, one of two things is true. Either # the two surfaces have no interior intersection (1) or the # ``s`` surface is bounded by both edges of the ``t`` surface # at the corner intersection (2). To detect (2), we only need # check if ``tangent_s`` is interior to both ``tangent_t`` # and ``alt_tangent_t``. ``cross_prod1`` contains # (tangent_s) x (tangent_t), so it's negative will tell if # ``tangent_s`` is interior. Similarly, ``cross_prod3`` # contains (tangent_s) x (alt_tangent_t), but we also reversed # the sign on ``alt_tangent_t`` so switching the sign back # and reversing the arguments in the cross product cancel out. return cross_prod1 > 0.0 or cross_prod3 < 0.0
[ "def", "ignored_double_corner", "(", "intersection", ",", "tangent_s", ",", "tangent_t", ",", "edge_nodes1", ",", "edge_nodes2", ")", ":", "# Compute the other edge for the ``s`` surface.", "prev_index", "=", "(", "intersection", ".", "index_first", "-", "1", ")", "%"...
Check if an intersection is an "ignored" double corner. .. note:: This is a helper used only by :func:`ignored_corner`, which in turn is only used by :func:`classify_intersection`. Helper for :func:`ignored_corner` where both ``s`` and ``t`` are ``0``. Does so by checking if either edge through the ``t`` corner goes through the interior of the other surface. An interior check is done by checking that a few cross products are positive. Args: intersection (.Intersection): An intersection to "diagnose". tangent_s (numpy.ndarray): The tangent vector (``2 x 1`` array) to the first curve at the intersection. tangent_t (numpy.ndarray): The tangent vector (``2 x 1`` array) to the second curve at the intersection. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: bool: Indicates if the corner is to be ignored.
[ "Check", "if", "an", "intersection", "is", "an", "ignored", "double", "corner", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L1476-L1558
train
54,094
dhermes/bezier
src/bezier/_surface_helpers.py
ignored_corner
def ignored_corner( intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2 ): """Check if an intersection is an "ignored" corner. .. note:: This is a helper used only by :func:`classify_intersection`. An "ignored" corner is one where the surfaces just "kiss" at the point of intersection but their interiors do not meet. We can determine this by comparing the tangent lines from the point of intersection. .. note:: This assumes the ``intersection`` has been shifted to the beginning of a curve so only checks if ``s == 0.0`` or ``t == 0.0`` (rather than also checking for ``1.0``). Args: intersection (.Intersection): An intersection to "diagnose". tangent_s (numpy.ndarray): The tangent vector (``2 x 1`` array) to the first curve at the intersection. tangent_t (numpy.ndarray): The tangent vector (``2 x 1`` array) to the second curve at the intersection. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: bool: Indicates if the corner is to be ignored. """ if intersection.s == 0.0: if intersection.t == 0.0: # Double corner. return ignored_double_corner( intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2 ) else: # s-only corner. prev_index = (intersection.index_first - 1) % 3 prev_edge = edge_nodes1[prev_index] return ignored_edge_corner(tangent_t, tangent_s, prev_edge) elif intersection.t == 0.0: # t-only corner. prev_index = (intersection.index_second - 1) % 3 prev_edge = edge_nodes2[prev_index] return ignored_edge_corner(tangent_s, tangent_t, prev_edge) else: # Not a corner. return False
python
def ignored_corner( intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2 ): """Check if an intersection is an "ignored" corner. .. note:: This is a helper used only by :func:`classify_intersection`. An "ignored" corner is one where the surfaces just "kiss" at the point of intersection but their interiors do not meet. We can determine this by comparing the tangent lines from the point of intersection. .. note:: This assumes the ``intersection`` has been shifted to the beginning of a curve so only checks if ``s == 0.0`` or ``t == 0.0`` (rather than also checking for ``1.0``). Args: intersection (.Intersection): An intersection to "diagnose". tangent_s (numpy.ndarray): The tangent vector (``2 x 1`` array) to the first curve at the intersection. tangent_t (numpy.ndarray): The tangent vector (``2 x 1`` array) to the second curve at the intersection. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: bool: Indicates if the corner is to be ignored. """ if intersection.s == 0.0: if intersection.t == 0.0: # Double corner. return ignored_double_corner( intersection, tangent_s, tangent_t, edge_nodes1, edge_nodes2 ) else: # s-only corner. prev_index = (intersection.index_first - 1) % 3 prev_edge = edge_nodes1[prev_index] return ignored_edge_corner(tangent_t, tangent_s, prev_edge) elif intersection.t == 0.0: # t-only corner. prev_index = (intersection.index_second - 1) % 3 prev_edge = edge_nodes2[prev_index] return ignored_edge_corner(tangent_s, tangent_t, prev_edge) else: # Not a corner. return False
[ "def", "ignored_corner", "(", "intersection", ",", "tangent_s", ",", "tangent_t", ",", "edge_nodes1", ",", "edge_nodes2", ")", ":", "if", "intersection", ".", "s", "==", "0.0", ":", "if", "intersection", ".", "t", "==", "0.0", ":", "# Double corner.", "retur...
Check if an intersection is an "ignored" corner. .. note:: This is a helper used only by :func:`classify_intersection`. An "ignored" corner is one where the surfaces just "kiss" at the point of intersection but their interiors do not meet. We can determine this by comparing the tangent lines from the point of intersection. .. note:: This assumes the ``intersection`` has been shifted to the beginning of a curve so only checks if ``s == 0.0`` or ``t == 0.0`` (rather than also checking for ``1.0``). Args: intersection (.Intersection): An intersection to "diagnose". tangent_s (numpy.ndarray): The tangent vector (``2 x 1`` array) to the first curve at the intersection. tangent_t (numpy.ndarray): The tangent vector (``2 x 1`` array) to the second curve at the intersection. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: bool: Indicates if the corner is to be ignored.
[ "Check", "if", "an", "intersection", "is", "an", "ignored", "corner", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L1561-L1617
train
54,095
dhermes/bezier
src/bezier/_surface_helpers.py
classify_intersection
def classify_intersection(intersection, edge_nodes1, edge_nodes2): r"""Determine which curve is on the "inside of the intersection". .. note:: This is a helper used only by :meth:`.Surface.intersect`. This is intended to be a helper for forming a :class:`.CurvedPolygon` from the edge intersections of two :class:`.Surface`-s. In order to move from one intersection to another (or to the end of an edge), the interior edge must be determined at the point of intersection. The "typical" case is on the interior of both edges: .. image:: ../images/classify_intersection1.png :align: center .. testsetup:: classify-intersection1, classify-intersection2, classify-intersection3, classify-intersection4, classify-intersection5, classify-intersection6, classify-intersection7, classify-intersection8, classify-intersection9 import numpy as np import bezier from bezier import _curve_helpers from bezier._intersection_helpers import Intersection from bezier._surface_helpers import classify_intersection def hodograph(curve, s): return _curve_helpers.evaluate_hodograph( s, curve._nodes) def curvature(curve, s): nodes = curve._nodes tangent = _curve_helpers.evaluate_hodograph( s, nodes) return _curve_helpers.get_curvature( nodes, tangent, s) .. doctest:: classify-intersection1 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.75, 2.0], ... [0.0, 0.25, 1.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.6875, 2.0], ... [0.0, 0.0625, 0.5], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.25, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> tangent1 = hodograph(curve1, s) >>> tangent1 array([[1.25], [0.75]]) >>> tangent2 = hodograph(curve2, t) >>> tangent2 array([[2. 
], [0.5]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.FIRST: 0> .. testcleanup:: classify-intersection1 import make_images make_images.classify_intersection1( s, curve1, tangent1, curve2, tangent2) We determine the interior (i.e. left) one by using the `right-hand rule`_: by embedding the tangent vectors in :math:`\mathbf{R}^3`, we compute .. _right-hand rule: https://en.wikipedia.org/wiki/Right-hand_rule .. math:: \left[\begin{array}{c} x_1'(s) \\ y_1'(s) \\ 0 \end{array}\right] \times \left[\begin{array}{c} x_2'(t) \\ y_2'(t) \\ 0 \end{array}\right] = \left[\begin{array}{c} 0 \\ 0 \\ x_1'(s) y_2'(t) - x_2'(t) y_1'(s) \end{array}\right]. If the cross product quantity :math:`B_1'(s) \times B_2'(t) = x_1'(s) y_2'(t) - x_2'(t) y_1'(s)` is positive, then the first curve is "outside" / "to the right", i.e. the second curve is interior. If the cross product is negative, the first curve is interior. When :math:`B_1'(s) \times B_2'(t) = 0`, the tangent vectors are parallel, i.e. the intersection is a point of tangency: .. image:: ../images/classify_intersection2.png :align: center .. doctest:: classify-intersection2 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.5, 2.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.5, 3.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_SECOND: 4> .. 
testcleanup:: classify-intersection2 import make_images make_images.classify_intersection2(s, curve1, curve2) Depending on the direction of the parameterizations, the interior curve may change, but we can use the (signed) `curvature`_ of each curve at that point to determine which is on the interior: .. _curvature: https://en.wikipedia.org/wiki/Curvature .. image:: ../images/classify_intersection3.png :align: center .. doctest:: classify-intersection3 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [2.0, 1.5, 1.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [3.0, 1.5, 0.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_FIRST: 3> .. testcleanup:: classify-intersection3 import make_images make_images.classify_intersection3(s, curve1, curve2) When the curves are moving in opposite directions at a point of tangency, there is no side to choose. Either the point of tangency is not part of any :class:`.CurvedPolygon` intersection .. image:: ../images/classify_intersection4.png :align: center .. doctest:: classify-intersection4 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [2.0, 1.5, 1.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.5, 3.0], ... [0.0, 1.0, 0.0], ... 
]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.OPPOSED: 2> .. testcleanup:: classify-intersection4 import make_images make_images.classify_intersection4(s, curve1, curve2) or the point of tangency is a "degenerate" part of two :class:`.CurvedPolygon` intersections. It is "degenerate" because from one direction, the point should be classified as :attr:`~.IntersectionClassification.FIRST` and from another as :attr:`~.IntersectionClassification.SECOND`. .. image:: ../images/classify_intersection5.png :align: center .. doctest:: classify-intersection5 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.5, 2.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [3.0, 1.5, 0.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_BOTH: 6> .. testcleanup:: classify-intersection5 import make_images make_images.classify_intersection5(s, curve1, curve2) The :attr:`~.IntersectionClassification.TANGENT_BOTH` classification can also occur if the curves are "kissing" but share a zero width interior at the point of tangency: .. image:: ../images/classify_intersection9.png :align: center .. doctest:: classify-intersection9 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [0.0, 20.0, 40.0], ... [0.0, 40.0, 0.0], ... 
]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [40.0, 20.0, 0.0], ... [40.0, 0.0, 40.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_BOTH: 6> .. testcleanup:: classify-intersection9 import make_images make_images.classify_intersection9(s, curve1, curve2) However, if the `curvature`_ of each curve is identical, we don't try to distinguish further: .. image:: ../images/classify_intersection6.png :align: center .. doctest:: classify-intersection6 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [-0.125 , -0.125 , 0.375 ], ... [ 0.0625, -0.0625, 0.0625], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [-0.25, -0.25, 0.75], ... [ 0.25, -0.25, 0.25], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> hodograph(curve1, s) array([[0.5], [0. ]]) >>> hodograph(curve2, t) array([[1.], [0.]]) >>> curvature(curve1, s) 2.0 >>> curvature(curve2, t) 2.0 >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) Traceback (most recent call last): ... NotImplementedError: Tangent curves have same curvature. .. testcleanup:: classify-intersection6 import make_images make_images.classify_intersection6(s, curve1, curve2) In addition to points of tangency, intersections that happen at the end of an edge need special handling: .. image:: ../images/classify_intersection7.png :align: center .. 
doctest:: classify-intersection7 :options: +NORMALIZE_WHITESPACE >>> nodes1a = np.asfortranarray([ ... [0.0, 4.5, 9.0 ], ... [0.0, 0.0, 2.25], ... ]) >>> curve1a = bezier.Curve(nodes1a, degree=2) >>> nodes2 = np.asfortranarray([ ... [11.25, 9.0, 2.75], ... [ 0.0 , 4.5, 1.0 ], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 1.0, 0.375 >>> curve1a.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1a, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) Traceback (most recent call last): ... ValueError: ('Intersection occurs at the end of an edge', 's', 1.0, 't', 0.375) >>> >>> nodes1b = np.asfortranarray([ ... [9.0, 4.5, 0.0], ... [2.25, 2.375, 2.5], ... ]) >>> curve1b = bezier.Curve(nodes1b, degree=2) >>> curve1b.evaluate(0.0) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(1, 0.0, 0, t) >>> edge_nodes1 = (nodes1a, nodes1b, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.FIRST: 0> .. testcleanup:: classify-intersection7 import make_images make_images.classify_intersection7(s, curve1a, curve1b, curve2) As above, some intersections at the end of an edge are part of an actual intersection. However, some surfaces may just "kiss" at a corner intersection: .. image:: ../images/classify_intersection8.png :align: center .. doctest:: classify-intersection8 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [0.25, 0.0, 0.0, 0.625, 0.5 , 1.0 ], ... [1.0 , 0.5, 0.0, 0.875, 0.375, 0.75], ... ]) >>> surface1 = bezier.Surface(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0625, -0.25, -1.0, -0.5 , -1.0, -1.0], ... [0.5 , 1.0 , 1.0, 0.125, 0.5, 0.0], ... 
]) >>> surface2 = bezier.Surface(nodes2, degree=2) >>> curve1, _, _ = surface1.edges >>> edge_nodes1 = [curve.nodes for curve in surface1.edges] >>> curve2, _, _ = surface2.edges >>> edge_nodes2 = [curve.nodes for curve in surface2.edges] >>> s, t = 0.5, 0.0 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.IGNORED_CORNER: 5> .. testcleanup:: classify-intersection8 import make_images make_images.classify_intersection8( s, curve1, surface1, curve2, surface2) .. note:: This assumes the intersection occurs in :math:`\mathbf{R}^2` but doesn't check this. .. note:: This function doesn't allow wiggle room / round-off when checking endpoints, nor when checking if the cross product is near zero, nor when curvatures are compared. However, the most "correct" version of this function likely should allow for some round off. Args: intersection (.Intersection): An intersection object. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: IntersectionClassification: The "inside" curve type, based on the classification enum. Raises: ValueError: If the intersection occurs at the end of either curve involved. This is because we want to classify which curve to **move forward** on, and we can't move past the end of a segment. 
""" if intersection.s == 1.0 or intersection.t == 1.0: raise ValueError( "Intersection occurs at the end of an edge", "s", intersection.s, "t", intersection.t, ) nodes1 = edge_nodes1[intersection.index_first] tangent1 = _curve_helpers.evaluate_hodograph(intersection.s, nodes1) nodes2 = edge_nodes2[intersection.index_second] tangent2 = _curve_helpers.evaluate_hodograph(intersection.t, nodes2) if ignored_corner( intersection, tangent1, tangent2, edge_nodes1, edge_nodes2 ): return CLASSIFICATION_T.IGNORED_CORNER # Take the cross product of tangent vectors to determine which one # is more "inside" / "to the left". cross_prod = _helpers.cross_product( tangent1.ravel(order="F"), tangent2.ravel(order="F") ) if cross_prod < -ALMOST_TANGENT: return CLASSIFICATION_T.FIRST elif cross_prod > ALMOST_TANGENT: return CLASSIFICATION_T.SECOND else: # NOTE: A more robust approach would take ||tangent1|| and ||tangent2|| # into account when comparing (tangent1 x tangent2) to the # "almost zero" threshold. We (for now) avoid doing this because # normalizing the tangent vectors has a "cost" of ~6 flops each # and that cost would happen for **every** single intersection. return classify_tangent_intersection( intersection, nodes1, tangent1, nodes2, tangent2 )
python
def classify_intersection(intersection, edge_nodes1, edge_nodes2): r"""Determine which curve is on the "inside of the intersection". .. note:: This is a helper used only by :meth:`.Surface.intersect`. This is intended to be a helper for forming a :class:`.CurvedPolygon` from the edge intersections of two :class:`.Surface`-s. In order to move from one intersection to another (or to the end of an edge), the interior edge must be determined at the point of intersection. The "typical" case is on the interior of both edges: .. image:: ../images/classify_intersection1.png :align: center .. testsetup:: classify-intersection1, classify-intersection2, classify-intersection3, classify-intersection4, classify-intersection5, classify-intersection6, classify-intersection7, classify-intersection8, classify-intersection9 import numpy as np import bezier from bezier import _curve_helpers from bezier._intersection_helpers import Intersection from bezier._surface_helpers import classify_intersection def hodograph(curve, s): return _curve_helpers.evaluate_hodograph( s, curve._nodes) def curvature(curve, s): nodes = curve._nodes tangent = _curve_helpers.evaluate_hodograph( s, nodes) return _curve_helpers.get_curvature( nodes, tangent, s) .. doctest:: classify-intersection1 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.75, 2.0], ... [0.0, 0.25, 1.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.6875, 2.0], ... [0.0, 0.0625, 0.5], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.25, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> tangent1 = hodograph(curve1, s) >>> tangent1 array([[1.25], [0.75]]) >>> tangent2 = hodograph(curve2, t) >>> tangent2 array([[2. 
], [0.5]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.FIRST: 0> .. testcleanup:: classify-intersection1 import make_images make_images.classify_intersection1( s, curve1, tangent1, curve2, tangent2) We determine the interior (i.e. left) one by using the `right-hand rule`_: by embedding the tangent vectors in :math:`\mathbf{R}^3`, we compute .. _right-hand rule: https://en.wikipedia.org/wiki/Right-hand_rule .. math:: \left[\begin{array}{c} x_1'(s) \\ y_1'(s) \\ 0 \end{array}\right] \times \left[\begin{array}{c} x_2'(t) \\ y_2'(t) \\ 0 \end{array}\right] = \left[\begin{array}{c} 0 \\ 0 \\ x_1'(s) y_2'(t) - x_2'(t) y_1'(s) \end{array}\right]. If the cross product quantity :math:`B_1'(s) \times B_2'(t) = x_1'(s) y_2'(t) - x_2'(t) y_1'(s)` is positive, then the first curve is "outside" / "to the right", i.e. the second curve is interior. If the cross product is negative, the first curve is interior. When :math:`B_1'(s) \times B_2'(t) = 0`, the tangent vectors are parallel, i.e. the intersection is a point of tangency: .. image:: ../images/classify_intersection2.png :align: center .. doctest:: classify-intersection2 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.5, 2.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.5, 3.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_SECOND: 4> .. 
testcleanup:: classify-intersection2 import make_images make_images.classify_intersection2(s, curve1, curve2) Depending on the direction of the parameterizations, the interior curve may change, but we can use the (signed) `curvature`_ of each curve at that point to determine which is on the interior: .. _curvature: https://en.wikipedia.org/wiki/Curvature .. image:: ../images/classify_intersection3.png :align: center .. doctest:: classify-intersection3 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [2.0, 1.5, 1.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [3.0, 1.5, 0.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_FIRST: 3> .. testcleanup:: classify-intersection3 import make_images make_images.classify_intersection3(s, curve1, curve2) When the curves are moving in opposite directions at a point of tangency, there is no side to choose. Either the point of tangency is not part of any :class:`.CurvedPolygon` intersection .. image:: ../images/classify_intersection4.png :align: center .. doctest:: classify-intersection4 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [2.0, 1.5, 1.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.5, 3.0], ... [0.0, 1.0, 0.0], ... 
]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.OPPOSED: 2> .. testcleanup:: classify-intersection4 import make_images make_images.classify_intersection4(s, curve1, curve2) or the point of tangency is a "degenerate" part of two :class:`.CurvedPolygon` intersections. It is "degenerate" because from one direction, the point should be classified as :attr:`~.IntersectionClassification.FIRST` and from another as :attr:`~.IntersectionClassification.SECOND`. .. image:: ../images/classify_intersection5.png :align: center .. doctest:: classify-intersection5 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.5, 2.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [3.0, 1.5, 0.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_BOTH: 6> .. testcleanup:: classify-intersection5 import make_images make_images.classify_intersection5(s, curve1, curve2) The :attr:`~.IntersectionClassification.TANGENT_BOTH` classification can also occur if the curves are "kissing" but share a zero width interior at the point of tangency: .. image:: ../images/classify_intersection9.png :align: center .. doctest:: classify-intersection9 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [0.0, 20.0, 40.0], ... [0.0, 40.0, 0.0], ... 
]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [40.0, 20.0, 0.0], ... [40.0, 0.0, 40.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_BOTH: 6> .. testcleanup:: classify-intersection9 import make_images make_images.classify_intersection9(s, curve1, curve2) However, if the `curvature`_ of each curve is identical, we don't try to distinguish further: .. image:: ../images/classify_intersection6.png :align: center .. doctest:: classify-intersection6 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [-0.125 , -0.125 , 0.375 ], ... [ 0.0625, -0.0625, 0.0625], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [-0.25, -0.25, 0.75], ... [ 0.25, -0.25, 0.25], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> hodograph(curve1, s) array([[0.5], [0. ]]) >>> hodograph(curve2, t) array([[1.], [0.]]) >>> curvature(curve1, s) 2.0 >>> curvature(curve2, t) 2.0 >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) Traceback (most recent call last): ... NotImplementedError: Tangent curves have same curvature. .. testcleanup:: classify-intersection6 import make_images make_images.classify_intersection6(s, curve1, curve2) In addition to points of tangency, intersections that happen at the end of an edge need special handling: .. image:: ../images/classify_intersection7.png :align: center .. 
doctest:: classify-intersection7 :options: +NORMALIZE_WHITESPACE >>> nodes1a = np.asfortranarray([ ... [0.0, 4.5, 9.0 ], ... [0.0, 0.0, 2.25], ... ]) >>> curve1a = bezier.Curve(nodes1a, degree=2) >>> nodes2 = np.asfortranarray([ ... [11.25, 9.0, 2.75], ... [ 0.0 , 4.5, 1.0 ], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 1.0, 0.375 >>> curve1a.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1a, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) Traceback (most recent call last): ... ValueError: ('Intersection occurs at the end of an edge', 's', 1.0, 't', 0.375) >>> >>> nodes1b = np.asfortranarray([ ... [9.0, 4.5, 0.0], ... [2.25, 2.375, 2.5], ... ]) >>> curve1b = bezier.Curve(nodes1b, degree=2) >>> curve1b.evaluate(0.0) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(1, 0.0, 0, t) >>> edge_nodes1 = (nodes1a, nodes1b, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.FIRST: 0> .. testcleanup:: classify-intersection7 import make_images make_images.classify_intersection7(s, curve1a, curve1b, curve2) As above, some intersections at the end of an edge are part of an actual intersection. However, some surfaces may just "kiss" at a corner intersection: .. image:: ../images/classify_intersection8.png :align: center .. doctest:: classify-intersection8 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [0.25, 0.0, 0.0, 0.625, 0.5 , 1.0 ], ... [1.0 , 0.5, 0.0, 0.875, 0.375, 0.75], ... ]) >>> surface1 = bezier.Surface(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0625, -0.25, -1.0, -0.5 , -1.0, -1.0], ... [0.5 , 1.0 , 1.0, 0.125, 0.5, 0.0], ... 
]) >>> surface2 = bezier.Surface(nodes2, degree=2) >>> curve1, _, _ = surface1.edges >>> edge_nodes1 = [curve.nodes for curve in surface1.edges] >>> curve2, _, _ = surface2.edges >>> edge_nodes2 = [curve.nodes for curve in surface2.edges] >>> s, t = 0.5, 0.0 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.IGNORED_CORNER: 5> .. testcleanup:: classify-intersection8 import make_images make_images.classify_intersection8( s, curve1, surface1, curve2, surface2) .. note:: This assumes the intersection occurs in :math:`\mathbf{R}^2` but doesn't check this. .. note:: This function doesn't allow wiggle room / round-off when checking endpoints, nor when checking if the cross product is near zero, nor when curvatures are compared. However, the most "correct" version of this function likely should allow for some round off. Args: intersection (.Intersection): An intersection object. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: IntersectionClassification: The "inside" curve type, based on the classification enum. Raises: ValueError: If the intersection occurs at the end of either curve involved. This is because we want to classify which curve to **move forward** on, and we can't move past the end of a segment. 
""" if intersection.s == 1.0 or intersection.t == 1.0: raise ValueError( "Intersection occurs at the end of an edge", "s", intersection.s, "t", intersection.t, ) nodes1 = edge_nodes1[intersection.index_first] tangent1 = _curve_helpers.evaluate_hodograph(intersection.s, nodes1) nodes2 = edge_nodes2[intersection.index_second] tangent2 = _curve_helpers.evaluate_hodograph(intersection.t, nodes2) if ignored_corner( intersection, tangent1, tangent2, edge_nodes1, edge_nodes2 ): return CLASSIFICATION_T.IGNORED_CORNER # Take the cross product of tangent vectors to determine which one # is more "inside" / "to the left". cross_prod = _helpers.cross_product( tangent1.ravel(order="F"), tangent2.ravel(order="F") ) if cross_prod < -ALMOST_TANGENT: return CLASSIFICATION_T.FIRST elif cross_prod > ALMOST_TANGENT: return CLASSIFICATION_T.SECOND else: # NOTE: A more robust approach would take ||tangent1|| and ||tangent2|| # into account when comparing (tangent1 x tangent2) to the # "almost zero" threshold. We (for now) avoid doing this because # normalizing the tangent vectors has a "cost" of ~6 flops each # and that cost would happen for **every** single intersection. return classify_tangent_intersection( intersection, nodes1, tangent1, nodes2, tangent2 )
[ "def", "classify_intersection", "(", "intersection", ",", "edge_nodes1", ",", "edge_nodes2", ")", ":", "if", "intersection", ".", "s", "==", "1.0", "or", "intersection", ".", "t", "==", "1.0", ":", "raise", "ValueError", "(", "\"Intersection occurs at the end of a...
r"""Determine which curve is on the "inside of the intersection". .. note:: This is a helper used only by :meth:`.Surface.intersect`. This is intended to be a helper for forming a :class:`.CurvedPolygon` from the edge intersections of two :class:`.Surface`-s. In order to move from one intersection to another (or to the end of an edge), the interior edge must be determined at the point of intersection. The "typical" case is on the interior of both edges: .. image:: ../images/classify_intersection1.png :align: center .. testsetup:: classify-intersection1, classify-intersection2, classify-intersection3, classify-intersection4, classify-intersection5, classify-intersection6, classify-intersection7, classify-intersection8, classify-intersection9 import numpy as np import bezier from bezier import _curve_helpers from bezier._intersection_helpers import Intersection from bezier._surface_helpers import classify_intersection def hodograph(curve, s): return _curve_helpers.evaluate_hodograph( s, curve._nodes) def curvature(curve, s): nodes = curve._nodes tangent = _curve_helpers.evaluate_hodograph( s, nodes) return _curve_helpers.get_curvature( nodes, tangent, s) .. doctest:: classify-intersection1 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.75, 2.0], ... [0.0, 0.25, 1.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.6875, 2.0], ... [0.0, 0.0625, 0.5], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.25, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> tangent1 = hodograph(curve1, s) >>> tangent1 array([[1.25], [0.75]]) >>> tangent2 = hodograph(curve2, t) >>> tangent2 array([[2. ], [0.5]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.FIRST: 0> .. 
testcleanup:: classify-intersection1 import make_images make_images.classify_intersection1( s, curve1, tangent1, curve2, tangent2) We determine the interior (i.e. left) one by using the `right-hand rule`_: by embedding the tangent vectors in :math:`\mathbf{R}^3`, we compute .. _right-hand rule: https://en.wikipedia.org/wiki/Right-hand_rule .. math:: \left[\begin{array}{c} x_1'(s) \\ y_1'(s) \\ 0 \end{array}\right] \times \left[\begin{array}{c} x_2'(t) \\ y_2'(t) \\ 0 \end{array}\right] = \left[\begin{array}{c} 0 \\ 0 \\ x_1'(s) y_2'(t) - x_2'(t) y_1'(s) \end{array}\right]. If the cross product quantity :math:`B_1'(s) \times B_2'(t) = x_1'(s) y_2'(t) - x_2'(t) y_1'(s)` is positive, then the first curve is "outside" / "to the right", i.e. the second curve is interior. If the cross product is negative, the first curve is interior. When :math:`B_1'(s) \times B_2'(t) = 0`, the tangent vectors are parallel, i.e. the intersection is a point of tangency: .. image:: ../images/classify_intersection2.png :align: center .. doctest:: classify-intersection2 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.5, 2.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.5, 3.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_SECOND: 4> .. testcleanup:: classify-intersection2 import make_images make_images.classify_intersection2(s, curve1, curve2) Depending on the direction of the parameterizations, the interior curve may change, but we can use the (signed) `curvature`_ of each curve at that point to determine which is on the interior: .. 
_curvature: https://en.wikipedia.org/wiki/Curvature .. image:: ../images/classify_intersection3.png :align: center .. doctest:: classify-intersection3 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [2.0, 1.5, 1.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [3.0, 1.5, 0.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_FIRST: 3> .. testcleanup:: classify-intersection3 import make_images make_images.classify_intersection3(s, curve1, curve2) When the curves are moving in opposite directions at a point of tangency, there is no side to choose. Either the point of tangency is not part of any :class:`.CurvedPolygon` intersection .. image:: ../images/classify_intersection4.png :align: center .. doctest:: classify-intersection4 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [2.0, 1.5, 1.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0, 1.5, 3.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.OPPOSED: 2> .. testcleanup:: classify-intersection4 import make_images make_images.classify_intersection4(s, curve1, curve2) or the point of tangency is a "degenerate" part of two :class:`.CurvedPolygon` intersections. 
It is "degenerate" because from one direction, the point should be classified as :attr:`~.IntersectionClassification.FIRST` and from another as :attr:`~.IntersectionClassification.SECOND`. .. image:: ../images/classify_intersection5.png :align: center .. doctest:: classify-intersection5 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [1.0, 1.5, 2.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [3.0, 1.5, 0.0], ... [0.0, 1.0, 0.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_BOTH: 6> .. testcleanup:: classify-intersection5 import make_images make_images.classify_intersection5(s, curve1, curve2) The :attr:`~.IntersectionClassification.TANGENT_BOTH` classification can also occur if the curves are "kissing" but share a zero width interior at the point of tangency: .. image:: ../images/classify_intersection9.png :align: center .. doctest:: classify-intersection9 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [0.0, 20.0, 40.0], ... [0.0, 40.0, 0.0], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [40.0, 20.0, 0.0], ... [40.0, 0.0, 40.0], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.TANGENT_BOTH: 6> .. 
testcleanup:: classify-intersection9 import make_images make_images.classify_intersection9(s, curve1, curve2) However, if the `curvature`_ of each curve is identical, we don't try to distinguish further: .. image:: ../images/classify_intersection6.png :align: center .. doctest:: classify-intersection6 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [-0.125 , -0.125 , 0.375 ], ... [ 0.0625, -0.0625, 0.0625], ... ]) >>> curve1 = bezier.Curve(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [-0.25, -0.25, 0.75], ... [ 0.25, -0.25, 0.25], ... ]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 0.5, 0.5 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> hodograph(curve1, s) array([[0.5], [0. ]]) >>> hodograph(curve2, t) array([[1.], [0.]]) >>> curvature(curve1, s) 2.0 >>> curvature(curve2, t) 2.0 >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) Traceback (most recent call last): ... NotImplementedError: Tangent curves have same curvature. .. testcleanup:: classify-intersection6 import make_images make_images.classify_intersection6(s, curve1, curve2) In addition to points of tangency, intersections that happen at the end of an edge need special handling: .. image:: ../images/classify_intersection7.png :align: center .. doctest:: classify-intersection7 :options: +NORMALIZE_WHITESPACE >>> nodes1a = np.asfortranarray([ ... [0.0, 4.5, 9.0 ], ... [0.0, 0.0, 2.25], ... ]) >>> curve1a = bezier.Curve(nodes1a, degree=2) >>> nodes2 = np.asfortranarray([ ... [11.25, 9.0, 2.75], ... [ 0.0 , 4.5, 1.0 ], ... 
]) >>> curve2 = bezier.Curve(nodes2, degree=2) >>> s, t = 1.0, 0.375 >>> curve1a.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> edge_nodes1 = (nodes1a, None, None) >>> edge_nodes2 = (nodes2, None, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) Traceback (most recent call last): ... ValueError: ('Intersection occurs at the end of an edge', 's', 1.0, 't', 0.375) >>> >>> nodes1b = np.asfortranarray([ ... [9.0, 4.5, 0.0], ... [2.25, 2.375, 2.5], ... ]) >>> curve1b = bezier.Curve(nodes1b, degree=2) >>> curve1b.evaluate(0.0) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(1, 0.0, 0, t) >>> edge_nodes1 = (nodes1a, nodes1b, None) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.FIRST: 0> .. testcleanup:: classify-intersection7 import make_images make_images.classify_intersection7(s, curve1a, curve1b, curve2) As above, some intersections at the end of an edge are part of an actual intersection. However, some surfaces may just "kiss" at a corner intersection: .. image:: ../images/classify_intersection8.png :align: center .. doctest:: classify-intersection8 :options: +NORMALIZE_WHITESPACE >>> nodes1 = np.asfortranarray([ ... [0.25, 0.0, 0.0, 0.625, 0.5 , 1.0 ], ... [1.0 , 0.5, 0.0, 0.875, 0.375, 0.75], ... ]) >>> surface1 = bezier.Surface(nodes1, degree=2) >>> nodes2 = np.asfortranarray([ ... [0.0625, -0.25, -1.0, -0.5 , -1.0, -1.0], ... [0.5 , 1.0 , 1.0, 0.125, 0.5, 0.0], ... 
]) >>> surface2 = bezier.Surface(nodes2, degree=2) >>> curve1, _, _ = surface1.edges >>> edge_nodes1 = [curve.nodes for curve in surface1.edges] >>> curve2, _, _ = surface2.edges >>> edge_nodes2 = [curve.nodes for curve in surface2.edges] >>> s, t = 0.5, 0.0 >>> curve1.evaluate(s) == curve2.evaluate(t) array([[ True], [ True]]) >>> intersection = Intersection(0, s, 0, t) >>> classify_intersection(intersection, edge_nodes1, edge_nodes2) <IntersectionClassification.IGNORED_CORNER: 5> .. testcleanup:: classify-intersection8 import make_images make_images.classify_intersection8( s, curve1, surface1, curve2, surface2) .. note:: This assumes the intersection occurs in :math:`\mathbf{R}^2` but doesn't check this. .. note:: This function doesn't allow wiggle room / round-off when checking endpoints, nor when checking if the cross product is near zero, nor when curvatures are compared. However, the most "correct" version of this function likely should allow for some round off. Args: intersection (.Intersection): An intersection object. edge_nodes1 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the first surface being intersected. edge_nodes2 (Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]): The nodes of the three edges of the second surface being intersected. Returns: IntersectionClassification: The "inside" curve type, based on the classification enum. Raises: ValueError: If the intersection occurs at the end of either curve involved. This is because we want to classify which curve to **move forward** on, and we can't move past the end of a segment.
[ "r", "Determine", "which", "curve", "is", "on", "the", "inside", "of", "the", "intersection", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L1620-L2096
train
54,096
dhermes/bezier
src/bezier/_surface_helpers.py
handle_ends
def handle_ends(index1, s, index2, t): """Updates intersection parameters if it is on the end of an edge. .. note:: This is a helper used only by :meth:`.Surface.intersect`. Does nothing if the intersection happens in the middle of two edges. If the intersection occurs at the end of the first curve, moves it to the beginning of the next edge. Similar for the second curve. This function is used as a pre-processing step before passing an intersection to :func:`classify_intersection`. There, only corners that **begin** an edge are considered, since that function is trying to determine which edge to **move forward** on. Args: index1 (int): The index (among 0, 1, 2) of the first edge in the intersection. s (float): The parameter along the first curve of the intersection. index2 (int): The index (among 0, 1, 2) of the second edge in the intersection. t (float): The parameter along the second curve of the intersection. Returns: Tuple[bool, bool, Tuple[int, float, int, float]]: A triple of: * flag indicating if the intersection is at the end of an edge * flag indicating if the intersection is a "corner" * 4-tuple of the "updated" values ``(index1, s, index2, t)`` """ edge_end = False if s == 1.0: s = 0.0 index1 = (index1 + 1) % 3 edge_end = True # NOTE: This is not a typo, the values can be updated twice if both ``s`` # and ``t`` are ``1.0`` if t == 1.0: t = 0.0 index2 = (index2 + 1) % 3 edge_end = True is_corner = s == 0.0 or t == 0.0 return edge_end, is_corner, (index1, s, index2, t)
python
def handle_ends(index1, s, index2, t): """Updates intersection parameters if it is on the end of an edge. .. note:: This is a helper used only by :meth:`.Surface.intersect`. Does nothing if the intersection happens in the middle of two edges. If the intersection occurs at the end of the first curve, moves it to the beginning of the next edge. Similar for the second curve. This function is used as a pre-processing step before passing an intersection to :func:`classify_intersection`. There, only corners that **begin** an edge are considered, since that function is trying to determine which edge to **move forward** on. Args: index1 (int): The index (among 0, 1, 2) of the first edge in the intersection. s (float): The parameter along the first curve of the intersection. index2 (int): The index (among 0, 1, 2) of the second edge in the intersection. t (float): The parameter along the second curve of the intersection. Returns: Tuple[bool, bool, Tuple[int, float, int, float]]: A triple of: * flag indicating if the intersection is at the end of an edge * flag indicating if the intersection is a "corner" * 4-tuple of the "updated" values ``(index1, s, index2, t)`` """ edge_end = False if s == 1.0: s = 0.0 index1 = (index1 + 1) % 3 edge_end = True # NOTE: This is not a typo, the values can be updated twice if both ``s`` # and ``t`` are ``1.0`` if t == 1.0: t = 0.0 index2 = (index2 + 1) % 3 edge_end = True is_corner = s == 0.0 or t == 0.0 return edge_end, is_corner, (index1, s, index2, t)
[ "def", "handle_ends", "(", "index1", ",", "s", ",", "index2", ",", "t", ")", ":", "edge_end", "=", "False", "if", "s", "==", "1.0", ":", "s", "=", "0.0", "index1", "=", "(", "index1", "+", "1", ")", "%", "3", "edge_end", "=", "True", "# NOTE: Thi...
Updates intersection parameters if it is on the end of an edge. .. note:: This is a helper used only by :meth:`.Surface.intersect`. Does nothing if the intersection happens in the middle of two edges. If the intersection occurs at the end of the first curve, moves it to the beginning of the next edge. Similar for the second curve. This function is used as a pre-processing step before passing an intersection to :func:`classify_intersection`. There, only corners that **begin** an edge are considered, since that function is trying to determine which edge to **move forward** on. Args: index1 (int): The index (among 0, 1, 2) of the first edge in the intersection. s (float): The parameter along the first curve of the intersection. index2 (int): The index (among 0, 1, 2) of the second edge in the intersection. t (float): The parameter along the second curve of the intersection. Returns: Tuple[bool, bool, Tuple[int, float, int, float]]: A triple of: * flag indicating if the intersection is at the end of an edge * flag indicating if the intersection is a "corner" * 4-tuple of the "updated" values ``(index1, s, index2, t)``
[ "Updates", "intersection", "parameters", "if", "it", "is", "on", "the", "end", "of", "an", "edge", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L2099-L2145
train
54,097
dhermes/bezier
src/bezier/_surface_helpers.py
to_front
def to_front(intersection, intersections, unused): """Rotates a node to the "front". .. note:: This is a helper used only by :func:`basic_interior_combine`, which in turn is only used by :func:`combine_intersections`. If a node is at the end of a segment, moves it to the beginning of the next segment (at the exact same point). We assume that callers have pruned ``intersections`` so that there are none with ``s == 1.0`` or ``t == 1.0``. Hence, any such intersection will be an "artificial" intersection added by :func:`get_next`. .. note:: This method checks for **exact** endpoints, i.e. parameter bitwise identical to ``1.0``. But it may make sense to allow some wiggle room. Args: intersection (.Intersection): The current intersection. intersections (List[.Intersection]): List of all detected intersections, provided as a reference for potential points to arrive at. unused (List[.Intersection]): List of nodes that haven't been used yet in an intersection curved polygon Returns: .Intersection: An intersection to (maybe) move to the beginning of the next edge of the surface. """ if intersection.s == 1.0: next_index = (intersection.index_first + 1) % 3 # Make sure we haven't accidentally ignored an existing intersection. for other_int in intersections: if other_int.s == 0.0 and other_int.index_first == next_index: if other_int in unused: unused.remove(other_int) return other_int # If we haven't already returned, create **another** artificial # intersection. return _intersection_helpers.Intersection( next_index, 0.0, None, None, interior_curve=CLASSIFICATION_T.FIRST ) elif intersection.t == 1.0: # NOTE: We assume, but do not check, that ``s == 1.0`` and ``t == 1.0`` # are mutually exclusive. next_index = (intersection.index_second + 1) % 3 # Make sure we haven't accidentally ignored an existing intersection. 
for other_int in intersections: if other_int.t == 0.0 and other_int.index_second == next_index: if other_int in unused: unused.remove(other_int) return other_int # If we haven't already returned, create **another** artificial # intersection. return _intersection_helpers.Intersection( None, None, next_index, 0.0, interior_curve=CLASSIFICATION_T.SECOND ) else: return intersection
python
def to_front(intersection, intersections, unused): """Rotates a node to the "front". .. note:: This is a helper used only by :func:`basic_interior_combine`, which in turn is only used by :func:`combine_intersections`. If a node is at the end of a segment, moves it to the beginning of the next segment (at the exact same point). We assume that callers have pruned ``intersections`` so that there are none with ``s == 1.0`` or ``t == 1.0``. Hence, any such intersection will be an "artificial" intersection added by :func:`get_next`. .. note:: This method checks for **exact** endpoints, i.e. parameter bitwise identical to ``1.0``. But it may make sense to allow some wiggle room. Args: intersection (.Intersection): The current intersection. intersections (List[.Intersection]): List of all detected intersections, provided as a reference for potential points to arrive at. unused (List[.Intersection]): List of nodes that haven't been used yet in an intersection curved polygon Returns: .Intersection: An intersection to (maybe) move to the beginning of the next edge of the surface. """ if intersection.s == 1.0: next_index = (intersection.index_first + 1) % 3 # Make sure we haven't accidentally ignored an existing intersection. for other_int in intersections: if other_int.s == 0.0 and other_int.index_first == next_index: if other_int in unused: unused.remove(other_int) return other_int # If we haven't already returned, create **another** artificial # intersection. return _intersection_helpers.Intersection( next_index, 0.0, None, None, interior_curve=CLASSIFICATION_T.FIRST ) elif intersection.t == 1.0: # NOTE: We assume, but do not check, that ``s == 1.0`` and ``t == 1.0`` # are mutually exclusive. next_index = (intersection.index_second + 1) % 3 # Make sure we haven't accidentally ignored an existing intersection. 
for other_int in intersections: if other_int.t == 0.0 and other_int.index_second == next_index: if other_int in unused: unused.remove(other_int) return other_int # If we haven't already returned, create **another** artificial # intersection. return _intersection_helpers.Intersection( None, None, next_index, 0.0, interior_curve=CLASSIFICATION_T.SECOND ) else: return intersection
[ "def", "to_front", "(", "intersection", ",", "intersections", ",", "unused", ")", ":", "if", "intersection", ".", "s", "==", "1.0", ":", "next_index", "=", "(", "intersection", ".", "index_first", "+", "1", ")", "%", "3", "# Make sure we haven't accidentally i...
Rotates a node to the "front". .. note:: This is a helper used only by :func:`basic_interior_combine`, which in turn is only used by :func:`combine_intersections`. If a node is at the end of a segment, moves it to the beginning of the next segment (at the exact same point). We assume that callers have pruned ``intersections`` so that there are none with ``s == 1.0`` or ``t == 1.0``. Hence, any such intersection will be an "artificial" intersection added by :func:`get_next`. .. note:: This method checks for **exact** endpoints, i.e. parameter bitwise identical to ``1.0``. But it may make sense to allow some wiggle room. Args: intersection (.Intersection): The current intersection. intersections (List[.Intersection]): List of all detected intersections, provided as a reference for potential points to arrive at. unused (List[.Intersection]): List of nodes that haven't been used yet in an intersection curved polygon Returns: .Intersection: An intersection to (maybe) move to the beginning of the next edge of the surface.
[ "Rotates", "a", "node", "to", "the", "front", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L2148-L2213
train
54,098
dhermes/bezier
src/bezier/_surface_helpers.py
get_next
def get_next(intersection, intersections, unused): """Gets the next node along a given edge. .. note:: This is a helper used only by :func:`basic_interior_combine`, which in turn is only used by :func:`combine_intersections`. This function does the majority of the heavy lifting for :func:`basic_interior_combine`. .. note:: This function returns :class:`.Intersection` objects even when the point isn't strictly an intersection. This is "incorrect" in some sense, but for now, we don't bother implementing a class similar to, but different from, :class:`.Intersection` to satisfy this need. Args: intersection (.Intersection): The current intersection. intersections (List[.Intersection]): List of all detected intersections, provided as a reference for potential points to arrive at. unused (List[.Intersection]): List of nodes that haven't been used yet in an intersection curved polygon Returns: .Intersection: The "next" point along a surface of intersection. This will produce the next intersection along the current edge or the end of the current edge. Raises: ValueError: If the intersection is not classified as :attr:`~.IntersectionClassification.FIRST`, :attr:`~.IntersectionClassification.TANGENT_FIRST`, :attr:`~.IntersectionClassification.SECOND`, :attr:`~.IntersectionClassification.TANGENT_SECOND` or :attr:`~.IntersectionClassification.COINCIDENT`. """ result = None if is_first(intersection.interior_curve): result = get_next_first(intersection, intersections) elif is_second(intersection.interior_curve): result = get_next_second(intersection, intersections) elif intersection.interior_curve == CLASSIFICATION_T.COINCIDENT: result = get_next_coincident(intersection, intersections) else: raise ValueError( 'Cannot get next node if not starting from "FIRST", ' '"TANGENT_FIRST", "SECOND", "TANGENT_SECOND" or "COINCIDENT".' ) if result in unused: unused.remove(result) return result
python
def get_next(intersection, intersections, unused): """Gets the next node along a given edge. .. note:: This is a helper used only by :func:`basic_interior_combine`, which in turn is only used by :func:`combine_intersections`. This function does the majority of the heavy lifting for :func:`basic_interior_combine`. .. note:: This function returns :class:`.Intersection` objects even when the point isn't strictly an intersection. This is "incorrect" in some sense, but for now, we don't bother implementing a class similar to, but different from, :class:`.Intersection` to satisfy this need. Args: intersection (.Intersection): The current intersection. intersections (List[.Intersection]): List of all detected intersections, provided as a reference for potential points to arrive at. unused (List[.Intersection]): List of nodes that haven't been used yet in an intersection curved polygon Returns: .Intersection: The "next" point along a surface of intersection. This will produce the next intersection along the current edge or the end of the current edge. Raises: ValueError: If the intersection is not classified as :attr:`~.IntersectionClassification.FIRST`, :attr:`~.IntersectionClassification.TANGENT_FIRST`, :attr:`~.IntersectionClassification.SECOND`, :attr:`~.IntersectionClassification.TANGENT_SECOND` or :attr:`~.IntersectionClassification.COINCIDENT`. """ result = None if is_first(intersection.interior_curve): result = get_next_first(intersection, intersections) elif is_second(intersection.interior_curve): result = get_next_second(intersection, intersections) elif intersection.interior_curve == CLASSIFICATION_T.COINCIDENT: result = get_next_coincident(intersection, intersections) else: raise ValueError( 'Cannot get next node if not starting from "FIRST", ' '"TANGENT_FIRST", "SECOND", "TANGENT_SECOND" or "COINCIDENT".' ) if result in unused: unused.remove(result) return result
[ "def", "get_next", "(", "intersection", ",", "intersections", ",", "unused", ")", ":", "result", "=", "None", "if", "is_first", "(", "intersection", ".", "interior_curve", ")", ":", "result", "=", "get_next_first", "(", "intersection", ",", "intersections", ")...
Gets the next node along a given edge. .. note:: This is a helper used only by :func:`basic_interior_combine`, which in turn is only used by :func:`combine_intersections`. This function does the majority of the heavy lifting for :func:`basic_interior_combine`. .. note:: This function returns :class:`.Intersection` objects even when the point isn't strictly an intersection. This is "incorrect" in some sense, but for now, we don't bother implementing a class similar to, but different from, :class:`.Intersection` to satisfy this need. Args: intersection (.Intersection): The current intersection. intersections (List[.Intersection]): List of all detected intersections, provided as a reference for potential points to arrive at. unused (List[.Intersection]): List of nodes that haven't been used yet in an intersection curved polygon Returns: .Intersection: The "next" point along a surface of intersection. This will produce the next intersection along the current edge or the end of the current edge. Raises: ValueError: If the intersection is not classified as :attr:`~.IntersectionClassification.FIRST`, :attr:`~.IntersectionClassification.TANGENT_FIRST`, :attr:`~.IntersectionClassification.SECOND`, :attr:`~.IntersectionClassification.TANGENT_SECOND` or :attr:`~.IntersectionClassification.COINCIDENT`.
[ "Gets", "the", "next", "node", "along", "a", "given", "edge", "." ]
4f941f82637a8e70a5b159a9203132192e23406b
https://github.com/dhermes/bezier/blob/4f941f82637a8e70a5b159a9203132192e23406b/src/bezier/_surface_helpers.py#L2419-L2472
train
54,099