body
stringlengths 26
98.2k
| body_hash
int64 -9,222,864,604,528,158,000
9,221,803,474B
| docstring
stringlengths 1
16.8k
| path
stringlengths 5
230
| name
stringlengths 1
96
| repository_name
stringlengths 7
89
| lang
stringclasses 1
value | body_without_docstring
stringlengths 20
98.2k
|
|---|---|---|---|---|---|---|---|
@abstractmethod
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
    """Decode a token id list into a token list.

    Implemented by subclass; a token list is the string representation of
    each token id.

    Args:
        tokens: List of int representing the token ids.

    Returns:
        A list of decoded tokens.
    """
    raise NotImplementedError()
| 8,077,708,010,352,892,000
|
Implemented by subclass in order to decode a token id list into a token list.
A token list is the string representation of each token id.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded tokens.
|
nemo/collections/asr/metrics/rnnt_wer.py
|
decode_ids_to_tokens
|
JINHXu/NeMo
|
python
|
@abstractmethod
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
    """Decode a token id list into a token list (one string per id).

    Args:
        tokens: List of int representing the token ids.

    Returns:
        A list of decoded tokens.
    """
    raise NotImplementedError()
|
def decode_tokens_to_str(self, tokens: List[int]) -> str:
    """Decode a token id list into a string.

    Blank ids (``self.blank_id``) are skipped; the remaining ids are mapped
    through ``self.labels_map`` and concatenated.

    Args:
        tokens: List of int representing the token ids.

    Returns:
        A decoded string.
    """
    non_blank = (self.labels_map[token_id] for token_id in tokens if token_id != self.blank_id)
    return ''.join(non_blank)
| 3,894,442,656,208,714,000
|
Implemented by subclass in order to decoder a token list into a string.
Args:
tokens: List of int representing the token ids.
Returns:
A decoded string.
|
nemo/collections/asr/metrics/rnnt_wer.py
|
decode_tokens_to_str
|
JINHXu/NeMo
|
python
|
def decode_tokens_to_str(self, tokens: List[int]) -> str:
    """Decode a token id list into a string.

    Args:
        tokens: List of int representing the token ids.

    Returns:
        A decoded string.
    """
    # BUG FIX: the empty-string join target was missing (`.join(...)` is a
    # SyntaxError); restore ''.join as in the sibling implementation above.
    hypothesis = ''.join([self.labels_map[c] for c in tokens if (c != self.blank_id)])
    return hypothesis
|
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
    """Decode a token id list into a token list.

    Args:
        tokens: List of int representing the token ids.

    Returns:
        A list of decoded tokens (blank ids are dropped).
    """
    return [self.labels_map[token_id] for token_id in tokens if token_id != self.blank_id]
| -6,223,895,649,209,285,000
|
Implemented by subclass in order to decode a token id list into a token list.
A token list is the string representation of each token id.
Args:
tokens: List of int representing the token ids.
Returns:
A list of decoded tokens.
|
nemo/collections/asr/metrics/rnnt_wer.py
|
decode_ids_to_tokens
|
JINHXu/NeMo
|
python
|
def decode_ids_to_tokens(self, tokens: List[int]) -> List[str]:
    """Decode a token id list into a token list.

    Args:
        tokens: List of int representing the token ids.

    Returns:
        A list of decoded tokens (blank ids are dropped).
    """
    decoded = []
    for token_id in tokens:
        if token_id == self.blank_id:
            continue
        decoded.append(self.labels_map[token_id])
    return decoded
|
def setUp(self, debug=False, profile=True):
    """Set up the test environment.

    Args:
        debug(bool): enable debug output for this test
        profile(bool): enable profiling messages
    """
    TestCase.setUp(self)
    self.profile = profile
    self.debug = debug
    # profiler announces the current test method when profiling is on
    self.profiler = Profiler(f'test {self._testMethodName}, debug={self.debug}', profile=self.profile)
| 2,393,079,469,751,454,000
|
setUp test environment
|
tests/basetest.py
|
setUp
|
WolfgangFahl/pyOnlineSpreadSheetEditing
|
python
|
def setUp(self, debug=False, profile=True):
    """Set up the test environment.

    Args:
        debug(bool): enable debug output for this test
        profile(bool): enable profiling messages
    """
    TestCase.setUp(self)
    self.debug = debug
    self.profile = profile
    msg = f'test {self._testMethodName}, debug={self.debug}'
    self.profiler = Profiler(msg, profile=self.profile)
|
@staticmethod
def inPublicCI():
    """Check whether we run in a public Continuous Integration environment.

    Returns:
        bool: True when the current user looks like a CI account
        ('travis'/'runner') or a Jenkins home directory is configured.
    """
    user_is_ci = getpass.getuser() in ['travis', 'runner']
    on_jenkins = 'JENKINS_HOME' in os.environ
    return user_is_ci or on_jenkins
| -2,310,455,625,163,682,300
|
are we running in a public Continuous Integration Environment?
|
tests/basetest.py
|
inPublicCI
|
WolfgangFahl/pyOnlineSpreadSheetEditing
|
python
|
@staticmethod
def inPublicCI():
    """Are we running in a public Continuous Integration environment?"""
    # travis / GitHub-actions runner use well-known account names
    publicCI = (getpass.getuser() in ['travis', 'runner'])
    jenkins = ('JENKINS_HOME' in os.environ)
    return (publicCI or jenkins)
|
def __init__(self, msg, profile=True):
    """Construct me with the given msg and profile active flag.

    Args:
        msg(str): the message to show if profiling is active
        profile(bool): True if messages should be shown
    """
    self.profile = profile
    self.msg = msg
    self.starttime = time.time()
    if self.profile:
        print(f'Starting {msg} ...')
| 6,166,635,583,448,363,000
|
construct me with the given msg and profile active flag
Args:
msg(str): the message to show if profiling is active
profile(bool): True if messages should be shown
|
tests/basetest.py
|
__init__
|
WolfgangFahl/pyOnlineSpreadSheetEditing
|
python
|
def __init__(self, msg, profile=True):
    """Construct me with the given msg and profile active flag.

    Args:
        msg(str): the message to show if profiling is active
        profile(bool): True if messages should be shown
    """
    self.msg = msg
    self.profile = profile
    # remember construction time; time() reports elapsed seconds from here
    self.starttime = time.time()
    if profile:
        print(f'Starting {msg} ...')
|
def time(self, extraMsg=''):
    """Measure the elapsed time and print it if profiling is active.

    Args:
        extraMsg(str): optional suffix appended to the profile message

    Returns:
        float: elapsed seconds since construction
    """
    elapsed = time.time() - self.starttime
    if self.profile:
        print(f'{self.msg}{extraMsg} took {elapsed:5.1f} s')
    return elapsed
| -4,921,684,839,023,697,000
|
time the action and print if profile is active
|
tests/basetest.py
|
time
|
WolfgangFahl/pyOnlineSpreadSheetEditing
|
python
|
def time(self, extraMsg=''):
    """Time the action and print if profile is active.

    Args:
        extraMsg(str): optional suffix for the printed message

    Returns:
        float: elapsed seconds since self.starttime
    """
    # BUG FIX: the default value '' for extraMsg was missing
    # (`extraMsg=` is a SyntaxError); restored from the sibling definition.
    elapsed = (time.time() - self.starttime)
    if self.profile:
        print(f'{self.msg}{extraMsg} took {elapsed:5.1f} s')
    return elapsed
|
def __init__(self, id):
    """Initialize the class."""
    # 'S' marks the Structure level — presumably the top of the Bio.PDB
    # SMCRA hierarchy; confirm against Entity/Model definitions
    self.level = 'S'
    # delegate id bookkeeping and child management to the Entity base class
    Entity.__init__(self, id)
| 7,661,185,872,800,717,000
|
Initialize the class.
|
Bio/PDB/Structure.py
|
__init__
|
AaronLi/biopython
|
python
|
def __init__(self, id):
    """Initialize the class."""
    # 'S' marks the Structure level of the hierarchy
    self.level = 'S'
    Entity.__init__(self, id)
|
def __repr__(self):
    """Return the structure identifier."""
    structure_id = self.get_id()
    return '<Structure id=%s>' % structure_id
| -6,355,413,107,838,914,000
|
Return the structure identifier.
|
Bio/PDB/Structure.py
|
__repr__
|
AaronLi/biopython
|
python
|
def __repr__(self):
    """Return the structure identifier."""
    return ('<Structure id=%s>' % self.get_id())
|
def get_models(self):
    """Yield the models of this structure."""
    yield from self
| 6,382,655,307,569,794,000
|
Return models.
|
Bio/PDB/Structure.py
|
get_models
|
AaronLi/biopython
|
python
|
def get_models(self):
    """Return models."""
    # iterating a Structure yields its child models directly
    (yield from self)
|
def get_chains(self):
    """Yield the chains of every model."""
    for model in self.get_models():
        yield from model
| -7,909,861,180,488,219,000
|
Return chains from models.
|
Bio/PDB/Structure.py
|
get_chains
|
AaronLi/biopython
|
python
|
def get_chains(self):
    """Return chains from models."""
    for m in self.get_models():
        # each model yields its chains
        (yield from m)
|
def get_residues(self):
    """Yield the residues of every chain."""
    for chain in self.get_chains():
        yield from chain
| 8,528,804,944,604,388,000
|
Return residues from chains.
|
Bio/PDB/Structure.py
|
get_residues
|
AaronLi/biopython
|
python
|
def get_residues(self):
    """Return residues from chains."""
    for c in self.get_chains():
        # each chain yields its residues
        (yield from c)
|
def get_atoms(self):
    """Yield the atoms of every residue."""
    for residue in self.get_residues():
        yield from residue
| -8,607,889,299,429,391,000
|
Return atoms from residue.
|
Bio/PDB/Structure.py
|
get_atoms
|
AaronLi/biopython
|
python
|
def get_atoms(self):
    """Return atoms from residue."""
    for r in self.get_residues():
        # each residue yields its atoms
        (yield from r)
|
def atom_to_internal_coordinates(self, verbose: bool=False) -> None:
    """Create/update internal coordinates from Atom X,Y,Z coordinates.

    Internal coordinates are bond length, angle and dihedral angles;
    the work is delegated to each chain.

    :param verbose bool: default False
        describe runtime problems
    """
    for chain in self.get_chains():
        chain.atom_to_internal_coordinates(verbose)
| 396,073,781,232,036,200
|
Create/update internal coordinates from Atom X,Y,Z coordinates.
Internal coordinates are bond length, angle and dihedral angles.
:param verbose bool: default False
describe runtime problems
|
Bio/PDB/Structure.py
|
atom_to_internal_coordinates
|
AaronLi/biopython
|
python
|
def atom_to_internal_coordinates(self, verbose: bool=False) -> None:
    """Create/update internal coordinates from Atom X,Y,Z coordinates.

    Internal coordinates are bond length, angle and dihedral angles.

    :param verbose bool: default False
        describe runtime problems
    """
    # delegate per-chain; chains own their internal-coordinate state
    for chn in self.get_chains():
        chn.atom_to_internal_coordinates(verbose)
|
def internal_to_atom_coordinates(self, verbose: bool=False) -> None:
    """Create/update atom coordinates from internal coordinates.

    :param verbose bool: default False
        describe runtime problems

    :raises Exception: if any chain does not have .pic attribute
    """
    for chain in self.get_chains():
        chain.internal_to_atom_coordinates(verbose)
| 6,753,981,550,650,205,000
|
Create/update atom coordinates from internal coordinates.
:param verbose bool: default False
describe runtime problems
:raises Exception: if any chain does not have .pic attribute
|
Bio/PDB/Structure.py
|
internal_to_atom_coordinates
|
AaronLi/biopython
|
python
|
def internal_to_atom_coordinates(self, verbose: bool=False) -> None:
    """Create/update atom coordinates from internal coordinates.

    :param verbose bool: default False
        describe runtime problems

    :raises Exception: if any chain does not have .pic attribute
    """
    # delegate per-chain; chains raise if internal coordinates are missing
    for chn in self.get_chains():
        chn.internal_to_atom_coordinates(verbose)
|
def ll2z(locations, radius_search=CITY_RADIUS_SEARCH):
    """Computes the z-value for the given lon, lat tuple or
    list of lon, lat tuples.

    :param locations: (lon, lat) tuple or list of such tuples
    :param radius_search: Checks to see if a city is within X km from the
        given location; removes the search if value is set to 0
    :return: Array of z-values, one for each location specified
    """
    try:
        multi = bool(len(locations[0]))
    except TypeError:
        # a single (lon, lat) pair was passed; wrap it so the rest of the
        # code can treat the input uniformly as a list of locations
        multi = False
        locations = [locations]
    # NOTE(review): `multi` is never used after this point — the return is
    # always an array even for a single location; verify callers expect that.
    out = np.zeros(len(locations))
    # assign the fixed z-value of any polygon that contains a location
    for p in POLYGONS:
        c = Path(geo.path_from_corners(corners=np.loadtxt(p[0]).tolist(), output=None, min_edge_points=4)).contains_points(locations)
        out = np.where(c, p[1], out)
    if (radius_search > 0):
        # snap each location to the z-value of the nearest city within
        # radius_search km, overriding any polygon assignment
        cities = pd.read_csv(os.path.join(DATA, 'cities_z.csv'), header=None, names=['lon', 'lat', 'city', 'z_value'])
        cities_ll = cities[['lon', 'lat']].values
        for (i, location) in enumerate(locations):
            dists = geo.get_distances(cities_ll, location[0], location[1])
            if np.any((dists < radius_search)):
                cities['dist'] = dists
                city_idx = cities.dist.idxmin()
                out[i] = cities.loc[city_idx].z_value
    # interpolate remaining locations from the per-z-value contour point files
    nz = []
    points_all = []
    for z in Z_VALS:
        points = np.atleast_2d(np.loadtxt((Z_FORMAT % z)))
        nz.append(len(points))
        points_all.append(points)
    points = np.concatenate(points_all)
    del points_all
    z = griddata(points, np.repeat(Z_VALS, nz), locations, method='linear')
    # 0.13 is presumably the fallback z where interpolation is undefined —
    # TODO confirm against the NZS1170.5 z-factor specification
    return np.where((out == 0), np.where(np.isnan(z), 0.13, z), out)
| -9,122,859,694,354,971,000
|
Computes the z-value for the given lon, lat tuple or
list of lon, lat tuples
:param locations:
:param radius_search: Checks to see if a city is within X km from the given location,
removes the search if value is set to 0
:return: Array of z-values, one for each location specified
|
calculation/gmhazard_calc/gmhazard_calc/nz_code/nzs1170p5/nzs_zfactor_2016/ll2z.py
|
ll2z
|
ucgmsim/gmhazard
|
python
|
def ll2z(locations, radius_search=CITY_RADIUS_SEARCH):
    """Computes the z-value for the given lon, lat tuple or
    list of lon, lat tuples.

    :param locations: (lon, lat) tuple or list of such tuples
    :param radius_search: Checks to see if a city is within X km from the
        given location; removes the search if value is set to 0
    :return: Array of z-values, one for each location specified
    """
    try:
        multi = bool(len(locations[0]))
    except TypeError:
        # single (lon, lat) pair: wrap so the code below sees a list
        multi = False
        locations = [locations]
    # NOTE(review): `multi` is unused below
    out = np.zeros(len(locations))
    # polygon containment assigns fixed z-values
    for p in POLYGONS:
        c = Path(geo.path_from_corners(corners=np.loadtxt(p[0]).tolist(), output=None, min_edge_points=4)).contains_points(locations)
        out = np.where(c, p[1], out)
    if (radius_search > 0):
        # nearest city within radius overrides the polygon value
        cities = pd.read_csv(os.path.join(DATA, 'cities_z.csv'), header=None, names=['lon', 'lat', 'city', 'z_value'])
        cities_ll = cities[['lon', 'lat']].values
        for (i, location) in enumerate(locations):
            dists = geo.get_distances(cities_ll, location[0], location[1])
            if np.any((dists < radius_search)):
                cities['dist'] = dists
                city_idx = cities.dist.idxmin()
                out[i] = cities.loc[city_idx].z_value
    # linear interpolation over the contour point sets for everything else
    nz = []
    points_all = []
    for z in Z_VALS:
        points = np.atleast_2d(np.loadtxt((Z_FORMAT % z)))
        nz.append(len(points))
        points_all.append(points)
    points = np.concatenate(points_all)
    del points_all
    z = griddata(points, np.repeat(Z_VALS, nz), locations, method='linear')
    # 0.13 is the fallback where interpolation yields NaN — TODO confirm
    return np.where((out == 0), np.where(np.isnan(z), 0.13, z), out)
|
def testRaw():
    """Connect a UtopiaDataInterface and run the signal viewer on the raw stream."""
    ui = UtopiaDataInterface()
    ui.connect()
    # 30000 presumably is a time window in ms — confirm against sigViewer
    sigViewer(ui, 30000)
| -6,927,465,488,642,527,000
|
[summary]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testRaw
|
CkiChen/pymindaffectBCI
|
python
|
def testRaw():
    """Connect a UtopiaDataInterface and run the signal viewer on the raw stream."""
    ui = UtopiaDataInterface()
    ui.connect()
    sigViewer(ui, 30000)
|
def testPP():
    """Run the signal viewer on a filtered and downsampled data stream."""
    from sigViewer import sigViewer
    # stopband pairs in Hz; -1 presumably means 'up to Nyquist' — TODO confirm
    # against butterfilt_and_downsample
    ppfn = butterfilt_and_downsample(order=4, stopband=((0, 1), (25, (- 1))), fs_out=100)
    ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None)
    ui.connect()
    sigViewer(ui)
| -8,013,218,896,713,429,000
|
[summary]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testPP
|
CkiChen/pymindaffectBCI
|
python
|
def testPP():
    """Run the signal viewer on a filtered and downsampled data stream."""
    from sigViewer import sigViewer
    ppfn = butterfilt_and_downsample(order=4, stopband=((0, 1), (25, (- 1))), fs_out=100)
    ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None)
    ui.connect()
    sigViewer(ui)
|
def testFileProxy(filename, fs_out=999):
    """Replay a saved file through a FileProxyHub and view the signal.

    Args:
        filename: path of the saved-session file to replay
        fs_out (int, optional): output sample rate for the pre-processor.
            Defaults to 999.
    """
    from mindaffectBCI.decoder.FileProxyHub import FileProxyHub
    U = FileProxyHub(filename)
    from sigViewer import sigViewer
    # 1-15Hz band-pass, then resample to fs_out
    ppfn = butterfilt_and_downsample(order=4, stopband=(1, 15, 'bandpass'), fs_out=fs_out)
    ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None, mintime_ms=0, U=U)
    ui.connect()
    sigViewer(ui)
| 3,211,236,362,447,928,300
|
[summary]
Args:
filename ([type]): [description]
fs_out (int, optional): [description]. Defaults to 999.
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testFileProxy
|
CkiChen/pymindaffectBCI
|
python
|
def testFileProxy(filename, fs_out=999):
    """Replay a saved file through a FileProxyHub and view the signal.

    Args:
        filename: path of the saved-session file to replay
        fs_out (int, optional): output sample rate. Defaults to 999.
    """
    from mindaffectBCI.decoder.FileProxyHub import FileProxyHub
    U = FileProxyHub(filename)
    from sigViewer import sigViewer
    ppfn = butterfilt_and_downsample(order=4, stopband=(1, 15, 'bandpass'), fs_out=fs_out)
    ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None, mintime_ms=0, U=U)
    ui.connect()
    sigViewer(ui)
|
def testFileProxy2(filename):
    """Replay a saved file, collect all data and stimulus packets, and
    pickle them for offline inspection.

    Args:
        filename: path of the saved-session file to replay
    """
    from mindaffectBCI.decoder.FileProxyHub import FileProxyHub
    U = FileProxyHub(filename)
    fs = 200
    fs_out = 200
    # stopband tuples in Hz; -1 presumably means 'up to Nyquist' — TODO confirm
    ppfn = butterfilt_and_downsample(order=4, stopband=((45, 65), (0, 3), (25, (- 1))), fs=fs, fs_out=fs_out)
    ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None, mintime_ms=0, U=U, fs=fs)
    ui.connect()
    data = []
    stim = []
    emptycount = 0
    # drain the replayed file; stop after >10 consecutive empty updates,
    # which signals end-of-file
    while True:
        (newmsg, nsamp, nstim) = ui.update()
        if ((len(newmsg) == 0) and (nsamp == 0) and (nstim == 0)):
            emptycount = (emptycount + 1)
            if (emptycount > 10):
                break
        else:
            emptycount = 0
        if (nsamp > 0):
            # copy the newly arrived tail of the ring buffer
            data.append(ui.data_ringbuffer[(- nsamp):, :].copy())
        if (nstim > 0):
            stim.append(ui.stimulus_ringbuffer[(- nstim):, :].copy())
    data = np.vstack(data)
    stim = np.vstack(stim)
    import pickle
    # NOTE(review): ppfn is always set above, so the 'raw_udi.pk' branch is dead
    if (ppfn is None):
        pickle.dump(dict(data=data, stim=stim), open('raw_udi.pk', 'wb'))
    else:
        pickle.dump(dict(data=data, stim=stim), open('pp_udi.pk', 'wb'))
| 4,541,379,977,686,302,000
|
[summary]
Args:
filename ([type]): [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testFileProxy2
|
CkiChen/pymindaffectBCI
|
python
|
def testFileProxy2(filename):
    """Replay a saved file, collect all data and stimulus packets, and
    pickle them for offline inspection.

    Args:
        filename: path of the saved-session file to replay
    """
    from mindaffectBCI.decoder.FileProxyHub import FileProxyHub
    U = FileProxyHub(filename)
    fs = 200
    fs_out = 200
    ppfn = butterfilt_and_downsample(order=4, stopband=((45, 65), (0, 3), (25, (- 1))), fs=fs, fs_out=fs_out)
    ui = UtopiaDataInterface(data_preprocessor=ppfn, stimulus_preprocessor=None, mintime_ms=0, U=U, fs=fs)
    ui.connect()
    data = []
    stim = []
    emptycount = 0
    # stop after >10 consecutive empty updates (end of the replayed file)
    while True:
        (newmsg, nsamp, nstim) = ui.update()
        if ((len(newmsg) == 0) and (nsamp == 0) and (nstim == 0)):
            emptycount = (emptycount + 1)
            if (emptycount > 10):
                break
        else:
            emptycount = 0
        if (nsamp > 0):
            data.append(ui.data_ringbuffer[(- nsamp):, :].copy())
        if (nstim > 0):
            stim.append(ui.stimulus_ringbuffer[(- nstim):, :].copy())
    data = np.vstack(data)
    stim = np.vstack(stim)
    import pickle
    # NOTE(review): ppfn is always set above, so the 'raw_udi.pk' branch is dead
    if (ppfn is None):
        pickle.dump(dict(data=data, stim=stim), open('raw_udi.pk', 'wb'))
    else:
        pickle.dump(dict(data=data, stim=stim), open('pp_udi.pk', 'wb'))
|
def testERP():
    """Connect a UtopiaDataInterface and run the ERP viewer on it."""
    ui = UtopiaDataInterface()
    ui.connect()
    erpViewer(ui, evtlabs=None)
| -352,454,395,409,471,940
|
[summary]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testERP
|
CkiChen/pymindaffectBCI
|
python
|
def testERP():
    """Connect a UtopiaDataInterface and run the ERP viewer on it."""
    ui = UtopiaDataInterface()
    ui.connect()
    erpViewer(ui, evtlabs=None)
|
def testElectrodeQualities(X, fs=200, pktsize=20):
    """Estimate per-electrode noise-to-signal ratios packet-by-packet.

    Args:
        X: raw data array; trailing dims are (samples, electrodes) —
            leading trial dimensions are handled by recursion
        fs (int, optional): sample rate of X. Defaults to 200.
        pktsize (int, optional): samples per simulated packet. Defaults to 20.

    Returns:
        np.ndarray: (n_packets, n_electrodes) noise-to-signal ratios
    """
    # recurse over leading (trial) dimensions and stack the results
    if (X.ndim > 2):
        sigq = []
        for i in range(X.shape[0]):
            sigqi = testElectrodeQualities(X[(i, ...)], fs, pktsize)
            sigq.append(sigqi)
        sigq = np.concatenate(sigq, 0)
        return sigq
    ppfn = butterfilt_and_downsample(order=6, stopband='butter_stopband((0, 5), (25, -1))_fs200.pk', fs_out=100)
    # fit the filter state on a short prefix before streaming the packets
    ppfn.fit(X[:10, :], fs=200)
    noise2sig = np.zeros((int((X.shape[0] / pktsize)), X.shape[(- 1)]), dtype=np.float32)
    for pkti in range(noise2sig.shape[0]):
        t = (pkti * pktsize)
        Xi = X[t:(t + pktsize), :]
        Xip = ppfn.transform(Xi)
        (raw_power, preproc_power) = UtopiaDataInterface.update_electrode_powers(Xi, Xip)
        # noise = power removed by pre-processing; epsilons avoid divide-by-zero
        noise2sig[pkti, :] = (np.maximum(float(1e-06), (raw_power - preproc_power)) / np.maximum(float(1e-08), preproc_power))
    return noise2sig
| -3,518,891,553,744,630,000
|
[summary]
Args:
X ([type]): [description]
fs (int, optional): [description]. Defaults to 200.
pktsize (int, optional): [description]. Defaults to 20.
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testElectrodeQualities
|
CkiChen/pymindaffectBCI
|
python
|
def testElectrodeQualities(X, fs=200, pktsize=20):
    """Estimate per-electrode noise-to-signal ratios packet-by-packet.

    Args:
        X: raw data; trailing dims are (samples, electrodes)
        fs (int, optional): sample rate of X. Defaults to 200.
        pktsize (int, optional): samples per simulated packet. Defaults to 20.

    Returns:
        np.ndarray: (n_packets, n_electrodes) noise-to-signal ratios
    """
    # recurse over leading (trial) dimensions
    if (X.ndim > 2):
        sigq = []
        for i in range(X.shape[0]):
            sigqi = testElectrodeQualities(X[(i, ...)], fs, pktsize)
            sigq.append(sigqi)
        sigq = np.concatenate(sigq, 0)
        return sigq
    ppfn = butterfilt_and_downsample(order=6, stopband='butter_stopband((0, 5), (25, -1))_fs200.pk', fs_out=100)
    ppfn.fit(X[:10, :], fs=200)
    noise2sig = np.zeros((int((X.shape[0] / pktsize)), X.shape[(- 1)]), dtype=np.float32)
    for pkti in range(noise2sig.shape[0]):
        t = (pkti * pktsize)
        Xi = X[t:(t + pktsize), :]
        Xip = ppfn.transform(Xi)
        (raw_power, preproc_power) = UtopiaDataInterface.update_electrode_powers(Xi, Xip)
        # epsilons guard against divide-by-zero on silent channels
        noise2sig[pkti, :] = (np.maximum(float(1e-06), (raw_power - preproc_power)) / np.maximum(float(1e-08), preproc_power))
    return noise2sig
|
def connect(self, host=None, port=(- 1), queryifhostnotfound=True):
    """Make a connection to the utopia host.

    Args:
        host (str, optional): hub hostname; the current one is kept if None.
        port (int, optional): hub port; the current one is kept if <= 0.
        queryifhostnotfound (bool, optional): passed through to autoconnect.
            Defaults to True.

    Returns:
        bool: True if the client is connected after the attempt
    """
    if host:
        self.host = host
    if (port > 0):
        self.port = port
    self.U.autoconnect(self.host, self.port, timeout_ms=5000, queryifhostnotfound=queryifhostnotfound)
    if self.U.isConnected:
        # subscribe to the 'DEMSN' message set on successful connect —
        # presumably one letter per message type; confirm against UtopiaClient
        self.U.sendMessage(Subscribe(None, 'DEMSN'))
    return self.U.isConnected
| 1,323,226,354,754,162,000
|
[make a connection to the utopia host]
Args:
host ([type], optional): [description]. Defaults to None.
port (int, optional): [description]. Defaults to -1.
queryifhostnotfound (bool, optional): [description]. Defaults to True.
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
connect
|
CkiChen/pymindaffectBCI
|
python
|
def connect(self, host=None, port=(- 1), queryifhostnotfound=True):
    """Make a connection to the utopia host.

    Args:
        host (str, optional): hub hostname; kept if None.
        port (int, optional): hub port; kept if <= 0.
        queryifhostnotfound (bool, optional): passed through to autoconnect.

    Returns:
        bool: True if connected after the attempt
    """
    if host:
        self.host = host
    if (port > 0):
        self.port = port
    self.U.autoconnect(self.host, self.port, timeout_ms=5000, queryifhostnotfound=queryifhostnotfound)
    if self.U.isConnected:
        # subscribe to the 'DEMSN' message set once connected
        self.U.sendMessage(Subscribe(None, 'DEMSN'))
    return self.U.isConnected
|
def isConnected(self):
    """Report whether the underlying Utopia client is connected.

    Returns:
        bool: False when no client exists, otherwise the client's state
    """
    if self.U is None:
        return False
    return self.U.isConnected
| 7,819,996,133,519,783,000
|
[summary]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
isConnected
|
CkiChen/pymindaffectBCI
|
python
|
def isConnected(self):
    """Report whether the underlying Utopia client is connected.

    Returns:
        bool: False when no client exists, otherwise the client's state
    """
    return (self.U.isConnected if (self.U is not None) else False)
|
def getTimeStamp(self):
    """Return the current time-stamp from the underlying Utopia client.

    Returns:
        the client's current time-stamp value
    """
    return self.U.getTimeStamp()
| 625,336,908,263,022,200
|
[summary]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
getTimeStamp
|
CkiChen/pymindaffectBCI
|
python
|
def getTimeStamp(self):
    """Return the current time-stamp from the underlying Utopia client."""
    return self.U.getTimeStamp()
|
def sendMessage(self, msg: UtopiaMessage):
    """Send a UtopiaMessage to the utopia hub.

    Args:
        msg (UtopiaMessage): the message to forward to the hub
    """
    self.U.sendMessage(msg)
| 3,466,417,455,431,208,400
|
[send a UtopiaMessage to the utopia hub]
Args:
msg (UtopiaMessage): [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
sendMessage
|
CkiChen/pymindaffectBCI
|
python
|
def sendMessage(self, msg: UtopiaMessage):
    """Send a UtopiaMessage to the utopia hub.

    Args:
        msg (UtopiaMessage): the message to forward
    """
    self.U.sendMessage(msg)
|
def getNewMessages(self, timeout_ms=0):
    """Get new messages from the UtopiaHub.

    Args:
        timeout_ms (int, optional): wait time in ms. Defaults to 0.

    Returns:
        the new messages received within the timeout
    """
    return self.U.getNewMessages(timeout_ms)
| 6,442,127,469,990,777,000
|
[get new messages from the UtopiaHub]
Args:
timeout_ms (int, optional): [description]. Defaults to 0.
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
getNewMessages
|
CkiChen/pymindaffectBCI
|
python
|
def getNewMessages(self, timeout_ms=0):
    """Get new messages from the UtopiaHub.

    Args:
        timeout_ms (int, optional): wait time in ms. Defaults to 0.
    """
    return self.U.getNewMessages(timeout_ms)
|
def initDataRingBuffer(self):
    """Initialize the data ring buffer.

    Collects ~3s of seed messages and data-packets to estimate the raw and
    pre-processed sample rates and the channel layout, allocates the ring
    buffer accordingly, then replays the seed data into it.

    Returns:
        tuple: (nsamp, nmsg) — number of seed samples inserted and number of
        non-data messages seen during seeding
    """
    print('geting some initial data to setup the ring buffer')
    databuf = []
    nmsg = 0
    # FIX: removed unused local `iter = 0` which also shadowed the builtin
    data_start_ts = None
    data_ts = 0
    # collect until the seed data spans at least 3000ms of time-stamps
    while ((data_start_ts is None) or ((data_ts - data_start_ts) < 3000)):
        msgs = self.getNewMessages(100)
        for m in msgs:
            m = self.preprocess_message(m)
            if (m.msgID == DataPacket.msgID):
                if (len(m.samples) > 0):
                    databuf.append(m)
                    if (data_start_ts is None):
                        data_start_ts = m.timestamp
                    data_ts = m.timestamp
                else:
                    print('Huh? got empty data packet: {}'.format(m))
            else:
                # non-data messages go straight into the message ring buffer
                self.msg_ringbuffer.append(m)
                self.msg_timestamp = m.timestamp
                nmsg = (nmsg + 1)
    nsamp = [len(m.samples) for m in databuf]
    data_ts = [m.timestamp for m in databuf]
    if (self.raw_fs is None):
        # estimate raw rate from samples-per-packet over time-stamp deltas
        self.raw_fs = np.median(((np.array(nsamp[1:]) / np.diff(data_ts)) * 1000.0))
    print('Estimated sample rate {} samp in {} s ={}'.format(sum(nsamp), ((data_ts[(- 1)] - data_ts[0]) / 1000.0), self.raw_fs))
    if self.data_preprocessor:
        self.data_preprocessor.fit(np.array(databuf[0].samples)[0:1, :], fs=self.raw_fs)
    # pre-process the seed packets to measure the post-processing rate/shape
    tmpdatabuf = [self.processDataPacket(m) for m in databuf]
    tmpdatabuf = [d for d in tmpdatabuf if (d.shape[0] > 0)]
    pp_nsamp = [m.shape[0] for m in tmpdatabuf]
    pp_ts = [m[((- 1), (- 1))] for m in tmpdatabuf]
    self.fs = np.median(((np.array(pp_nsamp[1:]) / np.diff(pp_ts)) * 1000.0))
    print('Estimated pre-processed sample rate={}'.format(self.fs))
    if self.data_ringbuffer:
        print('Warning: re-init data ring buffer')
    self.data_ringbuffer = RingBuffer(maxsize=((self.fs * self.datawindow_ms) / 1000), shape=tmpdatabuf[0].shape[1:], dtype=np.float32)
    self.data_timestamp = None
    nsamp = 0
    # re-fit the pre-processor so its state is clean before the replay below
    if self.data_preprocessor:
        self.data_preprocessor.fit(np.array(databuf[0].samples)[0:1, :], fs=self.raw_fs)
    if ((self.sample2timestamp is None) or isinstance(self.sample2timestamp, str)):
        self.sample2timestamp = timestamp_interpolation(fs=self.fs, sample2timestamp=self.sample2timestamp)
    for m in databuf:
        d = self.processDataPacket(m)
        self.data_ringbuffer.extend(d)
        nsamp = (nsamp + d.shape[0])
    return (nsamp, nmsg)
| 3,578,887,752,692,050,400
|
[initialize the data ring buffer, by getting some seed messages and datapackets to get the data sizes etc.]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
initDataRingBuffer
|
CkiChen/pymindaffectBCI
|
python
|
def initDataRingBuffer(self):
    """Initialize the data ring buffer.

    Collects ~3s of seed messages and data-packets to estimate the raw and
    pre-processed sample rates and the channel layout, allocates the ring
    buffer accordingly, then replays the seed data into it.

    Returns:
        tuple: (nsamp, nmsg) seed samples inserted and non-data messages seen
    """
    print('geting some initial data to setup the ring buffer')
    databuf = []
    nmsg = 0
    # NOTE(review): `iter` is never used and shadows the builtin
    iter = 0
    data_start_ts = None
    data_ts = 0
    # collect until the seed data spans at least 3000ms of time-stamps
    while ((data_start_ts is None) or ((data_ts - data_start_ts) < 3000)):
        msgs = self.getNewMessages(100)
        for m in msgs:
            m = self.preprocess_message(m)
            if (m.msgID == DataPacket.msgID):
                if (len(m.samples) > 0):
                    databuf.append(m)
                    if (data_start_ts is None):
                        data_start_ts = m.timestamp
                    data_ts = m.timestamp
                else:
                    print('Huh? got empty data packet: {}'.format(m))
            else:
                # non-data messages go straight into the message ring buffer
                self.msg_ringbuffer.append(m)
                self.msg_timestamp = m.timestamp
                nmsg = (nmsg + 1)
    nsamp = [len(m.samples) for m in databuf]
    data_ts = [m.timestamp for m in databuf]
    if (self.raw_fs is None):
        # estimate raw rate from samples-per-packet over time-stamp deltas
        self.raw_fs = np.median(((np.array(nsamp[1:]) / np.diff(data_ts)) * 1000.0))
    print('Estimated sample rate {} samp in {} s ={}'.format(sum(nsamp), ((data_ts[(- 1)] - data_ts[0]) / 1000.0), self.raw_fs))
    if self.data_preprocessor:
        self.data_preprocessor.fit(np.array(databuf[0].samples)[0:1, :], fs=self.raw_fs)
    # pre-process seed packets to measure the post-processing rate and shape
    tmpdatabuf = [self.processDataPacket(m) for m in databuf]
    tmpdatabuf = [d for d in tmpdatabuf if (d.shape[0] > 0)]
    pp_nsamp = [m.shape[0] for m in tmpdatabuf]
    pp_ts = [m[((- 1), (- 1))] for m in tmpdatabuf]
    self.fs = np.median(((np.array(pp_nsamp[1:]) / np.diff(pp_ts)) * 1000.0))
    print('Estimated pre-processed sample rate={}'.format(self.fs))
    if self.data_ringbuffer:
        print('Warning: re-init data ring buffer')
    self.data_ringbuffer = RingBuffer(maxsize=((self.fs * self.datawindow_ms) / 1000), shape=tmpdatabuf[0].shape[1:], dtype=np.float32)
    self.data_timestamp = None
    nsamp = 0
    # re-fit the pre-processor so its state is clean before the replay below
    if self.data_preprocessor:
        self.data_preprocessor.fit(np.array(databuf[0].samples)[0:1, :], fs=self.raw_fs)
    if ((self.sample2timestamp is None) or isinstance(self.sample2timestamp, str)):
        self.sample2timestamp = timestamp_interpolation(fs=self.fs, sample2timestamp=self.sample2timestamp)
    for m in databuf:
        d = self.processDataPacket(m)
        self.data_ringbuffer.extend(d)
        nsamp = (nsamp + d.shape[0])
    return (nsamp, nmsg)
|
def initStimulusRingBuffer(self):
    """Initialize the stimulus ring buffer sized to the data window."""
    # 257 columns — presumably 256 stimulus objects plus a time-stamp
    # channel; confirm against the stimulus message layout
    self.stimulus_ringbuffer = RingBuffer(maxsize=((self.fs * self.datawindow_ms) / 1000), shape=(257,), dtype=np.float32)
| 4,857,781,732,760,419,000
|
initialize the data ring buffer, by getting some seed messages and datapackets to get the data sizes etc.
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
initStimulusRingBuffer
|
CkiChen/pymindaffectBCI
|
python
|
def initStimulusRingBuffer(self):
    """Initialize the stimulus ring buffer sized to the data window."""
    # 257 columns — presumably 256 stimulus objects plus a time-stamp channel
    self.stimulus_ringbuffer = RingBuffer(maxsize=((self.fs * self.datawindow_ms) / 1000), shape=(257,), dtype=np.float32)
|
def preprocess_message(self, m: UtopiaMessage):
    """Apply pre-processing to a Utopia message before any further work.

    Args:
        m (UtopiaMessage): the raw incoming message

    Returns:
        UtopiaMessage: the same message with its time-stamp wrapped to 24 bits
    """
    # keep local stamps in the hub's 24-bit time-stamp range
    m.timestamp = m.timestamp % (1 << 24)
    return m
| -8,116,090,609,917,216,000
|
[apply pre-processing to topia message before any more work]
Args:
m (UtopiaMessage): [description]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
preprocess_message
|
CkiChen/pymindaffectBCI
|
python
|
def preprocess_message(self, m: UtopiaMessage):
    """Apply pre-processing to a Utopia message before any further work.

    Args:
        m (UtopiaMessage): the raw incoming message

    Returns:
        UtopiaMessage: the message with its time-stamp wrapped to 24 bits
    """
    # keep local stamps in the hub's 24-bit time-stamp range
    m.timestamp = (m.timestamp % (1 << 24))
    return m
|
def processDataPacket(self, m: DataPacket):
    """Pre-process a DataPacket message ready for the data ring buffer.

    Args:
        m (DataPacket): the incoming data packet

    Returns:
        pre-processed sample array with a per-sample time-stamp column appended
    """
    d = np.array(m.samples, dtype=np.float32)
    if self.data_preprocessor:
        # keep the raw copy so signal quality can compare raw vs processed
        d_raw = d.copy()
        d = self.data_preprocessor.transform(d)
        if (self.send_signalquality and (self.data_ringbuffer is not None)):
            self.update_and_send_ElectrodeQualities(d_raw, d, m.timestamp)
    if (d.size > 0):
        # time-stamps wrap at 24 bits (see preprocess_message), so a
        # backwards step indicates clock wrap-around
        if ((self.data_timestamp is not None) and (m.timestamp < self.data_timestamp)):
            print('Warning: Time-stamp wrap-around detected!!')
        d = self.add_sample_timestamps(d, m.timestamp, self.fs)
    self.data_timestamp = m.timestamp
    return d
| -5,731,186,222,297,899,000
|
[pre-process a datapacket message ready to be inserted into the ringbuffer]
Args:
m (DataPacket): [description]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
processDataPacket
|
CkiChen/pymindaffectBCI
|
python
|
def processDataPacket(self, m: DataPacket):
    """Pre-process a DataPacket message ready for the data ring buffer.

    Args:
        m (DataPacket): the incoming data packet

    Returns:
        pre-processed sample array with a time-stamp column appended
    """
    d = np.array(m.samples, dtype=np.float32)
    if self.data_preprocessor:
        d_raw = d.copy()
        d = self.data_preprocessor.transform(d)
        if (self.send_signalquality and (self.data_ringbuffer is not None)):
            self.update_and_send_ElectrodeQualities(d_raw, d, m.timestamp)
    if (d.size > 0):
        # backwards time-stamp step means the 24-bit clock wrapped
        if ((self.data_timestamp is not None) and (m.timestamp < self.data_timestamp)):
            print('Warning: Time-stamp wrap-around detected!!')
        d = self.add_sample_timestamps(d, m.timestamp, self.fs)
    self.data_timestamp = m.timestamp
    return d
|
def add_sample_timestamps(self, d: np.ndarray, timestamp: float, fs: float):
'add per-sample timestamp information to the data matrix\n\n Args:\n d (np.ndarray): (t,d) the data matrix to attach time stamps to\n timestamp (float): the timestamp of the last sample of d\n fs (float): the nomional sample rate of d\n\n Returns:\n np.ndarray: (t,d+1) data matrix with attached time-stamp channel\n '
if ((self.sample2timestamp is not None) and (not isinstance(self.sample2timestamp, str))):
sample_ts = self.sample2timestamp.transform(timestamp, len(d))
else:
sample_ts = (np.ones((len(d),), dtype=int) * timestamp)
d = np.append(np.array(d), sample_ts[:, np.newaxis], (- 1)).astype(d.dtype)
return d
| 2,263,153,318,634,905,000
|
add per-sample timestamp information to the data matrix
Args:
d (np.ndarray): (t,d) the data matrix to attach time stamps to
timestamp (float): the timestamp of the last sample of d
fs (float): the nomional sample rate of d
Returns:
np.ndarray: (t,d+1) data matrix with attached time-stamp channel
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
add_sample_timestamps
|
CkiChen/pymindaffectBCI
|
python
|
def add_sample_timestamps(self, d: np.ndarray, timestamp: float, fs: float):
'add per-sample timestamp information to the data matrix\n\n Args:\n d (np.ndarray): (t,d) the data matrix to attach time stamps to\n timestamp (float): the timestamp of the last sample of d\n fs (float): the nomional sample rate of d\n\n Returns:\n np.ndarray: (t,d+1) data matrix with attached time-stamp channel\n '
if ((self.sample2timestamp is not None) and (not isinstance(self.sample2timestamp, str))):
sample_ts = self.sample2timestamp.transform(timestamp, len(d))
else:
sample_ts = (np.ones((len(d),), dtype=int) * timestamp)
d = np.append(np.array(d), sample_ts[:, np.newaxis], (- 1)).astype(d.dtype)
return d
|
def plot_raw_preproc_data(self, d_raw, d_preproc, ts):
'[debugging function to check the diff between the raw and pre-processed data]\n\n Args:\n d_raw ([type]): [description]\n d_preproc ([type]): [description]\n ts ([type]): [description]\n '
if (not hasattr(self, 'rawringbuffer')):
self.preprocringbuffer = RingBuffer(maxsize=(self.fs * 3), shape=((d_preproc.shape[(- 1)] + 1),))
self.rawringbuffer = RingBuffer(maxsize=(self.raw_fs * 3), shape=((d_raw.shape[(- 1)] + 1),))
d_preproc = self.add_sample_timestamps(d_preproc, ts, self.fs)
self.preprocringbuffer.extend(d_preproc)
d_raw = self.add_sample_timestamps(d_raw, ts, self.raw_fs)
self.rawringbuffer.extend(d_raw)
if ((self.last_sigquality_ts is None) or (ts > (self.last_sigquality_ts + self.send_sigquality_interval))):
import matplotlib.pyplot as plt
plt.figure(10)
plt.clf()
idx = np.flatnonzero(self.rawringbuffer[:, (- 1)])[0]
plt.subplot(211)
plt.cla()
plt.plot(self.rawringbuffer[idx:, (- 1)], self.rawringbuffer[idx:, :(- 1)])
idx = np.flatnonzero(self.preprocringbuffer[:, (- 1)])[0]
plt.subplot(212)
plt.cla()
plt.plot(self.preprocringbuffer[idx:, (- 1)], self.preprocringbuffer[idx:, :(- 1)])
plt.show(block=False)
| 1,639,815,109,318,496,800
|
[debugging function to check the diff between the raw and pre-processed data]
Args:
d_raw ([type]): [description]
d_preproc ([type]): [description]
ts ([type]): [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
plot_raw_preproc_data
|
CkiChen/pymindaffectBCI
|
python
|
def plot_raw_preproc_data(self, d_raw, d_preproc, ts):
'[debugging function to check the diff between the raw and pre-processed data]\n\n Args:\n d_raw ([type]): [description]\n d_preproc ([type]): [description]\n ts ([type]): [description]\n '
if (not hasattr(self, 'rawringbuffer')):
self.preprocringbuffer = RingBuffer(maxsize=(self.fs * 3), shape=((d_preproc.shape[(- 1)] + 1),))
self.rawringbuffer = RingBuffer(maxsize=(self.raw_fs * 3), shape=((d_raw.shape[(- 1)] + 1),))
d_preproc = self.add_sample_timestamps(d_preproc, ts, self.fs)
self.preprocringbuffer.extend(d_preproc)
d_raw = self.add_sample_timestamps(d_raw, ts, self.raw_fs)
self.rawringbuffer.extend(d_raw)
if ((self.last_sigquality_ts is None) or (ts > (self.last_sigquality_ts + self.send_sigquality_interval))):
import matplotlib.pyplot as plt
plt.figure(10)
plt.clf()
idx = np.flatnonzero(self.rawringbuffer[:, (- 1)])[0]
plt.subplot(211)
plt.cla()
plt.plot(self.rawringbuffer[idx:, (- 1)], self.rawringbuffer[idx:, :(- 1)])
idx = np.flatnonzero(self.preprocringbuffer[:, (- 1)])[0]
plt.subplot(212)
plt.cla()
plt.plot(self.preprocringbuffer[idx:, (- 1)], self.preprocringbuffer[idx:, :(- 1)])
plt.show(block=False)
|
def processStimulusEvent(self, m: StimulusEvent):
'[pre-process a StimulusEvent message ready to be inserted into the stimulus ringbuffer]\n\n Args:\n m (StimulusEvent): [description]\n\n Returns:\n [type]: [description]\n '
d = np.zeros((257,), dtype=np.float32)
if ((self.stimulus_ringbuffer is not None) and (self.stimulus_timestamp is not None)):
d[:] = self.stimulus_ringbuffer[(- 1), :]
d[m.objIDs] = m.objState
d[(- 1)] = m.timestamp
if self.stimulus_preprocessor:
d = self.stimulus_preprocessor.transform(d)
self.stimulus_timestamp = m.timestamp
return d
| -2,385,323,848,178,737,000
|
[pre-process a StimulusEvent message ready to be inserted into the stimulus ringbuffer]
Args:
m (StimulusEvent): [description]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
processStimulusEvent
|
CkiChen/pymindaffectBCI
|
python
|
def processStimulusEvent(self, m: StimulusEvent):
'[pre-process a StimulusEvent message ready to be inserted into the stimulus ringbuffer]\n\n Args:\n m (StimulusEvent): [description]\n\n Returns:\n [type]: [description]\n '
d = np.zeros((257,), dtype=np.float32)
if ((self.stimulus_ringbuffer is not None) and (self.stimulus_timestamp is not None)):
d[:] = self.stimulus_ringbuffer[(- 1), :]
d[m.objIDs] = m.objState
d[(- 1)] = m.timestamp
if self.stimulus_preprocessor:
d = self.stimulus_preprocessor.transform(d)
self.stimulus_timestamp = m.timestamp
return d
|
def update_and_send_ElectrodeQualities(self, d_raw: np.ndarray, d_preproc: np.ndarray, ts: int):
'[compute running estimate of electrode qality and stream it]\n\n Args:\n d_raw (np.ndarray): [description]\n d_preproc (np.ndarray): [description]\n ts (int): [description]\n '
(raw_power, preproc_power) = self.update_electrode_powers(d_raw, d_preproc)
raw_amp = np.sqrt(raw_power)
preproc_amp = np.sqrt(preproc_power)
noise2sig = (np.maximum(float(1e-06), np.abs((raw_amp - preproc_amp))) / np.maximum(float(1e-08), preproc_amp))
noise2sig[(raw_power < 1e-06)] = 100
noise2sig[(preproc_amp > (raw_amp * 10))] = 100
noise2sig = np.minimum(noise2sig, 100)
if ((self.last_sigquality_ts is None) or (ts > (self.last_sigquality_ts + self.send_sigquality_interval))):
print('SigQ:\nraw_power=({}/{})\npp_power=({}/{})\nnoise2sig={}'.format(raw_amp, d_raw.shape[0], preproc_amp, d_preproc.shape[0], noise2sig))
print('Q', end='')
self.sendMessage(SignalQuality(None, noise2sig))
self.last_sigquality_ts = ts
if (self.VERBOSITY > 2):
import matplotlib.pyplot as plt
plt.figure(10)
ts = self.data_ringbuffer[:, (- 1)]
idx = np.flatnonzero(ts)
if (len(idx) > 0):
ts = ts[idx[0]:]
plt.subplot(211)
plt.cla()
plt.plot(np.diff(ts))
plt.title('diff time-sample')
plt.subplot(212)
plt.cla()
plt.plot(((ts - ts[0]) - ((np.arange(len(ts)) * 1000.0) / self.fs)))
plt.title('regression against sample-number')
plt.show(block=False)
| 7,000,004,924,750,501,000
|
[compute running estimate of electrode qality and stream it]
Args:
d_raw (np.ndarray): [description]
d_preproc (np.ndarray): [description]
ts (int): [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
update_and_send_ElectrodeQualities
|
CkiChen/pymindaffectBCI
|
python
|
def update_and_send_ElectrodeQualities(self, d_raw: np.ndarray, d_preproc: np.ndarray, ts: int):
'[compute running estimate of electrode qality and stream it]\n\n Args:\n d_raw (np.ndarray): [description]\n d_preproc (np.ndarray): [description]\n ts (int): [description]\n '
(raw_power, preproc_power) = self.update_electrode_powers(d_raw, d_preproc)
raw_amp = np.sqrt(raw_power)
preproc_amp = np.sqrt(preproc_power)
noise2sig = (np.maximum(float(1e-06), np.abs((raw_amp - preproc_amp))) / np.maximum(float(1e-08), preproc_amp))
noise2sig[(raw_power < 1e-06)] = 100
noise2sig[(preproc_amp > (raw_amp * 10))] = 100
noise2sig = np.minimum(noise2sig, 100)
if ((self.last_sigquality_ts is None) or (ts > (self.last_sigquality_ts + self.send_sigquality_interval))):
print('SigQ:\nraw_power=({}/{})\npp_power=({}/{})\nnoise2sig={}'.format(raw_amp, d_raw.shape[0], preproc_amp, d_preproc.shape[0], noise2sig))
print('Q', end=)
self.sendMessage(SignalQuality(None, noise2sig))
self.last_sigquality_ts = ts
if (self.VERBOSITY > 2):
import matplotlib.pyplot as plt
plt.figure(10)
ts = self.data_ringbuffer[:, (- 1)]
idx = np.flatnonzero(ts)
if (len(idx) > 0):
ts = ts[idx[0]:]
plt.subplot(211)
plt.cla()
plt.plot(np.diff(ts))
plt.title('diff time-sample')
plt.subplot(212)
plt.cla()
plt.plot(((ts - ts[0]) - ((np.arange(len(ts)) * 1000.0) / self.fs)))
plt.title('regression against sample-number')
plt.show(block=False)
|
def update_electrode_powers(self, d_raw: np.ndarray, d_preproc: np.ndarray):
'[track exp-weighted-moving average centered power for 2 input streams]\n\n Args:\n d_raw (np.ndarray): [description]\n d_preproc (np.ndarray): [description]\n\n Returns:\n [type]: [description]\n '
if (self.raw_power is None):
(mu_hl, pow_hl) = self.noise2sig_halflife_ms
self.raw_power = power_tracker(mu_hl, pow_hl, self.raw_fs)
self.preproc_power = power_tracker(mu_hl, pow_hl, self.fs)
self.raw_power.transform(d_raw)
self.preproc_power.transform(d_preproc)
return (self.raw_power.power(), self.preproc_power.power())
| 2,754,732,351,761,983,000
|
[track exp-weighted-moving average centered power for 2 input streams]
Args:
d_raw (np.ndarray): [description]
d_preproc (np.ndarray): [description]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
update_electrode_powers
|
CkiChen/pymindaffectBCI
|
python
|
def update_electrode_powers(self, d_raw: np.ndarray, d_preproc: np.ndarray):
'[track exp-weighted-moving average centered power for 2 input streams]\n\n Args:\n d_raw (np.ndarray): [description]\n d_preproc (np.ndarray): [description]\n\n Returns:\n [type]: [description]\n '
if (self.raw_power is None):
(mu_hl, pow_hl) = self.noise2sig_halflife_ms
self.raw_power = power_tracker(mu_hl, pow_hl, self.raw_fs)
self.preproc_power = power_tracker(mu_hl, pow_hl, self.fs)
self.raw_power.transform(d_raw)
self.preproc_power.transform(d_preproc)
return (self.raw_power.power(), self.preproc_power.power())
|
def update(self, timeout_ms=None, mintime_ms=None):
'Update the tracking state w.r.t. the data source\n\n By adding data to the data_ringbuffer, stimulus info to the stimulus_ringbuffer, \n and other messages to the messages ring buffer.\n\n Args\n timeout_ms : int\n max block waiting for messages before returning\n mintime_ms : int\n min time to accumulate messages before returning\n Returns\n newmsgs : [newMsgs :UtopiaMessage]\n list of the *new* utopia messages from the server\n nsamp: int\n number of new data samples in this call\n Note: use data_ringbuffer[-nsamp:,...] to get the new data\n nstimulus : int\n number of new stimulus events in this call\n Note: use stimulus_ringbuffer[-nstimulus:,...] to get the new data\n '
if (timeout_ms is None):
timeout_ms = self.timeout_ms
if (mintime_ms is None):
mintime_ms = self.mintime_ms
if (not self.isConnected()):
self.connect()
if (not self.isConnected()):
return ([], 0, 0)
t0 = self.getTimeStamp()
nsamp = 0
nmsg = 0
nstimulus = 0
if (self.data_ringbuffer is None):
(nsamp, nmsg) = self.initDataRingBuffer()
if (self.stimulus_ringbuffer is None):
self.initStimulusRingBuffer()
if (self.last_log_ts is None):
self.last_log_ts = self.getTimeStamp()
if (t0 is None):
t0 = self.getTimeStamp()
newmsgs = self.newmsgs
self.newmsgs = []
ttg = (timeout_ms - (self.getTimeStamp() - t0))
while (ttg > 0):
if (ttg >= mintime_ms):
sleep((mintime_ms / 1000.0))
ttg = (timeout_ms - (self.getTimeStamp() - t0))
msgs = self.getNewMessages(ttg)
print('.', end='')
for m in msgs:
m = self.preprocess_message(m)
print('{:c}'.format(m.msgID), end='', flush=True)
if (m.msgID == DataPacket.msgID):
d = self.processDataPacket(m)
self.data_ringbuffer.extend(d)
nsamp = (nsamp + d.shape[0])
elif (m.msgID == StimulusEvent.msgID):
d = self.processStimulusEvent(m)
self.stimulus_ringbuffer.append(d)
nstimulus = (nstimulus + 1)
else:
if ((m.msgID == NewTarget.msgID) or (m.msgID == Selection.msgID)):
d = self.processStimulusEvent(StimulusEvent(m.timestamp, np.arange(255, dtype=np.int32), np.zeros(255, dtype=np.int8)))
self.stimulus_ringbuffer.append(d)
self.stimulus_timestamp = m.timestamp
if ((len(self.msg_ringbuffer) > 0) and (m.timestamp > (self.msg_ringbuffer[0].timestamp + self.msgwindow_ms))):
self.msg_ringbuffer.popleft()
self.msg_ringbuffer.append(m)
newmsgs.append(m)
nmsg = (nmsg + 1)
self.msg_timestamp = m.timestamp
ttg = (timeout_ms - (self.getTimeStamp() - t0))
if (self.getTimeStamp() > (self.last_log_ts + 2000)):
print('', flush=True)
self.last_log_ts = self.getTimeStamp()
return (newmsgs, nsamp, nstimulus)
| 6,327,734,097,786,197,000
|
Update the tracking state w.r.t. the data source
By adding data to the data_ringbuffer, stimulus info to the stimulus_ringbuffer,
and other messages to the messages ring buffer.
Args
timeout_ms : int
max block waiting for messages before returning
mintime_ms : int
min time to accumulate messages before returning
Returns
newmsgs : [newMsgs :UtopiaMessage]
list of the *new* utopia messages from the server
nsamp: int
number of new data samples in this call
Note: use data_ringbuffer[-nsamp:,...] to get the new data
nstimulus : int
number of new stimulus events in this call
Note: use stimulus_ringbuffer[-nstimulus:,...] to get the new data
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
update
|
CkiChen/pymindaffectBCI
|
python
|
def update(self, timeout_ms=None, mintime_ms=None):
'Update the tracking state w.r.t. the data source\n\n By adding data to the data_ringbuffer, stimulus info to the stimulus_ringbuffer, \n and other messages to the messages ring buffer.\n\n Args\n timeout_ms : int\n max block waiting for messages before returning\n mintime_ms : int\n min time to accumulate messages before returning\n Returns\n newmsgs : [newMsgs :UtopiaMessage]\n list of the *new* utopia messages from the server\n nsamp: int\n number of new data samples in this call\n Note: use data_ringbuffer[-nsamp:,...] to get the new data\n nstimulus : int\n number of new stimulus events in this call\n Note: use stimulus_ringbuffer[-nstimulus:,...] to get the new data\n '
if (timeout_ms is None):
timeout_ms = self.timeout_ms
if (mintime_ms is None):
mintime_ms = self.mintime_ms
if (not self.isConnected()):
self.connect()
if (not self.isConnected()):
return ([], 0, 0)
t0 = self.getTimeStamp()
nsamp = 0
nmsg = 0
nstimulus = 0
if (self.data_ringbuffer is None):
(nsamp, nmsg) = self.initDataRingBuffer()
if (self.stimulus_ringbuffer is None):
self.initStimulusRingBuffer()
if (self.last_log_ts is None):
self.last_log_ts = self.getTimeStamp()
if (t0 is None):
t0 = self.getTimeStamp()
newmsgs = self.newmsgs
self.newmsgs = []
ttg = (timeout_ms - (self.getTimeStamp() - t0))
while (ttg > 0):
if (ttg >= mintime_ms):
sleep((mintime_ms / 1000.0))
ttg = (timeout_ms - (self.getTimeStamp() - t0))
msgs = self.getNewMessages(ttg)
print('.', end=)
for m in msgs:
m = self.preprocess_message(m)
print('{:c}'.format(m.msgID), end=, flush=True)
if (m.msgID == DataPacket.msgID):
d = self.processDataPacket(m)
self.data_ringbuffer.extend(d)
nsamp = (nsamp + d.shape[0])
elif (m.msgID == StimulusEvent.msgID):
d = self.processStimulusEvent(m)
self.stimulus_ringbuffer.append(d)
nstimulus = (nstimulus + 1)
else:
if ((m.msgID == NewTarget.msgID) or (m.msgID == Selection.msgID)):
d = self.processStimulusEvent(StimulusEvent(m.timestamp, np.arange(255, dtype=np.int32), np.zeros(255, dtype=np.int8)))
self.stimulus_ringbuffer.append(d)
self.stimulus_timestamp = m.timestamp
if ((len(self.msg_ringbuffer) > 0) and (m.timestamp > (self.msg_ringbuffer[0].timestamp + self.msgwindow_ms))):
self.msg_ringbuffer.popleft()
self.msg_ringbuffer.append(m)
newmsgs.append(m)
nmsg = (nmsg + 1)
self.msg_timestamp = m.timestamp
ttg = (timeout_ms - (self.getTimeStamp() - t0))
if (self.getTimeStamp() > (self.last_log_ts + 2000)):
print(, flush=True)
self.last_log_ts = self.getTimeStamp()
return (newmsgs, nsamp, nstimulus)
|
def push_back_newmsgs(self, oldmsgs):
'[put unprocessed messages back onto the newmessages queue]\n\n Args:\n oldmsgs ([type]): [description]\n '
self.newmsgs.extend(oldmsgs)
| 3,987,448,184,813,840,000
|
[put unprocessed messages back onto the newmessages queue]
Args:
oldmsgs ([type]): [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
push_back_newmsgs
|
CkiChen/pymindaffectBCI
|
python
|
def push_back_newmsgs(self, oldmsgs):
'[put unprocessed messages back onto the newmessages queue]\n\n Args:\n oldmsgs ([type]): [description]\n '
self.newmsgs.extend(oldmsgs)
|
def extract_data_segment(self, bgn_ts, end_ts=None):
'extract a segment of data based on a start and end time-stamp\n\n Args:\n bgn_ts (float): segment start time-stamp\n end_ts (float, optional): segment end time-stamp. Defaults to None.\n\n Returns:\n (np.ndarray): the data between these time-stamps, or None if timestamps invalid\n '
return extract_ringbuffer_segment(self.data_ringbuffer, bgn_ts, end_ts)
| -7,491,877,753,707,852,000
|
extract a segment of data based on a start and end time-stamp
Args:
bgn_ts (float): segment start time-stamp
end_ts (float, optional): segment end time-stamp. Defaults to None.
Returns:
(np.ndarray): the data between these time-stamps, or None if timestamps invalid
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
extract_data_segment
|
CkiChen/pymindaffectBCI
|
python
|
def extract_data_segment(self, bgn_ts, end_ts=None):
'extract a segment of data based on a start and end time-stamp\n\n Args:\n bgn_ts (float): segment start time-stamp\n end_ts (float, optional): segment end time-stamp. Defaults to None.\n\n Returns:\n (np.ndarray): the data between these time-stamps, or None if timestamps invalid\n '
return extract_ringbuffer_segment(self.data_ringbuffer, bgn_ts, end_ts)
|
def extract_stimulus_segment(self, bgn_ts, end_ts=None):
'extract a segment of the stimulus stream based on a start and end time-stamp\n\n Args:\n bgn_ts (float): segment start time-stamp\n end_ts (float, optional): segment end time-stamp. Defaults to None.\n\n Returns:\n (np.ndarray): the stimulus events between these time-stamps, or None if timestamps invalid\n '
return extract_ringbuffer_segment(self.stimulus_ringbuffer, bgn_ts, end_ts)
| 107,512,148,402,593,490
|
extract a segment of the stimulus stream based on a start and end time-stamp
Args:
bgn_ts (float): segment start time-stamp
end_ts (float, optional): segment end time-stamp. Defaults to None.
Returns:
(np.ndarray): the stimulus events between these time-stamps, or None if timestamps invalid
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
extract_stimulus_segment
|
CkiChen/pymindaffectBCI
|
python
|
def extract_stimulus_segment(self, bgn_ts, end_ts=None):
'extract a segment of the stimulus stream based on a start and end time-stamp\n\n Args:\n bgn_ts (float): segment start time-stamp\n end_ts (float, optional): segment end time-stamp. Defaults to None.\n\n Returns:\n (np.ndarray): the stimulus events between these time-stamps, or None if timestamps invalid\n '
return extract_ringbuffer_segment(self.stimulus_ringbuffer, bgn_ts, end_ts)
|
def extract_msgs_segment(self, bgn_ts, end_ts=None):
'[extract the messages between start/end time stamps]\n\n Args:\n bgn_ts ([type]): [description]\n end_ts ([type], optional): [description]. Defaults to None.\n\n Returns:\n [type]: [description]\n '
msgs = []
for m in reversed(self.msg_ringbuffer):
if (m.timestamp <= bgn_ts):
break
if ((end_ts is None) or (m.timestamp < end_ts)):
msgs.append(m)
msgs.reverse()
return msgs
| 930,822,136,109,789,600
|
[extract the messages between start/end time stamps]
Args:
bgn_ts ([type]): [description]
end_ts ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
extract_msgs_segment
|
CkiChen/pymindaffectBCI
|
python
|
def extract_msgs_segment(self, bgn_ts, end_ts=None):
'[extract the messages between start/end time stamps]\n\n Args:\n bgn_ts ([type]): [description]\n end_ts ([type], optional): [description]. Defaults to None.\n\n Returns:\n [type]: [description]\n '
msgs = []
for m in reversed(self.msg_ringbuffer):
if (m.timestamp <= bgn_ts):
break
if ((end_ts is None) or (m.timestamp < end_ts)):
msgs.append(m)
msgs.reverse()
return msgs
|
def run(self, timeout_ms=30000):
'[test run the interface forever, just getting and storing data]\n\n Args:\n timeout_ms (int, optional): [description]. Defaults to 30000.\n '
t0 = self.getTimeStamp()
tstart = self.data_timestamp
trlen_ms = 5000
while (self.getTimeStamp() < (t0 + timeout_ms)):
self.update()
if (tstart is None):
tstart = self.data_timestamp
if (tstart and (self.data_timestamp > (tstart + trlen_ms))):
X = self.extract_data_segment(tstart, (tstart + trlen_ms))
print('Got data: {}->{}\n{}'.format(tstart, (tstart + trlen_ms), X[:, (- 1)]))
Y = self.extract_stimulus_segment(tstart, (tstart + trlen_ms))
print('Got stimulus: {}->{}\n{}'.format(tstart, (tstart + trlen_ms), Y[:, (- 1)]))
tstart = (self.data_timestamp + 5000)
print('.', flush=True)
| -8,931,932,729,467,831,000
|
[test run the interface forever, just getting and storing data]
Args:
timeout_ms (int, optional): [description]. Defaults to 30000.
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
run
|
CkiChen/pymindaffectBCI
|
python
|
def run(self, timeout_ms=30000):
'[test run the interface forever, just getting and storing data]\n\n Args:\n timeout_ms (int, optional): [description]. Defaults to 30000.\n '
t0 = self.getTimeStamp()
tstart = self.data_timestamp
trlen_ms = 5000
while (self.getTimeStamp() < (t0 + timeout_ms)):
self.update()
if (tstart is None):
tstart = self.data_timestamp
if (tstart and (self.data_timestamp > (tstart + trlen_ms))):
X = self.extract_data_segment(tstart, (tstart + trlen_ms))
print('Got data: {}->{}\n{}'.format(tstart, (tstart + trlen_ms), X[:, (- 1)]))
Y = self.extract_stimulus_segment(tstart, (tstart + trlen_ms))
print('Got stimulus: {}->{}\n{}'.format(tstart, (tstart + trlen_ms), Y[:, (- 1)]))
tstart = (self.data_timestamp + 5000)
print('.', flush=True)
|
def fit(self, X, fs: float=None, zi=None):
'[summary]\n\n Args:\n X ([type]): [description]\n fs (float, optional): [description]. Defaults to None.\n zi ([type], optional): [description]. Defaults to None.\n\n Returns:\n [type]: [description]\n '
if (fs is not None):
self.fs = fs
if isinstance(self.stopband, str):
import pickle
import os
if os.path.isfile(self.stopband):
fn = self.stopband
else:
fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.stopband)
with open(fn, 'rb') as f:
self.sos_ = pickle.load(f)
self.zi_ = pickle.load(f)
f.close()
self.zi_ = sosfilt_zi_warmup(self.zi_, X, self.axis)
print('X={} zi={}'.format(X.shape, self.zi_.shape))
else:
(X, self.sos_, self.zi_) = butter_sosfilt(X, self.stopband, self.fs, order=self.order, axis=self.axis, zi=zi, ftype=self.ftype)
self.nsamp = 0
self.resamprate_ = ((int(round(((self.fs * 2.0) / self.fs_out))) / 2.0) if (self.fs_out is not None) else 1)
self.out_fs_ = (self.fs / self.resamprate_)
print('resample: {}->{}hz rsrate={}'.format(self.fs, self.out_fs_, self.resamprate_))
return self
| 8,991,536,753,699,651,000
|
[summary]
Args:
X ([type]): [description]
fs (float, optional): [description]. Defaults to None.
zi ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
fit
|
CkiChen/pymindaffectBCI
|
python
|
def fit(self, X, fs: float=None, zi=None):
'[summary]\n\n Args:\n X ([type]): [description]\n fs (float, optional): [description]. Defaults to None.\n zi ([type], optional): [description]. Defaults to None.\n\n Returns:\n [type]: [description]\n '
if (fs is not None):
self.fs = fs
if isinstance(self.stopband, str):
import pickle
import os
if os.path.isfile(self.stopband):
fn = self.stopband
else:
fn = os.path.join(os.path.dirname(os.path.abspath(__file__)), self.stopband)
with open(fn, 'rb') as f:
self.sos_ = pickle.load(f)
self.zi_ = pickle.load(f)
f.close()
self.zi_ = sosfilt_zi_warmup(self.zi_, X, self.axis)
print('X={} zi={}'.format(X.shape, self.zi_.shape))
else:
(X, self.sos_, self.zi_) = butter_sosfilt(X, self.stopband, self.fs, order=self.order, axis=self.axis, zi=zi, ftype=self.ftype)
self.nsamp = 0
self.resamprate_ = ((int(round(((self.fs * 2.0) / self.fs_out))) / 2.0) if (self.fs_out is not None) else 1)
self.out_fs_ = (self.fs / self.resamprate_)
print('resample: {}->{}hz rsrate={}'.format(self.fs, self.out_fs_, self.resamprate_))
return self
|
def transform(self, X, Y=None):
'[summary]\n\n Args:\n X ([type]): [description]\n Y ([type], optional): [description]. Defaults to None.\n\n Returns:\n [type]: [description]\n '
if (not hasattr(self, 'sos_')):
self.fit(X[0:1, :])
if (self.sos_ is not None):
(X, self.zi_) = sosfilt(self.sos_, X, axis=self.axis, zi=self.zi_)
nsamp = self.nsamp
self.nsamp = (self.nsamp + X.shape[self.axis])
if (self.resamprate_ > 1):
resamp_start = (nsamp % self.resamprate_)
if (resamp_start > 0):
resamp_start = (self.resamprate_ - resamp_start)
idx = np.arange(resamp_start, X.shape[self.axis], self.resamprate_)
if (((self.resamprate_ % 1) > 0) and (idx.size > 0)):
idx_l = np.floor(idx).astype(int)
idx_u = np.ceil(idx).astype(int)
idx_u[(- 1)] = (idx_u[(- 1)] if (idx_u[(- 1)] < X.shape[self.axis]) else (X.shape[self.axis] - 1))
w_u = (idx - idx_l)
X = ((X[..., idx_u, :] * w_u[:, np.newaxis]) + (X[..., idx_l, :] * (1 - w_u[:, np.newaxis])))
if (Y is not None):
Y = ((Y[..., idx_u, :] * w_u[:, np.newaxis]) + (Y[..., idx_l, :] * (1 - w_u[:, np.newaxis])))
else:
idx = idx.astype(int)
X = X[..., idx, :]
if (Y is not None):
Y = Y[..., idx, :]
return (X if (Y is None) else (X, Y))
| -7,833,983,299,940,526,000
|
[summary]
Args:
X ([type]): [description]
Y ([type], optional): [description]. Defaults to None.
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
transform
|
CkiChen/pymindaffectBCI
|
python
|
def transform(self, X, Y=None):
'[summary]\n\n Args:\n X ([type]): [description]\n Y ([type], optional): [description]. Defaults to None.\n\n Returns:\n [type]: [description]\n '
if (not hasattr(self, 'sos_')):
self.fit(X[0:1, :])
if (self.sos_ is not None):
(X, self.zi_) = sosfilt(self.sos_, X, axis=self.axis, zi=self.zi_)
nsamp = self.nsamp
self.nsamp = (self.nsamp + X.shape[self.axis])
if (self.resamprate_ > 1):
resamp_start = (nsamp % self.resamprate_)
if (resamp_start > 0):
resamp_start = (self.resamprate_ - resamp_start)
idx = np.arange(resamp_start, X.shape[self.axis], self.resamprate_)
if (((self.resamprate_ % 1) > 0) and (idx.size > 0)):
idx_l = np.floor(idx).astype(int)
idx_u = np.ceil(idx).astype(int)
idx_u[(- 1)] = (idx_u[(- 1)] if (idx_u[(- 1)] < X.shape[self.axis]) else (X.shape[self.axis] - 1))
w_u = (idx - idx_l)
X = ((X[..., idx_u, :] * w_u[:, np.newaxis]) + (X[..., idx_l, :] * (1 - w_u[:, np.newaxis])))
if (Y is not None):
Y = ((Y[..., idx_u, :] * w_u[:, np.newaxis]) + (Y[..., idx_l, :] * (1 - w_u[:, np.newaxis])))
else:
idx = idx.astype(int)
X = X[..., idx, :]
if (Y is not None):
Y = Y[..., idx, :]
return (X if (Y is None) else (X, Y))
|
@staticmethod
def testcase():
' test the filt+downsample transformation filter by incremental calling '
X = np.sin((((np.arange(100)[:, np.newaxis] * 2) * np.pi) / 30))
xs = np.arange(X.shape[0])[:, np.newaxis]
bands = (0, 20, 'bandpass')
fs = 200
fs_out = 130
fds = butterfilt_and_downsample(stopband=bands, fs=fs, fs_out=fs_out)
print('single step')
fds.fit(X[0:1, :])
(m0, xs0) = fds.transform(X, xs)
print('M0 -> {}'.format(m0[:20]))
step = 6
print('Step size = {}'.format(step))
fds.fit(X[0:1, :])
m1 = np.zeros(m0.shape, m0.dtype)
xs1 = np.zeros(xs0.shape, xs0.dtype)
t = 0
for i in range(0, len(X), step):
idx = np.arange(i, min((i + step), len(X)))
(mm, idx1) = fds.transform(X[idx, :], idx[:, np.newaxis])
m1[t:(t + mm.shape[0]), :] = mm
xs1[t:(t + mm.shape[0])] = idx1
t = (t + mm.shape[0])
print('M1 -> {}'.format(m1[:20]))
print('diff: {}'.format(np.max(np.abs((m0 - m1)))))
import matplotlib.pyplot as plt
plt.plot(xs, X, '*-', label='X')
plt.plot(xs0, m0, '*-', label='{} {}->{}Hz single'.format(bands, fs, fs_out))
plt.plot(xs1, m1, '*-', label='{} {}->{}Hz incremental'.format(bands, fs, fs_out))
plt.legend()
plt.show()
| 4,805,566,877,508,397,000
|
test the filt+downsample transformation filter by incremental calling
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testcase
|
CkiChen/pymindaffectBCI
|
python
|
@staticmethod
def testcase():
' '
X = np.sin((((np.arange(100)[:, np.newaxis] * 2) * np.pi) / 30))
xs = np.arange(X.shape[0])[:, np.newaxis]
bands = (0, 20, 'bandpass')
fs = 200
fs_out = 130
fds = butterfilt_and_downsample(stopband=bands, fs=fs, fs_out=fs_out)
print('single step')
fds.fit(X[0:1, :])
(m0, xs0) = fds.transform(X, xs)
print('M0 -> {}'.format(m0[:20]))
step = 6
print('Step size = {}'.format(step))
fds.fit(X[0:1, :])
m1 = np.zeros(m0.shape, m0.dtype)
xs1 = np.zeros(xs0.shape, xs0.dtype)
t = 0
for i in range(0, len(X), step):
idx = np.arange(i, min((i + step), len(X)))
(mm, idx1) = fds.transform(X[idx, :], idx[:, np.newaxis])
m1[t:(t + mm.shape[0]), :] = mm
xs1[t:(t + mm.shape[0])] = idx1
t = (t + mm.shape[0])
print('M1 -> {}'.format(m1[:20]))
print('diff: {}'.format(np.max(np.abs((m0 - m1)))))
import matplotlib.pyplot as plt
plt.plot(xs, X, '*-', label='X')
plt.plot(xs0, m0, '*-', label='{} {}->{}Hz single'.format(bands, fs, fs_out))
plt.plot(xs1, m1, '*-', label='{} {}->{}Hz incremental'.format(bands, fs, fs_out))
plt.legend()
plt.show()
|
def fit(self, X):
'[summary]\n\n Args:\n X ([type]): [description]\n\n Returns:\n [type]: [description]\n '
return self
| -7,139,748,956,259,923,000
|
[summary]
Args:
X ([type]): [description]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
fit
|
CkiChen/pymindaffectBCI
|
python
|
def fit(self, X):
'[summary]\n\n Args:\n X ([type]): [description]\n\n Returns:\n [type]: [description]\n '
return self
|
def transform(self, X):
'[transform Stimulus-encoded to brain-encoded]\n\n Args:\n X ([type]): [description]\n\n Returns:\n [type]: [description]\n '
if (X is None):
return None
prevX = self.prevX
if (self.histlen > 0):
if ((X.shape[0] >= self.histlen) or (prevX is None)):
self.prevX = X
else:
self.prevX = np.append(prevX, X, 0)
self.prevX = self.prevX[(- self.histlen):, :].copy()
X = stim2event(X, self.evtlabs, axis=(- 2), oM=prevX)
return X
| 8,042,121,421,868,493,000
|
[transform Stimulus-encoded to brain-encoded]
Args:
X ([type]): [description]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
transform
|
CkiChen/pymindaffectBCI
|
python
|
def transform(self, X):
'[transform Stimulus-encoded to brain-encoded]\n\n Args:\n X ([type]): [description]\n\n Returns:\n [type]: [description]\n '
if (X is None):
return None
prevX = self.prevX
if (self.histlen > 0):
if ((X.shape[0] >= self.histlen) or (prevX is None)):
self.prevX = X
else:
self.prevX = np.append(prevX, X, 0)
self.prevX = self.prevX[(- self.histlen):, :].copy()
X = stim2event(X, self.evtlabs, axis=(- 2), oM=prevX)
return X
|
def testcase():
' test the stimulus transformation filter by incremental calling '
M = np.array([0, 0, 0, 1, 0, 0, 1, 1, 0, 1])[:, np.newaxis]
s2ef = stim2eventfilt(evtlabs=('re', 'fe'), histlen=3)
print('single step')
m0 = s2ef.transform(M)
print('{} -> {}'.format(M, m0))
print('Step size = 1')
m1 = np.zeros(m0.shape, m0.dtype)
for i in range(len(M)):
idx = slice(i, (i + 1))
mm = s2ef.transform(M[idx, :])
m1[(idx, ...)] = mm
print('{} {} -> {}'.format(i, M[(idx, ...)], mm))
print('Step size=4')
m4 = np.zeros(m0.shape, m0.dtype)
for i in range(0, len(M), 4):
idx = slice(i, (i + 4))
mm = s2ef.transform(M[idx, :])
m4[(idx, ...)] = mm
print('{} {} -> {}'.format(i, M[(idx, ...)], mm))
print('m0={}\nm1={}\n,m4={}\n'.format(m0, m1, m4))
| 2,849,858,674,135,896,000
|
test the stimulus transformation filter by incremental calling
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testcase
|
CkiChen/pymindaffectBCI
|
python
|
def testcase():
' '
M = np.array([0, 0, 0, 1, 0, 0, 1, 1, 0, 1])[:, np.newaxis]
s2ef = stim2eventfilt(evtlabs=('re', 'fe'), histlen=3)
print('single step')
m0 = s2ef.transform(M)
print('{} -> {}'.format(M, m0))
print('Step size = 1')
m1 = np.zeros(m0.shape, m0.dtype)
for i in range(len(M)):
idx = slice(i, (i + 1))
mm = s2ef.transform(M[idx, :])
m1[(idx, ...)] = mm
print('{} {} -> {}'.format(i, M[(idx, ...)], mm))
print('Step size=4')
m4 = np.zeros(m0.shape, m0.dtype)
for i in range(0, len(M), 4):
idx = slice(i, (i + 4))
mm = s2ef.transform(M[idx, :])
m4[(idx, ...)] = mm
print('{} {} -> {}'.format(i, M[(idx, ...)], mm))
print('m0={}\nm1={}\n,m4={}\n'.format(m0, m1, m4))
|
def hl2alpha(self, hl):
'[summary]\n\n Args:\n hl ([type]): [description]\n\n Returns:\n [type]: [description]\n '
return np.exp((np.log(0.5) / hl))
| -8,196,885,410,909,495,000
|
[summary]
Args:
hl ([type]): [description]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
hl2alpha
|
CkiChen/pymindaffectBCI
|
python
|
def hl2alpha(self, hl):
'[summary]\n\n Args:\n hl ([type]): [description]\n\n Returns:\n [type]: [description]\n '
return np.exp((np.log(0.5) / hl))
|
def fit(self, X):
'[summary]\n\n Args:\n X ([type]): [description]\n\n Returns:\n [type]: [description]\n '
self.sX_N = X.shape[0]
if (self.car and (X.shape[(- 1)] > 4)):
X = (X.copy() - np.mean(X, (- 1), keepdims=True))
self.sX = np.sum(X, axis=0)
self.sXX_N = X.shape[0]
self.sXX = np.sum(((X - (self.sX / self.sX_N)) ** 2), axis=0)
return self.power()
| 2,493,160,889,451,224,000
|
[summary]
Args:
X ([type]): [description]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
fit
|
CkiChen/pymindaffectBCI
|
python
|
def fit(self, X):
'[summary]\n\n Args:\n X ([type]): [description]\n\n Returns:\n [type]: [description]\n '
self.sX_N = X.shape[0]
if (self.car and (X.shape[(- 1)] > 4)):
X = (X.copy() - np.mean(X, (- 1), keepdims=True))
self.sX = np.sum(X, axis=0)
self.sXX_N = X.shape[0]
self.sXX = np.sum(((X - (self.sX / self.sX_N)) ** 2), axis=0)
return self.power()
|
def transform(self, X: np.ndarray):
'[compute the exponientially weighted centered power of X]\n\n Args:\n X (np.ndarray): [description]\n\n Returns:\n [type]: [description]\n '
if (self.sX is None):
return self.fit(X)
if (self.car and (X.shape[(- 1)] > 4)):
ch_power = self.power()
act_ch = (ch_power > (np.max(ch_power) * 0.001))
X = (X.copy() - np.mean(X[(..., act_ch)], (- 1), keepdims=True))
alpha_mu = (self.alpha_mu ** X.shape[0])
self.sX_N = ((self.sX_N * alpha_mu) + X.shape[0])
self.sX = ((self.sX * alpha_mu) + np.sum(X, axis=0))
alpha_pow = (self.alpha_power ** X.shape[0])
self.sXX_N = ((self.sXX_N * alpha_pow) + X.shape[0])
self.sXX = ((self.sXX * alpha_pow) + np.sum(((X - (self.sX / self.sX_N)) ** 2), axis=0))
return self.power()
| -7,378,616,810,163,433,000
|
[compute the exponientially weighted centered power of X]
Args:
X (np.ndarray): [description]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
transform
|
CkiChen/pymindaffectBCI
|
python
|
def transform(self, X: np.ndarray):
'[compute the exponientially weighted centered power of X]\n\n Args:\n X (np.ndarray): [description]\n\n Returns:\n [type]: [description]\n '
if (self.sX is None):
return self.fit(X)
if (self.car and (X.shape[(- 1)] > 4)):
ch_power = self.power()
act_ch = (ch_power > (np.max(ch_power) * 0.001))
X = (X.copy() - np.mean(X[(..., act_ch)], (- 1), keepdims=True))
alpha_mu = (self.alpha_mu ** X.shape[0])
self.sX_N = ((self.sX_N * alpha_mu) + X.shape[0])
self.sX = ((self.sX * alpha_mu) + np.sum(X, axis=0))
alpha_pow = (self.alpha_power ** X.shape[0])
self.sXX_N = ((self.sXX_N * alpha_pow) + X.shape[0])
self.sXX = ((self.sXX * alpha_pow) + np.sum(((X - (self.sX / self.sX_N)) ** 2), axis=0))
return self.power()
|
def mean(self):
'[summary]\n\n Returns:\n [type]: [description]\n '
return (self.sX / self.sX_N)
| 4,070,333,294,140,741,000
|
[summary]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
mean
|
CkiChen/pymindaffectBCI
|
python
|
def mean(self):
'[summary]\n\n Returns:\n [type]: [description]\n '
return (self.sX / self.sX_N)
|
def power(self):
'[summary]\n\n Returns:\n [type]: [description]\n '
return (self.sXX / self.sXX_N)
| -2,892,201,905,606,831,000
|
[summary]
Returns:
[type]: [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
power
|
CkiChen/pymindaffectBCI
|
python
|
def power(self):
'[summary]\n\n Returns:\n [type]: [description]\n '
return (self.sXX / self.sXX_N)
|
def testcase(self):
'[summary]\n '
import matplotlib.pyplot as plt
X = np.random.randn(10000, 2)
pt = power_tracker(100, 100, 100)
print('All at once: power={}'.format(pt.transform(X)))
pt = power_tracker(100, 1000, 1000)
print('alpha_mu={} alpha_pow={}'.format(pt.alpha_mu, pt.alpha_power))
step = 30
idxs = list(range(step, X.shape[0], step))
powers = np.zeros((len(idxs), X.shape[(- 1)]))
mus = np.zeros((len(idxs), X.shape[(- 1)]))
for (i, j) in enumerate(idxs):
powers[i, :] = np.sqrt(pt.transform(X[(j - step):j, :]))
mus[i, :] = pt.mean()
for d in range(X.shape[(- 1)]):
plt.subplot(X.shape[(- 1)], 1, (d + 1))
plt.plot(X[:, d])
plt.plot(idxs, mus[:, d])
plt.plot(idxs, powers[:, d])
| 3,154,473,121,034,125,300
|
[summary]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testcase
|
CkiChen/pymindaffectBCI
|
python
|
def testcase(self):
'\n '
import matplotlib.pyplot as plt
X = np.random.randn(10000, 2)
pt = power_tracker(100, 100, 100)
print('All at once: power={}'.format(pt.transform(X)))
pt = power_tracker(100, 1000, 1000)
print('alpha_mu={} alpha_pow={}'.format(pt.alpha_mu, pt.alpha_power))
step = 30
idxs = list(range(step, X.shape[0], step))
powers = np.zeros((len(idxs), X.shape[(- 1)]))
mus = np.zeros((len(idxs), X.shape[(- 1)]))
for (i, j) in enumerate(idxs):
powers[i, :] = np.sqrt(pt.transform(X[(j - step):j, :]))
mus[i, :] = pt.mean()
for d in range(X.shape[(- 1)]):
plt.subplot(X.shape[(- 1)], 1, (d + 1))
plt.plot(X[:, d])
plt.plot(idxs, mus[:, d])
plt.plot(idxs, powers[:, d])
|
def __init__(self, fs=None, sample2timestamp=None, max_delta=200):
'tranform from per-packet (i.e. multiple-samples) to per-sample timestamps\n\n Args:\n fs (float): default sample rate, used when no other timing info is available\n sample2timestamp (transformer, optional): class to de-jitter timestamps based on sample-count. Defaults to None.\n '
self.fs = fs
a0 = ((1000 / self.fs) if (self.fs is not None) else 1)
if (sample2timestamp == 'lower_bound_tracker'):
self.sample2timestamp = lower_bound_tracker(a0=a0)
elif (sample2timestamp == 'linear_trend_tracker'):
self.sample2timestamp = linear_trend_tracker(a0=a0)
else:
self.sample2timestamp = sample2timestamp
self.max_delta = max_delta
| 8,096,475,303,372,531,000
|
tranform from per-packet (i.e. multiple-samples) to per-sample timestamps
Args:
fs (float): default sample rate, used when no other timing info is available
sample2timestamp (transformer, optional): class to de-jitter timestamps based on sample-count. Defaults to None.
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
__init__
|
CkiChen/pymindaffectBCI
|
python
|
def __init__(self, fs=None, sample2timestamp=None, max_delta=200):
'tranform from per-packet (i.e. multiple-samples) to per-sample timestamps\n\n Args:\n fs (float): default sample rate, used when no other timing info is available\n sample2timestamp (transformer, optional): class to de-jitter timestamps based on sample-count. Defaults to None.\n '
self.fs = fs
a0 = ((1000 / self.fs) if (self.fs is not None) else 1)
if (sample2timestamp == 'lower_bound_tracker'):
self.sample2timestamp = lower_bound_tracker(a0=a0)
elif (sample2timestamp == 'linear_trend_tracker'):
self.sample2timestamp = linear_trend_tracker(a0=a0)
else:
self.sample2timestamp = sample2timestamp
self.max_delta = max_delta
|
def fit(self, ts, nsamp=1):
'[summary]\n\n Args:\n ts ([type]): [description]\n nsamp (int, optional): [description]. Defaults to 1.\n '
self.last_sample_timestamp_ = ts
self.n_ = 0
| -909,119,697,521,445,400
|
[summary]
Args:
ts ([type]): [description]
nsamp (int, optional): [description]. Defaults to 1.
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
fit
|
CkiChen/pymindaffectBCI
|
python
|
def fit(self, ts, nsamp=1):
'[summary]\n\n Args:\n ts ([type]): [description]\n nsamp (int, optional): [description]. Defaults to 1.\n '
self.last_sample_timestamp_ = ts
self.n_ = 0
|
def transform(self, timestamp: float, nsamp: int=1):
'add per-sample timestamp information to the data matrix\n\n Args:\n timestamp (float): the timestamp of the last sample of d\n nsamp(int): number of samples to interpolate\n\n Returns:\n np.ndarray: (nsamp) the interpolated time-stamps\n '
if (not hasattr(self, 'last_sample_timestamp_')):
self.fit(timestamp, nsamp)
self.n_ = (self.n_ + nsamp)
if ((self.last_sample_timestamp_ < timestamp) or (self.sample2timestamp is not None)):
if (self.sample2timestamp is not None):
newtimestamp = self.sample2timestamp.transform(self.n_, timestamp)
if (abs((timestamp - newtimestamp)) < self.max_delta):
timestamp = int(newtimestamp)
samples_ts = np.linspace(self.last_sample_timestamp_, timestamp, (nsamp + 1), endpoint=True, dtype=int)
samples_ts = samples_ts[1:]
elif self.fs:
samples_ts = ((np.arange(((- nsamp) + 1), 1, dtype=int) * (1000 / self.fs)) + timestamp)
else:
samples_ts = (np.ones(nsamp, dtype=int) * timestamp)
self.last_sample_timestamp_ = timestamp
return samples_ts
| -4,200,367,045,856,528,400
|
add per-sample timestamp information to the data matrix
Args:
timestamp (float): the timestamp of the last sample of d
nsamp(int): number of samples to interpolate
Returns:
np.ndarray: (nsamp) the interpolated time-stamps
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
transform
|
CkiChen/pymindaffectBCI
|
python
|
def transform(self, timestamp: float, nsamp: int=1):
'add per-sample timestamp information to the data matrix\n\n Args:\n timestamp (float): the timestamp of the last sample of d\n nsamp(int): number of samples to interpolate\n\n Returns:\n np.ndarray: (nsamp) the interpolated time-stamps\n '
if (not hasattr(self, 'last_sample_timestamp_')):
self.fit(timestamp, nsamp)
self.n_ = (self.n_ + nsamp)
if ((self.last_sample_timestamp_ < timestamp) or (self.sample2timestamp is not None)):
if (self.sample2timestamp is not None):
newtimestamp = self.sample2timestamp.transform(self.n_, timestamp)
if (abs((timestamp - newtimestamp)) < self.max_delta):
timestamp = int(newtimestamp)
samples_ts = np.linspace(self.last_sample_timestamp_, timestamp, (nsamp + 1), endpoint=True, dtype=int)
samples_ts = samples_ts[1:]
elif self.fs:
samples_ts = ((np.arange(((- nsamp) + 1), 1, dtype=int) * (1000 / self.fs)) + timestamp)
else:
samples_ts = (np.ones(nsamp, dtype=int) * timestamp)
self.last_sample_timestamp_ = timestamp
return samples_ts
|
def testcase(self, npkt=1000, fs=100):
'[summary]\n\n Args:\n npkt (int, optional): [description]. Defaults to 1000.\n fs (int, optional): [description]. Defaults to 100.\n '
nsamp = np.random.random_integers(0, 10, size=(npkt,))
ts_true = ((np.arange(np.sum(nsamp)) * 1000) / fs)
idx = (np.cumsum(nsamp) - 1)
pkt_ts = ts_true[idx]
pkt_ts = (pkt_ts + np.random.uniform(0, ((0.5 * 1000) / fs), size=pkt_ts.shape))
sts = []
tsfn = timestamp_interpolation(fs=fs, sample2timestamp='lower_bound_tracker')
for (i, (n, t)) in enumerate(zip(nsamp, pkt_ts)):
samp_ts = tsfn.transform(t, n)
sts.extend(samp_ts)
import matplotlib.pyplot as plt
plt.plot((ts_true - sts))
plt.show()
| -9,214,255,085,325,899,000
|
[summary]
Args:
npkt (int, optional): [description]. Defaults to 1000.
fs (int, optional): [description]. Defaults to 100.
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testcase
|
CkiChen/pymindaffectBCI
|
python
|
def testcase(self, npkt=1000, fs=100):
'[summary]\n\n Args:\n npkt (int, optional): [description]. Defaults to 1000.\n fs (int, optional): [description]. Defaults to 100.\n '
nsamp = np.random.random_integers(0, 10, size=(npkt,))
ts_true = ((np.arange(np.sum(nsamp)) * 1000) / fs)
idx = (np.cumsum(nsamp) - 1)
pkt_ts = ts_true[idx]
pkt_ts = (pkt_ts + np.random.uniform(0, ((0.5 * 1000) / fs), size=pkt_ts.shape))
sts = []
tsfn = timestamp_interpolation(fs=fs, sample2timestamp='lower_bound_tracker')
for (i, (n, t)) in enumerate(zip(nsamp, pkt_ts)):
samp_ts = tsfn.transform(t, n)
sts.extend(samp_ts)
import matplotlib.pyplot as plt
plt.plot((ts_true - sts))
plt.show()
|
def fit(self, X):
'[summary]\n\n Args:\n X ([type]): [description]\n '
self.W_ = np.zeros((self.order, X.shape[(- 1)]), dtype=X.dtype)
self.W_[(- 1), :] = 1
(_, self.W_) = self.transform(X[1:, :])
| -6,270,560,603,298,594,000
|
[summary]
Args:
X ([type]): [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
fit
|
CkiChen/pymindaffectBCI
|
python
|
def fit(self, X):
'[summary]\n\n Args:\n X ([type]): [description]\n '
self.W_ = np.zeros((self.order, X.shape[(- 1)]), dtype=X.dtype)
self.W_[(- 1), :] = 1
(_, self.W_) = self.transform(X[1:, :])
|
def transform(self, X):
'add per-sample timestamp information to the data matrix\n\n Args:\n X (float): the data to decorrelate\n nsamp(int): number of samples to interpolate\n\n Returns:\n np.ndarray: the decorrelated data\n '
if (not hasattr(self, 'W_')):
self.fit(X)
(X, self.W_) = temporally_decorrelate(X, W=self.W_, reg=self.reg, eta=self.eta, axis=self.axis)
return X
| 1,294,235,935,748,090,400
|
add per-sample timestamp information to the data matrix
Args:
X (float): the data to decorrelate
nsamp(int): number of samples to interpolate
Returns:
np.ndarray: the decorrelated data
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
transform
|
CkiChen/pymindaffectBCI
|
python
|
def transform(self, X):
'add per-sample timestamp information to the data matrix\n\n Args:\n X (float): the data to decorrelate\n nsamp(int): number of samples to interpolate\n\n Returns:\n np.ndarray: the decorrelated data\n '
if (not hasattr(self, 'W_')):
self.fit(X)
(X, self.W_) = temporally_decorrelate(X, W=self.W_, reg=self.reg, eta=self.eta, axis=self.axis)
return X
|
def testcase(self, dur=3, fs=100, blksize=10):
'[summary]\n\n Args:\n dur (int, optional): [description]. Defaults to 3.\n fs (int, optional): [description]. Defaults to 100.\n blksize (int, optional): [description]. Defaults to 10.\n '
import numpy as np
import matplotlib.pyplot as plt
from mindaffectBCI.decoder.preprocess import plot_grand_average_spectrum
fs = 100
X = np.random.standard_normal((2, (fs * dur), 2))
X = (X[:, :(- 1), :] + X[:, 1:, :])
print('X={}'.format(X.shape))
plt.figure(1)
plot_grand_average_spectrum(X, fs)
plt.suptitle('Raw')
plt.show(block=False)
tdc = temporal_decorrelator()
wX = np.zeros(X.shape, X.dtype)
for i in range(0, X.shape[(- 1)], blksize):
idx = range(i, (i + blksize))
wX[idx, :] = tdc.transform(X[idx, :])
plt.figure(2)
plot_grand_average_spectrum(wX, fs)
plt.suptitle('Decorrelated')
plt.show()
| -1,198,068,088,960,186,600
|
[summary]
Args:
dur (int, optional): [description]. Defaults to 3.
fs (int, optional): [description]. Defaults to 100.
blksize (int, optional): [description]. Defaults to 10.
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testcase
|
CkiChen/pymindaffectBCI
|
python
|
def testcase(self, dur=3, fs=100, blksize=10):
'[summary]\n\n Args:\n dur (int, optional): [description]. Defaults to 3.\n fs (int, optional): [description]. Defaults to 100.\n blksize (int, optional): [description]. Defaults to 10.\n '
import numpy as np
import matplotlib.pyplot as plt
from mindaffectBCI.decoder.preprocess import plot_grand_average_spectrum
fs = 100
X = np.random.standard_normal((2, (fs * dur), 2))
X = (X[:, :(- 1), :] + X[:, 1:, :])
print('X={}'.format(X.shape))
plt.figure(1)
plot_grand_average_spectrum(X, fs)
plt.suptitle('Raw')
plt.show(block=False)
tdc = temporal_decorrelator()
wX = np.zeros(X.shape, X.dtype)
for i in range(0, X.shape[(- 1)], blksize):
idx = range(i, (i + blksize))
wX[idx, :] = tdc.transform(X[idx, :])
plt.figure(2)
plot_grand_average_spectrum(wX, fs)
plt.suptitle('Decorrelated')
plt.show()
|
def fit(self, X):
'[summary]\n\n Args:\n X ([type]): [description]\n '
self.sigma2_ = np.zeros((X.shape[(- 1)],), dtype=X.dtype)
self.sigma2_ = (X[0, :] * X[0, :])
self.transform(X[1:, :])
| 5,082,720,218,458,383,000
|
[summary]
Args:
X ([type]): [description]
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
fit
|
CkiChen/pymindaffectBCI
|
python
|
def fit(self, X):
'[summary]\n\n Args:\n X ([type]): [description]\n '
self.sigma2_ = np.zeros((X.shape[(- 1)],), dtype=X.dtype)
self.sigma2_ = (X[0, :] * X[0, :])
self.transform(X[1:, :])
|
def transform(self, X):
'add per-sample timestamp information to the data matrix\n\n Args:\n X (float): the data to decorrelate\n\n Returns:\n np.ndarray: the decorrelated data\n '
if (not hasattr(self, 'sigma2_')):
self.fit(X)
(X, self.W_) = standardize_channel_power(X, sigma2=self.sigma2_, reg=self.reg, axis=self.axis)
return X
| 2,438,386,620,523,981,000
|
add per-sample timestamp information to the data matrix
Args:
X (float): the data to decorrelate
Returns:
np.ndarray: the decorrelated data
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
transform
|
CkiChen/pymindaffectBCI
|
python
|
def transform(self, X):
'add per-sample timestamp information to the data matrix\n\n Args:\n X (float): the data to decorrelate\n\n Returns:\n np.ndarray: the decorrelated data\n '
if (not hasattr(self, 'sigma2_')):
self.fit(X)
(X, self.W_) = standardize_channel_power(X, sigma2=self.sigma2_, reg=self.reg, axis=self.axis)
return X
|
def testcase(self, dur=3, fs=100, blksize=10):
'[summary]\n\n Args:\n dur (int, optional): [description]. Defaults to 3.\n fs (int, optional): [description]. Defaults to 100.\n blksize (int, optional): [description]. Defaults to 10.\n '
import numpy as np
import matplotlib.pyplot as plt
from mindaffectBCI.decoder.preprocess import plot_grand_average_spectrum
fs = 100
X = np.random.standard_normal((2, (fs * dur), 2))
X = (X[:, :(- 1), :] + X[:, 1:, :])
print('X={}'.format(X.shape))
plt.figure(1)
plot_grand_average_spectrum(X, fs)
plt.suptitle('Raw')
plt.show(block=False)
cps = channel_power_standardizer()
wX = np.zeros(X.shape, X.dtype)
for i in range(0, X.shape[(- 1)], blksize):
idx = range(i, (i + blksize))
wX[idx, :] = cps.transform(X[idx, :])
plt.figure(2)
plot_grand_average_spectrum(wX, fs)
plt.suptitle('Decorrelated')
plt.show()
| 6,321,695,239,963,563,000
|
[summary]
Args:
dur (int, optional): [description]. Defaults to 3.
fs (int, optional): [description]. Defaults to 100.
blksize (int, optional): [description]. Defaults to 10.
|
mindaffectBCI/decoder/UtopiaDataInterface.py
|
testcase
|
CkiChen/pymindaffectBCI
|
python
|
def testcase(self, dur=3, fs=100, blksize=10):
'[summary]\n\n Args:\n dur (int, optional): [description]. Defaults to 3.\n fs (int, optional): [description]. Defaults to 100.\n blksize (int, optional): [description]. Defaults to 10.\n '
import numpy as np
import matplotlib.pyplot as plt
from mindaffectBCI.decoder.preprocess import plot_grand_average_spectrum
fs = 100
X = np.random.standard_normal((2, (fs * dur), 2))
X = (X[:, :(- 1), :] + X[:, 1:, :])
print('X={}'.format(X.shape))
plt.figure(1)
plot_grand_average_spectrum(X, fs)
plt.suptitle('Raw')
plt.show(block=False)
cps = channel_power_standardizer()
wX = np.zeros(X.shape, X.dtype)
for i in range(0, X.shape[(- 1)], blksize):
idx = range(i, (i + blksize))
wX[idx, :] = cps.transform(X[idx, :])
plt.figure(2)
plot_grand_average_spectrum(wX, fs)
plt.suptitle('Decorrelated')
plt.show()
|
def _read_fmt_chunk(fid, is_big_endian):
'\n Returns\n -------\n size : int\n size of format subchunk in bytes (minus 8 for "fmt " and itself)\n format_tag : int\n PCM, float, or compressed format\n channels : int\n number of channels\n fs : int\n sampling frequency in samples per second\n bytes_per_second : int\n overall byte rate for the file\n block_align : int\n bytes per sample, including all channels\n bit_depth : int\n bits per sample\n '
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = res = struct.unpack((fmt + 'I'), fid.read(4))[0]
bytes_read = 0
if (size < 16):
raise ValueError('Binary structure of wave file is not compliant')
res = struct.unpack((fmt + 'HHIIHH'), fid.read(16))
bytes_read += 16
(format_tag, channels, fs, bytes_per_second, block_align, bit_depth) = res
if ((format_tag == WAVE_FORMAT_EXTENSIBLE) and (size >= (16 + 2))):
ext_chunk_size = struct.unpack((fmt + 'H'), fid.read(2))[0]
bytes_read += 2
if (ext_chunk_size >= 22):
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[(2 + 4):((2 + 4) + 16)]
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xaa\x008\x9bq'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xaa\x008\x9bq'
if raw_guid.endswith(tail):
format_tag = struct.unpack((fmt + 'I'), raw_guid[:4])[0]
else:
raise ValueError('Binary structure of wave file is not compliant')
if (format_tag not in KNOWN_WAVE_FORMATS):
raise ValueError('Unknown wave file format')
if (size > bytes_read):
fid.read((size - bytes_read))
return (size, format_tag, channels, fs, bytes_per_second, block_align, bit_depth)
| -1,035,857,940,140,194,300
|
Returns
-------
size : int
size of format subchunk in bytes (minus 8 for "fmt " and itself)
format_tag : int
PCM, float, or compressed format
channels : int
number of channels
fs : int
sampling frequency in samples per second
bytes_per_second : int
overall byte rate for the file
block_align : int
bytes per sample, including all channels
bit_depth : int
bits per sample
|
scipy/io/wavfile.py
|
_read_fmt_chunk
|
AKuederle/scipy
|
python
|
def _read_fmt_chunk(fid, is_big_endian):
'\n Returns\n -------\n size : int\n size of format subchunk in bytes (minus 8 for "fmt " and itself)\n format_tag : int\n PCM, float, or compressed format\n channels : int\n number of channels\n fs : int\n sampling frequency in samples per second\n bytes_per_second : int\n overall byte rate for the file\n block_align : int\n bytes per sample, including all channels\n bit_depth : int\n bits per sample\n '
if is_big_endian:
fmt = '>'
else:
fmt = '<'
size = res = struct.unpack((fmt + 'I'), fid.read(4))[0]
bytes_read = 0
if (size < 16):
raise ValueError('Binary structure of wave file is not compliant')
res = struct.unpack((fmt + 'HHIIHH'), fid.read(16))
bytes_read += 16
(format_tag, channels, fs, bytes_per_second, block_align, bit_depth) = res
if ((format_tag == WAVE_FORMAT_EXTENSIBLE) and (size >= (16 + 2))):
ext_chunk_size = struct.unpack((fmt + 'H'), fid.read(2))[0]
bytes_read += 2
if (ext_chunk_size >= 22):
extensible_chunk_data = fid.read(22)
bytes_read += 22
raw_guid = extensible_chunk_data[(2 + 4):((2 + 4) + 16)]
if is_big_endian:
tail = b'\x00\x00\x00\x10\x80\x00\x00\xaa\x008\x9bq'
else:
tail = b'\x00\x00\x10\x00\x80\x00\x00\xaa\x008\x9bq'
if raw_guid.endswith(tail):
format_tag = struct.unpack((fmt + 'I'), raw_guid[:4])[0]
else:
raise ValueError('Binary structure of wave file is not compliant')
if (format_tag not in KNOWN_WAVE_FORMATS):
raise ValueError('Unknown wave file format')
if (size > bytes_read):
fid.read((size - bytes_read))
return (size, format_tag, channels, fs, bytes_per_second, block_align, bit_depth)
|
def read(filename, mmap=False):
'\n Open a WAV file\n\n Return the sample rate (in samples/sec) and data from a WAV file.\n\n Parameters\n ----------\n filename : string or open file handle\n Input wav file.\n mmap : bool, optional\n Whether to read data as memory-mapped.\n Only to be used on real files (Default: False).\n\n .. versionadded:: 0.12.0\n\n Returns\n -------\n rate : int\n Sample rate of wav file.\n data : numpy array\n Data read from wav file. Data-type is determined from the file;\n see Notes.\n\n Notes\n -----\n This function cannot read wav files with 24-bit data.\n\n Common data types: [1]_\n\n ===================== =========== =========== =============\n WAV format Min Max NumPy dtype\n ===================== =========== =========== =============\n 32-bit floating-point -1.0 +1.0 float32\n 32-bit PCM -2147483648 +2147483647 int32\n 16-bit PCM -32768 +32767 int16\n 8-bit PCM 0 255 uint8\n ===================== =========== =========== =============\n\n Note that 8-bit PCM is unsigned.\n\n References\n ----------\n .. 
[1] IBM Corporation and Microsoft Corporation, "Multimedia Programming\n Interface and Data Specifications 1.0", section "Data Format of the\n Samples", August 1991\n http://www.tactilemedia.com/info/MCI_Control_Info.html\n\n Examples\n --------\n >>> from os.path import dirname, join as pjoin\n >>> import scipy.io as sio\n\n Get the filename for an example .wav file from the tests/data directory.\n\n >>> data_dir = pjoin(dirname(sio.__file__), \'tests\', \'data\')\n >>> wav_fname = pjoin(data_dir, \'test-44100Hz-2ch-32bit-float-be.wav\')\n\n Load the .wav file contents.\n\n >>> samplerate, data = sio.wavfile.read(wav_fname)\n >>> print(f"number of channels = {data.shape[1]}")\n number of channels = 2\n >>> length = data.shape[0] / samplerate\n >>> print(f"length = {length}s")\n length = 0.01s\n\n Plot the waveform.\n\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>> time = np.linspace(0., length, data.shape[0])\n >>> plt.plot(time, data[:, 0], label="Left channel")\n >>> plt.plot(time, data[:, 1], label="Right channel")\n >>> plt.legend()\n >>> plt.xlabel("Time [s]")\n >>> plt.ylabel("Amplitude")\n >>> plt.show()\n\n '
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
try:
(file_size, is_big_endian) = _read_riff_chunk(fid)
fmt_chunk_received = False
data_chunk_received = False
channels = 1
bit_depth = 8
format_tag = WAVE_FORMAT_PCM
while (fid.tell() < file_size):
chunk_id = fid.read(4)
if (not chunk_id):
if data_chunk_received:
warnings.warn('Reached EOF prematurely; finished at {:d} bytes, expected {:d} bytes from header.'.format(fid.tell(), file_size), WavFileWarning, stacklevel=2)
break
else:
raise ValueError('Unexpected end of file.')
elif (len(chunk_id) < 4):
raise ValueError('Incomplete wav chunk.')
if (chunk_id == b'fmt '):
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
(format_tag, channels, fs) = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
if (bit_depth not in (8, 16, 32, 64, 96, 128)):
raise ValueError('Unsupported bit depth: the wav file has {}-bit data.'.format(bit_depth))
elif (chunk_id == b'fact'):
_skip_unknown_chunk(fid, is_big_endian)
elif (chunk_id == b'data'):
data_chunk_received = True
if (not fmt_chunk_received):
raise ValueError('No fmt chunk before data')
data = _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, mmap)
elif (chunk_id == b'LIST'):
_skip_unknown_chunk(fid, is_big_endian)
elif (chunk_id in (b'JUNK', b'Fake')):
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn('Chunk (non-data) not understood, skipping it.', WavFileWarning, stacklevel=2)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if (not hasattr(filename, 'read')):
fid.close()
else:
fid.seek(0)
return (fs, data)
| -5,569,298,711,442,947,000
|
Open a WAV file
Return the sample rate (in samples/sec) and data from a WAV file.
Parameters
----------
filename : string or open file handle
Input wav file.
mmap : bool, optional
Whether to read data as memory-mapped.
Only to be used on real files (Default: False).
.. versionadded:: 0.12.0
Returns
-------
rate : int
Sample rate of wav file.
data : numpy array
Data read from wav file. Data-type is determined from the file;
see Notes.
Notes
-----
This function cannot read wav files with 24-bit data.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit PCM -2147483648 +2147483647 int32
16-bit PCM -32768 +32767 int16
8-bit PCM 0 255 uint8
===================== =========== =========== =============
Note that 8-bit PCM is unsigned.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
Examples
--------
>>> from os.path import dirname, join as pjoin
>>> import scipy.io as sio
Get the filename for an example .wav file from the tests/data directory.
>>> data_dir = pjoin(dirname(sio.__file__), 'tests', 'data')
>>> wav_fname = pjoin(data_dir, 'test-44100Hz-2ch-32bit-float-be.wav')
Load the .wav file contents.
>>> samplerate, data = sio.wavfile.read(wav_fname)
>>> print(f"number of channels = {data.shape[1]}")
number of channels = 2
>>> length = data.shape[0] / samplerate
>>> print(f"length = {length}s")
length = 0.01s
Plot the waveform.
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> time = np.linspace(0., length, data.shape[0])
>>> plt.plot(time, data[:, 0], label="Left channel")
>>> plt.plot(time, data[:, 1], label="Right channel")
>>> plt.legend()
>>> plt.xlabel("Time [s]")
>>> plt.ylabel("Amplitude")
>>> plt.show()
|
scipy/io/wavfile.py
|
read
|
AKuederle/scipy
|
python
|
def read(filename, mmap=False):
'\n Open a WAV file\n\n Return the sample rate (in samples/sec) and data from a WAV file.\n\n Parameters\n ----------\n filename : string or open file handle\n Input wav file.\n mmap : bool, optional\n Whether to read data as memory-mapped.\n Only to be used on real files (Default: False).\n\n .. versionadded:: 0.12.0\n\n Returns\n -------\n rate : int\n Sample rate of wav file.\n data : numpy array\n Data read from wav file. Data-type is determined from the file;\n see Notes.\n\n Notes\n -----\n This function cannot read wav files with 24-bit data.\n\n Common data types: [1]_\n\n ===================== =========== =========== =============\n WAV format Min Max NumPy dtype\n ===================== =========== =========== =============\n 32-bit floating-point -1.0 +1.0 float32\n 32-bit PCM -2147483648 +2147483647 int32\n 16-bit PCM -32768 +32767 int16\n 8-bit PCM 0 255 uint8\n ===================== =========== =========== =============\n\n Note that 8-bit PCM is unsigned.\n\n References\n ----------\n .. 
[1] IBM Corporation and Microsoft Corporation, "Multimedia Programming\n Interface and Data Specifications 1.0", section "Data Format of the\n Samples", August 1991\n http://www.tactilemedia.com/info/MCI_Control_Info.html\n\n Examples\n --------\n >>> from os.path import dirname, join as pjoin\n >>> import scipy.io as sio\n\n Get the filename for an example .wav file from the tests/data directory.\n\n >>> data_dir = pjoin(dirname(sio.__file__), \'tests\', \'data\')\n >>> wav_fname = pjoin(data_dir, \'test-44100Hz-2ch-32bit-float-be.wav\')\n\n Load the .wav file contents.\n\n >>> samplerate, data = sio.wavfile.read(wav_fname)\n >>> print(f"number of channels = {data.shape[1]}")\n number of channels = 2\n >>> length = data.shape[0] / samplerate\n >>> print(f"length = {length}s")\n length = 0.01s\n\n Plot the waveform.\n\n >>> import matplotlib.pyplot as plt\n >>> import numpy as np\n >>> time = np.linspace(0., length, data.shape[0])\n >>> plt.plot(time, data[:, 0], label="Left channel")\n >>> plt.plot(time, data[:, 1], label="Right channel")\n >>> plt.legend()\n >>> plt.xlabel("Time [s]")\n >>> plt.ylabel("Amplitude")\n >>> plt.show()\n\n '
if hasattr(filename, 'read'):
fid = filename
mmap = False
else:
fid = open(filename, 'rb')
try:
(file_size, is_big_endian) = _read_riff_chunk(fid)
fmt_chunk_received = False
data_chunk_received = False
channels = 1
bit_depth = 8
format_tag = WAVE_FORMAT_PCM
while (fid.tell() < file_size):
chunk_id = fid.read(4)
if (not chunk_id):
if data_chunk_received:
warnings.warn('Reached EOF prematurely; finished at {:d} bytes, expected {:d} bytes from header.'.format(fid.tell(), file_size), WavFileWarning, stacklevel=2)
break
else:
raise ValueError('Unexpected end of file.')
elif (len(chunk_id) < 4):
raise ValueError('Incomplete wav chunk.')
if (chunk_id == b'fmt '):
fmt_chunk_received = True
fmt_chunk = _read_fmt_chunk(fid, is_big_endian)
(format_tag, channels, fs) = fmt_chunk[1:4]
bit_depth = fmt_chunk[6]
if (bit_depth not in (8, 16, 32, 64, 96, 128)):
raise ValueError('Unsupported bit depth: the wav file has {}-bit data.'.format(bit_depth))
elif (chunk_id == b'fact'):
_skip_unknown_chunk(fid, is_big_endian)
elif (chunk_id == b'data'):
data_chunk_received = True
if (not fmt_chunk_received):
raise ValueError('No fmt chunk before data')
data = _read_data_chunk(fid, format_tag, channels, bit_depth, is_big_endian, mmap)
elif (chunk_id == b'LIST'):
_skip_unknown_chunk(fid, is_big_endian)
elif (chunk_id in (b'JUNK', b'Fake')):
_skip_unknown_chunk(fid, is_big_endian)
else:
warnings.warn('Chunk (non-data) not understood, skipping it.', WavFileWarning, stacklevel=2)
_skip_unknown_chunk(fid, is_big_endian)
finally:
if (not hasattr(filename, 'read')):
fid.close()
else:
fid.seek(0)
return (fs, data)
|
def write(filename, rate, data):
'\n Write a NumPy array as a WAV file.\n\n Parameters\n ----------\n filename : string or open file handle\n Output wav file.\n rate : int\n The sample rate (in samples/sec).\n data : ndarray\n A 1-D or 2-D NumPy array of either integer or float data-type.\n\n Notes\n -----\n * Writes a simple uncompressed WAV file.\n * To write multiple-channels, use a 2-D array of shape\n (Nsamples, Nchannels).\n * The bits-per-sample and PCM/float will be determined by the data-type.\n\n Common data types: [1]_\n\n ===================== =========== =========== =============\n WAV format Min Max NumPy dtype\n ===================== =========== =========== =============\n 32-bit floating-point -1.0 +1.0 float32\n 32-bit PCM -2147483648 +2147483647 int32\n 16-bit PCM -32768 +32767 int16\n 8-bit PCM 0 255 uint8\n ===================== =========== =========== =============\n\n Note that 8-bit PCM is unsigned.\n\n References\n ----------\n .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming\n Interface and Data Specifications 1.0", section "Data Format of the\n Samples", August 1991\n http://www.tactilemedia.com/info/MCI_Control_Info.html\n\n Examples\n --------\n Create a 100Hz sine wave, sampled at 44100Hz.\n Write to 16-bit PCM, Mono.\n\n >>> from scipy.io.wavfile import write\n >>> samplerate = 44100; fs = 100\n >>> t = np.linspace(0., 1., samplerate)\n >>> amplitude = np.iinfo(np.int16).max\n >>> data = amplitude * np.sin(2. * np.pi * fs * t)\n >>> write("example.wav", samplerate, data)\n\n '
if hasattr(filename, 'write'):
fid = filename
else:
fid = open(filename, 'wb')
fs = rate
try:
dkind = data.dtype.kind
if (not ((dkind == 'i') or (dkind == 'f') or ((dkind == 'u') and (data.dtype.itemsize == 1)))):
raise ValueError(("Unsupported data type '%s'" % data.dtype))
header_data = b''
header_data += b'RIFF'
header_data += b'\x00\x00\x00\x00'
header_data += b'WAVE'
header_data += b'fmt '
if (dkind == 'f'):
format_tag = WAVE_FORMAT_IEEE_FLOAT
else:
format_tag = WAVE_FORMAT_PCM
if (data.ndim == 1):
channels = 1
else:
channels = data.shape[1]
bit_depth = (data.dtype.itemsize * 8)
bytes_per_second = ((fs * (bit_depth // 8)) * channels)
block_align = (channels * (bit_depth // 8))
fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs, bytes_per_second, block_align, bit_depth)
if (not ((dkind == 'i') or (dkind == 'u'))):
fmt_chunk_data += b'\x00\x00'
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
if (not ((dkind == 'i') or (dkind == 'u'))):
header_data += b'fact'
header_data += struct.pack('<II', 4, data.shape[0])
if ((((len(header_data) - 4) - 4) + ((4 + 4) + data.nbytes)) > 4294967295):
raise ValueError('Data exceeds wave file size limit')
fid.write(header_data)
fid.write(b'data')
fid.write(struct.pack('<I', data.nbytes))
if ((data.dtype.byteorder == '>') or ((data.dtype.byteorder == '=') and (sys.byteorder == 'big'))):
data = data.byteswap()
_array_tofile(fid, data)
size = fid.tell()
fid.seek(4)
fid.write(struct.pack('<I', (size - 8)))
finally:
if (not hasattr(filename, 'write')):
fid.close()
else:
fid.seek(0)
| -1,827,049,610,991,448,600
|
Write a NumPy array as a WAV file.
Parameters
----------
filename : string or open file handle
Output wav file.
rate : int
The sample rate (in samples/sec).
data : ndarray
A 1-D or 2-D NumPy array of either integer or float data-type.
Notes
-----
* Writes a simple uncompressed WAV file.
* To write multiple-channels, use a 2-D array of shape
(Nsamples, Nchannels).
* The bits-per-sample and PCM/float will be determined by the data-type.
Common data types: [1]_
===================== =========== =========== =============
WAV format Min Max NumPy dtype
===================== =========== =========== =============
32-bit floating-point -1.0 +1.0 float32
32-bit PCM -2147483648 +2147483647 int32
16-bit PCM -32768 +32767 int16
8-bit PCM 0 255 uint8
===================== =========== =========== =============
Note that 8-bit PCM is unsigned.
References
----------
.. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming
Interface and Data Specifications 1.0", section "Data Format of the
Samples", August 1991
http://www.tactilemedia.com/info/MCI_Control_Info.html
Examples
--------
Create a 100Hz sine wave, sampled at 44100Hz.
Write to 16-bit PCM, Mono.
>>> from scipy.io.wavfile import write
>>> samplerate = 44100; fs = 100
>>> t = np.linspace(0., 1., samplerate)
>>> amplitude = np.iinfo(np.int16).max
>>> data = amplitude * np.sin(2. * np.pi * fs * t)
>>> write("example.wav", samplerate, data)
|
scipy/io/wavfile.py
|
write
|
AKuederle/scipy
|
python
|
def write(filename, rate, data):
'\n Write a NumPy array as a WAV file.\n\n Parameters\n ----------\n filename : string or open file handle\n Output wav file.\n rate : int\n The sample rate (in samples/sec).\n data : ndarray\n A 1-D or 2-D NumPy array of either integer or float data-type.\n\n Notes\n -----\n * Writes a simple uncompressed WAV file.\n * To write multiple-channels, use a 2-D array of shape\n (Nsamples, Nchannels).\n * The bits-per-sample and PCM/float will be determined by the data-type.\n\n Common data types: [1]_\n\n ===================== =========== =========== =============\n WAV format Min Max NumPy dtype\n ===================== =========== =========== =============\n 32-bit floating-point -1.0 +1.0 float32\n 32-bit PCM -2147483648 +2147483647 int32\n 16-bit PCM -32768 +32767 int16\n 8-bit PCM 0 255 uint8\n ===================== =========== =========== =============\n\n Note that 8-bit PCM is unsigned.\n\n References\n ----------\n .. [1] IBM Corporation and Microsoft Corporation, "Multimedia Programming\n Interface and Data Specifications 1.0", section "Data Format of the\n Samples", August 1991\n http://www.tactilemedia.com/info/MCI_Control_Info.html\n\n Examples\n --------\n Create a 100Hz sine wave, sampled at 44100Hz.\n Write to 16-bit PCM, Mono.\n\n >>> from scipy.io.wavfile import write\n >>> samplerate = 44100; fs = 100\n >>> t = np.linspace(0., 1., samplerate)\n >>> amplitude = np.iinfo(np.int16).max\n >>> data = amplitude * np.sin(2. * np.pi * fs * t)\n >>> write("example.wav", samplerate, data)\n\n '
if hasattr(filename, 'write'):
fid = filename
else:
fid = open(filename, 'wb')
fs = rate
try:
dkind = data.dtype.kind
if (not ((dkind == 'i') or (dkind == 'f') or ((dkind == 'u') and (data.dtype.itemsize == 1)))):
raise ValueError(("Unsupported data type '%s'" % data.dtype))
header_data = b
header_data += b'RIFF'
header_data += b'\x00\x00\x00\x00'
header_data += b'WAVE'
header_data += b'fmt '
if (dkind == 'f'):
format_tag = WAVE_FORMAT_IEEE_FLOAT
else:
format_tag = WAVE_FORMAT_PCM
if (data.ndim == 1):
channels = 1
else:
channels = data.shape[1]
bit_depth = (data.dtype.itemsize * 8)
bytes_per_second = ((fs * (bit_depth // 8)) * channels)
block_align = (channels * (bit_depth // 8))
fmt_chunk_data = struct.pack('<HHIIHH', format_tag, channels, fs, bytes_per_second, block_align, bit_depth)
if (not ((dkind == 'i') or (dkind == 'u'))):
fmt_chunk_data += b'\x00\x00'
header_data += struct.pack('<I', len(fmt_chunk_data))
header_data += fmt_chunk_data
if (not ((dkind == 'i') or (dkind == 'u'))):
header_data += b'fact'
header_data += struct.pack('<II', 4, data.shape[0])
if ((((len(header_data) - 4) - 4) + ((4 + 4) + data.nbytes)) > 4294967295):
raise ValueError('Data exceeds wave file size limit')
fid.write(header_data)
fid.write(b'data')
fid.write(struct.pack('<I', data.nbytes))
if ((data.dtype.byteorder == '>') or ((data.dtype.byteorder == '=') and (sys.byteorder == 'big'))):
data = data.byteswap()
_array_tofile(fid, data)
size = fid.tell()
fid.seek(4)
fid.write(struct.pack('<I', (size - 8)))
finally:
if (not hasattr(filename, 'write')):
fid.close()
else:
fid.seek(0)
|
def StreptomycesSpNrrlf5008(directed: bool=False, preprocess: bool=True, load_nodes: bool=True, verbose: int=2, cache: bool=True, cache_path: str='graphs/string', version: str='links.v11.5', **additional_graph_kwargs: Dict) -> Graph:
'Return new instance of the Streptomyces sp. NRRLF5008 graph.\n\n The graph is automatically retrieved from the STRING repository.\t\n\n Parameters\n -------------------\n directed: bool = False\n Wether to load the graph as directed or undirected.\n By default false.\n preprocess: bool = True\n Whether to preprocess the graph to be loaded in \n optimal time and memory.\n load_nodes: bool = True,\n Whether to load the nodes vocabulary or treat the nodes\n simply as a numeric range.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache: bool = True\n Whether to use cache, i.e. download files only once\n and preprocess them only once.\n cache_path: str = "graphs"\n Where to store the downloaded graphs.\n version: str = "links.v11.5"\n The version of the graph to retrieve.\t\t\n\tThe available versions are:\n\t\t\t- homology.v11.0\n\t\t\t- homology.v11.5\n\t\t\t- physical.links.v11.0\n\t\t\t- physical.links.v11.5\n\t\t\t- links.v11.0\n\t\t\t- links.v11.5\n additional_graph_kwargs: Dict\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Streptomyces sp. NRRLF5008 graph.\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t```bib\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t```\n '
return AutomaticallyRetrievedGraph(graph_name='StreptomycesSpNrrlf5008', repository='string', version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
| 1,039,991,421,972,142,100
|
Return new instance of the Streptomyces sp. NRRLF5008 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.0
- homology.v11.5
- physical.links.v11.0
- physical.links.v11.5
- links.v11.0
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Streptomyces sp. NRRLF5008 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
|
bindings/python/ensmallen/datasets/string/streptomycesspnrrlf5008.py
|
StreptomycesSpNrrlf5008
|
AnacletoLAB/ensmallen
|
python
|
def StreptomycesSpNrrlf5008(directed: bool=False, preprocess: bool=True, load_nodes: bool=True, verbose: int=2, cache: bool=True, cache_path: str='graphs/string', version: str='links.v11.5', **additional_graph_kwargs: Dict) -> Graph:
'Return new instance of the Streptomyces sp. NRRLF5008 graph.\n\n The graph is automatically retrieved from the STRING repository.\t\n\n Parameters\n -------------------\n directed: bool = False\n Wether to load the graph as directed or undirected.\n By default false.\n preprocess: bool = True\n Whether to preprocess the graph to be loaded in \n optimal time and memory.\n load_nodes: bool = True,\n Whether to load the nodes vocabulary or treat the nodes\n simply as a numeric range.\n verbose: int = 2,\n Wether to show loading bars during the retrieval and building\n of the graph.\n cache: bool = True\n Whether to use cache, i.e. download files only once\n and preprocess them only once.\n cache_path: str = "graphs"\n Where to store the downloaded graphs.\n version: str = "links.v11.5"\n The version of the graph to retrieve.\t\t\n\tThe available versions are:\n\t\t\t- homology.v11.0\n\t\t\t- homology.v11.5\n\t\t\t- physical.links.v11.0\n\t\t\t- physical.links.v11.5\n\t\t\t- links.v11.0\n\t\t\t- links.v11.5\n additional_graph_kwargs: Dict\n Additional graph kwargs.\n\n Returns\n -----------------------\n Instace of Streptomyces sp. NRRLF5008 graph.\n\n\tReferences\n\t---------------------\n\tPlease cite the following if you use the data:\n\t\n\t```bib\n\t@article{szklarczyk2019string,\n\t title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},\n\t author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},\n\t journal={Nucleic acids research},\n\t volume={47},\n\t number={D1},\n\t pages={D607--D613},\n\t year={2019},\n\t publisher={Oxford University Press}\n\t}\n\t```\n '
return AutomaticallyRetrievedGraph(graph_name='StreptomycesSpNrrlf5008', repository='string', version=version, directed=directed, preprocess=preprocess, load_nodes=load_nodes, verbose=verbose, cache=cache, cache_path=cache_path, additional_graph_kwargs=additional_graph_kwargs)()
|
def compute_mask(coords, indices):
'\n Gets the mask for the coords given the indices in slice format.\n\n Works with either start-stop ranges of matching indices into coords\n called "pairs" (start-stop pairs) or filters the mask directly, based\n on which is faster.\n\n Exploits the structure in sorted coords, which is that for a constant\n value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted.\n Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]``\n is always sorted. It uses this sortedness to find sub-pairs for each\n dimension given the previous, and so on. This is efficient for small\n slices or ints, but not for large ones.\n\n After it detects that working with pairs is rather inefficient (or after\n going through each possible index), it constructs a filtered mask from the\n start-stop pairs.\n\n Parameters\n ----------\n coords : np.ndarray\n The coordinates of the array.\n indices : np.ndarray\n The indices in the form of slices such that indices[:, 0] are starts,\n indices[:, 1] are stops and indices[:, 2] are steps.\n\n Returns\n -------\n mask : np.ndarray\n The starts and stops in the mask.\n is_slice : bool\n Whether or not the array represents a continuous slice.\n\n Examples\n --------\n Let\'s create some mock coords and indices\n\n >>> import numpy as np\n >>> coords = np.array([[0, 0, 1, 1, 2, 2]])\n >>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2)\n\n Now let\'s get the mask. Notice that the indices of ``0`` and ``2`` are matched.\n\n >>> _compute_mask(coords, indices)\n (array([0, 1, 4, 5]), False)\n\n Now, let\'s try with a more "continuous" slice. Matches ``0`` and ``1``.\n\n >>> indices = np.array([[0, 2, 1]])\n >>> _compute_mask(coords, indices)\n (array([0, 4]), True)\n\n This is equivalent to mask being ``slice(0, 4, 1)``.\n '
starts = [0]
stops = [coords.shape[1]]
n_matches = coords.shape[1]
i = 0
while (i < len(indices)):
n_pairs = len(starts)
n_current_slices = ((_get_slice_len(indices[i]) * n_pairs) + 2)
if ((n_current_slices * np.log((n_current_slices / max(n_pairs, 1)))) > (n_matches + n_pairs)):
break
(starts, stops, n_matches) = _get_mask_pairs(starts, stops, coords[i], indices[i])
i += 1
(starts, stops) = _join_adjacent_pairs(starts, stops)
if ((i == len(indices)) and (len(starts) == 1)):
return (np.array([starts[0], stops[0]]), True)
mask = _filter_pairs(starts, stops, coords[i:], indices[i:])
return (np.array(mask, dtype=np.intp), False)
| -8,573,733,571,166,544,000
|
Gets the mask for the coords given the indices in slice format.
Works with either start-stop ranges of matching indices into coords
called "pairs" (start-stop pairs) or filters the mask directly, based
on which is faster.
Exploits the structure in sorted coords, which is that for a constant
value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted.
Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]``
is always sorted. It uses this sortedness to find sub-pairs for each
dimension given the previous, and so on. This is efficient for small
slices or ints, but not for large ones.
After it detects that working with pairs is rather inefficient (or after
going through each possible index), it constructs a filtered mask from the
start-stop pairs.
Parameters
----------
coords : np.ndarray
The coordinates of the array.
indices : np.ndarray
The indices in the form of slices such that indices[:, 0] are starts,
indices[:, 1] are stops and indices[:, 2] are steps.
Returns
-------
mask : np.ndarray
The starts and stops in the mask.
is_slice : bool
Whether or not the array represents a continuous slice.
Examples
--------
Let's create some mock coords and indices
>>> import numpy as np
>>> coords = np.array([[0, 0, 1, 1, 2, 2]])
>>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2)
Now let's get the mask. Notice that the indices of ``0`` and ``2`` are matched.
>>> _compute_mask(coords, indices)
(array([0, 1, 4, 5]), False)
Now, let's try with a more "continuous" slice. Matches ``0`` and ``1``.
>>> indices = np.array([[0, 2, 1]])
>>> _compute_mask(coords, indices)
(array([0, 4]), True)
This is equivalent to mask being ``slice(0, 4, 1)``.
|
pythran/tests/pydata/compute_mask.py
|
compute_mask
|
AlifeLines/pythran
|
python
|
def compute_mask(coords, indices):
'\n Gets the mask for the coords given the indices in slice format.\n\n Works with either start-stop ranges of matching indices into coords\n called "pairs" (start-stop pairs) or filters the mask directly, based\n on which is faster.\n\n Exploits the structure in sorted coords, which is that for a constant\n value of coords[i - 1], coords[i - 2] and so on, coords[i] is sorted.\n Concretely, ``coords[i, coords[i - 1] == v1 & coords[i - 2] = v2, ...]``\n is always sorted. It uses this sortedness to find sub-pairs for each\n dimension given the previous, and so on. This is efficient for small\n slices or ints, but not for large ones.\n\n After it detects that working with pairs is rather inefficient (or after\n going through each possible index), it constructs a filtered mask from the\n start-stop pairs.\n\n Parameters\n ----------\n coords : np.ndarray\n The coordinates of the array.\n indices : np.ndarray\n The indices in the form of slices such that indices[:, 0] are starts,\n indices[:, 1] are stops and indices[:, 2] are steps.\n\n Returns\n -------\n mask : np.ndarray\n The starts and stops in the mask.\n is_slice : bool\n Whether or not the array represents a continuous slice.\n\n Examples\n --------\n Let\'s create some mock coords and indices\n\n >>> import numpy as np\n >>> coords = np.array([[0, 0, 1, 1, 2, 2]])\n >>> indices = np.array([[0, 3, 2]]) # Equivalent to slice(0, 3, 2)\n\n Now let\'s get the mask. Notice that the indices of ``0`` and ``2`` are matched.\n\n >>> _compute_mask(coords, indices)\n (array([0, 1, 4, 5]), False)\n\n Now, let\'s try with a more "continuous" slice. Matches ``0`` and ``1``.\n\n >>> indices = np.array([[0, 2, 1]])\n >>> _compute_mask(coords, indices)\n (array([0, 4]), True)\n\n This is equivalent to mask being ``slice(0, 4, 1)``.\n '
starts = [0]
stops = [coords.shape[1]]
n_matches = coords.shape[1]
i = 0
while (i < len(indices)):
n_pairs = len(starts)
n_current_slices = ((_get_slice_len(indices[i]) * n_pairs) + 2)
if ((n_current_slices * np.log((n_current_slices / max(n_pairs, 1)))) > (n_matches + n_pairs)):
break
(starts, stops, n_matches) = _get_mask_pairs(starts, stops, coords[i], indices[i])
i += 1
(starts, stops) = _join_adjacent_pairs(starts, stops)
if ((i == len(indices)) and (len(starts) == 1)):
return (np.array([starts[0], stops[0]]), True)
mask = _filter_pairs(starts, stops, coords[i:], indices[i:])
return (np.array(mask, dtype=np.intp), False)
|
def _get_slice_len(idx):
'\n Get the number of elements in a slice.\n\n Parameters\n ----------\n idx : np.ndarray\n A (3,) shaped array containing start, stop, step\n\n Returns\n -------\n n : int\n The length of the slice.\n\n Examples\n --------\n >>> idx = np.array([5, 15, 5])\n >>> _get_slice_len(idx)\n 2\n '
(start, stop, step) = (idx[0], idx[1], idx[2])
if (step > 0):
return ((((stop - start) + step) - 1) // step)
else:
return ((((start - stop) - step) - 1) // (- step))
| -4,259,978,124,523,301,000
|
Get the number of elements in a slice.
Parameters
----------
idx : np.ndarray
A (3,) shaped array containing start, stop, step
Returns
-------
n : int
The length of the slice.
Examples
--------
>>> idx = np.array([5, 15, 5])
>>> _get_slice_len(idx)
2
|
pythran/tests/pydata/compute_mask.py
|
_get_slice_len
|
AlifeLines/pythran
|
python
|
def _get_slice_len(idx):
'\n Get the number of elements in a slice.\n\n Parameters\n ----------\n idx : np.ndarray\n A (3,) shaped array containing start, stop, step\n\n Returns\n -------\n n : int\n The length of the slice.\n\n Examples\n --------\n >>> idx = np.array([5, 15, 5])\n >>> _get_slice_len(idx)\n 2\n '
(start, stop, step) = (idx[0], idx[1], idx[2])
if (step > 0):
return ((((stop - start) + step) - 1) // step)
else:
return ((((start - stop) - step) - 1) // (- step))
|
def _get_mask_pairs(starts_old, stops_old, c, idx):
"\n Gets the pairs for a following dimension given the pairs for\n a dimension.\n\n For each pair, it searches in the following dimension for\n matching coords and returns those.\n\n The total combined length of all pairs is returned to\n help with the performance guesstimate.\n\n Parameters\n ----------\n starts_old, stops_old : list[int]\n The starts and stops from the previous index.\n c : np.ndarray\n The coords for this index's dimension.\n idx : np.ndarray\n The index in the form of a slice.\n idx[0], idx[1], idx[2] = start, stop, step\n\n Returns\n -------\n starts, stops: list\n The starts and stops after applying the current index.\n n_matches : int\n The sum of elements in all ranges.\n\n Examples\n --------\n >>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2])\n >>> starts_old = [4]\n >>> stops_old = [8]\n >>> idx = np.array([1, 2, 1])\n >>> _get_mask_pairs(starts_old, stops_old, c, idx)\n ([4], [6], 2)\n "
starts = []
stops = []
n_matches = 0
for j in range(len(starts_old)):
for p_match in range(idx[0], idx[1], idx[2]):
start = (np.searchsorted(c[starts_old[j]:stops_old[j]], p_match) + starts_old[j])
stop = (np.searchsorted(c[starts_old[j]:stops_old[j]], (p_match + 1)) + starts_old[j])
if (start != stop):
starts.append(start)
stops.append(stop)
n_matches += (stop - start)
return (starts, stops, n_matches)
| -5,647,724,412,426,087,000
|
Gets the pairs for a following dimension given the pairs for
a dimension.
For each pair, it searches in the following dimension for
matching coords and returns those.
The total combined length of all pairs is returned to
help with the performance guesstimate.
Parameters
----------
starts_old, stops_old : list[int]
The starts and stops from the previous index.
c : np.ndarray
The coords for this index's dimension.
idx : np.ndarray
The index in the form of a slice.
idx[0], idx[1], idx[2] = start, stop, step
Returns
-------
starts, stops: list
The starts and stops after applying the current index.
n_matches : int
The sum of elements in all ranges.
Examples
--------
>>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2])
>>> starts_old = [4]
>>> stops_old = [8]
>>> idx = np.array([1, 2, 1])
>>> _get_mask_pairs(starts_old, stops_old, c, idx)
([4], [6], 2)
|
pythran/tests/pydata/compute_mask.py
|
_get_mask_pairs
|
AlifeLines/pythran
|
python
|
def _get_mask_pairs(starts_old, stops_old, c, idx):
"\n Gets the pairs for a following dimension given the pairs for\n a dimension.\n\n For each pair, it searches in the following dimension for\n matching coords and returns those.\n\n The total combined length of all pairs is returned to\n help with the performance guesstimate.\n\n Parameters\n ----------\n starts_old, stops_old : list[int]\n The starts and stops from the previous index.\n c : np.ndarray\n The coords for this index's dimension.\n idx : np.ndarray\n The index in the form of a slice.\n idx[0], idx[1], idx[2] = start, stop, step\n\n Returns\n -------\n starts, stops: list\n The starts and stops after applying the current index.\n n_matches : int\n The sum of elements in all ranges.\n\n Examples\n --------\n >>> c = np.array([1, 2, 1, 2, 1, 1, 2, 2])\n >>> starts_old = [4]\n >>> stops_old = [8]\n >>> idx = np.array([1, 2, 1])\n >>> _get_mask_pairs(starts_old, stops_old, c, idx)\n ([4], [6], 2)\n "
starts = []
stops = []
n_matches = 0
for j in range(len(starts_old)):
for p_match in range(idx[0], idx[1], idx[2]):
start = (np.searchsorted(c[starts_old[j]:stops_old[j]], p_match) + starts_old[j])
stop = (np.searchsorted(c[starts_old[j]:stops_old[j]], (p_match + 1)) + starts_old[j])
if (start != stop):
starts.append(start)
stops.append(stop)
n_matches += (stop - start)
return (starts, stops, n_matches)
|
def _join_adjacent_pairs(starts_old, stops_old):
'\n Joins adjacent pairs into one. For example, 2-5 and 5-7\n will reduce to 2-7 (a single pair). This may help in\n returning a slice in the end which could be faster.\n\n Parameters\n ----------\n starts_old, stops_old : list[int]\n The input starts and stops\n\n Returns\n -------\n starts, stops : list[int]\n The reduced starts and stops.\n\n Examples\n --------\n >>> starts = [2, 5]\n >>> stops = [5, 7]\n >>> _join_adjacent_pairs(starts, stops)\n ([2], [7])\n '
if (len(starts_old) <= 1):
return (starts_old, stops_old)
starts = [starts_old[0]]
stops = []
for i in range(1, len(starts_old)):
if (starts_old[i] != stops_old[(i - 1)]):
starts.append(starts_old[i])
stops.append(stops_old[(i - 1)])
stops.append(stops_old[(- 1)])
return (starts, stops)
| -2,693,863,116,294,275,000
|
Joins adjacent pairs into one. For example, 2-5 and 5-7
will reduce to 2-7 (a single pair). This may help in
returning a slice in the end which could be faster.
Parameters
----------
starts_old, stops_old : list[int]
The input starts and stops
Returns
-------
starts, stops : list[int]
The reduced starts and stops.
Examples
--------
>>> starts = [2, 5]
>>> stops = [5, 7]
>>> _join_adjacent_pairs(starts, stops)
([2], [7])
|
pythran/tests/pydata/compute_mask.py
|
_join_adjacent_pairs
|
AlifeLines/pythran
|
python
|
def _join_adjacent_pairs(starts_old, stops_old):
'\n Joins adjacent pairs into one. For example, 2-5 and 5-7\n will reduce to 2-7 (a single pair). This may help in\n returning a slice in the end which could be faster.\n\n Parameters\n ----------\n starts_old, stops_old : list[int]\n The input starts and stops\n\n Returns\n -------\n starts, stops : list[int]\n The reduced starts and stops.\n\n Examples\n --------\n >>> starts = [2, 5]\n >>> stops = [5, 7]\n >>> _join_adjacent_pairs(starts, stops)\n ([2], [7])\n '
if (len(starts_old) <= 1):
return (starts_old, stops_old)
starts = [starts_old[0]]
stops = []
for i in range(1, len(starts_old)):
if (starts_old[i] != stops_old[(i - 1)]):
starts.append(starts_old[i])
stops.append(stops_old[(i - 1)])
stops.append(stops_old[(- 1)])
return (starts, stops)
|
def _filter_pairs(starts, stops, coords, indices):
'\n Converts all the pairs into a single integer mask, additionally filtering\n by the indices.\n\n Parameters\n ----------\n starts, stops : list[int]\n The starts and stops to convert into an array.\n coords : np.ndarray\n The coordinates to filter by.\n indices : np.ndarray\n The indices in the form of slices such that indices[:, 0] are starts,\n indices[:, 1] are stops and indices[:, 2] are steps.\n\n Returns\n -------\n mask : list\n The output integer mask.\n\n Examples\n --------\n >>> import numpy as np\n >>> starts = [2]\n >>> stops = [7]\n >>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]])\n >>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs\n >>> _filter_pairs(starts, stops, coords, indices)\n [2, 4, 6]\n '
mask = []
for i in range(len(starts)):
for j in range(starts[i], stops[i]):
match = True
for k in range(len(indices)):
idx = indices[k]
elem = coords[(k, j)]
match &= ((((elem - idx[0]) % idx[2]) == 0) and (((idx[2] > 0) and (idx[0] <= elem < idx[1])) or ((idx[2] < 0) and (idx[0] >= elem > idx[1]))))
if match:
mask.append(j)
return mask
| 1,796,064,902,100,777,000
|
Converts all the pairs into a single integer mask, additionally filtering
by the indices.
Parameters
----------
starts, stops : list[int]
The starts and stops to convert into an array.
coords : np.ndarray
The coordinates to filter by.
indices : np.ndarray
The indices in the form of slices such that indices[:, 0] are starts,
indices[:, 1] are stops and indices[:, 2] are steps.
Returns
-------
mask : list
The output integer mask.
Examples
--------
>>> import numpy as np
>>> starts = [2]
>>> stops = [7]
>>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]])
>>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs
>>> _filter_pairs(starts, stops, coords, indices)
[2, 4, 6]
|
pythran/tests/pydata/compute_mask.py
|
_filter_pairs
|
AlifeLines/pythran
|
python
|
def _filter_pairs(starts, stops, coords, indices):
'\n Converts all the pairs into a single integer mask, additionally filtering\n by the indices.\n\n Parameters\n ----------\n starts, stops : list[int]\n The starts and stops to convert into an array.\n coords : np.ndarray\n The coordinates to filter by.\n indices : np.ndarray\n The indices in the form of slices such that indices[:, 0] are starts,\n indices[:, 1] are stops and indices[:, 2] are steps.\n\n Returns\n -------\n mask : list\n The output integer mask.\n\n Examples\n --------\n >>> import numpy as np\n >>> starts = [2]\n >>> stops = [7]\n >>> coords = np.array([[0, 1, 2, 3, 4, 5, 6, 7]])\n >>> indices = np.array([[2, 8, 2]]) # Start, stop, step pairs\n >>> _filter_pairs(starts, stops, coords, indices)\n [2, 4, 6]\n '
mask = []
for i in range(len(starts)):
for j in range(starts[i], stops[i]):
match = True
for k in range(len(indices)):
idx = indices[k]
elem = coords[(k, j)]
match &= ((((elem - idx[0]) % idx[2]) == 0) and (((idx[2] > 0) and (idx[0] <= elem < idx[1])) or ((idx[2] < 0) and (idx[0] >= elem > idx[1]))))
if match:
mask.append(j)
return mask
|
def collect_files(img_dir, gt_dir):
'Collect all images and their corresponding groundtruth files.\n\n Args:\n img_dir (str): The image directory\n gt_dir (str): The groundtruth directory\n\n Returns:\n files (list): The list of tuples (img_file, groundtruth_file)\n '
assert isinstance(img_dir, str)
assert img_dir
assert isinstance(gt_dir, str)
assert gt_dir
(ann_list, imgs_list) = ([], [])
for gt_file in os.listdir(gt_dir):
ann_list.append(osp.join(gt_dir, gt_file))
imgs_list.append(osp.join(img_dir, gt_file.replace('.json', '.png')))
files = list(zip(sorted(imgs_list), sorted(ann_list)))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
| 18,978,806,529,696,932
|
Collect all images and their corresponding groundtruth files.
Args:
img_dir (str): The image directory
gt_dir (str): The groundtruth directory
Returns:
files (list): The list of tuples (img_file, groundtruth_file)
|
tools/data/textdet/funsd_converter.py
|
collect_files
|
GHuiXin/mmocr
|
python
|
def collect_files(img_dir, gt_dir):
'Collect all images and their corresponding groundtruth files.\n\n Args:\n img_dir (str): The image directory\n gt_dir (str): The groundtruth directory\n\n Returns:\n files (list): The list of tuples (img_file, groundtruth_file)\n '
assert isinstance(img_dir, str)
assert img_dir
assert isinstance(gt_dir, str)
assert gt_dir
(ann_list, imgs_list) = ([], [])
for gt_file in os.listdir(gt_dir):
ann_list.append(osp.join(gt_dir, gt_file))
imgs_list.append(osp.join(img_dir, gt_file.replace('.json', '.png')))
files = list(zip(sorted(imgs_list), sorted(ann_list)))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
|
def collect_annotations(files, nproc=1):
'Collect the annotation information.\n\n Args:\n files (list): The list of tuples (image_file, groundtruth_file)\n nproc (int): The number of process to collect annotations\n\n Returns:\n images (list): The list of image information dicts\n '
assert isinstance(files, list)
assert isinstance(nproc, int)
if (nproc > 1):
images = mmcv.track_parallel_progress(load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
| -5,933,038,107,388,529,000
|
Collect the annotation information.
Args:
files (list): The list of tuples (image_file, groundtruth_file)
nproc (int): The number of process to collect annotations
Returns:
images (list): The list of image information dicts
|
tools/data/textdet/funsd_converter.py
|
collect_annotations
|
GHuiXin/mmocr
|
python
|
def collect_annotations(files, nproc=1):
'Collect the annotation information.\n\n Args:\n files (list): The list of tuples (image_file, groundtruth_file)\n nproc (int): The number of process to collect annotations\n\n Returns:\n images (list): The list of image information dicts\n '
assert isinstance(files, list)
assert isinstance(nproc, int)
if (nproc > 1):
images = mmcv.track_parallel_progress(load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
|
def load_img_info(files):
'Load the information of one image.\n\n Args:\n files (tuple): The tuple of (img_file, groundtruth_file)\n\n Returns:\n img_info (dict): The dict of the img and annotation information\n '
assert isinstance(files, tuple)
(img_file, gt_file) = files
assert (osp.basename(gt_file).split('.')[0] == osp.basename(img_file).split('.')[0])
img = mmcv.imread(img_file, 'unchanged')
img_info = dict(file_name=osp.join(osp.basename(img_file)), height=img.shape[0], width=img.shape[1], segm_file=osp.join(osp.basename(gt_file)))
if (osp.splitext(gt_file)[1] == '.json'):
img_info = load_json_info(gt_file, img_info)
else:
raise NotImplementedError
return img_info
| 7,809,661,699,419,274,000
|
Load the information of one image.
Args:
files (tuple): The tuple of (img_file, groundtruth_file)
Returns:
img_info (dict): The dict of the img and annotation information
|
tools/data/textdet/funsd_converter.py
|
load_img_info
|
GHuiXin/mmocr
|
python
|
def load_img_info(files):
'Load the information of one image.\n\n Args:\n files (tuple): The tuple of (img_file, groundtruth_file)\n\n Returns:\n img_info (dict): The dict of the img and annotation information\n '
assert isinstance(files, tuple)
(img_file, gt_file) = files
assert (osp.basename(gt_file).split('.')[0] == osp.basename(img_file).split('.')[0])
img = mmcv.imread(img_file, 'unchanged')
img_info = dict(file_name=osp.join(osp.basename(img_file)), height=img.shape[0], width=img.shape[1], segm_file=osp.join(osp.basename(gt_file)))
if (osp.splitext(gt_file)[1] == '.json'):
img_info = load_json_info(gt_file, img_info)
else:
raise NotImplementedError
return img_info
|
def load_json_info(gt_file, img_info):
'Collect the annotation information.\n\n Args:\n gt_file (str): The path to ground-truth\n img_info (dict): The dict of the img and annotation information\n\n Returns:\n img_info (dict): The dict of the img and annotation information\n '
annotation = mmcv.load(gt_file)
anno_info = []
for form in annotation['form']:
for ann in form['words']:
iscrowd = (1 if (len(ann['text']) == 0) else 0)
(x1, y1, x2, y2) = ann['box']
x = max(0, min(math.floor(x1), math.floor(x2)))
y = max(0, min(math.floor(y1), math.floor(y2)))
(w, h) = (math.ceil(abs((x2 - x1))), math.ceil(abs((y2 - y1))))
bbox = [x, y, w, h]
segmentation = [x, y, (x + w), y, (x + w), (y + h), x, (y + h)]
anno = dict(iscrowd=iscrowd, category_id=1, bbox=bbox, area=(w * h), segmentation=[segmentation])
anno_info.append(anno)
img_info.update(anno_info=anno_info)
return img_info
| 8,850,481,707,175,886,000
|
Collect the annotation information.
Args:
gt_file (str): The path to ground-truth
img_info (dict): The dict of the img and annotation information
Returns:
img_info (dict): The dict of the img and annotation information
|
tools/data/textdet/funsd_converter.py
|
load_json_info
|
GHuiXin/mmocr
|
python
|
def load_json_info(gt_file, img_info):
'Collect the annotation information.\n\n Args:\n gt_file (str): The path to ground-truth\n img_info (dict): The dict of the img and annotation information\n\n Returns:\n img_info (dict): The dict of the img and annotation information\n '
annotation = mmcv.load(gt_file)
anno_info = []
for form in annotation['form']:
for ann in form['words']:
iscrowd = (1 if (len(ann['text']) == 0) else 0)
(x1, y1, x2, y2) = ann['box']
x = max(0, min(math.floor(x1), math.floor(x2)))
y = max(0, min(math.floor(y1), math.floor(y2)))
(w, h) = (math.ceil(abs((x2 - x1))), math.ceil(abs((y2 - y1))))
bbox = [x, y, w, h]
segmentation = [x, y, (x + w), y, (x + w), (y + h), x, (y + h)]
anno = dict(iscrowd=iscrowd, category_id=1, bbox=bbox, area=(w * h), segmentation=[segmentation])
anno_info.append(anno)
img_info.update(anno_info=anno_info)
return img_info
|
def optimize(expr, optimizations):
" Apply optimizations to an expression.\n\n Parameters\n ==========\n\n expr : expression\n optimizations : iterable of ``Optimization`` instances\n The optimizations will be sorted with respect to ``priority`` (highest first).\n\n Examples\n ========\n\n >>> from sympy import log, Symbol\n >>> from sympy.codegen.rewriting import optims_c99, optimize\n >>> x = Symbol('x')\n >>> optimize(log(x+3)/log(2) + log(x**2 + 1), optims_c99)\n log1p(x**2) + log2(x + 3)\n\n "
for optim in sorted(optimizations, key=(lambda opt: opt.priority), reverse=True):
new_expr = optim(expr)
if (optim.cost_function is None):
expr = new_expr
else:
(before, after) = map((lambda x: optim.cost_function(x)), (expr, new_expr))
if (before > after):
expr = new_expr
return expr
| 1,866,487,192,515,732,200
|
Apply optimizations to an expression.
Parameters
==========
expr : expression
optimizations : iterable of ``Optimization`` instances
The optimizations will be sorted with respect to ``priority`` (highest first).
Examples
========
>>> from sympy import log, Symbol
>>> from sympy.codegen.rewriting import optims_c99, optimize
>>> x = Symbol('x')
>>> optimize(log(x+3)/log(2) + log(x**2 + 1), optims_c99)
log1p(x**2) + log2(x + 3)
|
sympy/codegen/rewriting.py
|
optimize
|
Abhishek-IOT/sympy
|
python
|
def optimize(expr, optimizations):
" Apply optimizations to an expression.\n\n Parameters\n ==========\n\n expr : expression\n optimizations : iterable of ``Optimization`` instances\n The optimizations will be sorted with respect to ``priority`` (highest first).\n\n Examples\n ========\n\n >>> from sympy import log, Symbol\n >>> from sympy.codegen.rewriting import optims_c99, optimize\n >>> x = Symbol('x')\n >>> optimize(log(x+3)/log(2) + log(x**2 + 1), optims_c99)\n log1p(x**2) + log2(x + 3)\n\n "
for optim in sorted(optimizations, key=(lambda opt: opt.priority), reverse=True):
new_expr = optim(expr)
if (optim.cost_function is None):
expr = new_expr
else:
(before, after) = map((lambda x: optim.cost_function(x)), (expr, new_expr))
if (before > after):
expr = new_expr
return expr
|
def create_expand_pow_optimization(limit):
" Creates an instance of :class:`ReplaceOptim` for expanding ``Pow``.\n\n The requirements for expansions are that the base needs to be a symbol\n and the exponent needs to be an Integer (and be less than or equal to\n ``limit``).\n\n Parameters\n ==========\n\n limit : int\n The highest power which is expanded into multiplication.\n\n Examples\n ========\n\n >>> from sympy import Symbol, sin\n >>> from sympy.codegen.rewriting import create_expand_pow_optimization\n >>> x = Symbol('x')\n >>> expand_opt = create_expand_pow_optimization(3)\n >>> expand_opt(x**5 + x**3)\n x**5 + x*x*x\n >>> expand_opt(x**5 + x**3 + sin(x)**3)\n x**5 + sin(x)**3 + x*x*x\n\n "
return ReplaceOptim((lambda e: (e.is_Pow and e.base.is_symbol and e.exp.is_Integer and (abs(e.exp) <= limit))), (lambda p: (UnevaluatedExpr(Mul(*([p.base] * (+ p.exp)), evaluate=False)) if (p.exp > 0) else (1 / UnevaluatedExpr(Mul(*([p.base] * (- p.exp)), evaluate=False))))))
| 6,880,643,915,469,284,000
|
Creates an instance of :class:`ReplaceOptim` for expanding ``Pow``.
The requirements for expansions are that the base needs to be a symbol
and the exponent needs to be an Integer (and be less than or equal to
``limit``).
Parameters
==========
limit : int
The highest power which is expanded into multiplication.
Examples
========
>>> from sympy import Symbol, sin
>>> from sympy.codegen.rewriting import create_expand_pow_optimization
>>> x = Symbol('x')
>>> expand_opt = create_expand_pow_optimization(3)
>>> expand_opt(x**5 + x**3)
x**5 + x*x*x
>>> expand_opt(x**5 + x**3 + sin(x)**3)
x**5 + sin(x)**3 + x*x*x
|
sympy/codegen/rewriting.py
|
create_expand_pow_optimization
|
Abhishek-IOT/sympy
|
python
|
def create_expand_pow_optimization(limit):
" Creates an instance of :class:`ReplaceOptim` for expanding ``Pow``.\n\n The requirements for expansions are that the base needs to be a symbol\n and the exponent needs to be an Integer (and be less than or equal to\n ``limit``).\n\n Parameters\n ==========\n\n limit : int\n The highest power which is expanded into multiplication.\n\n Examples\n ========\n\n >>> from sympy import Symbol, sin\n >>> from sympy.codegen.rewriting import create_expand_pow_optimization\n >>> x = Symbol('x')\n >>> expand_opt = create_expand_pow_optimization(3)\n >>> expand_opt(x**5 + x**3)\n x**5 + x*x*x\n >>> expand_opt(x**5 + x**3 + sin(x)**3)\n x**5 + sin(x)**3 + x*x*x\n\n "
return ReplaceOptim((lambda e: (e.is_Pow and e.base.is_symbol and e.exp.is_Integer and (abs(e.exp) <= limit))), (lambda p: (UnevaluatedExpr(Mul(*([p.base] * (+ p.exp)), evaluate=False)) if (p.exp > 0) else (1 / UnevaluatedExpr(Mul(*([p.base] * (- p.exp)), evaluate=False))))))
|
def main():
'Main function. Read coordinates, fetch addresses and write on file.'
logfile = open(LOGNAME, 'r')
datafile = open(DATANAME, 'w')
logfile.readline()
print('fetching addresses...')
line = logfile.readline()
while ((not line.startswith('***')) and line.strip()):
(cat, lat, lng) = line.split(';')
latlng = ('%s,%s' % (lat, lng))
params = {'latlng': latlng}
req = requests.get(GOOGLE_MAPS_API_URL, params=params)
res = req.json()
print(res)
result = res['results'][0]
address = result['formatted_address']
datafile.write(('%s en %s |%s,%s' % (cat, address.partition(',')[0], lat, lng)))
line = logfile.readline()
logfile.close()
datafile.close()
print('done.')
| 987,414,624,452,161,200
|
Main function. Read coordinates, fetch addresses and write on file.
|
locate.py
|
main
|
jdnietov/wazeReading
|
python
|
def main():
logfile = open(LOGNAME, 'r')
datafile = open(DATANAME, 'w')
logfile.readline()
print('fetching addresses...')
line = logfile.readline()
while ((not line.startswith('***')) and line.strip()):
(cat, lat, lng) = line.split(';')
latlng = ('%s,%s' % (lat, lng))
params = {'latlng': latlng}
req = requests.get(GOOGLE_MAPS_API_URL, params=params)
res = req.json()
print(res)
result = res['results'][0]
address = result['formatted_address']
datafile.write(('%s en %s |%s,%s' % (cat, address.partition(',')[0], lat, lng)))
line = logfile.readline()
logfile.close()
datafile.close()
print('done.')
|
def get_x_coordinate(width, year_index):
'\n Given the width of the canvas and the index of the current year\n in the YEARS list, returns the x coordinate of the vertical\n line associated with that year.\n\n Input:\n width (int): The width of the canvas\n year_index (int): The index of the current year in the YEARS list\n Returns:\n x_coordinate (int): The x coordinate of the vertical line associated\n with the specified year.\n '
space = ((width - (GRAPH_MARGIN_SIZE * 2)) / len(YEARS))
x_coordinate = (GRAPH_MARGIN_SIZE + (year_index * space))
return x_coordinate
| -1,885,833,101,397,108,200
|
Given the width of the canvas and the index of the current year
in the YEARS list, returns the x coordinate of the vertical
line associated with that year.
Input:
width (int): The width of the canvas
year_index (int): The index of the current year in the YEARS list
Returns:
x_coordinate (int): The x coordinate of the vertical line associated
with the specified year.
|
stancode_project/baby_names/babygraphics.py
|
get_x_coordinate
|
beomgyutxt/stanCode_project
|
python
|
def get_x_coordinate(width, year_index):
'\n Given the width of the canvas and the index of the current year\n in the YEARS list, returns the x coordinate of the vertical\n line associated with that year.\n\n Input:\n width (int): The width of the canvas\n year_index (int): The index of the current year in the YEARS list\n Returns:\n x_coordinate (int): The x coordinate of the vertical line associated\n with the specified year.\n '
space = ((width - (GRAPH_MARGIN_SIZE * 2)) / len(YEARS))
x_coordinate = (GRAPH_MARGIN_SIZE + (year_index * space))
return x_coordinate
|
def draw_fixed_lines(canvas):
'\n Erases all existing information on the given canvas and then\n draws the fixed background lines on it.\n\n Input:\n canvas (Tkinter Canvas): The canvas on which we are drawing.\n\n Returns:\n This function does not return any value.\n '
canvas.delete('all')
canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, (CANVAS_WIDTH - GRAPH_MARGIN_SIZE), GRAPH_MARGIN_SIZE, width=LINE_WIDTH)
canvas.create_line(GRAPH_MARGIN_SIZE, (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE), (CANVAS_WIDTH - GRAPH_MARGIN_SIZE), (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE), width=LINE_WIDTH)
for i in range(len(YEARS)):
x = get_x_coordinate(CANVAS_WIDTH, i)
canvas.create_line(x, 0, x, CANVAS_HEIGHT, width=LINE_WIDTH)
canvas.create_text((x + TEXT_DX), (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE), text=YEARS[i], anchor=tkinter.NW)
| 7,858,595,460,280,777,000
|
Erases all existing information on the given canvas and then
draws the fixed background lines on it.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
Returns:
This function does not return any value.
|
stancode_project/baby_names/babygraphics.py
|
draw_fixed_lines
|
beomgyutxt/stanCode_project
|
python
|
def draw_fixed_lines(canvas):
'\n Erases all existing information on the given canvas and then\n draws the fixed background lines on it.\n\n Input:\n canvas (Tkinter Canvas): The canvas on which we are drawing.\n\n Returns:\n This function does not return any value.\n '
canvas.delete('all')
canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, (CANVAS_WIDTH - GRAPH_MARGIN_SIZE), GRAPH_MARGIN_SIZE, width=LINE_WIDTH)
canvas.create_line(GRAPH_MARGIN_SIZE, (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE), (CANVAS_WIDTH - GRAPH_MARGIN_SIZE), (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE), width=LINE_WIDTH)
for i in range(len(YEARS)):
x = get_x_coordinate(CANVAS_WIDTH, i)
canvas.create_line(x, 0, x, CANVAS_HEIGHT, width=LINE_WIDTH)
canvas.create_text((x + TEXT_DX), (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE), text=YEARS[i], anchor=tkinter.NW)
|
def draw_names(canvas, name_data, lookup_names):
'\n Given a dict of baby name data and a list of name, plots\n the historical trend of those names onto the canvas.\n\n Input:\n canvas (Tkinter Canvas): The canvas on which we are drawing.\n name_data (dict): Dictionary holding baby name data\n lookup_names (List[str]): A list of names whose data you want to plot\n\n Returns:\n This function does not return any value.\n '
draw_fixed_lines(canvas)
x_previous = 0
y_previous = 0
for i in range(len(lookup_names)):
name = lookup_names[i]
color = COLORS[(i % len(COLORS))]
for j in range(len(YEARS)):
year = str(YEARS[j])
x = get_x_coordinate(CANVAS_WIDTH, j)
if (year in name_data[name]):
rank = int(name_data[name][year])
y = (GRAPH_MARGIN_SIZE + (((CANVAS_HEIGHT - (GRAPH_MARGIN_SIZE * 2)) / MAX_RANK) * (rank - 1)))
else:
rank = '*'
y = (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE)
if (j != 0):
canvas.create_line(x_previous, y_previous, x, y, width=LINE_WIDTH, fill=color)
canvas.create_text((x + TEXT_DX), y, text=f'{name} {rank}', anchor=tkinter.SW, fill=color)
x_previous = x
y_previous = y
| -8,383,493,657,984,384,000
|
Given a dict of baby name data and a list of name, plots
the historical trend of those names onto the canvas.
Input:
canvas (Tkinter Canvas): The canvas on which we are drawing.
name_data (dict): Dictionary holding baby name data
lookup_names (List[str]): A list of names whose data you want to plot
Returns:
This function does not return any value.
|
stancode_project/baby_names/babygraphics.py
|
draw_names
|
beomgyutxt/stanCode_project
|
python
|
def draw_names(canvas, name_data, lookup_names):
'\n Given a dict of baby name data and a list of name, plots\n the historical trend of those names onto the canvas.\n\n Input:\n canvas (Tkinter Canvas): The canvas on which we are drawing.\n name_data (dict): Dictionary holding baby name data\n lookup_names (List[str]): A list of names whose data you want to plot\n\n Returns:\n This function does not return any value.\n '
draw_fixed_lines(canvas)
x_previous = 0
y_previous = 0
for i in range(len(lookup_names)):
name = lookup_names[i]
color = COLORS[(i % len(COLORS))]
for j in range(len(YEARS)):
year = str(YEARS[j])
x = get_x_coordinate(CANVAS_WIDTH, j)
if (year in name_data[name]):
rank = int(name_data[name][year])
y = (GRAPH_MARGIN_SIZE + (((CANVAS_HEIGHT - (GRAPH_MARGIN_SIZE * 2)) / MAX_RANK) * (rank - 1)))
else:
rank = '*'
y = (CANVAS_HEIGHT - GRAPH_MARGIN_SIZE)
if (j != 0):
canvas.create_line(x_previous, y_previous, x, y, width=LINE_WIDTH, fill=color)
canvas.create_text((x + TEXT_DX), y, text=f'{name} {rank}', anchor=tkinter.SW, fill=color)
x_previous = x
y_previous = y
|
def toElement(self, legacyDelay=False):
'\n Render into a domish Element.\n\n @param legacyDelay: If C{True} send the delayed delivery information\n in legacy format.\n '
element = xmppim.Message.toElement(self)
if self.delay:
element.addChild(self.delay.toElement(legacy=legacyDelay))
return element
| -3,488,027,034,297,016,000
|
Render into a domish Element.
@param legacyDelay: If C{True} send the delayed delivery information
in legacy format.
|
wokkel/muc.py
|
toElement
|
Gandi/wokkel
|
python
|
def toElement(self, legacyDelay=False):
'\n Render into a domish Element.\n\n @param legacyDelay: If C{True} send the delayed delivery information\n in legacy format.\n '
element = xmppim.Message.toElement(self)
if self.delay:
element.addChild(self.delay.toElement(legacy=legacyDelay))
return element
|
def toElement(self):
'\n Returns a L{domish.Element} representing the history options.\n '
element = domish.Element((NS_MUC, 'history'))
for key in self.attributes:
value = getattr(self, key, None)
if (value is not None):
if (key == 'since'):
stamp = value.astimezone(tzutc())
element[key] = stamp.strftime('%Y-%m-%dT%H:%M:%SZ')
else:
element[key.lower()] = str(value)
return element
| 957,642,058,368,581,200
|
Returns a L{domish.Element} representing the history options.
|
wokkel/muc.py
|
toElement
|
Gandi/wokkel
|
python
|
def toElement(self):
'\n \n '
element = domish.Element((NS_MUC, 'history'))
for key in self.attributes:
value = getattr(self, key, None)
if (value is not None):
if (key == 'since'):
stamp = value.astimezone(tzutc())
element[key] = stamp.strftime('%Y-%m-%dT%H:%M:%SZ')
else:
element[key.lower()] = str(value)
return element
|
def _childParser_mucUser(self, element):
'\n Parse the MUC user extension element.\n '
for child in element.elements():
if (child.uri != NS_MUC_USER):
continue
elif (child.name == 'status'):
try:
value = int(child.getAttribute('code'))
statusCode = STATUS_CODE.lookupByValue(value)
except (TypeError, ValueError):
continue
self.mucStatuses.add(statusCode)
elif (child.name == 'item'):
if child.hasAttribute('jid'):
self.entity = jid.JID(child['jid'])
self.nick = child.getAttribute('nick')
self.affiliation = child.getAttribute('affiliation')
self.role = child.getAttribute('role')
for reason in child.elements(NS_MUC_ADMIN, 'reason'):
self.reason = unicode(reason)
| -8,153,738,244,076,290,000
|
Parse the MUC user extension element.
|
wokkel/muc.py
|
_childParser_mucUser
|
Gandi/wokkel
|
python
|
def _childParser_mucUser(self, element):
'\n \n '
for child in element.elements():
if (child.uri != NS_MUC_USER):
continue
elif (child.name == 'status'):
try:
value = int(child.getAttribute('code'))
statusCode = STATUS_CODE.lookupByValue(value)
except (TypeError, ValueError):
continue
self.mucStatuses.add(statusCode)
elif (child.name == 'item'):
if child.hasAttribute('jid'):
self.entity = jid.JID(child['jid'])
self.nick = child.getAttribute('nick')
self.affiliation = child.getAttribute('affiliation')
self.role = child.getAttribute('role')
for reason in child.elements(NS_MUC_ADMIN, 'reason'):
self.reason = unicode(reason)
|
def connectionInitialized(self):
'\n Called when the XML stream has been initialized.\n\n It initializes several XPath events to handle MUC stanzas that come\n in.\n '
xmppim.BasePresenceProtocol.connectionInitialized(self)
self.xmlstream.addObserver(GROUPCHAT, self._onGroupChat)
self._roomOccupantMap = {}
| -618,956,503,203,230,200
|
Called when the XML stream has been initialized.
It initializes several XPath events to handle MUC stanzas that come
in.
|
wokkel/muc.py
|
connectionInitialized
|
Gandi/wokkel
|
python
|
def connectionInitialized(self):
'\n Called when the XML stream has been initialized.\n\n It initializes several XPath events to handle MUC stanzas that come\n in.\n '
xmppim.BasePresenceProtocol.connectionInitialized(self)
self.xmlstream.addObserver(GROUPCHAT, self._onGroupChat)
self._roomOccupantMap = {}
|
def _onGroupChat(self, element):
'\n A group chat message has been received from a MUC room.\n\n There are a few event methods that may get called here.\n L{receivedGroupChat}, L{receivedSubject} or L{receivedHistory}.\n '
message = GroupChat.fromElement(element)
self.groupChatReceived(message)
| 7,946,529,378,002,423,000
|
A group chat message has been received from a MUC room.
There are a few event methods that may get called here.
L{receivedGroupChat}, L{receivedSubject} or L{receivedHistory}.
|
wokkel/muc.py
|
_onGroupChat
|
Gandi/wokkel
|
python
|
def _onGroupChat(self, element):
'\n A group chat message has been received from a MUC room.\n\n There are a few event methods that may get called here.\n L{receivedGroupChat}, L{receivedSubject} or L{receivedHistory}.\n '
message = GroupChat.fromElement(element)
self.groupChatReceived(message)
|
def groupChatReceived(self, message):
'\n Called when a groupchat message was received.\n\n This method is called with a parsed representation of a received\n groupchat message and can be overridden for further processing.\n\n For regular groupchat message, the C{body} attribute contains the\n message body. Conversation history sent by the room upon joining, will\n have the C{delay} attribute set, room subject changes the C{subject}\n attribute. See L{GroupChat} for details.\n\n @param message: Groupchat message.\n @type message: L{GroupChat}\n '
pass
| -6,893,553,630,542,734,000
|
Called when a groupchat message was received.
This method is called with a parsed representation of a received
groupchat message and can be overridden for further processing.
For regular groupchat message, the C{body} attribute contains the
message body. Conversation history sent by the room upon joining, will
have the C{delay} attribute set, room subject changes the C{subject}
attribute. See L{GroupChat} for details.
@param message: Groupchat message.
@type message: L{GroupChat}
|
wokkel/muc.py
|
groupChatReceived
|
Gandi/wokkel
|
python
|
def groupChatReceived(self, message):
'\n Called when a groupchat message was received.\n\n This method is called with a parsed representation of a received\n groupchat message and can be overridden for further processing.\n\n For regular groupchat message, the C{body} attribute contains the\n message body. Conversation history sent by the room upon joining, will\n have the C{delay} attribute set, room subject changes the C{subject}\n attribute. See L{GroupChat} for details.\n\n @param message: Groupchat message.\n @type message: L{GroupChat}\n '
pass
|
def _sendDeferred(self, stanza):
'\n Send presence stanza, adding a deferred with a timeout.\n\n @param stanza: The presence stanza to send over the wire.\n @type stanza: L{generic.Stanza}\n\n @param timeout: The number of seconds to wait before the deferred is\n timed out.\n @type timeout: C{int}\n\n The deferred object L{defer.Deferred} is returned.\n '
def onResponse(element):
if (element.getAttribute('type') == 'error'):
d.errback(error.exceptionFromStanza(element))
else:
d.callback(UserPresence.fromElement(element))
def onTimeout():
d.errback(xmlstream.TimeoutError('Timeout waiting for response.'))
def cancelTimeout(result):
if call.active():
call.cancel()
return result
def recordOccupant(presence):
occupantJID = presence.sender
roomJID = occupantJID.userhostJID()
self._roomOccupantMap[roomJID] = occupantJID
return presence
call = self._reactor.callLater(DEFER_TIMEOUT, onTimeout)
d = defer.Deferred()
d.addBoth(cancelTimeout)
d.addCallback(recordOccupant)
query = ("/presence[@from='%s' or (@from='%s' and @type='error')]" % (stanza.recipient.full(), stanza.recipient.userhost()))
self.xmlstream.addOnetimeObserver(query, onResponse, priority=(- 1))
self.xmlstream.send(stanza.toElement())
return d
| -7,452,771,661,565,527,000
|
Send presence stanza, adding a deferred with a timeout.
@param stanza: The presence stanza to send over the wire.
@type stanza: L{generic.Stanza}
@param timeout: The number of seconds to wait before the deferred is
timed out.
@type timeout: C{int}
The deferred object L{defer.Deferred} is returned.
|
wokkel/muc.py
|
_sendDeferred
|
Gandi/wokkel
|
python
|
def _sendDeferred(self, stanza):
'\n Send presence stanza, adding a deferred with a timeout.\n\n @param stanza: The presence stanza to send over the wire.\n @type stanza: L{generic.Stanza}\n\n @param timeout: The number of seconds to wait before the deferred is\n timed out.\n @type timeout: C{int}\n\n The deferred object L{defer.Deferred} is returned.\n '
def onResponse(element):
if (element.getAttribute('type') == 'error'):
d.errback(error.exceptionFromStanza(element))
else:
d.callback(UserPresence.fromElement(element))
def onTimeout():
d.errback(xmlstream.TimeoutError('Timeout waiting for response.'))
def cancelTimeout(result):
if call.active():
call.cancel()
return result
def recordOccupant(presence):
occupantJID = presence.sender
roomJID = occupantJID.userhostJID()
self._roomOccupantMap[roomJID] = occupantJID
return presence
call = self._reactor.callLater(DEFER_TIMEOUT, onTimeout)
d = defer.Deferred()
d.addBoth(cancelTimeout)
d.addCallback(recordOccupant)
query = ("/presence[@from='%s' or (@from='%s' and @type='error')]" % (stanza.recipient.full(), stanza.recipient.userhost()))
self.xmlstream.addOnetimeObserver(query, onResponse, priority=(- 1))
self.xmlstream.send(stanza.toElement())
return d
|
def join(self, roomJID, nick, historyOptions=None, password=None):
'\n Join a MUC room by sending presence to it.\n\n @param roomJID: The JID of the room the entity is joining.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param nick: The nick name for the entitity joining the room.\n @type nick: C{unicode}\n\n @param historyOptions: Options for conversation history sent by the\n room upon joining.\n @type historyOptions: L{HistoryOptions}\n\n @param password: Optional password for the room.\n @type password: C{unicode}\n\n @return: A deferred that fires when the entity is in the room or an\n error has occurred.\n '
occupantJID = jid.JID(tuple=(roomJID.user, roomJID.host, nick))
presence = BasicPresence(recipient=occupantJID)
if password:
presence.password = password
if historyOptions:
presence.history = historyOptions
return self._sendDeferred(presence)
| 7,766,148,615,772,783,000
|
Join a MUC room by sending presence to it.
@param roomJID: The JID of the room the entity is joining.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nick: The nick name for the entitity joining the room.
@type nick: C{unicode}
@param historyOptions: Options for conversation history sent by the
room upon joining.
@type historyOptions: L{HistoryOptions}
@param password: Optional password for the room.
@type password: C{unicode}
@return: A deferred that fires when the entity is in the room or an
error has occurred.
|
wokkel/muc.py
|
join
|
Gandi/wokkel
|
python
|
def join(self, roomJID, nick, historyOptions=None, password=None):
'\n Join a MUC room by sending presence to it.\n\n @param roomJID: The JID of the room the entity is joining.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param nick: The nick name for the entitity joining the room.\n @type nick: C{unicode}\n\n @param historyOptions: Options for conversation history sent by the\n room upon joining.\n @type historyOptions: L{HistoryOptions}\n\n @param password: Optional password for the room.\n @type password: C{unicode}\n\n @return: A deferred that fires when the entity is in the room or an\n error has occurred.\n '
occupantJID = jid.JID(tuple=(roomJID.user, roomJID.host, nick))
presence = BasicPresence(recipient=occupantJID)
if password:
presence.password = password
if historyOptions:
presence.history = historyOptions
return self._sendDeferred(presence)
|
def nick(self, roomJID, nick):
"\n Change an entity's nick name in a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#changenick\n\n @param roomJID: The JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param nick: The new nick name within the room.\n @type nick: C{unicode}\n "
occupantJID = jid.JID(tuple=(roomJID.user, roomJID.host, nick))
presence = BasicPresence(recipient=occupantJID)
return self._sendDeferred(presence)
| 8,459,624,295,962,374,000
|
Change an entity's nick name in a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#changenick
@param roomJID: The JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param nick: The new nick name within the room.
@type nick: C{unicode}
|
wokkel/muc.py
|
nick
|
Gandi/wokkel
|
python
|
def nick(self, roomJID, nick):
"\n Change an entity's nick name in a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#changenick\n\n @param roomJID: The JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param nick: The new nick name within the room.\n @type nick: C{unicode}\n "
occupantJID = jid.JID(tuple=(roomJID.user, roomJID.host, nick))
presence = BasicPresence(recipient=occupantJID)
return self._sendDeferred(presence)
|
def status(self, roomJID, show=None, status=None):
'\n Change user status.\n\n See: http://xmpp.org/extensions/xep-0045.html#changepres\n\n @param roomJID: The Room JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param show: The availability of the entity. Common values are xa,\n available, etc\n @type show: C{unicode}\n\n @param status: The current status of the entity.\n @type status: C{unicode}\n '
occupantJID = self._roomOccupantMap[roomJID]
presence = BasicPresence(recipient=occupantJID, show=show, status=status)
return self._sendDeferred(presence)
| -6,188,022,030,358,750,000
|
Change user status.
See: http://xmpp.org/extensions/xep-0045.html#changepres
@param roomJID: The Room JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param show: The availability of the entity. Common values are xa,
available, etc
@type show: C{unicode}
@param status: The current status of the entity.
@type status: C{unicode}
|
wokkel/muc.py
|
status
|
Gandi/wokkel
|
python
|
def status(self, roomJID, show=None, status=None):
'\n Change user status.\n\n See: http://xmpp.org/extensions/xep-0045.html#changepres\n\n @param roomJID: The Room JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param show: The availability of the entity. Common values are xa,\n available, etc\n @type show: C{unicode}\n\n @param status: The current status of the entity.\n @type status: C{unicode}\n '
occupantJID = self._roomOccupantMap[roomJID]
presence = BasicPresence(recipient=occupantJID, show=show, status=status)
return self._sendDeferred(presence)
|
def leave(self, roomJID):
'\n Leave a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#exit\n\n @param roomJID: The JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n '
occupantJID = self._roomOccupantMap[roomJID]
presence = xmppim.AvailabilityPresence(recipient=occupantJID, available=False)
return self._sendDeferred(presence)
| -1,254,793,334,089,341,400
|
Leave a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#exit
@param roomJID: The JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
|
wokkel/muc.py
|
leave
|
Gandi/wokkel
|
python
|
def leave(self, roomJID):
'\n Leave a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#exit\n\n @param roomJID: The JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n '
occupantJID = self._roomOccupantMap[roomJID]
presence = xmppim.AvailabilityPresence(recipient=occupantJID, available=False)
return self._sendDeferred(presence)
|
def groupChat(self, roomJID, body):
'\n Send a groupchat message.\n '
message = GroupChat(recipient=roomJID, body=body)
self.send(message.toElement())
| 6,700,500,881,912,077,000
|
Send a groupchat message.
|
wokkel/muc.py
|
groupChat
|
Gandi/wokkel
|
python
|
def groupChat(self, roomJID, body):
'\n \n '
message = GroupChat(recipient=roomJID, body=body)
self.send(message.toElement())
|
def chat(self, occupantJID, body):
'\n Send a private chat message to a user in a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#privatemessage\n\n @param occupantJID: The Room JID of the other user.\n @type occupantJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n '
message = PrivateChat(recipient=occupantJID, body=body)
self.send(message.toElement())
| 6,678,183,103,771,934,000
|
Send a private chat message to a user in a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#privatemessage
@param occupantJID: The Room JID of the other user.
@type occupantJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
|
wokkel/muc.py
|
chat
|
Gandi/wokkel
|
python
|
def chat(self, occupantJID, body):
'\n Send a private chat message to a user in a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#privatemessage\n\n @param occupantJID: The Room JID of the other user.\n @type occupantJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n '
message = PrivateChat(recipient=occupantJID, body=body)
self.send(message.toElement())
|
def subject(self, roomJID, subject):
'\n Change the subject of a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#subject-mod\n\n @param roomJID: The bare JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param subject: The subject you want to set.\n @type subject: C{unicode}\n '
message = GroupChat(roomJID.userhostJID(), subject=subject)
self.send(message.toElement())
| 84,716,214,549,814,260
|
Change the subject of a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#subject-mod
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param subject: The subject you want to set.
@type subject: C{unicode}
|
wokkel/muc.py
|
subject
|
Gandi/wokkel
|
python
|
def subject(self, roomJID, subject):
'\n Change the subject of a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#subject-mod\n\n @param roomJID: The bare JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param subject: The subject you want to set.\n @type subject: C{unicode}\n '
message = GroupChat(roomJID.userhostJID(), subject=subject)
self.send(message.toElement())
|
def invite(self, roomJID, invitee, reason=None):
'\n Invite a xmpp entity to a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#invite\n\n @param roomJID: The bare JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param invitee: The entity that is being invited.\n @type invitee: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param reason: The reason for the invite.\n @type reason: C{unicode}\n '
message = InviteMessage(recipient=roomJID, invitee=invitee, reason=reason)
self.send(message.toElement())
| 2,277,563,155,247,746,300
|
Invite a xmpp entity to a MUC room.
See: http://xmpp.org/extensions/xep-0045.html#invite
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param invitee: The entity that is being invited.
@type invitee: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param reason: The reason for the invite.
@type reason: C{unicode}
|
wokkel/muc.py
|
invite
|
Gandi/wokkel
|
python
|
def invite(self, roomJID, invitee, reason=None):
'\n Invite a xmpp entity to a MUC room.\n\n See: http://xmpp.org/extensions/xep-0045.html#invite\n\n @param roomJID: The bare JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param invitee: The entity that is being invited.\n @type invitee: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param reason: The reason for the invite.\n @type reason: C{unicode}\n '
message = InviteMessage(recipient=roomJID, invitee=invitee, reason=reason)
self.send(message.toElement())
|
def getRegisterForm(self, roomJID):
'\n Grab the registration form for a MUC room.\n\n @param room: The room jabber/xmpp entity id for the requested\n registration form.\n @type room: L{JID<twisted.words.protocols.jabber.jid.JID>}\n '
def cb(response):
form = data_form.findForm(response.query, NS_MUC_REGISTER)
return form
request = RegisterRequest(recipient=roomJID, options=None)
d = self.request(request)
d.addCallback(cb)
return d
| 603,588,402,837,602,400
|
Grab the registration form for a MUC room.
@param room: The room jabber/xmpp entity id for the requested
registration form.
@type room: L{JID<twisted.words.protocols.jabber.jid.JID>}
|
wokkel/muc.py
|
getRegisterForm
|
Gandi/wokkel
|
python
|
def getRegisterForm(self, roomJID):
'\n Grab the registration form for a MUC room.\n\n @param room: The room jabber/xmpp entity id for the requested\n registration form.\n @type room: L{JID<twisted.words.protocols.jabber.jid.JID>}\n '
def cb(response):
form = data_form.findForm(response.query, NS_MUC_REGISTER)
return form
request = RegisterRequest(recipient=roomJID, options=None)
d = self.request(request)
d.addCallback(cb)
return d
|
def register(self, roomJID, options):
'\n Send a request to register for a room.\n\n @param roomJID: The bare JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param options: A mapping of field names to values, or C{None} to\n cancel.\n @type options: C{dict}\n '
if (options is None):
options = False
request = RegisterRequest(recipient=roomJID, options=options)
return self.request(request)
| 8,946,513,596,048,187,000
|
Send a request to register for a room.
@param roomJID: The bare JID of the room.
@type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}
@param options: A mapping of field names to values, or C{None} to
cancel.
@type options: C{dict}
|
wokkel/muc.py
|
register
|
Gandi/wokkel
|
python
|
def register(self, roomJID, options):
'\n Send a request to register for a room.\n\n @param roomJID: The bare JID of the room.\n @type roomJID: L{JID<twisted.words.protocols.jabber.jid.JID>}\n\n @param options: A mapping of field names to values, or C{None} to\n cancel.\n @type options: C{dict}\n '
if (options is None):
options = False
request = RegisterRequest(recipient=roomJID, options=options)
return self.request(request)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.