repo
stringlengths
7
55
path
stringlengths
4
223
func_name
stringlengths
1
134
original_string
stringlengths
75
104k
language
stringclasses
1 value
code
stringlengths
75
104k
code_tokens
listlengths
19
28.4k
docstring
stringlengths
1
46.9k
docstring_tokens
listlengths
1
1.97k
sha
stringlengths
40
40
url
stringlengths
87
315
partition
stringclasses
1 value
tensorflow/tensorboard
tensorboard/plugins/hparams/list_session_groups.py
Handler._build_session
def _build_session(self, name, start_info, end_info): """Builds a session object.""" assert start_info is not None result = api_pb2.Session( name=name, start_time_secs=start_info.start_time_secs, model_uri=start_info.model_uri, metric_values=self._build_session_metric_values(name), monitor_url=start_info.monitor_url) if end_info is not None: result.status = end_info.status result.end_time_secs = end_info.end_time_secs return result
python
def _build_session(self, name, start_info, end_info):
  """Creates an api_pb2.Session message for the named session.

  Args:
    name: The session name.
    start_info: The session's start-info event payload; must not be None.
    end_info: The session's end-info event payload, or None if the session
      has not ended yet.

  Returns:
    An api_pb2.Session populated from the given infos; status and
    end_time_secs are set only when end_info is available.
  """
  assert start_info is not None
  session = api_pb2.Session(
      name=name,
      start_time_secs=start_info.start_time_secs,
      model_uri=start_info.model_uri,
      metric_values=self._build_session_metric_values(name),
      monitor_url=start_info.monitor_url)
  if end_info is not None:
    session.status = end_info.status
    session.end_time_secs = end_info.end_time_secs
  return session
[ "def", "_build_session", "(", "self", ",", "name", ",", "start_info", ",", "end_info", ")", ":", "assert", "start_info", "is", "not", "None", "result", "=", "api_pb2", ".", "Session", "(", "name", "=", "name", ",", "start_time_secs", "=", "start_info", ".", "start_time_secs", ",", "model_uri", "=", "start_info", ".", "model_uri", ",", "metric_values", "=", "self", ".", "_build_session_metric_values", "(", "name", ")", ",", "monitor_url", "=", "start_info", ".", "monitor_url", ")", "if", "end_info", "is", "not", "None", ":", "result", ".", "status", "=", "end_info", ".", "status", "result", ".", "end_time_secs", "=", "end_info", ".", "end_time_secs", "return", "result" ]
Builds a session object.
[ "Builds", "a", "session", "object", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/list_session_groups.py#L132-L145
train
tensorflow/tensorboard
tensorboard/plugins/hparams/list_session_groups.py
Handler._build_session_metric_values
def _build_session_metric_values(self, session_name): """Builds the session metric values.""" # result is a list of api_pb2.MetricValue instances. result = [] metric_infos = self._experiment.metric_infos for metric_info in metric_infos: metric_name = metric_info.name try: metric_eval = metrics.last_metric_eval( self._context.multiplexer, session_name, metric_name) except KeyError: # It's ok if we don't find the metric in the session. # We skip it here. For filtering and sorting purposes its value is None. continue # metric_eval is a 3-tuple of the form [wall_time, step, value] result.append(api_pb2.MetricValue(name=metric_name, wall_time_secs=metric_eval[0], training_step=metric_eval[1], value=metric_eval[2])) return result
python
def _build_session_metric_values(self, session_name):
  """Builds the session metric values.

  Returns a list of api_pb2.MetricValue instances, one for each metric of
  the experiment that has an evaluation in the given session. Metrics not
  found in the session are skipped; for filtering and sorting purposes
  their value is treated as None.
  """
  values = []
  for info in self._experiment.metric_infos:
    try:
      # last_metric_eval returns a (wall_time, step, value) 3-tuple.
      wall_time, step, value = metrics.last_metric_eval(
          self._context.multiplexer, session_name, info.name)
    except KeyError:
      # It's ok if the metric is missing from this session; skip it.
      continue
    values.append(
        api_pb2.MetricValue(name=info.name,
                            wall_time_secs=wall_time,
                            training_step=step,
                            value=value))
  return values
[ "def", "_build_session_metric_values", "(", "self", ",", "session_name", ")", ":", "# result is a list of api_pb2.MetricValue instances.", "result", "=", "[", "]", "metric_infos", "=", "self", ".", "_experiment", ".", "metric_infos", "for", "metric_info", "in", "metric_infos", ":", "metric_name", "=", "metric_info", ".", "name", "try", ":", "metric_eval", "=", "metrics", ".", "last_metric_eval", "(", "self", ".", "_context", ".", "multiplexer", ",", "session_name", ",", "metric_name", ")", "except", "KeyError", ":", "# It's ok if we don't find the metric in the session.", "# We skip it here. For filtering and sorting purposes its value is None.", "continue", "# metric_eval is a 3-tuple of the form [wall_time, step, value]", "result", ".", "append", "(", "api_pb2", ".", "MetricValue", "(", "name", "=", "metric_name", ",", "wall_time_secs", "=", "metric_eval", "[", "0", "]", ",", "training_step", "=", "metric_eval", "[", "1", "]", ",", "value", "=", "metric_eval", "[", "2", "]", ")", ")", "return", "result" ]
Builds the session metric values.
[ "Builds", "the", "session", "metric", "values", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/list_session_groups.py#L147-L170
train
tensorflow/tensorboard
tensorboard/plugins/hparams/list_session_groups.py
Handler._aggregate_metrics
def _aggregate_metrics(self, session_group): """Sets the metrics of the group based on aggregation_type.""" if (self._request.aggregation_type == api_pb2.AGGREGATION_AVG or self._request.aggregation_type == api_pb2.AGGREGATION_UNSET): _set_avg_session_metrics(session_group) elif self._request.aggregation_type == api_pb2.AGGREGATION_MEDIAN: _set_median_session_metrics(session_group, self._request.aggregation_metric) elif self._request.aggregation_type == api_pb2.AGGREGATION_MIN: _set_extremum_session_metrics(session_group, self._request.aggregation_metric, min) elif self._request.aggregation_type == api_pb2.AGGREGATION_MAX: _set_extremum_session_metrics(session_group, self._request.aggregation_metric, max) else: raise error.HParamsError('Unknown aggregation_type in request: %s' % self._request.aggregation_type)
python
def _aggregate_metrics(self, session_group):
  """Sets the metrics of 'session_group' based on the request's aggregation_type.

  Raises:
    error.HParamsError: If the request carries an unknown aggregation_type.
  """
  agg_type = self._request.aggregation_type
  agg_metric = self._request.aggregation_metric
  # AVG is the default, so an unset aggregation type also averages.
  if agg_type in (api_pb2.AGGREGATION_AVG, api_pb2.AGGREGATION_UNSET):
    _set_avg_session_metrics(session_group)
  elif agg_type == api_pb2.AGGREGATION_MEDIAN:
    _set_median_session_metrics(session_group, agg_metric)
  elif agg_type == api_pb2.AGGREGATION_MIN:
    _set_extremum_session_metrics(session_group, agg_metric, min)
  elif agg_type == api_pb2.AGGREGATION_MAX:
    _set_extremum_session_metrics(session_group, agg_metric, max)
  else:
    raise error.HParamsError('Unknown aggregation_type in request: %s'
                             % agg_type)
[ "def", "_aggregate_metrics", "(", "self", ",", "session_group", ")", ":", "if", "(", "self", ".", "_request", ".", "aggregation_type", "==", "api_pb2", ".", "AGGREGATION_AVG", "or", "self", ".", "_request", ".", "aggregation_type", "==", "api_pb2", ".", "AGGREGATION_UNSET", ")", ":", "_set_avg_session_metrics", "(", "session_group", ")", "elif", "self", ".", "_request", ".", "aggregation_type", "==", "api_pb2", ".", "AGGREGATION_MEDIAN", ":", "_set_median_session_metrics", "(", "session_group", ",", "self", ".", "_request", ".", "aggregation_metric", ")", "elif", "self", ".", "_request", ".", "aggregation_type", "==", "api_pb2", ".", "AGGREGATION_MIN", ":", "_set_extremum_session_metrics", "(", "session_group", ",", "self", ".", "_request", ".", "aggregation_metric", ",", "min", ")", "elif", "self", ".", "_request", ".", "aggregation_type", "==", "api_pb2", ".", "AGGREGATION_MAX", ":", "_set_extremum_session_metrics", "(", "session_group", ",", "self", ".", "_request", ".", "aggregation_metric", ",", "max", ")", "else", ":", "raise", "error", ".", "HParamsError", "(", "'Unknown aggregation_type in request: %s'", "%", "self", ".", "_request", ".", "aggregation_type", ")" ]
Sets the metrics of the group based on aggregation_type.
[ "Sets", "the", "metrics", "of", "the", "group", "based", "on", "aggregation_type", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/list_session_groups.py#L172-L191
train
tensorflow/tensorboard
tensorboard/plugins/hparams/list_session_groups.py
Handler._sort
def _sort(self, session_groups): """Sorts 'session_groups' in place according to _request.col_params.""" # Sort by session_group name so we have a deterministic order. session_groups.sort(key=operator.attrgetter('name')) # Sort by lexicographical order of the _request.col_params whose order # is not ORDER_UNSPECIFIED. The first such column is the primary sorting # key, the second is the secondary sorting key, etc. To achieve that we # need to iterate on these columns in reverse order (thus the primary key # is the key used in the last sort). for col_param, extractor in reversed(list(zip(self._request.col_params, self._extractors))): if col_param.order == api_pb2.ORDER_UNSPECIFIED: continue if col_param.order == api_pb2.ORDER_ASC: session_groups.sort( key=_create_key_func( extractor, none_is_largest=not col_param.missing_values_first)) elif col_param.order == api_pb2.ORDER_DESC: session_groups.sort( key=_create_key_func( extractor, none_is_largest=col_param.missing_values_first), reverse=True) else: raise error.HParamsError('Unknown col_param.order given: %s' % col_param)
python
def _sort(self, session_groups):
  """Sorts 'session_groups' in place according to _request.col_params.

  Raises:
    error.HParamsError: If a col_param carries an unknown order value.
  """
  # Establish a deterministic base order by session-group name.
  session_groups.sort(key=operator.attrgetter('name'))
  # Python's sort is stable, so sorting repeatedly from the
  # least-significant ordered column to the most-significant one yields
  # lexicographic order: the first ordered col_param in the request ends
  # up as the primary key (it is applied last).
  ordered_columns = list(zip(self._request.col_params, self._extractors))
  for col_param, extractor in reversed(ordered_columns):
    order = col_param.order
    if order == api_pb2.ORDER_UNSPECIFIED:
      continue
    if order == api_pb2.ORDER_ASC:
      key_func = _create_key_func(
          extractor,
          none_is_largest=not col_param.missing_values_first)
      session_groups.sort(key=key_func)
    elif order == api_pb2.ORDER_DESC:
      key_func = _create_key_func(
          extractor,
          none_is_largest=col_param.missing_values_first)
      session_groups.sort(key=key_func, reverse=True)
    else:
      raise error.HParamsError('Unknown col_param.order given: %s'
                               % col_param)
[ "def", "_sort", "(", "self", ",", "session_groups", ")", ":", "# Sort by session_group name so we have a deterministic order.", "session_groups", ".", "sort", "(", "key", "=", "operator", ".", "attrgetter", "(", "'name'", ")", ")", "# Sort by lexicographical order of the _request.col_params whose order", "# is not ORDER_UNSPECIFIED. The first such column is the primary sorting", "# key, the second is the secondary sorting key, etc. To achieve that we", "# need to iterate on these columns in reverse order (thus the primary key", "# is the key used in the last sort).", "for", "col_param", ",", "extractor", "in", "reversed", "(", "list", "(", "zip", "(", "self", ".", "_request", ".", "col_params", ",", "self", ".", "_extractors", ")", ")", ")", ":", "if", "col_param", ".", "order", "==", "api_pb2", ".", "ORDER_UNSPECIFIED", ":", "continue", "if", "col_param", ".", "order", "==", "api_pb2", ".", "ORDER_ASC", ":", "session_groups", ".", "sort", "(", "key", "=", "_create_key_func", "(", "extractor", ",", "none_is_largest", "=", "not", "col_param", ".", "missing_values_first", ")", ")", "elif", "col_param", ".", "order", "==", "api_pb2", ".", "ORDER_DESC", ":", "session_groups", ".", "sort", "(", "key", "=", "_create_key_func", "(", "extractor", ",", "none_is_largest", "=", "col_param", ".", "missing_values_first", ")", ",", "reverse", "=", "True", ")", "else", ":", "raise", "error", ".", "HParamsError", "(", "'Unknown col_param.order given: %s'", "%", "col_param", ")" ]
Sorts 'session_groups' in place according to _request.col_params.
[ "Sorts", "session_groups", "in", "place", "according", "to", "_request", ".", "col_params", "." ]
8e5f497b48e40f2a774f85416b8a35ac0693c35e
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/list_session_groups.py#L199-L226
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioAnalysisRecordAlsa.py
recordAnalyzeAudio
def recordAnalyzeAudio(duration, outputWavFile, midTermBufferSizeSec, modelName, modelType): ''' recordAnalyzeAudio(duration, outputWavFile, midTermBufferSizeSec, modelName, modelType) This function is used to record and analyze audio segments, in a fix window basis. ARGUMENTS: - duration total recording duration - outputWavFile path of the output WAV file - midTermBufferSizeSec (fix)segment length in seconds - modelName classification model name - modelType classification model type ''' if modelType=='svm': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, compute_beat] = aT.load_model(modelName) elif modelType=='knn': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, compute_beat] = aT.load_model_knn(modelName) else: Classifier = None inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK) inp.setchannels(1) inp.setrate(Fs) inp.setformat(alsaaudio.PCM_FORMAT_S16_LE) inp.setperiodsize(512) midTermBufferSize = int(midTermBufferSizeSec * Fs) allData = [] midTermBuffer = [] curWindow = [] count = 0 while len(allData)<duration*Fs: # Read data from device l,data = inp.read() if l: for i in range(l): curWindow.append(audioop.getsample(data, 2, i)) if (len(curWindow)+len(midTermBuffer)>midTermBufferSize): samplesToCopyToMidBuffer = midTermBufferSize - len(midTermBuffer) else: samplesToCopyToMidBuffer = len(curWindow) midTermBuffer = midTermBuffer + curWindow[0:samplesToCopyToMidBuffer]; del(curWindow[0:samplesToCopyToMidBuffer]) if len(midTermBuffer) == midTermBufferSize: count += 1 if Classifier!=None: [mtFeatures, stFeatures, _] = aF.mtFeatureExtraction(midTermBuffer, Fs, 2.0*Fs, 2.0*Fs, 0.020*Fs, 0.020*Fs) curFV = (mtFeatures[:,0] - MEAN) / STD; [result, P] = aT.classifierWrapper(Classifier, modelType, curFV) print classNames[int(result)] allData = allData + midTermBuffer plt.clf() plt.plot(midTermBuffer) plt.show(block = False) plt.draw() midTermBuffer = [] allDataArray = numpy.int16(allData) wavfile.write(outputWavFile, Fs, 
allDataArray)
python
def recordAnalyzeAudio(duration, outputWavFile, midTermBufferSizeSec, modelName, modelType): ''' recordAnalyzeAudio(duration, outputWavFile, midTermBufferSizeSec, modelName, modelType) This function is used to record and analyze audio segments, in a fix window basis. ARGUMENTS: - duration total recording duration - outputWavFile path of the output WAV file - midTermBufferSizeSec (fix)segment length in seconds - modelName classification model name - modelType classification model type ''' if modelType=='svm': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, compute_beat] = aT.load_model(modelName) elif modelType=='knn': [Classifier, MEAN, STD, classNames, mtWin, mtStep, stWin, stStep, compute_beat] = aT.load_model_knn(modelName) else: Classifier = None inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NONBLOCK) inp.setchannels(1) inp.setrate(Fs) inp.setformat(alsaaudio.PCM_FORMAT_S16_LE) inp.setperiodsize(512) midTermBufferSize = int(midTermBufferSizeSec * Fs) allData = [] midTermBuffer = [] curWindow = [] count = 0 while len(allData)<duration*Fs: # Read data from device l,data = inp.read() if l: for i in range(l): curWindow.append(audioop.getsample(data, 2, i)) if (len(curWindow)+len(midTermBuffer)>midTermBufferSize): samplesToCopyToMidBuffer = midTermBufferSize - len(midTermBuffer) else: samplesToCopyToMidBuffer = len(curWindow) midTermBuffer = midTermBuffer + curWindow[0:samplesToCopyToMidBuffer]; del(curWindow[0:samplesToCopyToMidBuffer]) if len(midTermBuffer) == midTermBufferSize: count += 1 if Classifier!=None: [mtFeatures, stFeatures, _] = aF.mtFeatureExtraction(midTermBuffer, Fs, 2.0*Fs, 2.0*Fs, 0.020*Fs, 0.020*Fs) curFV = (mtFeatures[:,0] - MEAN) / STD; [result, P] = aT.classifierWrapper(Classifier, modelType, curFV) print classNames[int(result)] allData = allData + midTermBuffer plt.clf() plt.plot(midTermBuffer) plt.show(block = False) plt.draw() midTermBuffer = [] allDataArray = numpy.int16(allData) wavfile.write(outputWavFile, Fs, 
allDataArray)
[ "def", "recordAnalyzeAudio", "(", "duration", ",", "outputWavFile", ",", "midTermBufferSizeSec", ",", "modelName", ",", "modelType", ")", ":", "if", "modelType", "==", "'svm'", ":", "[", "Classifier", ",", "MEAN", ",", "STD", ",", "classNames", ",", "mtWin", ",", "mtStep", ",", "stWin", ",", "stStep", ",", "compute_beat", "]", "=", "aT", ".", "load_model", "(", "modelName", ")", "elif", "modelType", "==", "'knn'", ":", "[", "Classifier", ",", "MEAN", ",", "STD", ",", "classNames", ",", "mtWin", ",", "mtStep", ",", "stWin", ",", "stStep", ",", "compute_beat", "]", "=", "aT", ".", "load_model_knn", "(", "modelName", ")", "else", ":", "Classifier", "=", "None", "inp", "=", "alsaaudio", ".", "PCM", "(", "alsaaudio", ".", "PCM_CAPTURE", ",", "alsaaudio", ".", "PCM_NONBLOCK", ")", "inp", ".", "setchannels", "(", "1", ")", "inp", ".", "setrate", "(", "Fs", ")", "inp", ".", "setformat", "(", "alsaaudio", ".", "PCM_FORMAT_S16_LE", ")", "inp", ".", "setperiodsize", "(", "512", ")", "midTermBufferSize", "=", "int", "(", "midTermBufferSizeSec", "*", "Fs", ")", "allData", "=", "[", "]", "midTermBuffer", "=", "[", "]", "curWindow", "=", "[", "]", "count", "=", "0", "while", "len", "(", "allData", ")", "<", "duration", "*", "Fs", ":", "# Read data from device", "l", ",", "data", "=", "inp", ".", "read", "(", ")", "if", "l", ":", "for", "i", "in", "range", "(", "l", ")", ":", "curWindow", ".", "append", "(", "audioop", ".", "getsample", "(", "data", ",", "2", ",", "i", ")", ")", "if", "(", "len", "(", "curWindow", ")", "+", "len", "(", "midTermBuffer", ")", ">", "midTermBufferSize", ")", ":", "samplesToCopyToMidBuffer", "=", "midTermBufferSize", "-", "len", "(", "midTermBuffer", ")", "else", ":", "samplesToCopyToMidBuffer", "=", "len", "(", "curWindow", ")", "midTermBuffer", "=", "midTermBuffer", "+", "curWindow", "[", "0", ":", "samplesToCopyToMidBuffer", "]", "del", "(", "curWindow", "[", "0", ":", "samplesToCopyToMidBuffer", "]", ")", "if", "len", "(", "midTermBuffer", ")", 
"==", "midTermBufferSize", ":", "count", "+=", "1", "if", "Classifier", "!=", "None", ":", "[", "mtFeatures", ",", "stFeatures", ",", "_", "]", "=", "aF", ".", "mtFeatureExtraction", "(", "midTermBuffer", ",", "Fs", ",", "2.0", "*", "Fs", ",", "2.0", "*", "Fs", ",", "0.020", "*", "Fs", ",", "0.020", "*", "Fs", ")", "curFV", "=", "(", "mtFeatures", "[", ":", ",", "0", "]", "-", "MEAN", ")", "/", "STD", "[", "result", ",", "P", "]", "=", "aT", ".", "classifierWrapper", "(", "Classifier", ",", "modelType", ",", "curFV", ")", "print", "classNames", "[", "int", "(", "result", ")", "]", "allData", "=", "allData", "+", "midTermBuffer", "plt", ".", "clf", "(", ")", "plt", ".", "plot", "(", "midTermBuffer", ")", "plt", ".", "show", "(", "block", "=", "False", ")", "plt", ".", "draw", "(", ")", "midTermBuffer", "=", "[", "]", "allDataArray", "=", "numpy", ".", "int16", "(", "allData", ")", "wavfile", ".", "write", "(", "outputWavFile", ",", "Fs", ",", "allDataArray", ")" ]
recordAnalyzeAudio(duration, outputWavFile, midTermBufferSizeSec, modelName, modelType) This function is used to record and analyze audio segments, in a fix window basis. ARGUMENTS: - duration total recording duration - outputWavFile path of the output WAV file - midTermBufferSizeSec (fix)segment length in seconds - modelName classification model name - modelType classification model type
[ "recordAnalyzeAudio", "(", "duration", "outputWavFile", "midTermBufferSizeSec", "modelName", "modelType", ")" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioAnalysisRecordAlsa.py#L62-L125
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audacityAnnotation2WAVs.py
annotation2files
def annotation2files(wavFile, csvFile): ''' Break an audio stream to segments of interest, defined by a csv file - wavFile: path to input wavfile - csvFile: path to csvFile of segment limits Input CSV file must be of the format <T1>\t<T2>\t<Label> ''' [Fs, x] = audioBasicIO.readAudioFile(wavFile) with open(csvFile, 'rb') as csvfile: reader = csv.reader(csvfile, delimiter='\t', quotechar='|') for j, row in enumerate(reader): T1 = float(row[0].replace(",",".")) T2 = float(row[1].replace(",",".")) label = "%s_%s_%.2f_%.2f.wav" % (wavFile, row[2], T1, T2) label = label.replace(" ", "_") xtemp = x[int(round(T1*Fs)):int(round(T2*Fs))] print T1, T2, label, xtemp.shape wavfile.write(label, Fs, xtemp)
python
def annotation2files(wavFile, csvFile): ''' Break an audio stream to segments of interest, defined by a csv file - wavFile: path to input wavfile - csvFile: path to csvFile of segment limits Input CSV file must be of the format <T1>\t<T2>\t<Label> ''' [Fs, x] = audioBasicIO.readAudioFile(wavFile) with open(csvFile, 'rb') as csvfile: reader = csv.reader(csvfile, delimiter='\t', quotechar='|') for j, row in enumerate(reader): T1 = float(row[0].replace(",",".")) T2 = float(row[1].replace(",",".")) label = "%s_%s_%.2f_%.2f.wav" % (wavFile, row[2], T1, T2) label = label.replace(" ", "_") xtemp = x[int(round(T1*Fs)):int(round(T2*Fs))] print T1, T2, label, xtemp.shape wavfile.write(label, Fs, xtemp)
[ "def", "annotation2files", "(", "wavFile", ",", "csvFile", ")", ":", "[", "Fs", ",", "x", "]", "=", "audioBasicIO", ".", "readAudioFile", "(", "wavFile", ")", "with", "open", "(", "csvFile", ",", "'rb'", ")", "as", "csvfile", ":", "reader", "=", "csv", ".", "reader", "(", "csvfile", ",", "delimiter", "=", "'\\t'", ",", "quotechar", "=", "'|'", ")", "for", "j", ",", "row", "in", "enumerate", "(", "reader", ")", ":", "T1", "=", "float", "(", "row", "[", "0", "]", ".", "replace", "(", "\",\"", ",", "\".\"", ")", ")", "T2", "=", "float", "(", "row", "[", "1", "]", ".", "replace", "(", "\",\"", ",", "\".\"", ")", ")", "label", "=", "\"%s_%s_%.2f_%.2f.wav\"", "%", "(", "wavFile", ",", "row", "[", "2", "]", ",", "T1", ",", "T2", ")", "label", "=", "label", ".", "replace", "(", "\" \"", ",", "\"_\"", ")", "xtemp", "=", "x", "[", "int", "(", "round", "(", "T1", "*", "Fs", ")", ")", ":", "int", "(", "round", "(", "T2", "*", "Fs", ")", ")", "]", "print", "T1", ",", "T2", ",", "label", ",", "xtemp", ".", "shape", "wavfile", ".", "write", "(", "label", ",", "Fs", ",", "xtemp", ")" ]
Break an audio stream to segments of interest, defined by a csv file - wavFile: path to input wavfile - csvFile: path to csvFile of segment limits Input CSV file must be of the format <T1>\t<T2>\t<Label>
[ "Break", "an", "audio", "stream", "to", "segments", "of", "interest", "defined", "by", "a", "csv", "file", "-", "wavFile", ":", "path", "to", "input", "wavfile", "-", "csvFile", ":", "path", "to", "csvFile", "of", "segment", "limits", "Input", "CSV", "file", "must", "be", "of", "the", "format", "<T1", ">", "\\", "t<T2", ">", "\\", "t<Label", ">" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audacityAnnotation2WAVs.py#L9-L30
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioBasicIO.py
convertDirMP3ToWav
def convertDirMP3ToWav(dirName, Fs, nC, useMp3TagsAsName = False): ''' This function converts the MP3 files stored in a folder to WAV. If required, the output names of the WAV files are based on MP3 tags, otherwise the same names are used. ARGUMENTS: - dirName: the path of the folder where the MP3s are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channels of the generated WAV files - useMp3TagsAsName: True if the WAV filename is generated on MP3 tags ''' types = (dirName+os.sep+'*.mp3',) # the tuple of file types filesToProcess = [] for files in types: filesToProcess.extend(glob.glob(files)) for f in filesToProcess: #tag.link(f) audioFile = eyed3.load(f) if useMp3TagsAsName and audioFile.tag != None: artist = audioFile.tag.artist title = audioFile.tag.title if artist!=None and title!=None: if len(title)>0 and len(artist)>0: wavFileName = ntpath.split(f)[0] + os.sep + artist.replace(","," ") + " --- " + title.replace(","," ") + ".wav" else: wavFileName = f.replace(".mp3",".wav") else: wavFileName = f.replace(".mp3",".wav") else: wavFileName = f.replace(".mp3",".wav") command = "avconv -i \"" + f + "\" -ar " +str(Fs) + " -ac " + str(nC) + " \"" + wavFileName + "\""; print(command) os.system(command.decode('unicode_escape').encode('ascii','ignore').replace("\0",""))
python
def convertDirMP3ToWav(dirName, Fs, nC, useMp3TagsAsName = False):
    '''
    This function converts the MP3 files stored in a folder to WAV. If required,
    the output names of the WAV files are based on MP3 tags, otherwise the same
    names are used.
    ARGUMENTS:
     - dirName:          the path of the folder where the MP3s are stored
     - Fs:               the sampling rate of the generated WAV files
     - nC:               the number of channels of the generated WAV files
     - useMp3TagsAsName: True if the WAV filename is generated on MP3 tags
    '''
    types = (dirName + os.sep + '*.mp3',)  # the tuple of file types
    filesToProcess = []
    for files in types:
        filesToProcess.extend(glob.glob(files))

    for f in filesToProcess:
        audioFile = eyed3.load(f)
        # Default output name: same path with the extension swapped to .wav.
        wavFileName = f.replace(".mp3", ".wav")
        if useMp3TagsAsName and audioFile.tag is not None:
            artist = audioFile.tag.artist
            title = audioFile.tag.title
            # Use "<artist> --- <title>.wav" only when both tags are non-empty.
            if artist is not None and title is not None and \
                    len(artist) > 0 and len(title) > 0:
                wavFileName = (ntpath.split(f)[0] + os.sep +
                               artist.replace(",", " ") + " --- " +
                               title.replace(",", " ") + ".wav")
        # SECURITY NOTE: the command runs through a shell; a filename
        # containing shell metacharacters (e.g. a double quote) can inject
        # commands. Prefer subprocess.call with an argument list.
        command = ("avconv -i \"" + f + "\" -ar " + str(Fs) + " -ac " +
                   str(nC) + " \"" + wavFileName + "\"")
        print(command)
        # Strip non-ASCII and NUL characters before handing the py2 str
        # to the shell.
        os.system(command.decode('unicode_escape').encode('ascii', 'ignore').replace("\0", ""))
[ "def", "convertDirMP3ToWav", "(", "dirName", ",", "Fs", ",", "nC", ",", "useMp3TagsAsName", "=", "False", ")", ":", "types", "=", "(", "dirName", "+", "os", ".", "sep", "+", "'*.mp3'", ",", ")", "# the tuple of file types", "filesToProcess", "=", "[", "]", "for", "files", "in", "types", ":", "filesToProcess", ".", "extend", "(", "glob", ".", "glob", "(", "files", ")", ")", "for", "f", "in", "filesToProcess", ":", "#tag.link(f)", "audioFile", "=", "eyed3", ".", "load", "(", "f", ")", "if", "useMp3TagsAsName", "and", "audioFile", ".", "tag", "!=", "None", ":", "artist", "=", "audioFile", ".", "tag", ".", "artist", "title", "=", "audioFile", ".", "tag", ".", "title", "if", "artist", "!=", "None", "and", "title", "!=", "None", ":", "if", "len", "(", "title", ")", ">", "0", "and", "len", "(", "artist", ")", ">", "0", ":", "wavFileName", "=", "ntpath", ".", "split", "(", "f", ")", "[", "0", "]", "+", "os", ".", "sep", "+", "artist", ".", "replace", "(", "\",\"", ",", "\" \"", ")", "+", "\" --- \"", "+", "title", ".", "replace", "(", "\",\"", ",", "\" \"", ")", "+", "\".wav\"", "else", ":", "wavFileName", "=", "f", ".", "replace", "(", "\".mp3\"", ",", "\".wav\"", ")", "else", ":", "wavFileName", "=", "f", ".", "replace", "(", "\".mp3\"", ",", "\".wav\"", ")", "else", ":", "wavFileName", "=", "f", ".", "replace", "(", "\".mp3\"", ",", "\".wav\"", ")", "command", "=", "\"avconv -i \\\"\"", "+", "f", "+", "\"\\\" -ar \"", "+", "str", "(", "Fs", ")", "+", "\" -ac \"", "+", "str", "(", "nC", ")", "+", "\" \\\"\"", "+", "wavFileName", "+", "\"\\\"\"", "print", "(", "command", ")", "os", ".", "system", "(", "command", ".", "decode", "(", "'unicode_escape'", ")", ".", "encode", "(", "'ascii'", ",", "'ignore'", ")", ".", "replace", "(", "\"\\0\"", ",", "\"\"", ")", ")" ]
This function converts the MP3 files stored in a folder to WAV. If required, the output names of the WAV files are based on MP3 tags, otherwise the same names are used. ARGUMENTS: - dirName: the path of the folder where the MP3s are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channels of the generated WAV files - useMp3TagsAsName: True if the WAV filename is generated on MP3 tags
[ "This", "function", "converts", "the", "MP3", "files", "stored", "in", "a", "folder", "to", "WAV", ".", "If", "required", "the", "output", "names", "of", "the", "WAV", "files", "are", "based", "on", "MP3", "tags", "otherwise", "the", "same", "names", "are", "used", ".", "ARGUMENTS", ":", "-", "dirName", ":", "the", "path", "of", "the", "folder", "where", "the", "MP3s", "are", "stored", "-", "Fs", ":", "the", "sampling", "rate", "of", "the", "generated", "WAV", "files", "-", "nC", ":", "the", "number", "of", "channels", "of", "the", "generated", "WAV", "files", "-", "useMp3TagsAsName", ":", "True", "if", "the", "WAV", "filename", "is", "generated", "on", "MP3", "tags" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioBasicIO.py#L5-L38
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioBasicIO.py
convertFsDirWavToWav
def convertFsDirWavToWav(dirName, Fs, nC): ''' This function converts the WAV files stored in a folder to WAV using a different sampling freq and number of channels. ARGUMENTS: - dirName: the path of the folder where the WAVs are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channesl of the generated WAV files ''' types = (dirName+os.sep+'*.wav',) # the tuple of file types filesToProcess = [] for files in types: filesToProcess.extend(glob.glob(files)) newDir = dirName + os.sep + "Fs" + str(Fs) + "_" + "NC"+str(nC) if os.path.exists(newDir) and newDir!=".": shutil.rmtree(newDir) os.makedirs(newDir) for f in filesToProcess: _, wavFileName = ntpath.split(f) command = "avconv -i \"" + f + "\" -ar " +str(Fs) + " -ac " + str(nC) + " \"" + newDir + os.sep + wavFileName + "\""; print(command) os.system(command)
python
def convertFsDirWavToWav(dirName, Fs, nC): ''' This function converts the WAV files stored in a folder to WAV using a different sampling freq and number of channels. ARGUMENTS: - dirName: the path of the folder where the WAVs are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channesl of the generated WAV files ''' types = (dirName+os.sep+'*.wav',) # the tuple of file types filesToProcess = [] for files in types: filesToProcess.extend(glob.glob(files)) newDir = dirName + os.sep + "Fs" + str(Fs) + "_" + "NC"+str(nC) if os.path.exists(newDir) and newDir!=".": shutil.rmtree(newDir) os.makedirs(newDir) for f in filesToProcess: _, wavFileName = ntpath.split(f) command = "avconv -i \"" + f + "\" -ar " +str(Fs) + " -ac " + str(nC) + " \"" + newDir + os.sep + wavFileName + "\""; print(command) os.system(command)
[ "def", "convertFsDirWavToWav", "(", "dirName", ",", "Fs", ",", "nC", ")", ":", "types", "=", "(", "dirName", "+", "os", ".", "sep", "+", "'*.wav'", ",", ")", "# the tuple of file types", "filesToProcess", "=", "[", "]", "for", "files", "in", "types", ":", "filesToProcess", ".", "extend", "(", "glob", ".", "glob", "(", "files", ")", ")", "newDir", "=", "dirName", "+", "os", ".", "sep", "+", "\"Fs\"", "+", "str", "(", "Fs", ")", "+", "\"_\"", "+", "\"NC\"", "+", "str", "(", "nC", ")", "if", "os", ".", "path", ".", "exists", "(", "newDir", ")", "and", "newDir", "!=", "\".\"", ":", "shutil", ".", "rmtree", "(", "newDir", ")", "os", ".", "makedirs", "(", "newDir", ")", "for", "f", "in", "filesToProcess", ":", "_", ",", "wavFileName", "=", "ntpath", ".", "split", "(", "f", ")", "command", "=", "\"avconv -i \\\"\"", "+", "f", "+", "\"\\\" -ar \"", "+", "str", "(", "Fs", ")", "+", "\" -ac \"", "+", "str", "(", "nC", ")", "+", "\" \\\"\"", "+", "newDir", "+", "os", ".", "sep", "+", "wavFileName", "+", "\"\\\"\"", "print", "(", "command", ")", "os", ".", "system", "(", "command", ")" ]
This function converts the WAV files stored in a folder to WAV using a different sampling freq and number of channels. ARGUMENTS: - dirName: the path of the folder where the WAVs are stored - Fs: the sampling rate of the generated WAV files - nC: the number of channesl of the generated WAV files
[ "This", "function", "converts", "the", "WAV", "files", "stored", "in", "a", "folder", "to", "WAV", "using", "a", "different", "sampling", "freq", "and", "number", "of", "channels", ".", "ARGUMENTS", ":", "-", "dirName", ":", "the", "path", "of", "the", "folder", "where", "the", "WAVs", "are", "stored", "-", "Fs", ":", "the", "sampling", "rate", "of", "the", "generated", "WAV", "files", "-", "nC", ":", "the", "number", "of", "channesl", "of", "the", "generated", "WAV", "files" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioBasicIO.py#L40-L64
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioBasicIO.py
readAudioFile
def readAudioFile(path): ''' This function returns a numpy array that stores the audio samples of a specified WAV of AIFF file ''' extension = os.path.splitext(path)[1] try: #if extension.lower() == '.wav': #[Fs, x] = wavfile.read(path) if extension.lower() == '.aif' or extension.lower() == '.aiff': s = aifc.open(path, 'r') nframes = s.getnframes() strsig = s.readframes(nframes) x = numpy.fromstring(strsig, numpy.short).byteswap() Fs = s.getframerate() elif extension.lower() == '.mp3' or extension.lower() == '.wav' or extension.lower() == '.au' or extension.lower() == '.ogg': try: audiofile = AudioSegment.from_file(path) #except pydub.exceptions.CouldntDecodeError: except: print("Error: file not found or other I/O error. " "(DECODING FAILED)") return (-1,-1) if audiofile.sample_width==2: data = numpy.fromstring(audiofile._data, numpy.int16) elif audiofile.sample_width==4: data = numpy.fromstring(audiofile._data, numpy.int32) else: return (-1, -1) Fs = audiofile.frame_rate x = [] for chn in list(range(audiofile.channels)): x.append(data[chn::audiofile.channels]) x = numpy.array(x).T else: print("Error in readAudioFile(): Unknown file type!") return (-1,-1) except IOError: print("Error: file not found or other I/O error.") return (-1,-1) if x.ndim==2: if x.shape[1]==1: x = x.flatten() return (Fs, x)
python
def readAudioFile(path): ''' This function returns a numpy array that stores the audio samples of a specified WAV of AIFF file ''' extension = os.path.splitext(path)[1] try: #if extension.lower() == '.wav': #[Fs, x] = wavfile.read(path) if extension.lower() == '.aif' or extension.lower() == '.aiff': s = aifc.open(path, 'r') nframes = s.getnframes() strsig = s.readframes(nframes) x = numpy.fromstring(strsig, numpy.short).byteswap() Fs = s.getframerate() elif extension.lower() == '.mp3' or extension.lower() == '.wav' or extension.lower() == '.au' or extension.lower() == '.ogg': try: audiofile = AudioSegment.from_file(path) #except pydub.exceptions.CouldntDecodeError: except: print("Error: file not found or other I/O error. " "(DECODING FAILED)") return (-1,-1) if audiofile.sample_width==2: data = numpy.fromstring(audiofile._data, numpy.int16) elif audiofile.sample_width==4: data = numpy.fromstring(audiofile._data, numpy.int32) else: return (-1, -1) Fs = audiofile.frame_rate x = [] for chn in list(range(audiofile.channels)): x.append(data[chn::audiofile.channels]) x = numpy.array(x).T else: print("Error in readAudioFile(): Unknown file type!") return (-1,-1) except IOError: print("Error: file not found or other I/O error.") return (-1,-1) if x.ndim==2: if x.shape[1]==1: x = x.flatten() return (Fs, x)
[ "def", "readAudioFile", "(", "path", ")", ":", "extension", "=", "os", ".", "path", ".", "splitext", "(", "path", ")", "[", "1", "]", "try", ":", "#if extension.lower() == '.wav':", "#[Fs, x] = wavfile.read(path)", "if", "extension", ".", "lower", "(", ")", "==", "'.aif'", "or", "extension", ".", "lower", "(", ")", "==", "'.aiff'", ":", "s", "=", "aifc", ".", "open", "(", "path", ",", "'r'", ")", "nframes", "=", "s", ".", "getnframes", "(", ")", "strsig", "=", "s", ".", "readframes", "(", "nframes", ")", "x", "=", "numpy", ".", "fromstring", "(", "strsig", ",", "numpy", ".", "short", ")", ".", "byteswap", "(", ")", "Fs", "=", "s", ".", "getframerate", "(", ")", "elif", "extension", ".", "lower", "(", ")", "==", "'.mp3'", "or", "extension", ".", "lower", "(", ")", "==", "'.wav'", "or", "extension", ".", "lower", "(", ")", "==", "'.au'", "or", "extension", ".", "lower", "(", ")", "==", "'.ogg'", ":", "try", ":", "audiofile", "=", "AudioSegment", ".", "from_file", "(", "path", ")", "#except pydub.exceptions.CouldntDecodeError:", "except", ":", "print", "(", "\"Error: file not found or other I/O error. 
\"", "\"(DECODING FAILED)\"", ")", "return", "(", "-", "1", ",", "-", "1", ")", "if", "audiofile", ".", "sample_width", "==", "2", ":", "data", "=", "numpy", ".", "fromstring", "(", "audiofile", ".", "_data", ",", "numpy", ".", "int16", ")", "elif", "audiofile", ".", "sample_width", "==", "4", ":", "data", "=", "numpy", ".", "fromstring", "(", "audiofile", ".", "_data", ",", "numpy", ".", "int32", ")", "else", ":", "return", "(", "-", "1", ",", "-", "1", ")", "Fs", "=", "audiofile", ".", "frame_rate", "x", "=", "[", "]", "for", "chn", "in", "list", "(", "range", "(", "audiofile", ".", "channels", ")", ")", ":", "x", ".", "append", "(", "data", "[", "chn", ":", ":", "audiofile", ".", "channels", "]", ")", "x", "=", "numpy", ".", "array", "(", "x", ")", ".", "T", "else", ":", "print", "(", "\"Error in readAudioFile(): Unknown file type!\"", ")", "return", "(", "-", "1", ",", "-", "1", ")", "except", "IOError", ":", "print", "(", "\"Error: file not found or other I/O error.\"", ")", "return", "(", "-", "1", ",", "-", "1", ")", "if", "x", ".", "ndim", "==", "2", ":", "if", "x", ".", "shape", "[", "1", "]", "==", "1", ":", "x", "=", "x", ".", "flatten", "(", ")", "return", "(", "Fs", ",", "x", ")" ]
This function returns a numpy array that stores the audio samples of a specified WAV of AIFF file
[ "This", "function", "returns", "a", "numpy", "array", "that", "stores", "the", "audio", "samples", "of", "a", "specified", "WAV", "of", "AIFF", "file" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioBasicIO.py#L66-L112
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioBasicIO.py
stereo2mono
def stereo2mono(x): ''' This function converts the input signal (stored in a numpy array) to MONO (if it is STEREO) ''' if isinstance(x, int): return -1 if x.ndim==1: return x elif x.ndim==2: if x.shape[1]==1: return x.flatten() else: if x.shape[1]==2: return ( (x[:,1] / 2) + (x[:,0] / 2) ) else: return -1
python
def stereo2mono(x): ''' This function converts the input signal (stored in a numpy array) to MONO (if it is STEREO) ''' if isinstance(x, int): return -1 if x.ndim==1: return x elif x.ndim==2: if x.shape[1]==1: return x.flatten() else: if x.shape[1]==2: return ( (x[:,1] / 2) + (x[:,0] / 2) ) else: return -1
[ "def", "stereo2mono", "(", "x", ")", ":", "if", "isinstance", "(", "x", ",", "int", ")", ":", "return", "-", "1", "if", "x", ".", "ndim", "==", "1", ":", "return", "x", "elif", "x", ".", "ndim", "==", "2", ":", "if", "x", ".", "shape", "[", "1", "]", "==", "1", ":", "return", "x", ".", "flatten", "(", ")", "else", ":", "if", "x", ".", "shape", "[", "1", "]", "==", "2", ":", "return", "(", "(", "x", "[", ":", ",", "1", "]", "/", "2", ")", "+", "(", "x", "[", ":", ",", "0", "]", "/", "2", ")", ")", "else", ":", "return", "-", "1" ]
This function converts the input signal (stored in a numpy array) to MONO (if it is STEREO)
[ "This", "function", "converts", "the", "input", "signal", "(", "stored", "in", "a", "numpy", "array", ")", "to", "MONO", "(", "if", "it", "is", "STEREO", ")" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioBasicIO.py#L114-L130
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
selfSimilarityMatrix
def selfSimilarityMatrix(featureVectors): ''' This function computes the self-similarity matrix for a sequence of feature vectors. ARGUMENTS: - featureVectors: a numpy matrix (nDims x nVectors) whose i-th column corresponds to the i-th feature vector RETURNS: - S: the self-similarity matrix (nVectors x nVectors) ''' [nDims, nVectors] = featureVectors.shape [featureVectors2, MEAN, STD] = aT.normalizeFeatures([featureVectors.T]) featureVectors2 = featureVectors2[0].T S = 1.0 - distance.squareform(distance.pdist(featureVectors2.T, 'cosine')) return S
python
def selfSimilarityMatrix(featureVectors): ''' This function computes the self-similarity matrix for a sequence of feature vectors. ARGUMENTS: - featureVectors: a numpy matrix (nDims x nVectors) whose i-th column corresponds to the i-th feature vector RETURNS: - S: the self-similarity matrix (nVectors x nVectors) ''' [nDims, nVectors] = featureVectors.shape [featureVectors2, MEAN, STD] = aT.normalizeFeatures([featureVectors.T]) featureVectors2 = featureVectors2[0].T S = 1.0 - distance.squareform(distance.pdist(featureVectors2.T, 'cosine')) return S
[ "def", "selfSimilarityMatrix", "(", "featureVectors", ")", ":", "[", "nDims", ",", "nVectors", "]", "=", "featureVectors", ".", "shape", "[", "featureVectors2", ",", "MEAN", ",", "STD", "]", "=", "aT", ".", "normalizeFeatures", "(", "[", "featureVectors", ".", "T", "]", ")", "featureVectors2", "=", "featureVectors2", "[", "0", "]", ".", "T", "S", "=", "1.0", "-", "distance", ".", "squareform", "(", "distance", ".", "pdist", "(", "featureVectors2", ".", "T", ",", "'cosine'", ")", ")", "return", "S" ]
This function computes the self-similarity matrix for a sequence of feature vectors. ARGUMENTS: - featureVectors: a numpy matrix (nDims x nVectors) whose i-th column corresponds to the i-th feature vector RETURNS: - S: the self-similarity matrix (nVectors x nVectors)
[ "This", "function", "computes", "the", "self", "-", "similarity", "matrix", "for", "a", "sequence", "of", "feature", "vectors", ".", "ARGUMENTS", ":", "-", "featureVectors", ":", "a", "numpy", "matrix", "(", "nDims", "x", "nVectors", ")", "whose", "i", "-", "th", "column", "corresponds", "to", "the", "i", "-", "th", "feature", "vector" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L38-L54
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
flags2segs
def flags2segs(flags, window): ''' ARGUMENTS: - flags: a sequence of class flags (per time window) - window: window duration (in seconds) RETURNS: - segs: a sequence of segment's limits: segs[i,0] is start and segs[i,1] are start and end point of segment i - classes: a sequence of class flags: class[i] is the class ID of the i-th segment ''' preFlag = 0 cur_flag = 0 n_segs = 0 cur_val = flags[cur_flag] segsList = [] classes = [] while (cur_flag < len(flags) - 1): stop = 0 preFlag = cur_flag preVal = cur_val while (stop == 0): cur_flag = cur_flag + 1 tempVal = flags[cur_flag] if ((tempVal != cur_val) | (cur_flag == len(flags) - 1)): # stop n_segs = n_segs + 1 stop = 1 cur_seg = cur_val cur_val = flags[cur_flag] segsList.append((cur_flag * window)) classes.append(preVal) segs = numpy.zeros((len(segsList), 2)) for i in range(len(segsList)): if i > 0: segs[i, 0] = segsList[i-1] segs[i, 1] = segsList[i] return (segs, classes)
python
def flags2segs(flags, window): ''' ARGUMENTS: - flags: a sequence of class flags (per time window) - window: window duration (in seconds) RETURNS: - segs: a sequence of segment's limits: segs[i,0] is start and segs[i,1] are start and end point of segment i - classes: a sequence of class flags: class[i] is the class ID of the i-th segment ''' preFlag = 0 cur_flag = 0 n_segs = 0 cur_val = flags[cur_flag] segsList = [] classes = [] while (cur_flag < len(flags) - 1): stop = 0 preFlag = cur_flag preVal = cur_val while (stop == 0): cur_flag = cur_flag + 1 tempVal = flags[cur_flag] if ((tempVal != cur_val) | (cur_flag == len(flags) - 1)): # stop n_segs = n_segs + 1 stop = 1 cur_seg = cur_val cur_val = flags[cur_flag] segsList.append((cur_flag * window)) classes.append(preVal) segs = numpy.zeros((len(segsList), 2)) for i in range(len(segsList)): if i > 0: segs[i, 0] = segsList[i-1] segs[i, 1] = segsList[i] return (segs, classes)
[ "def", "flags2segs", "(", "flags", ",", "window", ")", ":", "preFlag", "=", "0", "cur_flag", "=", "0", "n_segs", "=", "0", "cur_val", "=", "flags", "[", "cur_flag", "]", "segsList", "=", "[", "]", "classes", "=", "[", "]", "while", "(", "cur_flag", "<", "len", "(", "flags", ")", "-", "1", ")", ":", "stop", "=", "0", "preFlag", "=", "cur_flag", "preVal", "=", "cur_val", "while", "(", "stop", "==", "0", ")", ":", "cur_flag", "=", "cur_flag", "+", "1", "tempVal", "=", "flags", "[", "cur_flag", "]", "if", "(", "(", "tempVal", "!=", "cur_val", ")", "|", "(", "cur_flag", "==", "len", "(", "flags", ")", "-", "1", ")", ")", ":", "# stop", "n_segs", "=", "n_segs", "+", "1", "stop", "=", "1", "cur_seg", "=", "cur_val", "cur_val", "=", "flags", "[", "cur_flag", "]", "segsList", ".", "append", "(", "(", "cur_flag", "*", "window", ")", ")", "classes", ".", "append", "(", "preVal", ")", "segs", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "segsList", ")", ",", "2", ")", ")", "for", "i", "in", "range", "(", "len", "(", "segsList", ")", ")", ":", "if", "i", ">", "0", ":", "segs", "[", "i", ",", "0", "]", "=", "segsList", "[", "i", "-", "1", "]", "segs", "[", "i", ",", "1", "]", "=", "segsList", "[", "i", "]", "return", "(", "segs", ",", "classes", ")" ]
ARGUMENTS: - flags: a sequence of class flags (per time window) - window: window duration (in seconds) RETURNS: - segs: a sequence of segment's limits: segs[i,0] is start and segs[i,1] are start and end point of segment i - classes: a sequence of class flags: class[i] is the class ID of the i-th segment
[ "ARGUMENTS", ":", "-", "flags", ":", "a", "sequence", "of", "class", "flags", "(", "per", "time", "window", ")", "-", "window", ":", "window", "duration", "(", "in", "seconds", ")" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L57-L97
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
segs2flags
def segs2flags(seg_start, seg_end, seg_label, win_size): ''' This function converts segment endpoints and respective segment labels to fix-sized class labels. ARGUMENTS: - seg_start: segment start points (in seconds) - seg_end: segment endpoints (in seconds) - seg_label: segment labels - win_size: fix-sized window (in seconds) RETURNS: - flags: numpy array of class indices - class_names: list of classnames (strings) ''' flags = [] class_names = list(set(seg_label)) curPos = win_size / 2.0 while curPos < seg_end[-1]: for i in range(len(seg_start)): if curPos > seg_start[i] and curPos <= seg_end[i]: break flags.append(class_names.index(seg_label[i])) curPos += win_size return numpy.array(flags), class_names
python
def segs2flags(seg_start, seg_end, seg_label, win_size): ''' This function converts segment endpoints and respective segment labels to fix-sized class labels. ARGUMENTS: - seg_start: segment start points (in seconds) - seg_end: segment endpoints (in seconds) - seg_label: segment labels - win_size: fix-sized window (in seconds) RETURNS: - flags: numpy array of class indices - class_names: list of classnames (strings) ''' flags = [] class_names = list(set(seg_label)) curPos = win_size / 2.0 while curPos < seg_end[-1]: for i in range(len(seg_start)): if curPos > seg_start[i] and curPos <= seg_end[i]: break flags.append(class_names.index(seg_label[i])) curPos += win_size return numpy.array(flags), class_names
[ "def", "segs2flags", "(", "seg_start", ",", "seg_end", ",", "seg_label", ",", "win_size", ")", ":", "flags", "=", "[", "]", "class_names", "=", "list", "(", "set", "(", "seg_label", ")", ")", "curPos", "=", "win_size", "/", "2.0", "while", "curPos", "<", "seg_end", "[", "-", "1", "]", ":", "for", "i", "in", "range", "(", "len", "(", "seg_start", ")", ")", ":", "if", "curPos", ">", "seg_start", "[", "i", "]", "and", "curPos", "<=", "seg_end", "[", "i", "]", ":", "break", "flags", ".", "append", "(", "class_names", ".", "index", "(", "seg_label", "[", "i", "]", ")", ")", "curPos", "+=", "win_size", "return", "numpy", ".", "array", "(", "flags", ")", ",", "class_names" ]
This function converts segment endpoints and respective segment labels to fix-sized class labels. ARGUMENTS: - seg_start: segment start points (in seconds) - seg_end: segment endpoints (in seconds) - seg_label: segment labels - win_size: fix-sized window (in seconds) RETURNS: - flags: numpy array of class indices - class_names: list of classnames (strings)
[ "This", "function", "converts", "segment", "endpoints", "and", "respective", "segment", "labels", "to", "fix", "-", "sized", "class", "labels", ".", "ARGUMENTS", ":", "-", "seg_start", ":", "segment", "start", "points", "(", "in", "seconds", ")", "-", "seg_end", ":", "segment", "endpoints", "(", "in", "seconds", ")", "-", "seg_label", ":", "segment", "labels", "-", "win_size", ":", "fix", "-", "sized", "window", "(", "in", "seconds", ")", "RETURNS", ":", "-", "flags", ":", "numpy", "array", "of", "class", "indices", "-", "class_names", ":", "list", "of", "classnames", "(", "strings", ")" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L100-L122
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
computePreRec
def computePreRec(cm, class_names): ''' This function computes the precision, recall and f1 measures, given a confusion matrix ''' n_classes = cm.shape[0] if len(class_names) != n_classes: print("Error in computePreRec! Confusion matrix and class_names " "list must be of the same size!") return precision = [] recall = [] f1 = [] for i, c in enumerate(class_names): precision.append(cm[i,i] / numpy.sum(cm[:,i])) recall.append(cm[i,i] / numpy.sum(cm[i,:])) f1.append( 2 * precision[-1] * recall[-1] / (precision[-1] + recall[-1])) return recall, precision, f1
python
def computePreRec(cm, class_names): ''' This function computes the precision, recall and f1 measures, given a confusion matrix ''' n_classes = cm.shape[0] if len(class_names) != n_classes: print("Error in computePreRec! Confusion matrix and class_names " "list must be of the same size!") return precision = [] recall = [] f1 = [] for i, c in enumerate(class_names): precision.append(cm[i,i] / numpy.sum(cm[:,i])) recall.append(cm[i,i] / numpy.sum(cm[i,:])) f1.append( 2 * precision[-1] * recall[-1] / (precision[-1] + recall[-1])) return recall, precision, f1
[ "def", "computePreRec", "(", "cm", ",", "class_names", ")", ":", "n_classes", "=", "cm", ".", "shape", "[", "0", "]", "if", "len", "(", "class_names", ")", "!=", "n_classes", ":", "print", "(", "\"Error in computePreRec! Confusion matrix and class_names \"", "\"list must be of the same size!\"", ")", "return", "precision", "=", "[", "]", "recall", "=", "[", "]", "f1", "=", "[", "]", "for", "i", ",", "c", "in", "enumerate", "(", "class_names", ")", ":", "precision", ".", "append", "(", "cm", "[", "i", ",", "i", "]", "/", "numpy", ".", "sum", "(", "cm", "[", ":", ",", "i", "]", ")", ")", "recall", ".", "append", "(", "cm", "[", "i", ",", "i", "]", "/", "numpy", ".", "sum", "(", "cm", "[", "i", ",", ":", "]", ")", ")", "f1", ".", "append", "(", "2", "*", "precision", "[", "-", "1", "]", "*", "recall", "[", "-", "1", "]", "/", "(", "precision", "[", "-", "1", "]", "+", "recall", "[", "-", "1", "]", ")", ")", "return", "recall", ",", "precision", ",", "f1" ]
This function computes the precision, recall and f1 measures, given a confusion matrix
[ "This", "function", "computes", "the", "precision", "recall", "and", "f1", "measures", "given", "a", "confusion", "matrix" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L124-L141
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
readSegmentGT
def readSegmentGT(gt_file): ''' This function reads a segmentation ground truth file, following a simple CSV format with the following columns: <segment start>,<segment end>,<class label> ARGUMENTS: - gt_file: the path of the CSV segment file RETURNS: - seg_start: a numpy array of segments' start positions - seg_end: a numpy array of segments' ending positions - seg_label: a list of respective class labels (strings) ''' f = open(gt_file, 'rt') reader = csv.reader(f, delimiter=',') seg_start = [] seg_end = [] seg_label = [] for row in reader: if len(row) == 3: seg_start.append(float(row[0])) seg_end.append(float(row[1])) #if row[2]!="other": # seg_label.append((row[2])) #else: # seg_label.append("silence") seg_label.append((row[2])) return numpy.array(seg_start), numpy.array(seg_end), seg_label
python
def readSegmentGT(gt_file): ''' This function reads a segmentation ground truth file, following a simple CSV format with the following columns: <segment start>,<segment end>,<class label> ARGUMENTS: - gt_file: the path of the CSV segment file RETURNS: - seg_start: a numpy array of segments' start positions - seg_end: a numpy array of segments' ending positions - seg_label: a list of respective class labels (strings) ''' f = open(gt_file, 'rt') reader = csv.reader(f, delimiter=',') seg_start = [] seg_end = [] seg_label = [] for row in reader: if len(row) == 3: seg_start.append(float(row[0])) seg_end.append(float(row[1])) #if row[2]!="other": # seg_label.append((row[2])) #else: # seg_label.append("silence") seg_label.append((row[2])) return numpy.array(seg_start), numpy.array(seg_end), seg_label
[ "def", "readSegmentGT", "(", "gt_file", ")", ":", "f", "=", "open", "(", "gt_file", ",", "'rt'", ")", "reader", "=", "csv", ".", "reader", "(", "f", ",", "delimiter", "=", "','", ")", "seg_start", "=", "[", "]", "seg_end", "=", "[", "]", "seg_label", "=", "[", "]", "for", "row", "in", "reader", ":", "if", "len", "(", "row", ")", "==", "3", ":", "seg_start", ".", "append", "(", "float", "(", "row", "[", "0", "]", ")", ")", "seg_end", ".", "append", "(", "float", "(", "row", "[", "1", "]", ")", ")", "#if row[2]!=\"other\":", "# seg_label.append((row[2]))", "#else:", "# seg_label.append(\"silence\")", "seg_label", ".", "append", "(", "(", "row", "[", "2", "]", ")", ")", "return", "numpy", ".", "array", "(", "seg_start", ")", ",", "numpy", ".", "array", "(", "seg_end", ")", ",", "seg_label" ]
This function reads a segmentation ground truth file, following a simple CSV format with the following columns: <segment start>,<segment end>,<class label> ARGUMENTS: - gt_file: the path of the CSV segment file RETURNS: - seg_start: a numpy array of segments' start positions - seg_end: a numpy array of segments' ending positions - seg_label: a list of respective class labels (strings)
[ "This", "function", "reads", "a", "segmentation", "ground", "truth", "file", "following", "a", "simple", "CSV", "format", "with", "the", "following", "columns", ":", "<segment", "start", ">", "<segment", "end", ">", "<class", "label", ">" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L144-L170
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
plotSegmentationResults
def plotSegmentationResults(flags_ind, flags_ind_gt, class_names, mt_step, ONLY_EVALUATE=False): ''' This function plots statistics on the classification-segmentation results produced either by the fix-sized supervised method or the HMM method. It also computes the overall accuracy achieved by the respective method if ground-truth is available. ''' flags = [class_names[int(f)] for f in flags_ind] (segs, classes) = flags2segs(flags, mt_step) min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0]) if min_len > 0: accuracy = numpy.sum(flags_ind[0:min_len] == flags_ind_gt[0:min_len]) / float(min_len) else: accuracy = -1 if not ONLY_EVALUATE: duration = segs[-1, 1] s_percentages = numpy.zeros((len(class_names), 1)) percentages = numpy.zeros((len(class_names), 1)) av_durations = numpy.zeros((len(class_names), 1)) for iSeg in range(segs.shape[0]): s_percentages[class_names.index(classes[iSeg])] += \ (segs[iSeg, 1]-segs[iSeg, 0]) for i in range(s_percentages.shape[0]): percentages[i] = 100.0 * s_percentages[i] / duration S = sum(1 for c in classes if c == class_names[i]) if S > 0: av_durations[i] = s_percentages[i] / S else: av_durations[i] = 0.0 for i in range(percentages.shape[0]): print(class_names[i], percentages[i], av_durations[i]) font = {'size': 10} plt.rc('font', **font) fig = plt.figure() ax1 = fig.add_subplot(211) ax1.set_yticks(numpy.array(range(len(class_names)))) ax1.axis((0, duration, -1, len(class_names))) ax1.set_yticklabels(class_names) ax1.plot(numpy.array(range(len(flags_ind))) * mt_step + mt_step / 2.0, flags_ind) if flags_ind_gt.shape[0] > 0: ax1.plot(numpy.array(range(len(flags_ind_gt))) * mt_step + mt_step / 2.0, flags_ind_gt + 0.05, '--r') plt.xlabel("time (seconds)") if accuracy >= 0: plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy)) ax2 = fig.add_subplot(223) plt.title("Classes percentage durations") ax2.axis((0, len(class_names) + 1, 0, 100)) ax2.set_xticks(numpy.array(range(len(class_names) + 1))) ax2.set_xticklabels([" "] + 
class_names) ax2.bar(numpy.array(range(len(class_names))) + 0.5, percentages) ax3 = fig.add_subplot(224) plt.title("Segment average duration per class") ax3.axis((0, len(class_names)+1, 0, av_durations.max())) ax3.set_xticks(numpy.array(range(len(class_names) + 1))) ax3.set_xticklabels([" "] + class_names) ax3.bar(numpy.array(range(len(class_names))) + 0.5, av_durations) fig.tight_layout() plt.show() return accuracy
python
def plotSegmentationResults(flags_ind, flags_ind_gt, class_names, mt_step, ONLY_EVALUATE=False): ''' This function plots statistics on the classification-segmentation results produced either by the fix-sized supervised method or the HMM method. It also computes the overall accuracy achieved by the respective method if ground-truth is available. ''' flags = [class_names[int(f)] for f in flags_ind] (segs, classes) = flags2segs(flags, mt_step) min_len = min(flags_ind.shape[0], flags_ind_gt.shape[0]) if min_len > 0: accuracy = numpy.sum(flags_ind[0:min_len] == flags_ind_gt[0:min_len]) / float(min_len) else: accuracy = -1 if not ONLY_EVALUATE: duration = segs[-1, 1] s_percentages = numpy.zeros((len(class_names), 1)) percentages = numpy.zeros((len(class_names), 1)) av_durations = numpy.zeros((len(class_names), 1)) for iSeg in range(segs.shape[0]): s_percentages[class_names.index(classes[iSeg])] += \ (segs[iSeg, 1]-segs[iSeg, 0]) for i in range(s_percentages.shape[0]): percentages[i] = 100.0 * s_percentages[i] / duration S = sum(1 for c in classes if c == class_names[i]) if S > 0: av_durations[i] = s_percentages[i] / S else: av_durations[i] = 0.0 for i in range(percentages.shape[0]): print(class_names[i], percentages[i], av_durations[i]) font = {'size': 10} plt.rc('font', **font) fig = plt.figure() ax1 = fig.add_subplot(211) ax1.set_yticks(numpy.array(range(len(class_names)))) ax1.axis((0, duration, -1, len(class_names))) ax1.set_yticklabels(class_names) ax1.plot(numpy.array(range(len(flags_ind))) * mt_step + mt_step / 2.0, flags_ind) if flags_ind_gt.shape[0] > 0: ax1.plot(numpy.array(range(len(flags_ind_gt))) * mt_step + mt_step / 2.0, flags_ind_gt + 0.05, '--r') plt.xlabel("time (seconds)") if accuracy >= 0: plt.title('Accuracy = {0:.1f}%'.format(100.0 * accuracy)) ax2 = fig.add_subplot(223) plt.title("Classes percentage durations") ax2.axis((0, len(class_names) + 1, 0, 100)) ax2.set_xticks(numpy.array(range(len(class_names) + 1))) ax2.set_xticklabels([" "] + 
class_names) ax2.bar(numpy.array(range(len(class_names))) + 0.5, percentages) ax3 = fig.add_subplot(224) plt.title("Segment average duration per class") ax3.axis((0, len(class_names)+1, 0, av_durations.max())) ax3.set_xticks(numpy.array(range(len(class_names) + 1))) ax3.set_xticklabels([" "] + class_names) ax3.bar(numpy.array(range(len(class_names))) + 0.5, av_durations) fig.tight_layout() plt.show() return accuracy
[ "def", "plotSegmentationResults", "(", "flags_ind", ",", "flags_ind_gt", ",", "class_names", ",", "mt_step", ",", "ONLY_EVALUATE", "=", "False", ")", ":", "flags", "=", "[", "class_names", "[", "int", "(", "f", ")", "]", "for", "f", "in", "flags_ind", "]", "(", "segs", ",", "classes", ")", "=", "flags2segs", "(", "flags", ",", "mt_step", ")", "min_len", "=", "min", "(", "flags_ind", ".", "shape", "[", "0", "]", ",", "flags_ind_gt", ".", "shape", "[", "0", "]", ")", "if", "min_len", ">", "0", ":", "accuracy", "=", "numpy", ".", "sum", "(", "flags_ind", "[", "0", ":", "min_len", "]", "==", "flags_ind_gt", "[", "0", ":", "min_len", "]", ")", "/", "float", "(", "min_len", ")", "else", ":", "accuracy", "=", "-", "1", "if", "not", "ONLY_EVALUATE", ":", "duration", "=", "segs", "[", "-", "1", ",", "1", "]", "s_percentages", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "class_names", ")", ",", "1", ")", ")", "percentages", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "class_names", ")", ",", "1", ")", ")", "av_durations", "=", "numpy", ".", "zeros", "(", "(", "len", "(", "class_names", ")", ",", "1", ")", ")", "for", "iSeg", "in", "range", "(", "segs", ".", "shape", "[", "0", "]", ")", ":", "s_percentages", "[", "class_names", ".", "index", "(", "classes", "[", "iSeg", "]", ")", "]", "+=", "(", "segs", "[", "iSeg", ",", "1", "]", "-", "segs", "[", "iSeg", ",", "0", "]", ")", "for", "i", "in", "range", "(", "s_percentages", ".", "shape", "[", "0", "]", ")", ":", "percentages", "[", "i", "]", "=", "100.0", "*", "s_percentages", "[", "i", "]", "/", "duration", "S", "=", "sum", "(", "1", "for", "c", "in", "classes", "if", "c", "==", "class_names", "[", "i", "]", ")", "if", "S", ">", "0", ":", "av_durations", "[", "i", "]", "=", "s_percentages", "[", "i", "]", "/", "S", "else", ":", "av_durations", "[", "i", "]", "=", "0.0", "for", "i", "in", "range", "(", "percentages", ".", "shape", "[", "0", "]", ")", ":", "print", "(", "class_names", "[", "i", "]", ",", 
"percentages", "[", "i", "]", ",", "av_durations", "[", "i", "]", ")", "font", "=", "{", "'size'", ":", "10", "}", "plt", ".", "rc", "(", "'font'", ",", "*", "*", "font", ")", "fig", "=", "plt", ".", "figure", "(", ")", "ax1", "=", "fig", ".", "add_subplot", "(", "211", ")", "ax1", ".", "set_yticks", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "class_names", ")", ")", ")", ")", "ax1", ".", "axis", "(", "(", "0", ",", "duration", ",", "-", "1", ",", "len", "(", "class_names", ")", ")", ")", "ax1", ".", "set_yticklabels", "(", "class_names", ")", "ax1", ".", "plot", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "flags_ind", ")", ")", ")", "*", "mt_step", "+", "mt_step", "/", "2.0", ",", "flags_ind", ")", "if", "flags_ind_gt", ".", "shape", "[", "0", "]", ">", "0", ":", "ax1", ".", "plot", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "flags_ind_gt", ")", ")", ")", "*", "mt_step", "+", "mt_step", "/", "2.0", ",", "flags_ind_gt", "+", "0.05", ",", "'--r'", ")", "plt", ".", "xlabel", "(", "\"time (seconds)\"", ")", "if", "accuracy", ">=", "0", ":", "plt", ".", "title", "(", "'Accuracy = {0:.1f}%'", ".", "format", "(", "100.0", "*", "accuracy", ")", ")", "ax2", "=", "fig", ".", "add_subplot", "(", "223", ")", "plt", ".", "title", "(", "\"Classes percentage durations\"", ")", "ax2", ".", "axis", "(", "(", "0", ",", "len", "(", "class_names", ")", "+", "1", ",", "0", ",", "100", ")", ")", "ax2", ".", "set_xticks", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "class_names", ")", "+", "1", ")", ")", ")", "ax2", ".", "set_xticklabels", "(", "[", "\" \"", "]", "+", "class_names", ")", "ax2", ".", "bar", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "class_names", ")", ")", ")", "+", "0.5", ",", "percentages", ")", "ax3", "=", "fig", ".", "add_subplot", "(", "224", ")", "plt", ".", "title", "(", "\"Segment average duration per class\"", ")", "ax3", ".", "axis", "(", "(", "0", ",", "len", "(", "class_names", 
")", "+", "1", ",", "0", ",", "av_durations", ".", "max", "(", ")", ")", ")", "ax3", ".", "set_xticks", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "class_names", ")", "+", "1", ")", ")", ")", "ax3", ".", "set_xticklabels", "(", "[", "\" \"", "]", "+", "class_names", ")", "ax3", ".", "bar", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "class_names", ")", ")", ")", "+", "0.5", ",", "av_durations", ")", "fig", ".", "tight_layout", "(", ")", "plt", ".", "show", "(", ")", "return", "accuracy" ]
This function plots statistics on the classification-segmentation results produced either by the fix-sized supervised method or the HMM method. It also computes the overall accuracy achieved by the respective method if ground-truth is available.
[ "This", "function", "plots", "statistics", "on", "the", "classification", "-", "segmentation", "results", "produced", "either", "by", "the", "fix", "-", "sized", "supervised", "method", "or", "the", "HMM", "method", ".", "It", "also", "computes", "the", "overall", "accuracy", "achieved", "by", "the", "respective", "method", "if", "ground", "-", "truth", "is", "available", "." ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L173-L240
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
trainHMM_computeStatistics
def trainHMM_computeStatistics(features, labels): ''' This function computes the statistics used to train an HMM joint segmentation-classification model using a sequence of sequential features and respective labels ARGUMENTS: - features: a numpy matrix of feature vectors (numOfDimensions x n_wins) - labels: a numpy array of class indices (n_wins x 1) RETURNS: - start_prob: matrix of prior class probabilities (n_classes x 1) - transmat: transition matrix (n_classes x n_classes) - means: means matrix (numOfDimensions x 1) - cov: deviation matrix (numOfDimensions x 1) ''' u_labels = numpy.unique(labels) n_comps = len(u_labels) n_feats = features.shape[0] if features.shape[1] < labels.shape[0]: print("trainHMM warning: number of short-term feature vectors " "must be greater or equal to the labels length!") labels = labels[0:features.shape[1]] # compute prior probabilities: start_prob = numpy.zeros((n_comps,)) for i, u in enumerate(u_labels): start_prob[i] = numpy.count_nonzero(labels == u) # normalize prior probabilities start_prob = start_prob / start_prob.sum() # compute transition matrix: transmat = numpy.zeros((n_comps, n_comps)) for i in range(labels.shape[0]-1): transmat[int(labels[i]), int(labels[i + 1])] += 1 # normalize rows of transition matrix: for i in range(n_comps): transmat[i, :] /= transmat[i, :].sum() means = numpy.zeros((n_comps, n_feats)) for i in range(n_comps): means[i, :] = numpy.matrix(features[:, numpy.nonzero(labels == u_labels[i])[0]].mean(axis=1)) cov = numpy.zeros((n_comps, n_feats)) for i in range(n_comps): #cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==u_labels[i])[0]]) # use this lines if HMM using full gaussian distributions are to be used! cov[i, :] = numpy.std(features[:, numpy.nonzero(labels == u_labels[i])[0]], axis=1) return start_prob, transmat, means, cov
python
def trainHMM_computeStatistics(features, labels): ''' This function computes the statistics used to train an HMM joint segmentation-classification model using a sequence of sequential features and respective labels ARGUMENTS: - features: a numpy matrix of feature vectors (numOfDimensions x n_wins) - labels: a numpy array of class indices (n_wins x 1) RETURNS: - start_prob: matrix of prior class probabilities (n_classes x 1) - transmat: transition matrix (n_classes x n_classes) - means: means matrix (numOfDimensions x 1) - cov: deviation matrix (numOfDimensions x 1) ''' u_labels = numpy.unique(labels) n_comps = len(u_labels) n_feats = features.shape[0] if features.shape[1] < labels.shape[0]: print("trainHMM warning: number of short-term feature vectors " "must be greater or equal to the labels length!") labels = labels[0:features.shape[1]] # compute prior probabilities: start_prob = numpy.zeros((n_comps,)) for i, u in enumerate(u_labels): start_prob[i] = numpy.count_nonzero(labels == u) # normalize prior probabilities start_prob = start_prob / start_prob.sum() # compute transition matrix: transmat = numpy.zeros((n_comps, n_comps)) for i in range(labels.shape[0]-1): transmat[int(labels[i]), int(labels[i + 1])] += 1 # normalize rows of transition matrix: for i in range(n_comps): transmat[i, :] /= transmat[i, :].sum() means = numpy.zeros((n_comps, n_feats)) for i in range(n_comps): means[i, :] = numpy.matrix(features[:, numpy.nonzero(labels == u_labels[i])[0]].mean(axis=1)) cov = numpy.zeros((n_comps, n_feats)) for i in range(n_comps): #cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==u_labels[i])[0]]) # use this lines if HMM using full gaussian distributions are to be used! cov[i, :] = numpy.std(features[:, numpy.nonzero(labels == u_labels[i])[0]], axis=1) return start_prob, transmat, means, cov
[ "def", "trainHMM_computeStatistics", "(", "features", ",", "labels", ")", ":", "u_labels", "=", "numpy", ".", "unique", "(", "labels", ")", "n_comps", "=", "len", "(", "u_labels", ")", "n_feats", "=", "features", ".", "shape", "[", "0", "]", "if", "features", ".", "shape", "[", "1", "]", "<", "labels", ".", "shape", "[", "0", "]", ":", "print", "(", "\"trainHMM warning: number of short-term feature vectors \"", "\"must be greater or equal to the labels length!\"", ")", "labels", "=", "labels", "[", "0", ":", "features", ".", "shape", "[", "1", "]", "]", "# compute prior probabilities:", "start_prob", "=", "numpy", ".", "zeros", "(", "(", "n_comps", ",", ")", ")", "for", "i", ",", "u", "in", "enumerate", "(", "u_labels", ")", ":", "start_prob", "[", "i", "]", "=", "numpy", ".", "count_nonzero", "(", "labels", "==", "u", ")", "# normalize prior probabilities", "start_prob", "=", "start_prob", "/", "start_prob", ".", "sum", "(", ")", "# compute transition matrix:", "transmat", "=", "numpy", ".", "zeros", "(", "(", "n_comps", ",", "n_comps", ")", ")", "for", "i", "in", "range", "(", "labels", ".", "shape", "[", "0", "]", "-", "1", ")", ":", "transmat", "[", "int", "(", "labels", "[", "i", "]", ")", ",", "int", "(", "labels", "[", "i", "+", "1", "]", ")", "]", "+=", "1", "# normalize rows of transition matrix:", "for", "i", "in", "range", "(", "n_comps", ")", ":", "transmat", "[", "i", ",", ":", "]", "/=", "transmat", "[", "i", ",", ":", "]", ".", "sum", "(", ")", "means", "=", "numpy", ".", "zeros", "(", "(", "n_comps", ",", "n_feats", ")", ")", "for", "i", "in", "range", "(", "n_comps", ")", ":", "means", "[", "i", ",", ":", "]", "=", "numpy", ".", "matrix", "(", "features", "[", ":", ",", "numpy", ".", "nonzero", "(", "labels", "==", "u_labels", "[", "i", "]", ")", "[", "0", "]", "]", ".", "mean", "(", "axis", "=", "1", ")", ")", "cov", "=", "numpy", ".", "zeros", "(", "(", "n_comps", ",", "n_feats", ")", ")", "for", "i", "in", "range", "(", "n_comps", ")", ":", 
"#cov[i,:,:] = numpy.cov(features[:,numpy.nonzero(labels==u_labels[i])[0]]) # use this lines if HMM using full gaussian distributions are to be used!", "cov", "[", "i", ",", ":", "]", "=", "numpy", ".", "std", "(", "features", "[", ":", ",", "numpy", ".", "nonzero", "(", "labels", "==", "u_labels", "[", "i", "]", ")", "[", "0", "]", "]", ",", "axis", "=", "1", ")", "return", "start_prob", ",", "transmat", ",", "means", ",", "cov" ]
This function computes the statistics used to train an HMM joint segmentation-classification model using a sequence of sequential features and respective labels ARGUMENTS: - features: a numpy matrix of feature vectors (numOfDimensions x n_wins) - labels: a numpy array of class indices (n_wins x 1) RETURNS: - start_prob: matrix of prior class probabilities (n_classes x 1) - transmat: transition matrix (n_classes x n_classes) - means: means matrix (numOfDimensions x 1) - cov: deviation matrix (numOfDimensions x 1)
[ "This", "function", "computes", "the", "statistics", "used", "to", "train", "an", "HMM", "joint", "segmentation", "-", "classification", "model", "using", "a", "sequence", "of", "sequential", "features", "and", "respective", "labels" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L278-L330
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
trainHMM_fromFile
def trainHMM_fromFile(wav_file, gt_file, hmm_model_name, mt_win, mt_step): ''' This function trains a HMM model for segmentation-classification using a single annotated audio file ARGUMENTS: - wav_file: the path of the audio filename - gt_file: the path of the ground truth filename (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row - hmm_model_name: the name of the HMM model to be stored - mt_win: mid-term window size - mt_step: mid-term window step RETURNS: - hmm: an object to the resulting HMM - class_names: a list of class_names After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file ''' [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step) [fs, x] = audioBasicIO.readAudioFile(wav_file) [F, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs, round(fs * 0.050), round(fs * 0.050)) start_prob, transmat, means, cov = trainHMM_computeStatistics(F, flags) hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag") hmm.startprob_ = start_prob hmm.transmat_ = transmat hmm.means_ = means hmm.covars_ = cov fo = open(hmm_model_name, "wb") cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(class_names, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL) fo.close() return hmm, class_names
python
def trainHMM_fromFile(wav_file, gt_file, hmm_model_name, mt_win, mt_step): ''' This function trains a HMM model for segmentation-classification using a single annotated audio file ARGUMENTS: - wav_file: the path of the audio filename - gt_file: the path of the ground truth filename (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row - hmm_model_name: the name of the HMM model to be stored - mt_win: mid-term window size - mt_step: mid-term window step RETURNS: - hmm: an object to the resulting HMM - class_names: a list of class_names After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file ''' [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step) [fs, x] = audioBasicIO.readAudioFile(wav_file) [F, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs, round(fs * 0.050), round(fs * 0.050)) start_prob, transmat, means, cov = trainHMM_computeStatistics(F, flags) hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag") hmm.startprob_ = start_prob hmm.transmat_ = transmat hmm.means_ = means hmm.covars_ = cov fo = open(hmm_model_name, "wb") cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(class_names, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_win, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL) fo.close() return hmm, class_names
[ "def", "trainHMM_fromFile", "(", "wav_file", ",", "gt_file", ",", "hmm_model_name", ",", "mt_win", ",", "mt_step", ")", ":", "[", "seg_start", ",", "seg_end", ",", "seg_labs", "]", "=", "readSegmentGT", "(", "gt_file", ")", "flags", ",", "class_names", "=", "segs2flags", "(", "seg_start", ",", "seg_end", ",", "seg_labs", ",", "mt_step", ")", "[", "fs", ",", "x", "]", "=", "audioBasicIO", ".", "readAudioFile", "(", "wav_file", ")", "[", "F", ",", "_", ",", "_", "]", "=", "aF", ".", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "mt_win", "*", "fs", ",", "mt_step", "*", "fs", ",", "round", "(", "fs", "*", "0.050", ")", ",", "round", "(", "fs", "*", "0.050", ")", ")", "start_prob", ",", "transmat", ",", "means", ",", "cov", "=", "trainHMM_computeStatistics", "(", "F", ",", "flags", ")", "hmm", "=", "hmmlearn", ".", "hmm", ".", "GaussianHMM", "(", "start_prob", ".", "shape", "[", "0", "]", ",", "\"diag\"", ")", "hmm", ".", "startprob_", "=", "start_prob", "hmm", ".", "transmat_", "=", "transmat", "hmm", ".", "means_", "=", "means", "hmm", ".", "covars_", "=", "cov", "fo", "=", "open", "(", "hmm_model_name", ",", "\"wb\"", ")", "cPickle", ".", "dump", "(", "hmm", ",", "fo", ",", "protocol", "=", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "cPickle", ".", "dump", "(", "class_names", ",", "fo", ",", "protocol", "=", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "cPickle", ".", "dump", "(", "mt_win", ",", "fo", ",", "protocol", "=", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "cPickle", ".", "dump", "(", "mt_step", ",", "fo", ",", "protocol", "=", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "fo", ".", "close", "(", ")", "return", "hmm", ",", "class_names" ]
This function trains a HMM model for segmentation-classification using a single annotated audio file ARGUMENTS: - wav_file: the path of the audio filename - gt_file: the path of the ground truth filename (a csv file of the form <segment start in seconds>,<segment end in seconds>,<segment label> in each row - hmm_model_name: the name of the HMM model to be stored - mt_win: mid-term window size - mt_step: mid-term window step RETURNS: - hmm: an object to the resulting HMM - class_names: a list of class_names After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file
[ "This", "function", "trains", "a", "HMM", "model", "for", "segmentation", "-", "classification", "using", "a", "single", "annotated", "audio", "file", "ARGUMENTS", ":", "-", "wav_file", ":", "the", "path", "of", "the", "audio", "filename", "-", "gt_file", ":", "the", "path", "of", "the", "ground", "truth", "filename", "(", "a", "csv", "file", "of", "the", "form", "<segment", "start", "in", "seconds", ">", "<segment", "end", "in", "seconds", ">", "<segment", "label", ">", "in", "each", "row", "-", "hmm_model_name", ":", "the", "name", "of", "the", "HMM", "model", "to", "be", "stored", "-", "mt_win", ":", "mid", "-", "term", "window", "size", "-", "mt_step", ":", "mid", "-", "term", "window", "step", "RETURNS", ":", "-", "hmm", ":", "an", "object", "to", "the", "resulting", "HMM", "-", "class_names", ":", "a", "list", "of", "class_names" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L333-L370
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
trainHMM_fromDir
def trainHMM_fromDir(dirPath, hmm_model_name, mt_win, mt_step): ''' This function trains a HMM model for segmentation-classification using a where WAV files and .segment (ground-truth files) are stored ARGUMENTS: - dirPath: the path of the data diretory - hmm_model_name: the name of the HMM model to be stored - mt_win: mid-term window size - mt_step: mid-term window step RETURNS: - hmm: an object to the resulting HMM - class_names: a list of class_names After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file ''' flags_all = numpy.array([]) classes_all = [] for i, f in enumerate(glob.glob(dirPath + os.sep + '*.wav')): # for each WAV file wav_file = f gt_file = f.replace('.wav', '.segments') if not os.path.isfile(gt_file): continue [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step) for c in class_names: # update class names: if c not in classes_all: classes_all.append(c) [fs, x] = audioBasicIO.readAudioFile(wav_file) [F, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs, round(fs * 0.050), round(fs * 0.050)) lenF = F.shape[1] lenL = len(flags) min_sm = min(lenF, lenL) F = F[:, 0:min_sm] flags = flags[0:min_sm] flagsNew = [] for j, fl in enumerate(flags): # append features and labels flagsNew.append(classes_all.index(class_names[flags[j]])) flags_all = numpy.append(flags_all, numpy.array(flagsNew)) if i == 0: f_all = F else: f_all = numpy.concatenate((f_all, F), axis=1) start_prob, transmat, means, cov = trainHMM_computeStatistics(f_all, flags_all) # compute HMM statistics hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag") # train HMM hmm.startprob_ = start_prob hmm.transmat_ = transmat hmm.means_ = means hmm.covars_ = cov fo = open(hmm_model_name, "wb") # save HMM model cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(classes_all, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_win, fo, 
protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL) fo.close() return hmm, classes_all
python
def trainHMM_fromDir(dirPath, hmm_model_name, mt_win, mt_step): ''' This function trains a HMM model for segmentation-classification using a where WAV files and .segment (ground-truth files) are stored ARGUMENTS: - dirPath: the path of the data diretory - hmm_model_name: the name of the HMM model to be stored - mt_win: mid-term window size - mt_step: mid-term window step RETURNS: - hmm: an object to the resulting HMM - class_names: a list of class_names After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file ''' flags_all = numpy.array([]) classes_all = [] for i, f in enumerate(glob.glob(dirPath + os.sep + '*.wav')): # for each WAV file wav_file = f gt_file = f.replace('.wav', '.segments') if not os.path.isfile(gt_file): continue [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) flags, class_names = segs2flags(seg_start, seg_end, seg_labs, mt_step) for c in class_names: # update class names: if c not in classes_all: classes_all.append(c) [fs, x] = audioBasicIO.readAudioFile(wav_file) [F, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs, round(fs * 0.050), round(fs * 0.050)) lenF = F.shape[1] lenL = len(flags) min_sm = min(lenF, lenL) F = F[:, 0:min_sm] flags = flags[0:min_sm] flagsNew = [] for j, fl in enumerate(flags): # append features and labels flagsNew.append(classes_all.index(class_names[flags[j]])) flags_all = numpy.append(flags_all, numpy.array(flagsNew)) if i == 0: f_all = F else: f_all = numpy.concatenate((f_all, F), axis=1) start_prob, transmat, means, cov = trainHMM_computeStatistics(f_all, flags_all) # compute HMM statistics hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag") # train HMM hmm.startprob_ = start_prob hmm.transmat_ = transmat hmm.means_ = means hmm.covars_ = cov fo = open(hmm_model_name, "wb") # save HMM model cPickle.dump(hmm, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(classes_all, fo, protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_win, fo, 
protocol=cPickle.HIGHEST_PROTOCOL) cPickle.dump(mt_step, fo, protocol=cPickle.HIGHEST_PROTOCOL) fo.close() return hmm, classes_all
[ "def", "trainHMM_fromDir", "(", "dirPath", ",", "hmm_model_name", ",", "mt_win", ",", "mt_step", ")", ":", "flags_all", "=", "numpy", ".", "array", "(", "[", "]", ")", "classes_all", "=", "[", "]", "for", "i", ",", "f", "in", "enumerate", "(", "glob", ".", "glob", "(", "dirPath", "+", "os", ".", "sep", "+", "'*.wav'", ")", ")", ":", "# for each WAV file", "wav_file", "=", "f", "gt_file", "=", "f", ".", "replace", "(", "'.wav'", ",", "'.segments'", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "gt_file", ")", ":", "continue", "[", "seg_start", ",", "seg_end", ",", "seg_labs", "]", "=", "readSegmentGT", "(", "gt_file", ")", "flags", ",", "class_names", "=", "segs2flags", "(", "seg_start", ",", "seg_end", ",", "seg_labs", ",", "mt_step", ")", "for", "c", "in", "class_names", ":", "# update class names:", "if", "c", "not", "in", "classes_all", ":", "classes_all", ".", "append", "(", "c", ")", "[", "fs", ",", "x", "]", "=", "audioBasicIO", ".", "readAudioFile", "(", "wav_file", ")", "[", "F", ",", "_", ",", "_", "]", "=", "aF", ".", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "mt_win", "*", "fs", ",", "mt_step", "*", "fs", ",", "round", "(", "fs", "*", "0.050", ")", ",", "round", "(", "fs", "*", "0.050", ")", ")", "lenF", "=", "F", ".", "shape", "[", "1", "]", "lenL", "=", "len", "(", "flags", ")", "min_sm", "=", "min", "(", "lenF", ",", "lenL", ")", "F", "=", "F", "[", ":", ",", "0", ":", "min_sm", "]", "flags", "=", "flags", "[", "0", ":", "min_sm", "]", "flagsNew", "=", "[", "]", "for", "j", ",", "fl", "in", "enumerate", "(", "flags", ")", ":", "# append features and labels", "flagsNew", ".", "append", "(", "classes_all", ".", "index", "(", "class_names", "[", "flags", "[", "j", "]", "]", ")", ")", "flags_all", "=", "numpy", ".", "append", "(", "flags_all", ",", "numpy", ".", "array", "(", "flagsNew", ")", ")", "if", "i", "==", "0", ":", "f_all", "=", "F", "else", ":", "f_all", "=", "numpy", ".", "concatenate", "(", "(", "f_all", ",", "F", ")", ",", 
"axis", "=", "1", ")", "start_prob", ",", "transmat", ",", "means", ",", "cov", "=", "trainHMM_computeStatistics", "(", "f_all", ",", "flags_all", ")", "# compute HMM statistics", "hmm", "=", "hmmlearn", ".", "hmm", ".", "GaussianHMM", "(", "start_prob", ".", "shape", "[", "0", "]", ",", "\"diag\"", ")", "# train HMM", "hmm", ".", "startprob_", "=", "start_prob", "hmm", ".", "transmat_", "=", "transmat", "hmm", ".", "means_", "=", "means", "hmm", ".", "covars_", "=", "cov", "fo", "=", "open", "(", "hmm_model_name", ",", "\"wb\"", ")", "# save HMM model", "cPickle", ".", "dump", "(", "hmm", ",", "fo", ",", "protocol", "=", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "cPickle", ".", "dump", "(", "classes_all", ",", "fo", ",", "protocol", "=", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "cPickle", ".", "dump", "(", "mt_win", ",", "fo", ",", "protocol", "=", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "cPickle", ".", "dump", "(", "mt_step", ",", "fo", ",", "protocol", "=", "cPickle", ".", "HIGHEST_PROTOCOL", ")", "fo", ".", "close", "(", ")", "return", "hmm", ",", "classes_all" ]
This function trains a HMM model for segmentation-classification using a where WAV files and .segment (ground-truth files) are stored ARGUMENTS: - dirPath: the path of the data diretory - hmm_model_name: the name of the HMM model to be stored - mt_win: mid-term window size - mt_step: mid-term window step RETURNS: - hmm: an object to the resulting HMM - class_names: a list of class_names After training, hmm, class_names, along with the mt_win and mt_step values are stored in the hmm_model_name file
[ "This", "function", "trains", "a", "HMM", "model", "for", "segmentation", "-", "classification", "using", "a", "where", "WAV", "files", "and", ".", "segment", "(", "ground", "-", "truth", "files", ")", "are", "stored", "ARGUMENTS", ":", "-", "dirPath", ":", "the", "path", "of", "the", "data", "diretory", "-", "hmm_model_name", ":", "the", "name", "of", "the", "HMM", "model", "to", "be", "stored", "-", "mt_win", ":", "mid", "-", "term", "window", "size", "-", "mt_step", ":", "mid", "-", "term", "window", "step", "RETURNS", ":", "-", "hmm", ":", "an", "object", "to", "the", "resulting", "HMM", "-", "class_names", ":", "a", "list", "of", "class_names" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L373-L439
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
mtFileClassification
def mtFileClassification(input_file, model_name, model_type, plot_results=False, gt_file=""): ''' This function performs mid-term classification of an audio stream. Towards this end, supervised knowledge is used, i.e. a pre-trained classifier. ARGUMENTS: - input_file: path of the input WAV file - model_name: name of the classification model - model_type: svm or knn depending on the classifier type - plot_results: True if results are to be plotted using matplotlib along with a set of statistics RETURNS: - segs: a sequence of segment's endpoints: segs[i] is the endpoint of the i-th segment (in seconds) - classes: a sequence of class flags: class[i] is the class ID of the i-th segment ''' if not os.path.isfile(model_name): print("mtFileClassificationError: input model_type not found!") return (-1, -1, -1, -1) # Load classifier: if model_type == "knn": [classifier, MEAN, STD, class_names, mt_win, mt_step, st_win, st_step, compute_beat] = \ aT.load_model_knn(model_name) else: [classifier, MEAN, STD, class_names, mt_win, mt_step, st_win, st_step, compute_beat] = aT.load_model(model_name) if compute_beat: print("Model " + model_name + " contains long-term music features " "(beat etc) and cannot be used in " "segmentation") return (-1, -1, -1, -1) [fs, x] = audioBasicIO.readAudioFile(input_file) # load input file if fs == -1: # could not read file return (-1, -1, -1, -1) x = audioBasicIO.stereo2mono(x) # convert stereo (if) to mono duration = len(x) / fs # mid-term feature extraction: [mt_feats, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs, round(fs * st_win), round(fs * st_step)) flags = [] Ps = [] flags_ind = [] for i in range(mt_feats.shape[1]): # for each feature vector (i.e. 
for each fix-sized segment): cur_fv = (mt_feats[:, i] - MEAN) / STD # normalize current feature vector [res, P] = aT.classifierWrapper(classifier, model_type, cur_fv) # classify vector flags_ind.append(res) flags.append(class_names[int(res)]) # update class label matrix Ps.append(numpy.max(P)) # update probability matrix flags_ind = numpy.array(flags_ind) # 1-window smoothing for i in range(1, len(flags_ind) - 1): if flags_ind[i-1] == flags_ind[i + 1]: flags_ind[i] = flags_ind[i + 1] # convert fix-sized flags to segments and classes (segs, classes) = flags2segs(flags, mt_step) segs[-1] = len(x) / float(fs) # Load grount-truth: if os.path.isfile(gt_file): [seg_start_gt, seg_end_gt, seg_l_gt] = readSegmentGT(gt_file) flags_gt, class_names_gt = segs2flags(seg_start_gt, seg_end_gt, seg_l_gt, mt_step) flags_ind_gt = [] for j, fl in enumerate(flags_gt): # "align" labels with GT if class_names_gt[flags_gt[j]] in class_names: flags_ind_gt.append(class_names.index(class_names_gt[flags_gt[j]])) else: flags_ind_gt.append(-1) flags_ind_gt = numpy.array(flags_ind_gt) cm = numpy.zeros((len(class_names_gt), len(class_names_gt))) for i in range(min(flags_ind.shape[0], flags_ind_gt.shape[0])): cm[int(flags_ind_gt[i]),int(flags_ind[i])] += 1 else: cm = [] flags_ind_gt = numpy.array([]) acc = plotSegmentationResults(flags_ind, flags_ind_gt, class_names, mt_step, not plot_results) if acc >= 0: print("Overall Accuracy: {0:.3f}".format(acc) ) return (flags_ind, class_names_gt, acc, cm) else: return (flags_ind, class_names, acc, cm)
python
def mtFileClassification(input_file, model_name, model_type, plot_results=False, gt_file=""): ''' This function performs mid-term classification of an audio stream. Towards this end, supervised knowledge is used, i.e. a pre-trained classifier. ARGUMENTS: - input_file: path of the input WAV file - model_name: name of the classification model - model_type: svm or knn depending on the classifier type - plot_results: True if results are to be plotted using matplotlib along with a set of statistics RETURNS: - segs: a sequence of segment's endpoints: segs[i] is the endpoint of the i-th segment (in seconds) - classes: a sequence of class flags: class[i] is the class ID of the i-th segment ''' if not os.path.isfile(model_name): print("mtFileClassificationError: input model_type not found!") return (-1, -1, -1, -1) # Load classifier: if model_type == "knn": [classifier, MEAN, STD, class_names, mt_win, mt_step, st_win, st_step, compute_beat] = \ aT.load_model_knn(model_name) else: [classifier, MEAN, STD, class_names, mt_win, mt_step, st_win, st_step, compute_beat] = aT.load_model(model_name) if compute_beat: print("Model " + model_name + " contains long-term music features " "(beat etc) and cannot be used in " "segmentation") return (-1, -1, -1, -1) [fs, x] = audioBasicIO.readAudioFile(input_file) # load input file if fs == -1: # could not read file return (-1, -1, -1, -1) x = audioBasicIO.stereo2mono(x) # convert stereo (if) to mono duration = len(x) / fs # mid-term feature extraction: [mt_feats, _, _] = aF.mtFeatureExtraction(x, fs, mt_win * fs, mt_step * fs, round(fs * st_win), round(fs * st_step)) flags = [] Ps = [] flags_ind = [] for i in range(mt_feats.shape[1]): # for each feature vector (i.e. 
for each fix-sized segment): cur_fv = (mt_feats[:, i] - MEAN) / STD # normalize current feature vector [res, P] = aT.classifierWrapper(classifier, model_type, cur_fv) # classify vector flags_ind.append(res) flags.append(class_names[int(res)]) # update class label matrix Ps.append(numpy.max(P)) # update probability matrix flags_ind = numpy.array(flags_ind) # 1-window smoothing for i in range(1, len(flags_ind) - 1): if flags_ind[i-1] == flags_ind[i + 1]: flags_ind[i] = flags_ind[i + 1] # convert fix-sized flags to segments and classes (segs, classes) = flags2segs(flags, mt_step) segs[-1] = len(x) / float(fs) # Load grount-truth: if os.path.isfile(gt_file): [seg_start_gt, seg_end_gt, seg_l_gt] = readSegmentGT(gt_file) flags_gt, class_names_gt = segs2flags(seg_start_gt, seg_end_gt, seg_l_gt, mt_step) flags_ind_gt = [] for j, fl in enumerate(flags_gt): # "align" labels with GT if class_names_gt[flags_gt[j]] in class_names: flags_ind_gt.append(class_names.index(class_names_gt[flags_gt[j]])) else: flags_ind_gt.append(-1) flags_ind_gt = numpy.array(flags_ind_gt) cm = numpy.zeros((len(class_names_gt), len(class_names_gt))) for i in range(min(flags_ind.shape[0], flags_ind_gt.shape[0])): cm[int(flags_ind_gt[i]),int(flags_ind[i])] += 1 else: cm = [] flags_ind_gt = numpy.array([]) acc = plotSegmentationResults(flags_ind, flags_ind_gt, class_names, mt_step, not plot_results) if acc >= 0: print("Overall Accuracy: {0:.3f}".format(acc) ) return (flags_ind, class_names_gt, acc, cm) else: return (flags_ind, class_names, acc, cm)
[ "def", "mtFileClassification", "(", "input_file", ",", "model_name", ",", "model_type", ",", "plot_results", "=", "False", ",", "gt_file", "=", "\"\"", ")", ":", "if", "not", "os", ".", "path", ".", "isfile", "(", "model_name", ")", ":", "print", "(", "\"mtFileClassificationError: input model_type not found!\"", ")", "return", "(", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", ")", "# Load classifier:", "if", "model_type", "==", "\"knn\"", ":", "[", "classifier", ",", "MEAN", ",", "STD", ",", "class_names", ",", "mt_win", ",", "mt_step", ",", "st_win", ",", "st_step", ",", "compute_beat", "]", "=", "aT", ".", "load_model_knn", "(", "model_name", ")", "else", ":", "[", "classifier", ",", "MEAN", ",", "STD", ",", "class_names", ",", "mt_win", ",", "mt_step", ",", "st_win", ",", "st_step", ",", "compute_beat", "]", "=", "aT", ".", "load_model", "(", "model_name", ")", "if", "compute_beat", ":", "print", "(", "\"Model \"", "+", "model_name", "+", "\" contains long-term music features \"", "\"(beat etc) and cannot be used in \"", "\"segmentation\"", ")", "return", "(", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", ")", "[", "fs", ",", "x", "]", "=", "audioBasicIO", ".", "readAudioFile", "(", "input_file", ")", "# load input file", "if", "fs", "==", "-", "1", ":", "# could not read file", "return", "(", "-", "1", ",", "-", "1", ",", "-", "1", ",", "-", "1", ")", "x", "=", "audioBasicIO", ".", "stereo2mono", "(", "x", ")", "# convert stereo (if) to mono", "duration", "=", "len", "(", "x", ")", "/", "fs", "# mid-term feature extraction:", "[", "mt_feats", ",", "_", ",", "_", "]", "=", "aF", ".", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "mt_win", "*", "fs", ",", "mt_step", "*", "fs", ",", "round", "(", "fs", "*", "st_win", ")", ",", "round", "(", "fs", "*", "st_step", ")", ")", "flags", "=", "[", "]", "Ps", "=", "[", "]", "flags_ind", "=", "[", "]", "for", "i", "in", "range", "(", "mt_feats", ".", "shape", "[", "1", "]", ")", ":", "# for each 
feature vector (i.e. for each fix-sized segment):", "cur_fv", "=", "(", "mt_feats", "[", ":", ",", "i", "]", "-", "MEAN", ")", "/", "STD", "# normalize current feature vector", "[", "res", ",", "P", "]", "=", "aT", ".", "classifierWrapper", "(", "classifier", ",", "model_type", ",", "cur_fv", ")", "# classify vector", "flags_ind", ".", "append", "(", "res", ")", "flags", ".", "append", "(", "class_names", "[", "int", "(", "res", ")", "]", ")", "# update class label matrix", "Ps", ".", "append", "(", "numpy", ".", "max", "(", "P", ")", ")", "# update probability matrix", "flags_ind", "=", "numpy", ".", "array", "(", "flags_ind", ")", "# 1-window smoothing", "for", "i", "in", "range", "(", "1", ",", "len", "(", "flags_ind", ")", "-", "1", ")", ":", "if", "flags_ind", "[", "i", "-", "1", "]", "==", "flags_ind", "[", "i", "+", "1", "]", ":", "flags_ind", "[", "i", "]", "=", "flags_ind", "[", "i", "+", "1", "]", "# convert fix-sized flags to segments and classes", "(", "segs", ",", "classes", ")", "=", "flags2segs", "(", "flags", ",", "mt_step", ")", "segs", "[", "-", "1", "]", "=", "len", "(", "x", ")", "/", "float", "(", "fs", ")", "# Load grount-truth: ", "if", "os", ".", "path", ".", "isfile", "(", "gt_file", ")", ":", "[", "seg_start_gt", ",", "seg_end_gt", ",", "seg_l_gt", "]", "=", "readSegmentGT", "(", "gt_file", ")", "flags_gt", ",", "class_names_gt", "=", "segs2flags", "(", "seg_start_gt", ",", "seg_end_gt", ",", "seg_l_gt", ",", "mt_step", ")", "flags_ind_gt", "=", "[", "]", "for", "j", ",", "fl", "in", "enumerate", "(", "flags_gt", ")", ":", "# \"align\" labels with GT", "if", "class_names_gt", "[", "flags_gt", "[", "j", "]", "]", "in", "class_names", ":", "flags_ind_gt", ".", "append", "(", "class_names", ".", "index", "(", "class_names_gt", "[", "flags_gt", "[", "j", "]", "]", ")", ")", "else", ":", "flags_ind_gt", ".", "append", "(", "-", "1", ")", "flags_ind_gt", "=", "numpy", ".", "array", "(", "flags_ind_gt", ")", "cm", "=", "numpy", ".", "zeros", "(", 
"(", "len", "(", "class_names_gt", ")", ",", "len", "(", "class_names_gt", ")", ")", ")", "for", "i", "in", "range", "(", "min", "(", "flags_ind", ".", "shape", "[", "0", "]", ",", "flags_ind_gt", ".", "shape", "[", "0", "]", ")", ")", ":", "cm", "[", "int", "(", "flags_ind_gt", "[", "i", "]", ")", ",", "int", "(", "flags_ind", "[", "i", "]", ")", "]", "+=", "1", "else", ":", "cm", "=", "[", "]", "flags_ind_gt", "=", "numpy", ".", "array", "(", "[", "]", ")", "acc", "=", "plotSegmentationResults", "(", "flags_ind", ",", "flags_ind_gt", ",", "class_names", ",", "mt_step", ",", "not", "plot_results", ")", "if", "acc", ">=", "0", ":", "print", "(", "\"Overall Accuracy: {0:.3f}\"", ".", "format", "(", "acc", ")", ")", "return", "(", "flags_ind", ",", "class_names_gt", ",", "acc", ",", "cm", ")", "else", ":", "return", "(", "flags_ind", ",", "class_names", ",", "acc", ",", "cm", ")" ]
This function performs mid-term classification of an audio stream. Towards this end, supervised knowledge is used, i.e. a pre-trained classifier. ARGUMENTS: - input_file: path of the input WAV file - model_name: name of the classification model - model_type: svm or knn depending on the classifier type - plot_results: True if results are to be plotted using matplotlib along with a set of statistics RETURNS: - segs: a sequence of segment's endpoints: segs[i] is the endpoint of the i-th segment (in seconds) - classes: a sequence of class flags: class[i] is the class ID of the i-th segment
[ "This", "function", "performs", "mid", "-", "term", "classification", "of", "an", "audio", "stream", ".", "Towards", "this", "end", "supervised", "knowledge", "is", "used", "i", ".", "e", ".", "a", "pre", "-", "trained", "classifier", ".", "ARGUMENTS", ":", "-", "input_file", ":", "path", "of", "the", "input", "WAV", "file", "-", "model_name", ":", "name", "of", "the", "classification", "model", "-", "model_type", ":", "svm", "or", "knn", "depending", "on", "the", "classifier", "type", "-", "plot_results", ":", "True", "if", "results", "are", "to", "be", "plotted", "using", "matplotlib", "along", "with", "a", "set", "of", "statistics" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L491-L579
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
silenceRemoval
def silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False): ''' Event Detection (silence removal) ARGUMENTS: - x: the input audio signal - fs: sampling freq - st_win, st_step: window size and step in seconds - smoothWindow: (optinal) smooth window (in seconds) - weight: (optinal) weight factor (0 < weight < 1) the higher, the more strict - plot: (optinal) True if results are to be plotted RETURNS: - seg_limits: list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds ''' if weight >= 1: weight = 0.99 if weight <= 0: weight = 0.01 # Step 1: feature extraction x = audioBasicIO.stereo2mono(x) st_feats, _ = aF.stFeatureExtraction(x, fs, st_win * fs, st_step * fs) # Step 2: train binary svm classifier of low vs high energy frames # keep only the energy short-term sequence (2nd feature) st_energy = st_feats[1, :] en = numpy.sort(st_energy) # number of 10% of the total short-term windows l1 = int(len(en) / 10) # compute "lower" 10% energy threshold t1 = numpy.mean(en[0:l1]) + 0.000000000000001 # compute "higher" 10% energy threshold t2 = numpy.mean(en[-l1:-1]) + 0.000000000000001 # get all features that correspond to low energy class1 = st_feats[:, numpy.where(st_energy <= t1)[0]] # get all features that correspond to high energy class2 = st_feats[:, numpy.where(st_energy >= t2)[0]] # form the binary classification task and ... 
faets_s = [class1.T, class2.T] # normalize and train the respective svm probabilistic model # (ONSET vs SILENCE) [faets_s_norm, means_s, stds_s] = aT.normalizeFeatures(faets_s) svm = aT.trainSVM(faets_s_norm, 1.0) # Step 3: compute onset probability based on the trained svm prob_on_set = [] for i in range(st_feats.shape[1]): # for each frame cur_fv = (st_feats[:, i] - means_s) / stds_s # get svm probability (that it belongs to the ONSET class) prob_on_set.append(svm.predict_proba(cur_fv.reshape(1,-1))[0][1]) prob_on_set = numpy.array(prob_on_set) # smooth probability: prob_on_set = smoothMovingAvg(prob_on_set, smoothWindow / st_step) # Step 4A: detect onset frame indices: prog_on_set_sort = numpy.sort(prob_on_set) # find probability Threshold as a weighted average # of top 10% and lower 10% of the values Nt = int(prog_on_set_sort.shape[0] / 10) T = (numpy.mean((1 - weight) * prog_on_set_sort[0:Nt]) + weight * numpy.mean(prog_on_set_sort[-Nt::])) max_idx = numpy.where(prob_on_set > T)[0] # get the indices of the frames that satisfy the thresholding i = 0 time_clusters = [] seg_limits = [] # Step 4B: group frame indices to onset segments while i < len(max_idx): # for each of the detected onset indices cur_cluster = [max_idx[i]] if i == len(max_idx)-1: break while max_idx[i+1] - cur_cluster[-1] <= 2: cur_cluster.append(max_idx[i+1]) i += 1 if i == len(max_idx)-1: break i += 1 time_clusters.append(cur_cluster) seg_limits.append([cur_cluster[0] * st_step, cur_cluster[-1] * st_step]) # Step 5: Post process: remove very small segments: min_dur = 0.2 seg_limits_2 = [] for s in seg_limits: if s[1] - s[0] > min_dur: seg_limits_2.append(s) seg_limits = seg_limits_2 if plot: timeX = numpy.arange(0, x.shape[0] / float(fs), 1.0 / fs) plt.subplot(2, 1, 1) plt.plot(timeX, x) for s in seg_limits: plt.axvline(x=s[0]) plt.axvline(x=s[1]) plt.subplot(2, 1, 2) plt.plot(numpy.arange(0, prob_on_set.shape[0] * st_step, st_step), prob_on_set) plt.title('Signal') for s in seg_limits: 
plt.axvline(x=s[0]) plt.axvline(x=s[1]) plt.title('svm Probability') plt.show() return seg_limits
python
def silenceRemoval(x, fs, st_win, st_step, smoothWindow=0.5, weight=0.5, plot=False): ''' Event Detection (silence removal) ARGUMENTS: - x: the input audio signal - fs: sampling freq - st_win, st_step: window size and step in seconds - smoothWindow: (optinal) smooth window (in seconds) - weight: (optinal) weight factor (0 < weight < 1) the higher, the more strict - plot: (optinal) True if results are to be plotted RETURNS: - seg_limits: list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds ''' if weight >= 1: weight = 0.99 if weight <= 0: weight = 0.01 # Step 1: feature extraction x = audioBasicIO.stereo2mono(x) st_feats, _ = aF.stFeatureExtraction(x, fs, st_win * fs, st_step * fs) # Step 2: train binary svm classifier of low vs high energy frames # keep only the energy short-term sequence (2nd feature) st_energy = st_feats[1, :] en = numpy.sort(st_energy) # number of 10% of the total short-term windows l1 = int(len(en) / 10) # compute "lower" 10% energy threshold t1 = numpy.mean(en[0:l1]) + 0.000000000000001 # compute "higher" 10% energy threshold t2 = numpy.mean(en[-l1:-1]) + 0.000000000000001 # get all features that correspond to low energy class1 = st_feats[:, numpy.where(st_energy <= t1)[0]] # get all features that correspond to high energy class2 = st_feats[:, numpy.where(st_energy >= t2)[0]] # form the binary classification task and ... 
faets_s = [class1.T, class2.T] # normalize and train the respective svm probabilistic model # (ONSET vs SILENCE) [faets_s_norm, means_s, stds_s] = aT.normalizeFeatures(faets_s) svm = aT.trainSVM(faets_s_norm, 1.0) # Step 3: compute onset probability based on the trained svm prob_on_set = [] for i in range(st_feats.shape[1]): # for each frame cur_fv = (st_feats[:, i] - means_s) / stds_s # get svm probability (that it belongs to the ONSET class) prob_on_set.append(svm.predict_proba(cur_fv.reshape(1,-1))[0][1]) prob_on_set = numpy.array(prob_on_set) # smooth probability: prob_on_set = smoothMovingAvg(prob_on_set, smoothWindow / st_step) # Step 4A: detect onset frame indices: prog_on_set_sort = numpy.sort(prob_on_set) # find probability Threshold as a weighted average # of top 10% and lower 10% of the values Nt = int(prog_on_set_sort.shape[0] / 10) T = (numpy.mean((1 - weight) * prog_on_set_sort[0:Nt]) + weight * numpy.mean(prog_on_set_sort[-Nt::])) max_idx = numpy.where(prob_on_set > T)[0] # get the indices of the frames that satisfy the thresholding i = 0 time_clusters = [] seg_limits = [] # Step 4B: group frame indices to onset segments while i < len(max_idx): # for each of the detected onset indices cur_cluster = [max_idx[i]] if i == len(max_idx)-1: break while max_idx[i+1] - cur_cluster[-1] <= 2: cur_cluster.append(max_idx[i+1]) i += 1 if i == len(max_idx)-1: break i += 1 time_clusters.append(cur_cluster) seg_limits.append([cur_cluster[0] * st_step, cur_cluster[-1] * st_step]) # Step 5: Post process: remove very small segments: min_dur = 0.2 seg_limits_2 = [] for s in seg_limits: if s[1] - s[0] > min_dur: seg_limits_2.append(s) seg_limits = seg_limits_2 if plot: timeX = numpy.arange(0, x.shape[0] / float(fs), 1.0 / fs) plt.subplot(2, 1, 1) plt.plot(timeX, x) for s in seg_limits: plt.axvline(x=s[0]) plt.axvline(x=s[1]) plt.subplot(2, 1, 2) plt.plot(numpy.arange(0, prob_on_set.shape[0] * st_step, st_step), prob_on_set) plt.title('Signal') for s in seg_limits: 
plt.axvline(x=s[0]) plt.axvline(x=s[1]) plt.title('svm Probability') plt.show() return seg_limits
[ "def", "silenceRemoval", "(", "x", ",", "fs", ",", "st_win", ",", "st_step", ",", "smoothWindow", "=", "0.5", ",", "weight", "=", "0.5", ",", "plot", "=", "False", ")", ":", "if", "weight", ">=", "1", ":", "weight", "=", "0.99", "if", "weight", "<=", "0", ":", "weight", "=", "0.01", "# Step 1: feature extraction", "x", "=", "audioBasicIO", ".", "stereo2mono", "(", "x", ")", "st_feats", ",", "_", "=", "aF", ".", "stFeatureExtraction", "(", "x", ",", "fs", ",", "st_win", "*", "fs", ",", "st_step", "*", "fs", ")", "# Step 2: train binary svm classifier of low vs high energy frames", "# keep only the energy short-term sequence (2nd feature)", "st_energy", "=", "st_feats", "[", "1", ",", ":", "]", "en", "=", "numpy", ".", "sort", "(", "st_energy", ")", "# number of 10% of the total short-term windows", "l1", "=", "int", "(", "len", "(", "en", ")", "/", "10", ")", "# compute \"lower\" 10% energy threshold", "t1", "=", "numpy", ".", "mean", "(", "en", "[", "0", ":", "l1", "]", ")", "+", "0.000000000000001", "# compute \"higher\" 10% energy threshold", "t2", "=", "numpy", ".", "mean", "(", "en", "[", "-", "l1", ":", "-", "1", "]", ")", "+", "0.000000000000001", "# get all features that correspond to low energy", "class1", "=", "st_feats", "[", ":", ",", "numpy", ".", "where", "(", "st_energy", "<=", "t1", ")", "[", "0", "]", "]", "# get all features that correspond to high energy", "class2", "=", "st_feats", "[", ":", ",", "numpy", ".", "where", "(", "st_energy", ">=", "t2", ")", "[", "0", "]", "]", "# form the binary classification task and ...", "faets_s", "=", "[", "class1", ".", "T", ",", "class2", ".", "T", "]", "# normalize and train the respective svm probabilistic model", "# (ONSET vs SILENCE)", "[", "faets_s_norm", ",", "means_s", ",", "stds_s", "]", "=", "aT", ".", "normalizeFeatures", "(", "faets_s", ")", "svm", "=", "aT", ".", "trainSVM", "(", "faets_s_norm", ",", "1.0", ")", "# Step 3: compute onset probability based on the trained svm", "prob_on_set", "=", 
"[", "]", "for", "i", "in", "range", "(", "st_feats", ".", "shape", "[", "1", "]", ")", ":", "# for each frame", "cur_fv", "=", "(", "st_feats", "[", ":", ",", "i", "]", "-", "means_s", ")", "/", "stds_s", "# get svm probability (that it belongs to the ONSET class)", "prob_on_set", ".", "append", "(", "svm", ".", "predict_proba", "(", "cur_fv", ".", "reshape", "(", "1", ",", "-", "1", ")", ")", "[", "0", "]", "[", "1", "]", ")", "prob_on_set", "=", "numpy", ".", "array", "(", "prob_on_set", ")", "# smooth probability:", "prob_on_set", "=", "smoothMovingAvg", "(", "prob_on_set", ",", "smoothWindow", "/", "st_step", ")", "# Step 4A: detect onset frame indices:", "prog_on_set_sort", "=", "numpy", ".", "sort", "(", "prob_on_set", ")", "# find probability Threshold as a weighted average", "# of top 10% and lower 10% of the values", "Nt", "=", "int", "(", "prog_on_set_sort", ".", "shape", "[", "0", "]", "/", "10", ")", "T", "=", "(", "numpy", ".", "mean", "(", "(", "1", "-", "weight", ")", "*", "prog_on_set_sort", "[", "0", ":", "Nt", "]", ")", "+", "weight", "*", "numpy", ".", "mean", "(", "prog_on_set_sort", "[", "-", "Nt", ":", ":", "]", ")", ")", "max_idx", "=", "numpy", ".", "where", "(", "prob_on_set", ">", "T", ")", "[", "0", "]", "# get the indices of the frames that satisfy the thresholding", "i", "=", "0", "time_clusters", "=", "[", "]", "seg_limits", "=", "[", "]", "# Step 4B: group frame indices to onset segments", "while", "i", "<", "len", "(", "max_idx", ")", ":", "# for each of the detected onset indices", "cur_cluster", "=", "[", "max_idx", "[", "i", "]", "]", "if", "i", "==", "len", "(", "max_idx", ")", "-", "1", ":", "break", "while", "max_idx", "[", "i", "+", "1", "]", "-", "cur_cluster", "[", "-", "1", "]", "<=", "2", ":", "cur_cluster", ".", "append", "(", "max_idx", "[", "i", "+", "1", "]", ")", "i", "+=", "1", "if", "i", "==", "len", "(", "max_idx", ")", "-", "1", ":", "break", "i", "+=", "1", "time_clusters", ".", "append", "(", "cur_cluster", 
")", "seg_limits", ".", "append", "(", "[", "cur_cluster", "[", "0", "]", "*", "st_step", ",", "cur_cluster", "[", "-", "1", "]", "*", "st_step", "]", ")", "# Step 5: Post process: remove very small segments:", "min_dur", "=", "0.2", "seg_limits_2", "=", "[", "]", "for", "s", "in", "seg_limits", ":", "if", "s", "[", "1", "]", "-", "s", "[", "0", "]", ">", "min_dur", ":", "seg_limits_2", ".", "append", "(", "s", ")", "seg_limits", "=", "seg_limits_2", "if", "plot", ":", "timeX", "=", "numpy", ".", "arange", "(", "0", ",", "x", ".", "shape", "[", "0", "]", "/", "float", "(", "fs", ")", ",", "1.0", "/", "fs", ")", "plt", ".", "subplot", "(", "2", ",", "1", ",", "1", ")", "plt", ".", "plot", "(", "timeX", ",", "x", ")", "for", "s", "in", "seg_limits", ":", "plt", ".", "axvline", "(", "x", "=", "s", "[", "0", "]", ")", "plt", ".", "axvline", "(", "x", "=", "s", "[", "1", "]", ")", "plt", ".", "subplot", "(", "2", ",", "1", ",", "2", ")", "plt", ".", "plot", "(", "numpy", ".", "arange", "(", "0", ",", "prob_on_set", ".", "shape", "[", "0", "]", "*", "st_step", ",", "st_step", ")", ",", "prob_on_set", ")", "plt", ".", "title", "(", "'Signal'", ")", "for", "s", "in", "seg_limits", ":", "plt", ".", "axvline", "(", "x", "=", "s", "[", "0", "]", ")", "plt", ".", "axvline", "(", "x", "=", "s", "[", "1", "]", ")", "plt", ".", "title", "(", "'svm Probability'", ")", "plt", ".", "show", "(", ")", "return", "seg_limits" ]
Event Detection (silence removal) ARGUMENTS: - x: the input audio signal - fs: sampling freq - st_win, st_step: window size and step in seconds - smoothWindow: (optinal) smooth window (in seconds) - weight: (optinal) weight factor (0 < weight < 1) the higher, the more strict - plot: (optinal) True if results are to be plotted RETURNS: - seg_limits: list of segment limits in seconds (e.g [[0.1, 0.9], [1.4, 3.0]] means that the resulting segments are (0.1 - 0.9) seconds and (1.4, 3.0) seconds
[ "Event", "Detection", "(", "silence", "removal", ")", "ARGUMENTS", ":", "-", "x", ":", "the", "input", "audio", "signal", "-", "fs", ":", "sampling", "freq", "-", "st_win", "st_step", ":", "window", "size", "and", "step", "in", "seconds", "-", "smoothWindow", ":", "(", "optinal", ")", "smooth", "window", "(", "in", "seconds", ")", "-", "weight", ":", "(", "optinal", ")", "weight", "factor", "(", "0", "<", "weight", "<", "1", ")", "the", "higher", "the", "more", "strict", "-", "plot", ":", "(", "optinal", ")", "True", "if", "results", "are", "to", "be", "plotted", "RETURNS", ":", "-", "seg_limits", ":", "list", "of", "segment", "limits", "in", "seconds", "(", "e", ".", "g", "[[", "0", ".", "1", "0", ".", "9", "]", "[", "1", ".", "4", "3", ".", "0", "]]", "means", "that", "the", "resulting", "segments", "are", "(", "0", ".", "1", "-", "0", ".", "9", ")", "seconds", "and", "(", "1", ".", "4", "3", ".", "0", ")", "seconds" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L625-L738
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
speakerDiarization
def speakerDiarization(filename, n_speakers, mt_size=2.0, mt_step=0.2, st_win=0.05, lda_dim=35, plot_res=False): ''' ARGUMENTS: - filename: the name of the WAV file to be analyzed - n_speakers the number of speakers (clusters) in the recording (<=0 for unknown) - mt_size (opt) mid-term window size - mt_step (opt) mid-term window step - st_win (opt) short-term window size - lda_dim (opt) LDA dimension (0 for no LDA) - plot_res (opt) 0 for not plotting the results 1 for plottingy ''' [fs, x] = audioBasicIO.readAudioFile(filename) x = audioBasicIO.stereo2mono(x) duration = len(x) / fs [classifier_1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "knnSpeakerAll")) [classifier_2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "knnSpeakerFemaleMale")) [mt_feats, st_feats, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs, mt_step * fs, round(fs * st_win), round(fs*st_win * 0.5)) MidTermFeatures2 = numpy.zeros((mt_feats.shape[0] + len(classNames1) + len(classNames2), mt_feats.shape[1])) for i in range(mt_feats.shape[1]): cur_f1 = (mt_feats[:, i] - MEAN1) / STD1 cur_f2 = (mt_feats[:, i] - MEAN2) / STD2 [res, P1] = aT.classifierWrapper(classifier_1, "knn", cur_f1) [res, P2] = aT.classifierWrapper(classifier_2, "knn", cur_f2) MidTermFeatures2[0:mt_feats.shape[0], i] = mt_feats[:, i] MidTermFeatures2[mt_feats.shape[0]:mt_feats.shape[0]+len(classNames1), i] = P1 + 0.0001 MidTermFeatures2[mt_feats.shape[0] + len(classNames1)::, i] = P2 + 0.0001 mt_feats = MidTermFeatures2 # TODO iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53] mt_feats = mt_feats[iFeaturesSelect, :] (mt_feats_norm, MEAN, STD) = aT.normalizeFeatures([mt_feats.T]) mt_feats_norm = mt_feats_norm[0].T n_wins = 
mt_feats.shape[1] # remove outliers: dist_all = numpy.sum(distance.squareform(distance.pdist(mt_feats_norm.T)), axis=0) m_dist_all = numpy.mean(dist_all) i_non_outliers = numpy.nonzero(dist_all < 1.2 * m_dist_all)[0] # TODO: Combine energy threshold for outlier removal: #EnergyMin = numpy.min(mt_feats[1,:]) #EnergyMean = numpy.mean(mt_feats[1,:]) #Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0 #i_non_outliers = numpy.nonzero(mt_feats[1,:] > Thres)[0] #print i_non_outliers perOutLier = (100.0 * (n_wins - i_non_outliers.shape[0])) / n_wins mt_feats_norm_or = mt_feats_norm mt_feats_norm = mt_feats_norm[:, i_non_outliers] # LDA dimensionality reduction: if lda_dim > 0: #[mt_feats_to_red, _, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs, st_win * fs, round(fs*st_win), round(fs*st_win)); # extract mid-term features with minimum step: mt_win_ratio = int(round(mt_size / st_win)) mt_step_ratio = int(round(st_win / st_win)) mt_feats_to_red = [] num_of_features = len(st_feats) num_of_stats = 2 #for i in range(num_of_stats * num_of_features + 1): for i in range(num_of_stats * num_of_features): mt_feats_to_red.append([]) for i in range(num_of_features): # for each of the short-term features: curPos = 0 N = len(st_feats[i]) while (curPos < N): N1 = curPos N2 = curPos + mt_win_ratio if N2 > N: N2 = N curStFeatures = st_feats[i][N1:N2] mt_feats_to_red[i].append(numpy.mean(curStFeatures)) mt_feats_to_red[i+num_of_features].append(numpy.std(curStFeatures)) curPos += mt_step_ratio mt_feats_to_red = numpy.array(mt_feats_to_red) mt_feats_to_red_2 = numpy.zeros((mt_feats_to_red.shape[0] + len(classNames1) + len(classNames2), mt_feats_to_red.shape[1])) for i in range(mt_feats_to_red.shape[1]): cur_f1 = (mt_feats_to_red[:, i] - MEAN1) / STD1 cur_f2 = (mt_feats_to_red[:, i] - MEAN2) / STD2 [res, P1] = aT.classifierWrapper(classifier_1, "knn", cur_f1) [res, P2] = aT.classifierWrapper(classifier_2, "knn", cur_f2) mt_feats_to_red_2[0:mt_feats_to_red.shape[0], i] = mt_feats_to_red[:, i] 
mt_feats_to_red_2[mt_feats_to_red.shape[0]:mt_feats_to_red.shape[0] + len(classNames1), i] = P1 + 0.0001 mt_feats_to_red_2[mt_feats_to_red.shape[0]+len(classNames1)::, i] = P2 + 0.0001 mt_feats_to_red = mt_feats_to_red_2 mt_feats_to_red = mt_feats_to_red[iFeaturesSelect, :] #mt_feats_to_red += numpy.random.rand(mt_feats_to_red.shape[0], mt_feats_to_red.shape[1]) * 0.0000010 (mt_feats_to_red, MEAN, STD) = aT.normalizeFeatures([mt_feats_to_red.T]) mt_feats_to_red = mt_feats_to_red[0].T #dist_all = numpy.sum(distance.squareform(distance.pdist(mt_feats_to_red.T)), axis=0) #m_dist_all = numpy.mean(dist_all) #iNonOutLiers2 = numpy.nonzero(dist_all < 3.0*m_dist_all)[0] #mt_feats_to_red = mt_feats_to_red[:, iNonOutLiers2] Labels = numpy.zeros((mt_feats_to_red.shape[1], )); LDAstep = 1.0 LDAstepRatio = LDAstep / st_win #print LDAstep, LDAstepRatio for i in range(Labels.shape[0]): Labels[i] = int(i*st_win/LDAstepRatio); clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=lda_dim) clf.fit(mt_feats_to_red.T, Labels) mt_feats_norm = (clf.transform(mt_feats_norm.T)).T if n_speakers <= 0: s_range = range(2, 10) else: s_range = [n_speakers] clsAll = [] sil_all = [] centersAll = [] for iSpeakers in s_range: k_means = sklearn.cluster.KMeans(n_clusters=iSpeakers) k_means.fit(mt_feats_norm.T) cls = k_means.labels_ means = k_means.cluster_centers_ # Y = distance.squareform(distance.pdist(mt_feats_norm.T)) clsAll.append(cls) centersAll.append(means) sil_1 = []; sil_2 = [] for c in range(iSpeakers): # for each speaker (i.e. 
for each extracted cluster) clust_per_cent = numpy.nonzero(cls == c)[0].shape[0] / \ float(len(cls)) if clust_per_cent < 0.020: sil_1.append(0.0) sil_2.append(0.0) else: # get subset of feature vectors mt_feats_norm_temp = mt_feats_norm[:, cls==c] # compute average distance between samples # that belong to the cluster (a values) Yt = distance.pdist(mt_feats_norm_temp.T) sil_1.append(numpy.mean(Yt)*clust_per_cent) silBs = [] for c2 in range(iSpeakers): # compute distances from samples of other clusters if c2 != c: clust_per_cent_2 = numpy.nonzero(cls == c2)[0].shape[0] /\ float(len(cls)) MidTermFeaturesNormTemp2 = mt_feats_norm[:, cls == c2] Yt = distance.cdist(mt_feats_norm_temp.T, MidTermFeaturesNormTemp2.T) silBs.append(numpy.mean(Yt)*(clust_per_cent + clust_per_cent_2)/2.0) silBs = numpy.array(silBs) # ... and keep the minimum value (i.e. # the distance from the "nearest" cluster) sil_2.append(min(silBs)) sil_1 = numpy.array(sil_1); sil_2 = numpy.array(sil_2); sil = [] for c in range(iSpeakers): # for each cluster (speaker) compute silhouette sil.append( ( sil_2[c] - sil_1[c]) / (max(sil_2[c], sil_1[c]) + 0.00001)) # keep the AVERAGE SILLOUETTE sil_all.append(numpy.mean(sil)) imax = numpy.argmax(sil_all) # optimal number of clusters nSpeakersFinal = s_range[imax] # generate the final set of cluster labels # (important: need to retrieve the outlier windows: # this is achieved by giving them the value of their # nearest non-outlier window) cls = numpy.zeros((n_wins,)) for i in range(n_wins): j = numpy.argmin(numpy.abs(i-i_non_outliers)) cls[i] = clsAll[imax][j] # Post-process method 1: hmm smoothing for i in range(1): # hmm training start_prob, transmat, means, cov = \ trainHMM_computeStatistics(mt_feats_norm_or, cls) hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag") hmm.startprob_ = start_prob hmm.transmat_ = transmat hmm.means_ = means; hmm.covars_ = cov cls = hmm.predict(mt_feats_norm_or.T) # Post-process method 2: median filtering: cls = 
scipy.signal.medfilt(cls, 13) cls = scipy.signal.medfilt(cls, 11) sil = sil_all[imax] class_names = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)]; # load ground-truth if available gt_file = filename.replace('.wav', '.segments') # if groundturh exists if os.path.isfile(gt_file): [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) flags_gt, class_names_gt = segs2flags(seg_start, seg_end, seg_labs, mt_step) if plot_res: fig = plt.figure() if n_speakers > 0: ax1 = fig.add_subplot(111) else: ax1 = fig.add_subplot(211) ax1.set_yticks(numpy.array(range(len(class_names)))) ax1.axis((0, duration, -1, len(class_names))) ax1.set_yticklabels(class_names) ax1.plot(numpy.array(range(len(cls)))*mt_step+mt_step/2.0, cls) if os.path.isfile(gt_file): if plot_res: ax1.plot(numpy.array(range(len(flags_gt))) * mt_step + mt_step / 2.0, flags_gt, 'r') purity_cluster_m, purity_speaker_m = \ evaluateSpeakerDiarization(cls, flags_gt) print("{0:.1f}\t{1:.1f}".format(100 * purity_cluster_m, 100 * purity_speaker_m)) if plot_res: plt.title("Cluster purity: {0:.1f}% - " "Speaker purity: {1:.1f}%".format(100 * purity_cluster_m, 100 * purity_speaker_m)) if plot_res: plt.xlabel("time (seconds)") #print s_range, sil_all if n_speakers<=0: plt.subplot(212) plt.plot(s_range, sil_all) plt.xlabel("number of clusters"); plt.ylabel("average clustering's sillouette"); plt.show() return cls
python
def speakerDiarization(filename, n_speakers, mt_size=2.0, mt_step=0.2, st_win=0.05, lda_dim=35, plot_res=False): ''' ARGUMENTS: - filename: the name of the WAV file to be analyzed - n_speakers the number of speakers (clusters) in the recording (<=0 for unknown) - mt_size (opt) mid-term window size - mt_step (opt) mid-term window step - st_win (opt) short-term window size - lda_dim (opt) LDA dimension (0 for no LDA) - plot_res (opt) 0 for not plotting the results 1 for plottingy ''' [fs, x] = audioBasicIO.readAudioFile(filename) x = audioBasicIO.stereo2mono(x) duration = len(x) / fs [classifier_1, MEAN1, STD1, classNames1, mtWin1, mtStep1, stWin1, stStep1, computeBEAT1] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "knnSpeakerAll")) [classifier_2, MEAN2, STD2, classNames2, mtWin2, mtStep2, stWin2, stStep2, computeBEAT2] = aT.load_model_knn(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "knnSpeakerFemaleMale")) [mt_feats, st_feats, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs, mt_step * fs, round(fs * st_win), round(fs*st_win * 0.5)) MidTermFeatures2 = numpy.zeros((mt_feats.shape[0] + len(classNames1) + len(classNames2), mt_feats.shape[1])) for i in range(mt_feats.shape[1]): cur_f1 = (mt_feats[:, i] - MEAN1) / STD1 cur_f2 = (mt_feats[:, i] - MEAN2) / STD2 [res, P1] = aT.classifierWrapper(classifier_1, "knn", cur_f1) [res, P2] = aT.classifierWrapper(classifier_2, "knn", cur_f2) MidTermFeatures2[0:mt_feats.shape[0], i] = mt_feats[:, i] MidTermFeatures2[mt_feats.shape[0]:mt_feats.shape[0]+len(classNames1), i] = P1 + 0.0001 MidTermFeatures2[mt_feats.shape[0] + len(classNames1)::, i] = P2 + 0.0001 mt_feats = MidTermFeatures2 # TODO iFeaturesSelect = [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53] mt_feats = mt_feats[iFeaturesSelect, :] (mt_feats_norm, MEAN, STD) = aT.normalizeFeatures([mt_feats.T]) mt_feats_norm = mt_feats_norm[0].T n_wins = 
mt_feats.shape[1] # remove outliers: dist_all = numpy.sum(distance.squareform(distance.pdist(mt_feats_norm.T)), axis=0) m_dist_all = numpy.mean(dist_all) i_non_outliers = numpy.nonzero(dist_all < 1.2 * m_dist_all)[0] # TODO: Combine energy threshold for outlier removal: #EnergyMin = numpy.min(mt_feats[1,:]) #EnergyMean = numpy.mean(mt_feats[1,:]) #Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0 #i_non_outliers = numpy.nonzero(mt_feats[1,:] > Thres)[0] #print i_non_outliers perOutLier = (100.0 * (n_wins - i_non_outliers.shape[0])) / n_wins mt_feats_norm_or = mt_feats_norm mt_feats_norm = mt_feats_norm[:, i_non_outliers] # LDA dimensionality reduction: if lda_dim > 0: #[mt_feats_to_red, _, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs, st_win * fs, round(fs*st_win), round(fs*st_win)); # extract mid-term features with minimum step: mt_win_ratio = int(round(mt_size / st_win)) mt_step_ratio = int(round(st_win / st_win)) mt_feats_to_red = [] num_of_features = len(st_feats) num_of_stats = 2 #for i in range(num_of_stats * num_of_features + 1): for i in range(num_of_stats * num_of_features): mt_feats_to_red.append([]) for i in range(num_of_features): # for each of the short-term features: curPos = 0 N = len(st_feats[i]) while (curPos < N): N1 = curPos N2 = curPos + mt_win_ratio if N2 > N: N2 = N curStFeatures = st_feats[i][N1:N2] mt_feats_to_red[i].append(numpy.mean(curStFeatures)) mt_feats_to_red[i+num_of_features].append(numpy.std(curStFeatures)) curPos += mt_step_ratio mt_feats_to_red = numpy.array(mt_feats_to_red) mt_feats_to_red_2 = numpy.zeros((mt_feats_to_red.shape[0] + len(classNames1) + len(classNames2), mt_feats_to_red.shape[1])) for i in range(mt_feats_to_red.shape[1]): cur_f1 = (mt_feats_to_red[:, i] - MEAN1) / STD1 cur_f2 = (mt_feats_to_red[:, i] - MEAN2) / STD2 [res, P1] = aT.classifierWrapper(classifier_1, "knn", cur_f1) [res, P2] = aT.classifierWrapper(classifier_2, "knn", cur_f2) mt_feats_to_red_2[0:mt_feats_to_red.shape[0], i] = mt_feats_to_red[:, i] 
mt_feats_to_red_2[mt_feats_to_red.shape[0]:mt_feats_to_red.shape[0] + len(classNames1), i] = P1 + 0.0001 mt_feats_to_red_2[mt_feats_to_red.shape[0]+len(classNames1)::, i] = P2 + 0.0001 mt_feats_to_red = mt_feats_to_red_2 mt_feats_to_red = mt_feats_to_red[iFeaturesSelect, :] #mt_feats_to_red += numpy.random.rand(mt_feats_to_red.shape[0], mt_feats_to_red.shape[1]) * 0.0000010 (mt_feats_to_red, MEAN, STD) = aT.normalizeFeatures([mt_feats_to_red.T]) mt_feats_to_red = mt_feats_to_red[0].T #dist_all = numpy.sum(distance.squareform(distance.pdist(mt_feats_to_red.T)), axis=0) #m_dist_all = numpy.mean(dist_all) #iNonOutLiers2 = numpy.nonzero(dist_all < 3.0*m_dist_all)[0] #mt_feats_to_red = mt_feats_to_red[:, iNonOutLiers2] Labels = numpy.zeros((mt_feats_to_red.shape[1], )); LDAstep = 1.0 LDAstepRatio = LDAstep / st_win #print LDAstep, LDAstepRatio for i in range(Labels.shape[0]): Labels[i] = int(i*st_win/LDAstepRatio); clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=lda_dim) clf.fit(mt_feats_to_red.T, Labels) mt_feats_norm = (clf.transform(mt_feats_norm.T)).T if n_speakers <= 0: s_range = range(2, 10) else: s_range = [n_speakers] clsAll = [] sil_all = [] centersAll = [] for iSpeakers in s_range: k_means = sklearn.cluster.KMeans(n_clusters=iSpeakers) k_means.fit(mt_feats_norm.T) cls = k_means.labels_ means = k_means.cluster_centers_ # Y = distance.squareform(distance.pdist(mt_feats_norm.T)) clsAll.append(cls) centersAll.append(means) sil_1 = []; sil_2 = [] for c in range(iSpeakers): # for each speaker (i.e. 
for each extracted cluster) clust_per_cent = numpy.nonzero(cls == c)[0].shape[0] / \ float(len(cls)) if clust_per_cent < 0.020: sil_1.append(0.0) sil_2.append(0.0) else: # get subset of feature vectors mt_feats_norm_temp = mt_feats_norm[:, cls==c] # compute average distance between samples # that belong to the cluster (a values) Yt = distance.pdist(mt_feats_norm_temp.T) sil_1.append(numpy.mean(Yt)*clust_per_cent) silBs = [] for c2 in range(iSpeakers): # compute distances from samples of other clusters if c2 != c: clust_per_cent_2 = numpy.nonzero(cls == c2)[0].shape[0] /\ float(len(cls)) MidTermFeaturesNormTemp2 = mt_feats_norm[:, cls == c2] Yt = distance.cdist(mt_feats_norm_temp.T, MidTermFeaturesNormTemp2.T) silBs.append(numpy.mean(Yt)*(clust_per_cent + clust_per_cent_2)/2.0) silBs = numpy.array(silBs) # ... and keep the minimum value (i.e. # the distance from the "nearest" cluster) sil_2.append(min(silBs)) sil_1 = numpy.array(sil_1); sil_2 = numpy.array(sil_2); sil = [] for c in range(iSpeakers): # for each cluster (speaker) compute silhouette sil.append( ( sil_2[c] - sil_1[c]) / (max(sil_2[c], sil_1[c]) + 0.00001)) # keep the AVERAGE SILLOUETTE sil_all.append(numpy.mean(sil)) imax = numpy.argmax(sil_all) # optimal number of clusters nSpeakersFinal = s_range[imax] # generate the final set of cluster labels # (important: need to retrieve the outlier windows: # this is achieved by giving them the value of their # nearest non-outlier window) cls = numpy.zeros((n_wins,)) for i in range(n_wins): j = numpy.argmin(numpy.abs(i-i_non_outliers)) cls[i] = clsAll[imax][j] # Post-process method 1: hmm smoothing for i in range(1): # hmm training start_prob, transmat, means, cov = \ trainHMM_computeStatistics(mt_feats_norm_or, cls) hmm = hmmlearn.hmm.GaussianHMM(start_prob.shape[0], "diag") hmm.startprob_ = start_prob hmm.transmat_ = transmat hmm.means_ = means; hmm.covars_ = cov cls = hmm.predict(mt_feats_norm_or.T) # Post-process method 2: median filtering: cls = 
scipy.signal.medfilt(cls, 13) cls = scipy.signal.medfilt(cls, 11) sil = sil_all[imax] class_names = ["speaker{0:d}".format(c) for c in range(nSpeakersFinal)]; # load ground-truth if available gt_file = filename.replace('.wav', '.segments') # if groundturh exists if os.path.isfile(gt_file): [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) flags_gt, class_names_gt = segs2flags(seg_start, seg_end, seg_labs, mt_step) if plot_res: fig = plt.figure() if n_speakers > 0: ax1 = fig.add_subplot(111) else: ax1 = fig.add_subplot(211) ax1.set_yticks(numpy.array(range(len(class_names)))) ax1.axis((0, duration, -1, len(class_names))) ax1.set_yticklabels(class_names) ax1.plot(numpy.array(range(len(cls)))*mt_step+mt_step/2.0, cls) if os.path.isfile(gt_file): if plot_res: ax1.plot(numpy.array(range(len(flags_gt))) * mt_step + mt_step / 2.0, flags_gt, 'r') purity_cluster_m, purity_speaker_m = \ evaluateSpeakerDiarization(cls, flags_gt) print("{0:.1f}\t{1:.1f}".format(100 * purity_cluster_m, 100 * purity_speaker_m)) if plot_res: plt.title("Cluster purity: {0:.1f}% - " "Speaker purity: {1:.1f}%".format(100 * purity_cluster_m, 100 * purity_speaker_m)) if plot_res: plt.xlabel("time (seconds)") #print s_range, sil_all if n_speakers<=0: plt.subplot(212) plt.plot(s_range, sil_all) plt.xlabel("number of clusters"); plt.ylabel("average clustering's sillouette"); plt.show() return cls
[ "def", "speakerDiarization", "(", "filename", ",", "n_speakers", ",", "mt_size", "=", "2.0", ",", "mt_step", "=", "0.2", ",", "st_win", "=", "0.05", ",", "lda_dim", "=", "35", ",", "plot_res", "=", "False", ")", ":", "[", "fs", ",", "x", "]", "=", "audioBasicIO", ".", "readAudioFile", "(", "filename", ")", "x", "=", "audioBasicIO", ".", "stereo2mono", "(", "x", ")", "duration", "=", "len", "(", "x", ")", "/", "fs", "[", "classifier_1", ",", "MEAN1", ",", "STD1", ",", "classNames1", ",", "mtWin1", ",", "mtStep1", ",", "stWin1", ",", "stStep1", ",", "computeBEAT1", "]", "=", "aT", ".", "load_model_knn", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"data\"", ",", "\"knnSpeakerAll\"", ")", ")", "[", "classifier_2", ",", "MEAN2", ",", "STD2", ",", "classNames2", ",", "mtWin2", ",", "mtStep2", ",", "stWin2", ",", "stStep2", ",", "computeBEAT2", "]", "=", "aT", ".", "load_model_knn", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"data\"", ",", "\"knnSpeakerFemaleMale\"", ")", ")", "[", "mt_feats", ",", "st_feats", ",", "_", "]", "=", "aF", ".", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "mt_size", "*", "fs", ",", "mt_step", "*", "fs", ",", "round", "(", "fs", "*", "st_win", ")", ",", "round", "(", "fs", "*", "st_win", "*", "0.5", ")", ")", "MidTermFeatures2", "=", "numpy", ".", "zeros", "(", "(", "mt_feats", ".", "shape", "[", "0", "]", "+", "len", "(", "classNames1", ")", "+", "len", "(", "classNames2", ")", ",", "mt_feats", ".", "shape", "[", "1", "]", ")", ")", "for", "i", "in", "range", "(", "mt_feats", ".", "shape", "[", "1", "]", ")", ":", "cur_f1", "=", "(", "mt_feats", "[", ":", ",", "i", "]", "-", "MEAN1", ")", "/", "STD1", "cur_f2", "=", "(", "mt_feats", "[", ":", ",", "i", "]", "-", "MEAN2", ")", "/", "STD2", "[", "res", ",", "P1", "]", "=", 
"aT", ".", "classifierWrapper", "(", "classifier_1", ",", "\"knn\"", ",", "cur_f1", ")", "[", "res", ",", "P2", "]", "=", "aT", ".", "classifierWrapper", "(", "classifier_2", ",", "\"knn\"", ",", "cur_f2", ")", "MidTermFeatures2", "[", "0", ":", "mt_feats", ".", "shape", "[", "0", "]", ",", "i", "]", "=", "mt_feats", "[", ":", ",", "i", "]", "MidTermFeatures2", "[", "mt_feats", ".", "shape", "[", "0", "]", ":", "mt_feats", ".", "shape", "[", "0", "]", "+", "len", "(", "classNames1", ")", ",", "i", "]", "=", "P1", "+", "0.0001", "MidTermFeatures2", "[", "mt_feats", ".", "shape", "[", "0", "]", "+", "len", "(", "classNames1", ")", ":", ":", ",", "i", "]", "=", "P2", "+", "0.0001", "mt_feats", "=", "MidTermFeatures2", "# TODO", "iFeaturesSelect", "=", "[", "8", ",", "9", ",", "10", ",", "11", ",", "12", ",", "13", ",", "14", ",", "15", ",", "16", ",", "17", ",", "18", ",", "19", ",", "20", ",", "41", ",", "42", ",", "43", ",", "44", ",", "45", ",", "46", ",", "47", ",", "48", ",", "49", ",", "50", ",", "51", ",", "52", ",", "53", "]", "mt_feats", "=", "mt_feats", "[", "iFeaturesSelect", ",", ":", "]", "(", "mt_feats_norm", ",", "MEAN", ",", "STD", ")", "=", "aT", ".", "normalizeFeatures", "(", "[", "mt_feats", ".", "T", "]", ")", "mt_feats_norm", "=", "mt_feats_norm", "[", "0", "]", ".", "T", "n_wins", "=", "mt_feats", ".", "shape", "[", "1", "]", "# remove outliers:", "dist_all", "=", "numpy", ".", "sum", "(", "distance", ".", "squareform", "(", "distance", ".", "pdist", "(", "mt_feats_norm", ".", "T", ")", ")", ",", "axis", "=", "0", ")", "m_dist_all", "=", "numpy", ".", "mean", "(", "dist_all", ")", "i_non_outliers", "=", "numpy", ".", "nonzero", "(", "dist_all", "<", "1.2", "*", "m_dist_all", ")", "[", "0", "]", "# TODO: Combine energy threshold for outlier removal:", "#EnergyMin = numpy.min(mt_feats[1,:])", "#EnergyMean = numpy.mean(mt_feats[1,:])", "#Thres = (1.5*EnergyMin + 0.5*EnergyMean) / 2.0", "#i_non_outliers = numpy.nonzero(mt_feats[1,:] > Thres)[0]", 
"#print i_non_outliers", "perOutLier", "=", "(", "100.0", "*", "(", "n_wins", "-", "i_non_outliers", ".", "shape", "[", "0", "]", ")", ")", "/", "n_wins", "mt_feats_norm_or", "=", "mt_feats_norm", "mt_feats_norm", "=", "mt_feats_norm", "[", ":", ",", "i_non_outliers", "]", "# LDA dimensionality reduction:", "if", "lda_dim", ">", "0", ":", "#[mt_feats_to_red, _, _] = aF.mtFeatureExtraction(x, fs, mt_size * fs, st_win * fs, round(fs*st_win), round(fs*st_win));", "# extract mid-term features with minimum step:", "mt_win_ratio", "=", "int", "(", "round", "(", "mt_size", "/", "st_win", ")", ")", "mt_step_ratio", "=", "int", "(", "round", "(", "st_win", "/", "st_win", ")", ")", "mt_feats_to_red", "=", "[", "]", "num_of_features", "=", "len", "(", "st_feats", ")", "num_of_stats", "=", "2", "#for i in range(num_of_stats * num_of_features + 1):", "for", "i", "in", "range", "(", "num_of_stats", "*", "num_of_features", ")", ":", "mt_feats_to_red", ".", "append", "(", "[", "]", ")", "for", "i", "in", "range", "(", "num_of_features", ")", ":", "# for each of the short-term features:", "curPos", "=", "0", "N", "=", "len", "(", "st_feats", "[", "i", "]", ")", "while", "(", "curPos", "<", "N", ")", ":", "N1", "=", "curPos", "N2", "=", "curPos", "+", "mt_win_ratio", "if", "N2", ">", "N", ":", "N2", "=", "N", "curStFeatures", "=", "st_feats", "[", "i", "]", "[", "N1", ":", "N2", "]", "mt_feats_to_red", "[", "i", "]", ".", "append", "(", "numpy", ".", "mean", "(", "curStFeatures", ")", ")", "mt_feats_to_red", "[", "i", "+", "num_of_features", "]", ".", "append", "(", "numpy", ".", "std", "(", "curStFeatures", ")", ")", "curPos", "+=", "mt_step_ratio", "mt_feats_to_red", "=", "numpy", ".", "array", "(", "mt_feats_to_red", ")", "mt_feats_to_red_2", "=", "numpy", ".", "zeros", "(", "(", "mt_feats_to_red", ".", "shape", "[", "0", "]", "+", "len", "(", "classNames1", ")", "+", "len", "(", "classNames2", ")", ",", "mt_feats_to_red", ".", "shape", "[", "1", "]", ")", ")", "for", "i", "in", 
"range", "(", "mt_feats_to_red", ".", "shape", "[", "1", "]", ")", ":", "cur_f1", "=", "(", "mt_feats_to_red", "[", ":", ",", "i", "]", "-", "MEAN1", ")", "/", "STD1", "cur_f2", "=", "(", "mt_feats_to_red", "[", ":", ",", "i", "]", "-", "MEAN2", ")", "/", "STD2", "[", "res", ",", "P1", "]", "=", "aT", ".", "classifierWrapper", "(", "classifier_1", ",", "\"knn\"", ",", "cur_f1", ")", "[", "res", ",", "P2", "]", "=", "aT", ".", "classifierWrapper", "(", "classifier_2", ",", "\"knn\"", ",", "cur_f2", ")", "mt_feats_to_red_2", "[", "0", ":", "mt_feats_to_red", ".", "shape", "[", "0", "]", ",", "i", "]", "=", "mt_feats_to_red", "[", ":", ",", "i", "]", "mt_feats_to_red_2", "[", "mt_feats_to_red", ".", "shape", "[", "0", "]", ":", "mt_feats_to_red", ".", "shape", "[", "0", "]", "+", "len", "(", "classNames1", ")", ",", "i", "]", "=", "P1", "+", "0.0001", "mt_feats_to_red_2", "[", "mt_feats_to_red", ".", "shape", "[", "0", "]", "+", "len", "(", "classNames1", ")", ":", ":", ",", "i", "]", "=", "P2", "+", "0.0001", "mt_feats_to_red", "=", "mt_feats_to_red_2", "mt_feats_to_red", "=", "mt_feats_to_red", "[", "iFeaturesSelect", ",", ":", "]", "#mt_feats_to_red += numpy.random.rand(mt_feats_to_red.shape[0], mt_feats_to_red.shape[1]) * 0.0000010", "(", "mt_feats_to_red", ",", "MEAN", ",", "STD", ")", "=", "aT", ".", "normalizeFeatures", "(", "[", "mt_feats_to_red", ".", "T", "]", ")", "mt_feats_to_red", "=", "mt_feats_to_red", "[", "0", "]", ".", "T", "#dist_all = numpy.sum(distance.squareform(distance.pdist(mt_feats_to_red.T)), axis=0)", "#m_dist_all = numpy.mean(dist_all)", "#iNonOutLiers2 = numpy.nonzero(dist_all < 3.0*m_dist_all)[0]", "#mt_feats_to_red = mt_feats_to_red[:, iNonOutLiers2]", "Labels", "=", "numpy", ".", "zeros", "(", "(", "mt_feats_to_red", ".", "shape", "[", "1", "]", ",", ")", ")", "LDAstep", "=", "1.0", "LDAstepRatio", "=", "LDAstep", "/", "st_win", "#print LDAstep, LDAstepRatio", "for", "i", "in", "range", "(", "Labels", ".", "shape", "[", "0", "]", ")", 
":", "Labels", "[", "i", "]", "=", "int", "(", "i", "*", "st_win", "/", "LDAstepRatio", ")", "clf", "=", "sklearn", ".", "discriminant_analysis", ".", "LinearDiscriminantAnalysis", "(", "n_components", "=", "lda_dim", ")", "clf", ".", "fit", "(", "mt_feats_to_red", ".", "T", ",", "Labels", ")", "mt_feats_norm", "=", "(", "clf", ".", "transform", "(", "mt_feats_norm", ".", "T", ")", ")", ".", "T", "if", "n_speakers", "<=", "0", ":", "s_range", "=", "range", "(", "2", ",", "10", ")", "else", ":", "s_range", "=", "[", "n_speakers", "]", "clsAll", "=", "[", "]", "sil_all", "=", "[", "]", "centersAll", "=", "[", "]", "for", "iSpeakers", "in", "s_range", ":", "k_means", "=", "sklearn", ".", "cluster", ".", "KMeans", "(", "n_clusters", "=", "iSpeakers", ")", "k_means", ".", "fit", "(", "mt_feats_norm", ".", "T", ")", "cls", "=", "k_means", ".", "labels_", "means", "=", "k_means", ".", "cluster_centers_", "# Y = distance.squareform(distance.pdist(mt_feats_norm.T))", "clsAll", ".", "append", "(", "cls", ")", "centersAll", ".", "append", "(", "means", ")", "sil_1", "=", "[", "]", "sil_2", "=", "[", "]", "for", "c", "in", "range", "(", "iSpeakers", ")", ":", "# for each speaker (i.e. 
for each extracted cluster)", "clust_per_cent", "=", "numpy", ".", "nonzero", "(", "cls", "==", "c", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "/", "float", "(", "len", "(", "cls", ")", ")", "if", "clust_per_cent", "<", "0.020", ":", "sil_1", ".", "append", "(", "0.0", ")", "sil_2", ".", "append", "(", "0.0", ")", "else", ":", "# get subset of feature vectors", "mt_feats_norm_temp", "=", "mt_feats_norm", "[", ":", ",", "cls", "==", "c", "]", "# compute average distance between samples", "# that belong to the cluster (a values)", "Yt", "=", "distance", ".", "pdist", "(", "mt_feats_norm_temp", ".", "T", ")", "sil_1", ".", "append", "(", "numpy", ".", "mean", "(", "Yt", ")", "*", "clust_per_cent", ")", "silBs", "=", "[", "]", "for", "c2", "in", "range", "(", "iSpeakers", ")", ":", "# compute distances from samples of other clusters", "if", "c2", "!=", "c", ":", "clust_per_cent_2", "=", "numpy", ".", "nonzero", "(", "cls", "==", "c2", ")", "[", "0", "]", ".", "shape", "[", "0", "]", "/", "float", "(", "len", "(", "cls", ")", ")", "MidTermFeaturesNormTemp2", "=", "mt_feats_norm", "[", ":", ",", "cls", "==", "c2", "]", "Yt", "=", "distance", ".", "cdist", "(", "mt_feats_norm_temp", ".", "T", ",", "MidTermFeaturesNormTemp2", ".", "T", ")", "silBs", ".", "append", "(", "numpy", ".", "mean", "(", "Yt", ")", "*", "(", "clust_per_cent", "+", "clust_per_cent_2", ")", "/", "2.0", ")", "silBs", "=", "numpy", ".", "array", "(", "silBs", ")", "# ... 
and keep the minimum value (i.e.", "# the distance from the \"nearest\" cluster)", "sil_2", ".", "append", "(", "min", "(", "silBs", ")", ")", "sil_1", "=", "numpy", ".", "array", "(", "sil_1", ")", "sil_2", "=", "numpy", ".", "array", "(", "sil_2", ")", "sil", "=", "[", "]", "for", "c", "in", "range", "(", "iSpeakers", ")", ":", "# for each cluster (speaker) compute silhouette", "sil", ".", "append", "(", "(", "sil_2", "[", "c", "]", "-", "sil_1", "[", "c", "]", ")", "/", "(", "max", "(", "sil_2", "[", "c", "]", ",", "sil_1", "[", "c", "]", ")", "+", "0.00001", ")", ")", "# keep the AVERAGE SILLOUETTE", "sil_all", ".", "append", "(", "numpy", ".", "mean", "(", "sil", ")", ")", "imax", "=", "numpy", ".", "argmax", "(", "sil_all", ")", "# optimal number of clusters", "nSpeakersFinal", "=", "s_range", "[", "imax", "]", "# generate the final set of cluster labels", "# (important: need to retrieve the outlier windows:", "# this is achieved by giving them the value of their", "# nearest non-outlier window)", "cls", "=", "numpy", ".", "zeros", "(", "(", "n_wins", ",", ")", ")", "for", "i", "in", "range", "(", "n_wins", ")", ":", "j", "=", "numpy", ".", "argmin", "(", "numpy", ".", "abs", "(", "i", "-", "i_non_outliers", ")", ")", "cls", "[", "i", "]", "=", "clsAll", "[", "imax", "]", "[", "j", "]", "# Post-process method 1: hmm smoothing", "for", "i", "in", "range", "(", "1", ")", ":", "# hmm training", "start_prob", ",", "transmat", ",", "means", ",", "cov", "=", "trainHMM_computeStatistics", "(", "mt_feats_norm_or", ",", "cls", ")", "hmm", "=", "hmmlearn", ".", "hmm", ".", "GaussianHMM", "(", "start_prob", ".", "shape", "[", "0", "]", ",", "\"diag\"", ")", "hmm", ".", "startprob_", "=", "start_prob", "hmm", ".", "transmat_", "=", "transmat", "hmm", ".", "means_", "=", "means", "hmm", ".", "covars_", "=", "cov", "cls", "=", "hmm", ".", "predict", "(", "mt_feats_norm_or", ".", "T", ")", "# Post-process method 2: median filtering:", "cls", "=", "scipy", ".", "signal", 
".", "medfilt", "(", "cls", ",", "13", ")", "cls", "=", "scipy", ".", "signal", ".", "medfilt", "(", "cls", ",", "11", ")", "sil", "=", "sil_all", "[", "imax", "]", "class_names", "=", "[", "\"speaker{0:d}\"", ".", "format", "(", "c", ")", "for", "c", "in", "range", "(", "nSpeakersFinal", ")", "]", "# load ground-truth if available", "gt_file", "=", "filename", ".", "replace", "(", "'.wav'", ",", "'.segments'", ")", "# if groundturh exists", "if", "os", ".", "path", ".", "isfile", "(", "gt_file", ")", ":", "[", "seg_start", ",", "seg_end", ",", "seg_labs", "]", "=", "readSegmentGT", "(", "gt_file", ")", "flags_gt", ",", "class_names_gt", "=", "segs2flags", "(", "seg_start", ",", "seg_end", ",", "seg_labs", ",", "mt_step", ")", "if", "plot_res", ":", "fig", "=", "plt", ".", "figure", "(", ")", "if", "n_speakers", ">", "0", ":", "ax1", "=", "fig", ".", "add_subplot", "(", "111", ")", "else", ":", "ax1", "=", "fig", ".", "add_subplot", "(", "211", ")", "ax1", ".", "set_yticks", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "class_names", ")", ")", ")", ")", "ax1", ".", "axis", "(", "(", "0", ",", "duration", ",", "-", "1", ",", "len", "(", "class_names", ")", ")", ")", "ax1", ".", "set_yticklabels", "(", "class_names", ")", "ax1", ".", "plot", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "cls", ")", ")", ")", "*", "mt_step", "+", "mt_step", "/", "2.0", ",", "cls", ")", "if", "os", ".", "path", ".", "isfile", "(", "gt_file", ")", ":", "if", "plot_res", ":", "ax1", ".", "plot", "(", "numpy", ".", "array", "(", "range", "(", "len", "(", "flags_gt", ")", ")", ")", "*", "mt_step", "+", "mt_step", "/", "2.0", ",", "flags_gt", ",", "'r'", ")", "purity_cluster_m", ",", "purity_speaker_m", "=", "evaluateSpeakerDiarization", "(", "cls", ",", "flags_gt", ")", "print", "(", "\"{0:.1f}\\t{1:.1f}\"", ".", "format", "(", "100", "*", "purity_cluster_m", ",", "100", "*", "purity_speaker_m", ")", ")", "if", "plot_res", ":", "plt", ".", "title", "(", 
"\"Cluster purity: {0:.1f}% - \"", "\"Speaker purity: {1:.1f}%\"", ".", "format", "(", "100", "*", "purity_cluster_m", ",", "100", "*", "purity_speaker_m", ")", ")", "if", "plot_res", ":", "plt", ".", "xlabel", "(", "\"time (seconds)\"", ")", "#print s_range, sil_all ", "if", "n_speakers", "<=", "0", ":", "plt", ".", "subplot", "(", "212", ")", "plt", ".", "plot", "(", "s_range", ",", "sil_all", ")", "plt", ".", "xlabel", "(", "\"number of clusters\"", ")", "plt", ".", "ylabel", "(", "\"average clustering's sillouette\"", ")", "plt", ".", "show", "(", ")", "return", "cls" ]
ARGUMENTS: - filename: the name of the WAV file to be analyzed - n_speakers the number of speakers (clusters) in the recording (<=0 for unknown) - mt_size (opt) mid-term window size - mt_step (opt) mid-term window step - st_win (opt) short-term window size - lda_dim (opt) LDA dimension (0 for no LDA) - plot_res (opt) 0 for not plotting the results 1 for plottingy
[ "ARGUMENTS", ":", "-", "filename", ":", "the", "name", "of", "the", "WAV", "file", "to", "be", "analyzed", "-", "n_speakers", "the", "number", "of", "speakers", "(", "clusters", ")", "in", "the", "recording", "(", "<", "=", "0", "for", "unknown", ")", "-", "mt_size", "(", "opt", ")", "mid", "-", "term", "window", "size", "-", "mt_step", "(", "opt", ")", "mid", "-", "term", "window", "step", "-", "st_win", "(", "opt", ")", "short", "-", "term", "window", "size", "-", "lda_dim", "(", "opt", ")", "LDA", "dimension", "(", "0", "for", "no", "LDA", ")", "-", "plot_res", "(", "opt", ")", "0", "for", "not", "plotting", "the", "results", "1", "for", "plottingy" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L741-L988
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
speakerDiarizationEvaluateScript
def speakerDiarizationEvaluateScript(folder_name, ldas): ''' This function prints the cluster purity and speaker purity for each WAV file stored in a provided directory (.SEGMENT files are needed as ground-truth) ARGUMENTS: - folder_name: the full path of the folder where the WAV and SEGMENT (ground-truth) files are stored - ldas: a list of LDA dimensions (0 for no LDA) ''' types = ('*.wav', ) wavFilesList = [] for files in types: wavFilesList.extend(glob.glob(os.path.join(folder_name, files))) wavFilesList = sorted(wavFilesList) # get number of unique speakers per file (from ground-truth) N = [] for wav_file in wavFilesList: gt_file = wav_file.replace('.wav', '.segments'); if os.path.isfile(gt_file): [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) N.append(len(list(set(seg_labs)))) else: N.append(-1) for l in ldas: print("LDA = {0:d}".format(l)) for i, wav_file in enumerate(wavFilesList): speakerDiarization(wav_file, N[i], 2.0, 0.2, 0.05, l, plot_res=False) print
python
def speakerDiarizationEvaluateScript(folder_name, ldas): ''' This function prints the cluster purity and speaker purity for each WAV file stored in a provided directory (.SEGMENT files are needed as ground-truth) ARGUMENTS: - folder_name: the full path of the folder where the WAV and SEGMENT (ground-truth) files are stored - ldas: a list of LDA dimensions (0 for no LDA) ''' types = ('*.wav', ) wavFilesList = [] for files in types: wavFilesList.extend(glob.glob(os.path.join(folder_name, files))) wavFilesList = sorted(wavFilesList) # get number of unique speakers per file (from ground-truth) N = [] for wav_file in wavFilesList: gt_file = wav_file.replace('.wav', '.segments'); if os.path.isfile(gt_file): [seg_start, seg_end, seg_labs] = readSegmentGT(gt_file) N.append(len(list(set(seg_labs)))) else: N.append(-1) for l in ldas: print("LDA = {0:d}".format(l)) for i, wav_file in enumerate(wavFilesList): speakerDiarization(wav_file, N[i], 2.0, 0.2, 0.05, l, plot_res=False) print
[ "def", "speakerDiarizationEvaluateScript", "(", "folder_name", ",", "ldas", ")", ":", "types", "=", "(", "'*.wav'", ",", ")", "wavFilesList", "=", "[", "]", "for", "files", "in", "types", ":", "wavFilesList", ".", "extend", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "folder_name", ",", "files", ")", ")", ")", "wavFilesList", "=", "sorted", "(", "wavFilesList", ")", "# get number of unique speakers per file (from ground-truth) ", "N", "=", "[", "]", "for", "wav_file", "in", "wavFilesList", ":", "gt_file", "=", "wav_file", ".", "replace", "(", "'.wav'", ",", "'.segments'", ")", "if", "os", ".", "path", ".", "isfile", "(", "gt_file", ")", ":", "[", "seg_start", ",", "seg_end", ",", "seg_labs", "]", "=", "readSegmentGT", "(", "gt_file", ")", "N", ".", "append", "(", "len", "(", "list", "(", "set", "(", "seg_labs", ")", ")", ")", ")", "else", ":", "N", ".", "append", "(", "-", "1", ")", "for", "l", "in", "ldas", ":", "print", "(", "\"LDA = {0:d}\"", ".", "format", "(", "l", ")", ")", "for", "i", ",", "wav_file", "in", "enumerate", "(", "wavFilesList", ")", ":", "speakerDiarization", "(", "wav_file", ",", "N", "[", "i", "]", ",", "2.0", ",", "0.2", ",", "0.05", ",", "l", ",", "plot_res", "=", "False", ")", "print" ]
This function prints the cluster purity and speaker purity for each WAV file stored in a provided directory (.SEGMENT files are needed as ground-truth) ARGUMENTS: - folder_name: the full path of the folder where the WAV and SEGMENT (ground-truth) files are stored - ldas: a list of LDA dimensions (0 for no LDA)
[ "This", "function", "prints", "the", "cluster", "purity", "and", "speaker", "purity", "for", "each", "WAV", "file", "stored", "in", "a", "provided", "directory", "(", ".", "SEGMENT", "files", "are", "needed", "as", "ground", "-", "truth", ")", "ARGUMENTS", ":", "-", "folder_name", ":", "the", "full", "path", "of", "the", "folder", "where", "the", "WAV", "and", "SEGMENT", "(", "ground", "-", "truth", ")", "files", "are", "stored", "-", "ldas", ":", "a", "list", "of", "LDA", "dimensions", "(", "0", "for", "no", "LDA", ")" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L990-L1021
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioSegmentation.py
musicThumbnailing
def musicThumbnailing(x, fs, short_term_size=1.0, short_term_step=0.5, thumb_size=10.0, limit_1 = 0, limit_2 = 1): ''' This function detects instances of the most representative part of a music recording, also called "music thumbnails". A technique similar to the one proposed in [1], however a wider set of audio features is used instead of chroma features. In particular the following steps are followed: - Extract short-term audio features. Typical short-term window size: 1 second - Compute the self-silimarity matrix, i.e. all pairwise similarities between feature vectors - Apply a diagonal mask is as a moving average filter on the values of the self-similarty matrix. The size of the mask is equal to the desirable thumbnail length. - Find the position of the maximum value of the new (filtered) self-similarity matrix. The audio segments that correspond to the diagonial around that position are the selected thumbnails ARGUMENTS: - x: input signal - fs: sampling frequency - short_term_size: window size (in seconds) - short_term_step: window step (in seconds) - thumb_size: desider thumbnail size (in seconds) RETURNS: - A1: beginning of 1st thumbnail (in seconds) - A2: ending of 1st thumbnail (in seconds) - B1: beginning of 2nd thumbnail (in seconds) - B2: ending of 2nd thumbnail (in seconds) USAGE EXAMPLE: import audioFeatureExtraction as aF [fs, x] = basicIO.readAudioFile(input_file) [A1, A2, B1, B2] = musicThumbnailing(x, fs) [1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing of popular music using chroma-based representations. Multimedia, IEEE Transactions on, 7(1), 96-104. 
''' x = audioBasicIO.stereo2mono(x); # feature extraction: st_feats, _ = aF.stFeatureExtraction(x, fs, fs * short_term_size, fs * short_term_step) # self-similarity matrix S = selfSimilarityMatrix(st_feats) # moving filter: M = int(round(thumb_size / short_term_step)) B = numpy.eye(M,M) S = scipy.signal.convolve2d(S, B, 'valid') # post-processing (remove main diagonal elements) min_sm = numpy.min(S) for i in range(S.shape[0]): for j in range(S.shape[1]): if abs(i-j) < 5.0 / short_term_step or i > j: S[i,j] = min_sm; # find max position: S[0:int(limit_1 * S.shape[0]), :] = min_sm S[:, 0:int(limit_1 * S.shape[0])] = min_sm S[int(limit_2 * S.shape[0])::, :] = min_sm S[:, int(limit_2 * S.shape[0])::] = min_sm maxVal = numpy.max(S) [I, J] = numpy.unravel_index(S.argmax(), S.shape) #plt.imshow(S) #plt.show() # expand: i1 = I i2 = I j1 = J j2 = J while i2-i1<M: if i1 <=0 or j1<=0 or i2 >= S.shape[0]-2 or j2 >= S.shape[1]-2: break if S[i1-1, j1-1] > S[i2 + 1, j2 + 1]: i1 -= 1 j1 -= 1 else: i2 += 1 j2 += 1 return short_term_step * i1, short_term_step * i2, \ short_term_step * j1, short_term_step * j2, S
python
def musicThumbnailing(x, fs, short_term_size=1.0, short_term_step=0.5, thumb_size=10.0, limit_1 = 0, limit_2 = 1): ''' This function detects instances of the most representative part of a music recording, also called "music thumbnails". A technique similar to the one proposed in [1], however a wider set of audio features is used instead of chroma features. In particular the following steps are followed: - Extract short-term audio features. Typical short-term window size: 1 second - Compute the self-silimarity matrix, i.e. all pairwise similarities between feature vectors - Apply a diagonal mask is as a moving average filter on the values of the self-similarty matrix. The size of the mask is equal to the desirable thumbnail length. - Find the position of the maximum value of the new (filtered) self-similarity matrix. The audio segments that correspond to the diagonial around that position are the selected thumbnails ARGUMENTS: - x: input signal - fs: sampling frequency - short_term_size: window size (in seconds) - short_term_step: window step (in seconds) - thumb_size: desider thumbnail size (in seconds) RETURNS: - A1: beginning of 1st thumbnail (in seconds) - A2: ending of 1st thumbnail (in seconds) - B1: beginning of 2nd thumbnail (in seconds) - B2: ending of 2nd thumbnail (in seconds) USAGE EXAMPLE: import audioFeatureExtraction as aF [fs, x] = basicIO.readAudioFile(input_file) [A1, A2, B1, B2] = musicThumbnailing(x, fs) [1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing of popular music using chroma-based representations. Multimedia, IEEE Transactions on, 7(1), 96-104. 
''' x = audioBasicIO.stereo2mono(x); # feature extraction: st_feats, _ = aF.stFeatureExtraction(x, fs, fs * short_term_size, fs * short_term_step) # self-similarity matrix S = selfSimilarityMatrix(st_feats) # moving filter: M = int(round(thumb_size / short_term_step)) B = numpy.eye(M,M) S = scipy.signal.convolve2d(S, B, 'valid') # post-processing (remove main diagonal elements) min_sm = numpy.min(S) for i in range(S.shape[0]): for j in range(S.shape[1]): if abs(i-j) < 5.0 / short_term_step or i > j: S[i,j] = min_sm; # find max position: S[0:int(limit_1 * S.shape[0]), :] = min_sm S[:, 0:int(limit_1 * S.shape[0])] = min_sm S[int(limit_2 * S.shape[0])::, :] = min_sm S[:, int(limit_2 * S.shape[0])::] = min_sm maxVal = numpy.max(S) [I, J] = numpy.unravel_index(S.argmax(), S.shape) #plt.imshow(S) #plt.show() # expand: i1 = I i2 = I j1 = J j2 = J while i2-i1<M: if i1 <=0 or j1<=0 or i2 >= S.shape[0]-2 or j2 >= S.shape[1]-2: break if S[i1-1, j1-1] > S[i2 + 1, j2 + 1]: i1 -= 1 j1 -= 1 else: i2 += 1 j2 += 1 return short_term_step * i1, short_term_step * i2, \ short_term_step * j1, short_term_step * j2, S
[ "def", "musicThumbnailing", "(", "x", ",", "fs", ",", "short_term_size", "=", "1.0", ",", "short_term_step", "=", "0.5", ",", "thumb_size", "=", "10.0", ",", "limit_1", "=", "0", ",", "limit_2", "=", "1", ")", ":", "x", "=", "audioBasicIO", ".", "stereo2mono", "(", "x", ")", "# feature extraction:", "st_feats", ",", "_", "=", "aF", ".", "stFeatureExtraction", "(", "x", ",", "fs", ",", "fs", "*", "short_term_size", ",", "fs", "*", "short_term_step", ")", "# self-similarity matrix", "S", "=", "selfSimilarityMatrix", "(", "st_feats", ")", "# moving filter:", "M", "=", "int", "(", "round", "(", "thumb_size", "/", "short_term_step", ")", ")", "B", "=", "numpy", ".", "eye", "(", "M", ",", "M", ")", "S", "=", "scipy", ".", "signal", ".", "convolve2d", "(", "S", ",", "B", ",", "'valid'", ")", "# post-processing (remove main diagonal elements)", "min_sm", "=", "numpy", ".", "min", "(", "S", ")", "for", "i", "in", "range", "(", "S", ".", "shape", "[", "0", "]", ")", ":", "for", "j", "in", "range", "(", "S", ".", "shape", "[", "1", "]", ")", ":", "if", "abs", "(", "i", "-", "j", ")", "<", "5.0", "/", "short_term_step", "or", "i", ">", "j", ":", "S", "[", "i", ",", "j", "]", "=", "min_sm", "# find max position:", "S", "[", "0", ":", "int", "(", "limit_1", "*", "S", ".", "shape", "[", "0", "]", ")", ",", ":", "]", "=", "min_sm", "S", "[", ":", ",", "0", ":", "int", "(", "limit_1", "*", "S", ".", "shape", "[", "0", "]", ")", "]", "=", "min_sm", "S", "[", "int", "(", "limit_2", "*", "S", ".", "shape", "[", "0", "]", ")", ":", ":", ",", ":", "]", "=", "min_sm", "S", "[", ":", ",", "int", "(", "limit_2", "*", "S", ".", "shape", "[", "0", "]", ")", ":", ":", "]", "=", "min_sm", "maxVal", "=", "numpy", ".", "max", "(", "S", ")", "[", "I", ",", "J", "]", "=", "numpy", ".", "unravel_index", "(", "S", ".", "argmax", "(", ")", ",", "S", ".", "shape", ")", "#plt.imshow(S)", "#plt.show()", "# expand:", "i1", "=", "I", "i2", "=", "I", "j1", "=", "J", "j2", "=", "J", "while", "i2", "-", 
"i1", "<", "M", ":", "if", "i1", "<=", "0", "or", "j1", "<=", "0", "or", "i2", ">=", "S", ".", "shape", "[", "0", "]", "-", "2", "or", "j2", ">=", "S", ".", "shape", "[", "1", "]", "-", "2", ":", "break", "if", "S", "[", "i1", "-", "1", ",", "j1", "-", "1", "]", ">", "S", "[", "i2", "+", "1", ",", "j2", "+", "1", "]", ":", "i1", "-=", "1", "j1", "-=", "1", "else", ":", "i2", "+=", "1", "j2", "+=", "1", "return", "short_term_step", "*", "i1", ",", "short_term_step", "*", "i2", ",", "short_term_step", "*", "j1", ",", "short_term_step", "*", "j2", ",", "S" ]
This function detects instances of the most representative part of a music recording, also called "music thumbnails". A technique similar to the one proposed in [1], however a wider set of audio features is used instead of chroma features. In particular the following steps are followed: - Extract short-term audio features. Typical short-term window size: 1 second - Compute the self-silimarity matrix, i.e. all pairwise similarities between feature vectors - Apply a diagonal mask is as a moving average filter on the values of the self-similarty matrix. The size of the mask is equal to the desirable thumbnail length. - Find the position of the maximum value of the new (filtered) self-similarity matrix. The audio segments that correspond to the diagonial around that position are the selected thumbnails ARGUMENTS: - x: input signal - fs: sampling frequency - short_term_size: window size (in seconds) - short_term_step: window step (in seconds) - thumb_size: desider thumbnail size (in seconds) RETURNS: - A1: beginning of 1st thumbnail (in seconds) - A2: ending of 1st thumbnail (in seconds) - B1: beginning of 2nd thumbnail (in seconds) - B2: ending of 2nd thumbnail (in seconds) USAGE EXAMPLE: import audioFeatureExtraction as aF [fs, x] = basicIO.readAudioFile(input_file) [A1, A2, B1, B2] = musicThumbnailing(x, fs) [1] Bartsch, M. A., & Wakefield, G. H. (2005). Audio thumbnailing of popular music using chroma-based representations. Multimedia, IEEE Transactions on, 7(1), 96-104.
[ "This", "function", "detects", "instances", "of", "the", "most", "representative", "part", "of", "a", "music", "recording", "also", "called", "music", "thumbnails", ".", "A", "technique", "similar", "to", "the", "one", "proposed", "in", "[", "1", "]", "however", "a", "wider", "set", "of", "audio", "features", "is", "used", "instead", "of", "chroma", "features", ".", "In", "particular", "the", "following", "steps", "are", "followed", ":", "-", "Extract", "short", "-", "term", "audio", "features", ".", "Typical", "short", "-", "term", "window", "size", ":", "1", "second", "-", "Compute", "the", "self", "-", "silimarity", "matrix", "i", ".", "e", ".", "all", "pairwise", "similarities", "between", "feature", "vectors", "-", "Apply", "a", "diagonal", "mask", "is", "as", "a", "moving", "average", "filter", "on", "the", "values", "of", "the", "self", "-", "similarty", "matrix", ".", "The", "size", "of", "the", "mask", "is", "equal", "to", "the", "desirable", "thumbnail", "length", ".", "-", "Find", "the", "position", "of", "the", "maximum", "value", "of", "the", "new", "(", "filtered", ")", "self", "-", "similarity", "matrix", ".", "The", "audio", "segments", "that", "correspond", "to", "the", "diagonial", "around", "that", "position", "are", "the", "selected", "thumbnails" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioSegmentation.py#L1023-L1109
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioVisualization.py
generateColorMap
def generateColorMap(): ''' This function generates a 256 jet colormap of HTML-like hex string colors (e.g. FF88AA) ''' Map = cm.jet(np.arange(256)) stringColors = [] for i in range(Map.shape[0]): rgb = (int(255*Map[i][0]), int(255*Map[i][1]), int(255*Map[i][2])) if (sys.version_info > (3, 0)): stringColors.append((struct.pack('BBB', *rgb).hex())) # python 3 else: stringColors.append( struct.pack('BBB', *rgb).encode('hex')) # python2 return stringColors
python
def generateColorMap(): ''' This function generates a 256 jet colormap of HTML-like hex string colors (e.g. FF88AA) ''' Map = cm.jet(np.arange(256)) stringColors = [] for i in range(Map.shape[0]): rgb = (int(255*Map[i][0]), int(255*Map[i][1]), int(255*Map[i][2])) if (sys.version_info > (3, 0)): stringColors.append((struct.pack('BBB', *rgb).hex())) # python 3 else: stringColors.append( struct.pack('BBB', *rgb).encode('hex')) # python2 return stringColors
[ "def", "generateColorMap", "(", ")", ":", "Map", "=", "cm", ".", "jet", "(", "np", ".", "arange", "(", "256", ")", ")", "stringColors", "=", "[", "]", "for", "i", "in", "range", "(", "Map", ".", "shape", "[", "0", "]", ")", ":", "rgb", "=", "(", "int", "(", "255", "*", "Map", "[", "i", "]", "[", "0", "]", ")", ",", "int", "(", "255", "*", "Map", "[", "i", "]", "[", "1", "]", ")", ",", "int", "(", "255", "*", "Map", "[", "i", "]", "[", "2", "]", ")", ")", "if", "(", "sys", ".", "version_info", ">", "(", "3", ",", "0", ")", ")", ":", "stringColors", ".", "append", "(", "(", "struct", ".", "pack", "(", "'BBB'", ",", "*", "rgb", ")", ".", "hex", "(", ")", ")", ")", "# python 3", "else", ":", "stringColors", ".", "append", "(", "struct", ".", "pack", "(", "'BBB'", ",", "*", "rgb", ")", ".", "encode", "(", "'hex'", ")", ")", "# python2", "return", "stringColors" ]
This function generates a 256 jet colormap of HTML-like hex string colors (e.g. FF88AA)
[ "This", "function", "generates", "a", "256", "jet", "colormap", "of", "HTML", "-", "like", "hex", "string", "colors", "(", "e", ".", "g", ".", "FF88AA", ")" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioVisualization.py#L14-L29
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioVisualization.py
levenshtein
def levenshtein(str1, s2): ''' Distance between two strings ''' N1 = len(str1) N2 = len(s2) stringRange = [range(N1 + 1)] * (N2 + 1) for i in range(N2 + 1): stringRange[i] = range(i,i + N1 + 1) for i in range(0,N2): for j in range(0,N1): if str1[j] == s2[i]: stringRange[i+1][j+1] = min(stringRange[i+1][j] + 1, stringRange[i][j+1] + 1, stringRange[i][j]) else: stringRange[i+1][j+1] = min(stringRange[i+1][j] + 1, stringRange[i][j+1] + 1, stringRange[i][j] + 1) return stringRange[N2][N1]
python
def levenshtein(str1, s2): ''' Distance between two strings ''' N1 = len(str1) N2 = len(s2) stringRange = [range(N1 + 1)] * (N2 + 1) for i in range(N2 + 1): stringRange[i] = range(i,i + N1 + 1) for i in range(0,N2): for j in range(0,N1): if str1[j] == s2[i]: stringRange[i+1][j+1] = min(stringRange[i+1][j] + 1, stringRange[i][j+1] + 1, stringRange[i][j]) else: stringRange[i+1][j+1] = min(stringRange[i+1][j] + 1, stringRange[i][j+1] + 1, stringRange[i][j] + 1) return stringRange[N2][N1]
[ "def", "levenshtein", "(", "str1", ",", "s2", ")", ":", "N1", "=", "len", "(", "str1", ")", "N2", "=", "len", "(", "s2", ")", "stringRange", "=", "[", "range", "(", "N1", "+", "1", ")", "]", "*", "(", "N2", "+", "1", ")", "for", "i", "in", "range", "(", "N2", "+", "1", ")", ":", "stringRange", "[", "i", "]", "=", "range", "(", "i", ",", "i", "+", "N1", "+", "1", ")", "for", "i", "in", "range", "(", "0", ",", "N2", ")", ":", "for", "j", "in", "range", "(", "0", ",", "N1", ")", ":", "if", "str1", "[", "j", "]", "==", "s2", "[", "i", "]", ":", "stringRange", "[", "i", "+", "1", "]", "[", "j", "+", "1", "]", "=", "min", "(", "stringRange", "[", "i", "+", "1", "]", "[", "j", "]", "+", "1", ",", "stringRange", "[", "i", "]", "[", "j", "+", "1", "]", "+", "1", ",", "stringRange", "[", "i", "]", "[", "j", "]", ")", "else", ":", "stringRange", "[", "i", "+", "1", "]", "[", "j", "+", "1", "]", "=", "min", "(", "stringRange", "[", "i", "+", "1", "]", "[", "j", "]", "+", "1", ",", "stringRange", "[", "i", "]", "[", "j", "+", "1", "]", "+", "1", ",", "stringRange", "[", "i", "]", "[", "j", "]", "+", "1", ")", "return", "stringRange", "[", "N2", "]", "[", "N1", "]" ]
Distance between two strings
[ "Distance", "between", "two", "strings" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioVisualization.py#L32-L52
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioVisualization.py
text_list_to_colors
def text_list_to_colors(names): ''' Generates a list of colors based on a list of names (strings). Similar strings correspond to similar colors. ''' # STEP A: compute strings distance between all combnations of strings Dnames = np.zeros( (len(names), len(names)) ) for i in range(len(names)): for j in range(len(names)): Dnames[i,j] = 1 - 2.0 * levenshtein(names[i], names[j]) / float(len(names[i]+names[j])) # STEP B: pca dimanesionality reduction to a single-dimension (from the distance space) pca = sklearn.decomposition.PCA(n_components = 1) pca.fit(Dnames) # STEP C: mapping of 1-dimensional values to colors in a jet-colormap textToColor = pca.transform(Dnames) textToColor = 255 * (textToColor - textToColor.min()) / (textToColor.max() - textToColor.min()) textmaps = generateColorMap(); colors = [textmaps[int(c)] for c in textToColor] return colors
python
def text_list_to_colors(names): ''' Generates a list of colors based on a list of names (strings). Similar strings correspond to similar colors. ''' # STEP A: compute strings distance between all combnations of strings Dnames = np.zeros( (len(names), len(names)) ) for i in range(len(names)): for j in range(len(names)): Dnames[i,j] = 1 - 2.0 * levenshtein(names[i], names[j]) / float(len(names[i]+names[j])) # STEP B: pca dimanesionality reduction to a single-dimension (from the distance space) pca = sklearn.decomposition.PCA(n_components = 1) pca.fit(Dnames) # STEP C: mapping of 1-dimensional values to colors in a jet-colormap textToColor = pca.transform(Dnames) textToColor = 255 * (textToColor - textToColor.min()) / (textToColor.max() - textToColor.min()) textmaps = generateColorMap(); colors = [textmaps[int(c)] for c in textToColor] return colors
[ "def", "text_list_to_colors", "(", "names", ")", ":", "# STEP A: compute strings distance between all combnations of strings", "Dnames", "=", "np", ".", "zeros", "(", "(", "len", "(", "names", ")", ",", "len", "(", "names", ")", ")", ")", "for", "i", "in", "range", "(", "len", "(", "names", ")", ")", ":", "for", "j", "in", "range", "(", "len", "(", "names", ")", ")", ":", "Dnames", "[", "i", ",", "j", "]", "=", "1", "-", "2.0", "*", "levenshtein", "(", "names", "[", "i", "]", ",", "names", "[", "j", "]", ")", "/", "float", "(", "len", "(", "names", "[", "i", "]", "+", "names", "[", "j", "]", ")", ")", "# STEP B: pca dimanesionality reduction to a single-dimension (from the distance space)", "pca", "=", "sklearn", ".", "decomposition", ".", "PCA", "(", "n_components", "=", "1", ")", "pca", ".", "fit", "(", "Dnames", ")", "# STEP C: mapping of 1-dimensional values to colors in a jet-colormap", "textToColor", "=", "pca", ".", "transform", "(", "Dnames", ")", "textToColor", "=", "255", "*", "(", "textToColor", "-", "textToColor", ".", "min", "(", ")", ")", "/", "(", "textToColor", ".", "max", "(", ")", "-", "textToColor", ".", "min", "(", ")", ")", "textmaps", "=", "generateColorMap", "(", ")", "colors", "=", "[", "textmaps", "[", "int", "(", "c", ")", "]", "for", "c", "in", "textToColor", "]", "return", "colors" ]
Generates a list of colors based on a list of names (strings). Similar strings correspond to similar colors.
[ "Generates", "a", "list", "of", "colors", "based", "on", "a", "list", "of", "names", "(", "strings", ")", ".", "Similar", "strings", "correspond", "to", "similar", "colors", "." ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioVisualization.py#L55-L74
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioVisualization.py
text_list_to_colors_simple
def text_list_to_colors_simple(names): ''' Generates a list of colors based on a list of names (strings). Similar strings correspond to similar colors. ''' uNames = list(set(names)) uNames.sort() textToColor = [ uNames.index(n) for n in names ] textToColor = np.array(textToColor) textToColor = 255 * (textToColor - textToColor.min()) / \ (textToColor.max() - textToColor.min()) textmaps = generateColorMap(); colors = [textmaps[int(c)] for c in textToColor] return colors
python
def text_list_to_colors_simple(names): ''' Generates a list of colors based on a list of names (strings). Similar strings correspond to similar colors. ''' uNames = list(set(names)) uNames.sort() textToColor = [ uNames.index(n) for n in names ] textToColor = np.array(textToColor) textToColor = 255 * (textToColor - textToColor.min()) / \ (textToColor.max() - textToColor.min()) textmaps = generateColorMap(); colors = [textmaps[int(c)] for c in textToColor] return colors
[ "def", "text_list_to_colors_simple", "(", "names", ")", ":", "uNames", "=", "list", "(", "set", "(", "names", ")", ")", "uNames", ".", "sort", "(", ")", "textToColor", "=", "[", "uNames", ".", "index", "(", "n", ")", "for", "n", "in", "names", "]", "textToColor", "=", "np", ".", "array", "(", "textToColor", ")", "textToColor", "=", "255", "*", "(", "textToColor", "-", "textToColor", ".", "min", "(", ")", ")", "/", "(", "textToColor", ".", "max", "(", ")", "-", "textToColor", ".", "min", "(", ")", ")", "textmaps", "=", "generateColorMap", "(", ")", "colors", "=", "[", "textmaps", "[", "int", "(", "c", ")", "]", "for", "c", "in", "textToColor", "]", "return", "colors" ]
Generates a list of colors based on a list of names (strings). Similar strings correspond to similar colors.
[ "Generates", "a", "list", "of", "colors", "based", "on", "a", "list", "of", "names", "(", "strings", ")", ".", "Similar", "strings", "correspond", "to", "similar", "colors", "." ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioVisualization.py#L77-L89
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioVisualization.py
chordialDiagram
def chordialDiagram(fileStr, SM, Threshold, names, namesCategories): ''' Generates a d3js chordial diagram that illustrates similarites ''' colors = text_list_to_colors_simple(namesCategories) SM2 = SM.copy() SM2 = (SM2 + SM2.T) / 2.0 for i in range(SM2.shape[0]): M = Threshold # a = np.sort(SM2[i,:])[::-1] # M = np.mean(a[0:int(SM2.shape[1]/3+1)]) SM2[i, SM2[i, :] < M] = 0; dirChordial = fileStr + "_Chordial" if not os.path.isdir(dirChordial): os.mkdir(dirChordial) jsonPath = dirChordial + os.sep + "matrix.json" namesPath = dirChordial + os.sep + "Names.csv" jsonSMMatrix = simplejson.dumps(SM2.tolist()) f = open(jsonPath,'w'); f.write(jsonSMMatrix); f.close() f = open(namesPath,'w'); f.write("name,color\n"); for i, n in enumerate(names): f.write("{0:s},{1:s}\n".format(n,"#"+str(colors[i]))) f.close() shutil.copyfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "similarities.html"), dirChordial+os.sep+"similarities.html") shutil.copyfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "style.css"), dirChordial+os.sep+"style.css")
python
def chordialDiagram(fileStr, SM, Threshold, names, namesCategories): ''' Generates a d3js chordial diagram that illustrates similarites ''' colors = text_list_to_colors_simple(namesCategories) SM2 = SM.copy() SM2 = (SM2 + SM2.T) / 2.0 for i in range(SM2.shape[0]): M = Threshold # a = np.sort(SM2[i,:])[::-1] # M = np.mean(a[0:int(SM2.shape[1]/3+1)]) SM2[i, SM2[i, :] < M] = 0; dirChordial = fileStr + "_Chordial" if not os.path.isdir(dirChordial): os.mkdir(dirChordial) jsonPath = dirChordial + os.sep + "matrix.json" namesPath = dirChordial + os.sep + "Names.csv" jsonSMMatrix = simplejson.dumps(SM2.tolist()) f = open(jsonPath,'w'); f.write(jsonSMMatrix); f.close() f = open(namesPath,'w'); f.write("name,color\n"); for i, n in enumerate(names): f.write("{0:s},{1:s}\n".format(n,"#"+str(colors[i]))) f.close() shutil.copyfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "similarities.html"), dirChordial+os.sep+"similarities.html") shutil.copyfile(os.path.join(os.path.dirname(os.path.realpath(__file__)), "data", "style.css"), dirChordial+os.sep+"style.css")
[ "def", "chordialDiagram", "(", "fileStr", ",", "SM", ",", "Threshold", ",", "names", ",", "namesCategories", ")", ":", "colors", "=", "text_list_to_colors_simple", "(", "namesCategories", ")", "SM2", "=", "SM", ".", "copy", "(", ")", "SM2", "=", "(", "SM2", "+", "SM2", ".", "T", ")", "/", "2.0", "for", "i", "in", "range", "(", "SM2", ".", "shape", "[", "0", "]", ")", ":", "M", "=", "Threshold", "# a = np.sort(SM2[i,:])[::-1]", "# M = np.mean(a[0:int(SM2.shape[1]/3+1)])", "SM2", "[", "i", ",", "SM2", "[", "i", ",", ":", "]", "<", "M", "]", "=", "0", "dirChordial", "=", "fileStr", "+", "\"_Chordial\"", "if", "not", "os", ".", "path", ".", "isdir", "(", "dirChordial", ")", ":", "os", ".", "mkdir", "(", "dirChordial", ")", "jsonPath", "=", "dirChordial", "+", "os", ".", "sep", "+", "\"matrix.json\"", "namesPath", "=", "dirChordial", "+", "os", ".", "sep", "+", "\"Names.csv\"", "jsonSMMatrix", "=", "simplejson", ".", "dumps", "(", "SM2", ".", "tolist", "(", ")", ")", "f", "=", "open", "(", "jsonPath", ",", "'w'", ")", "f", ".", "write", "(", "jsonSMMatrix", ")", "f", ".", "close", "(", ")", "f", "=", "open", "(", "namesPath", ",", "'w'", ")", "f", ".", "write", "(", "\"name,color\\n\"", ")", "for", "i", ",", "n", "in", "enumerate", "(", "names", ")", ":", "f", ".", "write", "(", "\"{0:s},{1:s}\\n\"", ".", "format", "(", "n", ",", "\"#\"", "+", "str", "(", "colors", "[", "i", "]", ")", ")", ")", "f", ".", "close", "(", ")", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"data\"", ",", "\"similarities.html\"", ")", ",", "dirChordial", "+", "os", ".", "sep", "+", "\"similarities.html\"", ")", "shutil", ".", "copyfile", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", ",", "\"data\"", ",", "\"style.css\"", ")", ",", "dirChordial", "+", "os", ".", 
"sep", "+", "\"style.css\"", ")" ]
Generates a d3js chordial diagram that illustrates similarites
[ "Generates", "a", "d3js", "chordial", "diagram", "that", "illustrates", "similarites" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioVisualization.py#L92-L123
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioVisualization.py
visualizeFeaturesFolder
def visualizeFeaturesFolder(folder, dimReductionMethod, priorKnowledge = "none"): ''' This function generates a chordial visualization for the recordings of the provided path. ARGUMENTS: - folder: path of the folder that contains the WAV files to be processed - dimReductionMethod: method used to reduce the dimension of the initial feature space before computing the similarity. - priorKnowledge: if this is set equal to "artist" ''' if dimReductionMethod=="pca": allMtFeatures, wavFilesList, _ = aF.dirWavFeatureExtraction(folder, 30.0, 30.0, 0.050, 0.050, compute_beat = True) if allMtFeatures.shape[0]==0: print("Error: No data found! Check input folder") return namesCategoryToVisualize = [ntpath.basename(w).replace('.wav','').split(" --- ")[0] for w in wavFilesList]; namesToVisualize = [ntpath.basename(w).replace('.wav','') for w in wavFilesList]; (F, MEAN, STD) = aT.normalizeFeatures([allMtFeatures]) F = np.concatenate(F) # check that the new PCA dimension is at most equal to the number of samples K1 = 2 K2 = 10 if K1 > F.shape[0]: K1 = F.shape[0] if K2 > F.shape[0]: K2 = F.shape[0] pca1 = sklearn.decomposition.PCA(n_components = K1) pca1.fit(F) pca2 = sklearn.decomposition.PCA(n_components = K2) pca2.fit(F) finalDims = pca1.transform(F) finalDims2 = pca2.transform(F) else: allMtFeatures, Ys, wavFilesList = aF.dirWavFeatureExtractionNoAveraging(folder, 20.0, 5.0, 0.040, 0.040) # long-term statistics cannot be applied in this context (LDA needs mid-term features) if allMtFeatures.shape[0]==0: print("Error: No data found! 
Check input folder") return namesCategoryToVisualize = [ntpath.basename(w).replace('.wav','').split(" --- ")[0] for w in wavFilesList]; namesToVisualize = [ntpath.basename(w).replace('.wav','') for w in wavFilesList]; ldaLabels = Ys if priorKnowledge=="artist": uNamesCategoryToVisualize = list(set(namesCategoryToVisualize)) YsNew = np.zeros( Ys.shape ) for i, uname in enumerate(uNamesCategoryToVisualize): # for each unique artist name: indicesUCategories = [j for j, x in enumerate(namesCategoryToVisualize) if x == uname] for j in indicesUCategories: indices = np.nonzero(Ys==j) YsNew[indices] = i ldaLabels = YsNew (F, MEAN, STD) = aT.normalizeFeatures([allMtFeatures]) F = np.array(F[0]) clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=10) clf.fit(F, ldaLabels) reducedDims = clf.transform(F) pca = sklearn.decomposition.PCA(n_components = 2) pca.fit(reducedDims) reducedDims = pca.transform(reducedDims) # TODO: CHECK THIS ... SHOULD LDA USED IN SEMI-SUPERVISED ONLY???? 
uLabels = np.sort(np.unique((Ys))) # uLabels must have as many labels as the number of wavFilesList elements reducedDimsAvg = np.zeros( (uLabels.shape[0], reducedDims.shape[1] ) ) finalDims = np.zeros( (uLabels.shape[0], 2) ) for i, u in enumerate(uLabels): indices = [j for j, x in enumerate(Ys) if x == u] f = reducedDims[indices, :] finalDims[i, :] = f.mean(axis=0) finalDims2 = reducedDims for i in range(finalDims.shape[0]): plt.text(finalDims[i,0], finalDims[i,1], ntpath.basename(wavFilesList[i].replace('.wav','')), horizontalalignment='center', verticalalignment='center', fontsize=10) plt.plot(finalDims[i,0], finalDims[i,1], '*r') plt.xlim([1.2*finalDims[:,0].min(), 1.2*finalDims[:,0].max()]) plt.ylim([1.2*finalDims[:,1].min(), 1.2*finalDims[:,1].max()]) plt.show() SM = 1.0 - distance.squareform(distance.pdist(finalDims2, 'cosine')) for i in range(SM.shape[0]): SM[i,i] = 0.0; chordialDiagram("visualization", SM, 0.50, namesToVisualize, namesCategoryToVisualize) SM = 1.0 - distance.squareform(distance.pdist(F, 'cosine')) for i in range(SM.shape[0]): SM[i,i] = 0.0; chordialDiagram("visualizationInitial", SM, 0.50, namesToVisualize, namesCategoryToVisualize) # plot super-categories (i.e. artistname uNamesCategoryToVisualize = sort(list(set(namesCategoryToVisualize))) finalDimsGroup = np.zeros( (len(uNamesCategoryToVisualize), finalDims2.shape[1] ) ) for i, uname in enumerate(uNamesCategoryToVisualize): indices = [j for j, x in enumerate(namesCategoryToVisualize) if x == uname] f = finalDims2[indices, :] finalDimsGroup[i, :] = f.mean(axis=0) SMgroup = 1.0 - distance.squareform(distance.pdist(finalDimsGroup, 'cosine')) for i in range(SMgroup.shape[0]): SMgroup[i,i] = 0.0; chordialDiagram("visualizationGroup", SMgroup, 0.50, uNamesCategoryToVisualize, uNamesCategoryToVisualize)
python
def visualizeFeaturesFolder(folder, dimReductionMethod, priorKnowledge = "none"): ''' This function generates a chordial visualization for the recordings of the provided path. ARGUMENTS: - folder: path of the folder that contains the WAV files to be processed - dimReductionMethod: method used to reduce the dimension of the initial feature space before computing the similarity. - priorKnowledge: if this is set equal to "artist" ''' if dimReductionMethod=="pca": allMtFeatures, wavFilesList, _ = aF.dirWavFeatureExtraction(folder, 30.0, 30.0, 0.050, 0.050, compute_beat = True) if allMtFeatures.shape[0]==0: print("Error: No data found! Check input folder") return namesCategoryToVisualize = [ntpath.basename(w).replace('.wav','').split(" --- ")[0] for w in wavFilesList]; namesToVisualize = [ntpath.basename(w).replace('.wav','') for w in wavFilesList]; (F, MEAN, STD) = aT.normalizeFeatures([allMtFeatures]) F = np.concatenate(F) # check that the new PCA dimension is at most equal to the number of samples K1 = 2 K2 = 10 if K1 > F.shape[0]: K1 = F.shape[0] if K2 > F.shape[0]: K2 = F.shape[0] pca1 = sklearn.decomposition.PCA(n_components = K1) pca1.fit(F) pca2 = sklearn.decomposition.PCA(n_components = K2) pca2.fit(F) finalDims = pca1.transform(F) finalDims2 = pca2.transform(F) else: allMtFeatures, Ys, wavFilesList = aF.dirWavFeatureExtractionNoAveraging(folder, 20.0, 5.0, 0.040, 0.040) # long-term statistics cannot be applied in this context (LDA needs mid-term features) if allMtFeatures.shape[0]==0: print("Error: No data found! 
Check input folder") return namesCategoryToVisualize = [ntpath.basename(w).replace('.wav','').split(" --- ")[0] for w in wavFilesList]; namesToVisualize = [ntpath.basename(w).replace('.wav','') for w in wavFilesList]; ldaLabels = Ys if priorKnowledge=="artist": uNamesCategoryToVisualize = list(set(namesCategoryToVisualize)) YsNew = np.zeros( Ys.shape ) for i, uname in enumerate(uNamesCategoryToVisualize): # for each unique artist name: indicesUCategories = [j for j, x in enumerate(namesCategoryToVisualize) if x == uname] for j in indicesUCategories: indices = np.nonzero(Ys==j) YsNew[indices] = i ldaLabels = YsNew (F, MEAN, STD) = aT.normalizeFeatures([allMtFeatures]) F = np.array(F[0]) clf = sklearn.discriminant_analysis.LinearDiscriminantAnalysis(n_components=10) clf.fit(F, ldaLabels) reducedDims = clf.transform(F) pca = sklearn.decomposition.PCA(n_components = 2) pca.fit(reducedDims) reducedDims = pca.transform(reducedDims) # TODO: CHECK THIS ... SHOULD LDA USED IN SEMI-SUPERVISED ONLY???? 
uLabels = np.sort(np.unique((Ys))) # uLabels must have as many labels as the number of wavFilesList elements reducedDimsAvg = np.zeros( (uLabels.shape[0], reducedDims.shape[1] ) ) finalDims = np.zeros( (uLabels.shape[0], 2) ) for i, u in enumerate(uLabels): indices = [j for j, x in enumerate(Ys) if x == u] f = reducedDims[indices, :] finalDims[i, :] = f.mean(axis=0) finalDims2 = reducedDims for i in range(finalDims.shape[0]): plt.text(finalDims[i,0], finalDims[i,1], ntpath.basename(wavFilesList[i].replace('.wav','')), horizontalalignment='center', verticalalignment='center', fontsize=10) plt.plot(finalDims[i,0], finalDims[i,1], '*r') plt.xlim([1.2*finalDims[:,0].min(), 1.2*finalDims[:,0].max()]) plt.ylim([1.2*finalDims[:,1].min(), 1.2*finalDims[:,1].max()]) plt.show() SM = 1.0 - distance.squareform(distance.pdist(finalDims2, 'cosine')) for i in range(SM.shape[0]): SM[i,i] = 0.0; chordialDiagram("visualization", SM, 0.50, namesToVisualize, namesCategoryToVisualize) SM = 1.0 - distance.squareform(distance.pdist(F, 'cosine')) for i in range(SM.shape[0]): SM[i,i] = 0.0; chordialDiagram("visualizationInitial", SM, 0.50, namesToVisualize, namesCategoryToVisualize) # plot super-categories (i.e. artistname uNamesCategoryToVisualize = sort(list(set(namesCategoryToVisualize))) finalDimsGroup = np.zeros( (len(uNamesCategoryToVisualize), finalDims2.shape[1] ) ) for i, uname in enumerate(uNamesCategoryToVisualize): indices = [j for j, x in enumerate(namesCategoryToVisualize) if x == uname] f = finalDims2[indices, :] finalDimsGroup[i, :] = f.mean(axis=0) SMgroup = 1.0 - distance.squareform(distance.pdist(finalDimsGroup, 'cosine')) for i in range(SMgroup.shape[0]): SMgroup[i,i] = 0.0; chordialDiagram("visualizationGroup", SMgroup, 0.50, uNamesCategoryToVisualize, uNamesCategoryToVisualize)
[ "def", "visualizeFeaturesFolder", "(", "folder", ",", "dimReductionMethod", ",", "priorKnowledge", "=", "\"none\"", ")", ":", "if", "dimReductionMethod", "==", "\"pca\"", ":", "allMtFeatures", ",", "wavFilesList", ",", "_", "=", "aF", ".", "dirWavFeatureExtraction", "(", "folder", ",", "30.0", ",", "30.0", ",", "0.050", ",", "0.050", ",", "compute_beat", "=", "True", ")", "if", "allMtFeatures", ".", "shape", "[", "0", "]", "==", "0", ":", "print", "(", "\"Error: No data found! Check input folder\"", ")", "return", "namesCategoryToVisualize", "=", "[", "ntpath", ".", "basename", "(", "w", ")", ".", "replace", "(", "'.wav'", ",", "''", ")", ".", "split", "(", "\" --- \"", ")", "[", "0", "]", "for", "w", "in", "wavFilesList", "]", "namesToVisualize", "=", "[", "ntpath", ".", "basename", "(", "w", ")", ".", "replace", "(", "'.wav'", ",", "''", ")", "for", "w", "in", "wavFilesList", "]", "(", "F", ",", "MEAN", ",", "STD", ")", "=", "aT", ".", "normalizeFeatures", "(", "[", "allMtFeatures", "]", ")", "F", "=", "np", ".", "concatenate", "(", "F", ")", "# check that the new PCA dimension is at most equal to the number of samples", "K1", "=", "2", "K2", "=", "10", "if", "K1", ">", "F", ".", "shape", "[", "0", "]", ":", "K1", "=", "F", ".", "shape", "[", "0", "]", "if", "K2", ">", "F", ".", "shape", "[", "0", "]", ":", "K2", "=", "F", ".", "shape", "[", "0", "]", "pca1", "=", "sklearn", ".", "decomposition", ".", "PCA", "(", "n_components", "=", "K1", ")", "pca1", ".", "fit", "(", "F", ")", "pca2", "=", "sklearn", ".", "decomposition", ".", "PCA", "(", "n_components", "=", "K2", ")", "pca2", ".", "fit", "(", "F", ")", "finalDims", "=", "pca1", ".", "transform", "(", "F", ")", "finalDims2", "=", "pca2", ".", "transform", "(", "F", ")", "else", ":", "allMtFeatures", ",", "Ys", ",", "wavFilesList", "=", "aF", ".", "dirWavFeatureExtractionNoAveraging", "(", "folder", ",", "20.0", ",", "5.0", ",", "0.040", ",", "0.040", ")", "# long-term statistics cannot be applied in this 
context (LDA needs mid-term features)", "if", "allMtFeatures", ".", "shape", "[", "0", "]", "==", "0", ":", "print", "(", "\"Error: No data found! Check input folder\"", ")", "return", "namesCategoryToVisualize", "=", "[", "ntpath", ".", "basename", "(", "w", ")", ".", "replace", "(", "'.wav'", ",", "''", ")", ".", "split", "(", "\" --- \"", ")", "[", "0", "]", "for", "w", "in", "wavFilesList", "]", "namesToVisualize", "=", "[", "ntpath", ".", "basename", "(", "w", ")", ".", "replace", "(", "'.wav'", ",", "''", ")", "for", "w", "in", "wavFilesList", "]", "ldaLabels", "=", "Ys", "if", "priorKnowledge", "==", "\"artist\"", ":", "uNamesCategoryToVisualize", "=", "list", "(", "set", "(", "namesCategoryToVisualize", ")", ")", "YsNew", "=", "np", ".", "zeros", "(", "Ys", ".", "shape", ")", "for", "i", ",", "uname", "in", "enumerate", "(", "uNamesCategoryToVisualize", ")", ":", "# for each unique artist name:", "indicesUCategories", "=", "[", "j", "for", "j", ",", "x", "in", "enumerate", "(", "namesCategoryToVisualize", ")", "if", "x", "==", "uname", "]", "for", "j", "in", "indicesUCategories", ":", "indices", "=", "np", ".", "nonzero", "(", "Ys", "==", "j", ")", "YsNew", "[", "indices", "]", "=", "i", "ldaLabels", "=", "YsNew", "(", "F", ",", "MEAN", ",", "STD", ")", "=", "aT", ".", "normalizeFeatures", "(", "[", "allMtFeatures", "]", ")", "F", "=", "np", ".", "array", "(", "F", "[", "0", "]", ")", "clf", "=", "sklearn", ".", "discriminant_analysis", ".", "LinearDiscriminantAnalysis", "(", "n_components", "=", "10", ")", "clf", ".", "fit", "(", "F", ",", "ldaLabels", ")", "reducedDims", "=", "clf", ".", "transform", "(", "F", ")", "pca", "=", "sklearn", ".", "decomposition", ".", "PCA", "(", "n_components", "=", "2", ")", "pca", ".", "fit", "(", "reducedDims", ")", "reducedDims", "=", "pca", ".", "transform", "(", "reducedDims", ")", "# TODO: CHECK THIS ... 
SHOULD LDA USED IN SEMI-SUPERVISED ONLY????", "uLabels", "=", "np", ".", "sort", "(", "np", ".", "unique", "(", "(", "Ys", ")", ")", ")", "# uLabels must have as many labels as the number of wavFilesList elements", "reducedDimsAvg", "=", "np", ".", "zeros", "(", "(", "uLabels", ".", "shape", "[", "0", "]", ",", "reducedDims", ".", "shape", "[", "1", "]", ")", ")", "finalDims", "=", "np", ".", "zeros", "(", "(", "uLabels", ".", "shape", "[", "0", "]", ",", "2", ")", ")", "for", "i", ",", "u", "in", "enumerate", "(", "uLabels", ")", ":", "indices", "=", "[", "j", "for", "j", ",", "x", "in", "enumerate", "(", "Ys", ")", "if", "x", "==", "u", "]", "f", "=", "reducedDims", "[", "indices", ",", ":", "]", "finalDims", "[", "i", ",", ":", "]", "=", "f", ".", "mean", "(", "axis", "=", "0", ")", "finalDims2", "=", "reducedDims", "for", "i", "in", "range", "(", "finalDims", ".", "shape", "[", "0", "]", ")", ":", "plt", ".", "text", "(", "finalDims", "[", "i", ",", "0", "]", ",", "finalDims", "[", "i", ",", "1", "]", ",", "ntpath", ".", "basename", "(", "wavFilesList", "[", "i", "]", ".", "replace", "(", "'.wav'", ",", "''", ")", ")", ",", "horizontalalignment", "=", "'center'", ",", "verticalalignment", "=", "'center'", ",", "fontsize", "=", "10", ")", "plt", ".", "plot", "(", "finalDims", "[", "i", ",", "0", "]", ",", "finalDims", "[", "i", ",", "1", "]", ",", "'*r'", ")", "plt", ".", "xlim", "(", "[", "1.2", "*", "finalDims", "[", ":", ",", "0", "]", ".", "min", "(", ")", ",", "1.2", "*", "finalDims", "[", ":", ",", "0", "]", ".", "max", "(", ")", "]", ")", "plt", ".", "ylim", "(", "[", "1.2", "*", "finalDims", "[", ":", ",", "1", "]", ".", "min", "(", ")", ",", "1.2", "*", "finalDims", "[", ":", ",", "1", "]", ".", "max", "(", ")", "]", ")", "plt", ".", "show", "(", ")", "SM", "=", "1.0", "-", "distance", ".", "squareform", "(", "distance", ".", "pdist", "(", "finalDims2", ",", "'cosine'", ")", ")", "for", "i", "in", "range", "(", "SM", ".", "shape", "[", "0", "]", ")", 
":", "SM", "[", "i", ",", "i", "]", "=", "0.0", "chordialDiagram", "(", "\"visualization\"", ",", "SM", ",", "0.50", ",", "namesToVisualize", ",", "namesCategoryToVisualize", ")", "SM", "=", "1.0", "-", "distance", ".", "squareform", "(", "distance", ".", "pdist", "(", "F", ",", "'cosine'", ")", ")", "for", "i", "in", "range", "(", "SM", ".", "shape", "[", "0", "]", ")", ":", "SM", "[", "i", ",", "i", "]", "=", "0.0", "chordialDiagram", "(", "\"visualizationInitial\"", ",", "SM", ",", "0.50", ",", "namesToVisualize", ",", "namesCategoryToVisualize", ")", "# plot super-categories (i.e. artistname", "uNamesCategoryToVisualize", "=", "sort", "(", "list", "(", "set", "(", "namesCategoryToVisualize", ")", ")", ")", "finalDimsGroup", "=", "np", ".", "zeros", "(", "(", "len", "(", "uNamesCategoryToVisualize", ")", ",", "finalDims2", ".", "shape", "[", "1", "]", ")", ")", "for", "i", ",", "uname", "in", "enumerate", "(", "uNamesCategoryToVisualize", ")", ":", "indices", "=", "[", "j", "for", "j", ",", "x", "in", "enumerate", "(", "namesCategoryToVisualize", ")", "if", "x", "==", "uname", "]", "f", "=", "finalDims2", "[", "indices", ",", ":", "]", "finalDimsGroup", "[", "i", ",", ":", "]", "=", "f", ".", "mean", "(", "axis", "=", "0", ")", "SMgroup", "=", "1.0", "-", "distance", ".", "squareform", "(", "distance", ".", "pdist", "(", "finalDimsGroup", ",", "'cosine'", ")", ")", "for", "i", "in", "range", "(", "SMgroup", ".", "shape", "[", "0", "]", ")", ":", "SMgroup", "[", "i", ",", "i", "]", "=", "0.0", "chordialDiagram", "(", "\"visualizationGroup\"", ",", "SMgroup", ",", "0.50", ",", "uNamesCategoryToVisualize", ",", "uNamesCategoryToVisualize", ")" ]
This function generates a chordial visualization for the recordings of the provided path. ARGUMENTS: - folder: path of the folder that contains the WAV files to be processed - dimReductionMethod: method used to reduce the dimension of the initial feature space before computing the similarity. - priorKnowledge: if this is set equal to "artist"
[ "This", "function", "generates", "a", "chordial", "visualization", "for", "the", "recordings", "of", "the", "provided", "path", ".", "ARGUMENTS", ":", "-", "folder", ":", "path", "of", "the", "folder", "that", "contains", "the", "WAV", "files", "to", "be", "processed", "-", "dimReductionMethod", ":", "method", "used", "to", "reduce", "the", "dimension", "of", "the", "initial", "feature", "space", "before", "computing", "the", "similarity", ".", "-", "priorKnowledge", ":", "if", "this", "is", "set", "equal", "to", "artist" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioVisualization.py#L126-L232
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stZCR
def stZCR(frame): """Computes zero crossing rate of frame""" count = len(frame) countZ = numpy.sum(numpy.abs(numpy.diff(numpy.sign(frame)))) / 2 return (numpy.float64(countZ) / numpy.float64(count-1.0))
python
def stZCR(frame): """Computes zero crossing rate of frame""" count = len(frame) countZ = numpy.sum(numpy.abs(numpy.diff(numpy.sign(frame)))) / 2 return (numpy.float64(countZ) / numpy.float64(count-1.0))
[ "def", "stZCR", "(", "frame", ")", ":", "count", "=", "len", "(", "frame", ")", "countZ", "=", "numpy", ".", "sum", "(", "numpy", ".", "abs", "(", "numpy", ".", "diff", "(", "numpy", ".", "sign", "(", "frame", ")", ")", ")", ")", "/", "2", "return", "(", "numpy", ".", "float64", "(", "countZ", ")", "/", "numpy", ".", "float64", "(", "count", "-", "1.0", ")", ")" ]
Computes zero crossing rate of frame
[ "Computes", "zero", "crossing", "rate", "of", "frame" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L19-L23
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stEnergyEntropy
def stEnergyEntropy(frame, n_short_blocks=10): """Computes entropy of energy""" Eol = numpy.sum(frame ** 2) # total frame energy L = len(frame) sub_win_len = int(numpy.floor(L / n_short_blocks)) if L != sub_win_len * n_short_blocks: frame = frame[0:sub_win_len * n_short_blocks] # sub_wins is of size [n_short_blocks x L] sub_wins = frame.reshape(sub_win_len, n_short_blocks, order='F').copy() # Compute normalized sub-frame energies: s = numpy.sum(sub_wins ** 2, axis=0) / (Eol + eps) # Compute entropy of the normalized sub-frame energies: Entropy = -numpy.sum(s * numpy.log2(s + eps)) return Entropy
python
def stEnergyEntropy(frame, n_short_blocks=10): """Computes entropy of energy""" Eol = numpy.sum(frame ** 2) # total frame energy L = len(frame) sub_win_len = int(numpy.floor(L / n_short_blocks)) if L != sub_win_len * n_short_blocks: frame = frame[0:sub_win_len * n_short_blocks] # sub_wins is of size [n_short_blocks x L] sub_wins = frame.reshape(sub_win_len, n_short_blocks, order='F').copy() # Compute normalized sub-frame energies: s = numpy.sum(sub_wins ** 2, axis=0) / (Eol + eps) # Compute entropy of the normalized sub-frame energies: Entropy = -numpy.sum(s * numpy.log2(s + eps)) return Entropy
[ "def", "stEnergyEntropy", "(", "frame", ",", "n_short_blocks", "=", "10", ")", ":", "Eol", "=", "numpy", ".", "sum", "(", "frame", "**", "2", ")", "# total frame energy", "L", "=", "len", "(", "frame", ")", "sub_win_len", "=", "int", "(", "numpy", ".", "floor", "(", "L", "/", "n_short_blocks", ")", ")", "if", "L", "!=", "sub_win_len", "*", "n_short_blocks", ":", "frame", "=", "frame", "[", "0", ":", "sub_win_len", "*", "n_short_blocks", "]", "# sub_wins is of size [n_short_blocks x L]", "sub_wins", "=", "frame", ".", "reshape", "(", "sub_win_len", ",", "n_short_blocks", ",", "order", "=", "'F'", ")", ".", "copy", "(", ")", "# Compute normalized sub-frame energies:", "s", "=", "numpy", ".", "sum", "(", "sub_wins", "**", "2", ",", "axis", "=", "0", ")", "/", "(", "Eol", "+", "eps", ")", "# Compute entropy of the normalized sub-frame energies:", "Entropy", "=", "-", "numpy", ".", "sum", "(", "s", "*", "numpy", ".", "log2", "(", "s", "+", "eps", ")", ")", "return", "Entropy" ]
Computes entropy of energy
[ "Computes", "entropy", "of", "energy" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L31-L46
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stSpectralCentroidAndSpread
def stSpectralCentroidAndSpread(X, fs): """Computes spectral centroid of frame (given abs(FFT))""" ind = (numpy.arange(1, len(X) + 1)) * (fs/(2.0 * len(X))) Xt = X.copy() Xt = Xt / Xt.max() NUM = numpy.sum(ind * Xt) DEN = numpy.sum(Xt) + eps # Centroid: C = (NUM / DEN) # Spread: S = numpy.sqrt(numpy.sum(((ind - C) ** 2) * Xt) / DEN) # Normalize: C = C / (fs / 2.0) S = S / (fs / 2.0) return (C, S)
python
def stSpectralCentroidAndSpread(X, fs): """Computes spectral centroid of frame (given abs(FFT))""" ind = (numpy.arange(1, len(X) + 1)) * (fs/(2.0 * len(X))) Xt = X.copy() Xt = Xt / Xt.max() NUM = numpy.sum(ind * Xt) DEN = numpy.sum(Xt) + eps # Centroid: C = (NUM / DEN) # Spread: S = numpy.sqrt(numpy.sum(((ind - C) ** 2) * Xt) / DEN) # Normalize: C = C / (fs / 2.0) S = S / (fs / 2.0) return (C, S)
[ "def", "stSpectralCentroidAndSpread", "(", "X", ",", "fs", ")", ":", "ind", "=", "(", "numpy", ".", "arange", "(", "1", ",", "len", "(", "X", ")", "+", "1", ")", ")", "*", "(", "fs", "/", "(", "2.0", "*", "len", "(", "X", ")", ")", ")", "Xt", "=", "X", ".", "copy", "(", ")", "Xt", "=", "Xt", "/", "Xt", ".", "max", "(", ")", "NUM", "=", "numpy", ".", "sum", "(", "ind", "*", "Xt", ")", "DEN", "=", "numpy", ".", "sum", "(", "Xt", ")", "+", "eps", "# Centroid:", "C", "=", "(", "NUM", "/", "DEN", ")", "# Spread:", "S", "=", "numpy", ".", "sqrt", "(", "numpy", ".", "sum", "(", "(", "(", "ind", "-", "C", ")", "**", "2", ")", "*", "Xt", ")", "/", "DEN", ")", "# Normalize:", "C", "=", "C", "/", "(", "fs", "/", "2.0", ")", "S", "=", "S", "/", "(", "fs", "/", "2.0", ")", "return", "(", "C", ",", "S", ")" ]
Computes spectral centroid of frame (given abs(FFT))
[ "Computes", "spectral", "centroid", "of", "frame", "(", "given", "abs", "(", "FFT", "))" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L52-L71
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stSpectralEntropy
def stSpectralEntropy(X, n_short_blocks=10): """Computes the spectral entropy""" L = len(X) # number of frame samples Eol = numpy.sum(X ** 2) # total spectral energy sub_win_len = int(numpy.floor(L / n_short_blocks)) # length of sub-frame if L != sub_win_len * n_short_blocks: X = X[0:sub_win_len * n_short_blocks] sub_wins = X.reshape(sub_win_len, n_short_blocks, order='F').copy() # define sub-frames (using matrix reshape) s = numpy.sum(sub_wins ** 2, axis=0) / (Eol + eps) # compute spectral sub-energies En = -numpy.sum(s*numpy.log2(s + eps)) # compute spectral entropy return En
python
def stSpectralEntropy(X, n_short_blocks=10): """Computes the spectral entropy""" L = len(X) # number of frame samples Eol = numpy.sum(X ** 2) # total spectral energy sub_win_len = int(numpy.floor(L / n_short_blocks)) # length of sub-frame if L != sub_win_len * n_short_blocks: X = X[0:sub_win_len * n_short_blocks] sub_wins = X.reshape(sub_win_len, n_short_blocks, order='F').copy() # define sub-frames (using matrix reshape) s = numpy.sum(sub_wins ** 2, axis=0) / (Eol + eps) # compute spectral sub-energies En = -numpy.sum(s*numpy.log2(s + eps)) # compute spectral entropy return En
[ "def", "stSpectralEntropy", "(", "X", ",", "n_short_blocks", "=", "10", ")", ":", "L", "=", "len", "(", "X", ")", "# number of frame samples", "Eol", "=", "numpy", ".", "sum", "(", "X", "**", "2", ")", "# total spectral energy", "sub_win_len", "=", "int", "(", "numpy", ".", "floor", "(", "L", "/", "n_short_blocks", ")", ")", "# length of sub-frame", "if", "L", "!=", "sub_win_len", "*", "n_short_blocks", ":", "X", "=", "X", "[", "0", ":", "sub_win_len", "*", "n_short_blocks", "]", "sub_wins", "=", "X", ".", "reshape", "(", "sub_win_len", ",", "n_short_blocks", ",", "order", "=", "'F'", ")", ".", "copy", "(", ")", "# define sub-frames (using matrix reshape)", "s", "=", "numpy", ".", "sum", "(", "sub_wins", "**", "2", ",", "axis", "=", "0", ")", "/", "(", "Eol", "+", "eps", ")", "# compute spectral sub-energies", "En", "=", "-", "numpy", ".", "sum", "(", "s", "*", "numpy", ".", "log2", "(", "s", "+", "eps", ")", ")", "# compute spectral entropy", "return", "En" ]
Computes the spectral entropy
[ "Computes", "the", "spectral", "entropy" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L74-L87
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stSpectralFlux
def stSpectralFlux(X, X_prev): """ Computes the spectral flux feature of the current frame ARGUMENTS: X: the abs(fft) of the current frame X_prev: the abs(fft) of the previous frame """ # compute the spectral flux as the sum of square distances: sumX = numpy.sum(X + eps) sumPrevX = numpy.sum(X_prev + eps) F = numpy.sum((X / sumX - X_prev/sumPrevX) ** 2) return F
python
def stSpectralFlux(X, X_prev): """ Computes the spectral flux feature of the current frame ARGUMENTS: X: the abs(fft) of the current frame X_prev: the abs(fft) of the previous frame """ # compute the spectral flux as the sum of square distances: sumX = numpy.sum(X + eps) sumPrevX = numpy.sum(X_prev + eps) F = numpy.sum((X / sumX - X_prev/sumPrevX) ** 2) return F
[ "def", "stSpectralFlux", "(", "X", ",", "X_prev", ")", ":", "# compute the spectral flux as the sum of square distances:", "sumX", "=", "numpy", ".", "sum", "(", "X", "+", "eps", ")", "sumPrevX", "=", "numpy", ".", "sum", "(", "X_prev", "+", "eps", ")", "F", "=", "numpy", ".", "sum", "(", "(", "X", "/", "sumX", "-", "X_prev", "/", "sumPrevX", ")", "**", "2", ")", "return", "F" ]
Computes the spectral flux feature of the current frame ARGUMENTS: X: the abs(fft) of the current frame X_prev: the abs(fft) of the previous frame
[ "Computes", "the", "spectral", "flux", "feature", "of", "the", "current", "frame", "ARGUMENTS", ":", "X", ":", "the", "abs", "(", "fft", ")", "of", "the", "current", "frame", "X_prev", ":", "the", "abs", "(", "fft", ")", "of", "the", "previous", "frame" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L90-L102
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stSpectralRollOff
def stSpectralRollOff(X, c, fs): """Computes spectral roll-off""" totalEnergy = numpy.sum(X ** 2) fftLength = len(X) Thres = c*totalEnergy # Ffind the spectral rolloff as the frequency position # where the respective spectral energy is equal to c*totalEnergy CumSum = numpy.cumsum(X ** 2) + eps [a, ] = numpy.nonzero(CumSum > Thres) if len(a) > 0: mC = numpy.float64(a[0]) / (float(fftLength)) else: mC = 0.0 return (mC)
python
def stSpectralRollOff(X, c, fs): """Computes spectral roll-off""" totalEnergy = numpy.sum(X ** 2) fftLength = len(X) Thres = c*totalEnergy # Ffind the spectral rolloff as the frequency position # where the respective spectral energy is equal to c*totalEnergy CumSum = numpy.cumsum(X ** 2) + eps [a, ] = numpy.nonzero(CumSum > Thres) if len(a) > 0: mC = numpy.float64(a[0]) / (float(fftLength)) else: mC = 0.0 return (mC)
[ "def", "stSpectralRollOff", "(", "X", ",", "c", ",", "fs", ")", ":", "totalEnergy", "=", "numpy", ".", "sum", "(", "X", "**", "2", ")", "fftLength", "=", "len", "(", "X", ")", "Thres", "=", "c", "*", "totalEnergy", "# Ffind the spectral rolloff as the frequency position ", "# where the respective spectral energy is equal to c*totalEnergy", "CumSum", "=", "numpy", ".", "cumsum", "(", "X", "**", "2", ")", "+", "eps", "[", "a", ",", "]", "=", "numpy", ".", "nonzero", "(", "CumSum", ">", "Thres", ")", "if", "len", "(", "a", ")", ">", "0", ":", "mC", "=", "numpy", ".", "float64", "(", "a", "[", "0", "]", ")", "/", "(", "float", "(", "fftLength", ")", ")", "else", ":", "mC", "=", "0.0", "return", "(", "mC", ")" ]
Computes spectral roll-off
[ "Computes", "spectral", "roll", "-", "off" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L105-L118
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stHarmonic
def stHarmonic(frame, fs): """ Computes harmonic ratio and pitch """ M = numpy.round(0.016 * fs) - 1 R = numpy.correlate(frame, frame, mode='full') g = R[len(frame)-1] R = R[len(frame):-1] # estimate m0 (as the first zero crossing of R) [a, ] = numpy.nonzero(numpy.diff(numpy.sign(R))) if len(a) == 0: m0 = len(R)-1 else: m0 = a[0] if M > len(R): M = len(R) - 1 Gamma = numpy.zeros((M), dtype=numpy.float64) CSum = numpy.cumsum(frame ** 2) Gamma[m0:M] = R[m0:M] / (numpy.sqrt((g * CSum[M:m0:-1])) + eps) ZCR = stZCR(Gamma) if ZCR > 0.15: HR = 0.0 f0 = 0.0 else: if len(Gamma) == 0: HR = 1.0 blag = 0.0 Gamma = numpy.zeros((M), dtype=numpy.float64) else: HR = numpy.max(Gamma) blag = numpy.argmax(Gamma) # Get fundamental frequency: f0 = fs / (blag + eps) if f0 > 5000: f0 = 0.0 if HR < 0.1: f0 = 0.0 return (HR, f0)
python
def stHarmonic(frame, fs): """ Computes harmonic ratio and pitch """ M = numpy.round(0.016 * fs) - 1 R = numpy.correlate(frame, frame, mode='full') g = R[len(frame)-1] R = R[len(frame):-1] # estimate m0 (as the first zero crossing of R) [a, ] = numpy.nonzero(numpy.diff(numpy.sign(R))) if len(a) == 0: m0 = len(R)-1 else: m0 = a[0] if M > len(R): M = len(R) - 1 Gamma = numpy.zeros((M), dtype=numpy.float64) CSum = numpy.cumsum(frame ** 2) Gamma[m0:M] = R[m0:M] / (numpy.sqrt((g * CSum[M:m0:-1])) + eps) ZCR = stZCR(Gamma) if ZCR > 0.15: HR = 0.0 f0 = 0.0 else: if len(Gamma) == 0: HR = 1.0 blag = 0.0 Gamma = numpy.zeros((M), dtype=numpy.float64) else: HR = numpy.max(Gamma) blag = numpy.argmax(Gamma) # Get fundamental frequency: f0 = fs / (blag + eps) if f0 > 5000: f0 = 0.0 if HR < 0.1: f0 = 0.0 return (HR, f0)
[ "def", "stHarmonic", "(", "frame", ",", "fs", ")", ":", "M", "=", "numpy", ".", "round", "(", "0.016", "*", "fs", ")", "-", "1", "R", "=", "numpy", ".", "correlate", "(", "frame", ",", "frame", ",", "mode", "=", "'full'", ")", "g", "=", "R", "[", "len", "(", "frame", ")", "-", "1", "]", "R", "=", "R", "[", "len", "(", "frame", ")", ":", "-", "1", "]", "# estimate m0 (as the first zero crossing of R)", "[", "a", ",", "]", "=", "numpy", ".", "nonzero", "(", "numpy", ".", "diff", "(", "numpy", ".", "sign", "(", "R", ")", ")", ")", "if", "len", "(", "a", ")", "==", "0", ":", "m0", "=", "len", "(", "R", ")", "-", "1", "else", ":", "m0", "=", "a", "[", "0", "]", "if", "M", ">", "len", "(", "R", ")", ":", "M", "=", "len", "(", "R", ")", "-", "1", "Gamma", "=", "numpy", ".", "zeros", "(", "(", "M", ")", ",", "dtype", "=", "numpy", ".", "float64", ")", "CSum", "=", "numpy", ".", "cumsum", "(", "frame", "**", "2", ")", "Gamma", "[", "m0", ":", "M", "]", "=", "R", "[", "m0", ":", "M", "]", "/", "(", "numpy", ".", "sqrt", "(", "(", "g", "*", "CSum", "[", "M", ":", "m0", ":", "-", "1", "]", ")", ")", "+", "eps", ")", "ZCR", "=", "stZCR", "(", "Gamma", ")", "if", "ZCR", ">", "0.15", ":", "HR", "=", "0.0", "f0", "=", "0.0", "else", ":", "if", "len", "(", "Gamma", ")", "==", "0", ":", "HR", "=", "1.0", "blag", "=", "0.0", "Gamma", "=", "numpy", ".", "zeros", "(", "(", "M", ")", ",", "dtype", "=", "numpy", ".", "float64", ")", "else", ":", "HR", "=", "numpy", ".", "max", "(", "Gamma", ")", "blag", "=", "numpy", ".", "argmax", "(", "Gamma", ")", "# Get fundamental frequency:", "f0", "=", "fs", "/", "(", "blag", "+", "eps", ")", "if", "f0", ">", "5000", ":", "f0", "=", "0.0", "if", "HR", "<", "0.1", ":", "f0", "=", "0.0", "return", "(", "HR", ",", "f0", ")" ]
Computes harmonic ratio and pitch
[ "Computes", "harmonic", "ratio", "and", "pitch" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L121-L166
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
mfccInitFilterBanks
def mfccInitFilterBanks(fs, nfft): """ Computes the triangular filterbank for MFCC computation (used in the stFeatureExtraction function before the stMFCC function call) This function is taken from the scikits.talkbox library (MIT Licence): https://pypi.python.org/pypi/scikits.talkbox """ # filter bank params: lowfreq = 133.33 linsc = 200/3. logsc = 1.0711703 numLinFiltTotal = 13 numLogFilt = 27 if fs < 8000: nlogfil = 5 # Total number of filters nFiltTotal = numLinFiltTotal + numLogFilt # Compute frequency points of the triangle: freqs = numpy.zeros(nFiltTotal+2) freqs[:numLinFiltTotal] = lowfreq + numpy.arange(numLinFiltTotal) * linsc freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** numpy.arange(1, numLogFilt + 3) heights = 2./(freqs[2:] - freqs[0:-2]) # Compute filterbank coeff (in fft domain, in bins) fbank = numpy.zeros((nFiltTotal, nfft)) nfreqs = numpy.arange(nfft) / (1. * nfft) * fs for i in range(nFiltTotal): lowTrFreq = freqs[i] cenTrFreq = freqs[i+1] highTrFreq = freqs[i+2] lid = numpy.arange(numpy.floor(lowTrFreq * nfft / fs) + 1, numpy.floor(cenTrFreq * nfft / fs) + 1, dtype=numpy.int) lslope = heights[i] / (cenTrFreq - lowTrFreq) rid = numpy.arange(numpy.floor(cenTrFreq * nfft / fs) + 1, numpy.floor(highTrFreq * nfft / fs) + 1, dtype=numpy.int) rslope = heights[i] / (highTrFreq - cenTrFreq) fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq) fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid]) return fbank, freqs
python
def mfccInitFilterBanks(fs, nfft): """ Computes the triangular filterbank for MFCC computation (used in the stFeatureExtraction function before the stMFCC function call) This function is taken from the scikits.talkbox library (MIT Licence): https://pypi.python.org/pypi/scikits.talkbox """ # filter bank params: lowfreq = 133.33 linsc = 200/3. logsc = 1.0711703 numLinFiltTotal = 13 numLogFilt = 27 if fs < 8000: nlogfil = 5 # Total number of filters nFiltTotal = numLinFiltTotal + numLogFilt # Compute frequency points of the triangle: freqs = numpy.zeros(nFiltTotal+2) freqs[:numLinFiltTotal] = lowfreq + numpy.arange(numLinFiltTotal) * linsc freqs[numLinFiltTotal:] = freqs[numLinFiltTotal-1] * logsc ** numpy.arange(1, numLogFilt + 3) heights = 2./(freqs[2:] - freqs[0:-2]) # Compute filterbank coeff (in fft domain, in bins) fbank = numpy.zeros((nFiltTotal, nfft)) nfreqs = numpy.arange(nfft) / (1. * nfft) * fs for i in range(nFiltTotal): lowTrFreq = freqs[i] cenTrFreq = freqs[i+1] highTrFreq = freqs[i+2] lid = numpy.arange(numpy.floor(lowTrFreq * nfft / fs) + 1, numpy.floor(cenTrFreq * nfft / fs) + 1, dtype=numpy.int) lslope = heights[i] / (cenTrFreq - lowTrFreq) rid = numpy.arange(numpy.floor(cenTrFreq * nfft / fs) + 1, numpy.floor(highTrFreq * nfft / fs) + 1, dtype=numpy.int) rslope = heights[i] / (highTrFreq - cenTrFreq) fbank[i][lid] = lslope * (nfreqs[lid] - lowTrFreq) fbank[i][rid] = rslope * (highTrFreq - nfreqs[rid]) return fbank, freqs
[ "def", "mfccInitFilterBanks", "(", "fs", ",", "nfft", ")", ":", "# filter bank params:", "lowfreq", "=", "133.33", "linsc", "=", "200", "/", "3.", "logsc", "=", "1.0711703", "numLinFiltTotal", "=", "13", "numLogFilt", "=", "27", "if", "fs", "<", "8000", ":", "nlogfil", "=", "5", "# Total number of filters", "nFiltTotal", "=", "numLinFiltTotal", "+", "numLogFilt", "# Compute frequency points of the triangle:", "freqs", "=", "numpy", ".", "zeros", "(", "nFiltTotal", "+", "2", ")", "freqs", "[", ":", "numLinFiltTotal", "]", "=", "lowfreq", "+", "numpy", ".", "arange", "(", "numLinFiltTotal", ")", "*", "linsc", "freqs", "[", "numLinFiltTotal", ":", "]", "=", "freqs", "[", "numLinFiltTotal", "-", "1", "]", "*", "logsc", "**", "numpy", ".", "arange", "(", "1", ",", "numLogFilt", "+", "3", ")", "heights", "=", "2.", "/", "(", "freqs", "[", "2", ":", "]", "-", "freqs", "[", "0", ":", "-", "2", "]", ")", "# Compute filterbank coeff (in fft domain, in bins)", "fbank", "=", "numpy", ".", "zeros", "(", "(", "nFiltTotal", ",", "nfft", ")", ")", "nfreqs", "=", "numpy", ".", "arange", "(", "nfft", ")", "/", "(", "1.", "*", "nfft", ")", "*", "fs", "for", "i", "in", "range", "(", "nFiltTotal", ")", ":", "lowTrFreq", "=", "freqs", "[", "i", "]", "cenTrFreq", "=", "freqs", "[", "i", "+", "1", "]", "highTrFreq", "=", "freqs", "[", "i", "+", "2", "]", "lid", "=", "numpy", ".", "arange", "(", "numpy", ".", "floor", "(", "lowTrFreq", "*", "nfft", "/", "fs", ")", "+", "1", ",", "numpy", ".", "floor", "(", "cenTrFreq", "*", "nfft", "/", "fs", ")", "+", "1", ",", "dtype", "=", "numpy", ".", "int", ")", "lslope", "=", "heights", "[", "i", "]", "/", "(", "cenTrFreq", "-", "lowTrFreq", ")", "rid", "=", "numpy", ".", "arange", "(", "numpy", ".", "floor", "(", "cenTrFreq", "*", "nfft", "/", "fs", ")", "+", "1", ",", "numpy", ".", "floor", "(", "highTrFreq", "*", "nfft", "/", "fs", ")", "+", "1", ",", "dtype", "=", "numpy", ".", "int", ")", "rslope", "=", "heights", "[", "i", "]", "/", "(", 
"highTrFreq", "-", "cenTrFreq", ")", "fbank", "[", "i", "]", "[", "lid", "]", "=", "lslope", "*", "(", "nfreqs", "[", "lid", "]", "-", "lowTrFreq", ")", "fbank", "[", "i", "]", "[", "rid", "]", "=", "rslope", "*", "(", "highTrFreq", "-", "nfreqs", "[", "rid", "]", ")", "return", "fbank", ",", "freqs" ]
Computes the triangular filterbank for MFCC computation (used in the stFeatureExtraction function before the stMFCC function call) This function is taken from the scikits.talkbox library (MIT Licence): https://pypi.python.org/pypi/scikits.talkbox
[ "Computes", "the", "triangular", "filterbank", "for", "MFCC", "computation", "(", "used", "in", "the", "stFeatureExtraction", "function", "before", "the", "stMFCC", "function", "call", ")", "This", "function", "is", "taken", "from", "the", "scikits", ".", "talkbox", "library", "(", "MIT", "Licence", ")", ":", "https", ":", "//", "pypi", ".", "python", ".", "org", "/", "pypi", "/", "scikits", ".", "talkbox" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L169-L216
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stMFCC
def stMFCC(X, fbank, n_mfcc_feats): """ Computes the MFCCs of a frame, given the fft mag ARGUMENTS: X: fft magnitude abs(FFT) fbank: filter bank (see mfccInitFilterBanks) RETURN ceps: MFCCs (13 element vector) Note: MFCC calculation is, in general, taken from the scikits.talkbox library (MIT Licence), # with a small number of modifications to make it more compact and suitable for the pyAudioAnalysis Lib """ mspec = numpy.log10(numpy.dot(X, fbank.T)+eps) ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:n_mfcc_feats] return ceps
python
def stMFCC(X, fbank, n_mfcc_feats): """ Computes the MFCCs of a frame, given the fft mag ARGUMENTS: X: fft magnitude abs(FFT) fbank: filter bank (see mfccInitFilterBanks) RETURN ceps: MFCCs (13 element vector) Note: MFCC calculation is, in general, taken from the scikits.talkbox library (MIT Licence), # with a small number of modifications to make it more compact and suitable for the pyAudioAnalysis Lib """ mspec = numpy.log10(numpy.dot(X, fbank.T)+eps) ceps = dct(mspec, type=2, norm='ortho', axis=-1)[:n_mfcc_feats] return ceps
[ "def", "stMFCC", "(", "X", ",", "fbank", ",", "n_mfcc_feats", ")", ":", "mspec", "=", "numpy", ".", "log10", "(", "numpy", ".", "dot", "(", "X", ",", "fbank", ".", "T", ")", "+", "eps", ")", "ceps", "=", "dct", "(", "mspec", ",", "type", "=", "2", ",", "norm", "=", "'ortho'", ",", "axis", "=", "-", "1", ")", "[", ":", "n_mfcc_feats", "]", "return", "ceps" ]
Computes the MFCCs of a frame, given the fft mag ARGUMENTS: X: fft magnitude abs(FFT) fbank: filter bank (see mfccInitFilterBanks) RETURN ceps: MFCCs (13 element vector) Note: MFCC calculation is, in general, taken from the scikits.talkbox library (MIT Licence), # with a small number of modifications to make it more compact and suitable for the pyAudioAnalysis Lib
[ "Computes", "the", "MFCCs", "of", "a", "frame", "given", "the", "fft", "mag" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L219-L237
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stChromaFeaturesInit
def stChromaFeaturesInit(nfft, fs): """ This function initializes the chroma matrices used in the calculation of the chroma features """ freqs = numpy.array([((f + 1) * fs) / (2 * nfft) for f in range(nfft)]) Cp = 27.50 nChroma = numpy.round(12.0 * numpy.log2(freqs / Cp)).astype(int) nFreqsPerChroma = numpy.zeros((nChroma.shape[0], )) uChroma = numpy.unique(nChroma) for u in uChroma: idx = numpy.nonzero(nChroma == u) nFreqsPerChroma[idx] = idx[0].shape return nChroma, nFreqsPerChroma
python
def stChromaFeaturesInit(nfft, fs): """ This function initializes the chroma matrices used in the calculation of the chroma features """ freqs = numpy.array([((f + 1) * fs) / (2 * nfft) for f in range(nfft)]) Cp = 27.50 nChroma = numpy.round(12.0 * numpy.log2(freqs / Cp)).astype(int) nFreqsPerChroma = numpy.zeros((nChroma.shape[0], )) uChroma = numpy.unique(nChroma) for u in uChroma: idx = numpy.nonzero(nChroma == u) nFreqsPerChroma[idx] = idx[0].shape return nChroma, nFreqsPerChroma
[ "def", "stChromaFeaturesInit", "(", "nfft", ",", "fs", ")", ":", "freqs", "=", "numpy", ".", "array", "(", "[", "(", "(", "f", "+", "1", ")", "*", "fs", ")", "/", "(", "2", "*", "nfft", ")", "for", "f", "in", "range", "(", "nfft", ")", "]", ")", "Cp", "=", "27.50", "nChroma", "=", "numpy", ".", "round", "(", "12.0", "*", "numpy", ".", "log2", "(", "freqs", "/", "Cp", ")", ")", ".", "astype", "(", "int", ")", "nFreqsPerChroma", "=", "numpy", ".", "zeros", "(", "(", "nChroma", ".", "shape", "[", "0", "]", ",", ")", ")", "uChroma", "=", "numpy", ".", "unique", "(", "nChroma", ")", "for", "u", "in", "uChroma", ":", "idx", "=", "numpy", ".", "nonzero", "(", "nChroma", "==", "u", ")", "nFreqsPerChroma", "[", "idx", "]", "=", "idx", "[", "0", "]", ".", "shape", "return", "nChroma", ",", "nFreqsPerChroma" ]
This function initializes the chroma matrices used in the calculation of the chroma features
[ "This", "function", "initializes", "the", "chroma", "matrices", "used", "in", "the", "calculation", "of", "the", "chroma", "features" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L240-L255
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stChromagram
def stChromagram(signal, fs, win, step, PLOT=False): """ Short-term FFT mag for spectogram estimation: Returns: a numpy array (nFFT x numOfShortTermWindows) ARGUMENTS: signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) PLOT: flag, 1 if results are to be ploted RETURNS: """ win = int(win) step = int(step) signal = numpy.double(signal) signal = signal / (2.0 ** 15) DC = signal.mean() MAX = (numpy.abs(signal)).max() signal = (signal - DC) / (MAX - DC) N = len(signal) # total number of signals cur_p = 0 count_fr = 0 nfft = int(win / 2) nChroma, nFreqsPerChroma = stChromaFeaturesInit(nfft, fs) chromaGram = numpy.array([], dtype=numpy.float64) while (cur_p + win - 1 < N): count_fr += 1 x = signal[cur_p:cur_p + win] cur_p = cur_p + step X = abs(fft(x)) X = X[0:nfft] X = X / len(X) chromaNames, C = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma) C = C[:, 0] if count_fr == 1: chromaGram = C.T else: chromaGram = numpy.vstack((chromaGram, C.T)) FreqAxis = chromaNames TimeAxis = [(t * step) / fs for t in range(chromaGram.shape[0])] if (PLOT): fig, ax = plt.subplots() chromaGramToPlot = chromaGram.transpose()[::-1, :] Ratio = int(chromaGramToPlot.shape[1] / (3*chromaGramToPlot.shape[0])) if Ratio < 1: Ratio = 1 chromaGramToPlot = numpy.repeat(chromaGramToPlot, Ratio, axis=0) imgplot = plt.imshow(chromaGramToPlot) fstep = int(nfft / 5.0) # FreqTicks = range(0, int(nfft) + fstep, fstep) # FreqTicksLabels = [str(fs/2-int((f*fs) / (2*nfft))) for f in FreqTicks] ax.set_yticks(range(int(Ratio / 2), len(FreqAxis) * Ratio, Ratio)) ax.set_yticklabels(FreqAxis[::-1]) TStep = int(count_fr / 3) TimeTicks = range(0, count_fr, TStep) TimeTicksLabels = ['%.2f' % (float(t * step) / fs) for t in TimeTicks] ax.set_xticks(TimeTicks) ax.set_xticklabels(TimeTicksLabels) ax.set_xlabel('time (secs)') imgplot.set_cmap('jet') plt.colorbar() plt.show() return (chromaGram, TimeAxis, FreqAxis)
python
def stChromagram(signal, fs, win, step, PLOT=False): """ Short-term FFT mag for spectogram estimation: Returns: a numpy array (nFFT x numOfShortTermWindows) ARGUMENTS: signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) PLOT: flag, 1 if results are to be ploted RETURNS: """ win = int(win) step = int(step) signal = numpy.double(signal) signal = signal / (2.0 ** 15) DC = signal.mean() MAX = (numpy.abs(signal)).max() signal = (signal - DC) / (MAX - DC) N = len(signal) # total number of signals cur_p = 0 count_fr = 0 nfft = int(win / 2) nChroma, nFreqsPerChroma = stChromaFeaturesInit(nfft, fs) chromaGram = numpy.array([], dtype=numpy.float64) while (cur_p + win - 1 < N): count_fr += 1 x = signal[cur_p:cur_p + win] cur_p = cur_p + step X = abs(fft(x)) X = X[0:nfft] X = X / len(X) chromaNames, C = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma) C = C[:, 0] if count_fr == 1: chromaGram = C.T else: chromaGram = numpy.vstack((chromaGram, C.T)) FreqAxis = chromaNames TimeAxis = [(t * step) / fs for t in range(chromaGram.shape[0])] if (PLOT): fig, ax = plt.subplots() chromaGramToPlot = chromaGram.transpose()[::-1, :] Ratio = int(chromaGramToPlot.shape[1] / (3*chromaGramToPlot.shape[0])) if Ratio < 1: Ratio = 1 chromaGramToPlot = numpy.repeat(chromaGramToPlot, Ratio, axis=0) imgplot = plt.imshow(chromaGramToPlot) fstep = int(nfft / 5.0) # FreqTicks = range(0, int(nfft) + fstep, fstep) # FreqTicksLabels = [str(fs/2-int((f*fs) / (2*nfft))) for f in FreqTicks] ax.set_yticks(range(int(Ratio / 2), len(FreqAxis) * Ratio, Ratio)) ax.set_yticklabels(FreqAxis[::-1]) TStep = int(count_fr / 3) TimeTicks = range(0, count_fr, TStep) TimeTicksLabels = ['%.2f' % (float(t * step) / fs) for t in TimeTicks] ax.set_xticks(TimeTicks) ax.set_xticklabels(TimeTicksLabels) ax.set_xlabel('time (secs)') imgplot.set_cmap('jet') plt.colorbar() plt.show() return (chromaGram, TimeAxis, FreqAxis)
[ "def", "stChromagram", "(", "signal", ",", "fs", ",", "win", ",", "step", ",", "PLOT", "=", "False", ")", ":", "win", "=", "int", "(", "win", ")", "step", "=", "int", "(", "step", ")", "signal", "=", "numpy", ".", "double", "(", "signal", ")", "signal", "=", "signal", "/", "(", "2.0", "**", "15", ")", "DC", "=", "signal", ".", "mean", "(", ")", "MAX", "=", "(", "numpy", ".", "abs", "(", "signal", ")", ")", ".", "max", "(", ")", "signal", "=", "(", "signal", "-", "DC", ")", "/", "(", "MAX", "-", "DC", ")", "N", "=", "len", "(", "signal", ")", "# total number of signals", "cur_p", "=", "0", "count_fr", "=", "0", "nfft", "=", "int", "(", "win", "/", "2", ")", "nChroma", ",", "nFreqsPerChroma", "=", "stChromaFeaturesInit", "(", "nfft", ",", "fs", ")", "chromaGram", "=", "numpy", ".", "array", "(", "[", "]", ",", "dtype", "=", "numpy", ".", "float64", ")", "while", "(", "cur_p", "+", "win", "-", "1", "<", "N", ")", ":", "count_fr", "+=", "1", "x", "=", "signal", "[", "cur_p", ":", "cur_p", "+", "win", "]", "cur_p", "=", "cur_p", "+", "step", "X", "=", "abs", "(", "fft", "(", "x", ")", ")", "X", "=", "X", "[", "0", ":", "nfft", "]", "X", "=", "X", "/", "len", "(", "X", ")", "chromaNames", ",", "C", "=", "stChromaFeatures", "(", "X", ",", "fs", ",", "nChroma", ",", "nFreqsPerChroma", ")", "C", "=", "C", "[", ":", ",", "0", "]", "if", "count_fr", "==", "1", ":", "chromaGram", "=", "C", ".", "T", "else", ":", "chromaGram", "=", "numpy", ".", "vstack", "(", "(", "chromaGram", ",", "C", ".", "T", ")", ")", "FreqAxis", "=", "chromaNames", "TimeAxis", "=", "[", "(", "t", "*", "step", ")", "/", "fs", "for", "t", "in", "range", "(", "chromaGram", ".", "shape", "[", "0", "]", ")", "]", "if", "(", "PLOT", ")", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "chromaGramToPlot", "=", "chromaGram", ".", "transpose", "(", ")", "[", ":", ":", "-", "1", ",", ":", "]", "Ratio", "=", "int", "(", "chromaGramToPlot", ".", "shape", "[", "1", "]", "/", "(", "3", "*", 
"chromaGramToPlot", ".", "shape", "[", "0", "]", ")", ")", "if", "Ratio", "<", "1", ":", "Ratio", "=", "1", "chromaGramToPlot", "=", "numpy", ".", "repeat", "(", "chromaGramToPlot", ",", "Ratio", ",", "axis", "=", "0", ")", "imgplot", "=", "plt", ".", "imshow", "(", "chromaGramToPlot", ")", "fstep", "=", "int", "(", "nfft", "/", "5.0", ")", "# FreqTicks = range(0, int(nfft) + fstep, fstep)", "# FreqTicksLabels = [str(fs/2-int((f*fs) / (2*nfft))) for f in FreqTicks]", "ax", ".", "set_yticks", "(", "range", "(", "int", "(", "Ratio", "/", "2", ")", ",", "len", "(", "FreqAxis", ")", "*", "Ratio", ",", "Ratio", ")", ")", "ax", ".", "set_yticklabels", "(", "FreqAxis", "[", ":", ":", "-", "1", "]", ")", "TStep", "=", "int", "(", "count_fr", "/", "3", ")", "TimeTicks", "=", "range", "(", "0", ",", "count_fr", ",", "TStep", ")", "TimeTicksLabels", "=", "[", "'%.2f'", "%", "(", "float", "(", "t", "*", "step", ")", "/", "fs", ")", "for", "t", "in", "TimeTicks", "]", "ax", ".", "set_xticks", "(", "TimeTicks", ")", "ax", ".", "set_xticklabels", "(", "TimeTicksLabels", ")", "ax", ".", "set_xlabel", "(", "'time (secs)'", ")", "imgplot", ".", "set_cmap", "(", "'jet'", ")", "plt", ".", "colorbar", "(", ")", "plt", ".", "show", "(", ")", "return", "(", "chromaGram", ",", "TimeAxis", ",", "FreqAxis", ")" ]
Short-term FFT mag for spectogram estimation: Returns: a numpy array (nFFT x numOfShortTermWindows) ARGUMENTS: signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) PLOT: flag, 1 if results are to be ploted RETURNS:
[ "Short", "-", "term", "FFT", "mag", "for", "spectogram", "estimation", ":", "Returns", ":", "a", "numpy", "array", "(", "nFFT", "x", "numOfShortTermWindows", ")", "ARGUMENTS", ":", "signal", ":", "the", "input", "signal", "samples", "fs", ":", "the", "sampling", "freq", "(", "in", "Hz", ")", "win", ":", "the", "short", "-", "term", "window", "size", "(", "in", "samples", ")", "step", ":", "the", "short", "-", "term", "window", "step", "(", "in", "samples", ")", "PLOT", ":", "flag", "1", "if", "results", "are", "to", "be", "ploted", "RETURNS", ":" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L298-L365
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
beatExtraction
def beatExtraction(st_features, win_len, PLOT=False): """ This function extracts an estimate of the beat rate for a musical signal. ARGUMENTS: - st_features: a numpy array (n_feats x numOfShortTermWindows) - win_len: window size in seconds RETURNS: - BPM: estimates of beats per minute - Ratio: a confidence measure """ # Features that are related to the beat tracking task: toWatch = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] max_beat_time = int(round(2.0 / win_len)) hist_all = numpy.zeros((max_beat_time,)) for ii, i in enumerate(toWatch): # for each feature DifThres = 2.0 * (numpy.abs(st_features[i, 0:-1] - st_features[i, 1::])).mean() # dif threshold (3 x Mean of Difs) if DifThres<=0: DifThres = 0.0000000000000001 [pos1, _] = utilities.peakdet(st_features[i, :], DifThres) # detect local maxima posDifs = [] # compute histograms of local maxima changes for j in range(len(pos1)-1): posDifs.append(pos1[j+1]-pos1[j]) [hist_times, HistEdges] = numpy.histogram(posDifs, numpy.arange(0.5, max_beat_time + 1.5)) hist_centers = (HistEdges[0:-1] + HistEdges[1::]) / 2.0 hist_times = hist_times.astype(float) / st_features.shape[1] hist_all += hist_times if PLOT: plt.subplot(9, 2, ii + 1) plt.plot(st_features[i, :], 'k') for k in pos1: plt.plot(k, st_features[i, k], 'k*') f1 = plt.gca() f1.axes.get_xaxis().set_ticks([]) f1.axes.get_yaxis().set_ticks([]) if PLOT: plt.show(block=False) plt.figure() # Get beat as the argmax of the agregated histogram: I = numpy.argmax(hist_all) bpms = 60 / (hist_centers * win_len) BPM = bpms[I] # ... and the beat ratio: Ratio = hist_all[I] / hist_all.sum() if PLOT: # filter out >500 beats from plotting: hist_all = hist_all[bpms < 500] bpms = bpms[bpms < 500] plt.plot(bpms, hist_all, 'k') plt.xlabel('Beats per minute') plt.ylabel('Freq Count') plt.show(block=True) return BPM, Ratio
python
def beatExtraction(st_features, win_len, PLOT=False): """ This function extracts an estimate of the beat rate for a musical signal. ARGUMENTS: - st_features: a numpy array (n_feats x numOfShortTermWindows) - win_len: window size in seconds RETURNS: - BPM: estimates of beats per minute - Ratio: a confidence measure """ # Features that are related to the beat tracking task: toWatch = [0, 1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18] max_beat_time = int(round(2.0 / win_len)) hist_all = numpy.zeros((max_beat_time,)) for ii, i in enumerate(toWatch): # for each feature DifThres = 2.0 * (numpy.abs(st_features[i, 0:-1] - st_features[i, 1::])).mean() # dif threshold (3 x Mean of Difs) if DifThres<=0: DifThres = 0.0000000000000001 [pos1, _] = utilities.peakdet(st_features[i, :], DifThres) # detect local maxima posDifs = [] # compute histograms of local maxima changes for j in range(len(pos1)-1): posDifs.append(pos1[j+1]-pos1[j]) [hist_times, HistEdges] = numpy.histogram(posDifs, numpy.arange(0.5, max_beat_time + 1.5)) hist_centers = (HistEdges[0:-1] + HistEdges[1::]) / 2.0 hist_times = hist_times.astype(float) / st_features.shape[1] hist_all += hist_times if PLOT: plt.subplot(9, 2, ii + 1) plt.plot(st_features[i, :], 'k') for k in pos1: plt.plot(k, st_features[i, k], 'k*') f1 = plt.gca() f1.axes.get_xaxis().set_ticks([]) f1.axes.get_yaxis().set_ticks([]) if PLOT: plt.show(block=False) plt.figure() # Get beat as the argmax of the agregated histogram: I = numpy.argmax(hist_all) bpms = 60 / (hist_centers * win_len) BPM = bpms[I] # ... and the beat ratio: Ratio = hist_all[I] / hist_all.sum() if PLOT: # filter out >500 beats from plotting: hist_all = hist_all[bpms < 500] bpms = bpms[bpms < 500] plt.plot(bpms, hist_all, 'k') plt.xlabel('Beats per minute') plt.ylabel('Freq Count') plt.show(block=True) return BPM, Ratio
[ "def", "beatExtraction", "(", "st_features", ",", "win_len", ",", "PLOT", "=", "False", ")", ":", "# Features that are related to the beat tracking task:", "toWatch", "=", "[", "0", ",", "1", ",", "3", ",", "4", ",", "5", ",", "6", ",", "7", ",", "8", ",", "9", ",", "10", ",", "11", ",", "12", ",", "13", ",", "14", ",", "15", ",", "16", ",", "17", ",", "18", "]", "max_beat_time", "=", "int", "(", "round", "(", "2.0", "/", "win_len", ")", ")", "hist_all", "=", "numpy", ".", "zeros", "(", "(", "max_beat_time", ",", ")", ")", "for", "ii", ",", "i", "in", "enumerate", "(", "toWatch", ")", ":", "# for each feature", "DifThres", "=", "2.0", "*", "(", "numpy", ".", "abs", "(", "st_features", "[", "i", ",", "0", ":", "-", "1", "]", "-", "st_features", "[", "i", ",", "1", ":", ":", "]", ")", ")", ".", "mean", "(", ")", "# dif threshold (3 x Mean of Difs)", "if", "DifThres", "<=", "0", ":", "DifThres", "=", "0.0000000000000001", "[", "pos1", ",", "_", "]", "=", "utilities", ".", "peakdet", "(", "st_features", "[", "i", ",", ":", "]", ",", "DifThres", ")", "# detect local maxima", "posDifs", "=", "[", "]", "# compute histograms of local maxima changes", "for", "j", "in", "range", "(", "len", "(", "pos1", ")", "-", "1", ")", ":", "posDifs", ".", "append", "(", "pos1", "[", "j", "+", "1", "]", "-", "pos1", "[", "j", "]", ")", "[", "hist_times", ",", "HistEdges", "]", "=", "numpy", ".", "histogram", "(", "posDifs", ",", "numpy", ".", "arange", "(", "0.5", ",", "max_beat_time", "+", "1.5", ")", ")", "hist_centers", "=", "(", "HistEdges", "[", "0", ":", "-", "1", "]", "+", "HistEdges", "[", "1", ":", ":", "]", ")", "/", "2.0", "hist_times", "=", "hist_times", ".", "astype", "(", "float", ")", "/", "st_features", ".", "shape", "[", "1", "]", "hist_all", "+=", "hist_times", "if", "PLOT", ":", "plt", ".", "subplot", "(", "9", ",", "2", ",", "ii", "+", "1", ")", "plt", ".", "plot", "(", "st_features", "[", "i", ",", ":", "]", ",", "'k'", ")", "for", "k", "in", "pos1", ":", "plt", 
".", "plot", "(", "k", ",", "st_features", "[", "i", ",", "k", "]", ",", "'k*'", ")", "f1", "=", "plt", ".", "gca", "(", ")", "f1", ".", "axes", ".", "get_xaxis", "(", ")", ".", "set_ticks", "(", "[", "]", ")", "f1", ".", "axes", ".", "get_yaxis", "(", ")", ".", "set_ticks", "(", "[", "]", ")", "if", "PLOT", ":", "plt", ".", "show", "(", "block", "=", "False", ")", "plt", ".", "figure", "(", ")", "# Get beat as the argmax of the agregated histogram:", "I", "=", "numpy", ".", "argmax", "(", "hist_all", ")", "bpms", "=", "60", "/", "(", "hist_centers", "*", "win_len", ")", "BPM", "=", "bpms", "[", "I", "]", "# ... and the beat ratio:", "Ratio", "=", "hist_all", "[", "I", "]", "/", "hist_all", ".", "sum", "(", ")", "if", "PLOT", ":", "# filter out >500 beats from plotting:", "hist_all", "=", "hist_all", "[", "bpms", "<", "500", "]", "bpms", "=", "bpms", "[", "bpms", "<", "500", "]", "plt", ".", "plot", "(", "bpms", ",", "hist_all", ",", "'k'", ")", "plt", ".", "xlabel", "(", "'Beats per minute'", ")", "plt", ".", "ylabel", "(", "'Freq Count'", ")", "plt", ".", "show", "(", "block", "=", "True", ")", "return", "BPM", ",", "Ratio" ]
This function extracts an estimate of the beat rate for a musical signal. ARGUMENTS: - st_features: a numpy array (n_feats x numOfShortTermWindows) - win_len: window size in seconds RETURNS: - BPM: estimates of beats per minute - Ratio: a confidence measure
[ "This", "function", "extracts", "an", "estimate", "of", "the", "beat", "rate", "for", "a", "musical", "signal", ".", "ARGUMENTS", ":", "-", "st_features", ":", "a", "numpy", "array", "(", "n_feats", "x", "numOfShortTermWindows", ")", "-", "win_len", ":", "window", "size", "in", "seconds", "RETURNS", ":", "-", "BPM", ":", "estimates", "of", "beats", "per", "minute", "-", "Ratio", ":", "a", "confidence", "measure" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L392-L450
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stSpectogram
def stSpectogram(signal, fs, win, step, PLOT=False): """ Short-term FFT mag for spectogram estimation: Returns: a numpy array (nFFT x numOfShortTermWindows) ARGUMENTS: signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) PLOT: flag, 1 if results are to be ploted RETURNS: """ win = int(win) step = int(step) signal = numpy.double(signal) signal = signal / (2.0 ** 15) DC = signal.mean() MAX = (numpy.abs(signal)).max() signal = (signal - DC) / (MAX - DC) N = len(signal) # total number of signals cur_p = 0 count_fr = 0 nfft = int(win / 2) specgram = numpy.array([], dtype=numpy.float64) while (cur_p + win - 1 < N): count_fr += 1 x = signal[cur_p:cur_p+win] cur_p = cur_p + step X = abs(fft(x)) X = X[0:nfft] X = X / len(X) if count_fr == 1: specgram = X ** 2 else: specgram = numpy.vstack((specgram, X)) FreqAxis = [float((f + 1) * fs) / (2 * nfft) for f in range(specgram.shape[1])] TimeAxis = [float(t * step) / fs for t in range(specgram.shape[0])] if (PLOT): fig, ax = plt.subplots() imgplot = plt.imshow(specgram.transpose()[::-1, :]) fstep = int(nfft / 5.0) FreqTicks = range(0, int(nfft) + fstep, fstep) FreqTicksLabels = [str(fs / 2 - int((f * fs) / (2 * nfft))) for f in FreqTicks] ax.set_yticks(FreqTicks) ax.set_yticklabels(FreqTicksLabels) TStep = int(count_fr/3) TimeTicks = range(0, count_fr, TStep) TimeTicksLabels = ['%.2f' % (float(t * step) / fs) for t in TimeTicks] ax.set_xticks(TimeTicks) ax.set_xticklabels(TimeTicksLabels) ax.set_xlabel('time (secs)') ax.set_ylabel('freq (Hz)') imgplot.set_cmap('jet') plt.colorbar() plt.show() return (specgram, TimeAxis, FreqAxis)
python
def stSpectogram(signal, fs, win, step, PLOT=False): """ Short-term FFT mag for spectogram estimation: Returns: a numpy array (nFFT x numOfShortTermWindows) ARGUMENTS: signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) PLOT: flag, 1 if results are to be ploted RETURNS: """ win = int(win) step = int(step) signal = numpy.double(signal) signal = signal / (2.0 ** 15) DC = signal.mean() MAX = (numpy.abs(signal)).max() signal = (signal - DC) / (MAX - DC) N = len(signal) # total number of signals cur_p = 0 count_fr = 0 nfft = int(win / 2) specgram = numpy.array([], dtype=numpy.float64) while (cur_p + win - 1 < N): count_fr += 1 x = signal[cur_p:cur_p+win] cur_p = cur_p + step X = abs(fft(x)) X = X[0:nfft] X = X / len(X) if count_fr == 1: specgram = X ** 2 else: specgram = numpy.vstack((specgram, X)) FreqAxis = [float((f + 1) * fs) / (2 * nfft) for f in range(specgram.shape[1])] TimeAxis = [float(t * step) / fs for t in range(specgram.shape[0])] if (PLOT): fig, ax = plt.subplots() imgplot = plt.imshow(specgram.transpose()[::-1, :]) fstep = int(nfft / 5.0) FreqTicks = range(0, int(nfft) + fstep, fstep) FreqTicksLabels = [str(fs / 2 - int((f * fs) / (2 * nfft))) for f in FreqTicks] ax.set_yticks(FreqTicks) ax.set_yticklabels(FreqTicksLabels) TStep = int(count_fr/3) TimeTicks = range(0, count_fr, TStep) TimeTicksLabels = ['%.2f' % (float(t * step) / fs) for t in TimeTicks] ax.set_xticks(TimeTicks) ax.set_xticklabels(TimeTicksLabels) ax.set_xlabel('time (secs)') ax.set_ylabel('freq (Hz)') imgplot.set_cmap('jet') plt.colorbar() plt.show() return (specgram, TimeAxis, FreqAxis)
[ "def", "stSpectogram", "(", "signal", ",", "fs", ",", "win", ",", "step", ",", "PLOT", "=", "False", ")", ":", "win", "=", "int", "(", "win", ")", "step", "=", "int", "(", "step", ")", "signal", "=", "numpy", ".", "double", "(", "signal", ")", "signal", "=", "signal", "/", "(", "2.0", "**", "15", ")", "DC", "=", "signal", ".", "mean", "(", ")", "MAX", "=", "(", "numpy", ".", "abs", "(", "signal", ")", ")", ".", "max", "(", ")", "signal", "=", "(", "signal", "-", "DC", ")", "/", "(", "MAX", "-", "DC", ")", "N", "=", "len", "(", "signal", ")", "# total number of signals", "cur_p", "=", "0", "count_fr", "=", "0", "nfft", "=", "int", "(", "win", "/", "2", ")", "specgram", "=", "numpy", ".", "array", "(", "[", "]", ",", "dtype", "=", "numpy", ".", "float64", ")", "while", "(", "cur_p", "+", "win", "-", "1", "<", "N", ")", ":", "count_fr", "+=", "1", "x", "=", "signal", "[", "cur_p", ":", "cur_p", "+", "win", "]", "cur_p", "=", "cur_p", "+", "step", "X", "=", "abs", "(", "fft", "(", "x", ")", ")", "X", "=", "X", "[", "0", ":", "nfft", "]", "X", "=", "X", "/", "len", "(", "X", ")", "if", "count_fr", "==", "1", ":", "specgram", "=", "X", "**", "2", "else", ":", "specgram", "=", "numpy", ".", "vstack", "(", "(", "specgram", ",", "X", ")", ")", "FreqAxis", "=", "[", "float", "(", "(", "f", "+", "1", ")", "*", "fs", ")", "/", "(", "2", "*", "nfft", ")", "for", "f", "in", "range", "(", "specgram", ".", "shape", "[", "1", "]", ")", "]", "TimeAxis", "=", "[", "float", "(", "t", "*", "step", ")", "/", "fs", "for", "t", "in", "range", "(", "specgram", ".", "shape", "[", "0", "]", ")", "]", "if", "(", "PLOT", ")", ":", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "imgplot", "=", "plt", ".", "imshow", "(", "specgram", ".", "transpose", "(", ")", "[", ":", ":", "-", "1", ",", ":", "]", ")", "fstep", "=", "int", "(", "nfft", "/", "5.0", ")", "FreqTicks", "=", "range", "(", "0", ",", "int", "(", "nfft", ")", "+", "fstep", ",", "fstep", ")", "FreqTicksLabels", "=", "[", 
"str", "(", "fs", "/", "2", "-", "int", "(", "(", "f", "*", "fs", ")", "/", "(", "2", "*", "nfft", ")", ")", ")", "for", "f", "in", "FreqTicks", "]", "ax", ".", "set_yticks", "(", "FreqTicks", ")", "ax", ".", "set_yticklabels", "(", "FreqTicksLabels", ")", "TStep", "=", "int", "(", "count_fr", "/", "3", ")", "TimeTicks", "=", "range", "(", "0", ",", "count_fr", ",", "TStep", ")", "TimeTicksLabels", "=", "[", "'%.2f'", "%", "(", "float", "(", "t", "*", "step", ")", "/", "fs", ")", "for", "t", "in", "TimeTicks", "]", "ax", ".", "set_xticks", "(", "TimeTicks", ")", "ax", ".", "set_xticklabels", "(", "TimeTicksLabels", ")", "ax", ".", "set_xlabel", "(", "'time (secs)'", ")", "ax", ".", "set_ylabel", "(", "'freq (Hz)'", ")", "imgplot", ".", "set_cmap", "(", "'jet'", ")", "plt", ".", "colorbar", "(", ")", "plt", ".", "show", "(", ")", "return", "(", "specgram", ",", "TimeAxis", ",", "FreqAxis", ")" ]
Short-term FFT mag for spectogram estimation: Returns: a numpy array (nFFT x numOfShortTermWindows) ARGUMENTS: signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) PLOT: flag, 1 if results are to be ploted RETURNS:
[ "Short", "-", "term", "FFT", "mag", "for", "spectogram", "estimation", ":", "Returns", ":", "a", "numpy", "array", "(", "nFFT", "x", "numOfShortTermWindows", ")", "ARGUMENTS", ":", "signal", ":", "the", "input", "signal", "samples", "fs", ":", "the", "sampling", "freq", "(", "in", "Hz", ")", "win", ":", "the", "short", "-", "term", "window", "size", "(", "in", "samples", ")", "step", ":", "the", "short", "-", "term", "window", "step", "(", "in", "samples", ")", "PLOT", ":", "flag", "1", "if", "results", "are", "to", "be", "ploted", "RETURNS", ":" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L453-L515
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
stFeatureExtraction
def stFeatureExtraction(signal, fs, win, step): """ This function implements the shor-term windowing process. For each short-term window a set of features is extracted. This results to a sequence of feature vectors, stored in a numpy matrix. ARGUMENTS signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) RETURNS st_features: a numpy array (n_feats x numOfShortTermWindows) """ win = int(win) step = int(step) # Signal normalization signal = numpy.double(signal) signal = signal / (2.0 ** 15) DC = signal.mean() MAX = (numpy.abs(signal)).max() signal = (signal - DC) / (MAX + 0.0000000001) N = len(signal) # total number of samples cur_p = 0 count_fr = 0 nFFT = int(win / 2) [fbank, freqs] = mfccInitFilterBanks(fs, nFFT) # compute the triangular filter banks used in the mfcc calculation nChroma, nFreqsPerChroma = stChromaFeaturesInit(nFFT, fs) n_time_spectral_feats = 8 n_harmonic_feats = 0 n_mfcc_feats = 13 n_chroma_feats = 13 n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats + n_chroma_feats # n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats feature_names = [] feature_names.append("zcr") feature_names.append("energy") feature_names.append("energy_entropy") feature_names += ["spectral_centroid", "spectral_spread"] feature_names.append("spectral_entropy") feature_names.append("spectral_flux") feature_names.append("spectral_rolloff") feature_names += ["mfcc_{0:d}".format(mfcc_i) for mfcc_i in range(1, n_mfcc_feats+1)] feature_names += ["chroma_{0:d}".format(chroma_i) for chroma_i in range(1, n_chroma_feats)] feature_names.append("chroma_std") st_features = [] while (cur_p + win - 1 < N): # for each short-term window until the end of signal count_fr += 1 x = signal[cur_p:cur_p+win] # get current window cur_p = cur_p + step # update window position X = abs(fft(x)) # get fft magnitude X = X[0:nFFT] # normalize fft X = X / len(X) if 
count_fr == 1: X_prev = X.copy() # keep previous fft mag (used in spectral flux) curFV = numpy.zeros((n_total_feats, 1)) curFV[0] = stZCR(x) # zero crossing rate curFV[1] = stEnergy(x) # short-term energy curFV[2] = stEnergyEntropy(x) # short-term entropy of energy [curFV[3], curFV[4]] = stSpectralCentroidAndSpread(X, fs) # spectral centroid and spread curFV[5] = stSpectralEntropy(X) # spectral entropy curFV[6] = stSpectralFlux(X, X_prev) # spectral flux curFV[7] = stSpectralRollOff(X, 0.90, fs) # spectral rolloff curFV[n_time_spectral_feats:n_time_spectral_feats+n_mfcc_feats, 0] = \ stMFCC(X, fbank, n_mfcc_feats).copy() # MFCCs chromaNames, chromaF = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma) curFV[n_time_spectral_feats + n_mfcc_feats: n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \ chromaF curFV[n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \ chromaF.std() st_features.append(curFV) # delta features ''' if count_fr>1: delta = curFV - prevFV curFVFinal = numpy.concatenate((curFV, delta)) else: curFVFinal = numpy.concatenate((curFV, curFV)) prevFV = curFV st_features.append(curFVFinal) ''' # end of delta X_prev = X.copy() st_features = numpy.concatenate(st_features, 1) return st_features, feature_names
python
def stFeatureExtraction(signal, fs, win, step): """ This function implements the shor-term windowing process. For each short-term window a set of features is extracted. This results to a sequence of feature vectors, stored in a numpy matrix. ARGUMENTS signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) RETURNS st_features: a numpy array (n_feats x numOfShortTermWindows) """ win = int(win) step = int(step) # Signal normalization signal = numpy.double(signal) signal = signal / (2.0 ** 15) DC = signal.mean() MAX = (numpy.abs(signal)).max() signal = (signal - DC) / (MAX + 0.0000000001) N = len(signal) # total number of samples cur_p = 0 count_fr = 0 nFFT = int(win / 2) [fbank, freqs] = mfccInitFilterBanks(fs, nFFT) # compute the triangular filter banks used in the mfcc calculation nChroma, nFreqsPerChroma = stChromaFeaturesInit(nFFT, fs) n_time_spectral_feats = 8 n_harmonic_feats = 0 n_mfcc_feats = 13 n_chroma_feats = 13 n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats + n_chroma_feats # n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats feature_names = [] feature_names.append("zcr") feature_names.append("energy") feature_names.append("energy_entropy") feature_names += ["spectral_centroid", "spectral_spread"] feature_names.append("spectral_entropy") feature_names.append("spectral_flux") feature_names.append("spectral_rolloff") feature_names += ["mfcc_{0:d}".format(mfcc_i) for mfcc_i in range(1, n_mfcc_feats+1)] feature_names += ["chroma_{0:d}".format(chroma_i) for chroma_i in range(1, n_chroma_feats)] feature_names.append("chroma_std") st_features = [] while (cur_p + win - 1 < N): # for each short-term window until the end of signal count_fr += 1 x = signal[cur_p:cur_p+win] # get current window cur_p = cur_p + step # update window position X = abs(fft(x)) # get fft magnitude X = X[0:nFFT] # normalize fft X = X / len(X) if 
count_fr == 1: X_prev = X.copy() # keep previous fft mag (used in spectral flux) curFV = numpy.zeros((n_total_feats, 1)) curFV[0] = stZCR(x) # zero crossing rate curFV[1] = stEnergy(x) # short-term energy curFV[2] = stEnergyEntropy(x) # short-term entropy of energy [curFV[3], curFV[4]] = stSpectralCentroidAndSpread(X, fs) # spectral centroid and spread curFV[5] = stSpectralEntropy(X) # spectral entropy curFV[6] = stSpectralFlux(X, X_prev) # spectral flux curFV[7] = stSpectralRollOff(X, 0.90, fs) # spectral rolloff curFV[n_time_spectral_feats:n_time_spectral_feats+n_mfcc_feats, 0] = \ stMFCC(X, fbank, n_mfcc_feats).copy() # MFCCs chromaNames, chromaF = stChromaFeatures(X, fs, nChroma, nFreqsPerChroma) curFV[n_time_spectral_feats + n_mfcc_feats: n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \ chromaF curFV[n_time_spectral_feats + n_mfcc_feats + n_chroma_feats - 1] = \ chromaF.std() st_features.append(curFV) # delta features ''' if count_fr>1: delta = curFV - prevFV curFVFinal = numpy.concatenate((curFV, delta)) else: curFVFinal = numpy.concatenate((curFV, curFV)) prevFV = curFV st_features.append(curFVFinal) ''' # end of delta X_prev = X.copy() st_features = numpy.concatenate(st_features, 1) return st_features, feature_names
[ "def", "stFeatureExtraction", "(", "signal", ",", "fs", ",", "win", ",", "step", ")", ":", "win", "=", "int", "(", "win", ")", "step", "=", "int", "(", "step", ")", "# Signal normalization", "signal", "=", "numpy", ".", "double", "(", "signal", ")", "signal", "=", "signal", "/", "(", "2.0", "**", "15", ")", "DC", "=", "signal", ".", "mean", "(", ")", "MAX", "=", "(", "numpy", ".", "abs", "(", "signal", ")", ")", ".", "max", "(", ")", "signal", "=", "(", "signal", "-", "DC", ")", "/", "(", "MAX", "+", "0.0000000001", ")", "N", "=", "len", "(", "signal", ")", "# total number of samples", "cur_p", "=", "0", "count_fr", "=", "0", "nFFT", "=", "int", "(", "win", "/", "2", ")", "[", "fbank", ",", "freqs", "]", "=", "mfccInitFilterBanks", "(", "fs", ",", "nFFT", ")", "# compute the triangular filter banks used in the mfcc calculation", "nChroma", ",", "nFreqsPerChroma", "=", "stChromaFeaturesInit", "(", "nFFT", ",", "fs", ")", "n_time_spectral_feats", "=", "8", "n_harmonic_feats", "=", "0", "n_mfcc_feats", "=", "13", "n_chroma_feats", "=", "13", "n_total_feats", "=", "n_time_spectral_feats", "+", "n_mfcc_feats", "+", "n_harmonic_feats", "+", "n_chroma_feats", "# n_total_feats = n_time_spectral_feats + n_mfcc_feats + n_harmonic_feats", "feature_names", "=", "[", "]", "feature_names", ".", "append", "(", "\"zcr\"", ")", "feature_names", ".", "append", "(", "\"energy\"", ")", "feature_names", ".", "append", "(", "\"energy_entropy\"", ")", "feature_names", "+=", "[", "\"spectral_centroid\"", ",", "\"spectral_spread\"", "]", "feature_names", ".", "append", "(", "\"spectral_entropy\"", ")", "feature_names", ".", "append", "(", "\"spectral_flux\"", ")", "feature_names", ".", "append", "(", "\"spectral_rolloff\"", ")", "feature_names", "+=", "[", "\"mfcc_{0:d}\"", ".", "format", "(", "mfcc_i", ")", "for", "mfcc_i", "in", "range", "(", "1", ",", "n_mfcc_feats", "+", "1", ")", "]", "feature_names", "+=", "[", "\"chroma_{0:d}\"", ".", "format", "(", "chroma_i", ")", "for", 
"chroma_i", "in", "range", "(", "1", ",", "n_chroma_feats", ")", "]", "feature_names", ".", "append", "(", "\"chroma_std\"", ")", "st_features", "=", "[", "]", "while", "(", "cur_p", "+", "win", "-", "1", "<", "N", ")", ":", "# for each short-term window until the end of signal", "count_fr", "+=", "1", "x", "=", "signal", "[", "cur_p", ":", "cur_p", "+", "win", "]", "# get current window", "cur_p", "=", "cur_p", "+", "step", "# update window position", "X", "=", "abs", "(", "fft", "(", "x", ")", ")", "# get fft magnitude", "X", "=", "X", "[", "0", ":", "nFFT", "]", "# normalize fft", "X", "=", "X", "/", "len", "(", "X", ")", "if", "count_fr", "==", "1", ":", "X_prev", "=", "X", ".", "copy", "(", ")", "# keep previous fft mag (used in spectral flux)", "curFV", "=", "numpy", ".", "zeros", "(", "(", "n_total_feats", ",", "1", ")", ")", "curFV", "[", "0", "]", "=", "stZCR", "(", "x", ")", "# zero crossing rate", "curFV", "[", "1", "]", "=", "stEnergy", "(", "x", ")", "# short-term energy", "curFV", "[", "2", "]", "=", "stEnergyEntropy", "(", "x", ")", "# short-term entropy of energy", "[", "curFV", "[", "3", "]", ",", "curFV", "[", "4", "]", "]", "=", "stSpectralCentroidAndSpread", "(", "X", ",", "fs", ")", "# spectral centroid and spread", "curFV", "[", "5", "]", "=", "stSpectralEntropy", "(", "X", ")", "# spectral entropy", "curFV", "[", "6", "]", "=", "stSpectralFlux", "(", "X", ",", "X_prev", ")", "# spectral flux", "curFV", "[", "7", "]", "=", "stSpectralRollOff", "(", "X", ",", "0.90", ",", "fs", ")", "# spectral rolloff", "curFV", "[", "n_time_spectral_feats", ":", "n_time_spectral_feats", "+", "n_mfcc_feats", ",", "0", "]", "=", "stMFCC", "(", "X", ",", "fbank", ",", "n_mfcc_feats", ")", ".", "copy", "(", ")", "# MFCCs", "chromaNames", ",", "chromaF", "=", "stChromaFeatures", "(", "X", ",", "fs", ",", "nChroma", ",", "nFreqsPerChroma", ")", "curFV", "[", "n_time_spectral_feats", "+", "n_mfcc_feats", ":", "n_time_spectral_feats", "+", "n_mfcc_feats", "+", 
"n_chroma_feats", "-", "1", "]", "=", "chromaF", "curFV", "[", "n_time_spectral_feats", "+", "n_mfcc_feats", "+", "n_chroma_feats", "-", "1", "]", "=", "chromaF", ".", "std", "(", ")", "st_features", ".", "append", "(", "curFV", ")", "# delta features", "'''\n if count_fr>1:\n delta = curFV - prevFV\n curFVFinal = numpy.concatenate((curFV, delta)) \n else:\n curFVFinal = numpy.concatenate((curFV, curFV))\n prevFV = curFV\n st_features.append(curFVFinal) \n '''", "# end of delta", "X_prev", "=", "X", ".", "copy", "(", ")", "st_features", "=", "numpy", ".", "concatenate", "(", "st_features", ",", "1", ")", "return", "st_features", ",", "feature_names" ]
This function implements the shor-term windowing process. For each short-term window a set of features is extracted. This results to a sequence of feature vectors, stored in a numpy matrix. ARGUMENTS signal: the input signal samples fs: the sampling freq (in Hz) win: the short-term window size (in samples) step: the short-term window step (in samples) RETURNS st_features: a numpy array (n_feats x numOfShortTermWindows)
[ "This", "function", "implements", "the", "shor", "-", "term", "windowing", "process", ".", "For", "each", "short", "-", "term", "window", "a", "set", "of", "features", "is", "extracted", ".", "This", "results", "to", "a", "sequence", "of", "feature", "vectors", "stored", "in", "a", "numpy", "matrix", "." ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L521-L614
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
mtFeatureExtraction
def mtFeatureExtraction(signal, fs, mt_win, mt_step, st_win, st_step): """ Mid-term feature extraction """ mt_win_ratio = int(round(mt_win / st_step)) mt_step_ratio = int(round(mt_step / st_step)) mt_features = [] st_features, f_names = stFeatureExtraction(signal, fs, st_win, st_step) n_feats = len(st_features) n_stats = 2 mt_features, mid_feature_names = [], [] #for i in range(n_stats * n_feats + 1): for i in range(n_stats * n_feats): mt_features.append([]) mid_feature_names.append("") for i in range(n_feats): # for each of the short-term features: cur_p = 0 N = len(st_features[i]) mid_feature_names[i] = f_names[i] + "_" + "mean" mid_feature_names[i + n_feats] = f_names[i] + "_" + "std" while (cur_p < N): N1 = cur_p N2 = cur_p + mt_win_ratio if N2 > N: N2 = N cur_st_feats = st_features[i][N1:N2] mt_features[i].append(numpy.mean(cur_st_feats)) mt_features[i + n_feats].append(numpy.std(cur_st_feats)) #mt_features[i+2*n_feats].append(numpy.std(cur_st_feats) / (numpy.mean(cur_st_feats)+0.00000010)) cur_p += mt_step_ratio return numpy.array(mt_features), st_features, mid_feature_names
python
def mtFeatureExtraction(signal, fs, mt_win, mt_step, st_win, st_step): """ Mid-term feature extraction """ mt_win_ratio = int(round(mt_win / st_step)) mt_step_ratio = int(round(mt_step / st_step)) mt_features = [] st_features, f_names = stFeatureExtraction(signal, fs, st_win, st_step) n_feats = len(st_features) n_stats = 2 mt_features, mid_feature_names = [], [] #for i in range(n_stats * n_feats + 1): for i in range(n_stats * n_feats): mt_features.append([]) mid_feature_names.append("") for i in range(n_feats): # for each of the short-term features: cur_p = 0 N = len(st_features[i]) mid_feature_names[i] = f_names[i] + "_" + "mean" mid_feature_names[i + n_feats] = f_names[i] + "_" + "std" while (cur_p < N): N1 = cur_p N2 = cur_p + mt_win_ratio if N2 > N: N2 = N cur_st_feats = st_features[i][N1:N2] mt_features[i].append(numpy.mean(cur_st_feats)) mt_features[i + n_feats].append(numpy.std(cur_st_feats)) #mt_features[i+2*n_feats].append(numpy.std(cur_st_feats) / (numpy.mean(cur_st_feats)+0.00000010)) cur_p += mt_step_ratio return numpy.array(mt_features), st_features, mid_feature_names
[ "def", "mtFeatureExtraction", "(", "signal", ",", "fs", ",", "mt_win", ",", "mt_step", ",", "st_win", ",", "st_step", ")", ":", "mt_win_ratio", "=", "int", "(", "round", "(", "mt_win", "/", "st_step", ")", ")", "mt_step_ratio", "=", "int", "(", "round", "(", "mt_step", "/", "st_step", ")", ")", "mt_features", "=", "[", "]", "st_features", ",", "f_names", "=", "stFeatureExtraction", "(", "signal", ",", "fs", ",", "st_win", ",", "st_step", ")", "n_feats", "=", "len", "(", "st_features", ")", "n_stats", "=", "2", "mt_features", ",", "mid_feature_names", "=", "[", "]", ",", "[", "]", "#for i in range(n_stats * n_feats + 1):", "for", "i", "in", "range", "(", "n_stats", "*", "n_feats", ")", ":", "mt_features", ".", "append", "(", "[", "]", ")", "mid_feature_names", ".", "append", "(", "\"\"", ")", "for", "i", "in", "range", "(", "n_feats", ")", ":", "# for each of the short-term features:", "cur_p", "=", "0", "N", "=", "len", "(", "st_features", "[", "i", "]", ")", "mid_feature_names", "[", "i", "]", "=", "f_names", "[", "i", "]", "+", "\"_\"", "+", "\"mean\"", "mid_feature_names", "[", "i", "+", "n_feats", "]", "=", "f_names", "[", "i", "]", "+", "\"_\"", "+", "\"std\"", "while", "(", "cur_p", "<", "N", ")", ":", "N1", "=", "cur_p", "N2", "=", "cur_p", "+", "mt_win_ratio", "if", "N2", ">", "N", ":", "N2", "=", "N", "cur_st_feats", "=", "st_features", "[", "i", "]", "[", "N1", ":", "N2", "]", "mt_features", "[", "i", "]", ".", "append", "(", "numpy", ".", "mean", "(", "cur_st_feats", ")", ")", "mt_features", "[", "i", "+", "n_feats", "]", ".", "append", "(", "numpy", ".", "std", "(", "cur_st_feats", ")", ")", "#mt_features[i+2*n_feats].append(numpy.std(cur_st_feats) / (numpy.mean(cur_st_feats)+0.00000010))", "cur_p", "+=", "mt_step_ratio", "return", "numpy", ".", "array", "(", "mt_features", ")", ",", "st_features", ",", "mid_feature_names" ]
Mid-term feature extraction
[ "Mid", "-", "term", "feature", "extraction" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L617-L654
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
dirWavFeatureExtraction
def dirWavFeatureExtraction(dirName, mt_win, mt_step, st_win, st_step, compute_beat=False): """ This function extracts the mid-term features of the WAVE files of a particular folder. The resulting feature vector is extracted by long-term averaging the mid-term features. Therefore ONE FEATURE VECTOR is extracted for each WAV file. ARGUMENTS: - dirName: the path of the WAVE directory - mt_win, mt_step: mid-term window and step (in seconds) - st_win, st_step: short-term window and step (in seconds) """ all_mt_feats = numpy.array([]) process_times = [] types = ('*.wav', '*.aif', '*.aiff', '*.mp3', '*.au', '*.ogg') wav_file_list = [] for files in types: wav_file_list.extend(glob.glob(os.path.join(dirName, files))) wav_file_list = sorted(wav_file_list) wav_file_list2, mt_feature_names = [], [] for i, wavFile in enumerate(wav_file_list): print("Analyzing file {0:d} of " "{1:d}: {2:s}".format(i+1, len(wav_file_list), wavFile)) if os.stat(wavFile).st_size == 0: print(" (EMPTY FILE -- SKIPPING)") continue [fs, x] = audioBasicIO.readAudioFile(wavFile) if isinstance(x, int): continue t1 = time.clock() x = audioBasicIO.stereo2mono(x) if x.shape[0]<float(fs)/5: print(" (AUDIO FILE TOO SMALL - SKIPPING)") continue wav_file_list2.append(wavFile) if compute_beat: [mt_term_feats, st_features, mt_feature_names] = \ mtFeatureExtraction(x, fs, round(mt_win * fs), round(mt_step * fs), round(fs * st_win), round(fs * st_step)) [beat, beat_conf] = beatExtraction(st_features, st_step) else: [mt_term_feats, _, mt_feature_names] = \ mtFeatureExtraction(x, fs, round(mt_win * fs), round(mt_step * fs), round(fs * st_win), round(fs * st_step)) mt_term_feats = numpy.transpose(mt_term_feats) mt_term_feats = mt_term_feats.mean(axis=0) # long term averaging of mid-term statistics if (not numpy.isnan(mt_term_feats).any()) and \ (not numpy.isinf(mt_term_feats).any()): if compute_beat: mt_term_feats = numpy.append(mt_term_feats, beat) mt_term_feats = numpy.append(mt_term_feats, beat_conf) if 
len(all_mt_feats) == 0: # append feature vector all_mt_feats = mt_term_feats else: all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats)) t2 = time.clock() duration = float(len(x)) / fs process_times.append((t2 - t1) / duration) if len(process_times) > 0: print("Feature extraction complexity ratio: " "{0:.1f} x realtime".format((1.0 / numpy.mean(numpy.array(process_times))))) return (all_mt_feats, wav_file_list2, mt_feature_names)
python
def dirWavFeatureExtraction(dirName, mt_win, mt_step, st_win, st_step, compute_beat=False): """ This function extracts the mid-term features of the WAVE files of a particular folder. The resulting feature vector is extracted by long-term averaging the mid-term features. Therefore ONE FEATURE VECTOR is extracted for each WAV file. ARGUMENTS: - dirName: the path of the WAVE directory - mt_win, mt_step: mid-term window and step (in seconds) - st_win, st_step: short-term window and step (in seconds) """ all_mt_feats = numpy.array([]) process_times = [] types = ('*.wav', '*.aif', '*.aiff', '*.mp3', '*.au', '*.ogg') wav_file_list = [] for files in types: wav_file_list.extend(glob.glob(os.path.join(dirName, files))) wav_file_list = sorted(wav_file_list) wav_file_list2, mt_feature_names = [], [] for i, wavFile in enumerate(wav_file_list): print("Analyzing file {0:d} of " "{1:d}: {2:s}".format(i+1, len(wav_file_list), wavFile)) if os.stat(wavFile).st_size == 0: print(" (EMPTY FILE -- SKIPPING)") continue [fs, x] = audioBasicIO.readAudioFile(wavFile) if isinstance(x, int): continue t1 = time.clock() x = audioBasicIO.stereo2mono(x) if x.shape[0]<float(fs)/5: print(" (AUDIO FILE TOO SMALL - SKIPPING)") continue wav_file_list2.append(wavFile) if compute_beat: [mt_term_feats, st_features, mt_feature_names] = \ mtFeatureExtraction(x, fs, round(mt_win * fs), round(mt_step * fs), round(fs * st_win), round(fs * st_step)) [beat, beat_conf] = beatExtraction(st_features, st_step) else: [mt_term_feats, _, mt_feature_names] = \ mtFeatureExtraction(x, fs, round(mt_win * fs), round(mt_step * fs), round(fs * st_win), round(fs * st_step)) mt_term_feats = numpy.transpose(mt_term_feats) mt_term_feats = mt_term_feats.mean(axis=0) # long term averaging of mid-term statistics if (not numpy.isnan(mt_term_feats).any()) and \ (not numpy.isinf(mt_term_feats).any()): if compute_beat: mt_term_feats = numpy.append(mt_term_feats, beat) mt_term_feats = numpy.append(mt_term_feats, beat_conf) if 
len(all_mt_feats) == 0: # append feature vector all_mt_feats = mt_term_feats else: all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats)) t2 = time.clock() duration = float(len(x)) / fs process_times.append((t2 - t1) / duration) if len(process_times) > 0: print("Feature extraction complexity ratio: " "{0:.1f} x realtime".format((1.0 / numpy.mean(numpy.array(process_times))))) return (all_mt_feats, wav_file_list2, mt_feature_names)
[ "def", "dirWavFeatureExtraction", "(", "dirName", ",", "mt_win", ",", "mt_step", ",", "st_win", ",", "st_step", ",", "compute_beat", "=", "False", ")", ":", "all_mt_feats", "=", "numpy", ".", "array", "(", "[", "]", ")", "process_times", "=", "[", "]", "types", "=", "(", "'*.wav'", ",", "'*.aif'", ",", "'*.aiff'", ",", "'*.mp3'", ",", "'*.au'", ",", "'*.ogg'", ")", "wav_file_list", "=", "[", "]", "for", "files", "in", "types", ":", "wav_file_list", ".", "extend", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "dirName", ",", "files", ")", ")", ")", "wav_file_list", "=", "sorted", "(", "wav_file_list", ")", "wav_file_list2", ",", "mt_feature_names", "=", "[", "]", ",", "[", "]", "for", "i", ",", "wavFile", "in", "enumerate", "(", "wav_file_list", ")", ":", "print", "(", "\"Analyzing file {0:d} of \"", "\"{1:d}: {2:s}\"", ".", "format", "(", "i", "+", "1", ",", "len", "(", "wav_file_list", ")", ",", "wavFile", ")", ")", "if", "os", ".", "stat", "(", "wavFile", ")", ".", "st_size", "==", "0", ":", "print", "(", "\" (EMPTY FILE -- SKIPPING)\"", ")", "continue", "[", "fs", ",", "x", "]", "=", "audioBasicIO", ".", "readAudioFile", "(", "wavFile", ")", "if", "isinstance", "(", "x", ",", "int", ")", ":", "continue", "t1", "=", "time", ".", "clock", "(", ")", "x", "=", "audioBasicIO", ".", "stereo2mono", "(", "x", ")", "if", "x", ".", "shape", "[", "0", "]", "<", "float", "(", "fs", ")", "/", "5", ":", "print", "(", "\" (AUDIO FILE TOO SMALL - SKIPPING)\"", ")", "continue", "wav_file_list2", ".", "append", "(", "wavFile", ")", "if", "compute_beat", ":", "[", "mt_term_feats", ",", "st_features", ",", "mt_feature_names", "]", "=", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "round", "(", "mt_win", "*", "fs", ")", ",", "round", "(", "mt_step", "*", "fs", ")", ",", "round", "(", "fs", "*", "st_win", ")", ",", "round", "(", "fs", "*", "st_step", ")", ")", "[", "beat", ",", "beat_conf", "]", "=", "beatExtraction", "(", "st_features", ",", "st_step", 
")", "else", ":", "[", "mt_term_feats", ",", "_", ",", "mt_feature_names", "]", "=", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "round", "(", "mt_win", "*", "fs", ")", ",", "round", "(", "mt_step", "*", "fs", ")", ",", "round", "(", "fs", "*", "st_win", ")", ",", "round", "(", "fs", "*", "st_step", ")", ")", "mt_term_feats", "=", "numpy", ".", "transpose", "(", "mt_term_feats", ")", "mt_term_feats", "=", "mt_term_feats", ".", "mean", "(", "axis", "=", "0", ")", "# long term averaging of mid-term statistics", "if", "(", "not", "numpy", ".", "isnan", "(", "mt_term_feats", ")", ".", "any", "(", ")", ")", "and", "(", "not", "numpy", ".", "isinf", "(", "mt_term_feats", ")", ".", "any", "(", ")", ")", ":", "if", "compute_beat", ":", "mt_term_feats", "=", "numpy", ".", "append", "(", "mt_term_feats", ",", "beat", ")", "mt_term_feats", "=", "numpy", ".", "append", "(", "mt_term_feats", ",", "beat_conf", ")", "if", "len", "(", "all_mt_feats", ")", "==", "0", ":", "# append feature vector", "all_mt_feats", "=", "mt_term_feats", "else", ":", "all_mt_feats", "=", "numpy", ".", "vstack", "(", "(", "all_mt_feats", ",", "mt_term_feats", ")", ")", "t2", "=", "time", ".", "clock", "(", ")", "duration", "=", "float", "(", "len", "(", "x", ")", ")", "/", "fs", "process_times", ".", "append", "(", "(", "t2", "-", "t1", ")", "/", "duration", ")", "if", "len", "(", "process_times", ")", ">", "0", ":", "print", "(", "\"Feature extraction complexity ratio: \"", "\"{0:.1f} x realtime\"", ".", "format", "(", "(", "1.0", "/", "numpy", ".", "mean", "(", "numpy", ".", "array", "(", "process_times", ")", ")", ")", ")", ")", "return", "(", "all_mt_feats", ",", "wav_file_list2", ",", "mt_feature_names", ")" ]
This function extracts the mid-term features of the WAVE files of a particular folder. The resulting feature vector is extracted by long-term averaging the mid-term features. Therefore ONE FEATURE VECTOR is extracted for each WAV file. ARGUMENTS: - dirName: the path of the WAVE directory - mt_win, mt_step: mid-term window and step (in seconds) - st_win, st_step: short-term window and step (in seconds)
[ "This", "function", "extracts", "the", "mid", "-", "term", "features", "of", "the", "WAVE", "files", "of", "a", "particular", "folder", "." ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L726-L799
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
dirsWavFeatureExtraction
def dirsWavFeatureExtraction(dirNames, mt_win, mt_step, st_win, st_step, compute_beat=False): ''' Same as dirWavFeatureExtraction, but instead of a single dir it takes a list of paths as input and returns a list of feature matrices. EXAMPLE: [features, classNames] = a.dirsWavFeatureExtraction(['audioData/classSegmentsRec/noise','audioData/classSegmentsRec/speech', 'audioData/classSegmentsRec/brush-teeth','audioData/classSegmentsRec/shower'], 1, 1, 0.02, 0.02); It can be used during the training process of a classification model , in order to get feature matrices from various audio classes (each stored in a seperate path) ''' # feature extraction for each class: features = [] classNames = [] fileNames = [] for i, d in enumerate(dirNames): [f, fn, feature_names] = dirWavFeatureExtraction(d, mt_win, mt_step, st_win, st_step, compute_beat=compute_beat) if f.shape[0] > 0: # if at least one audio file has been found in the provided folder: features.append(f) fileNames.append(fn) if d[-1] == os.sep: classNames.append(d.split(os.sep)[-2]) else: classNames.append(d.split(os.sep)[-1]) return features, classNames, fileNames
python
def dirsWavFeatureExtraction(dirNames, mt_win, mt_step, st_win, st_step, compute_beat=False): ''' Same as dirWavFeatureExtraction, but instead of a single dir it takes a list of paths as input and returns a list of feature matrices. EXAMPLE: [features, classNames] = a.dirsWavFeatureExtraction(['audioData/classSegmentsRec/noise','audioData/classSegmentsRec/speech', 'audioData/classSegmentsRec/brush-teeth','audioData/classSegmentsRec/shower'], 1, 1, 0.02, 0.02); It can be used during the training process of a classification model , in order to get feature matrices from various audio classes (each stored in a seperate path) ''' # feature extraction for each class: features = [] classNames = [] fileNames = [] for i, d in enumerate(dirNames): [f, fn, feature_names] = dirWavFeatureExtraction(d, mt_win, mt_step, st_win, st_step, compute_beat=compute_beat) if f.shape[0] > 0: # if at least one audio file has been found in the provided folder: features.append(f) fileNames.append(fn) if d[-1] == os.sep: classNames.append(d.split(os.sep)[-2]) else: classNames.append(d.split(os.sep)[-1]) return features, classNames, fileNames
[ "def", "dirsWavFeatureExtraction", "(", "dirNames", ",", "mt_win", ",", "mt_step", ",", "st_win", ",", "st_step", ",", "compute_beat", "=", "False", ")", ":", "# feature extraction for each class:", "features", "=", "[", "]", "classNames", "=", "[", "]", "fileNames", "=", "[", "]", "for", "i", ",", "d", "in", "enumerate", "(", "dirNames", ")", ":", "[", "f", ",", "fn", ",", "feature_names", "]", "=", "dirWavFeatureExtraction", "(", "d", ",", "mt_win", ",", "mt_step", ",", "st_win", ",", "st_step", ",", "compute_beat", "=", "compute_beat", ")", "if", "f", ".", "shape", "[", "0", "]", ">", "0", ":", "# if at least one audio file has been found in the provided folder:", "features", ".", "append", "(", "f", ")", "fileNames", ".", "append", "(", "fn", ")", "if", "d", "[", "-", "1", "]", "==", "os", ".", "sep", ":", "classNames", ".", "append", "(", "d", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "2", "]", ")", "else", ":", "classNames", ".", "append", "(", "d", ".", "split", "(", "os", ".", "sep", ")", "[", "-", "1", "]", ")", "return", "features", ",", "classNames", ",", "fileNames" ]
Same as dirWavFeatureExtraction, but instead of a single dir it takes a list of paths as input and returns a list of feature matrices. EXAMPLE: [features, classNames] = a.dirsWavFeatureExtraction(['audioData/classSegmentsRec/noise','audioData/classSegmentsRec/speech', 'audioData/classSegmentsRec/brush-teeth','audioData/classSegmentsRec/shower'], 1, 1, 0.02, 0.02); It can be used during the training process of a classification model , in order to get feature matrices from various audio classes (each stored in a seperate path)
[ "Same", "as", "dirWavFeatureExtraction", "but", "instead", "of", "a", "single", "dir", "it", "takes", "a", "list", "of", "paths", "as", "input", "and", "returns", "a", "list", "of", "feature", "matrices", ".", "EXAMPLE", ":", "[", "features", "classNames", "]", "=", "a", ".", "dirsWavFeatureExtraction", "(", "[", "audioData", "/", "classSegmentsRec", "/", "noise", "audioData", "/", "classSegmentsRec", "/", "speech", "audioData", "/", "classSegmentsRec", "/", "brush", "-", "teeth", "audioData", "/", "classSegmentsRec", "/", "shower", "]", "1", "1", "0", ".", "02", "0", ".", "02", ")", ";" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L802-L831
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
dirWavFeatureExtractionNoAveraging
def dirWavFeatureExtractionNoAveraging(dirName, mt_win, mt_step, st_win, st_step): """ This function extracts the mid-term features of the WAVE files of a particular folder without averaging each file. ARGUMENTS: - dirName: the path of the WAVE directory - mt_win, mt_step: mid-term window and step (in seconds) - st_win, st_step: short-term window and step (in seconds) RETURNS: - X: A feature matrix - Y: A matrix of file labels - filenames: """ all_mt_feats = numpy.array([]) signal_idx = numpy.array([]) process_times = [] types = ('*.wav', '*.aif', '*.aiff', '*.ogg') wav_file_list = [] for files in types: wav_file_list.extend(glob.glob(os.path.join(dirName, files))) wav_file_list = sorted(wav_file_list) for i, wavFile in enumerate(wav_file_list): [fs, x] = audioBasicIO.readAudioFile(wavFile) if isinstance(x, int): continue x = audioBasicIO.stereo2mono(x) [mt_term_feats, _, _] = mtFeatureExtraction(x, fs, round(mt_win * fs), round(mt_step * fs), round(fs * st_win), round(fs * st_step)) mt_term_feats = numpy.transpose(mt_term_feats) if len(all_mt_feats) == 0: # append feature vector all_mt_feats = mt_term_feats signal_idx = numpy.zeros((mt_term_feats.shape[0], )) else: all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats)) signal_idx = numpy.append(signal_idx, i * numpy.ones((mt_term_feats.shape[0], ))) return (all_mt_feats, signal_idx, wav_file_list)
python
def dirWavFeatureExtractionNoAveraging(dirName, mt_win, mt_step, st_win, st_step): """ This function extracts the mid-term features of the WAVE files of a particular folder without averaging each file. ARGUMENTS: - dirName: the path of the WAVE directory - mt_win, mt_step: mid-term window and step (in seconds) - st_win, st_step: short-term window and step (in seconds) RETURNS: - X: A feature matrix - Y: A matrix of file labels - filenames: """ all_mt_feats = numpy.array([]) signal_idx = numpy.array([]) process_times = [] types = ('*.wav', '*.aif', '*.aiff', '*.ogg') wav_file_list = [] for files in types: wav_file_list.extend(glob.glob(os.path.join(dirName, files))) wav_file_list = sorted(wav_file_list) for i, wavFile in enumerate(wav_file_list): [fs, x] = audioBasicIO.readAudioFile(wavFile) if isinstance(x, int): continue x = audioBasicIO.stereo2mono(x) [mt_term_feats, _, _] = mtFeatureExtraction(x, fs, round(mt_win * fs), round(mt_step * fs), round(fs * st_win), round(fs * st_step)) mt_term_feats = numpy.transpose(mt_term_feats) if len(all_mt_feats) == 0: # append feature vector all_mt_feats = mt_term_feats signal_idx = numpy.zeros((mt_term_feats.shape[0], )) else: all_mt_feats = numpy.vstack((all_mt_feats, mt_term_feats)) signal_idx = numpy.append(signal_idx, i * numpy.ones((mt_term_feats.shape[0], ))) return (all_mt_feats, signal_idx, wav_file_list)
[ "def", "dirWavFeatureExtractionNoAveraging", "(", "dirName", ",", "mt_win", ",", "mt_step", ",", "st_win", ",", "st_step", ")", ":", "all_mt_feats", "=", "numpy", ".", "array", "(", "[", "]", ")", "signal_idx", "=", "numpy", ".", "array", "(", "[", "]", ")", "process_times", "=", "[", "]", "types", "=", "(", "'*.wav'", ",", "'*.aif'", ",", "'*.aiff'", ",", "'*.ogg'", ")", "wav_file_list", "=", "[", "]", "for", "files", "in", "types", ":", "wav_file_list", ".", "extend", "(", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "dirName", ",", "files", ")", ")", ")", "wav_file_list", "=", "sorted", "(", "wav_file_list", ")", "for", "i", ",", "wavFile", "in", "enumerate", "(", "wav_file_list", ")", ":", "[", "fs", ",", "x", "]", "=", "audioBasicIO", ".", "readAudioFile", "(", "wavFile", ")", "if", "isinstance", "(", "x", ",", "int", ")", ":", "continue", "x", "=", "audioBasicIO", ".", "stereo2mono", "(", "x", ")", "[", "mt_term_feats", ",", "_", ",", "_", "]", "=", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "round", "(", "mt_win", "*", "fs", ")", ",", "round", "(", "mt_step", "*", "fs", ")", ",", "round", "(", "fs", "*", "st_win", ")", ",", "round", "(", "fs", "*", "st_step", ")", ")", "mt_term_feats", "=", "numpy", ".", "transpose", "(", "mt_term_feats", ")", "if", "len", "(", "all_mt_feats", ")", "==", "0", ":", "# append feature vector", "all_mt_feats", "=", "mt_term_feats", "signal_idx", "=", "numpy", ".", "zeros", "(", "(", "mt_term_feats", ".", "shape", "[", "0", "]", ",", ")", ")", "else", ":", "all_mt_feats", "=", "numpy", ".", "vstack", "(", "(", "all_mt_feats", ",", "mt_term_feats", ")", ")", "signal_idx", "=", "numpy", ".", "append", "(", "signal_idx", ",", "i", "*", "numpy", ".", "ones", "(", "(", "mt_term_feats", ".", "shape", "[", "0", "]", ",", ")", ")", ")", "return", "(", "all_mt_feats", ",", "signal_idx", ",", "wav_file_list", ")" ]
This function extracts the mid-term features of the WAVE files of a particular folder without averaging each file. ARGUMENTS: - dirName: the path of the WAVE directory - mt_win, mt_step: mid-term window and step (in seconds) - st_win, st_step: short-term window and step (in seconds) RETURNS: - X: A feature matrix - Y: A matrix of file labels - filenames:
[ "This", "function", "extracts", "the", "mid", "-", "term", "features", "of", "the", "WAVE", "files", "of", "a", "particular", "folder", "without", "averaging", "each", "file", "." ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L834-L879
train
tyiannak/pyAudioAnalysis
pyAudioAnalysis/audioFeatureExtraction.py
mtFeatureExtractionToFile
def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile, storeStFeatures=False, storeToCSV=False, PLOT=False): """ This function is used as a wrapper to: a) read the content of a WAV file b) perform mid-term feature extraction on that signal c) write the mid-term feature sequences to a numpy file """ [fs, x] = audioBasicIO.readAudioFile(fileName) x = audioBasicIO.stereo2mono(x) if storeStFeatures: [mtF, stF, _] = mtFeatureExtraction(x, fs, round(fs * midTermSize), round(fs * midTermStep), round(fs * shortTermSize), round(fs * shortTermStep)) else: [mtF, _, _] = mtFeatureExtraction(x, fs, round(fs*midTermSize), round(fs * midTermStep), round(fs * shortTermSize), round(fs * shortTermStep)) # save mt features to numpy file numpy.save(outPutFile, mtF) if PLOT: print("Mid-term numpy file: " + outPutFile + ".npy saved") if storeToCSV: numpy.savetxt(outPutFile+".csv", mtF.T, delimiter=",") if PLOT: print("Mid-term CSV file: " + outPutFile + ".csv saved") if storeStFeatures: # save st features to numpy file numpy.save(outPutFile+"_st", stF) if PLOT: print("Short-term numpy file: " + outPutFile + "_st.npy saved") if storeToCSV: # store st features to CSV file numpy.savetxt(outPutFile+"_st.csv", stF.T, delimiter=",") if PLOT: print("Short-term CSV file: " + outPutFile + "_st.csv saved")
python
def mtFeatureExtractionToFile(fileName, midTermSize, midTermStep, shortTermSize, shortTermStep, outPutFile, storeStFeatures=False, storeToCSV=False, PLOT=False): """ This function is used as a wrapper to: a) read the content of a WAV file b) perform mid-term feature extraction on that signal c) write the mid-term feature sequences to a numpy file """ [fs, x] = audioBasicIO.readAudioFile(fileName) x = audioBasicIO.stereo2mono(x) if storeStFeatures: [mtF, stF, _] = mtFeatureExtraction(x, fs, round(fs * midTermSize), round(fs * midTermStep), round(fs * shortTermSize), round(fs * shortTermStep)) else: [mtF, _, _] = mtFeatureExtraction(x, fs, round(fs*midTermSize), round(fs * midTermStep), round(fs * shortTermSize), round(fs * shortTermStep)) # save mt features to numpy file numpy.save(outPutFile, mtF) if PLOT: print("Mid-term numpy file: " + outPutFile + ".npy saved") if storeToCSV: numpy.savetxt(outPutFile+".csv", mtF.T, delimiter=",") if PLOT: print("Mid-term CSV file: " + outPutFile + ".csv saved") if storeStFeatures: # save st features to numpy file numpy.save(outPutFile+"_st", stF) if PLOT: print("Short-term numpy file: " + outPutFile + "_st.npy saved") if storeToCSV: # store st features to CSV file numpy.savetxt(outPutFile+"_st.csv", stF.T, delimiter=",") if PLOT: print("Short-term CSV file: " + outPutFile + "_st.csv saved")
[ "def", "mtFeatureExtractionToFile", "(", "fileName", ",", "midTermSize", ",", "midTermStep", ",", "shortTermSize", ",", "shortTermStep", ",", "outPutFile", ",", "storeStFeatures", "=", "False", ",", "storeToCSV", "=", "False", ",", "PLOT", "=", "False", ")", ":", "[", "fs", ",", "x", "]", "=", "audioBasicIO", ".", "readAudioFile", "(", "fileName", ")", "x", "=", "audioBasicIO", ".", "stereo2mono", "(", "x", ")", "if", "storeStFeatures", ":", "[", "mtF", ",", "stF", ",", "_", "]", "=", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "round", "(", "fs", "*", "midTermSize", ")", ",", "round", "(", "fs", "*", "midTermStep", ")", ",", "round", "(", "fs", "*", "shortTermSize", ")", ",", "round", "(", "fs", "*", "shortTermStep", ")", ")", "else", ":", "[", "mtF", ",", "_", ",", "_", "]", "=", "mtFeatureExtraction", "(", "x", ",", "fs", ",", "round", "(", "fs", "*", "midTermSize", ")", ",", "round", "(", "fs", "*", "midTermStep", ")", ",", "round", "(", "fs", "*", "shortTermSize", ")", ",", "round", "(", "fs", "*", "shortTermStep", ")", ")", "# save mt features to numpy file", "numpy", ".", "save", "(", "outPutFile", ",", "mtF", ")", "if", "PLOT", ":", "print", "(", "\"Mid-term numpy file: \"", "+", "outPutFile", "+", "\".npy saved\"", ")", "if", "storeToCSV", ":", "numpy", ".", "savetxt", "(", "outPutFile", "+", "\".csv\"", ",", "mtF", ".", "T", ",", "delimiter", "=", "\",\"", ")", "if", "PLOT", ":", "print", "(", "\"Mid-term CSV file: \"", "+", "outPutFile", "+", "\".csv saved\"", ")", "if", "storeStFeatures", ":", "# save st features to numpy file", "numpy", ".", "save", "(", "outPutFile", "+", "\"_st\"", ",", "stF", ")", "if", "PLOT", ":", "print", "(", "\"Short-term numpy file: \"", "+", "outPutFile", "+", "\"_st.npy saved\"", ")", "if", "storeToCSV", ":", "# store st features to CSV file", "numpy", ".", "savetxt", "(", "outPutFile", "+", "\"_st.csv\"", ",", "stF", ".", "T", ",", "delimiter", "=", "\",\"", ")", "if", "PLOT", ":", "print", "(", "\"Short-term CSV 
file: \"", "+", "outPutFile", "+", "\"_st.csv saved\"", ")" ]
This function is used as a wrapper to: a) read the content of a WAV file b) perform mid-term feature extraction on that signal c) write the mid-term feature sequences to a numpy file
[ "This", "function", "is", "used", "as", "a", "wrapper", "to", ":", "a", ")", "read", "the", "content", "of", "a", "WAV", "file", "b", ")", "perform", "mid", "-", "term", "feature", "extraction", "on", "that", "signal", "c", ")", "write", "the", "mid", "-", "term", "feature", "sequences", "to", "a", "numpy", "file" ]
e3da991e7247492deba50648a4c7c0f41e684af4
https://github.com/tyiannak/pyAudioAnalysis/blob/e3da991e7247492deba50648a4c7c0f41e684af4/pyAudioAnalysis/audioFeatureExtraction.py#L889-L928
train
ricequant/rqalpha
rqalpha/model/base_account.py
BaseAccount.market_value
def market_value(self): """ [float] 市值 """ return sum(position.market_value for position in six.itervalues(self._positions))
python
def market_value(self): """ [float] 市值 """ return sum(position.market_value for position in six.itervalues(self._positions))
[ "def", "market_value", "(", "self", ")", ":", "return", "sum", "(", "position", ".", "market_value", "for", "position", "in", "six", ".", "itervalues", "(", "self", ".", "_positions", ")", ")" ]
[float] 市值
[ "[", "float", "]", "市值" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/base_account.py#L103-L107
train
ricequant/rqalpha
rqalpha/model/base_account.py
BaseAccount.transaction_cost
def transaction_cost(self): """ [float] 总费用 """ return sum(position.transaction_cost for position in six.itervalues(self._positions))
python
def transaction_cost(self): """ [float] 总费用 """ return sum(position.transaction_cost for position in six.itervalues(self._positions))
[ "def", "transaction_cost", "(", "self", ")", ":", "return", "sum", "(", "position", ".", "transaction_cost", "for", "position", "in", "six", ".", "itervalues", "(", "self", ".", "_positions", ")", ")" ]
[float] 总费用
[ "[", "float", "]", "总费用" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/base_account.py#L110-L114
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py
buy_open
def buy_open(id_or_ins, amount, price=None, style=None): """ 买入开仓。 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None :example: .. code-block:: python #以价格为3500的限价单开仓买入2张上期所AG1607合约: buy_open('AG1607', amount=2, price=3500)) """ return order(id_or_ins, amount, SIDE.BUY, POSITION_EFFECT.OPEN, cal_style(price, style))
python
def buy_open(id_or_ins, amount, price=None, style=None): """ 买入开仓。 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None :example: .. code-block:: python #以价格为3500的限价单开仓买入2张上期所AG1607合约: buy_open('AG1607', amount=2, price=3500)) """ return order(id_or_ins, amount, SIDE.BUY, POSITION_EFFECT.OPEN, cal_style(price, style))
[ "def", "buy_open", "(", "id_or_ins", ",", "amount", ",", "price", "=", "None", ",", "style", "=", "None", ")", ":", "return", "order", "(", "id_or_ins", ",", "amount", ",", "SIDE", ".", "BUY", ",", "POSITION_EFFECT", ".", "OPEN", ",", "cal_style", "(", "price", ",", "style", ")", ")" ]
买入开仓。 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None :example: .. code-block:: python #以价格为3500的限价单开仓买入2张上期所AG1607合约: buy_open('AG1607', amount=2, price=3500))
[ "买入开仓。" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py#L208-L231
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py
buy_close
def buy_close(id_or_ins, amount, price=None, style=None, close_today=False): """ 平卖仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: 是否指定发平今仓单,默认为False,发送平仓单 :return: :class:`~Order` object | list[:class:`~Order`] | None :example: .. code-block:: python #市价单将现有IF1603空仓买入平仓2张: buy_close('IF1603', 2) """ position_effect = POSITION_EFFECT.CLOSE_TODAY if close_today else POSITION_EFFECT.CLOSE return order(id_or_ins, amount, SIDE.BUY, position_effect, cal_style(price, style))
python
def buy_close(id_or_ins, amount, price=None, style=None, close_today=False): """ 平卖仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: 是否指定发平今仓单,默认为False,发送平仓单 :return: :class:`~Order` object | list[:class:`~Order`] | None :example: .. code-block:: python #市价单将现有IF1603空仓买入平仓2张: buy_close('IF1603', 2) """ position_effect = POSITION_EFFECT.CLOSE_TODAY if close_today else POSITION_EFFECT.CLOSE return order(id_or_ins, amount, SIDE.BUY, position_effect, cal_style(price, style))
[ "def", "buy_close", "(", "id_or_ins", ",", "amount", ",", "price", "=", "None", ",", "style", "=", "None", ",", "close_today", "=", "False", ")", ":", "position_effect", "=", "POSITION_EFFECT", ".", "CLOSE_TODAY", "if", "close_today", "else", "POSITION_EFFECT", ".", "CLOSE", "return", "order", "(", "id_or_ins", ",", "amount", ",", "SIDE", ".", "BUY", ",", "position_effect", ",", "cal_style", "(", "price", ",", "style", ")", ")" ]
平卖仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: 是否指定发平今仓单,默认为False,发送平仓单 :return: :class:`~Order` object | list[:class:`~Order`] | None :example: .. code-block:: python #市价单将现有IF1603空仓买入平仓2张: buy_close('IF1603', 2)
[ "平卖仓" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py#L235-L261
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py
sell_open
def sell_open(id_or_ins, amount, price=None, style=None): """ 卖出开仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None """ return order(id_or_ins, amount, SIDE.SELL, POSITION_EFFECT.OPEN, cal_style(price, style))
python
def sell_open(id_or_ins, amount, price=None, style=None): """ 卖出开仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None """ return order(id_or_ins, amount, SIDE.SELL, POSITION_EFFECT.OPEN, cal_style(price, style))
[ "def", "sell_open", "(", "id_or_ins", ",", "amount", ",", "price", "=", "None", ",", "style", "=", "None", ")", ":", "return", "order", "(", "id_or_ins", ",", "amount", ",", "SIDE", ".", "SELL", ",", "POSITION_EFFECT", ".", "OPEN", ",", "cal_style", "(", "price", ",", "style", ")", ")" ]
卖出开仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :return: :class:`~Order` object | None
[ "卖出开仓" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py#L265-L281
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py
sell_close
def sell_close(id_or_ins, amount, price=None, style=None, close_today=False): """ 平买仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: 是否指定发平今仓单,默认为False,发送平仓单 :return: :class:`~Order` object | list[:class:`~Order`] | None """ position_effect = POSITION_EFFECT.CLOSE_TODAY if close_today else POSITION_EFFECT.CLOSE return order(id_or_ins, amount, SIDE.SELL, position_effect, cal_style(price, style))
python
def sell_close(id_or_ins, amount, price=None, style=None, close_today=False): """ 平买仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: 是否指定发平今仓单,默认为False,发送平仓单 :return: :class:`~Order` object | list[:class:`~Order`] | None """ position_effect = POSITION_EFFECT.CLOSE_TODAY if close_today else POSITION_EFFECT.CLOSE return order(id_or_ins, amount, SIDE.SELL, position_effect, cal_style(price, style))
[ "def", "sell_close", "(", "id_or_ins", ",", "amount", ",", "price", "=", "None", ",", "style", "=", "None", ",", "close_today", "=", "False", ")", ":", "position_effect", "=", "POSITION_EFFECT", ".", "CLOSE_TODAY", "if", "close_today", "else", "POSITION_EFFECT", ".", "CLOSE", "return", "order", "(", "id_or_ins", ",", "amount", ",", "SIDE", ".", "SELL", ",", "position_effect", ",", "cal_style", "(", "price", ",", "style", ")", ")" ]
平买仓 :param id_or_ins: 下单标的物 :type id_or_ins: :class:`~Instrument` object | `str` | List[:class:`~Instrument`] | List[`str`] :param int amount: 下单手数 :param float price: 下单价格,默认为None,表示 :class:`~MarketOrder`, 此参数主要用于简化 `style` 参数。 :param style: 下单类型, 默认是市价单。目前支持的订单类型有 :class:`~LimitOrder` 和 :class:`~MarketOrder` :type style: `OrderStyle` object :param bool close_today: 是否指定发平今仓单,默认为False,发送平仓单 :return: :class:`~Order` object | list[:class:`~Order`] | None
[ "平买仓" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py#L285-L304
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py
get_future_contracts
def get_future_contracts(underlying_symbol): """ 获取某一期货品种在策略当前日期的可交易合约order_book_id列表。按照到期月份,下标从小到大排列,返回列表中第一个合约对应的就是该品种的近月合约。 :param str underlying_symbol: 期货合约品种,例如沪深300股指期货为'IF' :return: list[`str`] :example: 获取某一天的主力合约代码(策略当前日期是20161201): .. code-block:: python [In] logger.info(get_future_contracts('IF')) [Out] ['IF1612', 'IF1701', 'IF1703', 'IF1706'] """ env = Environment.get_instance() return env.data_proxy.get_future_contracts(underlying_symbol, env.trading_dt)
python
def get_future_contracts(underlying_symbol): """ 获取某一期货品种在策略当前日期的可交易合约order_book_id列表。按照到期月份,下标从小到大排列,返回列表中第一个合约对应的就是该品种的近月合约。 :param str underlying_symbol: 期货合约品种,例如沪深300股指期货为'IF' :return: list[`str`] :example: 获取某一天的主力合约代码(策略当前日期是20161201): .. code-block:: python [In] logger.info(get_future_contracts('IF')) [Out] ['IF1612', 'IF1701', 'IF1703', 'IF1706'] """ env = Environment.get_instance() return env.data_proxy.get_future_contracts(underlying_symbol, env.trading_dt)
[ "def", "get_future_contracts", "(", "underlying_symbol", ")", ":", "env", "=", "Environment", ".", "get_instance", "(", ")", "return", "env", ".", "data_proxy", ".", "get_future_contracts", "(", "underlying_symbol", ",", "env", ".", "trading_dt", ")" ]
获取某一期货品种在策略当前日期的可交易合约order_book_id列表。按照到期月份,下标从小到大排列,返回列表中第一个合约对应的就是该品种的近月合约。 :param str underlying_symbol: 期货合约品种,例如沪深300股指期货为'IF' :return: list[`str`] :example: 获取某一天的主力合约代码(策略当前日期是20161201): .. code-block:: python [In] logger.info(get_future_contracts('IF')) [Out] ['IF1612', 'IF1701', 'IF1703', 'IF1706']
[ "获取某一期货品种在策略当前日期的可交易合约order_book_id列表。按照到期月份,下标从小到大排列,返回列表中第一个合约对应的就是该品种的近月合约。" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/api/api_future.py#L329-L349
train
ricequant/rqalpha
rqalpha/model/order.py
Order.quantity
def quantity(self): """ [int] 订单数量 """ if np.isnan(self._quantity): raise RuntimeError("Quantity of order {} is not supposed to be nan.".format(self.order_id)) return self._quantity
python
def quantity(self): """ [int] 订单数量 """ if np.isnan(self._quantity): raise RuntimeError("Quantity of order {} is not supposed to be nan.".format(self.order_id)) return self._quantity
[ "def", "quantity", "(", "self", ")", ":", "if", "np", ".", "isnan", "(", "self", ".", "_quantity", ")", ":", "raise", "RuntimeError", "(", "\"Quantity of order {} is not supposed to be nan.\"", ".", "format", "(", "self", ".", "order_id", ")", ")", "return", "self", ".", "_quantity" ]
[int] 订单数量
[ "[", "int", "]", "订单数量" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/order.py#L156-L162
train
ricequant/rqalpha
rqalpha/model/order.py
Order.filled_quantity
def filled_quantity(self): """ [int] 订单已成交数量 """ if np.isnan(self._filled_quantity): raise RuntimeError("Filled quantity of order {} is not supposed to be nan.".format(self.order_id)) return self._filled_quantity
python
def filled_quantity(self): """ [int] 订单已成交数量 """ if np.isnan(self._filled_quantity): raise RuntimeError("Filled quantity of order {} is not supposed to be nan.".format(self.order_id)) return self._filled_quantity
[ "def", "filled_quantity", "(", "self", ")", ":", "if", "np", ".", "isnan", "(", "self", ".", "_filled_quantity", ")", ":", "raise", "RuntimeError", "(", "\"Filled quantity of order {} is not supposed to be nan.\"", ".", "format", "(", "self", ".", "order_id", ")", ")", "return", "self", ".", "_filled_quantity" ]
[int] 订单已成交数量
[ "[", "int", "]", "订单已成交数量" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/order.py#L200-L206
train
ricequant/rqalpha
rqalpha/model/order.py
Order.frozen_price
def frozen_price(self): """ [float] 冻结价格 """ if np.isnan(self._frozen_price): raise RuntimeError("Frozen price of order {} is not supposed to be nan.".format(self.order_id)) return self._frozen_price
python
def frozen_price(self): """ [float] 冻结价格 """ if np.isnan(self._frozen_price): raise RuntimeError("Frozen price of order {} is not supposed to be nan.".format(self.order_id)) return self._frozen_price
[ "def", "frozen_price", "(", "self", ")", ":", "if", "np", ".", "isnan", "(", "self", ".", "_frozen_price", ")", ":", "raise", "RuntimeError", "(", "\"Frozen price of order {} is not supposed to be nan.\"", ".", "format", "(", "self", ".", "order_id", ")", ")", "return", "self", ".", "_frozen_price" ]
[float] 冻结价格
[ "[", "float", "]", "冻结价格" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/order.py#L244-L250
train
ricequant/rqalpha
rqalpha/model/tick.py
TickObject.datetime
def datetime(self): """ [datetime.datetime] 当前快照数据的时间戳 """ try: dt = self._tick_dict['datetime'] except (KeyError, ValueError): return datetime.datetime.min else: if not isinstance(dt, datetime.datetime): if dt > 10000000000000000: # ms return convert_ms_int_to_datetime(dt) else: return convert_int_to_datetime(dt) return dt
python
def datetime(self): """ [datetime.datetime] 当前快照数据的时间戳 """ try: dt = self._tick_dict['datetime'] except (KeyError, ValueError): return datetime.datetime.min else: if not isinstance(dt, datetime.datetime): if dt > 10000000000000000: # ms return convert_ms_int_to_datetime(dt) else: return convert_int_to_datetime(dt) return dt
[ "def", "datetime", "(", "self", ")", ":", "try", ":", "dt", "=", "self", ".", "_tick_dict", "[", "'datetime'", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "return", "datetime", ".", "datetime", ".", "min", "else", ":", "if", "not", "isinstance", "(", "dt", ",", "datetime", ".", "datetime", ")", ":", "if", "dt", ">", "10000000000000000", ":", "# ms", "return", "convert_ms_int_to_datetime", "(", "dt", ")", "else", ":", "return", "convert_int_to_datetime", "(", "dt", ")", "return", "dt" ]
[datetime.datetime] 当前快照数据的时间戳
[ "[", "datetime", ".", "datetime", "]", "当前快照数据的时间戳" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/tick.py#L44-L58
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/stock_position.py
StockPosition.value_percent
def value_percent(self): """ [float] 获得该持仓的实时市场价值在股票投资组合价值中所占比例,取值范围[0, 1] """ accounts = Environment.get_instance().portfolio.accounts if DEFAULT_ACCOUNT_TYPE.STOCK.name not in accounts: return 0 total_value = accounts[DEFAULT_ACCOUNT_TYPE.STOCK.name].total_value return 0 if total_value == 0 else self.market_value / total_value
python
def value_percent(self): """ [float] 获得该持仓的实时市场价值在股票投资组合价值中所占比例,取值范围[0, 1] """ accounts = Environment.get_instance().portfolio.accounts if DEFAULT_ACCOUNT_TYPE.STOCK.name not in accounts: return 0 total_value = accounts[DEFAULT_ACCOUNT_TYPE.STOCK.name].total_value return 0 if total_value == 0 else self.market_value / total_value
[ "def", "value_percent", "(", "self", ")", ":", "accounts", "=", "Environment", ".", "get_instance", "(", ")", ".", "portfolio", ".", "accounts", "if", "DEFAULT_ACCOUNT_TYPE", ".", "STOCK", ".", "name", "not", "in", "accounts", ":", "return", "0", "total_value", "=", "accounts", "[", "DEFAULT_ACCOUNT_TYPE", ".", "STOCK", ".", "name", "]", ".", "total_value", "return", "0", "if", "total_value", "==", "0", "else", "self", ".", "market_value", "/", "total_value" ]
[float] 获得该持仓的实时市场价值在股票投资组合价值中所占比例,取值范围[0, 1]
[ "[", "float", "]", "获得该持仓的实时市场价值在股票投资组合价值中所占比例,取值范围", "[", "0", "1", "]" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/stock_position.py#L155-L163
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/stock_position.py
StockPosition.is_de_listed
def is_de_listed(self): """ 判断合约是否过期 """ env = Environment.get_instance() instrument = env.get_instrument(self._order_book_id) current_date = env.trading_dt if instrument.de_listed_date is not None: if instrument.de_listed_date.date() > env.config.base.end_date: return False if current_date >= env.data_proxy.get_previous_trading_date(instrument.de_listed_date): return True return False
python
def is_de_listed(self): """ 判断合约是否过期 """ env = Environment.get_instance() instrument = env.get_instrument(self._order_book_id) current_date = env.trading_dt if instrument.de_listed_date is not None: if instrument.de_listed_date.date() > env.config.base.end_date: return False if current_date >= env.data_proxy.get_previous_trading_date(instrument.de_listed_date): return True return False
[ "def", "is_de_listed", "(", "self", ")", ":", "env", "=", "Environment", ".", "get_instance", "(", ")", "instrument", "=", "env", ".", "get_instrument", "(", "self", ".", "_order_book_id", ")", "current_date", "=", "env", ".", "trading_dt", "if", "instrument", ".", "de_listed_date", "is", "not", "None", ":", "if", "instrument", ".", "de_listed_date", ".", "date", "(", ")", ">", "env", ".", "config", ".", "base", ".", "end_date", ":", "return", "False", "if", "current_date", ">=", "env", ".", "data_proxy", ".", "get_previous_trading_date", "(", "instrument", ".", "de_listed_date", ")", ":", "return", "True", "return", "False" ]
判断合约是否过期
[ "判断合约是否过期" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/stock_position.py#L166-L179
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/stock_position.py
StockPosition.bought_value
def bought_value(self): """ [已弃用] """ user_system_log.warn(_(u"[abandon] {} is no longer valid.").format('stock_position.bought_value')) return self._quantity * self._avg_price
python
def bought_value(self): """ [已弃用] """ user_system_log.warn(_(u"[abandon] {} is no longer valid.").format('stock_position.bought_value')) return self._quantity * self._avg_price
[ "def", "bought_value", "(", "self", ")", ":", "user_system_log", ".", "warn", "(", "_", "(", "u\"[abandon] {} is no longer valid.\"", ")", ".", "format", "(", "'stock_position.bought_value'", ")", ")", "return", "self", ".", "_quantity", "*", "self", ".", "_avg_price" ]
[已弃用]
[ "[", "已弃用", "]" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/stock_position.py#L200-L205
train
ricequant/rqalpha
rqalpha/model/booking.py
BookingPosition.trading_pnl
def trading_pnl(self): """ [float] 交易盈亏,策略在当前交易日产生的盈亏中来源于当日成交的部分 """ last_price = self._data_proxy.get_last_price(self._order_book_id) return self._contract_multiplier * (self._trade_quantity * last_price - self._trade_cost)
python
def trading_pnl(self): """ [float] 交易盈亏,策略在当前交易日产生的盈亏中来源于当日成交的部分 """ last_price = self._data_proxy.get_last_price(self._order_book_id) return self._contract_multiplier * (self._trade_quantity * last_price - self._trade_cost)
[ "def", "trading_pnl", "(", "self", ")", ":", "last_price", "=", "self", ".", "_data_proxy", ".", "get_last_price", "(", "self", ".", "_order_book_id", ")", "return", "self", ".", "_contract_multiplier", "*", "(", "self", ".", "_trade_quantity", "*", "last_price", "-", "self", ".", "_trade_cost", ")" ]
[float] 交易盈亏,策略在当前交易日产生的盈亏中来源于当日成交的部分
[ "[", "float", "]", "交易盈亏,策略在当前交易日产生的盈亏中来源于当日成交的部分" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/booking.py#L223-L228
train
ricequant/rqalpha
rqalpha/model/booking.py
BookingPosition.position_pnl
def position_pnl(self): """ [float] 昨仓盈亏,策略在当前交易日产生的盈亏中来源于昨仓的部分 """ last_price = self._data_proxy.get_last_price(self._order_book_id) if self._direction == POSITION_DIRECTION.LONG: price_spread = last_price - self._last_price else: price_spread = self._last_price - last_price return self._logical_old_quantity * self._contract_multiplier * price_spread
python
def position_pnl(self): """ [float] 昨仓盈亏,策略在当前交易日产生的盈亏中来源于昨仓的部分 """ last_price = self._data_proxy.get_last_price(self._order_book_id) if self._direction == POSITION_DIRECTION.LONG: price_spread = last_price - self._last_price else: price_spread = self._last_price - last_price return self._logical_old_quantity * self._contract_multiplier * price_spread
[ "def", "position_pnl", "(", "self", ")", ":", "last_price", "=", "self", ".", "_data_proxy", ".", "get_last_price", "(", "self", ".", "_order_book_id", ")", "if", "self", ".", "_direction", "==", "POSITION_DIRECTION", ".", "LONG", ":", "price_spread", "=", "last_price", "-", "self", ".", "_last_price", "else", ":", "price_spread", "=", "self", ".", "_last_price", "-", "last_price", "return", "self", ".", "_logical_old_quantity", "*", "self", ".", "_contract_multiplier", "*", "price_spread" ]
[float] 昨仓盈亏,策略在当前交易日产生的盈亏中来源于昨仓的部分
[ "[", "float", "]", "昨仓盈亏,策略在当前交易日产生的盈亏中来源于昨仓的部分" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/booking.py#L231-L241
train
ricequant/rqalpha
rqalpha/model/portfolio.py
Portfolio.register_event
def register_event(self): """ 注册事件 """ event_bus = Environment.get_instance().event_bus event_bus.prepend_listener(EVENT.PRE_BEFORE_TRADING, self._pre_before_trading) event_bus.prepend_listener(EVENT.POST_SETTLEMENT, self._post_settlement)
python
def register_event(self): """ 注册事件 """ event_bus = Environment.get_instance().event_bus event_bus.prepend_listener(EVENT.PRE_BEFORE_TRADING, self._pre_before_trading) event_bus.prepend_listener(EVENT.POST_SETTLEMENT, self._post_settlement)
[ "def", "register_event", "(", "self", ")", ":", "event_bus", "=", "Environment", ".", "get_instance", "(", ")", ".", "event_bus", "event_bus", ".", "prepend_listener", "(", "EVENT", ".", "PRE_BEFORE_TRADING", ",", "self", ".", "_pre_before_trading", ")", "event_bus", ".", "prepend_listener", "(", "EVENT", ".", "POST_SETTLEMENT", ",", "self", ".", "_post_settlement", ")" ]
注册事件
[ "注册事件" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L41-L47
train
ricequant/rqalpha
rqalpha/model/portfolio.py
Portfolio.unit_net_value
def unit_net_value(self): """ [float] 实时净值 """ if self._units == 0: return np.nan return self.total_value / self._units
python
def unit_net_value(self): """ [float] 实时净值 """ if self._units == 0: return np.nan return self.total_value / self._units
[ "def", "unit_net_value", "(", "self", ")", ":", "if", "self", ".", "_units", "==", "0", ":", "return", "np", ".", "nan", "return", "self", ".", "total_value", "/", "self", ".", "_units" ]
[float] 实时净值
[ "[", "float", "]", "实时净值" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L119-L125
train
ricequant/rqalpha
rqalpha/model/portfolio.py
Portfolio.daily_returns
def daily_returns(self): """ [float] 当前最新一天的日收益 """ if self._static_unit_net_value == 0: return np.nan return 0 if self._static_unit_net_value == 0 else self.unit_net_value / self._static_unit_net_value - 1
python
def daily_returns(self): """ [float] 当前最新一天的日收益 """ if self._static_unit_net_value == 0: return np.nan return 0 if self._static_unit_net_value == 0 else self.unit_net_value / self._static_unit_net_value - 1
[ "def", "daily_returns", "(", "self", ")", ":", "if", "self", ".", "_static_unit_net_value", "==", "0", ":", "return", "np", ".", "nan", "return", "0", "if", "self", ".", "_static_unit_net_value", "==", "0", "else", "self", ".", "unit_net_value", "/", "self", ".", "_static_unit_net_value", "-", "1" ]
[float] 当前最新一天的日收益
[ "[", "float", "]", "当前最新一天的日收益" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L139-L145
train
ricequant/rqalpha
rqalpha/model/portfolio.py
Portfolio.total_value
def total_value(self): """ [float]总权益 """ return sum(account.total_value for account in six.itervalues(self._accounts))
python
def total_value(self): """ [float]总权益 """ return sum(account.total_value for account in six.itervalues(self._accounts))
[ "def", "total_value", "(", "self", ")", ":", "return", "sum", "(", "account", ".", "total_value", "for", "account", "in", "six", ".", "itervalues", "(", "self", ".", "_accounts", ")", ")" ]
[float]总权益
[ "[", "float", "]", "总权益" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L167-L171
train
ricequant/rqalpha
rqalpha/model/portfolio.py
Portfolio.positions
def positions(self): """ [dict] 持仓 """ if self._mixed_positions is None: self._mixed_positions = MixedPositions(self._accounts) return self._mixed_positions
python
def positions(self): """ [dict] 持仓 """ if self._mixed_positions is None: self._mixed_positions = MixedPositions(self._accounts) return self._mixed_positions
[ "def", "positions", "(", "self", ")", ":", "if", "self", ".", "_mixed_positions", "is", "None", ":", "self", ".", "_mixed_positions", "=", "MixedPositions", "(", "self", ".", "_accounts", ")", "return", "self", ".", "_mixed_positions" ]
[dict] 持仓
[ "[", "dict", "]", "持仓" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L181-L187
train
ricequant/rqalpha
rqalpha/model/portfolio.py
Portfolio.cash
def cash(self): """ [float] 可用资金 """ return sum(account.cash for account in six.itervalues(self._accounts))
python
def cash(self): """ [float] 可用资金 """ return sum(account.cash for account in six.itervalues(self._accounts))
[ "def", "cash", "(", "self", ")", ":", "return", "sum", "(", "account", ".", "cash", "for", "account", "in", "six", ".", "itervalues", "(", "self", ".", "_accounts", ")", ")" ]
[float] 可用资金
[ "[", "float", "]", "可用资金" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L190-L194
train
ricequant/rqalpha
rqalpha/model/portfolio.py
Portfolio.market_value
def market_value(self): """ [float] 市值 """ return sum(account.market_value for account in six.itervalues(self._accounts))
python
def market_value(self): """ [float] 市值 """ return sum(account.market_value for account in six.itervalues(self._accounts))
[ "def", "market_value", "(", "self", ")", ":", "return", "sum", "(", "account", ".", "market_value", "for", "account", "in", "six", ".", "itervalues", "(", "self", ".", "_accounts", ")", ")" ]
[float] 市值
[ "[", "float", "]", "市值" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/portfolio.py#L205-L209
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.buy_holding_pnl
def buy_holding_pnl(self): """ [float] 买方向当日持仓盈亏 """ return (self.last_price - self.buy_avg_holding_price) * self.buy_quantity * self.contract_multiplier
python
def buy_holding_pnl(self): """ [float] 买方向当日持仓盈亏 """ return (self.last_price - self.buy_avg_holding_price) * self.buy_quantity * self.contract_multiplier
[ "def", "buy_holding_pnl", "(", "self", ")", ":", "return", "(", "self", ".", "last_price", "-", "self", ".", "buy_avg_holding_price", ")", "*", "self", ".", "buy_quantity", "*", "self", ".", "contract_multiplier" ]
[float] 买方向当日持仓盈亏
[ "[", "float", "]", "买方向当日持仓盈亏" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L107-L111
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.sell_holding_pnl
def sell_holding_pnl(self): """ [float] 卖方向当日持仓盈亏 """ return (self.sell_avg_holding_price - self.last_price) * self.sell_quantity * self.contract_multiplier
python
def sell_holding_pnl(self): """ [float] 卖方向当日持仓盈亏 """ return (self.sell_avg_holding_price - self.last_price) * self.sell_quantity * self.contract_multiplier
[ "def", "sell_holding_pnl", "(", "self", ")", ":", "return", "(", "self", ".", "sell_avg_holding_price", "-", "self", ".", "last_price", ")", "*", "self", ".", "sell_quantity", "*", "self", ".", "contract_multiplier" ]
[float] 卖方向当日持仓盈亏
[ "[", "float", "]", "卖方向当日持仓盈亏" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L114-L118
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.buy_pnl
def buy_pnl(self): """ [float] 买方向累计盈亏 """ return (self.last_price - self._buy_avg_open_price) * self.buy_quantity * self.contract_multiplier
python
def buy_pnl(self): """ [float] 买方向累计盈亏 """ return (self.last_price - self._buy_avg_open_price) * self.buy_quantity * self.contract_multiplier
[ "def", "buy_pnl", "(", "self", ")", ":", "return", "(", "self", ".", "last_price", "-", "self", ".", "_buy_avg_open_price", ")", "*", "self", ".", "buy_quantity", "*", "self", ".", "contract_multiplier" ]
[float] 买方向累计盈亏
[ "[", "float", "]", "买方向累计盈亏" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L170-L174
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.sell_pnl
def sell_pnl(self): """ [float] 卖方向累计盈亏 """ return (self._sell_avg_open_price - self.last_price) * self.sell_quantity * self.contract_multiplier
python
def sell_pnl(self): """ [float] 卖方向累计盈亏 """ return (self._sell_avg_open_price - self.last_price) * self.sell_quantity * self.contract_multiplier
[ "def", "sell_pnl", "(", "self", ")", ":", "return", "(", "self", ".", "_sell_avg_open_price", "-", "self", ".", "last_price", ")", "*", "self", ".", "sell_quantity", "*", "self", ".", "contract_multiplier" ]
[float] 卖方向累计盈亏
[ "[", "float", "]", "卖方向累计盈亏" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L177-L181
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.buy_open_order_quantity
def buy_open_order_quantity(self): """ [int] 买方向挂单量 """ return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and order.position_effect == POSITION_EFFECT.OPEN)
python
def buy_open_order_quantity(self): """ [int] 买方向挂单量 """ return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and order.position_effect == POSITION_EFFECT.OPEN)
[ "def", "buy_open_order_quantity", "(", "self", ")", ":", "return", "sum", "(", "order", ".", "unfilled_quantity", "for", "order", "in", "self", ".", "open_orders", "if", "order", ".", "side", "==", "SIDE", ".", "BUY", "and", "order", ".", "position_effect", "==", "POSITION_EFFECT", ".", "OPEN", ")" ]
[int] 买方向挂单量
[ "[", "int", "]", "买方向挂单量" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L192-L197
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.sell_open_order_quantity
def sell_open_order_quantity(self): """ [int] 卖方向挂单量 """ return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and order.position_effect == POSITION_EFFECT.OPEN)
python
def sell_open_order_quantity(self): """ [int] 卖方向挂单量 """ return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and order.position_effect == POSITION_EFFECT.OPEN)
[ "def", "sell_open_order_quantity", "(", "self", ")", ":", "return", "sum", "(", "order", ".", "unfilled_quantity", "for", "order", "in", "self", ".", "open_orders", "if", "order", ".", "side", "==", "SIDE", ".", "SELL", "and", "order", ".", "position_effect", "==", "POSITION_EFFECT", ".", "OPEN", ")" ]
[int] 卖方向挂单量
[ "[", "int", "]", "卖方向挂单量" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L200-L205
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.buy_close_order_quantity
def buy_close_order_quantity(self): """ [int] 买方向挂单量 """ return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
python
def buy_close_order_quantity(self): """ [int] 买方向挂单量 """ return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.BUY and order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
[ "def", "buy_close_order_quantity", "(", "self", ")", ":", "return", "sum", "(", "order", ".", "unfilled_quantity", "for", "order", "in", "self", ".", "open_orders", "if", "order", ".", "side", "==", "SIDE", ".", "BUY", "and", "order", ".", "position_effect", "in", "[", "POSITION_EFFECT", ".", "CLOSE", ",", "POSITION_EFFECT", ".", "CLOSE_TODAY", "]", ")" ]
[int] 买方向挂单量
[ "[", "int", "]", "买方向挂单量" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L208-L213
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.sell_close_order_quantity
def sell_close_order_quantity(self): """ [int] 卖方向挂单量 """ return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
python
def sell_close_order_quantity(self): """ [int] 卖方向挂单量 """ return sum(order.unfilled_quantity for order in self.open_orders if order.side == SIDE.SELL and order.position_effect in [POSITION_EFFECT.CLOSE, POSITION_EFFECT.CLOSE_TODAY])
[ "def", "sell_close_order_quantity", "(", "self", ")", ":", "return", "sum", "(", "order", ".", "unfilled_quantity", "for", "order", "in", "self", ".", "open_orders", "if", "order", ".", "side", "==", "SIDE", ".", "SELL", "and", "order", ".", "position_effect", "in", "[", "POSITION_EFFECT", ".", "CLOSE", ",", "POSITION_EFFECT", ".", "CLOSE_TODAY", "]", ")" ]
[int] 卖方向挂单量
[ "[", "int", "]", "卖方向挂单量" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L216-L221
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.buy_avg_holding_price
def buy_avg_holding_price(self): """ [float] 买方向持仓均价 """ return 0 if self.buy_quantity == 0 else self._buy_holding_cost / self.buy_quantity / self.contract_multiplier
python
def buy_avg_holding_price(self): """ [float] 买方向持仓均价 """ return 0 if self.buy_quantity == 0 else self._buy_holding_cost / self.buy_quantity / self.contract_multiplier
[ "def", "buy_avg_holding_price", "(", "self", ")", ":", "return", "0", "if", "self", ".", "buy_quantity", "==", "0", "else", "self", ".", "_buy_holding_cost", "/", "self", ".", "buy_quantity", "/", "self", ".", "contract_multiplier" ]
[float] 买方向持仓均价
[ "[", "float", "]", "买方向持仓均价" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L322-L326
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.sell_avg_holding_price
def sell_avg_holding_price(self): """ [float] 卖方向持仓均价 """ return 0 if self.sell_quantity == 0 else self._sell_holding_cost / self.sell_quantity / self.contract_multiplier
python
def sell_avg_holding_price(self): """ [float] 卖方向持仓均价 """ return 0 if self.sell_quantity == 0 else self._sell_holding_cost / self.sell_quantity / self.contract_multiplier
[ "def", "sell_avg_holding_price", "(", "self", ")", ":", "return", "0", "if", "self", ".", "sell_quantity", "==", "0", "else", "self", ".", "_sell_holding_cost", "/", "self", ".", "sell_quantity", "/", "self", ".", "contract_multiplier" ]
[float] 卖方向持仓均价
[ "[", "float", "]", "卖方向持仓均价" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L329-L333
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.is_de_listed
def is_de_listed(self): """ 判断合约是否过期 """ instrument = Environment.get_instance().get_instrument(self._order_book_id) current_date = Environment.get_instance().trading_dt if instrument.de_listed_date is not None and current_date >= instrument.de_listed_date: return True return False
python
def is_de_listed(self): """ 判断合约是否过期 """ instrument = Environment.get_instance().get_instrument(self._order_book_id) current_date = Environment.get_instance().trading_dt if instrument.de_listed_date is not None and current_date >= instrument.de_listed_date: return True return False
[ "def", "is_de_listed", "(", "self", ")", ":", "instrument", "=", "Environment", ".", "get_instance", "(", ")", ".", "get_instrument", "(", "self", ".", "_order_book_id", ")", "current_date", "=", "Environment", ".", "get_instance", "(", ")", ".", "trading_dt", "if", "instrument", ".", "de_listed_date", "is", "not", "None", "and", "current_date", ">=", "instrument", ".", "de_listed_date", ":", "return", "True", "return", "False" ]
判断合约是否过期
[ "判断合约是否过期" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L372-L380
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition.apply_trade
def apply_trade(self, trade): """ 应用成交,并计算交易产生的现金变动。 开仓: delta_cash = -1 * margin = -1 * quantity * contract_multiplier * price * margin_rate 平仓: delta_cash = old_margin - margin + delta_realized_pnl = (sum of (cost_price * quantity) of closed trade) * contract_multiplier * margin_rate + delta_realized_pnl :param trade: rqalpha.model.trade.Trade :return: float """ # close_trade: delta_cash = old_margin - margin + delta_realized_pnl trade_quantity = trade.last_quantity if trade.side == SIDE.BUY: if trade.position_effect == POSITION_EFFECT.OPEN: self._buy_avg_open_price = (self._buy_avg_open_price * self.buy_quantity + trade_quantity * trade.last_price) / (self.buy_quantity + trade_quantity) self._buy_transaction_cost += trade.transaction_cost self._buy_today_holding_list.insert(0, (trade.last_price, trade_quantity)) return -1 * self._margin_of(trade_quantity, trade.last_price) else: old_margin = self.margin self._sell_transaction_cost += trade.transaction_cost delta_realized_pnl = self._close_holding(trade) self._sell_realized_pnl += delta_realized_pnl return old_margin - self.margin + delta_realized_pnl else: if trade.position_effect == POSITION_EFFECT.OPEN: self._sell_avg_open_price = (self._sell_avg_open_price * self.sell_quantity + trade_quantity * trade.last_price) / (self.sell_quantity + trade_quantity) self._sell_transaction_cost += trade.transaction_cost self._sell_today_holding_list.insert(0, (trade.last_price, trade_quantity)) return -1 * self._margin_of(trade_quantity, trade.last_price) else: old_margin = self.margin self._buy_transaction_cost += trade.transaction_cost delta_realized_pnl = self._close_holding(trade) self._buy_realized_pnl += delta_realized_pnl return old_margin - self.margin + delta_realized_pnl
python
def apply_trade(self, trade): """ 应用成交,并计算交易产生的现金变动。 开仓: delta_cash = -1 * margin = -1 * quantity * contract_multiplier * price * margin_rate 平仓: delta_cash = old_margin - margin + delta_realized_pnl = (sum of (cost_price * quantity) of closed trade) * contract_multiplier * margin_rate + delta_realized_pnl :param trade: rqalpha.model.trade.Trade :return: float """ # close_trade: delta_cash = old_margin - margin + delta_realized_pnl trade_quantity = trade.last_quantity if trade.side == SIDE.BUY: if trade.position_effect == POSITION_EFFECT.OPEN: self._buy_avg_open_price = (self._buy_avg_open_price * self.buy_quantity + trade_quantity * trade.last_price) / (self.buy_quantity + trade_quantity) self._buy_transaction_cost += trade.transaction_cost self._buy_today_holding_list.insert(0, (trade.last_price, trade_quantity)) return -1 * self._margin_of(trade_quantity, trade.last_price) else: old_margin = self.margin self._sell_transaction_cost += trade.transaction_cost delta_realized_pnl = self._close_holding(trade) self._sell_realized_pnl += delta_realized_pnl return old_margin - self.margin + delta_realized_pnl else: if trade.position_effect == POSITION_EFFECT.OPEN: self._sell_avg_open_price = (self._sell_avg_open_price * self.sell_quantity + trade_quantity * trade.last_price) / (self.sell_quantity + trade_quantity) self._sell_transaction_cost += trade.transaction_cost self._sell_today_holding_list.insert(0, (trade.last_price, trade_quantity)) return -1 * self._margin_of(trade_quantity, trade.last_price) else: old_margin = self.margin self._buy_transaction_cost += trade.transaction_cost delta_realized_pnl = self._close_holding(trade) self._buy_realized_pnl += delta_realized_pnl return old_margin - self.margin + delta_realized_pnl
[ "def", "apply_trade", "(", "self", ",", "trade", ")", ":", "# close_trade: delta_cash = old_margin - margin + delta_realized_pnl", "trade_quantity", "=", "trade", ".", "last_quantity", "if", "trade", ".", "side", "==", "SIDE", ".", "BUY", ":", "if", "trade", ".", "position_effect", "==", "POSITION_EFFECT", ".", "OPEN", ":", "self", ".", "_buy_avg_open_price", "=", "(", "self", ".", "_buy_avg_open_price", "*", "self", ".", "buy_quantity", "+", "trade_quantity", "*", "trade", ".", "last_price", ")", "/", "(", "self", ".", "buy_quantity", "+", "trade_quantity", ")", "self", ".", "_buy_transaction_cost", "+=", "trade", ".", "transaction_cost", "self", ".", "_buy_today_holding_list", ".", "insert", "(", "0", ",", "(", "trade", ".", "last_price", ",", "trade_quantity", ")", ")", "return", "-", "1", "*", "self", ".", "_margin_of", "(", "trade_quantity", ",", "trade", ".", "last_price", ")", "else", ":", "old_margin", "=", "self", ".", "margin", "self", ".", "_sell_transaction_cost", "+=", "trade", ".", "transaction_cost", "delta_realized_pnl", "=", "self", ".", "_close_holding", "(", "trade", ")", "self", ".", "_sell_realized_pnl", "+=", "delta_realized_pnl", "return", "old_margin", "-", "self", ".", "margin", "+", "delta_realized_pnl", "else", ":", "if", "trade", ".", "position_effect", "==", "POSITION_EFFECT", ".", "OPEN", ":", "self", ".", "_sell_avg_open_price", "=", "(", "self", ".", "_sell_avg_open_price", "*", "self", ".", "sell_quantity", "+", "trade_quantity", "*", "trade", ".", "last_price", ")", "/", "(", "self", ".", "sell_quantity", "+", "trade_quantity", ")", "self", ".", "_sell_transaction_cost", "+=", "trade", ".", "transaction_cost", "self", ".", "_sell_today_holding_list", ".", "insert", "(", "0", ",", "(", "trade", ".", "last_price", ",", "trade_quantity", ")", ")", "return", "-", "1", "*", "self", ".", "_margin_of", "(", "trade_quantity", ",", "trade", ".", "last_price", ")", "else", ":", "old_margin", "=", "self", ".", "margin", "self", ".", 
"_buy_transaction_cost", "+=", "trade", ".", "transaction_cost", "delta_realized_pnl", "=", "self", ".", "_close_holding", "(", "trade", ")", "self", ".", "_buy_realized_pnl", "+=", "delta_realized_pnl", "return", "old_margin", "-", "self", ".", "margin", "+", "delta_realized_pnl" ]
应用成交,并计算交易产生的现金变动。 开仓: delta_cash = -1 * margin = -1 * quantity * contract_multiplier * price * margin_rate 平仓: delta_cash = old_margin - margin + delta_realized_pnl = (sum of (cost_price * quantity) of closed trade) * contract_multiplier * margin_rate + delta_realized_pnl :param trade: rqalpha.model.trade.Trade :return: float
[ "应用成交,并计算交易产生的现金变动。" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L411-L455
train
ricequant/rqalpha
rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py
FuturePosition._close_holding
def _close_holding(self, trade): """ 应用平仓,并计算平仓盈亏 买平: delta_realized_pnl = sum of ((trade_price - cost_price)* quantity) of closed trades * contract_multiplier 卖平: delta_realized_pnl = sum of ((cost_price - trade_price)* quantity) of closed trades * contract_multiplier :param trade: rqalpha.model.trade.Trade :return: float """ left_quantity = trade.last_quantity delta = 0 if trade.side == SIDE.BUY: # 先平昨仓 if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._sell_old_holding_list) != 0: old_price, old_quantity = self._sell_old_holding_list.pop() if old_quantity > left_quantity: consumed_quantity = left_quantity self._sell_old_holding_list = [(old_price, old_quantity - left_quantity)] else: consumed_quantity = old_quantity left_quantity -= consumed_quantity delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity) # 再平今仓 while True: if left_quantity <= 0: break oldest_price, oldest_quantity = self._sell_today_holding_list.pop() if oldest_quantity > left_quantity: consumed_quantity = left_quantity self._sell_today_holding_list.append((oldest_price, oldest_quantity - left_quantity)) else: consumed_quantity = oldest_quantity left_quantity -= consumed_quantity delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity) else: # 先平昨仓 if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._buy_old_holding_list) != 0: old_price, old_quantity = self._buy_old_holding_list.pop() if old_quantity > left_quantity: consumed_quantity = left_quantity self._buy_old_holding_list = [(old_price, old_quantity - left_quantity)] else: consumed_quantity = old_quantity left_quantity -= consumed_quantity delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity) # 再平今仓 while True: if left_quantity <= 0: break oldest_price, oldest_quantity = self._buy_today_holding_list.pop() if oldest_quantity > left_quantity: consumed_quantity = left_quantity 
self._buy_today_holding_list.append((oldest_price, oldest_quantity - left_quantity)) left_quantity = 0 else: consumed_quantity = oldest_quantity left_quantity -= consumed_quantity delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity) return delta
python
def _close_holding(self, trade): """ 应用平仓,并计算平仓盈亏 买平: delta_realized_pnl = sum of ((trade_price - cost_price)* quantity) of closed trades * contract_multiplier 卖平: delta_realized_pnl = sum of ((cost_price - trade_price)* quantity) of closed trades * contract_multiplier :param trade: rqalpha.model.trade.Trade :return: float """ left_quantity = trade.last_quantity delta = 0 if trade.side == SIDE.BUY: # 先平昨仓 if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._sell_old_holding_list) != 0: old_price, old_quantity = self._sell_old_holding_list.pop() if old_quantity > left_quantity: consumed_quantity = left_quantity self._sell_old_holding_list = [(old_price, old_quantity - left_quantity)] else: consumed_quantity = old_quantity left_quantity -= consumed_quantity delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity) # 再平今仓 while True: if left_quantity <= 0: break oldest_price, oldest_quantity = self._sell_today_holding_list.pop() if oldest_quantity > left_quantity: consumed_quantity = left_quantity self._sell_today_holding_list.append((oldest_price, oldest_quantity - left_quantity)) else: consumed_quantity = oldest_quantity left_quantity -= consumed_quantity delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity) else: # 先平昨仓 if trade.position_effect == POSITION_EFFECT.CLOSE and len(self._buy_old_holding_list) != 0: old_price, old_quantity = self._buy_old_holding_list.pop() if old_quantity > left_quantity: consumed_quantity = left_quantity self._buy_old_holding_list = [(old_price, old_quantity - left_quantity)] else: consumed_quantity = old_quantity left_quantity -= consumed_quantity delta += self._cal_realized_pnl(old_price, trade.last_price, trade.side, consumed_quantity) # 再平今仓 while True: if left_quantity <= 0: break oldest_price, oldest_quantity = self._buy_today_holding_list.pop() if oldest_quantity > left_quantity: consumed_quantity = left_quantity 
self._buy_today_holding_list.append((oldest_price, oldest_quantity - left_quantity)) left_quantity = 0 else: consumed_quantity = oldest_quantity left_quantity -= consumed_quantity delta += self._cal_realized_pnl(oldest_price, trade.last_price, trade.side, consumed_quantity) return delta
[ "def", "_close_holding", "(", "self", ",", "trade", ")", ":", "left_quantity", "=", "trade", ".", "last_quantity", "delta", "=", "0", "if", "trade", ".", "side", "==", "SIDE", ".", "BUY", ":", "# 先平昨仓", "if", "trade", ".", "position_effect", "==", "POSITION_EFFECT", ".", "CLOSE", "and", "len", "(", "self", ".", "_sell_old_holding_list", ")", "!=", "0", ":", "old_price", ",", "old_quantity", "=", "self", ".", "_sell_old_holding_list", ".", "pop", "(", ")", "if", "old_quantity", ">", "left_quantity", ":", "consumed_quantity", "=", "left_quantity", "self", ".", "_sell_old_holding_list", "=", "[", "(", "old_price", ",", "old_quantity", "-", "left_quantity", ")", "]", "else", ":", "consumed_quantity", "=", "old_quantity", "left_quantity", "-=", "consumed_quantity", "delta", "+=", "self", ".", "_cal_realized_pnl", "(", "old_price", ",", "trade", ".", "last_price", ",", "trade", ".", "side", ",", "consumed_quantity", ")", "# 再平今仓", "while", "True", ":", "if", "left_quantity", "<=", "0", ":", "break", "oldest_price", ",", "oldest_quantity", "=", "self", ".", "_sell_today_holding_list", ".", "pop", "(", ")", "if", "oldest_quantity", ">", "left_quantity", ":", "consumed_quantity", "=", "left_quantity", "self", ".", "_sell_today_holding_list", ".", "append", "(", "(", "oldest_price", ",", "oldest_quantity", "-", "left_quantity", ")", ")", "else", ":", "consumed_quantity", "=", "oldest_quantity", "left_quantity", "-=", "consumed_quantity", "delta", "+=", "self", ".", "_cal_realized_pnl", "(", "oldest_price", ",", "trade", ".", "last_price", ",", "trade", ".", "side", ",", "consumed_quantity", ")", "else", ":", "# 先平昨仓", "if", "trade", ".", "position_effect", "==", "POSITION_EFFECT", ".", "CLOSE", "and", "len", "(", "self", ".", "_buy_old_holding_list", ")", "!=", "0", ":", "old_price", ",", "old_quantity", "=", "self", ".", "_buy_old_holding_list", ".", "pop", "(", ")", "if", "old_quantity", ">", "left_quantity", ":", "consumed_quantity", "=", "left_quantity", "self", ".", 
"_buy_old_holding_list", "=", "[", "(", "old_price", ",", "old_quantity", "-", "left_quantity", ")", "]", "else", ":", "consumed_quantity", "=", "old_quantity", "left_quantity", "-=", "consumed_quantity", "delta", "+=", "self", ".", "_cal_realized_pnl", "(", "old_price", ",", "trade", ".", "last_price", ",", "trade", ".", "side", ",", "consumed_quantity", ")", "# 再平今仓", "while", "True", ":", "if", "left_quantity", "<=", "0", ":", "break", "oldest_price", ",", "oldest_quantity", "=", "self", ".", "_buy_today_holding_list", ".", "pop", "(", ")", "if", "oldest_quantity", ">", "left_quantity", ":", "consumed_quantity", "=", "left_quantity", "self", ".", "_buy_today_holding_list", ".", "append", "(", "(", "oldest_price", ",", "oldest_quantity", "-", "left_quantity", ")", ")", "left_quantity", "=", "0", "else", ":", "consumed_quantity", "=", "oldest_quantity", "left_quantity", "-=", "consumed_quantity", "delta", "+=", "self", ".", "_cal_realized_pnl", "(", "oldest_price", ",", "trade", ".", "last_price", ",", "trade", ".", "side", ",", "consumed_quantity", ")", "return", "delta" ]
应用平仓,并计算平仓盈亏 买平: delta_realized_pnl = sum of ((trade_price - cost_price)* quantity) of closed trades * contract_multiplier 卖平: delta_realized_pnl = sum of ((cost_price - trade_price)* quantity) of closed trades * contract_multiplier :param trade: rqalpha.model.trade.Trade :return: float
[ "应用平仓,并计算平仓盈亏" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/mod/rqalpha_mod_sys_accounts/position_model/future_position.py#L457-L520
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.sector_code
def sector_code(self): """ [str] 板块缩写代码,全球通用标准定义(股票专用) """ try: return self.__dict__["sector_code"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'sector_code' ".format(self.order_book_id) )
python
def sector_code(self): """ [str] 板块缩写代码,全球通用标准定义(股票专用) """ try: return self.__dict__["sector_code"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'sector_code' ".format(self.order_book_id) )
[ "def", "sector_code", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"sector_code\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'sector_code' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 板块缩写代码,全球通用标准定义(股票专用)
[ "[", "str", "]", "板块缩写代码,全球通用标准定义(股票专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L116-L125
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.sector_code_name
def sector_code_name(self): """ [str] 以当地语言为标准的板块代码名(股票专用) """ try: return self.__dict__["sector_code_name"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'sector_code_name' ".format(self.order_book_id) )
python
def sector_code_name(self): """ [str] 以当地语言为标准的板块代码名(股票专用) """ try: return self.__dict__["sector_code_name"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'sector_code_name' ".format(self.order_book_id) )
[ "def", "sector_code_name", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"sector_code_name\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'sector_code_name' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 以当地语言为标准的板块代码名(股票专用)
[ "[", "str", "]", "以当地语言为标准的板块代码名(股票专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L128-L137
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.industry_code
def industry_code(self): """ [str] 国民经济行业分类代码,具体可参考“Industry列表” (股票专用) """ try: return self.__dict__["industry_code"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'industry_code' ".format(self.order_book_id) )
python
def industry_code(self): """ [str] 国民经济行业分类代码,具体可参考“Industry列表” (股票专用) """ try: return self.__dict__["industry_code"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'industry_code' ".format(self.order_book_id) )
[ "def", "industry_code", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"industry_code\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'industry_code' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 国民经济行业分类代码,具体可参考“Industry列表” (股票专用)
[ "[", "str", "]", "国民经济行业分类代码,具体可参考“Industry列表”", "(股票专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L140-L149
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.industry_name
def industry_name(self): """ [str] 国民经济行业分类名称(股票专用) """ try: return self.__dict__["industry_name"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'industry_name' ".format(self.order_book_id) )
python
def industry_name(self): """ [str] 国民经济行业分类名称(股票专用) """ try: return self.__dict__["industry_name"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'industry_name' ".format(self.order_book_id) )
[ "def", "industry_name", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"industry_name\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'industry_name' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 国民经济行业分类名称(股票专用)
[ "[", "str", "]", "国民经济行业分类名称(股票专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L152-L161
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.concept_names
def concept_names(self): """ [str] 概念股分类,例如:’铁路基建’,’基金重仓’等(股票专用) """ try: return self.__dict__["concept_names"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'concept_names' ".format(self.order_book_id) )
python
def concept_names(self): """ [str] 概念股分类,例如:’铁路基建’,’基金重仓’等(股票专用) """ try: return self.__dict__["concept_names"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'concept_names' ".format(self.order_book_id) )
[ "def", "concept_names", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"concept_names\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'concept_names' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 概念股分类,例如:’铁路基建’,’基金重仓’等(股票专用)
[ "[", "str", "]", "概念股分类,例如:’铁路基建’,’基金重仓’等(股票专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L164-L173
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.board_type
def board_type(self): """ [str] 板块类别,’MainBoard’ - 主板,’GEM’ - 创业板(股票专用) """ try: return self.__dict__["board_type"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'board_type' ".format(self.order_book_id) )
python
def board_type(self): """ [str] 板块类别,’MainBoard’ - 主板,’GEM’ - 创业板(股票专用) """ try: return self.__dict__["board_type"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'board_type' ".format(self.order_book_id) )
[ "def", "board_type", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"board_type\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'board_type' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 板块类别,’MainBoard’ - 主板,’GEM’ - 创业板(股票专用)
[ "[", "str", "]", "板块类别,’MainBoard’", "-", "主板", "’GEM’", "-", "创业板(股票专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L176-L185
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.status
def status(self): """ [str] 合约状态。’Active’ - 正常上市, ‘Delisted’ - 终止上市, ‘TemporarySuspended’ - 暂停上市, ‘PreIPO’ - 发行配售期间, ‘FailIPO’ - 发行失败(股票专用) """ try: return self.__dict__["status"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'status' ".format(self.order_book_id) )
python
def status(self): """ [str] 合约状态。’Active’ - 正常上市, ‘Delisted’ - 终止上市, ‘TemporarySuspended’ - 暂停上市, ‘PreIPO’ - 发行配售期间, ‘FailIPO’ - 发行失败(股票专用) """ try: return self.__dict__["status"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'status' ".format(self.order_book_id) )
[ "def", "status", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"status\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'status' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 合约状态。’Active’ - 正常上市, ‘Delisted’ - 终止上市, ‘TemporarySuspended’ - 暂停上市, ‘PreIPO’ - 发行配售期间, ‘FailIPO’ - 发行失败(股票专用)
[ "[", "str", "]", "合约状态。’Active’", "-", "正常上市", "‘Delisted’", "-", "终止上市", "‘TemporarySuspended’", "-", "暂停上市", "‘PreIPO’", "-", "发行配售期间", "‘FailIPO’", "-", "发行失败(股票专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L188-L198
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.special_type
def special_type(self): """ [str] 特别处理状态。’Normal’ - 正常上市, ‘ST’ - ST处理, ‘StarST’ - *ST代表该股票正在接受退市警告, ‘PT’ - 代表该股票连续3年收入为负,将被暂停交易, ‘Other’ - 其他(股票专用) """ try: return self.__dict__["special_type"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'special_type' ".format(self.order_book_id) )
python
def special_type(self): """ [str] 特别处理状态。’Normal’ - 正常上市, ‘ST’ - ST处理, ‘StarST’ - *ST代表该股票正在接受退市警告, ‘PT’ - 代表该股票连续3年收入为负,将被暂停交易, ‘Other’ - 其他(股票专用) """ try: return self.__dict__["special_type"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'special_type' ".format(self.order_book_id) )
[ "def", "special_type", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"special_type\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'special_type' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 特别处理状态。’Normal’ - 正常上市, ‘ST’ - ST处理, ‘StarST’ - *ST代表该股票正在接受退市警告, ‘PT’ - 代表该股票连续3年收入为负,将被暂停交易, ‘Other’ - 其他(股票专用)
[ "[", "str", "]", "特别处理状态。’Normal’", "-", "正常上市", "‘ST’", "-", "ST处理", "‘StarST’", "-", "*", "ST代表该股票正在接受退市警告", "‘PT’", "-", "代表该股票连续3年收入为负,将被暂停交易", "‘Other’", "-", "其他(股票专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L201-L211
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.contract_multiplier
def contract_multiplier(self): """ [float] 合约乘数,例如沪深300股指期货的乘数为300.0(期货专用) """ try: return self.__dict__["contract_multiplier"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'contract_multiplier' ".format(self.order_book_id) )
python
def contract_multiplier(self): """ [float] 合约乘数,例如沪深300股指期货的乘数为300.0(期货专用) """ try: return self.__dict__["contract_multiplier"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'contract_multiplier' ".format(self.order_book_id) )
[ "def", "contract_multiplier", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"contract_multiplier\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'contract_multiplier' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[float] 合约乘数,例如沪深300股指期货的乘数为300.0(期货专用)
[ "[", "float", "]", "合约乘数,例如沪深300股指期货的乘数为300", ".", "0(期货专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L214-L223
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.margin_rate
def margin_rate(self): """ [float] 合约最低保证金率(期货专用) """ try: return self.__dict__["margin_rate"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'margin_rate' ".format(self.order_book_id) )
python
def margin_rate(self): """ [float] 合约最低保证金率(期货专用) """ try: return self.__dict__["margin_rate"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'margin_rate' ".format(self.order_book_id) )
[ "def", "margin_rate", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"margin_rate\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'margin_rate' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[float] 合约最低保证金率(期货专用)
[ "[", "float", "]", "合约最低保证金率(期货专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L226-L235
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.underlying_order_book_id
def underlying_order_book_id(self): """ [str] 合约标的代码,目前除股指期货(IH, IF, IC)之外的期货合约,这一字段全部为’null’(期货专用) """ try: return self.__dict__["underlying_order_book_id"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'underlying_order_book_id' ".format(self.order_book_id) )
python
def underlying_order_book_id(self): """ [str] 合约标的代码,目前除股指期货(IH, IF, IC)之外的期货合约,这一字段全部为’null’(期货专用) """ try: return self.__dict__["underlying_order_book_id"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'underlying_order_book_id' ".format(self.order_book_id) )
[ "def", "underlying_order_book_id", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"underlying_order_book_id\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'underlying_order_book_id' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 合约标的代码,目前除股指期货(IH, IF, IC)之外的期货合约,这一字段全部为’null’(期货专用)
[ "[", "str", "]", "合约标的代码,目前除股指期货", "(", "IH", "IF", "IC", ")", "之外的期货合约,这一字段全部为’null’(期货专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L238-L247
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.underlying_symbol
def underlying_symbol(self): """ [str] 合约标的代码,目前除股指期货(IH, IF, IC)之外的期货合约,这一字段全部为’null’(期货专用) """ try: return self.__dict__["underlying_symbol"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'underlying_symbol' ".format(self.order_book_id) )
python
def underlying_symbol(self): """ [str] 合约标的代码,目前除股指期货(IH, IF, IC)之外的期货合约,这一字段全部为’null’(期货专用) """ try: return self.__dict__["underlying_symbol"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'underlying_symbol' ".format(self.order_book_id) )
[ "def", "underlying_symbol", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"underlying_symbol\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'underlying_symbol' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 合约标的代码,目前除股指期货(IH, IF, IC)之外的期货合约,这一字段全部为’null’(期货专用)
[ "[", "str", "]", "合约标的代码,目前除股指期货", "(", "IH", "IF", "IC", ")", "之外的期货合约,这一字段全部为’null’(期货专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L250-L259
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.maturity_date
def maturity_date(self): """ [datetime] 期货到期日。主力连续合约与指数连续合约都为 datetime(2999, 12, 31)(期货专用) """ try: return self.__dict__["maturity_date"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'maturity_date' ".format(self.order_book_id) )
python
def maturity_date(self): """ [datetime] 期货到期日。主力连续合约与指数连续合约都为 datetime(2999, 12, 31)(期货专用) """ try: return self.__dict__["maturity_date"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'maturity_date' ".format(self.order_book_id) )
[ "def", "maturity_date", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"maturity_date\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'maturity_date' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[datetime] 期货到期日。主力连续合约与指数连续合约都为 datetime(2999, 12, 31)(期货专用)
[ "[", "datetime", "]", "期货到期日。主力连续合约与指数连续合约都为", "datetime", "(", "2999", "12", "31", ")", "(期货专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L262-L271
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.settlement_method
def settlement_method(self): """ [str] 交割方式,’CashSettlementRequired’ - 现金交割, ‘PhysicalSettlementRequired’ - 实物交割(期货专用) """ try: return self.__dict__["settlement_method"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'settlement_method' ".format(self.order_book_id) )
python
def settlement_method(self): """ [str] 交割方式,’CashSettlementRequired’ - 现金交割, ‘PhysicalSettlementRequired’ - 实物交割(期货专用) """ try: return self.__dict__["settlement_method"] except (KeyError, ValueError): raise AttributeError( "Instrument(order_book_id={}) has no attribute 'settlement_method' ".format(self.order_book_id) )
[ "def", "settlement_method", "(", "self", ")", ":", "try", ":", "return", "self", ".", "__dict__", "[", "\"settlement_method\"", "]", "except", "(", "KeyError", ",", "ValueError", ")", ":", "raise", "AttributeError", "(", "\"Instrument(order_book_id={}) has no attribute 'settlement_method' \"", ".", "format", "(", "self", ".", "order_book_id", ")", ")" ]
[str] 交割方式,’CashSettlementRequired’ - 现金交割, ‘PhysicalSettlementRequired’ - 实物交割(期货专用)
[ "[", "str", "]", "交割方式,’CashSettlementRequired’", "-", "现金交割", "‘PhysicalSettlementRequired’", "-", "实物交割(期货专用)" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L274-L283
train
ricequant/rqalpha
rqalpha/model/instrument.py
Instrument.listing
def listing(self): """ [bool] 该合约当前日期是否在交易 """ now = Environment.get_instance().calendar_dt return self.listed_date <= now <= self.de_listed_date
python
def listing(self): """ [bool] 该合约当前日期是否在交易 """ now = Environment.get_instance().calendar_dt return self.listed_date <= now <= self.de_listed_date
[ "def", "listing", "(", "self", ")", ":", "now", "=", "Environment", ".", "get_instance", "(", ")", ".", "calendar_dt", "return", "self", ".", "listed_date", "<=", "now", "<=", "self", ".", "de_listed_date" ]
[bool] 该合约当前日期是否在交易
[ "[", "bool", "]", "该合约当前日期是否在交易" ]
ac40a62d4e7eca9494b4d0a14f46facf5616820c
https://github.com/ricequant/rqalpha/blob/ac40a62d4e7eca9494b4d0a14f46facf5616820c/rqalpha/model/instrument.py#L286-L292
train