"
+ ],
+ "text/plain": [
+ "GridSearchCV(cv=5,\n",
+ " estimator=Pipeline(steps=[('indobert', IndoBertEncoder()),\n",
+ " ('clf',\n",
+ " LogisticRegression(max_iter=1000,\n",
+ " n_jobs=-1))]),\n",
+ " n_jobs=-1,\n",
+ " param_grid={'clf__C': [0.1, 1.0, 5.0],\n",
+ " 'clf__class_weight': [None, 'balanced']},\n",
+ " verbose=2)"
+ ]
+ },
+ "execution_count": 31,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "grid_logreg_indobert.fit(X_train, y_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "id": "dab3db37",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Best params (LogReg): {'clf__C': 5.0, 'clf__class_weight': 'balanced'}\n",
+ "Best CV score (LogReg): 0.7617846971710651\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Best params (LogReg):\", grid_logreg_indobert.best_params_)\n",
+ "print(\"Best CV score (LogReg):\", grid_logreg_indobert.best_score_)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 33,
+ "id": "88c544aa",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "best_logreg_indobert = grid_logreg_indobert.best_estimator_"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 34,
+ "id": "0f7ad22c",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Test Accuracy (LogReg IndoBERT): 0.767\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "y_pred_logreg_indobert = best_logreg_indobert.predict(X_test)\n",
+ "acc_logreg_indobert = accuracy_score(y_test, y_pred_logreg_indobert)\n",
+ "print(f\"Test Accuracy (LogReg IndoBERT): {acc_logreg_indobert:.3f}\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 35,
+ "id": "bfd79866",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Classification Report (LogReg IndoBERT):\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " akses_repository 0.77 0.77 0.77 13\n",
+ "cari_buku_isbn_callnumber 0.92 0.85 0.88 13\n",
+ " cari_buku_judul 0.56 0.77 0.65 13\n",
+ " cari_buku_penulis 0.67 0.77 0.71 13\n",
+ " cari_buku_topik 0.75 0.46 0.57 13\n",
+ " cari_rekomendasi 0.77 0.77 0.77 13\n",
+ " cek_ketersediaan_buku 0.70 0.54 0.61 13\n",
+ " donasi_buku 0.80 0.92 0.86 13\n",
+ " info_denda 1.00 0.85 0.92 13\n",
+ " jam_buka 0.77 0.77 0.77 13\n",
+ " lainnya 0.62 0.77 0.69 13\n",
+ " layanan_ejournal_ebook 0.62 0.62 0.62 13\n",
+ " layanan_ruang_diskusi 0.86 0.92 0.89 13\n",
+ " layanan_turnitin 0.92 0.92 0.92 13\n",
+ " lokasi_buku_rak 0.67 0.77 0.71 13\n",
+ " lokasi_perpustakaan 1.00 0.92 0.96 13\n",
+ " panduan_peminjaman 0.62 0.62 0.62 13\n",
+ " panduan_pengembalian 0.67 0.92 0.77 13\n",
+ " panduan_perpanjangan 0.89 0.62 0.73 13\n",
+ " salam 0.77 0.71 0.74 14\n",
+ " tanya_fungsi_mlibbot 1.00 0.92 0.96 13\n",
+ " tata_tertib 0.82 0.69 0.75 13\n",
+ "\n",
+ " accuracy 0.77 287\n",
+ " macro avg 0.78 0.77 0.77 287\n",
+ " weighted avg 0.78 0.77 0.77 287\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Classification Report (LogReg IndoBERT):\")\n",
+ "print(classification_report(y_test, y_pred_logreg_indobert))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 36,
+ "id": "3adb28b8",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "Analisis Intent Paling Sulit (LogReg IndoBERT):\n",
+ " Intent F1-Score Precision Recall TP FP FN\n",
+ " cari_buku_topik 0.5714 0.7500 0.4615 6 2 7\n",
+ " cek_ketersediaan_buku 0.6087 0.7000 0.5385 7 3 6\n",
+ "layanan_ejournal_ebook 0.6154 0.6154 0.6154 8 5 5\n",
+ " panduan_peminjaman 0.6154 0.6154 0.6154 8 5 5\n",
+ " cari_buku_judul 0.6452 0.5556 0.7692 10 8 3\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"\\nAnalisis Intent Paling Sulit (LogReg IndoBERT):\")\n",
+ "df_indobert = analyze_intent_difficulty(y_test, y_pred_logreg_indobert, best_logreg_indobert)\n",
+ "print(df_indobert[[\"Intent\", \"F1-Score\", \"Precision\", \"Recall\", \"TP\", \"FP\", \"FN\"]].head(5).to_string(index=False))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 37,
+ "id": "896ade18",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "pipe_nb = Pipeline([\n",
+ " (\"tfidf\", TfidfVectorizer(\n",
+ " preprocessor=None,\n",
+ " lowercase=False\n",
+ " )),\n",
+ " (\"clf\", MultinomialNB())\n",
+ "])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 38,
+ "id": "8f7361d0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "param_grid_nb = {\n",
+ " \"tfidf__ngram_range\": [(1, 1), (1, 2)],\n",
+ " \"tfidf__min_df\": [1, 2],\n",
+ " \"clf__alpha\": [0.1, 0.5, 1.0]\n",
+ "}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 39,
+ "id": "4d4eb089",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "grid_nb = GridSearchCV(\n",
+ " pipe_nb,\n",
+ " param_grid_nb,\n",
+ " cv=5,\n",
+ " n_jobs=-1,\n",
+ " verbose=2\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 40,
+ "id": "a9e9ad6b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Fitting 5 folds for each of 12 candidates, totalling 60 fits\n"
+ ]
+ },
+ {
+ "data": {
+ "text/html": [
+ "
GridSearchCV(cv=5,\n",
+ " estimator=Pipeline(steps=[('tfidf',\n",
+ " TfidfVectorizer(lowercase=False)),\n",
+ " ('clf', MultinomialNB())]),\n",
+ " n_jobs=-1,\n",
+ " param_grid={'clf__alpha': [0.1, 0.5, 1.0], 'tfidf__min_df': [1, 2],\n",
+ " 'tfidf__ngram_range': [(1, 1), (1, 2)]},\n",
+ " verbose=2) In a Jupyter environment, please rerun this cell to show the HTML representation or trust the notebook. On GitHub, the HTML representation is unable to render, please try loading this page with nbviewer.org. \n",
+ "
\n",
+ "
\n",
+ " Parameters \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " estimator\n",
+ " estimator: estimator object This is assumed to implement the scikit-learn estimator interface. Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed. \n",
+ " \n",
+ " \n",
+ " Pipeline(step...inomialNB())]) \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " param_grid\n",
+ " param_grid: dict or list of dictionaries Dictionary with parameters names (`str`) as keys and lists of parameter settings to try as values, or a list of such dictionaries, in which case the grids spanned by each dictionary in the list are explored. This enables searching over any sequence of parameter settings. \n",
+ " \n",
+ " \n",
+ " {'clf__alpha': [0.1, 0.5, ...], 'tfidf__min_df': [1, 2], 'tfidf__ngram_range': [(1, ...), (1, ...)]} \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " scoring\n",
+ " scoring: str, callable, list, tuple or dict, default=None Strategy to evaluate the performance of the cross-validated model on the test set. If `scoring` represents a single score, one can use: - a single string (see :ref:`scoring_string_names`); - a callable (see :ref:`scoring_callable`) that returns a single value; - `None`, the `estimator`'s :ref:`default evaluation criterion ` is used. If `scoring` represents multiple scores, one can use: - a list or tuple of unique strings; - a callable returning a dictionary where the keys are the metric names and the values are the metric scores; - a dictionary with metric names as keys and callables as values. See :ref:`multimetric_grid_search` for an example. \n",
+ " \n",
+ " \n",
+ " None \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " n_jobs\n",
+ " n_jobs: int, default=None Number of jobs to run in parallel. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. ``-1`` means using all processors. See :term:`Glossary ` for more details. .. versionchanged:: v0.20 `n_jobs` default changed from 1 to None \n",
+ " \n",
+ " \n",
+ " -1 \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " refit\n",
+ " refit: bool, str, or callable, default=True Refit an estimator using the best found parameters on the whole dataset. For multiple metric evaluation, this needs to be a `str` denoting the scorer that would be used to find the best parameters for refitting the estimator at the end. Where there are considerations other than maximum score in choosing a best estimator, ``refit`` can be set to a function which returns the selected ``best_index_`` given ``cv_results_``. In that case, the ``best_estimator_`` and ``best_params_`` will be set according to the returned ``best_index_`` while the ``best_score_`` attribute will not be available. The refitted estimator is made available at the ``best_estimator_`` attribute and permits using ``predict`` directly on this ``GridSearchCV`` instance. Also for multiple metric evaluation, the attributes ``best_index_``, ``best_score_`` and ``best_params_`` will only be available if ``refit`` is set and all of them will be determined w.r.t this specific scorer. See ``scoring`` parameter to know more about multiple metric evaluation. See :ref:`sphx_glr_auto_examples_model_selection_plot_grid_search_digits.py` to see how to design a custom selection strategy using a callable via `refit`. See :ref:`this example` for an example of how to use ``refit=callable`` to balance model complexity and cross-validated score. .. versionchanged:: 0.20 Support for callable added. \n",
+ " \n",
+ " \n",
+ " True \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " cv\n",
+ " cv: int, cross-validation generator or an iterable, default=None Determines the cross-validation splitting strategy. Possible inputs for cv are: - None, to use the default 5-fold cross validation, - integer, to specify the number of folds in a `(Stratified)KFold`, - :term:`CV splitter`, - An iterable yielding (train, test) splits as arrays of indices. For integer/None inputs, if the estimator is a classifier and ``y`` is either binary or multiclass, :class:`StratifiedKFold` is used. In all other cases, :class:`KFold` is used. These splitters are instantiated with `shuffle=False` so the splits will be the same across calls. Refer :ref:`User Guide ` for the various cross-validation strategies that can be used here. .. versionchanged:: 0.22 ``cv`` default value if None changed from 3-fold to 5-fold. \n",
+ " \n",
+ " \n",
+ " 5 \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " verbose\n",
+ " verbose: int Controls the verbosity: the higher, the more messages. - >1 : the computation time for each fold and parameter candidate is displayed; - >2 : the score is also displayed; - >3 : the fold and candidate parameter indexes are also displayed together with the starting time of the computation. \n",
+ " \n",
+ " \n",
+ " 2 \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " pre_dispatch\n",
+ " pre_dispatch: int, or str, default='2*n_jobs' Controls the number of jobs that get dispatched during parallel execution. Reducing this number can be useful to avoid an explosion of memory consumption when more jobs get dispatched than CPUs can process. This parameter can be: - None, in which case all the jobs are immediately created and spawned. Use this for lightweight and fast-running jobs, to avoid delays due to on-demand spawning of the jobs - An int, giving the exact number of total jobs that are spawned - A str, giving an expression as a function of n_jobs, as in '2*n_jobs' \n",
+ " \n",
+ " \n",
+ " '2*n_jobs' \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " error_score\n",
+ " error_score: 'raise' or numeric, default=np.nan Value to assign to the score if an error occurs in estimator fitting. If set to 'raise', the error is raised. If a numeric value is given, FitFailedWarning is raised. This parameter does not affect the refit step, which will always raise the error. \n",
+ " \n",
+ " \n",
+ " nan \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " return_train_score\n",
+ " return_train_score: bool, default=False If ``False``, the ``cv_results_`` attribute will not include training scores. Computing training scores is used to get insights on how different parameter settings impact the overfitting/underfitting trade-off. However computing the scores on the training set can be computationally expensive and is not strictly required to select the parameters that yield the best generalization performance. .. versionadded:: 0.19 .. versionchanged:: 0.21 Default value was changed from ``True`` to ``False`` \n",
+ " \n",
+ " \n",
+ " False \n",
+ " \n",
+ " \n",
+ " \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ " Parameters \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " input\n",
+ " input: {'filename', 'file', 'content'}, default='content' - If `'filename'`, the sequence passed as an argument to fit is expected to be a list of filenames that need reading to fetch the raw content to analyze. - If `'file'`, the sequence items must have a 'read' method (file-like object) that is called to fetch the bytes in memory. - If `'content'`, the input is expected to be a sequence of items that can be of type string or byte. \n",
+ " \n",
+ " \n",
+ " 'content' \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " encoding\n",
+ " encoding: str, default='utf-8' If bytes or files are given to analyze, this encoding is used to decode. \n",
+ " \n",
+ " \n",
+ " 'utf-8' \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " decode_error\n",
+ " decode_error: {'strict', 'ignore', 'replace'}, default='strict' Instruction on what to do if a byte sequence is given to analyze that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. \n",
+ " \n",
+ " \n",
+ " 'strict' \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " strip_accents\n",
+ " strip_accents: {'ascii', 'unicode'} or callable, default=None Remove accents and perform other character normalization during the preprocessing step. 'ascii' is a fast method that only works on characters that have a direct ASCII mapping. 'unicode' is a slightly slower method that works on any characters. None (default) means no character normalization is performed. Both 'ascii' and 'unicode' use NFKD normalization from :func:`unicodedata.normalize`. \n",
+ " \n",
+ " \n",
+ " None \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " lowercase\n",
+ " lowercase: bool, default=True Convert all characters to lowercase before tokenizing. \n",
+ " \n",
+ " \n",
+ " False \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " preprocessor\n",
+ " preprocessor: callable, default=None Override the preprocessing (string transformation) stage while preserving the tokenizing and n-grams generation steps. Only applies if ``analyzer`` is not callable. \n",
+ " \n",
+ " \n",
+ " None \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " tokenizer\n",
+ " tokenizer: callable, default=None Override the string tokenization step while preserving the preprocessing and n-grams generation steps. Only applies if ``analyzer == 'word'``. \n",
+ " \n",
+ " \n",
+ " None \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " analyzer\n",
+ " analyzer: {'word', 'char', 'char_wb'} or callable, default='word' Whether the feature should be made of word or character n-grams. Option 'char_wb' creates character n-grams only from text inside word boundaries; n-grams at the edges of words are padded with space. If a callable is passed it is used to extract the sequence of features out of the raw, unprocessed input. .. versionchanged:: 0.21 Since v0.21, if ``input`` is ``'filename'`` or ``'file'``, the data is first read from the file and then passed to the given callable analyzer. \n",
+ " \n",
+ " \n",
+ " 'word' \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " stop_words\n",
+ " stop_words: {'english'}, list, default=None If a string, it is passed to _check_stop_list and the appropriate stop list is returned. 'english' is currently the only supported string value. There are several known issues with 'english' and you should consider an alternative (see :ref:`stop_words`). If a list, that list is assumed to contain stop words, all of which will be removed from the resulting tokens. Only applies if ``analyzer == 'word'``. If None, no stop words will be used. In this case, setting `max_df` to a higher value, such as in the range (0.7, 1.0), can automatically detect and filter stop words based on intra corpus document frequency of terms. \n",
+ " \n",
+ " \n",
+ " None \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " token_pattern\n",
+ " token_pattern: str, default=r\"(?u)\\\\b\\\\w\\\\w+\\\\b\" Regular expression denoting what constitutes a \"token\", only used if ``analyzer == 'word'``. The default regexp selects tokens of 2 or more alphanumeric characters (punctuation is completely ignored and always treated as a token separator). If there is a capturing group in token_pattern then the captured group content, not the entire match, becomes the token. At most one capturing group is permitted. \n",
+ " \n",
+ " \n",
+ " '(?u)\\\\b\\\\w\\\\w+\\\\b' \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " ngram_range\n",
+ " ngram_range: tuple (min_n, max_n), default=(1, 1) The lower and upper boundary of the range of n-values for different n-grams to be extracted. All values of n such that min_n <= n <= max_n will be used. For example an ``ngram_range`` of ``(1, 1)`` means only unigrams, ``(1, 2)`` means unigrams and bigrams, and ``(2, 2)`` means only bigrams. Only applies if ``analyzer`` is not callable. \n",
+ " \n",
+ " \n",
+ " (1, ...) \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " max_df\n",
+ " max_df: float or int, default=1.0 When building the vocabulary ignore terms that have a document frequency strictly higher than the given threshold (corpus-specific stop words). If float in range [0.0, 1.0], the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. \n",
+ " \n",
+ " \n",
+ " 1.0 \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " min_df\n",
+ " min_df: float or int, default=1 When building the vocabulary ignore terms that have a document frequency strictly lower than the given threshold. This value is also called cut-off in the literature. If float in range of [0.0, 1.0], the parameter represents a proportion of documents, integer absolute counts. This parameter is ignored if vocabulary is not None. \n",
+ " \n",
+ " \n",
+ " 1 \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " max_features\n",
+ " max_features: int, default=None If not None, build a vocabulary that only consider the top `max_features` ordered by term frequency across the corpus. Otherwise, all features are used. This parameter is ignored if vocabulary is not None. \n",
+ " \n",
+ " \n",
+ " None \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " vocabulary\n",
+ " vocabulary: Mapping or iterable, default=None Either a Mapping (e.g., a dict) where keys are terms and values are indices in the feature matrix, or an iterable over terms. If not given, a vocabulary is determined from the input documents. \n",
+ " \n",
+ " \n",
+ " None \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " binary\n",
+ " binary: bool, default=False If True, all non-zero term counts are set to 1. This does not mean outputs will have only 0/1 values, only that the tf term in tf-idf is binary. (Set `binary` to True, `use_idf` to False and `norm` to None to get 0/1 outputs). \n",
+ " \n",
+ " \n",
+ " False \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " dtype\n",
+ " dtype: dtype, default=float64 Type of the matrix returned by fit_transform() or transform(). \n",
+ " \n",
+ " \n",
+ " <class 'numpy.float64'> \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " norm\n",
+ " norm: {'l1', 'l2'} or None, default='l2' Each output row will have unit norm, either: - 'l2': Sum of squares of vector elements is 1. The cosine similarity between two vectors is their dot product when l2 norm has been applied. - 'l1': Sum of absolute values of vector elements is 1. See :func:`~sklearn.preprocessing.normalize`. - None: No normalization. \n",
+ " \n",
+ " \n",
+ " 'l2' \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " use_idf\n",
+ " use_idf: bool, default=True Enable inverse-document-frequency reweighting. If False, idf(t) = 1. \n",
+ " \n",
+ " \n",
+ " True \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " smooth_idf\n",
+ " smooth_idf: bool, default=True Smooth idf weights by adding one to document frequencies, as if an extra document was seen containing every term in the collection exactly once. Prevents zero divisions. \n",
+ " \n",
+ " \n",
+ " True \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " sublinear_tf\n",
+ " sublinear_tf: bool, default=False Apply sublinear tf scaling, i.e. replace tf with 1 + log(tf). \n",
+ " \n",
+ " \n",
+ " False \n",
+ " \n",
+ " \n",
+ " \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ " Parameters \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " alpha\n",
+ " alpha: float or array-like of shape (n_features,), default=1.0 Additive (Laplace/Lidstone) smoothing parameter (set alpha=0 and force_alpha=True, for no smoothing). \n",
+ " \n",
+ " \n",
+ " 0.1 \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " force_alpha\n",
+ " force_alpha: bool, default=True If False and alpha is less than 1e-10, it will set alpha to 1e-10. If True, alpha will remain unchanged. This may cause numerical errors if alpha is too close to 0. .. versionadded:: 1.2 .. versionchanged:: 1.4 The default value of `force_alpha` changed to `True`. \n",
+ " \n",
+ " \n",
+ " True \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " fit_prior\n",
+ " fit_prior: bool, default=True Whether to learn class prior probabilities or not. If false, a uniform prior will be used. \n",
+ " \n",
+ " \n",
+ " True \n",
+ " \n",
+ " \n",
+ "\n",
+ " \n",
+ " \n",
+ " \n",
+ " \n",
+ " class_prior\n",
+ " class_prior: array-like of shape (n_classes,), default=None Prior probabilities of the classes. If specified, the priors are not adjusted according to the data. \n",
+ " \n",
+ " \n",
+ " None \n",
+ " \n",
+ " \n",
+ " \n",
+ "
\n",
+ " \n",
+ "
\n",
+ "
"
+ ],
+ "text/plain": [
+ "GridSearchCV(cv=5,\n",
+ " estimator=Pipeline(steps=[('tfidf',\n",
+ " TfidfVectorizer(lowercase=False)),\n",
+ " ('clf', MultinomialNB())]),\n",
+ " n_jobs=-1,\n",
+ " param_grid={'clf__alpha': [0.1, 0.5, 1.0], 'tfidf__min_df': [1, 2],\n",
+ " 'tfidf__ngram_range': [(1, 1), (1, 2)]},\n",
+ " verbose=2)"
+ ]
+ },
+ "execution_count": 40,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "grid_nb.fit(X_train, y_train)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 41,
+ "id": "7b7acd19",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Best params (NB): {'clf__alpha': 0.1, 'tfidf__min_df': 1, 'tfidf__ngram_range': (1, 2)}\n",
+ "Best CV score (NB): 0.7731573951015759\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Best params (NB):\", grid_nb.best_params_)\n",
+ "print(\"Best CV score (NB):\", grid_nb.best_score_)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 42,
+ "id": "66191eef",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "best_nb = grid_nb.best_estimator_"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 43,
+ "id": "50a22d39",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Test Accuracy (Naive Bayes): 0.787\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "y_pred_nb = best_nb.predict(X_test)\n",
+ "acc_nb = accuracy_score(y_test, y_pred_nb)\n",
+ "print(f\"Test Accuracy (Naive Bayes): {acc_nb:.3f}\\n\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 44,
+ "id": "5bf52ba7",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Classification Report (Naive Bayes):\n",
+ " precision recall f1-score support\n",
+ "\n",
+ " akses_repository 0.73 0.85 0.79 13\n",
+ "cari_buku_isbn_callnumber 0.86 0.92 0.89 13\n",
+ " cari_buku_judul 0.86 0.92 0.89 13\n",
+ " cari_buku_penulis 0.73 0.62 0.67 13\n",
+ " cari_buku_topik 0.71 0.77 0.74 13\n",
+ " cari_rekomendasi 0.85 0.85 0.85 13\n",
+ " cek_ketersediaan_buku 0.90 0.69 0.78 13\n",
+ " donasi_buku 0.57 0.62 0.59 13\n",
+ " info_denda 0.78 0.54 0.64 13\n",
+ " jam_buka 0.92 0.92 0.92 13\n",
+ " lainnya 0.75 0.92 0.83 13\n",
+ " layanan_ejournal_ebook 0.77 0.77 0.77 13\n",
+ " layanan_ruang_diskusi 0.92 0.92 0.92 13\n",
+ " layanan_turnitin 0.67 0.77 0.71 13\n",
+ " lokasi_buku_rak 0.71 0.77 0.74 13\n",
+ " lokasi_perpustakaan 0.92 0.92 0.92 13\n",
+ " panduan_peminjaman 0.71 0.77 0.74 13\n",
+ " panduan_pengembalian 0.58 0.54 0.56 13\n",
+ " panduan_perpanjangan 0.77 0.77 0.77 13\n",
+ " salam 1.00 0.86 0.92 14\n",
+ " tanya_fungsi_mlibbot 0.87 1.00 0.93 13\n",
+ " tata_tertib 0.80 0.62 0.70 13\n",
+ "\n",
+ " accuracy 0.79 287\n",
+ " macro avg 0.79 0.79 0.78 287\n",
+ " weighted avg 0.79 0.79 0.79 287\n",
+ "\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"Classification Report (Naive Bayes):\")\n",
+ "print(classification_report(y_test, y_pred_nb))"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 45,
+ "id": "5825a038",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "Analisis Intent Paling Sulit (Naive Bayes):\n",
+ " Intent F1-Score Precision Recall TP FP FN\n",
+ "panduan_pengembalian 0.5600 0.5833 0.5385 7 5 6\n",
+ " donasi_buku 0.5926 0.5714 0.6154 8 6 5\n",
+ " info_denda 0.6364 0.7778 0.5385 7 2 6\n",
+ " cari_buku_penulis 0.6667 0.7273 0.6154 8 3 5\n",
+ " tata_tertib 0.6957 0.8000 0.6154 8 2 5\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(\"\\nAnalisis Intent Paling Sulit (Naive Bayes):\")\n",
+ "df_nb = analyze_intent_difficulty(y_test, y_pred_nb, best_nb)\n",
+ "print(df_nb[[\"Intent\", \"F1-Score\", \"Precision\", \"Recall\", \"TP\", \"FP\", \"FN\"]].head(5).to_string(index=False))"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "086da60e",
+ "metadata": {
+ "lines_to_next_cell": 2
+ },
+ "source": [
+ "Pada percobaan ini, model Naive Bayes tetap menggunakan fitur TF-IDF dan tidak digabung dengan embedding IndoBERT. Alasannya karena secara prinsip, Multinomial Naive Bayes dirancang untuk bekerja dengan fitur berupa frekuensi kata atau bobot yang mirip frekuensi (seperti count dan TF-IDF) yang bernilai non-negatif.\n",
+ "\n",
+    "Sementara itu, embedding IndoBERT berbentuk vektor dense (padat) dengan nilai kontinu yang bisa positif maupun negatif, dan tidak lagi merepresentasikan \"jumlah kemunculan kata\", tetapi makna kalimat di ruang vektor. Tipe fitur seperti ini tidak sesuai dengan asumsi probabilistik Multinomial Naive Bayes, sehingga performanya justru bisa tidak stabil atau menurun."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 46,
+ "id": "7324de39",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "LogReg (TF-IDF) Test Accuracy : 0.826\n",
+ "Naive Bayes (TF-IDF) Test Accuracy : 0.787\n",
+ "LogReg (IndoBERT) Test Accuracy : 0.767\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(f\"LogReg (TF-IDF) Test Accuracy : {acc_logreg:.3f}\")\n",
+ "print(f\"Naive Bayes (TF-IDF) Test Accuracy : {acc_nb:.3f}\")\n",
+ "print(f\"LogReg (IndoBERT) Test Accuracy : {acc_logreg_indobert:.3f}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 47,
+ "id": "c7f802ea",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "candidates = {\n",
+ " \"logreg_tfidf\": (acc_logreg, best_logreg),\n",
+ " \"naive_bayes_tfidf\": (acc_nb, best_nb),\n",
+ " \"logreg_indobert\": (acc_logreg_indobert, best_logreg_indobert),\n",
+ "}"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 48,
+ "id": "10f797ff",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "best_model_name, (best_acc, final_model) = max(\n",
+ " candidates.items(),\n",
+ " key=lambda item: item[1][0] \n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 49,
+ "id": "950677e7",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Chosen model: logreg_tfidf (accuracy = 0.826)\n"
+ ]
+ }
+ ],
+ "source": [
+ "print(f\"Chosen model: {best_model_name} (accuracy = {best_acc:.3f})\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 50,
+ "id": "67fbe771",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\n",
+ "Menyimpan semua model...\n"
+ ]
+ }
+ ],
+ "source": [
+ "# Simpan semua model\n",
+ "print(\"\\nMenyimpan semua model...\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 51,
+ "id": "e2f4f6a0",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Saved LogReg TF-IDF to: model/intent_model_logreg_tfidf.pkl\n"
+ ]
+ }
+ ],
+ "source": [
+ "logreg_tfidf_path = \"model/intent_model_logreg_tfidf.pkl\"\n",
+ "joblib.dump(best_logreg, logreg_tfidf_path)\n",
+ "print(f\"Saved LogReg TF-IDF to: {logreg_tfidf_path}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 52,
+ "id": "2095be1c",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Saved Naive Bayes TF-IDF to: model/intent_model_naive_bayes_tfidf.pkl\n"
+ ]
+ }
+ ],
+ "source": [
+ "nb_tfidf_path = \"model/intent_model_naive_bayes_tfidf.pkl\"\n",
+ "joblib.dump(best_nb, nb_tfidf_path)\n",
+ "print(f\"Saved Naive Bayes TF-IDF to: {nb_tfidf_path}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 53,
+ "id": "f228591c",
+ "metadata": {
+ "lines_to_next_cell": 1
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Saved LogReg IndoBERT to: model/intent_model_logreg_indobert.pkl\n"
+ ]
+ }
+ ],
+ "source": [
+ "indobert_path = \"model/intent_model_logreg_indobert.pkl\"\n",
+ "joblib.dump(best_logreg_indobert, indobert_path)\n",
+ "print(f\"Saved LogReg IndoBERT to: {indobert_path}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 54,
+ "id": "8fef94f8",
+ "metadata": {
+ "lines_to_next_cell": 1
+ },
+ "outputs": [],
+ "source": [
+ "def predict_intent_sentence(s):\n",
+ " s_clean = preprocess(s)\n",
+ " return final_model.predict([s_clean])[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 55,
+ "id": "e930cca5",
+ "metadata": {
+ "lines_to_next_cell": 2
+ },
+ "outputs": [],
+ "source": [
+ "tests = [\n",
+ " \"jam buka perpustakaan hari sabtu\",\n",
+ " \"perpus maranatha buka sampe jam berapa ya?\",\n",
+ " \"besok minggu perpus buka gak?\",\n",
+ " \"jam operasional perpustakaan pas libur nasional gimana?\",\n",
+ " \"hari ini perpus udah buka belum?\",\n",
+ "\n",
+ " \"ada buku basis data fathansyah gak\",\n",
+ " \"ada buku tentang machine learning terbaru gak?\",\n",
+ " \"cek dong buku pemrograman python masih tersedia ga\",\n",
+ " \"di perpus ada novel laskar pelangi gak sih?\",\n",
+ " \"kalo mau cari skripsi tentang data mining ada ga?\",\n",
+ "\n",
+ " \"cara booking ruang diskusi gimana\",\n",
+ " \"book ruang belajar kelompok bisa lewat mana?\",\n",
+ " \"ruang diskusi bisa dipake berapa jam maksimal?\",\n",
+ " \"bisa reservasi ruang belajar lewat online gak?\",\n",
+ "\n",
+ " \"kalau telat balikin buku dendanya berapa\",\n",
+ " \"telat ngembaliin buku 2 hari berapa ya?\",\n",
+ " \"Kalau saya telat mengembalikan, konsekuensinya apa?\",\n",
+ " \"kalau hilangin buku perpus dendanya gimana ya?\",\n",
+ " \"batas maksimal telat pengembalian sebelum kena blokir berapa hari?\",\n",
+ "\n",
+ " \"cara akses e journal dari luar kampus\",\n",
+ " \"akses database journal lewat wifi kos bisa gak?\",\n",
+ " \"punya akses ke ieee atau sciencedirect gak ya?\",\n",
+ " \"login e-resources pake akun apa ya?\",\n",
+ " \"kalo lupa password e journal harus gimana?\",\n",
+ "\n",
+ " \"perpus maranatha ada dmn sih\",\n",
+ " \"alamat lengkap perpustakaan maranatha di mana ya?\",\n",
+ " \"nomor telepon perpustakaan ada?\",\n",
+ " \"perpus ada di gedung mana ya di kampus?\",\n",
+ "\n",
+ " \"cara pinjam buku di perpus gimana\",\n",
+ " \"bisa perpanjang peminjaman buku lewat online gak?\",\n",
+ " \"kalo mau pinjem buku harus bawa ktm gak?\",\n",
+ " \"maksimal bisa pinjam berapa buku sekaligus?\",\n",
+ " \"lama peminjaman buku berapa hari ya?\",\n",
+ "\n",
+ " \"halo mlibbot\",\n",
+ " \"hi bot, bisa bantu cari buku?\",\n",
+ " \"p\",\n",
+ " \"halo, ini perpus maranatha ya?\",\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 56,
+ "id": "cbda5478",
+ "metadata": {
+ "lines_to_next_cell": 1
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "'jam buka perpustakaan hari sabtu' -> jam_buka\n",
+ "'perpus maranatha buka sampe jam berapa ya?' -> jam_buka\n",
+ "'besok minggu perpus buka gak?' -> jam_buka\n",
+ "'jam operasional perpustakaan pas libur nasional gimana?' -> jam_buka\n",
+ "'hari ini perpus udah buka belum?' -> jam_buka\n",
+ "'ada buku basis data fathansyah gak' -> cari_buku_judul\n",
+ "'ada buku tentang machine learning terbaru gak?' -> cari_rekomendasi\n",
+ "'cek dong buku pemrograman python masih tersedia ga' -> cek_ketersediaan_buku\n",
+ "'di perpus ada novel laskar pelangi gak sih?' -> cari_buku_judul\n",
+ "'kalo mau cari skripsi tentang data mining ada ga?' -> cari_buku_topik\n",
+ "'cara booking ruang diskusi gimana' -> layanan_ruang_diskusi\n",
+ "'book ruang belajar kelompok bisa lewat mana?' -> layanan_ruang_diskusi\n",
+ "'ruang diskusi bisa dipake berapa jam maksimal?' -> layanan_ruang_diskusi\n",
+ "'bisa reservasi ruang belajar lewat online gak?' -> panduan_perpanjangan\n",
+ "'kalau telat balikin buku dendanya berapa' -> info_denda\n",
+ "'telat ngembaliin buku 2 hari berapa ya?' -> info_denda\n",
+ "'Kalau saya telat mengembalikan, konsekuensinya apa?' -> panduan_pengembalian\n",
+ "'kalau hilangin buku perpus dendanya gimana ya?' -> panduan_pengembalian\n",
+ "'batas maksimal telat pengembalian sebelum kena blokir berapa hari?' -> info_denda\n",
+ "'cara akses e journal dari luar kampus' -> layanan_ejournal_ebook\n",
+ "'akses database journal lewat wifi kos bisa gak?' -> layanan_ejournal_ebook\n",
+ "'punya akses ke ieee atau sciencedirect gak ya?' -> layanan_ejournal_ebook\n",
+ "'login e-resources pake akun apa ya?' -> cari_buku_isbn_callnumber\n",
+ "'kalo lupa password e journal harus gimana?' -> layanan_ejournal_ebook\n",
+ "'perpus maranatha ada dmn sih' -> lokasi_perpustakaan\n",
+ "'alamat lengkap perpustakaan maranatha di mana ya?' -> lokasi_perpustakaan\n",
+ "'nomor telepon perpustakaan ada?' -> lokasi_perpustakaan\n",
+ "'perpus ada di gedung mana ya di kampus?' -> lokasi_perpustakaan\n",
+ "'cara pinjam buku di perpus gimana' -> panduan_peminjaman\n",
+ "'bisa perpanjang peminjaman buku lewat online gak?' -> panduan_perpanjangan\n",
+ "'kalo mau pinjem buku harus bawa ktm gak?' -> panduan_peminjaman\n",
+ "'maksimal bisa pinjam berapa buku sekaligus?' -> panduan_peminjaman\n",
+ "'lama peminjaman buku berapa hari ya?' -> panduan_peminjaman\n",
+ "'halo mlibbot' -> salam\n",
+ "'hi bot, bisa bantu cari buku?' -> tanya_fungsi_mlibbot\n",
+ "'p' -> lainnya\n",
+ "'halo, ini perpus maranatha ya?' -> salam\n"
+ ]
+ }
+ ],
+ "source": [
+ "for t in tests:\n",
+ " print(f\"{t!r} -> {predict_intent_sentence(t)}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "df1eac42",
+ "metadata": {},
+ "source": [
+     "Test dengan model IndoBERT: sel berikut mendefinisikan ulang fungsi `predict_intent_sentence` agar memakai `best_logreg_indobert`, sehingga menimpa definisi sebelumnya yang memakai `final_model`."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 57,
+ "id": "5678285a",
+ "metadata": {
+ "lines_to_next_cell": 1
+ },
+ "outputs": [],
+ "source": [
+ "def predict_intent_sentence(s):\n",
+ " s_clean = preprocess(s)\n",
+ " return best_logreg_indobert.predict([s_clean])[0]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 58,
+ "id": "373f65c0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tests = [\n",
+ " \"jam buka perpustakaan hari sabtu\",\n",
+ " \"perpus maranatha buka sampe jam berapa ya?\",\n",
+ " \"besok minggu perpus buka gak?\",\n",
+ " \"jam operasional perpustakaan pas libur nasional gimana?\",\n",
+ " \"hari ini perpus udah buka belum?\",\n",
+ "\n",
+ " \"ada buku basis data fathansyah gak\",\n",
+ " \"ada buku tentang machine learning terbaru gak?\",\n",
+ " \"cek dong buku pemrograman python masih tersedia ga\",\n",
+ " \"di perpus ada novel laskar pelangi gak sih?\",\n",
+ " \"kalo mau cari skripsi tentang data mining ada ga?\",\n",
+ "\n",
+ " \"cara booking ruang diskusi gimana\",\n",
+ " \"book ruang belajar kelompok bisa lewat mana?\",\n",
+ " \"ruang diskusi bisa dipake berapa jam maksimal?\",\n",
+ " \"bisa reservasi ruang belajar lewat online gak?\",\n",
+ "\n",
+ " \"kalau telat balikin buku dendanya berapa\",\n",
+ " \"telat ngembaliin buku 2 hari berapa ya?\",\n",
+ " \"Kalau saya telat mengembalikan, konsekuensinya apa?\",\n",
+ " \"kalau hilangin buku perpus dendanya gimana ya?\",\n",
+ " \"batas maksimal telat pengembalian sebelum kena blokir berapa hari?\",\n",
+ "\n",
+ " \"cara akses e journal dari luar kampus\",\n",
+ " \"akses database journal lewat wifi kos bisa gak?\",\n",
+ " \"punya akses ke ieee atau sciencedirect gak ya?\",\n",
+ " \"login e-resources pake akun apa ya?\",\n",
+ " \"kalo lupa password e journal harus gimana?\",\n",
+ "\n",
+ " \"perpus maranatha ada dmn sih\",\n",
+ " \"alamat lengkap perpustakaan maranatha di mana ya?\",\n",
+ " \"nomor telepon perpustakaan ada?\",\n",
+ " \"perpus ada di gedung mana ya di kampus?\",\n",
+ "\n",
+ " \"cara pinjam buku di perpus gimana\",\n",
+ " \"bisa perpanjang peminjaman buku lewat online gak?\",\n",
+ " \"kalo mau pinjem buku harus bawa ktm gak?\",\n",
+ " \"maksimal bisa pinjam berapa buku sekaligus?\",\n",
+ " \"lama peminjaman buku berapa hari ya?\",\n",
+ "\n",
+ " \"halo mlibbot\",\n",
+ " \"hi bot, bisa bantu cari buku?\",\n",
+ " \"p\",\n",
+ " \"halo, ini perpus maranatha ya?\",\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 59,
+ "id": "85d65c9f",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "'jam buka perpustakaan hari sabtu' -> jam_buka\n",
+ "'perpus maranatha buka sampe jam berapa ya?' -> jam_buka\n",
+ "'besok minggu perpus buka gak?' -> salam\n",
+ "'jam operasional perpustakaan pas libur nasional gimana?' -> jam_buka\n",
+ "'hari ini perpus udah buka belum?' -> salam\n",
+ "'ada buku basis data fathansyah gak' -> cari_buku_penulis\n",
+ "'ada buku tentang machine learning terbaru gak?' -> cari_buku_judul\n",
+ "'cek dong buku pemrograman python masih tersedia ga' -> cari_buku_judul\n",
+ "'di perpus ada novel laskar pelangi gak sih?' -> cari_buku_penulis\n",
+ "'kalo mau cari skripsi tentang data mining ada ga?' -> cari_buku_judul\n",
+ "'cara booking ruang diskusi gimana' -> layanan_ruang_diskusi\n",
+ "'book ruang belajar kelompok bisa lewat mana?' -> layanan_ruang_diskusi\n",
+ "'ruang diskusi bisa dipake berapa jam maksimal?' -> layanan_ruang_diskusi\n",
+ "'bisa reservasi ruang belajar lewat online gak?' -> layanan_ejournal_ebook\n",
+ "'kalau telat balikin buku dendanya berapa' -> info_denda\n",
+ "'telat ngembaliin buku 2 hari berapa ya?' -> panduan_perpanjangan\n",
+ "'Kalau saya telat mengembalikan, konsekuensinya apa?' -> info_denda\n",
+ "'kalau hilangin buku perpus dendanya gimana ya?' -> info_denda\n",
+ "'batas maksimal telat pengembalian sebelum kena blokir berapa hari?' -> panduan_perpanjangan\n",
+ "'cara akses e journal dari luar kampus' -> layanan_ejournal_ebook\n",
+ "'akses database journal lewat wifi kos bisa gak?' -> layanan_ejournal_ebook\n",
+ "'punya akses ke ieee atau sciencedirect gak ya?' -> layanan_ejournal_ebook\n",
+ "'login e-resources pake akun apa ya?' -> cari_buku_judul\n",
+ "'kalo lupa password e journal harus gimana?' -> layanan_ejournal_ebook\n",
+ "'perpus maranatha ada dmn sih' -> lokasi_perpustakaan\n",
+ "'alamat lengkap perpustakaan maranatha di mana ya?' -> lokasi_perpustakaan\n",
+ "'nomor telepon perpustakaan ada?' -> jam_buka\n",
+ "'perpus ada di gedung mana ya di kampus?' -> lokasi_perpustakaan\n",
+ "'cara pinjam buku di perpus gimana' -> panduan_peminjaman\n",
+ "'bisa perpanjang peminjaman buku lewat online gak?' -> panduan_perpanjangan\n",
+ "'kalo mau pinjem buku harus bawa ktm gak?' -> panduan_peminjaman\n",
+ "'maksimal bisa pinjam berapa buku sekaligus?' -> panduan_peminjaman\n",
+ "'lama peminjaman buku berapa hari ya?' -> panduan_perpanjangan\n",
+ "'halo mlibbot' -> salam\n",
+ "'hi bot, bisa bantu cari buku?' -> tanya_fungsi_mlibbot\n",
+ "'p' -> cari_buku_penulis\n",
+ "'halo, ini perpus maranatha ya?' -> salam\n"
+ ]
+ }
+ ],
+ "source": [
+ "for t in tests:\n",
+ " print(f\"{t!r} -> {predict_intent_sentence(t)}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "46d71456-0a12-472d-ad1d-7b37cd481a21",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "jupytext": {
+ "cell_metadata_filter": "-all",
+ "main_language": "python",
+ "notebook_metadata_filter": "-all"
+ },
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.14"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}