AnonymousPaperSubmission123 committed on
Commit ea03829 · verified · 1 Parent(s): bb85a51

Upload 13 files
Dockerfile ADDED
@@ -0,0 +1,24 @@
+ FROM python:3.10.8
+
+ # CMD python download_private_model.py
+
+ WORKDIR /app
+
+ RUN apt-get update && apt-get install -y \
+     build-essential \
+     curl \
+     git \
+     && rm -rf /var/lib/apt/lists/*
+
+ COPY requirements.txt ./
+ COPY src/ ./src/
+
+ RUN pip3 install -r requirements.txt
+
+ EXPOSE 8501
+
+ HEALTHCHECK CMD curl --fail http://localhost:8501/_stcore/health
+
+ # WORKDIR /app/src
+ # ENTRYPOINT ["streamlit", "run", "app.py", "--server.port=8501", "--server.address=0.0.0.0"]
+ ENTRYPOINT ["streamlit", "run", "src/app.py", "--server.port=8501", "--server.address=0.0.0.0"]
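
The HEALTHCHECK above polls Streamlit's built-in health endpoint with curl. A minimal stdlib-only sketch of the same probe from Python, assuming the container is already running and mapped to localhost:8501:

from urllib.request import urlopen
from urllib.error import URLError

def streamlit_healthy(base_url="http://localhost:8501"):
    # Mirrors the Dockerfile HEALTHCHECK: curl --fail <base_url>/_stcore/health
    try:
        with urlopen(f"{base_url}/_stcore/health", timeout=5) as resp:
            return resp.status == 200
    except (URLError, OSError):
        return False

if __name__ == "__main__":
    print("healthy" if streamlit_healthy() else "unhealthy")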
README.md CHANGED
@@ -1,5 +1,5 @@
  ---
- title: AdaDetectGPT
+ title: DetectGPTProMax
  emoji: 🐨
  colorFrom: blue
  colorTo: pink
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ # requirements.txt
+ altair
+ streamlit
+ pandas==2.3.1
+ torch==2.8.0
+ numpy==2.1.3
+ transformers==4.55.2
+ peft==0.17.1
+ tqdm
+ scikit-learn
+ huggingface_hub
src/FineTune/.gitignore ADDED
@@ -0,0 +1,165 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ ckpt/*
+ logs/*/
+ models/*/
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/latest/usage/project/#working-with-version-control
+ .pdm.toml
+ .pdm-python
+ .pdm-build/
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
src/FineTune/ckpt/config.json ADDED
@@ -0,0 +1 @@
+ {"domains": ["Academia", "Finance", "Government", "Knowledge", "Legislation", "Medicine", "News", "UserReview", "General"], "criterion": "mean"}
src/FineTune/ckpt/null_distrs.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b54878910ccb7bcd7575dc032dccdd61e8ad604e5e195922c0051564ef8acd81
+ size 3030341
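
The three lines above are a Git LFS pointer, not the tensor data itself; the real file is fetched when the repo is downloaded. Per `save_pretrained` in `src/FineTune/model.py` below, it deserializes to a dict mapping each domain listed in `config.json` to a 1-D tensor of null-statistic draws. A minimal inspection sketch (the local path is an assumption):

import torch

# Hypothetical local path within a full checkout of this repo
null_distrs = torch.load("src/FineTune/ckpt/null_distrs.pt", map_location="cpu")
for domain, distr in null_distrs.items():
    print(f"{domain}: shape={tuple(distr.shape)}, mean={distr.mean():.4f}")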
src/FineTune/ckpt/scoring_model/README.md ADDED
@@ -0,0 +1,202 @@
+ ---
+ base_model: google/gemma-3-1b-pt
+ library_name: peft
+ ---
+
+ # Model Card for Model ID
+
+ <!-- Provide a quick summary of what the model is/does. -->
+
+
+
+ ## Model Details
+
+ ### Model Description
+
+ <!-- Provide a longer summary of what this model is. -->
+
+
+
+ - **Developed by:** [More Information Needed]
+ - **Funded by [optional]:** [More Information Needed]
+ - **Shared by [optional]:** [More Information Needed]
+ - **Model type:** [More Information Needed]
+ - **Language(s) (NLP):** [More Information Needed]
+ - **License:** [More Information Needed]
+ - **Finetuned from model [optional]:** [More Information Needed]
+
+ ### Model Sources [optional]
+
+ <!-- Provide the basic links for the model. -->
+
+ - **Repository:** [More Information Needed]
+ - **Paper [optional]:** [More Information Needed]
+ - **Demo [optional]:** [More Information Needed]
+
+ ## Uses
+
+ <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->
+
+ ### Direct Use
+
+ <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->
+
+ [More Information Needed]
+
+ ### Downstream Use [optional]
+
+ <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->
+
+ [More Information Needed]
+
+ ### Out-of-Scope Use
+
+ <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->
+
+ [More Information Needed]
+
+ ## Bias, Risks, and Limitations
+
+ <!-- This section is meant to convey both technical and sociotechnical limitations. -->
+
+ [More Information Needed]
+
+ ### Recommendations
+
+ <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->
+
+ Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.
+
+ ## How to Get Started with the Model
+
+ Use the code below to get started with the model.
+
+ [More Information Needed]
+
+ ## Training Details
+
+ ### Training Data
+
+ <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->
+
+ [More Information Needed]
+
+ ### Training Procedure
+
+ <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->
+
+ #### Preprocessing [optional]
+
+ [More Information Needed]
+
+
+ #### Training Hyperparameters
+
+ - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->
+
+ #### Speeds, Sizes, Times [optional]
+
+ <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->
+
+ [More Information Needed]
+
+ ## Evaluation
+
+ <!-- This section describes the evaluation protocols and provides the results. -->
+
+ ### Testing Data, Factors & Metrics
+
+ #### Testing Data
+
+ <!-- This should link to a Dataset Card if possible. -->
+
+ [More Information Needed]
+
+ #### Factors
+
+ <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->
+
+ [More Information Needed]
+
+ #### Metrics
+
+ <!-- These are the evaluation metrics being used, ideally with a description of why. -->
+
+ [More Information Needed]
+
+ ### Results
+
+ [More Information Needed]
+
+ #### Summary
+
+
+
+ ## Model Examination [optional]
+
+ <!-- Relevant interpretability work for the model goes here -->
+
+ [More Information Needed]
+
+ ## Environmental Impact
+
+ <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->
+
+ Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).
+
+ - **Hardware Type:** [More Information Needed]
+ - **Hours used:** [More Information Needed]
+ - **Cloud Provider:** [More Information Needed]
+ - **Compute Region:** [More Information Needed]
+ - **Carbon Emitted:** [More Information Needed]
+
+ ## Technical Specifications [optional]
+
+ ### Model Architecture and Objective
+
+ [More Information Needed]
+
+ ### Compute Infrastructure
+
+ [More Information Needed]
+
+ #### Hardware
+
+ [More Information Needed]
+
+ #### Software
+
+ [More Information Needed]
+
+ ## Citation [optional]
+
+ <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->
+
+ **BibTeX:**
+
+ [More Information Needed]
+
+ **APA:**
+
+ [More Information Needed]
+
+ ## Glossary [optional]
+
+ <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->
+
+ [More Information Needed]
+
+ ## More Information [optional]
+
+ [More Information Needed]
+
+ ## Model Card Authors [optional]
+
+ [More Information Needed]
+
+ ## Model Card Contact
+
+ [More Information Needed]
+ ### Framework versions
+
+ - PEFT 0.15.2
src/FineTune/ckpt/scoring_model/adapter_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+     "alpha_pattern": {},
+     "auto_mapping": null,
+     "base_model_name_or_path": "google/gemma-3-1b-pt",
+     "bias": "none",
+     "corda_config": null,
+     "eva_config": null,
+     "exclude_modules": null,
+     "fan_in_fan_out": false,
+     "inference_mode": true,
+     "init_lora_weights": true,
+     "layer_replication": null,
+     "layers_pattern": null,
+     "layers_to_transform": null,
+     "loftq_config": {},
+     "lora_alpha": 16,
+     "lora_bias": false,
+     "lora_dropout": 0.05,
+     "megatron_config": null,
+     "megatron_core": "megatron.core",
+     "modules_to_save": null,
+     "peft_type": "LORA",
+     "r": 4,
+     "rank_pattern": {},
+     "revision": null,
+     "target_modules": [
+         "q_proj",
+         "k_proj",
+         "o_proj",
+         "v_proj"
+     ],
+     "task_type": "CAUSAL_LM",
+     "trainable_token_indices": null,
+     "use_dora": false,
+     "use_rslora": false
+ }
src/FineTune/ckpt/scoring_model/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e74e22eead36ed3eef207c50d0ead88ea37f7748a0a0148be6dbc0a5d4701e37
+ size 3009096
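
Together, `adapter_config.json` and `adapter_model.safetensors` (another LFS pointer) form a complete PEFT adapter directory; the config mirrors the LoRA settings constructed in `ComputeStat.__init__` below (r=4, lora_alpha=16, dropout 0.05, attention projections only). A minimal sketch of loading it directly, which is the same call `ComputeStat.from_pretrained` makes (the local path is an assumption):

from peft import AutoPeftModelForCausalLM

# Hypothetical local path: the saved adapter directory from this commit
model = AutoPeftModelForCausalLM.from_pretrained(
    "src/FineTune/ckpt/scoring_model",
    device_map="auto",
    use_safetensors=True,
)
model.eval()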
src/FineTune/model.py ADDED
@@ -0,0 +1,304 @@
+ import torch
+ from torch import nn
+ from peft import get_peft_model, LoraConfig, TaskType, AutoPeftModelForCausalLM
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import time
+ import json
+
+ import os
+
+ def calculate_MMD_loss(human_crit, sample_crit):
+     # Mean difference between the human and sampled criterion values (the training loss)
+     mmd_loss = human_crit.mean() - sample_crit.mean()
+     return mmd_loss
+
+ def from_pretrained(cls, model_name, kwargs, cache_dir):
+     # use local model if it exists
+     if "/" in model_name:
+         local_path = os.path.join(cache_dir, model_name.split("/")[1])
+     else:
+         local_path = os.path.join(cache_dir, model_name)
+
+     if os.path.exists(local_path):
+         return cls.from_pretrained(local_path, **kwargs)
+     return cls.from_pretrained(model_name, **kwargs, cache_dir=cache_dir, device_map='auto')
+
+ model_fullnames = {
+     'gemma-1b': 'google/gemma-3-1b-pt',
+ }
+ float16_models = []
+
+ def get_model_fullname(model_name):
+     return model_fullnames[model_name] if model_name in model_fullnames else model_name
+
+ def load_tokenizer(model_name, for_dataset, cache_dir):
+     model_fullname = get_model_fullname(model_name)
+     optional_tok_kwargs = {}
+     if for_dataset in ['pubmed']:
+         optional_tok_kwargs['padding_side'] = 'left'
+     else:
+         optional_tok_kwargs['padding_side'] = 'right'
+     base_tokenizer = from_pretrained(AutoTokenizer, model_fullname, optional_tok_kwargs, cache_dir=cache_dir)
+     if base_tokenizer.pad_token_id is None:
+         base_tokenizer.pad_token_id = base_tokenizer.eos_token_id
+     if '13b' in model_fullname:
+         base_tokenizer.pad_token_id = 0
+     return base_tokenizer
+
+ def get_sampling_discrepancy_analytic(logits_ref, logits_score, labels):
+     if logits_ref.size(-1) != logits_score.size(-1):
+         vocab_size = min(logits_ref.size(-1), logits_score.size(-1))
+         logits_ref = logits_ref[:, :, :vocab_size]
+         logits_score = logits_score[:, :, :vocab_size]
+
+     labels = labels.unsqueeze(-1) if labels.ndim == logits_score.ndim - 1 else labels
+     lprobs_score = torch.log_softmax(logits_score, dim=-1)
+     probs_ref = torch.softmax(logits_ref, dim=-1)
+
+     log_likelihood = lprobs_score.gather(dim=-1, index=labels).squeeze(-1)
+     mean_ref = (probs_ref * lprobs_score).sum(dim=-1)
+     var_ref = (probs_ref * torch.square(lprobs_score)).sum(dim=-1) - torch.square(mean_ref)
+     discrepancy = (log_likelihood.sum(dim=-1) - mean_ref.sum(dim=-1)) / var_ref.sum(dim=-1).clamp_min(0.0001).sqrt()
+
+     return discrepancy, log_likelihood.sum(dim=-1)
+
+ class ComputeStat(nn.Module):
+     def __init__(self, model_name, dataset='xsum', device='cuda', cache_dir='./models'):
+         super().__init__()
+         self.device = device
+         self.reference_model_name = get_model_fullname(model_name)
+         self.scoring_model_name = get_model_fullname(model_name)
+
+         def load_model(model_name, device, cache_dir):
+             model_fullname = get_model_fullname(model_name)
+             print(f'Loading model {model_fullname}...')
+             model_kwargs = {}
+             if model_name in float16_models:
+                 model_kwargs.update(dict(torch_dtype=torch.float16))
+             if torch.__version__ >= '2.0.0' and 'gemma' in model_name:
+                 model_kwargs.update({'attn_implementation': 'sdpa'})
+             model = from_pretrained(AutoModelForCausalLM, model_fullname, model_kwargs, cache_dir)
+             print(f'Moving model to {device}...', end='', flush=True)
+             start = time.time()
+             model.to(device)
+             print(f'DONE ({time.time() - start:.2f}s)')
+             return model
+
+         # load scoring model
+         self.scoring_tokenizer = load_tokenizer(model_name, dataset, cache_dir)
+         scoring_model = load_model(model_name, device, cache_dir)
+         if model_name in ['gemma-1b']:
+             self.peft_config = LoraConfig(
+                 task_type=TaskType.CAUSAL_LM,
+                 inference_mode=False,
+                 r=4,
+                 lora_alpha=16,
+                 lora_dropout=0.05,
+                 target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
+             )
+         else:
+             self.peft_config = LoraConfig(
+                 task_type=TaskType.CAUSAL_LM,
+                 inference_mode=False,
+                 r=8,
+                 lora_alpha=32,
+                 lora_dropout=0.1,
+             )
+         self.scoring_model = get_peft_model(scoring_model, self.peft_config)
+
+         # load sampling model
+         self.reference_tokenizer = load_tokenizer(model_name, dataset, cache_dir)
+         reference_model = load_model(model_name, device, cache_dir)
+         self.reference_model = reference_model
+         self.reference_model.eval()
+         for p in self.reference_model.parameters():
+             p.requires_grad = False
+
+         total = sum(p.numel() for p in self.scoring_model.parameters())
+         trainable = sum(p.numel() for p in self.scoring_model.parameters() if p.requires_grad)
+         print(f"Trainable / total (parameters): {trainable}/{total}={trainable/total}")
+
+     def set_criterion_fn(self, criterion_fn):
+         if criterion_fn == "mean":
+             self.criterion = 'mean'
+             self.criterion_fn = get_sampling_discrepancy_analytic
+         else:
+             raise ValueError(f"Unknown criterion function: {criterion_fn}")
+
+     def print_gradient_requirement(self):
+         for name, param in self.named_parameters():
+             gradient_requirement = 'Requires Grad' if param.requires_grad else 'Does not require grad'
+             color_code = '\033[92m' if param.requires_grad else '\033[91m'  # Green for requires grad, red otherwise
+             reset_color = '\033[0m'  # Reset color after printing
+             print(f"{name}: {color_code}{gradient_requirement}{reset_color}")
+
+     def register_no_grad(self, module_names):
+         for name, param in self.named_parameters():
+             for selected_module in module_names:
+                 # print(selected_module, name)
+                 if selected_module in name:
+                     param.requires_grad = False
+
+     def save_pretrained(self, save_directory: str, save_null_distr_only=False):
+         """
+         Save the scoring model (with LoRA adapter) and all null_distr buffers in Hugging Face format.
+         """
+         os.makedirs(save_directory, exist_ok=True)
+
+         # 1. Save scoring_model (LoRA adapter + base model)
+         if not save_null_distr_only:
+             scoring_dir = os.path.join(save_directory, "scoring_model")
+             self.scoring_model.save_pretrained(scoring_dir, safe_serialization=True)
+
+         # 2. Save all null_distr_* buffers
+         null_distrs = {}
+         for buffer_name, buffer_value in self.named_buffers():
+             if buffer_name.startswith("null_distr_"):
+                 domain = buffer_name.replace("null_distr_", "")
+                 null_distrs[domain] = buffer_value.detach().cpu()
+
+         if null_distrs:
+             torch.save(null_distrs, os.path.join(save_directory, "null_distrs.pt"))
+             print(f"✅ Saved {len(null_distrs)} null distributions: {list(null_distrs.keys())}")
+
+         # 3. Save the config (including the domain list)
+         config = {
+             "domains": list(null_distrs.keys()),
+             "criterion": getattr(self, "criterion", None),
+         }
+         with open(os.path.join(save_directory, "config.json"), "w") as f:
+             json.dump(config, f)
+
+         print(f"✅ Model saved to {save_directory}")
+
+     @classmethod
+     def from_pretrained(cls, load_directory: str, *args, **kwargs):
+         """
+         Load the scoring model, reference model, and all null_distr buffers.
+         """
+         # 1. Initialize the class
+         model = cls(*args, **kwargs)
+
+         # 2. Load scoring_model
+         scoring_dir = os.path.join(load_directory, "scoring_model")
+         model.scoring_model = AutoPeftModelForCausalLM.from_pretrained(
+             scoring_dir,
+             device_map="auto",
+             low_cpu_mem_usage=True,
+             use_safetensors=True
+         )
+
+         # 3. Load all null_distr buffers
+         null_distrs_path = os.path.join(load_directory, "null_distrs.pt")
+         if os.path.exists(null_distrs_path):
+             null_distrs = torch.load(null_distrs_path, map_location="cpu")
+             for domain, null_distr in null_distrs.items():
+                 model.set_null_distr(null_distr, domain)
+             print(f"✅ Restored {len(null_distrs)} null distributions: {list(null_distrs.keys())}")
+
+         # 4. Load the config
+         config_path = os.path.join(load_directory, "config.json")
+         if os.path.exists(config_path):
+             with open(config_path, "r") as f:
+                 config = json.load(f)
+             if "criterion" in config and config["criterion"] is not None:
+                 model.criterion = config["criterion"]
+             print(f"✅ Loaded config: {config}")
+
+         print(f"✅ Model loaded from {load_directory}")
+         return model
+
+     def compute_stats(self, tokenized=None, labels=[""], training_module=False):
+         if training_module:
+             logits_score = self.scoring_model(tokenized.input_ids, attention_mask=tokenized.attention_mask).logits[:, :-1, :]
+             logits_ref = self.reference_model(tokenized.input_ids, attention_mask=tokenized.attention_mask).logits[:, :-1, :]
+             crit, SPO_input = self.criterion_fn(logits_ref, logits_score, labels)
+         else:
+             with torch.no_grad():  # get reference
+                 logits_score = self.scoring_model(tokenized.input_ids, attention_mask=tokenized.attention_mask).logits[:, :-1, :]  # shape: [bsz, sentence_len, dim]
+                 logits_ref = self.reference_model(tokenized.input_ids, attention_mask=tokenized.attention_mask).logits[:, :-1, :]
+                 crit, SPO_input = self.criterion_fn(logits_ref, logits_score, labels)
+         return crit, SPO_input, logits_score
+
+     def forward(self, text, training_module=True):
+         original_text = text[0]
+         sampled_text = text[1]
+
+         tokenized = self.scoring_tokenizer(original_text, return_tensors="pt", padding=True, return_token_type_ids=False).to(self.device)
+         labels = tokenized.input_ids[:, 1:]
+         train_original_crit, _, _ = self.compute_stats(tokenized, labels, training_module=training_module)
+
+         tokenized = self.scoring_tokenizer(sampled_text, return_tensors="pt", padding=True, return_token_type_ids=False).to(self.device)
+         labels = tokenized.input_ids[:, 1:]
+         train_sampled_crit, _, _ = self.compute_stats(tokenized, labels, training_module=training_module)
+
+         MMDloss = calculate_MMD_loss(train_original_crit, train_sampled_crit)
+         output = dict(crit=[train_original_crit.detach(), train_original_crit, train_sampled_crit.detach(), train_sampled_crit], loss=MMDloss)
+         return output
+
+     def set_null_distr(self, null_distr: torch.Tensor, domain: str):
+         """
+         Set the null distribution tensor safely.
+         """
+         distr_name = f"null_distr_{domain}"
+         self.register_buffer(distr_name, torch.empty(0))
+
+         if not isinstance(null_distr, torch.Tensor):
+             null_distr = torch.tensor(null_distr)
+
+         # detach + clone + move to the correct device
+         null_distr = null_distr.detach().clone().to(self.device)
+
+         # Overwrite the buffer directly to avoid the issues that come with delattr
+         self._buffers[distr_name] = null_distr
+         print(f"✅ Null distribution on {domain} with shape: {self._buffers[distr_name].shape} with mean {self._buffers[distr_name].mean():.4f} and std {self._buffers[distr_name].std():.4f}")
+
+     def compute_p_value(self, text, domain: str):
+         """
+         Compute the p-value for the given text using the null distribution of the specified domain.
+
+         Args:
+             text: Input text to compute the score for
+             domain: Domain name to use for the null distribution
+         """
+         tokenized = self.scoring_tokenizer(
+             text,
+             return_tensors="pt",
+             padding=True,
+             return_token_type_ids=False
+         ).to(self.device)
+         labels = tokenized.input_ids[:, 1:]
+
+         with torch.inference_mode():
+             crit, _, _ = self.compute_stats(tokenized, labels, training_module=False)
+
+         # Fetch the null distribution for the given domain
+         distr_name = f"null_distr_{domain}"
+         if not hasattr(self, distr_name):
+             raise ValueError(
+                 f"No null distribution found for domain '{domain}'. "
+                 f"Available domains: {self.get_available_domains()}"
+             )
+         null_distr = getattr(self, distr_name)
+         p_value = self.empirical_p_value(crit, null_distr)
+
+         return crit, p_value
+
+     def empirical_p_value(self, crit: torch.Tensor, null_distr: torch.Tensor):
+         # Compute p-value: (count + 1) / (total + 1); assumes null_distr is sorted in ascending order
+         total = null_distr.numel()
+         # count = (null_distr >= crit.unsqueeze(-1)).float().sum()  # slow computation
+         count = total - torch.searchsorted(null_distr, crit, right=False)[0]
+         p_value = (count + 1.0) / (total + 1.0)
+         # print(f"p_value (slow): {p_value} & p_value (fast): {(count + 1) / (total + 1)}", )
+         return p_value
+
+     def get_available_domains(self):
+         """
+         Get the list of all available domains with null distributions.
+         """
+         domains = []
+         for buffer_name in self._buffers.keys():
+             if buffer_name.startswith("null_distr_"):
+                 domain = buffer_name.replace("null_distr_", "")
+                 domains.append(domain)
+         return domains
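
End to end, the class is driven exactly as `src/app.py` below does: restore the checkpoint, select the criterion, then score a text against one domain's null distribution. The p-value is the standard empirical one, (count + 1) / (total + 1), where count is the number of null draws at least as large as the observed statistic; `torch.searchsorted` computes this in O(log n) but requires each stored `null_distr` to be sorted in ascending order. A minimal usage sketch (paths and device are assumptions, mirroring MODEL_CONFIG in src/app.py):

from FineTune.model import ComputeStat

# Hypothetical checkpoint path and device
model = ComputeStat.from_pretrained(
    "./src/FineTune/ckpt/", "gemma-1b", device="cpu", cache_dir="./models"
)
model.set_criterion_fn("mean")

crit, p_value = model.compute_p_value("Text to be checked.", "General")
print(f"statistic={crit.item():.4f}, p-value={p_value.item():.4f}")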
src/app.py ADDED
@@ -0,0 +1,406 @@
+ import os
+ from pathlib import Path
+
+ # -----------------
+ # Get the directory where app.py is located
+ # -----------------
+ APP_DIR = Path(__file__).parent.resolve()
+
+ # -----------------
+ # Fix Streamlit Permission Issues
+ # -----------------
+ # On HF Spaces, point the Streamlit config directory at a writable location
+ if os.environ.get('SPACE_ID'):
+     os.environ['STREAMLIT_SERVER_FILE_WATCHER_TYPE'] = 'none'
+     os.environ['STREAMLIT_BROWSER_GATHER_USAGE_STATS'] = 'false'
+     os.environ['STREAMLIT_SERVER_ENABLE_CORS'] = 'false'
+
+     # Point the HuggingFace caches at a writable directory
+     CACHE_DIR = '/tmp/huggingface_cache'
+     os.makedirs(CACHE_DIR, exist_ok=True)
+
+     os.environ['HF_HOME'] = CACHE_DIR
+     os.environ['TRANSFORMERS_CACHE'] = CACHE_DIR
+     os.environ['HF_DATASETS_CACHE'] = CACHE_DIR
+     os.environ['HUGGINGFACE_HUB_CACHE'] = CACHE_DIR
+
+     # Set up a writable config directory
+     streamlit_dir = Path('/tmp/.streamlit')
+     streamlit_dir.mkdir(exist_ok=True, parents=True)
+     # os.environ['STREAMLIT_HOME'] = '/tmp/.streamlit'
+
+
+ import streamlit as st
+ from FineTune.model import ComputeStat
+ import time
+
+ # -----------------
+ # Page Configuration
+ # -----------------
+ st.set_page_config(
+     page_title="AdaDetectGPT",
+     page_icon="🔍",
+     layout="wide"
+ )
+
+ # -----------------
+ # Model Loading (Cached)
+ # -----------------
+ @st.cache_resource
+ def load_model(from_pretrained, base_model, cache_dir, device):
+     """
+     Load and cache the model to avoid reloading on every user interaction.
+     This function runs only once when the app starts or when parameters change.
+     """
+     # is_hf_space = os.environ.get('SPACE_ID') is not None
+     is_hf_space = False
+     if is_hf_space:
+         cache_dir = '/tmp/huggingface_cache'
+         os.makedirs(cache_dir, exist_ok=True)
+
+         device = 'cpu'
+         print("Using **CPU** now!")
+
+     # Get the HF token (used to access gated models)
+     hf_token = os.environ.get('HF_TOKEN', None)
+     if hf_token:
+         # Authentication can also be done via login
+         try:
+             from huggingface_hub import login
+             login(token=hf_token)
+             print("✅ Successfully authenticated with HF token")
+         except Exception as e:
+             print(f"⚠️ HF login warning: {e}")
+
+     # New: download the model from the HF Hub
+     # Check whether this is an HF Hub path (format: username/repo-name)
+     is_hf_hub = '/' in from_pretrained and not from_pretrained.startswith('.')
+     if is_hf_hub:
+         from huggingface_hub import snapshot_download
+         print(f"📥 Downloading model from HuggingFace Hub: {from_pretrained}")
+         try:
+             # Download the whole repository locally
+             local_model_path = snapshot_download(
+                 repo_id=from_pretrained,
+                 cache_dir=cache_dir,
+                 token=hf_token,
+                 repo_type="model"
+             )
+             print(f"✅ Model downloaded to: {local_model_path}")
+             # Use the downloaded local path
+             from_pretrained = local_model_path
+         except Exception as e:
+             print(f"❌ Failed to download model: {e}")
+             raise
+     else:
+         cache_dir = cache_dir
+
+     with st.spinner("🔄 Loading model... This may take a moment on first launch."):
+         model = ComputeStat.from_pretrained(
+             from_pretrained,
+             base_model,
+             device=device,
+             cache_dir=cache_dir
+         )
+         model.set_criterion_fn('mean')
+     return model
+
+ # -----------------
+ # Result Feedback Module Import
+ # -----------------
+ from feedback import FeedbackManager
+
+ # Initialize Feedback Manager with HF dataset
+ # Replace 'your-username/your-dataset-name' with your actual HF dataset repo ID
+ # Make sure HF_TOKEN is set in the environment to access the private dataset
+ FEEDBACK_DATASET_ID = os.environ.get('FEEDBACK_DATASET_ID', 'mamba413/user-feedback')
+ feedback_manager = FeedbackManager(
+     dataset_repo_id=FEEDBACK_DATASET_ID,
+     hf_token=os.environ.get('HF_TOKEN'),
+     local_backup=False if os.environ.get('SPACE_ID') else True  # keep a local backup
+ )
+
+ # -----------------
+ # Configuration
+ # -----------------
+ MODEL_CONFIG = {
+     'from_pretrained': './src/FineTune/ckpt/',
+     'base_model': 'gemma-1b',
+     'cache_dir': '../cache',
+     'device': 'cpu' if os.environ.get('SPACE_ID') else 'mps',
+     # 'device': 'cuda',
+ }
+
+ DOMAINS = [
+     "General",
+     "Academia",
+     "Finance",
+     "Government",
+     "Knowledge",
+     "Legislation",
+     "Medicine",
+     "News",
+     "UserReview"
+ ]
+
+ # Load model once at startup
+ try:
+     model = load_model(
+         MODEL_CONFIG['from_pretrained'],
+         MODEL_CONFIG['base_model'],
+         MODEL_CONFIG['cache_dir'],
+         MODEL_CONFIG['device']
+     )
+     model_loaded = True
+ except Exception as e:
+     model_loaded = False
+     error_message = str(e)
+
+ # =========== 🆕 session_state ===========
+ if 'last_detection' not in st.session_state:
+     st.session_state.last_detection = None
+ if 'feedback_given' not in st.session_state:
+     st.session_state.feedback_given = False
+ # ========================================
+
+ # -----------------
+ # Streamlit Layout
+ # -----------------
+ _, col0, _ = st.columns((1, 5, 1))
+ with col0:
+     st.markdown(
+         "<h1 style='text-align: center; color: #0072C3;'>AdaDetectGPT: Adaptive Detection of LLM-Generated Text</h1>",
+         unsafe_allow_html=True,
+     )
+
+ st.markdown(
+     """Paste the text to be detected below and click the 'Detect' button to get the p-value. Selecting a more suitable domain may improve detection."""
+ )
+
+ # Display model loading status
+ if not model_loaded:
+     st.error(f"❌ Failed to load model: {error_message}")
+     st.stop()
+
+ # -----------------
+ # Main Interface
+ # -----------------
+ # --- Two columns: Input text & button | Result displays ---
+ col1, col2 = st.columns((1, 1))
+
+ with col1:
+     text_input = st.text_area(
+         label="",
+         placeholder="Paste your text to be detected here",
+         help="Typically, longer text yields a more reliable result.",
+         height=200,
+     )
+
+     detect_clicked = st.button("Detect", type="primary", use_container_width=True)
+
+     selected_domain = st.selectbox(
+         label="⚙️ Domain (Optional)",
+         options=DOMAINS,
+         index=0,  # Default to General
+         help="💡 **Tip:** Select the domain that best matches your text to improve detection accuracy. The default, 'General', considers all domains."
+     )
+
+ with col2:
+     statistics_ph = st.empty()
+     statistics_ph.text_input(
+         label="Statistics",
+         value="",
+         disabled=True,
+         help="Statistics will appear here after clicking the Detect button.",
+     )
+
+     pvalue_ph = st.empty()
+     pvalue_ph.text_input(
+         label="p-value",
+         value="",
+         disabled=True,
+         help="p-value will appear here after clicking the Detect button.",
+     )
+
+ # -----------------
+ # Detection Logic
+ # -----------------
+ if detect_clicked:
+     if not text_input.strip():
+         st.warning("⚠️ Please enter some text before detecting.")
+     else:
+         # ========== Reset feedback state ==========
+         st.session_state.feedback_given = False
+         # ==========================================
+
+         # Start timing to decide whether to show progress bar
+         start_time = time.time()
+
+         # Use a placeholder for dynamic updates
+         status_placeholder = st.empty()
+         result_placeholder = st.empty()
+
+         try:
+             # Show spinner for quick operations (< 2 seconds expected)
+             with status_placeholder:
+                 with st.spinner(f"🔍 Analyzing text in {selected_domain} domain..."):
+                     # Perform inference
+                     crit, p_value = model.compute_p_value(text_input, selected_domain)
+                     elapsed_time = time.time() - start_time
+
+             # Convert tensors to Python scalars if needed
+             if hasattr(crit, 'item'):
+                 crit = crit.item()
+             if hasattr(p_value, 'item'):
+                 p_value = p_value.item()
+
+             # Clear status and show results
+             status_placeholder.empty()
+
+             # ========== 🆕 Save the detection result to session_state ==========
+             st.session_state.last_detection = {
+                 'text': text_input,
+                 'domain': selected_domain,
+                 'statistics': crit,
+                 'p_value': p_value,
+                 'elapsed_time': elapsed_time
+             }
+
+             # Update score displays
+             with col2:
+                 statistics_ph.text_input(
+                     label="Statistics",
+                     value=f"{crit:.6f}",
+                     disabled=True,
+                     help="Detection statistics will appear here after clicking Detect.",
+                 )
+
+                 pvalue_ph.text_input(
+                     label="p-value",
+                     value=f"{p_value:.6f}",
+                     disabled=True,
+                     help="p-value will appear here after clicking Detect.",
+                 )
+
+             st.info(
+                 """
+                 **📊 p-value:**
+                 - **Lower p-value** (closer to 0) indicates text is **more likely AI-generated**
+                 - **Higher p-value** (closer to 1) indicates text is **more likely human-written**
+                 - Generally, p-value < 0.05 suggests the text may be LLM-generated
+                 """,
+                 icon="💡"
+             )
+
+             # ========== 🆕 Feedback buttons (moved here for better UX) ==========
+             st.markdown("**📝 Result Feedback**: Does this detection result meet your expectations?")
+
+             current_text = text_input
+             current_domain = selected_domain
+             current_statistics = crit
+             current_pvalue = p_value
+             feedback_col1, feedback_col2 = st.columns(2)
+
+             with feedback_col1:
+                 if st.button("✅ Expected", use_container_width=True, type="secondary", key=f"expected_btn_{hash(text_input[:50])}"):
+                     try:
+                         success, message = feedback_manager.save_feedback(
+                             current_text,
+                             current_domain,
+                             current_statistics,
+                             current_pvalue,
+                             'expected'
+                         )
+                         if success:
+                             st.success("✅ Thank you for your feedback!")
+                             st.caption(f"💾 {message}")
+                         else:
+                             st.error(f"Failed to save feedback: {message}")
+                     except Exception as e:
+                         st.error(f"Failed to save feedback: {str(e)}")
+                         import traceback
+                         st.code(traceback.format_exc())
+
+             with feedback_col2:
+                 if st.button("❌ Unexpected", use_container_width=True, type="secondary", key=f"unexpected_btn_{hash(text_input[:50])}"):
+                     try:
+                         success, message = feedback_manager.save_feedback(
+                             current_text,
+                             current_domain,
+                             current_statistics,
+                             current_pvalue,
+                             'unexpected'
+                         )
+                         if success:
+                             st.warning("❌ Feedback recorded! This will help us improve.")
+                             st.caption(f"💾 {message}")
+                         else:
+                             st.error(f"Failed to save feedback: {message}")
+                     except Exception as e:
+                         st.error(f"Failed to save feedback: {str(e)}")
+                         import traceback
+                         st.code(traceback.format_exc())
+
+             if st.session_state.feedback_given:
+                 st.success("✅ Feedback submitted successfully!")
+             # ============================================
+
+             # Show detailed results
+             with result_placeholder:
+                 st.caption(f"⏱️ Processing time: {elapsed_time:.2f} seconds")
+
+         except Exception as e:
+             status_placeholder.empty()
+             st.error(f"❌ Error during detection: {str(e)}")
+             st.exception(e)
+
+ # ========== 🆕 Citation and paper reference section ==========
+ # st.markdown("---")
+ # st.markdown(
+ #     """
+ #     📄 **Citation** If you find this tool useful, please cite our paper: **[AdaDetectGPT: Adaptive Detection of LLM-Generated Text with Statistical Guarantees](https://arxiv.org/abs/2510.01268)**
+ #     """
+ # )
+ # with st.expander("📋 BibTeX Citation"):
+ #     st.code(
+ #         """
+ #         @inproceedings{zhou2024adadetectgpt,
+ #             title={AdaDetectGPT: Adaptive Detection of LLM-Generated Text with Statistical Guarantees},
+ #             author={Hongyi Zhou and Jin Zhu and Pingfan Su and Kai Ye and Ying Yang and Shakeel A O B Gavioli-Akilagun and Chengchun Shi},
+ #             booktitle={The Thirty-Ninth Annual Conference on Neural Information Processing Systems (Accepted)},
+ #             year={2025},
+ #         }
+ #         """,
+ #         language="bibtex"
+ #     )
+
+ # -----------------
+ # Footer
+ # -----------------
+ st.markdown(
+     """
+     <style>
+     .footer {
+         position: fixed;
+         left: 0;
+         bottom: 0;
+         width: 100%;
+         background-color: white;
+         color: gray;
+         text-align: center;
+         padding: 10px;
+         border-top: 1px solid #e0e0e0;
+         z-index: 999;
+     }
+
+     /* Add padding to main content to prevent overlap with fixed footer */
+     .main .block-container {
+         padding-bottom: 60px;
+     }
+     </style>
+     <div class='footer'>
+         <small>Powered by Adaptive LLM Text Detection | For research purposes only</small>
+     </div>
+     """,
+     unsafe_allow_html=True
+ )
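
One caveat in the flow above: Streamlit reruns the whole script on every widget interaction, so feedback buttons rendered inside the `detect_clicked` branch disappear on the rerun a click triggers. The `last_detection`/`feedback_given` session-state fields suggest this is anticipated; a minimal sketch of drawing the feedback controls from session state on every run instead (hypothetical, not part of this commit; `feedback_manager` is the module-level object defined above):

import streamlit as st

# Hypothetical: render feedback controls whenever a detection result exists,
# independent of which event triggered the current rerun.
det = st.session_state.get("last_detection")
if det and not st.session_state.get("feedback_given", False):
    col_a, col_b = st.columns(2)
    if col_a.button("✅ Expected"):
        feedback_manager.save_feedback(det["text"], det["domain"],
                                       det["statistics"], det["p_value"], "expected")
        st.session_state.feedback_given = True
    if col_b.button("❌ Unexpected"):
        feedback_manager.save_feedback(det["text"], det["domain"],
                                       det["statistics"], det["p_value"], "unexpected")
        st.session_state.feedback_given = True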
src/feedback.py ADDED
@@ -0,0 +1,272 @@
+ import os
+ import json
+ from datetime import datetime
+ from pathlib import Path
+ from huggingface_hub import HfApi, upload_file, hf_hub_download
+ from typing import Optional
+ import pandas as pd
+
+ class FeedbackManager:
+     """Manage user feedback, with support for saving to a private Hugging Face dataset"""
+
+     def __init__(
+         self,
+         dataset_repo_id: str = None,
+         hf_token: str = None,
+         local_backup: bool = True
+     ):
+         """
+         Initialize the FeedbackManager
+
+         Args:
+             dataset_repo_id: Hugging Face dataset repo ID (format: username/dataset-name)
+             hf_token: Hugging Face API token (for private datasets)
+             local_backup: whether to keep a local backup
+         """
+         self.dataset_repo_id = dataset_repo_id
+         self.hf_token = hf_token or os.environ.get('HF_TOKEN')
+         self.local_backup = local_backup
+
+         # Set the local storage path first: _ensure_dataset_exists below needs self.local_dir
+         if os.environ.get('SPACE_ID'):
+             self.local_dir = Path('/tmp/feedback_data')
+         else:
+             self.local_dir = Path(__file__).parent / 'feedback_data'
+
+         self.local_dir.mkdir(exist_ok=True, parents=True)
+         self.local_file = self.local_dir / 'user_feedback.json'
+
+         # Initialize the HF API
+         if self.dataset_repo_id and self.hf_token:
+             self.api = HfApi(token=self.hf_token)
+             # Make sure the dataset exists
+             self._ensure_dataset_exists()
+         else:
+             self.api = None
+             print("⚠️ No HF dataset configured. Will only save locally.")
+
+     def _ensure_dataset_exists(self):
+         """Ensure the HF dataset exists; create it if it does not"""
+         try:
+             from huggingface_hub import create_repo
+             # Try to create the dataset repo (raises an exception if it already exists)
+             try:
+                 create_repo(
+                     repo_id=self.dataset_repo_id,
+                     token=self.hf_token,
+                     private=True,
+                     repo_type="dataset"
+                 )
+                 print(f"✅ Created new private dataset: {self.dataset_repo_id}")
+
+                 # Create the initial README.md
+                 readme_content = f"""---
+ license: mit
+ ---
+
+ # AdaDetectGPT User Feedback Dataset
+
+ This dataset contains user feedback from the AdaDetectGPT detection system.
+
+ ## Data Format
+
+ Each entry contains:
+ - `timestamp`: When the feedback was submitted
+ - `text`: The text that was analyzed
+ - `domain`: The domain selected for analysis
+ - `statistics`: The computed statistics value
+ - `p_value`: The p-value from the detection
+ - `feedback`: User feedback (expected/unexpected)
+ """
+                 readme_file = self.local_dir / 'README.md'
+                 readme_file.write_text(readme_content)
+
+                 upload_file(
+                     path_or_fileobj=str(readme_file),
+                     path_in_repo="README.md",
+                     repo_id=self.dataset_repo_id,
+                     repo_type="dataset",
+                     token=self.hf_token
+                 )
+
+             except Exception as e:
+                 if "already exists" not in str(e):
+                     print(f"⚠️ Dataset check: {e}")
+
+         except Exception as e:
+             print(f"⚠️ Could not verify dataset: {e}")
+
+     def _load_existing_data(self) -> list:
+         """Load existing data from the HF dataset"""
+         existing_data = []
+
+         # First try to load from the HF dataset
+         if self.api and self.dataset_repo_id:
+             try:
+                 # Download the existing feedback file
+                 local_path = hf_hub_download(
+                     repo_id=self.dataset_repo_id,
+                     filename="feedback_data.json",
+                     repo_type="dataset",
+                     token=self.hf_token,
+                     cache_dir=str(self.local_dir)
+                 )
+                 with open(local_path, 'r', encoding='utf-8') as f:
+                     existing_data = json.load(f)
+                 print(f"📥 Loaded {len(existing_data)} existing feedback entries from HF")
+             except Exception as e:
+                 # The file may not exist yet
+                 if "404" not in str(e):
+                     print(f"⚠️ Could not load from HF dataset: {e}")
+
+         # If loading from HF fails, try the local file
+         if not existing_data and self.local_file.exists():
+             try:
+                 with open(self.local_file, 'r', encoding='utf-8') as f:
+                     existing_data = json.load(f)
+                 print(f"📥 Loaded {len(existing_data)} existing feedback entries from local backup")
+             except Exception as e:
+                 print(f"⚠️ Could not load local data: {e}")
+
+         return existing_data
+
+     def save_feedback(
+         self,
+         text: str,
+         domain: str,
+         statistics: float,
+         p_value: float,
+         feedback_type: str
+     ) -> tuple[bool, str]:
+         """
+         Save user feedback to the HF dataset and/or a local file
+
+         Args:
+             text: the text that was analyzed
+             domain: the selected domain
+             statistics: the statistic value
+             p_value: the p-value
+             feedback_type: 'expected' or 'unexpected'
+
+         Returns:
+             (success, message): whether saving succeeded, plus a status message
+         """
+         # Prepare the feedback entry
+         feedback_entry = {
+             'timestamp': datetime.now().isoformat(),
+             'text': text,
+             'domain': domain,
+             'statistics': float(statistics),
+             'p_value': float(p_value),
+             'feedback': feedback_type
+         }
+
+         # Load existing data
+         feedback_data = self._load_existing_data()
+
+         # Append the new feedback
+         feedback_data.append(feedback_entry)
+
+         success = False
+         messages = []
+
+         # Always write the merged JSON locally: it doubles as the backup and
+         # as the source file that upload_file reads below
+         try:
+             with open(self.local_file, 'w', encoding='utf-8') as f:
+                 json.dump(feedback_data, f, ensure_ascii=False, indent=2)
+             if self.local_backup:
+                 messages.append("💾 Local backup saved")
+                 success = True
+         except Exception as e:
+             messages.append(f"❌ Local save failed: {e}")
+
+         # Upload to the HF dataset
+         if self.api and self.dataset_repo_id:
+             try:
+                 # Save as a JSON file
+                 upload_file(
+                     path_or_fileobj=str(self.local_file),
+                     path_in_repo="feedback_data.json",
+                     repo_id=self.dataset_repo_id,
+                     repo_type="dataset",
+                     token=self.hf_token,
+                     commit_message=f"Add feedback: {feedback_type} at {feedback_entry['timestamp']}"
+                 )
+
+                 # Also create/update a CSV version (easier to browse)
+                 df = pd.DataFrame(feedback_data)
+                 csv_file = self.local_dir / 'feedback_data.csv'
+                 df.to_csv(csv_file, index=False)
+
+                 upload_file(
+                     path_or_fileobj=str(csv_file),
+                     path_in_repo="feedback_data.csv",
+                     repo_id=self.dataset_repo_id,
+                     repo_type="dataset",
+                     token=self.hf_token,
+                     commit_message=f"Update CSV: {len(feedback_data)} total entries"
+                 )
+
+                 messages.append(f"☁️ Uploaded to HF dataset: {self.dataset_repo_id}")
+                 success = True
+
+             except Exception as e:
+                 messages.append(f"⚠️ HF upload failed: {e}")
+                 # If the HF upload fails but the local save succeeded, still report success
+                 success = success or self.local_backup
+
+         return success, " | ".join(messages)
+
+     def get_feedback_stats(self) -> dict:
+         """Get summary statistics over all feedback"""
+         feedback_data = self._load_existing_data()
+
+         if not feedback_data:
+             return {
+                 'total_count': 0,
+                 'expected_count': 0,
+                 'unexpected_count': 0,
+                 'domains': {}
+             }
+
+         df = pd.DataFrame(feedback_data)
+         stats = {
+             'total_count': len(df),
+             'expected_count': len(df[df['feedback'] == 'expected']),
+             'unexpected_count': len(df[df['feedback'] == 'unexpected']),
+             'domains': df['domain'].value_counts().to_dict() if 'domain' in df.columns else {}
+         }
+
+         return stats
+
+
+ # Convenience functions (for backward compatibility)
+ _default_manager: Optional[FeedbackManager] = None
+
+ def init_feedback_manager(dataset_repo_id: str = None, hf_token: str = None):
+     """Initialize the global feedback manager"""
+     global _default_manager
+     _default_manager = FeedbackManager(
+         dataset_repo_id=dataset_repo_id,
+         hf_token=hf_token
+     )
+     return _default_manager
+
+ def save_feedback(text: str, domain: str, statistics: float, p_value: float, feedback_type: str):
+     """
+     Save feedback using the default manager (for backward compatibility)
+     """
+     global _default_manager
+     if _default_manager is None:
+         # Read the configuration from environment variables
+         dataset_repo_id = os.environ.get('FEEDBACK_DATASET_ID')
+         _default_manager = FeedbackManager(dataset_repo_id=dataset_repo_id)
+
+     success, message = _default_manager.save_feedback(
+         text, domain, statistics, p_value, feedback_type
+     )
+
+     if not success:
+         raise Exception(f"Failed to save feedback: {message}")
+
+     return message
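
For completeness, a standalone usage sketch of `FeedbackManager` (the repo ID and token are placeholders; `src/app.py` above wires it up the same way):

import os
from feedback import FeedbackManager

manager = FeedbackManager(
    dataset_repo_id=os.environ.get("FEEDBACK_DATASET_ID"),  # e.g. "username/dataset-name"
    hf_token=os.environ.get("HF_TOKEN"),
    local_backup=True,
)
ok, msg = manager.save_feedback("example text", "General", 1.23, 0.04, "expected")
print(ok, msg)
print(manager.get_feedback_stats())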