holcombzv committed on
Commit
de2fb9f
·
0 Parent(s):

Initial commit for model LFS.

Browse files
.gitattributes ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ *.keras filter=lfs diff=lfs merge=lfs -text
2
+ *.pickle filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # Virtual environments
7
+ venv/
8
+ ENV/
9
+ env/
10
+ .venv/
11
+
12
+ # Distribution / packaging
13
+ .Python
14
+ build/
15
+ develop-eggs/
16
+ dist/
17
+ downloads/
18
+ eggs/
19
+ .eggs/
20
+ lib/
21
+ lib64/
22
+ parts/
23
+ sdist/
24
+ var/
25
+ wheels/
26
+ pip-wheel-metadata/
27
+ share/python-wheels/
28
+ *.egg-info/
29
+ .installed.cfg
30
+ *.egg
31
+ MANIFEST
32
+
33
+ # PyInstaller
34
+ *.manifest
35
+ *.spec
36
+
37
+ # Unit test / coverage reports
38
+ htmlcov/
39
+ .tox/
40
+ .nox/
41
+ .coverage
42
+ .coverage.*
43
+ .cache
44
+ nosetests.xml
45
+ coverage.xml
46
+ *.cover
47
+ *.py,cover
48
+ .hypothesis/
49
+ .pytest_cache/
50
+
51
+ # Jupyter Notebook checkpoints
52
+ .ipynb_checkpoints
53
+
54
+ # IDE settings
55
+ .vscode/
56
+ .idea/
57
+ *.sublime-workspace
58
+ *.sublime-project
59
+
60
+ # MyPy / type checker
61
+ .mypy_cache/
62
+ .dmypy.json
63
+ dmypy.json
64
+
65
+ # Pyre type checker
66
+ .pyre/
67
+
68
+ # Logs
69
+ *.log
70
+
71
+ # Local environment files
72
+ .env
73
+ .env.*
app.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from pydantic import BaseModel
import tensorflow as tf
import pickle

# Explicit imports instead of the original star import, so the names this
# module actually uses are visible.
from functions import clean_text, evaluate_text

# Icon links for attribution: <a href="https://www.flaticon.com/free-icons/detective" title="detective icons">Detective icons created by Freepik - Flaticon</a>

# Load the trained classifier and its matching tokenizer once at startup.
# NOTE(review): pickle.load executes arbitrary code if the file is untrusted;
# acceptable here only because the tokenizer ships with the app.
model = tf.keras.models.load_model('model/fact_checker_trained.keras')
with open('model/fact_checker_tokenizer.pickle', 'rb') as handle:
    tokenizer = pickle.load(handle)

app = FastAPI()

# Enable CORS so the frontend/extension can call this API.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # or ["chrome-extension://<your-extension-id>"]
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


# Input from frontend
class TextRequest(BaseModel):
    text: str


# Model output
class PredictionResponse(BaseModel):
    score: float
    label: str


@app.post("/predict", response_model=PredictionResponse)
async def predict(request: TextRequest) -> PredictionResponse:
    """Clean the submitted text, score it with the model, and label it.

    Returns the raw model score plus "Real" when the score exceeds the
    0.5 decision threshold, otherwise "Fake".
    """
    text_clean = clean_text(request.text)
    score = evaluate_text(text_clean, model, tokenizer)

    label = "Real" if score > 0.5 else "Fake"

    # Return the declared response model directly instead of a dict with
    # inconsistently quoted keys ("score" vs 'label').
    return PredictionResponse(score=score, label=label)
extension/background.js ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Register a right-click context-menu entry when the extension is installed.
chrome.runtime.onInstalled.addListener(() => {
  chrome.contextMenus.create({
    id: "fakeNewsCheck",
    title: "Check Fake News",
    contexts: ["selection"] // only show when text is selected
  });
});

chrome.contextMenus.onClicked.addListener((info, tab) => {
  if (info.menuItemId === "fakeNewsCheck" && info.selectionText) {
    // Placeholder: random score until the backend /predict call is wired in.
    const fakeScore = Math.random();
    // Consistency fix: the backend (app.py) labels score > 0.5 as "Real";
    // the original placeholder inverted this and showed "Fake" for high scores.
    const verdict = fakeScore > 0.5 ? "Real" : "Fake";

    // Inject a small script into the page to display the result.
    chrome.scripting.executeScript({
      target: { tabId: tab.id },
      func: (score, verdict) => {
        alert(`Score: ${(score * 100).toFixed(1)}%\nVerdict: ${verdict}`);
      },
      args: [fakeScore, verdict]
    });
  }
});
extension/icons/icon128.png ADDED
extension/icons/icon16.png ADDED
extension/icons/icon48.png ADDED
extension/manifest.json ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "manifest_version": 3,
3
+ "name": "Fake News Lie Detector",
4
+ "version": "0.0",
5
+ "description": "Check if an article or snippet might be fake news",
6
+ "background": {
7
+ "service_worker": "background.js"
8
+ },
9
+ "action": {
10
+ "default_popup": "popup.html",
11
+ "default_title": "Fake News Checker",
12
+ "default_icon": {
13
+ "16": "icons/icon16.png",
14
+ "48": "icons/icon48.png",
15
+ "128": "icons/icon128.png"
16
+ }
17
+ },
18
+ "permissions": ["contextMenus", "scripting", "activeTab"]
19
+ }
extension/popup.html ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!DOCTYPE html>
<!-- Popup UI for the Fake News Checker extension; logic lives in popup.js. -->
<!-- Fixes vs. original: declared charset and document language. -->
<html lang="en">
<head>
  <meta charset="utf-8">
  <title>Fake News Checker</title>
  <style>
    body { font-family: sans-serif; width: 250px; padding: 10px; }
    textarea { width: 100%; height: 100px; }
    button { margin-top: 10px; width: 100%; }
    #result { margin-top: 10px; font-weight: bold; }
  </style>
</head>
<body>
  <h3>Fake News Checker</h3>
  <textarea id="inputText" placeholder="Paste article text here..."></textarea>
  <button id="checkButton">Check</button>
  <div id="result"></div>
  <script src="popup.js"></script>
</body>
</html>
extension/popup.js ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
// Handle the popup's "Check" button: validate input and show a placeholder
// prediction until the backend /predict call is wired in.
document.getElementById("checkButton").addEventListener("click", () => {
  const text = document.getElementById("inputText").value;
  const resultEl = document.getElementById("result");

  if (!text.trim()) {
    resultEl.innerText = "Please enter some text.";
    return;
  }

  // Simulated prediction result for now.
  const fakeScore = Math.random(); // placeholder in [0, 1)
  // Match the backend convention (app.py): score > 0.5 => "Real".
  const verdict = fakeScore > 0.5 ? "Real" : "Fake";

  // Bug fixes vs. original: scale the 0-1 score by 100 before formatting it
  // as a percentage, and actually display the verdict (it was previously a
  // dead expression statement that was never assigned to anything).
  resultEl.innerText = `Score: ${(fakeScore * 100).toFixed(1)}%\nVerdict: ${verdict}`;
});
functions.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import tensorflow as tf
2
+
3
def clean_text(text):
    """Normalize raw article text for the fact-checker model.

    Lowercases the text, drops a leading "Source -" style prefix (everything
    before the first hyphen), and strips every character that is not
    alphanumeric or whitespace.

    Args:
        text: Raw article text.

    Returns:
        The cleaned, lowercased text.
    """
    text = text.lower()
    # Scraped headlines often look like "Source - headline"; keep only the
    # part after the first hyphen.
    if '-' in text:
        text = text.split('-', 1)[1].strip()
    text = ''.join(char for char in text if char.isalnum() or char.isspace())
    # Bug fix: the original built the cleaned string but never returned it,
    # so every caller received None.
    return text
8
+
9
# Use model on scraped text

def evaluate_text(text, model, tokenizer, max_len=1000):
    """Score cleaned text with the trained fact-checker model.

    Args:
        text: Cleaned article text (see ``clean_text``).
        model: Trained Keras classifier; assumed to output a single sigmoid
            score per sample — TODO confirm against the saved model.
        tokenizer: Fitted Keras tokenizer matching the model's vocabulary.
        max_len: Sequence length the input is padded/truncated to; must match
            the length the model was trained with.

    Returns:
        The model's score as a plain float (higher = more likely "Real"
        per the caller in app.py).
    """
    sequence = tokenizer.texts_to_sequences([text])  # Convert text to sequence
    padded_sequence = tf.keras.preprocessing.sequence.pad_sequences(sequence, maxlen=max_len)  # Pad to max_len

    # Bug fix: predict() returns a (1, 1) array; index into it explicitly.
    # float() on a multi-element-shaped array was deprecated and now raises
    # a TypeError on NumPy >= 1.25.
    result = float(model.predict(padded_sequence)[0][0])
    return result
17
+
model/fact_checker_tokenizer.pickle ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1ae2140f53084a4ee054aa2c085dd56d3acafc87b7ddd633ea1cdda2fa8520de
3
+ size 9815919
model/fact_checker_trained.keras ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:eac58e5fb21c6c80b1568d4f2b5c345faac6bab02e62eea2c470f78024c36cc5
3
+ size 21633699
requirements.txt ADDED
Binary file (1.84 kB). View file