mohamed99sayed commited on
Commit
6ddb4c0
·
1 Parent(s): cab9c2b
Files changed (3) hide show
  1. app.py +23 -6
  2. src/analyzers.py +46 -0
  3. src/ensemble.py +3 -3
app.py CHANGED
@@ -1,6 +1,6 @@
1
  import streamlit as st
2
  from src.preprocessing import prepare_image
3
- from src.analyzers import run_fft, run_ela, run_resnet, run_siglip
4
  from src.ensemble import calculate_final_verdict
5
 
6
  st.set_page_config(page_title="AI Image Detector", layout="centered", page_icon="👁️")
@@ -34,14 +34,16 @@ st.markdown("<h1 style='text-align: center; color: #f0f2f6; font-weight: 800;'>
34
  with st.sidebar:
35
  st.header("Methodology")
36
  st.write('''
37
- This tool uses a 4-method ensemble approach to detect AI-generated images vs. real images:
38
 
39
  1. **FFT (Fast Fourier Transform):** Analyzes the image in the frequency domain to find repeating artifacts common in AI generation (like checkerboard patterns).
40
  2. **ELA (Error Level Analysis):** Highlights areas of an image that are compressed at different quality levels, which can indicate tampering or non-uniform synthesis.
41
- 3. **ResNet Model:** Uses `umm-maybe/AI-image-detector` to classify AI vs. Real features using a deep neural network approach.
42
- 4. **SigLIP Model:** Uses `Ateeqq/ai-vs-human-image-detector` for an additional vision-language-based confidence score.
 
 
43
 
44
- **The Final Judge:** The final verdict averages the ResNet and SigLIP scores.
45
  ''')
46
  st.markdown("---")
47
  st.write("Developed for transparency in AI generation.")
@@ -92,11 +94,26 @@ if uploaded_file is not None:
92
  siglip_score = run_siglip(rgb_img)
93
  st.metric(label="SigLIP Pipeline ('Fake' Confidence)", value=f"{siglip_score * 100:.2f}%")
94
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
  st.markdown("---")
96
  st.header("The Final Judge")
97
 
98
  with st.spinner("Combining scores and formulating final verdict..."):
99
- verdict, confidence = calculate_final_verdict(resnet_score, siglip_score)
100
 
101
  if verdict == "FAKE":
102
  st.error(f"### Final Verdict: {verdict}")
 
1
  import streamlit as st
2
  from src.preprocessing import prepare_image
3
+ from src.analyzers import run_fft, run_ela, run_resnet, run_siglip, run_sdxl_detector, run_deepfake_detector
4
  from src.ensemble import calculate_final_verdict
5
 
6
  st.set_page_config(page_title="AI Image Detector", layout="centered", page_icon="👁️")
 
34
  with st.sidebar:
35
  st.header("Methodology")
36
  st.write('''
37
+ This tool uses a 6-method ensemble approach to detect AI-generated images vs. real images:
38
 
39
  1. **FFT (Fast Fourier Transform):** Analyzes the image in the frequency domain to find repeating artifacts common in AI generation (like checkerboard patterns).
40
  2. **ELA (Error Level Analysis):** Highlights areas of an image that are compressed at different quality levels, which can indicate tampering or non-uniform synthesis.
41
+ 3. **ResNet Model:** Uses `umm-maybe/AI-image-detector`.
42
+ 4. **SigLIP Model:** Uses `Ateeqq/ai-vs-human-image-detector`.
43
+ 5. **SDXL Detector:** Uses `Organika/sdxl-detector` to catch modern diffusion artifacts.
44
+ 6. **DeepFake Detector (ViT):** Uses `prithivMLmods/Deep-Fake-Detector-v2-Model` for state-of-the-art vision transformer detection.
45
 
46
+ **The Final Judge:** The final verdict averages all 4 deep-learning AI model scores.
47
  ''')
48
  st.markdown("---")
49
  st.write("Developed for transparency in AI generation.")
 
94
  siglip_score = run_siglip(rgb_img)
95
  st.metric(label="SigLIP Pipeline ('Fake' Confidence)", value=f"{siglip_score * 100:.2f}%")
96
 
97
+ st.markdown("<br>", unsafe_allow_html=True)
98
+
99
+ # Analysis row 3: Additional Advanced Models
100
+ col5, col6 = st.columns(2)
101
+
102
+ with col5:
103
+ with st.spinner("Engine 5: Analyzing with SDXL Detector..."):
104
+ sdxl_score = run_sdxl_detector(rgb_img)
105
+ st.metric(label="SDXL Detector ('Fake' Confidence)", value=f"{sdxl_score * 100:.2f}%")
106
+
107
+ with col6:
108
+ with st.spinner("Engine 6: Analyzing with ViT Deepfake Detector..."):
109
+ deepfake_score = run_deepfake_detector(rgb_img)
110
+ st.metric(label="ViT Deepfake Detector ('Fake' Confidence)", value=f"{deepfake_score * 100:.2f}%")
111
+
112
  st.markdown("---")
113
  st.header("The Final Judge")
114
 
115
  with st.spinner("Combining scores and formulating final verdict..."):
116
+ verdict, confidence = calculate_final_verdict(resnet_score, siglip_score, sdxl_score, deepfake_score)
117
 
118
  if verdict == "FAKE":
119
  st.error(f"### Final Verdict: {verdict}")
src/analyzers.py CHANGED
@@ -13,6 +13,14 @@ def load_resnet_pipeline():
13
  def load_siglip_pipeline():
14
  return pipeline("image-classification", model="Ateeqq/ai-vs-human-image-detector")
15
 
 
 
 
 
 
 
 
 
16
  def run_fft(grayscale_array):
17
  """
18
  Calculates the Fast Fourier Transform magnitude spectrum on the grayscale array.
@@ -88,3 +96,41 @@ def run_siglip(image):
88
  fake_score = 1.0 - res['score']
89
 
90
  return float(fake_score)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
13
  def load_siglip_pipeline():
14
  return pipeline("image-classification", model="Ateeqq/ai-vs-human-image-detector")
15
 
16
@st.cache_resource  # cache across Streamlit reruns so the model downloads/loads only once
def load_sdxl_pipeline():
    """Load and cache the `Organika/sdxl-detector` image-classification pipeline."""
    return pipeline("image-classification", model="Organika/sdxl-detector")
19
+
20
@st.cache_resource  # cache across Streamlit reruns so the model downloads/loads only once
def load_deepfake_pipeline():
    """Load and cache the `prithivMLmods/Deep-Fake-Detector-v2-Model` image-classification pipeline."""
    return pipeline("image-classification", model="prithivMLmods/Deep-Fake-Detector-v2-Model")
23
+
24
  def run_fft(grayscale_array):
25
  """
26
  Calculates the Fast Fourier Transform magnitude spectrum on the grayscale array.
 
96
  fake_score = 1.0 - res['score']
97
 
98
  return float(fake_score)
99
+
100
def run_sdxl_detector(image):
    """
    Classify *image* with the Organika/sdxl-detector pipeline.

    Returns the float confidence that the image is AI-generated ('fake').
    Labels are matched case-insensitively: the first AI-style label wins
    outright; otherwise the complement of the last human-style label is
    used; 0.0 if no recognized label appears at all.
    """
    detector = load_sdxl_pipeline()
    predictions = detector(image)

    ai_labels = ('artificial', 'fake', 'ai', 'ai generated')
    human_labels = ('human', 'real')

    # An explicit AI/fake label is taken at face value and ends the search.
    for prediction in predictions:
        if prediction['label'].lower() in ai_labels:
            return float(prediction['score'])

    # No AI label present: invert the confidence of any human/real label seen.
    fake_confidence = 0.0
    for prediction in predictions:
        if prediction['label'].lower() in human_labels:
            fake_confidence = 1.0 - prediction['score']
    return float(fake_confidence)
118
+
119
def run_deepfake_detector(image):
    """
    Classify *image* with the prithivMLmods/Deep-Fake-Detector-v2-Model pipeline.

    Returns the float confidence that the image is a deepfake/AI image.
    Labels are compared case-insensitively; a fake-style label is taken as-is
    and stops the scan, a real-style label contributes its complement, and
    the score defaults to 0.0 when no recognized label is present.
    """
    clf = load_deepfake_pipeline()
    outputs = clf(image)

    fake_labels = frozenset({'deepfake', 'fake', 'artificial', 'ai'})
    real_labels = frozenset({'realism', 'real', 'human'})

    confidence = 0.0
    for entry in outputs:
        normalized = entry['label'].lower()
        if normalized in fake_labels:
            # Direct fake confidence — no need to look further.
            confidence = entry['score']
            break
        if normalized in real_labels:
            # Invert a real/human score; keep scanning for a fake label.
            confidence = 1.0 - entry['score']
    return float(confidence)
src/ensemble.py CHANGED
@@ -1,9 +1,9 @@
1
- def calculate_final_verdict(resnet_score, siglip_score):
2
  """
3
- Averages the two AI model scores and returns a final string ("REAL" or "FAKE")
4
  alongside the combined percentage confidence.
5
  """
6
- average_score = (resnet_score + siglip_score) / 2.0
7
 
8
  # If average "fake" score is > 0.5, it's considered FAKE
9
  if average_score > 0.5:
 
1
+ def calculate_final_verdict(resnet_score, siglip_score, sdxl_score, deepfake_score):
2
  """
3
+ Averages the four AI model scores and returns a final string ("REAL" or "FAKE")
4
  alongside the combined percentage confidence.
5
  """
6
+ average_score = (resnet_score + siglip_score + sdxl_score + deepfake_score) / 4.0
7
 
8
  # If average "fake" score is > 0.5, it's considered FAKE
9
  if average_score > 0.5: