niobures committed
Commit 96d632b · verified · 1 Parent(s): ebab9ac

Remote Sensing Collection (code, models, papers)

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. .gitattributes +1 -0
  2. When Remote Sensing Meets Foundation Model. A Survey and Beyond.pdf +3 -0
  3. code/remote-sensing-projects-archive.zip +3 -0
  4. models/AI-Flood-Prediction-System/.gitattributes +35 -0
  5. models/AI-Flood-Prediction-System/README.md +12 -0
  6. models/AI-Flood-Prediction-System/app.py +271 -0
  7. models/AI-Flood-Prediction-System/model_metadata.json +51 -0
  8. models/AI-Flood-Prediction-System/random_forest_model.pkl +3 -0
  9. models/AI-Flood-Prediction-System/requirements.txt +9 -0
  10. models/AI-Flood-Prediction-System/sample_forecast.csv +6 -0
  11. models/AI-Flood-Prediction-System/scaler.pkl +3 -0
  12. models/AI-Flood-Prediction-System/source.txt +1 -0
  13. models/abandoned_home_detector_mini_v1/.gitattributes +35 -0
  14. models/abandoned_home_detector_mini_v1/README.md +16 -0
  15. models/abandoned_home_detector_mini_v1/abandoned_house_detector_mini_model.pt +3 -0
  16. models/abandoned_home_detector_mini_v1/config.json +38 -0
  17. models/abandoned_home_detector_mini_v1/source.txt +1 -0
  18. models/forest_plot_analysis_2024_2025/.gitattributes +35 -0
  19. models/forest_plot_analysis_2024_2025/README.md +23 -0
  20. models/forest_plot_analysis_2024_2025/RandomForest.joblib +3 -0
  21. models/forest_plot_analysis_2024_2025/XGB.joblib +3 -0
  22. models/forest_plot_analysis_2024_2025/XGB_le.joblib +3 -0
  23. models/forest_plot_analysis_2024_2025/source.txt +1 -0
  24. models/land_use_classifier/.gitattributes +35 -0
  25. models/land_use_classifier/DESCRIPTION.txt +34 -0
  26. models/land_use_classifier/README.md +3 -0
  27. models/land_use_classifier/model.onnx +3 -0
  28. models/land_use_classifier/source.txt +1 -0
  29. models/landuse_ratio_classification_geochat/.gitattributes +35 -0
  30. models/landuse_ratio_classification_geochat/config.json +43 -0
  31. models/landuse_ratio_classification_geochat/generation_config.json +7 -0
  32. models/landuse_ratio_classification_geochat/pytorch_model-00001-of-00002.bin +3 -0
  33. models/landuse_ratio_classification_geochat/pytorch_model-00002-of-00002.bin +3 -0
  34. models/landuse_ratio_classification_geochat/pytorch_model.bin.index.json +725 -0
  35. models/landuse_ratio_classification_geochat/source.txt +1 -0
  36. models/landuse_ratio_classification_geochat/special_tokens_map.json +24 -0
  37. models/landuse_ratio_classification_geochat/tokenizer.model +3 -0
  38. models/landuse_ratio_classification_geochat/tokenizer_config.json +35 -0
  39. models/remote-sensing-Qwen2-VL-2B-Instruct-GGUF/source.txt +1 -0
  40. models/ship_detection_homemade_2024_2025/.gitattributes +36 -0
  41. models/ship_detection_homemade_2024_2025/README.md +21 -0
  42. models/ship_detection_homemade_2024_2025/model_weights.pth +3 -0
  43. models/ship_detection_homemade_2024_2025/source.txt +1 -0
  44. models/ship_detection_yolo8_2024_2025/.gitattributes +37 -0
  45. models/ship_detection_yolo8_2024_2025/README.md +24 -0
  46. models/ship_detection_yolo8_2024_2025/checkpoint_best_total.pth +3 -0
  47. models/ship_detection_yolo8_2024_2025/rf_detr_training.ipynb +0 -0
  48. models/ship_detection_yolo8_2024_2025/source.txt +1 -0
  49. models/urban_classification_2024_2025/.gitattributes +43 -0
  50. models/urban_classification_2024_2025/README.md +23 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ When[[:space:]]Remote[[:space:]]Sensing[[:space:]]Meets[[:space:]]Foundation[[:space:]]Model.[[:space:]]A[[:space:]]Survey[[:space:]]and[[:space:]]Beyond.pdf filter=lfs diff=lfs merge=lfs -text
When Remote Sensing Meets Foundation Model. A Survey and Beyond.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2e2e2f77523146b7e2f45054a4c1ff21304237304b8ff70e56bbda16e8f3c8aa
+ size 2964140
code/remote-sensing-projects-archive.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:741e6d0c303bd9a690f62d638f3716f347ac1d8b938c52162161770c6933924d
+ size 2377985280
models/AI-Flood-Prediction-System/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/AI-Flood-Prediction-System/README.md ADDED
@@ -0,0 +1,12 @@
+ ---
+ title: AI Flood Prediction System
+ emoji: 📉
+ colorFrom: indigo
+ colorTo: purple
+ sdk: streamlit
+ sdk_version: 1.47.0
+ app_file: app.py
+ pinned: false
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
models/AI-Flood-Prediction-System/app.py ADDED
@@ -0,0 +1,271 @@
+ import streamlit as st
+ import pandas as pd
+ import numpy as np
+ import joblib
+ import json
+ import folium
+ from streamlit_folium import st_folium
+ import plotly.graph_objects as go
+ import plotly.express as px
+ from datetime import datetime, timedelta
+ import requests
+
+ # Page configuration
+ st.set_page_config(
+     page_title="Charsadda Flood Prediction System",
+     page_icon="🌊",
+     layout="wide",
+     initial_sidebar_state="expanded"
+ )
+
+ # Custom CSS
+ st.markdown("""<style>
+ .main-header {
+     font-size: 2.5rem;
+     font-weight: bold;
+     color: #1f77b4;
+     text-align: center;
+     margin-bottom: 2rem;
+ }
+ .warning-box {
+     background-color: #fff3cd;
+     border-left: 5px solid #ffc107;
+     padding: 1rem;
+     margin: 1rem 0;
+ }
+ .danger-box {
+     background-color: #f8d7da;
+     border-left: 5px solid #dc3545;
+     padding: 1rem;
+     margin: 1rem 0;
+ }
+ .safe-box {
+     background-color: #d4edda;
+     border-left: 5px solid #28a745;
+     padding: 1rem;
+     margin: 1rem 0;
+ }
+ </style>""", unsafe_allow_html=True)
+
+ # Load model and scaler
+ @st.cache_resource
+ def load_models():
+     try:
+         rf_model = joblib.load('random_forest_model.pkl')
+         scaler = joblib.load('scaler.pkl')
+         with open('model_metadata.json', 'r') as f:
+             metadata = json.load(f)
+         return rf_model, scaler, metadata
+     except Exception as e:
+         st.error(f"Error loading models: {e}")
+         return None, None, None
+
+ rf_model, scaler, metadata = load_models()
+
+ # Header
+ st.markdown('<div class="main-header">🌊 AI-Powered Flood Prediction System</div>', unsafe_allow_html=True)
+ st.markdown("### Charsadda, Khyber Pakhtunkhwa, Pakistan")
+
+ # Sidebar
+ with st.sidebar:
+     st.image("https://upload.wikimedia.org/wikipedia/commons/thumb/3/32/Flag_of_Pakistan.svg/320px-Flag_of_Pakistan.svg.png", width=150)
+     st.title("Navigation")
+     page = st.radio("Select Page", ["🏠 Home", "🔮 Flood Prediction", "🗺️ Interactive Map", "📊 Model Info", "ℹ️ About"])
+     st.markdown("---")
+     st.markdown("### Study Area")
+     st.markdown("**Location:** Charsadda District")
+     st.markdown("**Province:** Khyber Pakhtunkhwa")
+     st.markdown("**Coordinates:** 34.15°N, 71.72°E")
+
+ # HOME PAGE
+ if page == "🏠 Home":
+     col1, col2 = st.columns(2)
+     with col1:
+         st.markdown("### 📋 Project Overview")
+         st.write("""This AI-powered system predicts flood risks in Charsadda district using:
+ - **Satellite imagery** (Sentinel-1 SAR & Sentinel-2 optical)
+ - **Weather data** (temperature, humidity, rainfall)
+ - **Topographic data** (elevation, slope)
+ - **Machine Learning** (Random Forest, CNN, LSTM models)""")
+         st.markdown("### 🎯 Key Features")
+         st.markdown("""- ✅ Real-time flood risk assessment
+ - ✅ Multi-source data integration
+ - ✅ Interactive flood mapping
+ - ✅ 1-3 day advance predictions
+ - ✅ High-accuracy ML models""")
+     with col2:
+         st.markdown("### 📈 Historical Floods")
+         st.info("""**2022 Flood (August 25-31)**\n- Affected thousands of people\n- Major infrastructure damage\n- Rivers Swat and Kabul overflowed\n\n**2025 Floods (June & August)**\n- Multiple flood events\n- ~35 km² area affected\n- ~41,000 people impacted""")
+         st.markdown("### 🚨 Current Status")
+         st.success("System operational and monitoring ongoing")
+
+ # FLOOD PREDICTION PAGE
+ elif page == "🔮 Flood Prediction":
+     st.markdown("## Flood Risk Prediction")
+     tab1, tab2 = st.tabs(["📤 Upload Data", "⌨️ Manual Input"])
+
+     with tab1:
+         st.markdown("### Upload Weather Forecast CSV")
+         st.info("Upload a CSV file with columns: temperature, humidity, rainfall, pressure, wind_speed")
+         uploaded_file = st.file_uploader("Choose a CSV file", type=['csv'])
+
+         if uploaded_file is not None:
+             df = pd.read_csv(uploaded_file)
+             st.write("**Preview of uploaded data:**")
+             st.dataframe(df.head())
+
+             if st.button("Predict Flood Risk", key="predict_upload"):
+                 df['VV'] = -10
+                 df['B2'] = 800
+                 df['B3'] = 900
+                 df['B4'] = 600
+                 df['B8'] = 2000
+                 df['B11'] = 1500
+                 df['NDWI'] = 0.0
+                 df['MNDWI'] = 0.0
+                 df['NDVI'] = 0.4
+                 df['elevation'] = 300
+                 df['slope'] = 2
+
+                 feature_order = metadata['features']
+                 df = df[feature_order]
+                 X_scaled = scaler.transform(df)
+                 predictions = rf_model.predict(X_scaled)
+                 probabilities = rf_model.predict_proba(X_scaled)[:, 1]
+
+                 df['Flood_Risk'] = predictions
+                 df['Confidence'] = probabilities * 100
+
+                 st.markdown("### Prediction Results")
+                 flood_count = predictions.sum()
+                 col1, col2, col3 = st.columns(3)
+                 col1.metric("Total Predictions", len(predictions))
+                 col2.metric("Flood Risk Days", int(flood_count))
+                 col3.metric("Risk Percentage", f"{flood_count/len(predictions)*100:.1f}%")
+                 st.dataframe(df[['temperature', 'humidity', 'rainfall', 'Flood_Risk', 'Confidence']])
+
+     with tab2:
+         st.markdown("### Manual Weather Input")
+         col1, col2 = st.columns(2)
+
+         with col1:
+             temperature = st.slider("Temperature (°C)", 15.0, 45.0, 30.0, 0.5)
+             humidity = st.slider("Humidity (%)", 30.0, 100.0, 70.0, 1.0)
+             rainfall = st.slider("Rainfall (mm)", 0.0, 300.0, 50.0, 5.0)
+         with col2:
+             pressure = st.slider("Pressure (hPa)", 950.0, 1050.0, 1010.0, 1.0)
+             wind_speed = st.slider("Wind Speed (m/s)", 0.0, 30.0, 5.0, 0.5)
+
+         if st.button("Predict Flood Risk", key="predict_manual"):
+             input_data = pd.DataFrame([{
+                 'VV': -10, 'B2': 800, 'B3': 900, 'B4': 600, 'B8': 2000, 'B11': 1500,
+                 'NDWI': 0.0, 'MNDWI': 0.0, 'NDVI': 0.4, 'elevation': 300, 'slope': 2,
+                 'temperature': temperature, 'humidity': humidity, 'rainfall': rainfall,
+                 'pressure': pressure, 'wind_speed': wind_speed
+             }])
+
+             input_data = input_data[metadata['features']]
+             X_scaled = scaler.transform(input_data)
+             prediction = rf_model.predict(X_scaled)[0]
+             probability = rf_model.predict_proba(X_scaled)[0, 1] * 100
+
+             st.markdown("---")
+             st.markdown("### Prediction Result")
+
+             if prediction == 1:
+                 if probability > 75:
+                     st.markdown(f'<div class="danger-box"><h3>🚨 HIGH FLOOD RISK</h3><p>Confidence: {probability:.1f}%</p><p><strong>Action Required:</strong> Immediate evacuation recommended.</p></div>', unsafe_allow_html=True)
+                 else:
+                     st.markdown(f'<div class="warning-box"><h3>⚠️ MODERATE FLOOD RISK</h3><p>Confidence: {probability:.1f}%</p><p><strong>Action Required:</strong> Stay alert.</p></div>', unsafe_allow_html=True)
+             else:
+                 st.markdown(f'<div class="safe-box"><h3>✅ LOW FLOOD RISK</h3><p>Confidence: {100-probability:.1f}%</p><p>Conditions are favorable.</p></div>', unsafe_allow_html=True)
+
+             fig = go.Figure(go.Indicator(
+                 mode="gauge+number", value=probability, title={'text': "Flood Risk Level"},
+                 gauge={'axis': {'range': [0, 100]}, 'bar': {'color': "darkblue"},
+                        'steps': [{'range': [0, 33], 'color': "lightgreen"},
+                                  {'range': [33, 66], 'color': "yellow"},
+                                  {'range': [66, 100], 'color': "red"}],
+                        'threshold': {'line': {'color': "red", 'width': 4}, 'thickness': 0.75, 'value': 50}}
+             ))
+             fig.update_layout(height=300)
+             st.plotly_chart(fig, use_container_width=True)
+
+ # INTERACTIVE MAP PAGE
+ elif page == "🗺️ Interactive Map":
+     st.markdown("## Interactive Flood Risk Map")
+     st.info("This map shows the Charsadda study area with historical flood zones.")
+
+     m = folium.Map(location=[34.15, 71.72], zoom_start=11)
+     boundary_coords = [[34.05, 71.55], [34.05, 71.90], [34.25, 71.90], [34.25, 71.55], [34.05, 71.55]]
+     folium.Polygon(locations=boundary_coords, color='red', weight=3, fill=False, popup='Charsadda Study Area').add_to(m)
+     folium.Marker([34.15, 71.72], popup='Charsadda Center', tooltip='Charsadda District', icon=folium.Icon(color='blue', icon='info-sign')).add_to(m)
+
+     flood_zones = [([34.10, 71.65], '2022 Flood Zone', 'red'),
+                    ([34.18, 71.75], '2025 June Flood', 'orange'),
+                    ([34.12, 71.80], '2025 August Flood', 'darkred')]
+     for coords, name, color in flood_zones:
+         folium.Circle(location=coords, radius=2000, popup=name, color=color, fill=True, fillColor=color, fillOpacity=0.4).add_to(m)
+
+     st_folium(m, width=1000, height=600)
+     st.markdown("### Map Legend")
+     col1, col2, col3 = st.columns(3)
+     col1.markdown("🔴 **2022 Flood Zone**")
+     col2.markdown("🟠 **2025 June Flood**")
+     col3.markdown("🔴 **2025 August Flood**")
+
+ # MODEL INFO PAGE
+ elif page == "📊 Model Info":
+     st.markdown("## Model Information")
+     if metadata:
+         col1, col2 = st.columns(2)
+         with col1:
+             st.markdown("### Model Details")
+             st.info(f"""**Best Model:** {metadata['best_model']}\n**Training Date:** {metadata['training_date'][:10]}\n**Features:** {metadata['n_features']}\n**Training Samples:** {metadata['training_samples']:,}\n**Test Samples:** {metadata['test_samples']:,}""")
+         with col2:
+             st.markdown("### Performance Metrics")
+             metrics_df = pd.DataFrame(metadata['performance_metrics'])
+             st.dataframe(metrics_df, use_container_width=True)
+         st.markdown("### Features Used")
+         st.write(metadata['features'])
+         fig = px.bar(metrics_df, x='Model', y=['Accuracy', 'Precision', 'Recall', 'F1-Score', 'ROC-AUC'], barmode='group', title='Model Performance Comparison')
+         st.plotly_chart(fig, use_container_width=True)
+
+ # ABOUT PAGE
+ elif page == "ℹ️ About":
+     st.markdown("## About This Project")
+     st.markdown("""### AI-Powered Flood Prediction and Emergency Response System
+
+ #### Objective
+ To develop an AI-based flood prediction system that provides early warnings and supports efficient coordination of emergency services for Charsadda district, KPK, Pakistan.
+
+ #### Methodology
+ - **Multi-Source Data Fusion:** Combines Sentinel-1 SAR, Sentinel-2 optical imagery, SRTM elevation data, and weather forecasts
+ - **Machine Learning Models:** Random Forest, CNN, and LSTM models trained on historical flood events (2022 and 2025)
+ - **Real-time Monitoring:** Integration with OpenWeatherMap API for current conditions
+ - **Prediction Timeframe:** 1-3 days advance flood risk assessment
+
+ #### Data Sources
+ - **Satellite:** Sentinel-1, Sentinel-2 via Google Earth Engine
+ - **Elevation:** SRTM Digital Elevation Model
+ - **Weather:** OpenWeatherMap API
+ - **Historical Events:** 2022 and 2025 flood data
+
+ #### Technical Stack
+ - **Backend:** Python, scikit-learn, TensorFlow, Google Earth Engine
+ - **Frontend:** Streamlit
+ - **Deployment:** Hugging Face Spaces
+ - **Geospatial:** Folium, Geopandas, GDAL
+
+ #### Impact
+ - Early warning system for flood-prone communities
+ - Optimized emergency resource allocation
+ - Reduced flood-related casualties and economic losses
+ - Enhanced disaster preparedness and response""")
+     st.markdown("---")
+     st.markdown("### 📧 Contact & Support")
+     st.info("For questions or support, please contact the development team.")
+
+ # Footer
+ st.markdown("---")
+ st.markdown('<div style="text-align: center; color: gray;"><p>© 2025 Charsadda Flood Prediction System | Powered by AI & Satellite Data</p></div>', unsafe_allow_html=True)
models/AI-Flood-Prediction-System/model_metadata.json ADDED
@@ -0,0 +1,51 @@
+ {
+   "best_model": "Random Forest",
+   "training_date": "2025-10-28T03:42:55.678155",
+   "performance_metrics": [
+     {
+       "Model": "Random Forest",
+       "Accuracy": 1.0,
+       "Precision": 1.0,
+       "Recall": 1.0,
+       "F1-Score": 1.0,
+       "ROC-AUC": 1.0
+     },
+     {
+       "Model": "CNN",
+       "Accuracy": 1.0,
+       "Precision": 1.0,
+       "Recall": 1.0,
+       "F1-Score": 1.0,
+       "ROC-AUC": 1.0
+     },
+     {
+       "Model": "LSTM",
+       "Accuracy": 0.5003340013360054,
+       "Precision": 0.5005847953216375,
+       "Recall": 0.5714285714285714,
+       "F1-Score": 0.5336658354114713,
+       "ROC-AUC": 0.5032726344573514
+     }
+   ],
+   "features": [
+     "VV",
+     "B2",
+     "B3",
+     "B4",
+     "B8",
+     "B11",
+     "NDWI",
+     "MNDWI",
+     "NDVI",
+     "elevation",
+     "slope",
+     "temperature",
+     "humidity",
+     "rainfall",
+     "pressure",
+     "wind_speed"
+   ],
+   "n_features": 16,
+   "training_samples": 7004,
+   "test_samples": 1500
+ }
models/AI-Flood-Prediction-System/random_forest_model.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:036d940344bb35cc495e0fc6014eae120e8954a0cb77d5a11c49c6bfe518c92b
+ size 134313
models/AI-Flood-Prediction-System/requirements.txt ADDED
@@ -0,0 +1,9 @@
+ streamlit==1.31.0
+ pandas==2.1.4
+ numpy==1.26.3
+ scikit-learn==1.3.2
+ joblib==1.3.2
+ folium==0.15.1
+ streamlit-folium==0.15.1
+ plotly==5.18.0
+ requests==2.31.0
models/AI-Flood-Prediction-System/sample_forecast.csv ADDED
@@ -0,0 +1,6 @@
+ temperature,humidity,rainfall,pressure,wind_speed
+ 28,75,20,1010,5
+ 30,80,50,1008,8
+ 32,85,100,1005,12
+ 31,82,80,1007,10
+ 29,78,30,1012,6
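
The Streamlit app above wires these artifacts together; for reference, the same prediction path can also be exercised offline against sample_forecast.csv. A minimal sketch, assuming the pickled model, scaler, and metadata sit in the working directory, and reusing the constant satellite/terrain defaults hard-coded in app.py:

```python
import json

import joblib
import pandas as pd

# Artifacts shipped in this folder.
rf_model = joblib.load("random_forest_model.pkl")
scaler = joblib.load("scaler.pkl")
with open("model_metadata.json") as f:
    metadata = json.load(f)

# Weather rows from the sample forecast; the satellite/terrain features are
# filled with the same constant defaults that app.py uses.
df = pd.read_csv("sample_forecast.csv")
defaults = {"VV": -10, "B2": 800, "B3": 900, "B4": 600, "B8": 2000, "B11": 1500,
            "NDWI": 0.0, "MNDWI": 0.0, "NDVI": 0.4, "elevation": 300, "slope": 2}
for col, val in defaults.items():
    df[col] = val

X = scaler.transform(df[metadata["features"]])  # order columns as during training
print(rf_model.predict(X))                      # 0/1 flood label per forecast row
print(rf_model.predict_proba(X)[:, 1])          # flood probability per row
```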
models/AI-Flood-Prediction-System/scaler.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0ac45456c367db7df4fe35367d3e398bc2efc11eb68c2cbc3e9dc1b548479753
+ size 1383
models/AI-Flood-Prediction-System/source.txt ADDED
@@ -0,0 +1 @@
+ https://huggingface.co/spaces/Sumayyea/AI-Flood-Prediction-System
models/abandoned_home_detector_mini_v1/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/abandoned_home_detector_mini_v1/README.md ADDED
@@ -0,0 +1,16 @@
+ ---
+ pipeline_tag: object-detection
+ license: apache-2.0
+ language:
+ - en
+ ---
+
+ # Abandoned Home Detector
+
+ This model is designed to detect abandoned homes in images by identifying high-confidence indicators, such as:
+ - Damaged roofs
+ - Surface damage
+ - Boarded-up windows
+ - Graffiti
+
+ The model is built with YOLOv8, optimized to detect these indicators using polygon-based annotations, and supports the PyTorch framework for deployment. It can assist in tasks such as urban area analysis, property risk assessment, and disaster recovery planning.
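
Since the README names YOLOv8 and the checkpoint below is a .pt file, a minimal inference sketch with the Ultralytics API (an assumption; the exact training and export setup is not documented in this commit) might look like:

```python
from ultralytics import YOLO  # assumes the checkpoint is an Ultralytics YOLOv8 model

# Hypothetical local paths; adjust to where the files actually live.
model = YOLO("abandoned_house_detector_mini_model.pt")
results = model.predict("house.jpg", conf=0.5)  # keep only high-confidence detections

for r in results:
    for box in r.boxes:
        # Class names should map to the labels in config.json:
        # damaged_roof, surface_damage, boarded_up_window, graffiti
        print(model.names[int(box.cls)], float(box.conf))
```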
models/abandoned_home_detector_mini_v1/abandoned_house_detector_mini_model.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:79e1de35ddb86b52fb04af01f98d2c52abe71e08daff699674f0bd9203448629
+ size 22520803
models/abandoned_home_detector_mini_v1/config.json ADDED
@@ -0,0 +1,38 @@
+ {
+   "framework": "PyTorch",
+   "name": "Abandoned Home Detector Mini v1",
+   "type": "detector",
+   "description": "This model detects abandoned homes by identifying high-confidence indicators such as damaged roofs, surface damage, boarded-up windows, and graffiti. Designed with YOLOv8 for polygon-based annotation.",
+   "labels": [
+     {
+       "id": 1,
+       "name": "damaged_roof",
+       "attributes": [],
+       "color": "#157177"
+     },
+     {
+       "id": 2,
+       "name": "surface_damage",
+       "attributes": [],
+       "color": "#31fa0d"
+     },
+     {
+       "id": 3,
+       "name": "boarded_up_window",
+       "attributes": [],
+       "color": "#4cb5c7"
+     },
+     {
+       "id": 4,
+       "name": "graffiti",
+       "attributes": [],
+       "color": "#f59331"
+     }
+   ],
+   "mapping": {
+     "damaged_roof": "damaged_roof",
+     "surface_damage": "surface_damage",
+     "boarded_up_window": "boarded_up_window",
+     "graffiti": "graffiti"
+   }
+ }
models/abandoned_home_detector_mini_v1/source.txt ADDED
@@ -0,0 +1 @@
+ https://huggingface.co/prestonzen/abandoned_home_detector_mini_v1
models/forest_plot_analysis_2024_2025/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/forest_plot_analysis_2024_2025/README.md ADDED
@@ -0,0 +1,23 @@
+ ---
+ license: cc-by-nc-sa-4.0
+ ---
+
+
+ ## Description
+
+ Set of weights generated for the forest plot analysis project in the Remote Sensing
+ course at Grenoble-INP ENSE3, 2024/2025.
+
+ The related project is available at:
+
+ https://gricad-gitlab.univ-grenoble-alpes.fr/piconed/remote-sensing-projects-archive
+
+ in the folder `projects/2024_2025/05_forest`
+
+
+ ## Credits
+
+ - Julien Delahaye (Julien.Delahaye1@grenoble-inp.org)
+ - Nicolas Dessailly (Nicolas.Desailly@grenoble-inp.org)
+ - Raphaël Evrard (Raphael.Evrard@grenoble-inp.org)
+ - Carla Philippon (Carla.Philippon@grenoble-inp.org)
models/forest_plot_analysis_2024_2025/RandomForest.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0e6c1972513cef4686dfb053ea59f08f85a969b50d7e92cbd8813809dc1a722
+ size 259575305
models/forest_plot_analysis_2024_2025/XGB.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b333a6f93bae68382f3a4ddcd5d4276fceb3ff357a70936c742cb86865941db9
+ size 15517721
models/forest_plot_analysis_2024_2025/XGB_le.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:226bae41fea13b50032e74cb1158e67d0f808c8678664b5968da8865f0c38f22
+ size 671
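
The three joblib artifacts above are not documented beyond their filenames. A plausible loading sketch, assuming XGB_le.joblib is a scikit-learn LabelEncoder paired with the XGBoost classifier (an assumption based on the `_le` suffix and its tiny 671-byte size); the expected input features are not documented, so the feature matrix here is a placeholder:

```python
import joblib
import numpy as np

# Hypothetical feature matrix; replace with the project's actual features
# (see projects/2024_2025/05_forest in the linked archive).
X = np.random.rand(4, 10)

rf = joblib.load("RandomForest.joblib")   # scikit-learn random forest
xgb = joblib.load("XGB.joblib")           # XGBoost classifier
le = joblib.load("XGB_le.joblib")         # assumed: LabelEncoder for XGB's classes

xgb_pred = xgb.predict(X)
print(le.inverse_transform(xgb_pred))     # map encoded class ids back to labels
print(rf.predict(X))
```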
models/forest_plot_analysis_2024_2025/source.txt ADDED
@@ -0,0 +1 @@
+ https://huggingface.co/remote-sensing-ense3-grenoble-inp/forest_plot_analysis_2024_2025
models/land_use_classifier/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/land_use_classifier/DESCRIPTION.txt ADDED
@@ -0,0 +1,34 @@
+ A plain convolutional land-use / land-cover classifier. A small ResNet-18 land-use classifier, deliberately scaled down for a small task.
+ Architecturally, a simplified ResNet-like CNN: several Conv -> ReLU -> Add stacks with residual connections, followed by GlobalAveragePool and a final Gemm over 21 classes.
+ Use case: baseline classification of 21 land-use classes only.
+
+ ### MODEL DETAILS ###
+
+ Parameters: ~11.1 M
+ Input: 1x3x256x256
+ Output: 1x21
+
+ ### WHAT THE MODEL DOES ###
+
+ Given a 256x256 RGB input image, it outputs probabilities for 21 land-use classes.
+
+ Example class types:
+ > agricultural land
+ > forest
+ > water
+ > residential areas
+ > industrial zones
+ > roads
+ > deserts
+ > mountain ranges
+ > etc.
+ (the specific 21 classes depend on the dataset, typically UC Merced Land Use or EuroSAT)
+
+ There is no object detection, segmentation, change detection, or geospatial reasoning functionality.
+ The model is not georeferenced: it does not know where north is, which country it is looking at, or what map projection applies. The input is just an ordinary image.
+
+ The classes are fixed (21); the model cannot recognize new classes.
+ It does not handle tiles at other scales. 256x256 only.
+
+ Accuracy on real satellite data is low. The model was trained on a high-quality, noise-free dataset.
+ It classifies imagery at real-world resolutions (Sentinel-2, 10-20 m/px) poorly.
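
Given the stated 1x3x256x256 input and 1x21 output, a minimal onnxruntime sketch for model.onnx could look as follows. The preprocessing is an assumption (plain resize and scaling to [0, 1]); the normalization actually used in training is not documented here:

```python
import numpy as np
import onnxruntime as ort
from PIL import Image

session = ort.InferenceSession("model.onnx")
input_name = session.get_inputs()[0].name  # expects a 1x3x256x256 float tensor

# Assumed preprocessing: resize to 256x256 and scale to [0, 1]; treat this as
# a placeholder until the training-time normalization is known.
img = Image.open("tile.png").convert("RGB").resize((256, 256))
x = np.asarray(img, dtype=np.float32).transpose(2, 0, 1)[None] / 255.0

scores = session.run(None, {input_name: x})[0]  # shape (1, 21)
print(int(scores.argmax(axis=1)[0]))            # predicted class index
```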
models/land_use_classifier/README.md ADDED
@@ -0,0 +1,3 @@
+ ---
+ license: apache-2.0
+ ---
models/land_use_classifier/model.onnx ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fcef6ac6efe4e0bed0cc33d1933d4fb842966b6522436aa5206366faca22d4c
+ size 44740572
models/land_use_classifier/source.txt ADDED
@@ -0,0 +1 @@
+ https://huggingface.co/SatwikKambham/land_use_classifier
models/landuse_ratio_classification_geochat/.gitattributes ADDED
@@ -0,0 +1,35 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
models/landuse_ratio_classification_geochat/config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "_name_or_path": "./models/geochat_base",
+   "architectures": [
+     "GeoChatLlamaForCausalLM"
+   ],
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "freeze_mm_mlp_adapter": false,
+   "freeze_mm_vision_resampler": false,
+   "hidden_act": "silu",
+   "hidden_size": 4096,
+   "image_aspect_ratio": "pad",
+   "image_grid_pinpoints": null,
+   "initializer_range": 0.02,
+   "intermediate_size": 11008,
+   "max_length": 4096,
+   "max_position_embeddings": 4096,
+   "mm_hidden_size": 1024,
+   "mm_projector_type": "mlp2x_gelu",
+   "mm_resampler_type": null,
+   "mm_use_im_patch_token": false,
+   "mm_use_im_start_end": false,
+   "mm_vision_select_feature": "patch",
+   "mm_vision_select_layer": -2,
+   "mm_vision_tower": "openai/clip-vit-large-patch14-336",
+   "model_type": "geochat",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 32,
+   "num_key_value_heads": 32,
+   "pad_token_id": 0,
+   "pretraining_tp": 1,
+   "rms_norm_eps": 1e-05,
+   "rope_scaling": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "float16",
+   "transformers_version": "4.31.0",
+   "tune_mm_mlp_adapter": false,
+   "tune_mm_vision_resampler": false,
+   "unfreeze_mm_vision_tower": false,
+   "use_cache": true,
+   "use_mm_proj": true,
+   "vocab_size": 32000
+ }
models/landuse_ratio_classification_geochat/generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+   "bos_token_id": 1,
+   "eos_token_id": 2,
+   "max_length": 4096,
+   "pad_token_id": 0,
+   "transformers_version": "4.31.0"
+ }
models/landuse_ratio_classification_geochat/pytorch_model-00001-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70d50341f85d033efeb065ef3ab25be564381afd9f0fe1d2b4c357ba60f40f80
+ size 9976634558
models/landuse_ratio_classification_geochat/pytorch_model-00002-of-00002.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1170c5cdef76af776fc16f555fe3e2ccc76ffebe9dbb86f392897a7120715b4a
+ size 4149444818
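
The index file that follows maps each tensor name to one of the two shards above. As a sketch of how that mapping can be used to reassemble the full state dict with plain PyTorch (actually instantiating the model additionally requires the GeoChat codebase for the GeoChatLlamaForCausalLM class, which is not part of this commit):

```python
import json

import torch

# The index's "weight_map" maps tensor name -> shard filename.
with open("pytorch_model.bin.index.json") as f:
    index = json.load(f)

state_dict = {}
for shard_file in sorted(set(index["weight_map"].values())):
    # Each shard is an ordinary torch-serialized dict of tensors.
    state_dict.update(torch.load(shard_file, map_location="cpu"))

# Sanity check: every tensor promised by the index is present.
missing = [name for name in index["weight_map"] if name not in state_dict]
print(f"{len(state_dict)} tensors loaded, {len(missing)} missing")
```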
models/landuse_ratio_classification_geochat/pytorch_model.bin.index.json ADDED
@@ -0,0 +1,725 @@
+ {
+   "metadata": {
+     "total_size": 14127288320
+   },
+   "weight_map": {
+     "lm_head.weight": "pytorch_model-00002-of-00002.bin",
+     "model.embed_tokens.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.0.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.1.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.10.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.11.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.12.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.13.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.14.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.15.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.16.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.17.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.18.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.19.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.2.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.20.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.21.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.22.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.23.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.24.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+     "model.layers.24.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+     "model.layers.25.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+     "model.layers.26.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+     "model.layers.27.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+     "model.layers.28.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+     "model.layers.29.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.3.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
+     "model.layers.3.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
+     "model.layers.30.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
+     "model.layers.30.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.input_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.mlp.down_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.mlp.gate_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.mlp.up_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.post_attention_layernorm.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.o_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
+     "model.layers.31.self_attn.rotary_emb.inv_freq": "pytorch_model-00002-of-00002.bin",
267
+ "model.layers.31.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
268
+ "model.layers.4.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
269
+ "model.layers.4.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
270
+ "model.layers.4.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
271
+ "model.layers.4.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
272
+ "model.layers.4.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
273
+ "model.layers.4.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
274
+ "model.layers.4.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
275
+ "model.layers.4.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
276
+ "model.layers.4.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
277
+ "model.layers.4.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
278
+ "model.layers.5.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
279
+ "model.layers.5.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
280
+ "model.layers.5.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
281
+ "model.layers.5.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
282
+ "model.layers.5.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
283
+ "model.layers.5.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
284
+ "model.layers.5.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
285
+ "model.layers.5.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
286
+ "model.layers.5.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
287
+ "model.layers.5.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
288
+ "model.layers.6.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
289
+ "model.layers.6.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
290
+ "model.layers.6.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
291
+ "model.layers.6.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
292
+ "model.layers.6.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
293
+ "model.layers.6.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
294
+ "model.layers.6.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
295
+ "model.layers.6.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
296
+ "model.layers.6.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
297
+ "model.layers.6.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
298
+ "model.layers.7.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
299
+ "model.layers.7.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
300
+ "model.layers.7.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
301
+ "model.layers.7.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
302
+ "model.layers.7.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
303
+ "model.layers.7.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
304
+ "model.layers.7.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
305
+ "model.layers.7.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
306
+ "model.layers.7.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
307
+ "model.layers.7.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
308
+ "model.layers.8.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
309
+ "model.layers.8.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
310
+ "model.layers.8.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
311
+ "model.layers.8.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
312
+ "model.layers.8.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
313
+ "model.layers.8.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
314
+ "model.layers.8.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
315
+ "model.layers.8.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
316
+ "model.layers.8.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
317
+ "model.layers.8.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
318
+ "model.layers.9.input_layernorm.weight": "pytorch_model-00001-of-00002.bin",
319
+ "model.layers.9.mlp.down_proj.weight": "pytorch_model-00001-of-00002.bin",
320
+ "model.layers.9.mlp.gate_proj.weight": "pytorch_model-00001-of-00002.bin",
321
+ "model.layers.9.mlp.up_proj.weight": "pytorch_model-00001-of-00002.bin",
322
+ "model.layers.9.post_attention_layernorm.weight": "pytorch_model-00001-of-00002.bin",
323
+ "model.layers.9.self_attn.k_proj.weight": "pytorch_model-00001-of-00002.bin",
324
+ "model.layers.9.self_attn.o_proj.weight": "pytorch_model-00001-of-00002.bin",
325
+ "model.layers.9.self_attn.q_proj.weight": "pytorch_model-00001-of-00002.bin",
326
+ "model.layers.9.self_attn.rotary_emb.inv_freq": "pytorch_model-00001-of-00002.bin",
327
+ "model.layers.9.self_attn.v_proj.weight": "pytorch_model-00001-of-00002.bin",
328
+ "model.mm_projector.0.bias": "pytorch_model-00002-of-00002.bin",
329
+ "model.mm_projector.0.weight": "pytorch_model-00002-of-00002.bin",
330
+ "model.mm_projector.2.bias": "pytorch_model-00002-of-00002.bin",
331
+ "model.mm_projector.2.weight": "pytorch_model-00002-of-00002.bin",
332
+ "model.norm.weight": "pytorch_model-00002-of-00002.bin",
333
+ "model.vision_tower.vision_tower.vision_model.embeddings.class_embedding": "pytorch_model-00002-of-00002.bin",
334
+ "model.vision_tower.vision_tower.vision_model.embeddings.patch_embedding.weight": "pytorch_model-00002-of-00002.bin",
335
+ "model.vision_tower.vision_tower.vision_model.embeddings.position_embedding.weight": "pytorch_model-00002-of-00002.bin",
336
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
337
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
338
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
339
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
340
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
341
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
342
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
343
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
344
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
345
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
346
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
347
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
348
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
349
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
350
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
351
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.0.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
352
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
353
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
354
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
355
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
356
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
357
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
358
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
359
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
360
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
361
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
362
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
363
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
364
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
365
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
366
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
367
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.1.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
368
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
369
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
370
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
371
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
372
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
373
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
374
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
375
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
376
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
377
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
378
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
379
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
380
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
381
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
382
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
383
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.10.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
384
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
385
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
386
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
387
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
388
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
389
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
390
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
391
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
392
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
393
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
394
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
395
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
396
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
397
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
398
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
399
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.11.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
400
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
401
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
402
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
403
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
404
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
405
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
406
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
407
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
408
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
409
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
410
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
411
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
412
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
413
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
414
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
415
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.12.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
416
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
417
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
418
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
419
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
420
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
421
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
422
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
423
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
424
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
425
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
426
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
427
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
428
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
429
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
430
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
431
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.13.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
432
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
433
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
434
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
435
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
436
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
437
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
438
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
439
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
440
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
441
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
442
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
443
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
444
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
445
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
446
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
447
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.14.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
448
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
449
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
450
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
451
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
452
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
453
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
454
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
455
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
456
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
457
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
458
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
459
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
460
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
461
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
462
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
463
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.15.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
464
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
465
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
466
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
467
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
468
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
469
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
470
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
471
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
472
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
473
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
474
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
475
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
476
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
477
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
478
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
479
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.16.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
480
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
481
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
482
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
483
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
484
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
485
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
486
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
487
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
488
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
489
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
490
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
491
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
492
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
493
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
494
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
495
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.17.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
496
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
497
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
498
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
499
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
500
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
501
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
502
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
503
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
504
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
505
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
506
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
507
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
508
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
509
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
510
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
511
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.18.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
512
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
513
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
514
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
515
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
516
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
517
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
518
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
519
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
520
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
521
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
522
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
523
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
524
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
525
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
526
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
527
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.19.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
528
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
529
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
530
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
531
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
532
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
533
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
534
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
535
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
536
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
537
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
538
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
539
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
540
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
541
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
542
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
543
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.2.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
544
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
545
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
546
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
547
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
548
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
549
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
550
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
551
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
552
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
553
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
554
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
555
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
556
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
557
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
558
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
559
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.20.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
560
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
561
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
562
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
563
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
564
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
565
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
566
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
567
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
568
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
569
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
570
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
571
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
572
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
573
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
574
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
575
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.21.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
576
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
577
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
578
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
579
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
580
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
581
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
582
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
583
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
584
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
585
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
586
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
587
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
588
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
589
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
590
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
591
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.22.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
592
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
593
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
594
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
595
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
596
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
597
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
598
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
599
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
600
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
601
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
602
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
603
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
604
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
605
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
606
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
607
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.23.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
608
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
609
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
610
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
611
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
612
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
613
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
614
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
615
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
616
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
617
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
618
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
619
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
620
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
621
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
622
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
623
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.3.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
624
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
625
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
626
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
627
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
628
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
629
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
630
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
631
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
632
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
633
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
634
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
635
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
636
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
637
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
638
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
639
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.4.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
640
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
641
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
642
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
643
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
644
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
645
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
646
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
647
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
648
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
649
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
650
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
651
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
652
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
653
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
654
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
655
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.5.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
656
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
657
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
658
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
659
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
660
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
661
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
662
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
663
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
664
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
665
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
666
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
667
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
668
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
669
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
670
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
671
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.6.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
672
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
673
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
674
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
675
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
676
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
677
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
678
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
679
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
680
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
681
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
682
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
683
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
684
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
685
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
686
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
687
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.7.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
688
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
689
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
690
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
691
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
692
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
693
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
694
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
695
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
696
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
697
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
698
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
699
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
700
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
701
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
702
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
703
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.8.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
704
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.bias": "pytorch_model-00002-of-00002.bin",
705
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm1.weight": "pytorch_model-00002-of-00002.bin",
706
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.bias": "pytorch_model-00002-of-00002.bin",
707
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.layer_norm2.weight": "pytorch_model-00002-of-00002.bin",
708
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.bias": "pytorch_model-00002-of-00002.bin",
709
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc1.weight": "pytorch_model-00002-of-00002.bin",
710
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.bias": "pytorch_model-00002-of-00002.bin",
711
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.mlp.fc2.weight": "pytorch_model-00002-of-00002.bin",
712
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.bias": "pytorch_model-00002-of-00002.bin",
713
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.k_proj.weight": "pytorch_model-00002-of-00002.bin",
714
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.bias": "pytorch_model-00002-of-00002.bin",
715
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.out_proj.weight": "pytorch_model-00002-of-00002.bin",
716
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.bias": "pytorch_model-00002-of-00002.bin",
717
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.q_proj.weight": "pytorch_model-00002-of-00002.bin",
718
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.bias": "pytorch_model-00002-of-00002.bin",
719
+ "model.vision_tower.vision_tower.vision_model.encoder.layers.9.self_attn.v_proj.weight": "pytorch_model-00002-of-00002.bin",
720
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.bias": "pytorch_model-00002-of-00002.bin",
721
+ "model.vision_tower.vision_tower.vision_model.post_layernorm.weight": "pytorch_model-00002-of-00002.bin",
722
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.bias": "pytorch_model-00002-of-00002.bin",
723
+ "model.vision_tower.vision_tower.vision_model.pre_layrnorm.weight": "pytorch_model-00002-of-00002.bin"
724
+ }
725
+ }
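Note that the `pre_layrnorm` keys above are not a transcription error: that misspelling is the actual attribute name in the upstream Transformers CLIP vision tower, so the keys must stay as-is to match the model. The index file follows the standard Transformers sharded-checkpoint layout: a `"weight_map"` object mapping every parameter name to the shard file that stores it. A minimal sketch of how such an index is consumed, assuming that layout (the local path is illustrative):

```python
# Sketch: rebuild a full state_dict from a sharded HF checkpoint
# using pytorch_model.bin.index.json. Paths are illustrative.
import json
import os
import torch

repo = "models/landuse_ratio_classification_geochat"
with open(os.path.join(repo, "pytorch_model.bin.index.json")) as f:
    index = json.load(f)

# "weight_map" maps each parameter name to its shard file.
state_dict = {}
for shard_file in sorted(set(index["weight_map"].values())):
    shard = torch.load(os.path.join(repo, shard_file), map_location="cpu")
    state_dict.update(shard)

# Sanity check: every indexed parameter should now be present.
missing = [k for k in index["weight_map"] if k not in state_dict]
assert not missing, f"missing {len(missing)} tensors"
```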
models/landuse_ratio_classification_geochat/source.txt ADDED
@@ -0,0 +1 @@
+ https://huggingface.co/YounhyungChae/landuse_ratio_classification_geochat
models/landuse_ratio_classification_geochat/special_tokens_map.json ADDED
@@ -0,0 +1,24 @@
+ {
+ "bos_token": {
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": "<unk>",
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
models/landuse_ratio_classification_geochat/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+ size 499723
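The three lines above are a Git LFS v1 pointer, not the SentencePiece model itself: the real blob is stored out-of-band and identified by its SHA-256 and size. A small sketch (illustrative, not part of the repo) of how to verify a downloaded blob against such a pointer:

```python
# Sketch: verify a downloaded file against a git-lfs v1 pointer
# (version / oid / size lines, as in the diff above).
import hashlib
import os

def parse_lfs_pointer(pointer_path):
    """Parse the 'key value' lines of a git-lfs v1 pointer file."""
    fields = {}
    with open(pointer_path) as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

def verify(pointer_path, blob_path):
    fields = parse_lfs_pointer(pointer_path)
    expected = fields["oid"].removeprefix("sha256:")
    h = hashlib.sha256()
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return (h.hexdigest() == expected
            and str(os.path.getsize(blob_path)) == fields["size"])
```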
models/landuse_ratio_classification_geochat/tokenizer_config.json ADDED
@@ -0,0 +1,35 @@
+ {
+ "add_bos_token": true,
+ "add_eos_token": false,
+ "bos_token": {
+ "__type": "AddedToken",
+ "content": "<s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "clean_up_tokenization_spaces": false,
+ "eos_token": {
+ "__type": "AddedToken",
+ "content": "</s>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "legacy": false,
+ "model_max_length": 2048,
+ "pad_token": null,
+ "padding_side": "right",
+ "sp_model_kwargs": {},
+ "tokenizer_class": "LlamaTokenizer",
+ "unk_token": {
+ "__type": "AddedToken",
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
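Taken together, `tokenizer.model`, `tokenizer_config.json`, and `special_tokens_map.json` describe a standard sentencepiece-based LLaMA tokenizer: `tokenizer_config.json` leaves `pad_token` null, while `special_tokens_map.json` resolves it to `<unk>`, a common choice for LLaMA-family checkpoints. A minimal usage sketch, assuming the folder is loaded locally (not prescribed by the repo):

```python
# Sketch: load the GeoChat tokenizer from the files in this folder.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "models/landuse_ratio_classification_geochat",  # local folder from this commit
    use_fast=False,  # config declares the sentencepiece LlamaTokenizer
)

print(tok.bos_token, tok.eos_token, tok.unk_token)  # <s> </s> <unk>
print(tok.pad_token)          # <unk>, via special_tokens_map.json
print(tok.model_max_length)   # 2048, via tokenizer_config.json
```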
models/remote-sensing-Qwen2-VL-2B-Instruct-GGUF/source.txt ADDED
@@ -0,0 +1 @@
+ https://huggingface.co/mradermacher/remote-sensing-Qwen2-VL-2B-Instruct-GGUF
models/ship_detection_homemade_2024_2025/.gitattributes ADDED
@@ -0,0 +1,36 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ model_weights.pth filter=lfs diff=lfs merge=lfs -text
models/ship_detection_homemade_2024_2025/README.md ADDED
@@ -0,0 +1,21 @@
+ ---
+ license: cc-by-nc-sa-4.0
+ ---
+
+ ## Description
+
+ Weights for a homemade ship detection model developed for the
+ Remote Sensing course at Grenoble-INP ENSE3, 2024/2025.
+
+ The related project is available at:
+
+ https://gricad-gitlab.univ-grenoble-alpes.fr/piconed/remote-sensing-projects-archive
+
+ in the folder `projects/2024_2025/08_ship_detection`.
+
+
+ ## Credits
+
+ Hugo Fournier (Hugo.Fournier2@grenoble-inp.org)
+ Lilian Haemmerer (Lilian.Haemmerer@grenoble-inp.org)
+ Paul Jablonski (Paul.Jablonski@grenoble-inp.org)
models/ship_detection_homemade_2024_2025/model_weights.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fa685dea0987c7823a6c6cfe56f96642b8149358a1b90340d4db99520f42882e
+ size 2179937
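The checkpoint stores tensors only, so the matching model class from the course repository is needed to actually run it. A small sketch (illustrative, assuming the `.pth` file is a plain state_dict) of how to inspect it without knowing the architecture:

```python
# Sketch: inspect the homemade ship-detection weights.
import torch

state = torch.load("model_weights.pth", map_location="cpu")
# Assumption: a plain state_dict mapping names to tensors.
# If it wraps a dict (e.g. {"model": ...}), unwrap it first.
for name, tensor in list(state.items())[:10]:
    print(f"{name:60s} {tuple(tensor.shape)}")
total = sum(t.numel() for t in state.values())
print(f"{total:,} parameters in {len(state)} tensors")
```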
models/ship_detection_homemade_2024_2025/source.txt ADDED
@@ -0,0 +1 @@
+ https://huggingface.co/remote-sensing-ense3-grenoble-inp/ship_detection_homemade_2024_2025
models/ship_detection_yolo8_2024_2025/.gitattributes ADDED
@@ -0,0 +1,37 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ checkpoint_best_total.pth filter=lfs diff=lfs merge=lfs -text
+ rf_detr_training.ipynb filter=lfs diff=lfs merge=lfs -text
models/ship_detection_yolo8_2024_2025/README.md ADDED
@@ -0,0 +1,24 @@
+ ---
+ license: cc-by-nc-sa-4.0
+ ---
+
+ ## Description
+
+ Collection of weights for the YOLOv8 model trained for the ship
+ detection project for Remote Sensing at Grenoble-INP ENSE3, 2024/2025.
+
+ The related project is available at:
+
+ https://gricad-gitlab.univ-grenoble-alpes.fr/piconed/remote-sensing-projects-archive
+
+ in the folder `projects/2024_2025/08_ship_detection`.
+
+ The dataset used for training is available at:
+ https://huggingface.co/datasets/remote-sensing-ense3-grenoble-inp/ship_detection_roboflow_2024_2025
+
+
+ ## Credits
+
+ Hugo Fournier (Hugo.Fournier2@grenoble-inp.org)
+ Lilian Haemmerer (Lilian.Haemmerer@grenoble-inp.org)
+ Paul Jablonski (Paul.Jablonski@grenoble-inp.org)
models/ship_detection_yolo8_2024_2025/checkpoint_best_total.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2b14b316ef80df486ffdef32e033ea296cd19ab36c853dea36da7283a8370536
3
+ size 368993467
models/ship_detection_yolo8_2024_2025/rf_detr_training.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
models/ship_detection_yolo8_2024_2025/source.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ https://huggingface.co/remote-sensing-ense3-grenoble-inp/ship_detection_yolo8_2024_2025
models/urban_classification_2024_2025/.gitattributes ADDED
@@ -0,0 +1,43 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ bestmodel_DeepLabV3Plus_resnet101_paris_2-DeveloppedSpaces_CELIAN.pth filter=lfs diff=lfs merge=lfs -text
+ bestmodel_FPN_efficientnet-b5_paris_1-Rangeland_CELIAN.pth filter=lfs diff=lfs merge=lfs -text
+ bestmodel_FPN_efficientnet-b6_paris_4-Tree_CELIAN.pth filter=lfs diff=lfs merge=lfs -text
+ bestmodel_FPN_efficientnet-b6_paris_5-Water_CELIAN.pth filter=lfs diff=lfs merge=lfs -text
+ bestmodel_Unet_efficientnet-b5_paris_3-Road_CELIAN.pth filter=lfs diff=lfs merge=lfs -text
+ bestmodel_Unet_efficientnet-b5_world_CELIAN.pth filter=lfs diff=lfs merge=lfs -text
+ bestmodel_Unet_efficientnet-b7_paris_CELIAN.pth filter=lfs diff=lfs merge=lfs -text
+ bestmodel_Unet_mit_b5_paris_7-Buildings_MOMO.pth filter=lfs diff=lfs merge=lfs -text
models/urban_classification_2024_2025/README.md ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: cc-by-nc-sa-4.0
3
+ ---
4
+
5
+
6
+ ## Description
7
+
8
+ Set of weights generated for urban classification for the project of Remote
9
+ Sensing in Grenoble-INP ENSE3 2024/2025.
10
+
11
+ The related project is available at:
12
+
13
+ https://gricad-gitlab.univ-grenoble-alpes.fr/piconed/remote-sensing-projects-archive
14
+
15
+ in the folder `projects/2024_2025/03_urban`
16
+
17
+
18
+ ## Credits
19
+
20
+ Celian Charrin (Celian.Charrin@grenoble-inp.org)
21
+ Alexandre Jolly (Alexandre.Jolly@grenoble-inp.org)
22
+ Morgan Santalucia (Morgan.Santalucia@grenoble-inp.org)
23
+ Timon Taule--Bonnaire (Timon.Taule--Bonnaire@grenoble-inp.org)
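The weight filenames tracked above (e.g. `bestmodel_FPN_efficientnet-b5_paris_1-Rangeland_CELIAN.pth`) follow the `<architecture>_<encoder>` naming used by segmentation_models_pytorch, with what looks like one model per land-cover theme. A hedged sketch of how such a checkpoint could plausibly be restored; the class and channel counts are assumptions, so check the course repository for the actual training setup:

```python
# Hedged sketch: rebuild a model matching the filename convention.
import torch
import segmentation_models_pytorch as smp

model = smp.FPN(
    encoder_name="efficientnet-b5",
    encoder_weights=None,   # weights come from the checkpoint, not ImageNet
    in_channels=3,          # assumption: RGB input
    classes=1,              # assumption: one binary mask per theme
)
state = torch.load(
    "bestmodel_FPN_efficientnet-b5_paris_1-Rangeland_CELIAN.pth",
    map_location="cpu",
)
# Assumption: the file is a state_dict; if the whole model object was
# saved instead, use the loaded object directly.
model.load_state_dict(state)
model.eval()
```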