Dyuti Dasmahapatra committed · Commit 4814c8e · 1 Parent(s): 9bf5c2d

Updated readme file
Browse files

- .github/workflows/ci.yml +111 -0
- CHANGELOG.md +86 -0
- CONTRIBUTING.md +306 -0
- Dockerfile +34 -0
- LICENSE +21 -0
- QUICKSTART.md +348 -0
- README.md +495 -0
- docker-compose.yml +21 -0
.github/workflows/ci.yml
ADDED
@@ -0,0 +1,111 @@
name: CI/CD Pipeline

on:
  push:
    branches: [ main, develop ]
  pull_request:
    branches: [ main, develop ]

jobs:
  test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.8', '3.9', '3.10', '3.11']

    steps:
    - name: Checkout code
      uses: actions/checkout@v3

    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v4
      with:
        python-version: ${{ matrix.python-version }}

    - name: Cache pip dependencies
      uses: actions/cache@v3
      with:
        path: ~/.cache/pip
        key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}
        restore-keys: |
          ${{ runner.os }}-pip-

    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install -r requirements.txt
        pip install pytest pytest-cov flake8 black

    - name: Lint with flake8
      run: |
        # Stop the build if there are Python syntax errors or undefined names
        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
        # Exit-zero treats all errors as warnings
        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=100 --statistics

    - name: Check code formatting with Black
      run: |
        black --check src/ tests/ app.py || echo "Code formatting check completed"

    - name: Run tests with pytest
      run: |
        pytest tests/ -v --cov=src --cov-report=xml --cov-report=html

    - name: Upload coverage to Codecov
      uses: codecov/codecov-action@v3
      with:
        file: ./coverage.xml
        flags: unittests
        name: codecov-umbrella

  build-docker:
    runs-on: ubuntu-latest
    needs: test
    if: github.event_name == 'push' && github.ref == 'refs/heads/main'

    steps:
    - name: Checkout code
      uses: actions/checkout@v3

    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v2

    - name: Build Docker image
      run: |
        docker build -t vit-auditing-toolkit:latest .

    - name: Test Docker image
      run: |
        docker run -d --name test-container -p 7860:7860 vit-auditing-toolkit:latest
        sleep 30
        curl -f http://localhost:7860/ || exit 1
        docker stop test-container
        docker rm test-container

  lint:
    runs-on: ubuntu-latest

    steps:
    - name: Checkout code
      uses: actions/checkout@v3

    - name: Set up Python
      uses: actions/setup-python@v4
      with:
        python-version: '3.10'

    - name: Install linting tools
      run: |
        pip install flake8 black mypy

    - name: Run flake8
      run: |
        flake8 src/ tests/ app.py --max-line-length=100

    - name: Run Black
      run: |
        black --check src/ tests/ app.py

    - name: Run mypy
      run: |
        mypy src/ --ignore-missing-imports || echo "Type checking completed"
CHANGELOG.md
ADDED
@@ -0,0 +1,86 @@
# Changelog

All notable changes to the ViT Auditing Toolkit will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [Unreleased]

### Planned
- Support for additional ViT variants (DeiT, BEiT, Swin Transformer)
- Batch processing capabilities
- Export functionality for reports
- Custom model upload support
- API endpoints for programmatic access

## [1.0.0] - 2024-10-26

### Added
- Initial release of ViT Auditing Toolkit
- Basic Explainability features:
  - Attention Visualization with layer/head selection
  - GradCAM implementation using Captum
  - GradientSHAP for pixel-level attribution
- Advanced Auditing features:
  - Counterfactual Analysis with patch perturbation
  - Confidence Calibration analysis
  - Bias Detection across subgroups
- Web interface using Gradio:
  - Modern, responsive UI with custom styling
  - Four-tab interface for different analysis types
  - Real-time visualization of results
- Model support:
  - ViT-Base (google/vit-base-patch16-224)
  - ViT-Large (google/vit-large-patch16-224)
- Comprehensive documentation:
  - Detailed README with usage guides
  - Technical explanations of methods
  - Installation instructions
- Testing suite:
  - Unit tests for core functionality
  - Integration tests for advanced features
- Docker support for easy deployment
- CI/CD pipeline with GitHub Actions

### Technical Details
- PyTorch 2.2+ compatibility
- Hugging Face Transformers integration
- Captum for model interpretability
- Gradio 4.19+ for web interface
- Matplotlib for visualizations

### Documentation
- Comprehensive README.md
- Contributing guidelines
- MIT License
- Code of conduct

## [0.1.0] - 2024-10-15

### Added
- Project initialization
- Basic project structure
- Core module implementations
- Initial model loading functionality

---

## Version History

### Version Numbering
- **Major version (X.0.0)**: Incompatible API changes
- **Minor version (0.X.0)**: New functionality, backwards-compatible
- **Patch version (0.0.X)**: Backwards-compatible bug fixes

### Release Notes Format
- **Added**: New features
- **Changed**: Changes in existing functionality
- **Deprecated**: Soon-to-be removed features
- **Removed**: Removed features
- **Fixed**: Bug fixes
- **Security**: Vulnerability fixes

---

For more details on any version, see the [GitHub Releases](https://github.com/dyra-12/ViT-XAI-Dashboard/releases) page.
CONTRIBUTING.md
ADDED
@@ -0,0 +1,306 @@
# Contributing to ViT Auditing Toolkit

First off, thank you for considering contributing to the ViT Auditing Toolkit! It's people like you that make this tool better for everyone.

## 🌟 Ways to Contribute

### 1. Reporting Bugs 🐛

Before creating bug reports, please check existing issues to avoid duplicates. When creating a bug report, include:

- **Clear title and description**
- **Steps to reproduce** the behavior
- **Expected vs actual behavior**
- **Screenshots** if applicable
- **Environment details** (OS, Python version, etc.)

**Example:**
```markdown
**Bug**: GradCAM visualization fails with ViT-Large model

**Steps to reproduce:**
1. Select ViT-Large from dropdown
2. Upload any image
3. Select GradCAM method
4. Click "Analyze Image"

**Expected:** GradCAM heatmap visualization
**Actual:** Error message "AttributeError: ..."

**Environment:**
- OS: Ubuntu 22.04
- Python: 3.10.12
- PyTorch: 2.2.0
```

### 2. Suggesting Features ✨

Feature requests are welcome! Please provide:

- **Clear use case**: Why is this feature needed?
- **Proposed solution**: How should it work?
- **Alternatives considered**: Other approaches you've thought about
- **Additional context**: Screenshots, mockups, references

### 3. Contributing Code 💻

#### Development Setup

```bash
# 1. Fork the repository on GitHub
# 2. Clone your fork
git clone https://github.com/YOUR-USERNAME/ViT-XAI-Dashboard.git
cd ViT-XAI-Dashboard

# 3. Create a virtual environment
python -m venv venv
source venv/bin/activate  # Windows: venv\Scripts\activate

# 4. Install dependencies
pip install -r requirements.txt

# 5. Install development dependencies
pip install pytest black flake8 mypy

# 6. Create a feature branch
git checkout -b feature/amazing-feature
```

#### Code Style Guidelines

**Python Style:**
- Follow [PEP 8](https://pep8.org/)
- Use 4 spaces for indentation
- Maximum line length: 100 characters
- Use meaningful variable names

**Formatting:**
```bash
# Format code with Black
black src/ tests/ app.py

# Check style with flake8
flake8 src/ tests/ app.py --max-line-length=100

# Type checking with mypy
mypy src/ --ignore-missing-imports
```

**Documentation:**
- Add docstrings to all functions and classes
- Use Google-style docstrings
- Update README.md if adding new features

**Example:**
```python
def explain_attention(model, processor, image, layer_index=6, head_index=0):
    """
    Extract and visualize attention weights from a specific layer and head.

    Args:
        model: Pre-trained ViT model with attention outputs enabled.
        processor: Image processor for model input preparation.
        image (PIL.Image): Input image to analyze.
        layer_index (int): Transformer layer index (0-11 for base model).
        head_index (int): Attention head index (0-11 for base model).

    Returns:
        matplotlib.figure.Figure: Visualization of attention patterns.

    Raises:
        ValueError: If layer_index or head_index is out of range.
        RuntimeError: If attention weights cannot be extracted.

    Example:
        >>> from PIL import Image
        >>> image = Image.open("cat.jpg")
        >>> fig = explain_attention(model, processor, image, layer_index=6)
        >>> fig.savefig("attention.png")
    """
    # Implementation...
```

#### Testing

All new features must include tests:

```bash
# Run all tests
pytest tests/

# Run specific test file
pytest tests/test_explainer.py

# Run with coverage
pytest --cov=src tests/
```

**Writing Tests:**
```python
import pytest
from src.explainer import explain_attention

def test_attention_visualization():
    """Test attention visualization with valid inputs."""
    # Setup
    model, processor = load_test_model()
    image = create_test_image()

    # Execute
    fig = explain_attention(model, processor, image, layer_index=6)

    # Assert
    assert fig is not None
    assert len(fig.axes) > 0

def test_attention_invalid_layer():
    """Test attention visualization with invalid layer index."""
    model, processor = load_test_model()
    image = create_test_image()

    with pytest.raises(ValueError):
        explain_attention(model, processor, image, layer_index=99)
```
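The example above calls `load_test_model()` and `create_test_image()` without defining them. A minimal sketch of what such fixtures could look like (hypothetical helpers, not part of the repository; the model ID is the ViT-Base checkpoint used throughout this project):

```python
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor


def load_test_model(name: str = "google/vit-base-patch16-224"):
    """Hypothetical fixture: load a ViT classifier and its processor."""
    model = ViTForImageClassification.from_pretrained(name, output_attentions=True)
    processor = ViTImageProcessor.from_pretrained(name)
    model.eval()
    return model, processor


def create_test_image(size: int = 224) -> Image.Image:
    """Hypothetical fixture: a deterministic solid-color RGB test image."""
    return Image.new("RGB", (size, size), color=(128, 64, 32))
```

In a real suite these would typically live as session-scoped pytest fixtures in `conftest.py`, so the model is downloaded and loaded only once.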

#### Commit Messages

Follow the [Conventional Commits](https://www.conventionalcommits.org/) specification:

```
<type>(<scope>): <subject>

<body>

<footer>
```

**Types:**
- `feat`: New feature
- `fix`: Bug fix
- `docs`: Documentation changes
- `style`: Code style changes (formatting, etc.)
- `refactor`: Code refactoring
- `test`: Adding or updating tests
- `chore`: Maintenance tasks

**Examples:**
```
feat(explainer): add LIME explainability method

- Implement LIME-based explanations
- Add visualization function
- Update documentation

Closes #42
```

```
fix(gradcam): resolve tensor dimension mismatch

GradCAM was failing for batch size != 1 due to
incorrect tensor reshaping. Now properly handles
single image inputs.

Fixes #38
```

#### Pull Request Process

1. **Update documentation**: README, docstrings, etc.
2. **Add tests**: Ensure your code is tested
3. **Run tests locally**: All tests must pass
4. **Update CHANGELOG**: Add your changes
5. **Create PR**: Use a clear title and description

**PR Template:**
```markdown
## Description
Brief description of changes

## Type of Change
- [ ] Bug fix
- [ ] New feature
- [ ] Breaking change
- [ ] Documentation update

## Testing
- [ ] All existing tests pass
- [ ] New tests added for new functionality
- [ ] Tested manually with various inputs

## Checklist
- [ ] Code follows style guidelines
- [ ] Documentation updated
- [ ] No new warnings or errors
- [ ] Commit messages are clear
```

### 4. Improving Documentation 📝

Documentation improvements are always welcome:

- Fix typos or unclear explanations
- Add examples and tutorials
- Improve code comments
- Create video demonstrations
- Translate to other languages

### 5. Reviewing Pull Requests 🔍

Help review open pull requests:

- Test the changes locally
- Provide constructive feedback
- Check for potential issues
- Verify documentation is updated

## 🎯 Good First Issues

Look for issues labeled `good first issue` or `help wanted` - these are great starting points!

## 📌 Project Priorities

Current focus areas:
1. **Stability**: Bug fixes and error handling
2. **Performance**: Optimization for large models
3. **Features**: Additional explainability methods
4. **Documentation**: More examples and tutorials
5. **Testing**: Improved test coverage

## 🤝 Code of Conduct

### Our Pledge

We are committed to providing a welcoming and inspiring community for all.

### Our Standards

**Positive behavior includes:**
- Being respectful of differing viewpoints
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members

**Unacceptable behavior includes:**
- Harassment, trolling, or discriminatory comments
- Personal or political attacks
- Publishing others' private information
- Other conduct which could reasonably be considered inappropriate

### Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the project maintainers. All complaints will be reviewed and investigated.

## 💬 Getting Help

- **Questions**: Use [GitHub Discussions](https://github.com/dyra-12/ViT-XAI-Dashboard/discussions)
- **Bugs**: Open an [issue](https://github.com/dyra-12/ViT-XAI-Dashboard/issues)
- **Chat**: Join our community (link coming soon)

## 🙏 Thank You!

Your contributions, large or small, make this project better. We appreciate your time and effort!

---

**Happy Contributing! 🎉**
Dockerfile
ADDED
@@ -0,0 +1,34 @@
# Use Python 3.10 slim image
FROM python:3.10-slim

# Set working directory
WORKDIR /app

# Install system dependencies
RUN apt-get update && apt-get install -y \
    git \
    curl \
    && rm -rf /var/lib/apt/lists/*

# Copy requirements first for better caching
COPY requirements.txt .

# Install Python dependencies
RUN pip install --no-cache-dir -r requirements.txt

# Copy application files
COPY . .

# Expose Gradio port
EXPOSE 7860

# Set environment variables
ENV GRADIO_SERVER_NAME="0.0.0.0"
ENV GRADIO_SERVER_PORT=7860

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD curl -f http://localhost:7860/ || exit 1

# Run the application
CMD ["python", "app.py"]
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 ViT Auditing Toolkit Contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
QUICKSTART.md
ADDED
@@ -0,0 +1,348 @@
# 🚀 Quick Start Guide

Get up and running with the ViT Auditing Toolkit in under 5 minutes!

## 📋 Prerequisites

Before you begin, ensure you have:
- Python 3.8 or higher installed
- pip package manager
- (Optional) CUDA-compatible GPU for faster inference

Check your Python version:
```bash
python --version  # Should be 3.8+
```

## ⚡ Installation Options

### Option 1: Quick Install (Recommended for Most Users)

```bash
# Clone the repository
git clone https://github.com/dyra-12/ViT-XAI-Dashboard.git
cd ViT-XAI-Dashboard

# Install dependencies
pip install -r requirements.txt

# Run the application
python app.py
```

Open your browser to `http://localhost:7860` 🎉

### Option 2: Virtual Environment (Recommended for Development)

```bash
# Clone the repository
git clone https://github.com/dyra-12/ViT-XAI-Dashboard.git
cd ViT-XAI-Dashboard

# Create virtual environment
python -m venv venv

# Activate virtual environment
# On macOS/Linux:
source venv/bin/activate
# On Windows:
venv\Scripts\activate

# Install dependencies
pip install -r requirements.txt

# Run the application
python app.py
```

### Option 3: Docker (Production Ready)

```bash
# Build the image
docker build -t vit-auditing-toolkit .

# Run the container
docker run -p 7860:7860 vit-auditing-toolkit

# Or use docker-compose
docker-compose up
```

### Option 4: Google Colab (No Installation Required)

[Open in Colab](https://colab.research.google.com/github/dyra-12/ViT-XAI-Dashboard/blob/main/notebooks/colab_demo.ipynb)

Run directly in your browser with free GPU access!

## 🎯 First-Time Usage

### Step 1: Launch the Application

```bash
python app.py
```

You should see:
```
✅ Model and processor loaded successfully on cpu!
Running on local URL: http://localhost:7860
```

### Step 2: Open the Dashboard

Open your web browser and navigate to:
```
http://localhost:7860
```

### Step 3: Load a Model

1. In the **"Select Model"** dropdown, choose `ViT-Base`
2. Click the **"🔄 Load Model"** button
3. Wait for the confirmation: `✅ Model loaded: google/vit-base-patch16-224`

### Step 4: Analyze Your First Image

#### Option A: Use a Sample Image
1. Download a sample image:
```bash
curl -o sample.jpg https://images.unsplash.com/photo-1574158622682-e40e69881006
```
2. Or use any image from your computer

#### Option B: Try the Demo Images
We provide sample images in the `examples/` directory:
- `examples/cat.jpg` - Cat portrait
- `examples/dog.jpg` - Dog portrait
- `examples/bird.jpg` - Bird in flight
- `examples/car.jpg` - Sports car

### Step 5: Run Your First Analysis

1. Go to the **"🔍 Basic Explainability"** tab
2. Click **"📁 Upload Image"** and select your image
3. Keep default settings (Attention Visualization, Layer 6, Head 0)
4. Click **"🔍 Analyze Image"**
5. View the results:
   - **Processed Image**: Your input image
   - **Top Predictions**: Bar chart of confidence scores
   - **Explanation Visualization**: Attention heatmap

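Prefer to script this first analysis? A minimal sketch using the Hugging Face `transformers` API directly (this bypasses the dashboard; it assumes a local `sample.jpg` such as the one downloaded in Step 4, and uses the same ViT-Base checkpoint):

```python
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

# Load the model the dashboard uses by default.
model_id = "google/vit-base-patch16-224"
processor = ViTImageProcessor.from_pretrained(model_id)
model = ViTForImageClassification.from_pretrained(model_id)
model.eval()

# Preprocess the image and run one forward pass.
image = Image.open("sample.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits

# Print the top-5 predictions, mirroring the dashboard's bar chart.
probs = logits.softmax(dim=-1)[0]
top = probs.topk(5)
for p, idx in zip(top.values.tolist(), top.indices.tolist()):
    print(f"{model.config.id2label[idx]}: {p:.3f}")
```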
## 📚 Learning Path

### Beginner: Understanding Predictions
Start with **Basic Explainability** to see:
- What objects the model recognizes
- Which image regions are most important
- How confident the model is

**Try this:**
```
1. Upload a clear photo of a single object
2. Use Attention Visualization (default)
3. Try different layers (0-11) to see how features evolve
4. Switch to GradCAM for a different perspective
```

### Intermediate: Testing Robustness
Move to **Counterfactual Analysis** to explore:
- How stable are predictions when parts of the image change?
- Which regions are critical vs. irrelevant?

**Try this:**
```
1. Upload the same image from before
2. Start with patch_size=32, perturbation_type="blur"
3. Click "Run Counterfactual Analysis"
4. Try different perturbation types to see variations
```

### Advanced: Model Validation
Use **Confidence Calibration** and **Bias Detection**:
- Is the model overconfident?
- Does performance vary across different image conditions?

**Try this:**
```
1. Test calibration with various images
2. Check if confidence matches actual reliability
3. Use bias detection to compare subgroups
```

## 💡 Common Use Cases

### Use Case 1: Debugging Misclassifications

**Problem**: Model misclassifies your image
**Solution**: Use Basic Explainability to see what it's looking at

```
# Steps:
1. Upload misclassified image
2. Check top predictions (might be close to correct class)
3. View attention maps - is it focusing on the right region?
4. Try GradCAM to see discriminative regions
5. Use counterfactual analysis to find sensitive areas
```

### Use Case 2: Model Selection

**Problem**: Choosing between ViT-Base and ViT-Large
**Solution**: Compare predictions and confidence

```
# Steps:
1. Load ViT-Base, analyze your image
2. Note confidence scores and predictions
3. Load ViT-Large, analyze same image
4. Compare:
   - Prediction accuracy
   - Confidence levels
   - Attention patterns
   - Inference time
```

### Use Case 3: Dataset Quality Check

**Problem**: Ensuring your dataset is suitable for ViT
**Solution**: Use bias detection and calibration

```
# Steps:
1. Sample random images from dataset
2. Run bias detection to check for systematic issues
3. Check calibration to see if model is overconfident
4. Identify problematic image categories
```

## 🔧 Troubleshooting

### Problem: Out of Memory Error

**Solution:**
```bash
# Use ViT-Base instead of ViT-Large
# Or reduce image batch size
# Or close other applications

# For programmatic use (Python):
#   import torch
#   torch.cuda.empty_cache()  # Clear GPU memory
```

### Problem: Slow Inference

**Solution:**
```bash
# Check if using GPU:
python -c "import torch; print(torch.cuda.is_available())"

# Install CUDA-enabled PyTorch:
pip install torch torchvision --index-url https://download.pytorch.org/whl/cu118
```
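In code, the usual pattern is to pick the device once and move both the model and its inputs to it (a generic PyTorch idiom, not a snippet from this repository's `app.py`):

```python
import torch
from transformers import ViTForImageClassification

# Use the GPU when one is visible; otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(device)
print(f"Running inference on {device}")

# Inputs must live on the same device before the forward pass, e.g.:
# inputs = {k: v.to(device) for k, v in processor(images=image, return_tensors="pt").items()}
```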

### Problem: Model Download Fails

**Solution:**
```bash
# Set Hugging Face cache directory:
export HF_HOME="/path/to/writable/directory"

# Or download manually:
python -c "from transformers import ViTImageProcessor; ViTImageProcessor.from_pretrained('google/vit-base-patch16-224')"
```

### Problem: Port Already in Use

**Solution:**
```bash
# Use a different port:
# Modify app.py, line: demo.launch(server_port=7861)

# Or kill the process using port 7860:
# Linux/Mac:
lsof -ti:7860 | xargs kill -9

# Windows:
netstat -ano | findstr :7860
taskkill /PID <PID> /F
```

## 📊 Example Workflows

### Workflow 1: Quick Image Classification

```bash
# 1. Start application
python app.py

# 2. In browser (http://localhost:7860):
#    - Load ViT-Base model
#    - Upload image
#    - Click "Analyze Image"
#    - View top predictions

# Total time: < 1 minute
```

### Workflow 2: Comprehensive Model Audit

```bash
# 1. Start application
python app.py

# 2. For each test image:
#    Tab 1: Check predictions and attention
#    Tab 2: Test robustness with perturbations
#    Tab 3: Validate confidence calibration
#    Tab 4: Check for bias across variations

# 3. Document findings
# 4. Iterate on model/data as needed

# Total time: 5-10 minutes per image
```

### Workflow 3: Research Experiment

```bash
# 1. Collect dataset of test images
# 2. For each explainability method:
#    - Run on all test images
#    - Export visualizations
#    - Compute metrics
# 3. Compare methods quantitatively
# 4. Generate paper figures

# Total time: Varies by dataset size
```

## 🎯 Next Steps

After completing this quick start:

1. **Explore Advanced Features**: Try all four tabs with different images
2. **Read Technical Docs**: Understand the methods in detail
3. **Customize Settings**: Adjust parameters for your use case
4. **Integrate into Workflow**: Use programmatically or via API
5. **Contribute**: Share improvements or report issues

## 📚 Additional Resources

- **Full Documentation**: [README.md](README.md)
- **API Reference**: [docs/api.md](docs/api.md)
- **Video Tutorials**: [YouTube Playlist](#)
- **Example Notebooks**: [notebooks/](notebooks/)
- **Community Forum**: [GitHub Discussions](https://github.com/dyra-12/ViT-XAI-Dashboard/discussions)

## 🆘 Getting Help

- **Issues**: [Report bugs](https://github.com/dyra-12/ViT-XAI-Dashboard/issues)
- **Discussions**: [Ask questions](https://github.com/dyra-12/ViT-XAI-Dashboard/discussions)
- **Email**: dyra12@example.com

---

**Ready to dive deeper?** Check out the [full documentation](README.md) or [contributing guidelines](CONTRIBUTING.md)!

🎉 **Happy Exploring!** 🚀
README.md
CHANGED
@@ -0,0 +1,495 @@
# 🎯 ViT Auditing Toolkit

<div align="center">

**A Comprehensive Explainability and Validation Dashboard for Vision Transformers**

[🚀 Live Demo](#) | [📖 Documentation](#features) | [💡 Examples](#usage-guide) | [🤝 Contributing](#contributing)

<img src="https://via.placeholder.com/800x400/0f1419/6366f1?text=ViT+Auditing+Toolkit+Dashboard" alt="Dashboard Preview" width="800"/>

</div>

---

## 📋 Overview

The **ViT Auditing Toolkit** is an advanced, interactive dashboard designed to help researchers, ML practitioners, and AI auditors understand, validate, and improve Vision Transformer (ViT) models. It provides a comprehensive suite of explainability techniques and auditing tools through an intuitive web interface.

### 🌟 Why This Toolkit?

- **🔍 Transparency**: Understand what your ViT models actually "see" and learn
- **✅ Validation**: Verify model reliability through systematic testing
- **⚖️ Fairness**: Detect potential biases across different data subgroups
- **🛡️ Robustness**: Test prediction stability under various perturbations
- **📊 Calibration**: Ensure confidence scores reflect true prediction accuracy

---

## ✨ Features

### 🔬 Basic Explainability
Visualize and understand model predictions through multiple state-of-the-art techniques:

- **🎨 Attention Visualization**: See which image patches the transformer focuses on at each layer and head
- **🔥 GradCAM**: Gradient-weighted Class Activation Mapping for highlighting discriminative regions
- **💫 GradientSHAP**: Shapley value-based attribution for pixel-level importance

### 🔄 Counterfactual Analysis
Test model robustness by systematically perturbing image regions:

- **Patch Perturbation**: Apply blur, blackout, grayscale, or noise to image patches
- **Sensitivity Mapping**: Identify which regions are critical for predictions
- **Prediction Stability**: Measure confidence changes and prediction flip rates

### 📊 Confidence Calibration
Evaluate whether model confidence scores accurately reflect prediction reliability:

- **Calibration Curves**: Visual assessment of confidence vs accuracy alignment
- **Reliability Diagrams**: Binned analysis of prediction calibration
- **Metrics Dashboard**: Mean confidence, overconfidence rate, and underconfidence rate

### ⚖️ Bias Detection
Identify performance disparities across different data subgroups:

- **Subgroup Analysis**: Compare performance across demographic or environmental variations
- **Fairness Metrics**: Detect systematic biases in model predictions
- **Comparative Visualization**: Side-by-side analysis of confidence distributions

---

## 🚀 Live Demo

Try the toolkit instantly on Hugging Face Spaces:

### 👉 [Launch Interactive Demo](https://huggingface.co/spaces/YOUR-USERNAME/vit-auditing-toolkit)

*No installation required! Upload an image and start exploring.*

---

## 📸 Screenshots

<div align="center">

### Basic Explainability Interface
<img src="https://via.placeholder.com/700x400/1a1f2e/a5b4fc?text=Attention+Visualization+%26+Predictions" alt="Basic Explainability" width="700"/>

### Counterfactual Analysis
<img src="https://via.placeholder.com/700x400/1a1f2e/c4b5fd?text=Patch+Perturbation+Analysis" alt="Counterfactual Analysis" width="700"/>

### Calibration & Bias Detection
<img src="https://via.placeholder.com/700x400/1a1f2e/f9a8d4?text=Calibration+%26+Bias+Metrics" alt="Advanced Auditing" width="700"/>

</div>

---

## 🎯 Usage Guide

### Quick Start (3 Steps)

1. **Select a Model**: Choose between ViT-Base or ViT-Large from the dropdown
2. **Upload Your Image**: Any image you want to analyze (JPG, PNG, etc.)
3. **Choose Analysis Type**: Select from 4 tabs based on your needs

### Detailed Workflow

#### 🔍 For Understanding Predictions:
```
1. Go to "Basic Explainability" tab
2. Upload your image
3. Select explanation method (Attention/GradCAM/SHAP)
4. Adjust layer/head indices if needed
5. Click "Analyze Image"
6. View predictions and visual explanations
```

#### 🔄 For Testing Robustness:
```
1. Go to "Counterfactual Analysis" tab
2. Upload your image
3. Set patch size (16-64 pixels)
4. Choose perturbation type (blur/blackout/gray/noise)
5. Click "Run Analysis"
6. Review sensitivity maps and metrics
```

#### 📊 For Validating Confidence:
```
1. Go to "Confidence Calibration" tab
2. Upload a sample image
3. Adjust number of bins for analysis
4. Click "Analyze Calibration"
5. Review calibration curves and metrics
```

#### ⚖️ For Detecting Bias:
```
1. Go to "Bias Detection" tab
2. Upload a sample image
3. Click "Detect Bias"
4. Compare performance across generated subgroups
5. Review fairness metrics
```

---

## 💻 Local Installation

### Prerequisites

- Python 3.8 or higher
- CUDA-compatible GPU (optional, but recommended for faster inference)
- 8GB+ RAM

### Step 1: Clone the Repository

```bash
git clone https://github.com/dyra-12/ViT-XAI-Dashboard.git
cd ViT-XAI-Dashboard
```

### Step 2: Create Virtual Environment (Recommended)

```bash
# Using venv
python -m venv venv
source venv/bin/activate  # On Windows: venv\Scripts\activate

# OR using conda
conda create -n vit-audit python=3.10
conda activate vit-audit
```

### Step 3: Install Dependencies

```bash
pip install -r requirements.txt
```

### Step 4: Run the Application

```bash
python app.py
```

The dashboard will be available at `http://localhost:7860`

### 🐳 Docker Installation (Alternative)

```bash
# Build the Docker image
docker build -t vit-auditing-toolkit .

# Run the container
docker run -p 7860:7860 vit-auditing-toolkit
```

---

## 🏗️ Project Structure

```
ViT-XAI-Dashboard/
│
├── app.py                          # Main Gradio application
├── requirements.txt                # Python dependencies
├── README.md                       # This file
│
├── src/
│   ├── __init__.py
│   ├── model_loader.py             # ViT model loading from Hugging Face
│   ├── predictor.py                # Prediction and classification logic
│   ├── explainer.py                # XAI methods (Attention, GradCAM, SHAP)
│   ├── auditor.py                  # Advanced auditing tools
│   └── utils.py                    # Helper functions and preprocessing
│
└── tests/
    ├── test_phase1_complete.py     # Basic functionality tests
    └── test_advanced_features.py   # Advanced auditing tests
```

---

## 🔧 Technical Details

### Vision Transformers (ViT)

Vision Transformers apply the transformer architecture (originally designed for NLP) to computer vision tasks. Key concepts:

- **Patch Embedding**: Images are split into fixed-size patches (e.g., 16×16 pixels)
- **Self-Attention**: Each patch attends to all other patches to capture global context
- **Layer Hierarchy**: Multiple transformer layers progressively refine representations
- **Classification Token**: A special [CLS] token aggregates information for final prediction

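These sizes pin down exactly how many tokens the attention layers operate over; the arithmetic below (plain Python, matching the patch16/224 models listed later in this README) is worth keeping in mind when reading the attention maps:

```python
# Token-count arithmetic for ViT-Base (16x16 patches, 224x224 input).
image_size, patch_size = 224, 16
patches_per_side = image_size // patch_size  # 14
num_patches = patches_per_side ** 2          # 196 image patches
num_tokens = num_patches + 1                 # 197 tokens, including [CLS]
print(patches_per_side, num_patches, num_tokens)
```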
**Advantages:**
- Strong performance on large-scale datasets
- Captures long-range dependencies better than CNNs
- More interpretable through attention mechanisms

### Explainability Techniques

#### 1. Attention Visualization
**Method**: Direct visualization of transformer attention weights
**Purpose**: Shows which image patches the model focuses on
**Implementation**: Extracts attention matrices from specified layers/heads

```python
# Example: Layer 6, Head 0 typically captures semantic patterns
attention_map = model.encoder.layer[6].attention.self.attention_weights
```
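The snippet above is schematic; Hugging Face ViT layers do not expose an `attention_weights` attribute directly. A runnable equivalent requests the attentions from the forward pass via the public `output_attentions` flag (a sketch assuming a local `cat.jpg`; the 14×14 reshape follows the patch arithmetic above):

```python
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

model_id = "google/vit-base-patch16-224"
processor = ViTImageProcessor.from_pretrained(model_id)
model = ViTForImageClassification.from_pretrained(model_id, output_attentions=True)
model.eval()

inputs = processor(images=Image.open("cat.jpg").convert("RGB"), return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# outputs.attentions is a tuple with one tensor per layer, each shaped
# (batch, heads, tokens, tokens) = (1, 12, 197, 197) for ViT-Base.
layer_index, head_index = 6, 0
attn = outputs.attentions[layer_index][0, head_index]

# Attention paid by the [CLS] token (index 0) to the 196 image patches,
# reshaped onto the 14x14 patch grid for plotting as a heatmap.
cls_to_patches = attn[0, 1:].reshape(14, 14)
```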

#### 2. GradCAM (Gradient-weighted Class Activation Mapping)
**Method**: Uses gradients flowing into the final conv layer
**Purpose**: Highlights discriminative regions for target class
**Implementation**: Via Captum's `LayerGradCam`

```python
# Generates heatmap showing which regions support the prediction
gradcam = LayerGradCam(model, target_layer)
attribution = gradcam.attribute(input, target=predicted_class)
```

#### 3. GradientSHAP (Gradient-based Shapley Values)
**Method**: Combines Shapley values with gradient information
**Purpose**: Pixel-level attribution with theoretical guarantees
**Implementation**: Via Captum's `GradientShap`

```python
# Computes fair attribution using random baselines
gradient_shap = GradientShap(model)
attributions = gradient_shap.attribute(input, baselines=random_baselines)
```
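Both Captum snippets above omit setup. A fuller, self-contained sketch of the GradientSHAP call against a ViT classifier could look like this (the `forward` wrapper, random input, and baseline choice are illustrative assumptions, not the exact code in `src/explainer.py`):

```python
import torch
from captum.attr import GradientShap
from transformers import ViTForImageClassification

model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
model.eval()

def forward(pixel_values: torch.Tensor) -> torch.Tensor:
    # Captum attributes a plain tensor-to-logits function.
    return model(pixel_values=pixel_values).logits

pixel_values = torch.randn(1, 3, 224, 224)       # stand-in for a preprocessed image
baselines = torch.randn(5, 3, 224, 224) * 0.001  # near-zero random baselines

target_class = int(forward(pixel_values).argmax(dim=-1))
gradient_shap = GradientShap(forward)
attributions = gradient_shap.attribute(
    pixel_values, baselines=baselines, n_samples=10, target=target_class
)
print(attributions.shape)  # same shape as the input: (1, 3, 224, 224)
```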

### Auditing Methodologies

#### Counterfactual Analysis
Systematically modifies image regions to test:
- **Robustness**: Does the prediction remain stable?
- **Feature Importance**: Which regions matter most?
- **Adversarial Vulnerability**: How easy is it to fool the model?

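The loop behind this analysis can be sketched in a few lines; the version below blurs a single 32×32 patch with Pillow and measures the confidence drop (a simplified illustration assuming a local `cat.jpg`; the toolkit's own implementation in `src/auditor.py` may differ, and sweeping the patch across the image yields the full sensitivity map):

```python
import torch
from PIL import Image, ImageFilter
from transformers import ViTForImageClassification, ViTImageProcessor

model_id = "google/vit-base-patch16-224"
processor = ViTImageProcessor.from_pretrained(model_id)
model = ViTForImageClassification.from_pretrained(model_id)
model.eval()

def confidence(image, target=None):
    """Return (class_index, probability); defaults to the top class."""
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        probs = model(**inputs).logits.softmax(dim=-1)[0]
    if target is None:
        target = int(probs.argmax())
    return target, probs[target].item()

image = Image.open("cat.jpg").convert("RGB").resize((224, 224))
target, base_conf = confidence(image)

# Blur one 32x32 patch and re-score the original prediction.
x, y, patch = 96, 96, 32
blurred = image.crop((x, y, x + patch, y + patch)).filter(ImageFilter.GaussianBlur(8))
perturbed = image.copy()
perturbed.paste(blurred, (x, y))
_, pert_conf = confidence(perturbed, target)
print(f"confidence drop: {base_conf - pert_conf:.3f}")
```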
#### Confidence Calibration
Measures alignment between predicted confidence and actual accuracy:
- **Well-calibrated**: 80% confidence → 80% correct
- **Overconfident**: 90% confidence → 60% correct (problem!)
- **Underconfident**: 50% confidence → 80% correct (less critical)

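A standard way to turn these bullet points into a single number is the expected calibration error (ECE): bin predictions by confidence and average the per-bin gap between mean confidence and accuracy, weighted by bin size. A generic NumPy sketch (the textbook formula, not necessarily the toolkit's exact metric code):

```python
import numpy as np

def expected_calibration_error(confidences, correct, n_bins=10):
    """ECE: size-weighted |mean confidence - accuracy| across confidence bins."""
    confidences = np.asarray(confidences, dtype=float)
    correct = np.asarray(correct, dtype=float)
    edges = np.linspace(0.0, 1.0, n_bins + 1)
    ece = 0.0
    for lo, hi in zip(edges[:-1], edges[1:]):
        mask = (confidences > lo) & (confidences <= hi)
        if mask.any():
            gap = abs(confidences[mask].mean() - correct[mask].mean())
            ece += mask.mean() * gap  # weight by the fraction of samples in the bin
    return ece

# Toy overconfident model: ~90% confidence but only ~60% accuracy.
conf = np.full(100, 0.9)
hits = np.random.default_rng(0).random(100) < 0.6
print(expected_calibration_error(conf, hits))  # roughly 0.3
```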
#### Bias Detection
Compares performance across subgroups to identify:
- **Demographic bias**: Different accuracy for different groups
- **Environmental bias**: Performance varies with lighting, quality, etc.
- **Systematic patterns**: Consistent over/under-performance

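A simple single-image version of this check generates controlled variants and compares the model's confidence across them, as in the sketch below (illustrative only, assuming a local `cat.jpg`; the subgroup generation in `src/auditor.py` may use different transformations):

```python
import torch
from PIL import Image, ImageEnhance
from transformers import ViTForImageClassification, ViTImageProcessor

model_id = "google/vit-base-patch16-224"
processor = ViTImageProcessor.from_pretrained(model_id)
model = ViTForImageClassification.from_pretrained(model_id)
model.eval()

def top_confidence(image):
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        return model(**inputs).logits.softmax(dim=-1)[0].max().item()

image = Image.open("cat.jpg").convert("RGB")

# Environmental "subgroups": the same content under different brightness.
for factor in (0.5, 0.75, 1.0, 1.25, 1.5):
    variant = ImageEnhance.Brightness(image).enhance(factor)
    print(f"brightness x{factor}: top confidence {top_confidence(variant):.3f}")
```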
---

## 🧠 Supported Models

Currently supported Vision Transformer models from Hugging Face:

| Model | Parameters | Input Size | Accuracy (ImageNet) |
|-------|-----------|------------|---------------------|
| `google/vit-base-patch16-224` | 86M | 224×224 | ~81.3% |
| `google/vit-large-patch16-224` | 304M | 224×224 | ~82.6% |

**Easy to extend**: Add any Hugging Face ViT model to `src/model_loader.py`

---

## 📦 Dependencies

### Core Libraries

- **PyTorch** (≥2.2.0): Deep learning framework
- **Transformers** (≥4.35.0): Hugging Face model hub
- **Gradio** (≥4.19.0): Web interface framework
- **Captum** (≥0.7.0): Model interpretability library

### Supporting Libraries

- **Pillow**: Image processing
- **Matplotlib**: Visualization
- **NumPy**: Numerical computations

See `requirements.txt` for complete list with version constraints.

---

## 🎓 Use Cases

### Research
- **Interpretability Studies**: Analyze transformer attention patterns
- **Benchmark Explainability**: Compare XAI methods systematically
- **Model Auditing**: Validate models before deployment

### Industry
- **Model Validation**: Ensure reliability before production
- **Bias Auditing**: Detect and mitigate fairness issues
- **Regulatory Compliance**: Document model decision-making

### Education
- **Teaching Tool**: Demonstrate XAI concepts interactively
- **Student Projects**: Foundation for ML course assignments
- **Research Training**: Hands-on experience with modern techniques

---

## 🛣️ Roadmap

### Upcoming Features
- [ ] Support for additional ViT variants (DeiT, BEiT, Swin Transformer)
- [ ] Batch processing for multiple images
- [ ] Export functionality for reports and visualizations
- [ ] Custom model upload support
- [ ] Comparative analysis across multiple models
- [ ] Integration with model monitoring platforms
- [ ] Advanced bias metrics (demographic parity, equalized odds)
- [ ] Adversarial robustness testing
- [ ] API endpoint for programmatic access

### Long-term Vision
- Multi-modal transformer support (CLIP, ViLT)
- Video analysis capabilities
- Automated auditing pipelines
- Integration with MLOps platforms

---

## 🤝 Contributing

Contributions are welcome! Here's how you can help:

### Ways to Contribute

1. **🐛 Bug Reports**: Open an issue with detailed reproduction steps
2. **✨ Feature Requests**: Suggest new explainability methods or auditing tools
3. **📝 Documentation**: Improve guides, add examples, fix typos
4. **💻 Code**: Submit pull requests for new features or fixes
5. **🎨 UI/UX**: Enhance the dashboard design and user experience

### Development Setup

```bash
# Fork and clone the repository
git clone https://github.com/YOUR-USERNAME/ViT-XAI-Dashboard.git
cd ViT-XAI-Dashboard

# Create a feature branch
git checkout -b feature/your-feature-name

# Make changes and test
python -m pytest tests/

# Commit and push
git commit -m "Add: your feature description"
git push origin feature/your-feature-name

# Open a pull request
```

### Code Style
- Follow PEP 8 guidelines
- Add docstrings to all functions
- Include type hints where applicable
- Write unit tests for new features

---

## 📄 License

This project is licensed under the **MIT License** - see the [LICENSE](LICENSE) file for details.

```
MIT License

Copyright (c) 2024 ViT Auditing Toolkit Contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

[Full license text...]
```

---

## 📚 References & Citations

### Academic Papers

1. **Vision Transformers**
   Dosovitskiy, A., et al. (2021). "An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale." *ICLR 2021*.

2. **GradCAM**
   Selvaraju, R. R., et al. (2017). "Grad-CAM: Visual Explanations from Deep Networks via Gradient-based Localization." *ICCV 2017*.

3. **SHAP**
   Lundberg, S. M., & Lee, S. I. (2017). "A Unified Approach to Interpreting Model Predictions." *NeurIPS 2017*.

4. **Model Calibration**
   Guo, C., et al. (2017). "On Calibration of Modern Neural Networks." *ICML 2017*.

### Related Tools

- [Captum](https://captum.ai/): Model interpretability for PyTorch
- [Hugging Face Transformers](https://huggingface.co/transformers/): State-of-the-art NLP and Vision models
- [Gradio](https://gradio.app/): Fast ML demo creation

### Citation

If you use this toolkit in your research, please cite:

```bibtex
@software{vit_auditing_toolkit_2024,
  title={ViT Auditing Toolkit: Comprehensive Explainability for Vision Transformers},
  author={dyra-12},
  year={2024},
  url={https://github.com/dyra-12/ViT-XAI-Dashboard}
}
```

---

## 🙏 Acknowledgments

- **Hugging Face** for providing pre-trained ViT models and the Transformers library
- **Captum Team** for the excellent interpretability library
- **Gradio Team** for the intuitive ML interface framework
- **PyTorch Community** for the robust deep learning ecosystem
- All contributors and users who provide feedback and improvements

---

## 📧 Contact & Support

- **GitHub Issues**: [Report bugs or request features](https://github.com/dyra-12/ViT-XAI-Dashboard/issues)
- **Discussions**: [Ask questions or share ideas](https://github.com/dyra-12/ViT-XAI-Dashboard/discussions)
- **Email**: dyra12@example.com

---

## 🌟 Star History

If you find this project useful, please consider giving it a ⭐️ on GitHub!

[Star History Chart](https://star-history.com/#dyra-12/ViT-XAI-Dashboard&Date)

---

<div align="center">

**Built with ❤️ by the community**

[⬆ Back to Top](#-vit-auditing-toolkit)

</div>
docker-compose.yml
ADDED
@@ -0,0 +1,21 @@
version: '3.8'

services:
  vit-auditing-toolkit:
    build: .
    container_name: vit-auditing-toolkit
    ports:
      - "7860:7860"
    environment:
      - GRADIO_SERVER_NAME=0.0.0.0
      - GRADIO_SERVER_PORT=7860
    volumes:
      - ./models:/app/models      # Cache downloaded models
      - ./examples:/app/examples  # Mount example images
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:7860/"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s