Spaces:
Sleeping
feat: Implementa estrutura completa de testes com pytest
Browse files- Adiciona framework de testes robusto com pytest
- Cria estrutura organizada: tests/unit/, tests/integration/, tests/data/
- Implementa fixtures reutilizáveis e padrões de teste
- Adiciona testes unitários para sistema de votação e processamento
- Cria testes de integração para análise de mercado completa
- Configura pytest.ini com marcadores e configurações otimizadas
- Adiciona Makefile com comandos automatizados para desenvolvimento
- Implementa configurações de qualidade: .pylintrc, .isort.cfg
- Atualiza requirements.txt com bibliotecas de teste
- Cria .gitignore abrangente para Python/ML/DevOps
- Adiciona scripts de análise de logs avançados
- Atualiza documentação com seção completa de testes
- Documenta comandos make e pytest para execução de testes
- .gitignore +567 -0
- .isort.cfg +141 -0
- .pylintrc +429 -0
- Makefile +296 -0
- README.md +66 -1
- pytest.ini +55 -0
- requirements.txt +9 -0
- scripts/log_analysis/README.md +219 -0
- scripts/log_analysis/advanced_log_analyzer.py +283 -0
- scripts/log_analysis/demo.py +257 -0
- scripts/log_analysis/realtime_monitor.py +253 -0
- scripts/log_analysis/setup.py +366 -0
- tests/README.md +404 -0
- tests/conftest.py +136 -0
- tests/integration/conftest.py +175 -0
- tests/integration/test_market_analysis_integration.py +356 -0
- tests/test_patterns.py +330 -0
- tests/unit/conftest.py +87 -0
- tests/unit/test_advanced_market_processing.py +251 -0
- tests/unit/test_voting_system.py +332 -0
|
@@ -0,0 +1,567 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / optimized / DLL files
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*$py.class
|
| 5 |
+
|
| 6 |
+
# C extensions
|
| 7 |
+
*.so
|
| 8 |
+
|
| 9 |
+
# Distribution / packaging
|
| 10 |
+
.Python
|
| 11 |
+
build/
|
| 12 |
+
develop-eggs/
|
| 13 |
+
dist/
|
| 14 |
+
downloads/
|
| 15 |
+
eggs/
|
| 16 |
+
.eggs/
|
| 17 |
+
lib/
|
| 18 |
+
lib64/
|
| 19 |
+
parts/
|
| 20 |
+
sdist/
|
| 21 |
+
var/
|
| 22 |
+
wheels/
|
| 23 |
+
share/python-wheels/
|
| 24 |
+
*.egg-info/
|
| 25 |
+
.installed.cfg
|
| 26 |
+
*.egg
|
| 27 |
+
MANIFEST
|
| 28 |
+
|
| 29 |
+
# PyInstaller
|
| 30 |
+
# Usually these files are written by a python script from a template
|
| 31 |
+
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
| 32 |
+
*.manifest
|
| 33 |
+
*.spec
|
| 34 |
+
|
| 35 |
+
# Installer logs
|
| 36 |
+
pip-log.txt
|
| 37 |
+
pip-delete-this-directory.txt
|
| 38 |
+
|
| 39 |
+
# Unit test / coverage reports
|
| 40 |
+
htmlcov/
|
| 41 |
+
.tox/
|
| 42 |
+
.nox/
|
| 43 |
+
.coverage
|
| 44 |
+
.coverage.*
|
| 45 |
+
.cache
|
| 46 |
+
nosetests.xml
|
| 47 |
+
coverage.xml
|
| 48 |
+
*.cover
|
| 49 |
+
*.py,cover
|
| 50 |
+
.hypothesis/
|
| 51 |
+
.pytest_cache/
|
| 52 |
+
cover/
|
| 53 |
+
|
| 54 |
+
# Translations
|
| 55 |
+
*.mo
|
| 56 |
+
*.pot
|
| 57 |
+
|
| 58 |
+
# Django stuff:
|
| 59 |
+
*.log
|
| 60 |
+
local_settings.py
|
| 61 |
+
db.sqlite3
|
| 62 |
+
db.sqlite3-journal
|
| 63 |
+
|
| 64 |
+
# Flask stuff:
|
| 65 |
+
instance/
|
| 66 |
+
.webassets-cache
|
| 67 |
+
|
| 68 |
+
# Scrapy stuff:
|
| 69 |
+
.scrapy
|
| 70 |
+
|
| 71 |
+
# Sphinx documentation
|
| 72 |
+
docs/_build/
|
| 73 |
+
|
| 74 |
+
# PyBuilder
|
| 75 |
+
.pybuilder/
|
| 76 |
+
target/
|
| 77 |
+
|
| 78 |
+
# Jupyter Notebook
|
| 79 |
+
.ipynb_checkpoints
|
| 80 |
+
|
| 81 |
+
# IPython
|
| 82 |
+
profile_default/
|
| 83 |
+
ipython_config.py
|
| 84 |
+
|
| 85 |
+
# pyenv
|
| 86 |
+
# For a library or package, you might want to ignore these files since the code is
|
| 87 |
+
# intended to run in multiple environments; otherwise, check them in:
|
| 88 |
+
# .python-version
|
| 89 |
+
|
| 90 |
+
# pipenv
|
| 91 |
+
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
| 92 |
+
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
| 93 |
+
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
| 94 |
+
# install all needed dependencies.
|
| 95 |
+
#Pipfile.lock
|
| 96 |
+
|
| 97 |
+
# poetry
|
| 98 |
+
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
| 99 |
+
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
| 100 |
+
# commonly ignored for libraries.
|
| 101 |
+
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
| 102 |
+
#poetry.lock
|
| 103 |
+
|
| 104 |
+
# pdm
|
| 105 |
+
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
| 106 |
+
#pdm.lock
|
| 107 |
+
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
| 108 |
+
# in version control.
|
| 109 |
+
# https://pdm.fming.dev/#use-with-ide
|
| 110 |
+
.pdm.toml
|
| 111 |
+
|
| 112 |
+
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
| 113 |
+
__pypackages__/
|
| 114 |
+
|
| 115 |
+
# Celery stuff
|
| 116 |
+
celerybeat-schedule
|
| 117 |
+
celerybeat.pid
|
| 118 |
+
|
| 119 |
+
# SageMath parsed files
|
| 120 |
+
*.sage.py
|
| 121 |
+
|
| 122 |
+
# Environments
|
| 123 |
+
.env
|
| 124 |
+
.venv
|
| 125 |
+
env/
|
| 126 |
+
venv/
|
| 127 |
+
ENV/
|
| 128 |
+
env.bak/
|
| 129 |
+
venv.bak/
|
| 130 |
+
|
| 131 |
+
# Spyder project settings
|
| 132 |
+
.spyderproject
|
| 133 |
+
.spyproject
|
| 134 |
+
|
| 135 |
+
# Rope project settings
|
| 136 |
+
.ropeproject
|
| 137 |
+
|
| 138 |
+
# mkdocs documentation
|
| 139 |
+
/site
|
| 140 |
+
|
| 141 |
+
# mypy
|
| 142 |
+
.mypy_cache/
|
| 143 |
+
.dmypy.json
|
| 144 |
+
dmypy.json
|
| 145 |
+
|
| 146 |
+
# Pyre type checker
|
| 147 |
+
.pyre/
|
| 148 |
+
|
| 149 |
+
# pytype static type analyzer
|
| 150 |
+
.pytype/
|
| 151 |
+
|
| 152 |
+
# Cython debug symbols
|
| 153 |
+
cython_debug/
|
| 154 |
+
|
| 155 |
+
# PyCharm
|
| 156 |
+
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
| 157 |
+
# be added to the global gitignore or merged into this project gitignore. For a PyCharm
|
| 158 |
+
# project, it is recommended to ignore the entire .idea directory.
|
| 159 |
+
.idea/
|
| 160 |
+
|
| 161 |
+
# VS Code
|
| 162 |
+
.vscode/
|
| 163 |
+
!.vscode/settings.json
|
| 164 |
+
!.vscode/tasks.json
|
| 165 |
+
!.vscode/launch.json
|
| 166 |
+
!.vscode/extensions.json
|
| 167 |
+
!.vscode/*.code-snippets
|
| 168 |
+
|
| 169 |
+
# Local History for Visual Studio Code
|
| 170 |
+
.history/
|
| 171 |
+
|
| 172 |
+
# Built Visual Studio Code Extensions
|
| 173 |
+
*.vsix
|
| 174 |
+
|
| 175 |
+
# Project specific ignores
|
| 176 |
+
|
| 177 |
+
# Logs and databases
|
| 178 |
+
logs/
|
| 179 |
+
*.log
|
| 180 |
+
*.db
|
| 181 |
+
*.sqlite
|
| 182 |
+
*.sqlite3
|
| 183 |
+
application.db
|
| 184 |
+
|
| 185 |
+
# Cache directories
|
| 186 |
+
cache/
|
| 187 |
+
.cache/
|
| 188 |
+
ai_cache/
|
| 189 |
+
model_cache/
|
| 190 |
+
data_cache/
|
| 191 |
+
__pycache__/
|
| 192 |
+
|
| 193 |
+
# Temporary files
|
| 194 |
+
tmp/
|
| 195 |
+
temp/
|
| 196 |
+
*.tmp
|
| 197 |
+
*.temp
|
| 198 |
+
|
| 199 |
+
# OS generated files
|
| 200 |
+
.DS_Store
|
| 201 |
+
.DS_Store?
|
| 202 |
+
._*
|
| 203 |
+
.Spotlight-V100
|
| 204 |
+
.Trashes
|
| 205 |
+
ehthumbs.db
|
| 206 |
+
Thumbs.db
|
| 207 |
+
|
| 208 |
+
# IDE and editor files
|
| 209 |
+
*.swp
|
| 210 |
+
*.swo
|
| 211 |
+
*~
|
| 212 |
+
.vscode/
|
| 213 |
+
.idea/
|
| 214 |
+
*.sublime-project
|
| 215 |
+
*.sublime-workspace
|
| 216 |
+
|
| 217 |
+
# Test reports and coverage
|
| 218 |
+
reports/
|
| 219 |
+
htmlcov/
|
| 220 |
+
.coverage
|
| 221 |
+
coverage.xml
|
| 222 |
+
*.cover
|
| 223 |
+
junit.xml
|
| 224 |
+
test-results/
|
| 225 |
+
test-reports/
|
| 226 |
+
pytest-report.html
|
| 227 |
+
|
| 228 |
+
# Performance profiling
|
| 229 |
+
*.prof
|
| 230 |
+
profile_results.prof
|
| 231 |
+
*.pstats
|
| 232 |
+
|
| 233 |
+
# Security and secrets
|
| 234 |
+
.env
|
| 235 |
+
.env.local
|
| 236 |
+
.env.*.local
|
| 237 |
+
secrets.json
|
| 238 |
+
config/secrets/
|
| 239 |
+
*.key
|
| 240 |
+
*.pem
|
| 241 |
+
*.p12
|
| 242 |
+
*.pfx
|
| 243 |
+
|
| 244 |
+
# Model files (large binary files)
|
| 245 |
+
*.pkl
|
| 246 |
+
*.pickle
|
| 247 |
+
*.joblib
|
| 248 |
+
*.h5
|
| 249 |
+
*.hdf5
|
| 250 |
+
*.pt
|
| 251 |
+
*.pth
|
| 252 |
+
*.onnx
|
| 253 |
+
*.pb
|
| 254 |
+
*.tflite
|
| 255 |
+
models/
|
| 256 |
+
checkpoints/
|
| 257 |
+
weights/
|
| 258 |
+
|
| 259 |
+
# Data files (should be managed separately)
|
| 260 |
+
data/raw/
|
| 261 |
+
data/processed/
|
| 262 |
+
data/external/
|
| 263 |
+
*.csv
|
| 264 |
+
*.json
|
| 265 |
+
*.parquet
|
| 266 |
+
*.feather
|
| 267 |
+
*.xlsx
|
| 268 |
+
*.xls
|
| 269 |
+
|
| 270 |
+
# Jupyter notebook outputs
|
| 271 |
+
*.ipynb
|
| 272 |
+
.ipynb_checkpoints/
|
| 273 |
+
|
| 274 |
+
# Documentation builds
|
| 275 |
+
docs/_build/
|
| 276 |
+
docs/build/
|
| 277 |
+
site/
|
| 278 |
+
|
| 279 |
+
# Package builds
|
| 280 |
+
build/
|
| 281 |
+
dist/
|
| 282 |
+
*.egg-info/
|
| 283 |
+
|
| 284 |
+
# Virtual environments
|
| 285 |
+
venv/
|
| 286 |
+
env/
|
| 287 |
+
.venv/
|
| 288 |
+
.env/
|
| 289 |
+
conda-env/
|
| 290 |
+
|
| 291 |
+
# Docker
|
| 292 |
+
.dockerignore
|
| 293 |
+
Dockerfile.dev
|
| 294 |
+
docker-compose.override.yml
|
| 295 |
+
|
| 296 |
+
# Kubernetes
|
| 297 |
+
*.yaml.local
|
| 298 |
+
*.yml.local
|
| 299 |
+
kustomization.yaml
|
| 300 |
+
|
| 301 |
+
# Terraform
|
| 302 |
+
*.tfstate
|
| 303 |
+
*.tfstate.*
|
| 304 |
+
.terraform/
|
| 305 |
+
.terraform.lock.hcl
|
| 306 |
+
|
| 307 |
+
# AWS
|
| 308 |
+
.aws/
|
| 309 |
+
|
| 310 |
+
# Google Cloud
|
| 311 |
+
.gcloud/
|
| 312 |
+
service-account-key.json
|
| 313 |
+
|
| 314 |
+
# Azure
|
| 315 |
+
.azure/
|
| 316 |
+
|
| 317 |
+
# Monitoring and observability
|
| 318 |
+
prometheus/
|
| 319 |
+
grafana/data/
|
| 320 |
+
|
| 321 |
+
# CI/CD
|
| 322 |
+
.github/workflows/*.local.yml
|
| 323 |
+
.gitlab-ci.local.yml
|
| 324 |
+
|
| 325 |
+
# Local development
|
| 326 |
+
local/
|
| 327 |
+
dev/
|
| 328 |
+
sandbox/
|
| 329 |
+
experiments/
|
| 330 |
+
playground/
|
| 331 |
+
|
| 332 |
+
# Backup files
|
| 333 |
+
*.bak
|
| 334 |
+
*.backup
|
| 335 |
+
*.old
|
| 336 |
+
|
| 337 |
+
# Archive files
|
| 338 |
+
*.zip
|
| 339 |
+
*.tar
|
| 340 |
+
*.tar.gz
|
| 341 |
+
*.rar
|
| 342 |
+
*.7z
|
| 343 |
+
|
| 344 |
+
# Media files (unless specifically needed)
|
| 345 |
+
*.mp4
|
| 346 |
+
*.avi
|
| 347 |
+
*.mov
|
| 348 |
+
*.wmv
|
| 349 |
+
*.flv
|
| 350 |
+
*.mp3
|
| 351 |
+
*.wav
|
| 352 |
+
*.flac
|
| 353 |
+
*.jpg
|
| 354 |
+
*.jpeg
|
| 355 |
+
*.png
|
| 356 |
+
*.gif
|
| 357 |
+
*.bmp
|
| 358 |
+
*.tiff
|
| 359 |
+
*.svg
|
| 360 |
+
*.ico
|
| 361 |
+
|
| 362 |
+
# Office documents
|
| 363 |
+
*.doc
|
| 364 |
+
*.docx
|
| 365 |
+
*.ppt
|
| 366 |
+
*.pptx
|
| 367 |
+
*.xls
|
| 368 |
+
*.xlsx
|
| 369 |
+
*.pdf
|
| 370 |
+
|
| 371 |
+
# Compiled binaries
|
| 372 |
+
*.exe
|
| 373 |
+
*.dll
|
| 374 |
+
*.so
|
| 375 |
+
*.dylib
|
| 376 |
+
|
| 377 |
+
# Node.js (if using any JS tools)
|
| 378 |
+
node_modules/
|
| 379 |
+
npm-debug.log*
|
| 380 |
+
yarn-debug.log*
|
| 381 |
+
yarn-error.log*
|
| 382 |
+
package-lock.json
|
| 383 |
+
yarn.lock
|
| 384 |
+
|
| 385 |
+
# Python package management
|
| 386 |
+
Pipfile
|
| 387 |
+
Pipfile.lock
|
| 388 |
+
poetry.lock
|
| 389 |
+
requirements-frozen.txt
|
| 390 |
+
|
| 391 |
+
# Gradio specific
|
| 392 |
+
gradio_cached_examples/
|
| 393 |
+
flagged/
|
| 394 |
+
|
| 395 |
+
# Hugging Face specific
|
| 396 |
+
.huggingface/
|
| 397 |
+
transformers_cache/
|
| 398 |
+
hf_cache/
|
| 399 |
+
|
| 400 |
+
# MLflow
|
| 401 |
+
mlruns/
|
| 402 |
+
mlartifacts/
|
| 403 |
+
|
| 404 |
+
# Weights & Biases
|
| 405 |
+
wandb/
|
| 406 |
+
|
| 407 |
+
# TensorBoard
|
| 408 |
+
runs/
|
| 409 |
+
logs/tensorboard/
|
| 410 |
+
|
| 411 |
+
# CUDA
|
| 412 |
+
*.cu
|
| 413 |
+
*.cuh
|
| 414 |
+
|
| 415 |
+
# Profiling
|
| 416 |
+
*.nvprof
|
| 417 |
+
*.nvvp
|
| 418 |
+
|
| 419 |
+
# Local configuration overrides
|
| 420 |
+
config.local.py
|
| 421 |
+
settings.local.py
|
| 422 |
+
local_config.py
|
| 423 |
+
|
| 424 |
+
# Database migrations
|
| 425 |
+
migrations/versions/
|
| 426 |
+
alembic/versions/
|
| 427 |
+
|
| 428 |
+
# Redis dumps
|
| 429 |
+
dump.rdb
|
| 430 |
+
|
| 431 |
+
# Elasticsearch
|
| 432 |
+
data/elasticsearch/
|
| 433 |
+
|
| 434 |
+
# Logs from various services
|
| 435 |
+
nohup.out
|
| 436 |
+
celery.log
|
| 437 |
+
gunicorn.log
|
| 438 |
+
nginx.log
|
| 439 |
+
|
| 440 |
+
# Temporary directories created by tests
|
| 441 |
+
test_temp/
|
| 442 |
+
tmp_test/
|
| 443 |
+
.tmp_test/
|
| 444 |
+
|
| 445 |
+
# Coverage reports in different formats
|
| 446 |
+
.coverage.*
|
| 447 |
+
coverage/
|
| 448 |
+
|
| 449 |
+
# Pytest
|
| 450 |
+
.pytest_cache/
|
| 451 |
+
pytest.ini.local
|
| 452 |
+
|
| 453 |
+
# MyPy
|
| 454 |
+
.mypy_cache/
|
| 455 |
+
.dmypy.json
|
| 456 |
+
dmypy.json
|
| 457 |
+
|
| 458 |
+
# Bandit security reports
|
| 459 |
+
.bandit
|
| 460 |
+
bandit-report.json
|
| 461 |
+
|
| 462 |
+
# Safety vulnerability reports
|
| 463 |
+
safety-report.json
|
| 464 |
+
|
| 465 |
+
# Black code formatter
|
| 466 |
+
.black
|
| 467 |
+
|
| 468 |
+
# isort
|
| 469 |
+
.isort.cfg.local
|
| 470 |
+
|
| 471 |
+
# Flake8
|
| 472 |
+
.flake8.local
|
| 473 |
+
|
| 474 |
+
# Pylint
|
| 475 |
+
pylint-report.txt
|
| 476 |
+
.pylintrc.local
|
| 477 |
+
|
| 478 |
+
# Pre-commit
|
| 479 |
+
.pre-commit-config.yaml.local
|
| 480 |
+
|
| 481 |
+
# Local scripts and utilities
|
| 482 |
+
scripts/local/
|
| 483 |
+
utils/local/
|
| 484 |
+
tools/local/
|
| 485 |
+
|
| 486 |
+
# Experimental code
|
| 487 |
+
experiments/
|
| 488 |
+
prototypes/
|
| 489 |
+
scratch/
|
| 490 |
+
|
| 491 |
+
# Performance benchmarks
|
| 492 |
+
benchmarks/results/
|
| 493 |
+
perf/
|
| 494 |
+
|
| 495 |
+
# Load testing
|
| 496 |
+
load_test_results/
|
| 497 |
+
|
| 498 |
+
# API documentation
|
| 499 |
+
api_docs/
|
| 500 |
+
openapi.json
|
| 501 |
+
swagger.json
|
| 502 |
+
|
| 503 |
+
# Database backups
|
| 504 |
+
*.sql
|
| 505 |
+
*.dump
|
| 506 |
+
|
| 507 |
+
# Configuration templates
|
| 508 |
+
*.template
|
| 509 |
+
*.example
|
| 510 |
+
|
| 511 |
+
# Lock files for various tools
|
| 512 |
+
*.lock
|
| 513 |
+
|
| 514 |
+
# Temporary Python files
|
| 515 |
+
*.pyc
|
| 516 |
+
*.pyo
|
| 517 |
+
*.pyd
|
| 518 |
+
|
| 519 |
+
# Caches
|
| 520 |
+
.cache/
|
| 521 |
+
.pytest_cache/
|
| 522 |
+
.mypy_cache/
|
| 523 |
+
.ruff_cache/
|
| 524 |
+
|
| 525 |
+
# IDE specific files
|
| 526 |
+
.vscode/settings.json.local
|
| 527 |
+
.idea/workspace.xml
|
| 528 |
+
.idea/tasks.xml
|
| 529 |
+
.idea/usage.statistics.xml
|
| 530 |
+
.idea/dictionaries/
|
| 531 |
+
.idea/shelf/
|
| 532 |
+
|
| 533 |
+
# OS specific
|
| 534 |
+
# Windows
|
| 535 |
+
desktop.ini
|
| 536 |
+
$RECYCLE.BIN/
|
| 537 |
+
*.cab
|
| 538 |
+
*.msi
|
| 539 |
+
*.msix
|
| 540 |
+
*.msm
|
| 541 |
+
*.msp
|
| 542 |
+
*.lnk
|
| 543 |
+
|
| 544 |
+
# macOS
|
| 545 |
+
.AppleDouble
|
| 546 |
+
.LSOverride
|
| 547 |
+
Icon
|
| 548 |
+
._*
|
| 549 |
+
.DocumentRevisions-V100
|
| 550 |
+
.fseventsd
|
| 551 |
+
.Spotlight-V100
|
| 552 |
+
.TemporaryItems
|
| 553 |
+
.Trashes
|
| 554 |
+
.VolumeIcon.icns
|
| 555 |
+
.com.apple.timemachine.donotpresent
|
| 556 |
+
.AppleDB
|
| 557 |
+
.AppleDesktop
|
| 558 |
+
Network Trash Folder
|
| 559 |
+
Temporary Items
|
| 560 |
+
.apdisk
|
| 561 |
+
|
| 562 |
+
# Linux
|
| 563 |
+
*~
|
| 564 |
+
.fuse_hidden*
|
| 565 |
+
.directory
|
| 566 |
+
.Trash-*
|
| 567 |
+
.nfs*
|
|
@@ -0,0 +1,141 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[settings]
|
| 2 |
+
# Make isort compatible with black
|
| 3 |
+
profile = black
|
| 4 |
+
line_length = 88
|
| 5 |
+
multi_line_output = 3
|
| 6 |
+
include_trailing_comma = True
|
| 7 |
+
force_grid_wrap = 0
|
| 8 |
+
use_parentheses = True
|
| 9 |
+
ensure_newline_before_comments = True
|
| 10 |
+
|
| 11 |
+
# Import sections
|
| 12 |
+
sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER
|
| 13 |
+
default_section = THIRDPARTY
|
| 14 |
+
known_first_party = src
|
| 15 |
+
known_local_folder = tests
|
| 16 |
+
|
| 17 |
+
# Skip files
|
| 18 |
+
skip = __pycache__,venv,env,.env,.venv,migrations,node_modules,.git
|
| 19 |
+
skip_glob = */migrations/*,*/__pycache__/*,*/venv/*,*/env/*,*/.env/*,*/.venv/*
|
| 20 |
+
|
| 21 |
+
# Force imports to be sorted within their section
|
| 22 |
+
force_sort_within_sections = True
|
| 23 |
+
|
| 24 |
+
# Show diff when check fails
|
| 25 |
+
show_diff = True
|
| 26 |
+
|
| 27 |
+
# Check only, don't modify files (useful for CI)
|
| 28 |
+
# check_only = True
|
| 29 |
+
|
| 30 |
+
# Treat comments as code
|
| 31 |
+
treat_comments_as_code = True
|
| 32 |
+
|
| 33 |
+
# Force single line imports for specific modules
|
| 34 |
+
force_single_line = True
|
| 35 |
+
single_line_exclusions = typing,collections.abc
|
| 36 |
+
|
| 37 |
+
# Import headings
|
| 38 |
+
import_heading_future = Future imports
|
| 39 |
+
import_heading_stdlib = Standard library imports
|
| 40 |
+
import_heading_thirdparty = Third-party imports
|
| 41 |
+
import_heading_firstparty = Local application imports
|
| 42 |
+
import_heading_localfolder = Local folder imports
|
| 43 |
+
|
| 44 |
+
# Balanced wrapping
|
| 45 |
+
balanced_wrapping = True
|
| 46 |
+
|
| 47 |
+
# Honor noqa comments
|
| 48 |
+
honor_noqa = True
|
| 49 |
+
|
| 50 |
+
# Atomic imports (prevent partial imports during interruption)
|
| 51 |
+
atomic = True
|
| 52 |
+
|
| 53 |
+
# Remove redundant aliases
|
| 54 |
+
remove_redundant_aliases = True
|
| 55 |
+
|
| 56 |
+
# Float to top (move imports to top of file)
|
| 57 |
+
float_to_top = True
|
| 58 |
+
|
| 59 |
+
# Filter files
|
| 60 |
+
filter_files = True
|
| 61 |
+
|
| 62 |
+
# Verbose output
|
| 63 |
+
verbose = False
|
| 64 |
+
|
| 65 |
+
# Quiet mode
|
| 66 |
+
quiet = False
|
| 67 |
+
|
| 68 |
+
# Force adds
|
| 69 |
+
force_adds = True
|
| 70 |
+
|
| 71 |
+
# Combine as imports
|
| 72 |
+
combine_as_imports = True
|
| 73 |
+
|
| 74 |
+
# Combine star imports
|
| 75 |
+
combine_star = True
|
| 76 |
+
|
| 77 |
+
# Order by type
|
| 78 |
+
order_by_type = True
|
| 79 |
+
|
| 80 |
+
# Group by package
|
| 81 |
+
group_by_package = True
|
| 82 |
+
|
| 83 |
+
# Reverse relative imports
|
| 84 |
+
reverse_relative = True
|
| 85 |
+
|
| 86 |
+
# Add imports
|
| 87 |
+
add_imports = from __future__ import annotations
|
| 88 |
+
|
| 89 |
+
# Known standard library modules (Python 3.8+)
|
| 90 |
+
known_standard_library =
|
| 91 |
+
abc,aifc,argparse,array,ast,asynchat,asyncio,asyncore,atexit,
|
| 92 |
+
audioop,base64,bdb,binascii,binhex,bisect,builtins,bz2,
|
| 93 |
+
calendar,cgi,cgitb,chunk,cmd,code,codecs,codeop,collections,
|
| 94 |
+
colorsys,compileall,concurrent,configparser,contextlib,copy,
|
| 95 |
+
copyreg,cProfile,crypt,csv,ctypes,curses,datetime,dbm,
|
| 96 |
+
decimal,difflib,dis,distutils,doctest,email,encodings,
|
| 97 |
+
ensurepip,enum,errno,faulthandler,fcntl,filecmp,fileinput,
|
| 98 |
+
fnmatch,formatter,fractions,ftplib,functools,gc,getopt,
|
| 99 |
+
getpass,gettext,glob,grp,gzip,hashlib,heapq,hmac,html,
|
| 100 |
+
http,imaplib,imghdr,imp,importlib,inspect,io,ipaddress,
|
| 101 |
+
itertools,json,keyword,lib2to3,linecache,locale,logging,
|
| 102 |
+
lzma,mailbox,mailcap,marshal,math,mimetypes,mmap,modulefinder,
|
| 103 |
+
msilib,msvcrt,multiprocessing,netrc,nntplib,numbers,operator,
|
| 104 |
+
optparse,os,ossaudiodev,parser,pathlib,pdb,pickle,pickletools,
|
| 105 |
+
pipes,pkgutil,platform,plistlib,poplib,posix,pprint,profile,
|
| 106 |
+
pstats,pty,pwd,py_compile,pyclbr,pydoc,queue,quopri,random,
|
| 107 |
+
re,readline,reprlib,resource,rlcompleter,runpy,sched,secrets,
|
| 108 |
+
select,selectors,shelve,shlex,shutil,signal,site,smtpd,
|
| 109 |
+
smtplib,sndhdr,socket,socketserver,sqlite3,ssl,stat,statistics,
|
| 110 |
+
string,stringprep,struct,subprocess,sunau,symbol,symtable,
|
| 111 |
+
sys,sysconfig,syslog,tabnanny,tarfile,telnetlib,tempfile,
|
| 112 |
+
termios,test,textwrap,threading,time,timeit,tkinter,token,
|
| 113 |
+
tokenize,trace,traceback,tracemalloc,tty,turtle,turtledemo,
|
| 114 |
+
types,typing,unicodedata,unittest,urllib,uu,uuid,venv,
|
| 115 |
+
warnings,wave,weakref,webbrowser,winreg,winsound,wsgiref,
|
| 116 |
+
xdrlib,xml,xmlrpc,zipapp,zipfile,zipimport,zlib
|
| 117 |
+
|
| 118 |
+
# Known third party modules
|
| 119 |
+
known_third_party =
|
| 120 |
+
numpy,pandas,scipy,matplotlib,seaborn,plotly,sklearn,
|
| 121 |
+
torch,tensorflow,keras,transformers,gradio,streamlit,
|
| 122 |
+
flask,django,fastapi,requests,aiohttp,sqlalchemy,
|
| 123 |
+
pytest,mock,factory_boy,faker,coverage,
|
| 124 |
+
click,typer,rich,tqdm,joblib,psutil,
|
| 125 |
+
yfinance,ta,xgboost,lightgbm,catboost,
|
| 126 |
+
pillow,opencv,imageio,scikit_image,
|
| 127 |
+
jupyter,ipython,notebook,
|
| 128 |
+
pydantic,marshmallow,cerberus,
|
| 129 |
+
redis,celery,rabbitmq,
|
| 130 |
+
boto3,azure,gcp,
|
| 131 |
+
docker,kubernetes,
|
| 132 |
+
prometheus_client,grafana,
|
| 133 |
+
sentry_sdk,rollbar,
|
| 134 |
+
stripe,paypal,
|
| 135 |
+
twilio,sendgrid,
|
| 136 |
+
beautifulsoup4,scrapy,selenium,
|
| 137 |
+
nltk,spacy,gensim,
|
| 138 |
+
networkx,igraph,
|
| 139 |
+
dask,ray,
|
| 140 |
+
mlflow,wandb,tensorboard,
|
| 141 |
+
black,flake8,mypy,pylint,bandit,safety
|
|
@@ -0,0 +1,429 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[MASTER]
|
| 2 |
+
# Specify a configuration file.
|
| 3 |
+
# rcfile=
|
| 4 |
+
|
| 5 |
+
# Python code to execute, usually for sys.path manipulation such as
|
| 6 |
+
# pygtk.require().
|
| 7 |
+
init-hook='import sys; sys.path.append("src")'
|
| 8 |
+
|
| 9 |
+
# Add files or directories to the blacklist. They should be base names, not
|
| 10 |
+
# paths.
|
| 11 |
+
ignore=CVS,.git,__pycache__,.pytest_cache,.mypy_cache,venv,env,.env
|
| 12 |
+
|
| 13 |
+
# Add files or directories matching the regex patterns to the blacklist. The
|
| 14 |
+
# regex matches against base names, not paths.
|
| 15 |
+
ignore-patterns=
|
| 16 |
+
|
| 17 |
+
# Pickle collected data for later comparisons.
|
| 18 |
+
persistent=yes
|
| 19 |
+
|
| 20 |
+
# List of plugins (as comma separated values of python modules names) to load,
|
| 21 |
+
# usually to register additional checkers.
|
| 22 |
+
load-plugins=
|
| 23 |
+
|
| 24 |
+
# Use multiple processes to speed up Pylint.
|
| 25 |
+
jobs=1
|
| 26 |
+
|
| 27 |
+
# Allow loading of arbitrary C extensions. Extensions are imported into the
|
| 28 |
+
# active Python interpreter and may run arbitrary code.
|
| 29 |
+
unsafe-load-any-extension=no
|
| 30 |
+
|
| 31 |
+
# A comma-separated list of package or module names from where C extensions may
|
| 32 |
+
# be loaded. Extensions are loading into the active Python interpreter and may
|
| 33 |
+
# run arbitrary code
|
| 34 |
+
extension-pkg-whitelist=
|
| 35 |
+
|
| 36 |
+
[MESSAGES CONTROL]
|
| 37 |
+
# Only show warnings with the listed confidence levels. Leave empty to show
|
| 38 |
+
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
|
| 39 |
+
confidence=
|
| 40 |
+
|
| 41 |
+
# Enable the message, report, category or checker with the given id(s). You can
|
| 42 |
+
# either give multiple identifier separated by comma (,) or put this option
|
| 43 |
+
# multiple time (only on the command line, not in the configuration file where
|
| 44 |
+
# it should appear only once).
|
| 45 |
+
enable=
|
| 46 |
+
|
| 47 |
+
# Disable the message, report, category or checker with the given id(s). You
|
| 48 |
+
# can either give multiple identifiers separated by comma (,) or put this
|
| 49 |
+
# option multiple times (only on the command line, not in the configuration
|
| 50 |
+
# file where it should appear only once).
|
| 51 |
+
disable=
|
| 52 |
+
C0103, # Invalid name (doesn't conform to naming convention)
|
| 53 |
+
C0111, # Missing docstring
|
| 54 |
+
C0301, # Line too long (handled by black)
|
| 55 |
+
C0330, # Wrong hanging indentation (handled by black)
|
| 56 |
+
W0613, # Unused argument
|
| 57 |
+
W0622, # Redefining built-in
|
| 58 |
+
R0903, # Too few public methods
|
| 59 |
+
R0913, # Too many arguments
|
| 60 |
+
R0914, # Too many local variables
|
| 61 |
+
R0915, # Too many statements
|
| 62 |
+
R0801, # Similar lines in files
|
| 63 |
+
E1101, # Instance has no member (often false positive with dynamic attrs)
|
| 64 |
+
W0212, # Access to a protected member
|
| 65 |
+
C0415, # Import outside toplevel
|
| 66 |
+
W0511, # TODO/FIXME comments
|
| 67 |
+
R1705, # Unnecessary "else" after "return"
|
| 68 |
+
C0114, # Missing module docstring
|
| 69 |
+
C0115, # Missing class docstring
|
| 70 |
+
C0116, # Missing function docstring
|
| 71 |
+
W1203, # Use lazy % formatting in logging functions
|
| 72 |
+
R0902, # Too many instance attributes
|
| 73 |
+
R0904, # Too many public methods
|
| 74 |
+
R0912, # Too many branches
|
| 75 |
+
W0703, # Catching too general exception
|
| 76 |
+
W0621, # Redefining name from outer scope
|
| 77 |
+
C0209, # Consider using f-string
|
| 78 |
+
R1720, # Unnecessary "else" after "raise"
|
| 79 |
+
R1721, # Unnecessary use of a comprehension
|
| 80 |
+
W0107, # Unnecessary pass statement
|
| 81 |
+
R0916, # Too many boolean expressions
|
| 82 |
+
C0302, # Too many lines in module
|
| 83 |
+
R0911, # Too many return statements
|
| 84 |
+
W0201, # Attribute defined outside __init__
|
| 85 |
+
W0102, # Dangerous default value as argument
|
| 86 |
+
C0412, # Imports from package are not grouped
|
| 87 |
+
W0640, # Cell variable defined in loop
|
| 88 |
+
R1714, # Consider merging these comparisons
|
| 89 |
+
R1716, # Simplify chained comparison
|
| 90 |
+
R1717, # Consider using a dictionary comprehension
|
| 91 |
+
R1718, # Consider using a set comprehension
|
| 92 |
+
R1719, # The if expression can be replaced with 'bool(test)'
|
| 93 |
+
R1722, # Consider using sys.exit()
|
| 94 |
+
R1723, # Unnecessary "elif" after "break"
|
| 95 |
+
R1724, # Unnecessary "elif" after "continue"
|
| 96 |
+
R1725, # Consider using Python 3 style super()
|
| 97 |
+
R1726, # Tuple passed to format is not used
|
| 98 |
+
R1727, # Condition will always evaluate to the same value
|
| 99 |
+
R1728, # Consider using a generator instead 'list(generator_exp)'
|
| 100 |
+
R1729, # Use a {} to create a dictionary
|
| 101 |
+
R1730, # Consider using 'with' for resource-allocating operations
|
| 102 |
+
R1731, # Consider merging these isinstance calls
|
| 103 |
+
R1732, # Consider using 'with' for resource-allocating operations
|
| 104 |
+
R1733, # Unnecessary dictionary index lookup
|
| 105 |
+
R1734, # Use [] instead of list()
|
| 106 |
+
R1735, # Use {} instead of dict()
|
| 107 |
+
|
| 108 |
+
[REPORTS]
|
| 109 |
+
# Set the output format. Available formats are text, parseable, colorized, msvs
|
| 110 |
+
# (visual studio) and html. You can also give a reporter class, eg
|
| 111 |
+
# mypackage.mymodule.MyReporterClass.
|
| 112 |
+
output-format=text
|
| 113 |
+
|
| 114 |
+
# Put messages in a separate file for each module / package specified on the
|
| 115 |
+
# command line instead of printing them on stdout. Reports (if any) will be
|
| 116 |
+
# written to a file name "pylint_global.[txt|html]".
|
| 117 |
+
files-output=no
|
| 118 |
+
|
| 119 |
+
# Tells whether to display a full report or only the messages
|
| 120 |
+
reports=no
|
| 121 |
+
|
| 122 |
+
# Python expression which should return a note less than 10 (10 is the highest
|
| 123 |
+
# note). You have access to the variables errors warning, statement which
|
| 124 |
+
# respectively contain the number of errors / warnings messages and the total
|
| 125 |
+
# number of statements analyzed. This is used by the global evaluation report
|
| 126 |
+
# (RP0004).
|
| 127 |
+
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
|
| 128 |
+
|
| 129 |
+
# Template used to display messages. This is a python new-style format string
|
| 130 |
+
# used to format the message information. See doc for all details
|
| 131 |
+
msg-template={path}:{line}:{column}: {msg_id}: {msg} ({symbol})
|
| 132 |
+
|
| 133 |
+
[BASIC]
|
| 134 |
+
# Good variable names which should always be accepted, separated by a comma
|
| 135 |
+
good-names=i,j,k,ex,Run,_,id,db,df,ax,fig,lr,x,y,z,f,g,h,n,m,p,q,r,s,t,u,v,w
|
| 136 |
+
|
| 137 |
+
# Bad variable names which should always be refused, separated by a comma
|
| 138 |
+
bad-names=foo,bar,baz,toto,tutu,tata
|
| 139 |
+
|
| 140 |
+
# Colon-delimited sets of names that determine each other's naming style when
|
| 141 |
+
# the name regexes allow several styles.
|
| 142 |
+
name-group=
|
| 143 |
+
|
| 144 |
+
# Include a hint for the correct naming format with invalid-name
|
| 145 |
+
include-naming-hint=no
|
| 146 |
+
|
| 147 |
+
# List of decorators that produce properties, such as abc.abstractproperty. Add
|
| 148 |
+
# to this list to register other decorators that produce valid properties.
|
| 149 |
+
property-classes=abc.abstractproperty
|
| 150 |
+
|
| 151 |
+
# Regular expression matching correct function names
|
| 152 |
+
function-rgx=[a-z_][a-z0-9_]{2,30}$
|
| 153 |
+
|
| 154 |
+
# Naming hint for function names
|
| 155 |
+
function-name-hint=[a-z_][a-z0-9_]{2,30}$
|
| 156 |
+
|
| 157 |
+
# Regular expression matching correct variable names
|
| 158 |
+
variable-rgx=[a-z_][a-z0-9_]{2,30}$
|
| 159 |
+
|
| 160 |
+
# Naming hint for variable names
|
| 161 |
+
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
|
| 162 |
+
|
| 163 |
+
# Regular expression matching correct constant names
|
| 164 |
+
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
|
| 165 |
+
|
| 166 |
+
# Naming hint for constant names
|
| 167 |
+
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
|
| 168 |
+
|
| 169 |
+
# Regular expression matching correct attribute names
|
| 170 |
+
attr-rgx=[a-z_][a-z0-9_]{2,30}$
|
| 171 |
+
|
| 172 |
+
# Naming hint for attribute names
|
| 173 |
+
attr-name-hint=[a-z_][a-z0-9_]{2,30}$
|
| 174 |
+
|
| 175 |
+
# Regular expression matching correct argument names
|
| 176 |
+
argument-rgx=[a-z_][a-z0-9_]{2,30}$
|
| 177 |
+
|
| 178 |
+
# Naming hint for argument names
|
| 179 |
+
argument-name-hint=[a-z_][a-z0-9_]{2,30}$
|
| 180 |
+
|
| 181 |
+
# Regular expression matching correct class attribute names
|
| 182 |
+
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
|
| 183 |
+
|
| 184 |
+
# Naming hint for class attribute names
|
| 185 |
+
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
|
| 186 |
+
|
| 187 |
+
# Regular expression matching correct inline iteration names
|
| 188 |
+
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
|
| 189 |
+
|
| 190 |
+
# Naming hint for inline iteration names
|
| 191 |
+
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
|
| 192 |
+
|
| 193 |
+
# Regular expression matching correct class names
|
| 194 |
+
class-rgx=[A-Z_][a-zA-Z0-9]+$
|
| 195 |
+
|
| 196 |
+
# Naming hint for class names
|
| 197 |
+
class-name-hint=[A-Z_][a-zA-Z0-9]+$
|
| 198 |
+
|
| 199 |
+
# Regular expression matching correct module names
|
| 200 |
+
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
|
| 201 |
+
|
| 202 |
+
# Naming hint for module names
|
| 203 |
+
module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$
|
| 204 |
+
|
| 205 |
+
# Regular expression matching correct method names
|
| 206 |
+
method-rgx=[a-z_][a-z0-9_]{2,30}$
|
| 207 |
+
|
| 208 |
+
# Naming hint for method names
|
| 209 |
+
method-name-hint=[a-z_][a-z0-9_]{2,30}$
|
| 210 |
+
|
| 211 |
+
# Regular expression which should only match function or class names that do
|
| 212 |
+
# not require a docstring.
|
| 213 |
+
no-docstring-rgx=^_
|
| 214 |
+
|
| 215 |
+
# Minimum line length for functions/classes that require docstrings, shorter
|
| 216 |
+
# ones are exempt.
|
| 217 |
+
docstring-min-length=-1
|
| 218 |
+
|
| 219 |
+
[ELIF]
|
| 220 |
+
# Maximum number of nested blocks for function / method body
|
| 221 |
+
max-nested-blocks=5
|
| 222 |
+
|
| 223 |
+
[FORMAT]
|
| 224 |
+
# Maximum number of characters on a single line.
|
| 225 |
+
max-line-length=88
|
| 226 |
+
|
| 227 |
+
# Regexp for a line that is allowed to be longer than the limit.
|
| 228 |
+
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
|
| 229 |
+
|
| 230 |
+
# Allow the body of an if to be on the same line as the test if there is no
|
| 231 |
+
# else.
|
| 232 |
+
single-line-if-stmt=no
|
| 233 |
+
|
| 234 |
+
# List of optional constructs for which whitespace checking is disabled. `dict-
|
| 235 |
+
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
|
| 236 |
+
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
|
| 237 |
+
no-space-check=trailing-comma,
|
| 238 |
+
dict-separator
|
| 239 |
+
|
| 240 |
+
# Maximum number of lines in a module
|
| 241 |
+
max-module-lines=1000
|
| 242 |
+
|
| 243 |
+
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
|
| 244 |
+
# tab).
|
| 245 |
+
indent-string=' '
|
| 246 |
+
|
| 247 |
+
# Number of spaces of indent required inside a hanging or continued line.
|
| 248 |
+
indent-after-paren=4
|
| 249 |
+
|
| 250 |
+
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
|
| 251 |
+
expected-line-ending-format=
|
| 252 |
+
|
| 253 |
+
[LOGGING]
|
| 254 |
+
# Logging modules to check that the string format arguments are in logging
|
| 255 |
+
# function parameter format
|
| 256 |
+
logging-modules=logging
|
| 257 |
+
|
| 258 |
+
[MISCELLANEOUS]
|
| 259 |
+
# List of note tags to take in consideration, separated by a comma.
|
| 260 |
+
notes=FIXME,
|
| 261 |
+
XXX,
|
| 262 |
+
TODO
|
| 263 |
+
|
| 264 |
+
[SIMILARITIES]
|
| 265 |
+
# Minimum lines number of a similarity.
|
| 266 |
+
min-similarity-lines=4
|
| 267 |
+
|
| 268 |
+
# Ignore comments when computing similarities.
|
| 269 |
+
ignore-comments=yes
|
| 270 |
+
|
| 271 |
+
# Ignore docstrings when computing similarities.
|
| 272 |
+
ignore-docstrings=yes
|
| 273 |
+
|
| 274 |
+
# Ignore imports when computing similarities.
|
| 275 |
+
ignore-imports=no
|
| 276 |
+
|
| 277 |
+
[SPELLING]
|
| 278 |
+
# Spelling dictionary name. Available dictionaries: none. To make it working
|
| 279 |
+
# install python-enchant package.
|
| 280 |
+
spelling-dict=
|
| 281 |
+
|
| 282 |
+
# List of comma separated words that should not be checked.
|
| 283 |
+
spelling-ignore-words=
|
| 284 |
+
|
| 285 |
+
# A path to a file that contains private dictionary; one word per line.
|
| 286 |
+
spelling-private-dict-file=
|
| 287 |
+
|
| 288 |
+
# Tells whether to store unknown words to indicated private dictionary in
|
| 289 |
+
# --spelling-private-dict-file option instead of raising a message.
|
| 290 |
+
spelling-store-unknown-words=no
|
| 291 |
+
|
| 292 |
+
[TYPECHECK]
|
| 293 |
+
# List of decorators that produce context managers, such as
|
| 294 |
+
# contextlib.contextmanager. Add to this list to register other decorators that
|
| 295 |
+
# produce valid context managers.
|
| 296 |
+
contextmanager-decorators=contextlib.contextmanager
|
| 297 |
+
|
| 298 |
+
# List of members which are set dynamically and miss from static analysis; and
|
| 299 |
+
# thus shouldn't trigger E1101 when accessed. Python regular expressions are
|
| 300 |
+
# accepted.
|
| 301 |
+
generated-members=
|
| 302 |
+
|
| 303 |
+
# Tells whether missing members accessed in mixin class should be ignored. A
|
| 304 |
+
# mixin class is detected if its name ends with "mixin" (case insensitive).
|
| 305 |
+
ignore-mixin-members=yes
|
| 306 |
+
|
| 307 |
+
# This flag controls whether pylint should warn about no-member and similar
|
| 308 |
+
# checks whenever an opaque object is returned when inferring. The inference
|
| 309 |
+
# can return multiple potential results, and some of them can be the same as
|
| 310 |
+
# the inference done on each property of the object.
|
| 311 |
+
ignore-on-opaque-inference=yes
|
| 312 |
+
|
| 313 |
+
# List of class names for which member attributes should not be checked (useful
|
| 314 |
+
# for classes with dynamically set attributes). This supports the use of
|
| 315 |
+
# qualified names.
|
| 316 |
+
ignored-classes=optparse.Values,thread._local,_thread._local
|
| 317 |
+
|
| 318 |
+
# List of module names for which member attributes should not be checked
|
| 319 |
+
# (useful for modules/projects where namespaces are manipulated during runtime
|
| 320 |
+
# and thus existing member attributes cannot be deduced by static analysis. It
|
| 321 |
+
# supports qualified module names, as well as Unix pattern matching.
|
| 322 |
+
ignored-modules=
|
| 323 |
+
|
| 324 |
+
# Show a hint with the correct function signature when a wrong number of
|
| 325 |
+
# arguments is given to a function call.
|
| 326 |
+
show-hint-for-missing-members=yes
|
| 327 |
+
|
| 328 |
+
[VARIABLES]
|
| 329 |
+
# Tells whether we should check for unused import in __init__ files.
|
| 330 |
+
init-import=no
|
| 331 |
+
|
| 332 |
+
# A regular expression matching the name of dummy variables (i.e. expectedly
|
| 333 |
+
# not used).
|
| 334 |
+
dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
|
| 335 |
+
|
| 336 |
+
# List of additional names supposed to be defined in builtins. Remember that
|
| 337 |
+
# you should avoid to define new builtins when possible.
|
| 338 |
+
additional-builtins=
|
| 339 |
+
|
| 340 |
+
# List of strings which can identify a callback function by name. A callback
|
| 341 |
+
# name must start or end with one of those strings.
|
| 342 |
+
callbacks=cb_,
|
| 343 |
+
_cb
|
| 344 |
+
|
| 345 |
+
# List of qualified module names which can have objects that can redefine
|
| 346 |
+
# builtins.
|
| 347 |
+
redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
|
| 348 |
+
|
| 349 |
+
[CLASSES]
|
| 350 |
+
# List of method names used to declare (i.e. assign) instance attributes.
|
| 351 |
+
defining-attr-methods=__init__,
|
| 352 |
+
__new__,
|
| 353 |
+
setUp
|
| 354 |
+
|
| 355 |
+
# List of member names, which should be excluded from the protected access
|
| 356 |
+
# warning.
|
| 357 |
+
exclude-protected=_asdict,
|
| 358 |
+
_fields,
|
| 359 |
+
_replace,
|
| 360 |
+
_source,
|
| 361 |
+
_make
|
| 362 |
+
|
| 363 |
+
# List of valid names for the first argument in a class method.
|
| 364 |
+
valid-classmethod-first-arg=cls
|
| 365 |
+
|
| 366 |
+
# List of valid names for the first argument in a metaclass class method.
|
| 367 |
+
valid-metaclass-classmethod-first-arg=mcs
|
| 368 |
+
|
| 369 |
+
[DESIGN]
|
| 370 |
+
# Maximum number of arguments for function / method
|
| 371 |
+
max-args=5
|
| 372 |
+
|
| 373 |
+
# Argument names that match this expression will be ignored. Default to name
|
| 374 |
+
# with leading underscore
|
| 375 |
+
ignored-argument-names=_.*
|
| 376 |
+
|
| 377 |
+
# Maximum number of locals for function / method body
|
| 378 |
+
max-locals=15
|
| 379 |
+
|
| 380 |
+
# Maximum number of return / yield for function / method body
|
| 381 |
+
max-returns=6
|
| 382 |
+
|
| 383 |
+
# Maximum number of branch for function / method body
|
| 384 |
+
max-branches=12
|
| 385 |
+
|
| 386 |
+
# Maximum number of statements in function / method body
|
| 387 |
+
max-statements=50
|
| 388 |
+
|
| 389 |
+
# Maximum number of parents for a class (see R0901).
|
| 390 |
+
max-parents=7
|
| 391 |
+
|
| 392 |
+
# Maximum number of attributes for a class (see R0902).
|
| 393 |
+
max-attributes=7
|
| 394 |
+
|
| 395 |
+
# Minimum number of public methods for a class (see R0903).
|
| 396 |
+
min-public-methods=2
|
| 397 |
+
|
| 398 |
+
# Maximum number of public methods for a class (see R0904).
|
| 399 |
+
max-public-methods=20
|
| 400 |
+
|
| 401 |
+
# Maximum number of boolean expressions in a if statement
|
| 402 |
+
max-bool-expr=5
|
| 403 |
+
|
| 404 |
+
[IMPORTS]
|
| 405 |
+
# Deprecated modules which should not be used, separated by a comma
|
| 406 |
+
deprecated-modules=optparse,tkinter.tix
|
| 407 |
+
|
| 408 |
+
# Create a graph of every (i.e. internal and external) dependencies in the
|
| 409 |
+
# given file (report RP0402 must not be disabled)
|
| 410 |
+
import-graph=
|
| 411 |
+
|
| 412 |
+
# Create a graph of external dependencies in the given file (report RP0402 must
|
| 413 |
+
# not be disabled)
|
| 414 |
+
ext-import-graph=
|
| 415 |
+
|
| 416 |
+
# Create a graph of internal dependencies in the given file (report RP0402 must
|
| 417 |
+
# not be disabled)
|
| 418 |
+
int-import-graph=
|
| 419 |
+
|
| 420 |
+
# Force import order to follow PEP8 conventions.
|
| 421 |
+
known-standard-library=
|
| 422 |
+
|
| 423 |
+
# Force import order to follow PEP8 conventions.
|
| 424 |
+
known-third-party=enchant
|
| 425 |
+
|
| 426 |
+
[EXCEPTIONS]
|
| 427 |
+
# Exceptions that will emit a warning when being caught. Defaults to
|
| 428 |
+
# "Exception"
|
| 429 |
+
overgeneral-exceptions=Exception
|
|
@@ -0,0 +1,296 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Makefile for Hugging Face Spaces Project
|
| 2 |
+
# Provides convenient commands for development, testing, and deployment
|
| 3 |
+
|
| 4 |
+
.PHONY: help install test test-unit test-integration test-coverage test-fast test-slow clean lint format type-check security-check build run dev docs
|
| 5 |
+
|
| 6 |
+
# Default target
|
| 7 |
+
help:
|
| 8 |
+
@echo "Available commands:"
|
| 9 |
+
@echo " install - Install all dependencies"
|
| 10 |
+
@echo " test - Run all tests"
|
| 11 |
+
@echo " test-unit - Run unit tests only"
|
| 12 |
+
@echo " test-integration - Run integration tests only"
|
| 13 |
+
@echo " test-coverage - Run tests with coverage report"
|
| 14 |
+
@echo " test-fast - Run fast tests only (exclude slow tests)"
|
| 15 |
+
@echo " test-slow - Run slow tests only"
|
| 16 |
+
@echo " test-parallel - Run tests in parallel"
|
| 17 |
+
@echo " lint - Run code linting"
|
| 18 |
+
@echo " format - Format code with black"
|
| 19 |
+
@echo " type-check - Run type checking with mypy"
|
| 20 |
+
@echo " security-check - Run security checks"
|
| 21 |
+
@echo " clean - Clean up generated files"
|
| 22 |
+
@echo " build - Build the project"
|
| 23 |
+
@echo " run - Run the application"
|
| 24 |
+
@echo " dev - Run in development mode"
|
| 25 |
+
@echo " docs - Generate documentation"
|
| 26 |
+
|
| 27 |
+
# Installation
|
| 28 |
+
install:
|
| 29 |
+
@echo "Installing dependencies..."
|
| 30 |
+
pip install -r requirements.txt
|
| 31 |
+
pip install -e .
|
| 32 |
+
|
| 33 |
+
# Testing commands
|
| 34 |
+
test:
|
| 35 |
+
@echo "Running all tests..."
|
| 36 |
+
pytest -v
|
| 37 |
+
|
| 38 |
+
test-unit:
|
| 39 |
+
@echo "Running unit tests..."
|
| 40 |
+
pytest tests/unit/ -v -m "unit or not integration"
|
| 41 |
+
|
| 42 |
+
test-integration:
|
| 43 |
+
@echo "Running integration tests..."
|
| 44 |
+
pytest tests/integration/ -v -m integration --runintegration
|
| 45 |
+
|
| 46 |
+
test-coverage:
|
| 47 |
+
@echo "Running tests with coverage..."
|
| 48 |
+
pytest --cov=src --cov=. --cov-report=html --cov-report=term-missing --cov-report=xml
|
| 49 |
+
|
| 50 |
+
test-fast:
|
| 51 |
+
@echo "Running fast tests..."
|
| 52 |
+
pytest -v -m "not slow"
|
| 53 |
+
|
| 54 |
+
test-slow:
|
| 55 |
+
@echo "Running slow tests..."
|
| 56 |
+
pytest -v -m slow --runslow
|
| 57 |
+
|
| 58 |
+
test-parallel:
|
| 59 |
+
@echo "Running tests in parallel..."
|
| 60 |
+
pytest -n auto -v
|
| 61 |
+
|
| 62 |
+
test-watch:
|
| 63 |
+
@echo "Running tests in watch mode..."
|
| 64 |
+
pytest-watch -- -v
|
| 65 |
+
|
| 66 |
+
test-debug:
|
| 67 |
+
@echo "Running tests in debug mode..."
|
| 68 |
+
pytest -v -s --pdb
|
| 69 |
+
|
| 70 |
+
# Code quality commands
|
| 71 |
+
lint:
|
| 72 |
+
@echo "Running linting..."
|
| 73 |
+
flake8 src/ tests/ --max-line-length=88 --extend-ignore=E203,W503
|
| 74 |
+
pylint src/ --rcfile=.pylintrc || true
|
| 75 |
+
|
| 76 |
+
format:
|
| 77 |
+
@echo "Formatting code..."
|
| 78 |
+
black src/ tests/ --line-length=88
|
| 79 |
+
isort src/ tests/ --profile black
|
| 80 |
+
|
| 81 |
+
format-check:
|
| 82 |
+
@echo "Checking code formatting..."
|
| 83 |
+
black src/ tests/ --check --line-length=88
|
| 84 |
+
isort src/ tests/ --check-only --profile black
|
| 85 |
+
|
| 86 |
+
type-check:
|
| 87 |
+
@echo "Running type checking..."
|
| 88 |
+
mypy src/ --ignore-missing-imports --no-strict-optional
|
| 89 |
+
|
| 90 |
+
security-check:
|
| 91 |
+
@echo "Running security checks..."
|
| 92 |
+
bandit -r src/ -f json -o reports/security-report.json || true
|
| 93 |
+
safety check --json --output reports/safety-report.json || true
|
| 94 |
+
|
| 95 |
+
# Cleanup commands
|
| 96 |
+
clean:
|
| 97 |
+
@echo "Cleaning up..."
|
| 98 |
+
find . -type f -name "*.pyc" -delete
|
| 99 |
+
find . -type d -name "__pycache__" -delete
|
| 100 |
+
find . -type d -name "*.egg-info" -exec rm -rf {} + 2>/dev/null || true
|
| 101 |
+
rm -rf build/ dist/ .coverage htmlcov/ .pytest_cache/ .mypy_cache/
|
| 102 |
+
rm -rf reports/*.xml reports/*.json reports/*.html
|
| 103 |
+
|
| 104 |
+
clean-cache:
|
| 105 |
+
@echo "Cleaning cache..."
|
| 106 |
+
rm -rf cache/ai_cache/*
|
| 107 |
+
rm -rf .pytest_cache/
|
| 108 |
+
rm -rf __pycache__/
|
| 109 |
+
|
| 110 |
+
# Build and run commands
|
| 111 |
+
build:
|
| 112 |
+
@echo "Building project..."
|
| 113 |
+
python setup.py build
|
| 114 |
+
|
| 115 |
+
run:
|
| 116 |
+
@echo "Running application..."
|
| 117 |
+
python app.py
|
| 118 |
+
|
| 119 |
+
dev:
|
| 120 |
+
@echo "Running in development mode..."
|
| 121 |
+
GRADIO_SERVER_NAME=0.0.0.0 GRADIO_SERVER_PORT=7860 python app.py
|
| 122 |
+
|
| 123 |
+
# Development tools
|
| 124 |
+
shell:
|
| 125 |
+
@echo "Starting Python shell with project context..."
|
| 126 |
+
python -i -c "import sys; sys.path.insert(0, 'src')"
|
| 127 |
+
|
| 128 |
+
jupyter:
|
| 129 |
+
@echo "Starting Jupyter notebook..."
|
| 130 |
+
jupyter notebook --ip=0.0.0.0 --port=8888 --no-browser
|
| 131 |
+
|
| 132 |
+
# Documentation
|
| 133 |
+
docs:
|
| 134 |
+
@echo "Generating documentation..."
|
| 135 |
+
sphinx-build -b html docs/ docs/_build/html/
|
| 136 |
+
|
| 137 |
+
docs-serve:
|
| 138 |
+
@echo "Serving documentation..."
|
| 139 |
+
python -m http.server 8000 --directory docs/_build/html/
|
| 140 |
+
|
| 141 |
+
# Database commands
|
| 142 |
+
db-init:
|
| 143 |
+
@echo "Initializing database..."
|
| 144 |
+
python -c "from src.core.database_logger import DatabaseLogger; DatabaseLogger.init_database()"
|
| 145 |
+
|
| 146 |
+
db-migrate:
|
| 147 |
+
@echo "Running database migrations..."
|
| 148 |
+
python scripts/migrate_database.py
|
| 149 |
+
|
| 150 |
+
db-reset:
|
| 151 |
+
@echo "Resetting database..."
|
| 152 |
+
rm -f logs/application.db
|
| 153 |
+
make db-init
|
| 154 |
+
|
| 155 |
+
# Log analysis commands
|
| 156 |
+
logs-analyze:
|
| 157 |
+
@echo "Analyzing logs..."
|
| 158 |
+
python scripts/log_analysis/advanced_log_analyzer.py
|
| 159 |
+
|
| 160 |
+
logs-monitor:
|
| 161 |
+
@echo "Starting log monitoring..."
|
| 162 |
+
python scripts/log_analysis/realtime_monitor.py
|
| 163 |
+
|
| 164 |
+
logs-demo:
|
| 165 |
+
@echo "Running log analysis demo..."
|
| 166 |
+
python scripts/log_analysis/demo.py
|
| 167 |
+
|
| 168 |
+
# Performance commands
|
| 169 |
+
profile:
|
| 170 |
+
@echo "Running performance profiling..."
|
| 171 |
+
python -m cProfile -o profile_results.prof app.py
|
| 172 |
+
|
| 173 |
+
profile-view:
|
| 174 |
+
@echo "Viewing profile results..."
|
| 175 |
+
python -c "import pstats; p = pstats.Stats('profile_results.prof'); p.sort_stats('cumulative'); p.print_stats(20)"
|
| 176 |
+
|
| 177 |
+
benchmark:
|
| 178 |
+
@echo "Running benchmarks..."
|
| 179 |
+
pytest tests/ -m performance --benchmark-only
|
| 180 |
+
|
| 181 |
+
# Git hooks
|
| 182 |
+
pre-commit:
|
| 183 |
+
@echo "Running pre-commit checks..."
|
| 184 |
+
make format-check
|
| 185 |
+
make lint
|
| 186 |
+
make type-check
|
| 187 |
+
make test-fast
|
| 188 |
+
|
| 189 |
+
pre-push:
|
| 190 |
+
@echo "Running pre-push checks..."
|
| 191 |
+
make test
|
| 192 |
+
make security-check
|
| 193 |
+
|
| 194 |
+
# CI/CD commands
|
| 195 |
+
ci-test:
|
| 196 |
+
@echo "Running CI tests..."
|
| 197 |
+
pytest --junitxml=reports/junit.xml --cov=src --cov-report=xml --cov-report=term
|
| 198 |
+
|
| 199 |
+
ci-quality:
|
| 200 |
+
@echo "Running CI quality checks..."
|
| 201 |
+
make lint
|
| 202 |
+
make type-check
|
| 203 |
+
make security-check
|
| 204 |
+
|
| 205 |
+
ci-build:
|
| 206 |
+
@echo "Running CI build..."
|
| 207 |
+
make clean
|
| 208 |
+
make install
|
| 209 |
+
make ci-quality
|
| 210 |
+
make ci-test
|
| 211 |
+
|
| 212 |
+
# Docker commands (if using Docker)
|
| 213 |
+
docker-build:
|
| 214 |
+
@echo "Building Docker image..."
|
| 215 |
+
docker build -t huggingface-spaces .
|
| 216 |
+
|
| 217 |
+
docker-run:
|
| 218 |
+
@echo "Running Docker container..."
|
| 219 |
+
docker run -p 7860:7860 huggingface-spaces
|
| 220 |
+
|
| 221 |
+
docker-test:
|
| 222 |
+
@echo "Running tests in Docker..."
|
| 223 |
+
docker run --rm huggingface-spaces make test
|
| 224 |
+
|
| 225 |
+
# Utility commands
|
| 226 |
+
check-deps:
|
| 227 |
+
@echo "Checking for outdated dependencies..."
|
| 228 |
+
pip list --outdated
|
| 229 |
+
|
| 230 |
+
update-deps:
|
| 231 |
+
@echo "Updating dependencies..."
|
| 232 |
+
pip-review --local --interactive
|
| 233 |
+
|
| 234 |
+
freeze-deps:
|
| 235 |
+
@echo "Freezing current dependencies..."
|
| 236 |
+
pip freeze > requirements-frozen.txt
|
| 237 |
+
|
| 238 |
+
# Environment setup
|
| 239 |
+
setup-dev:
|
| 240 |
+
@echo "Setting up development environment..."
|
| 241 |
+
python -m venv venv
|
| 242 |
+
@echo "Activate virtual environment with: source venv/bin/activate (Linux/Mac) or venv\\Scripts\\activate (Windows)"
|
| 243 |
+
@echo "Then run: make install"
|
| 244 |
+
|
| 245 |
+
setup-hooks:
|
| 246 |
+
@echo "Setting up git hooks..."
|
| 247 |
+
echo "#!/bin/bash\nmake pre-commit" > .git/hooks/pre-commit
|
| 248 |
+
echo "#!/bin/bash\nmake pre-push" > .git/hooks/pre-push
|
| 249 |
+
chmod +x .git/hooks/pre-commit .git/hooks/pre-push
|
| 250 |
+
|
| 251 |
+
# Release commands
|
| 252 |
+
version-patch:
|
| 253 |
+
@echo "Bumping patch version..."
|
| 254 |
+
bump2version patch
|
| 255 |
+
|
| 256 |
+
version-minor:
|
| 257 |
+
@echo "Bumping minor version..."
|
| 258 |
+
bump2version minor
|
| 259 |
+
|
| 260 |
+
version-major:
|
| 261 |
+
@echo "Bumping major version..."
|
| 262 |
+
bump2version major
|
| 263 |
+
|
| 264 |
+
release:
|
| 265 |
+
@echo "Creating release..."
|
| 266 |
+
make ci-build
|
| 267 |
+
make version-patch
|
| 268 |
+
git push origin main --tags
|
| 269 |
+
|
| 270 |
+
# Health check
|
| 271 |
+
health-check:
|
| 272 |
+
@echo "Running health checks..."
|
| 273 |
+
@echo "Python version: $$(python --version)"
|
| 274 |
+
@echo "Pip version: $$(pip --version)"
|
| 275 |
+
@echo "Project structure:"
|
| 276 |
+
@find . -name "*.py" -path "./src/*" | head -10
|
| 277 |
+
@echo "Database status:"
|
| 278 |
+
@ls -la logs/ 2>/dev/null || echo "No logs directory found"
|
| 279 |
+
@echo "Cache status:"
|
| 280 |
+
@ls -la cache/ 2>/dev/null || echo "No cache directory found"
|
| 281 |
+
|
| 282 |
+
# All-in-one commands
|
| 283 |
+
full-check:
|
| 284 |
+
@echo "Running full project check..."
|
| 285 |
+
make clean
|
| 286 |
+
make install
|
| 287 |
+
make format-check
|
| 288 |
+
make lint
|
| 289 |
+
make type-check
|
| 290 |
+
make test-coverage
|
| 291 |
+
make security-check
|
| 292 |
+
|
| 293 |
+
quick-check:
|
| 294 |
+
@echo "Running quick project check..."
|
| 295 |
+
make format-check
|
| 296 |
+
make test-fast
|
|
@@ -49,6 +49,18 @@ Um sistema completo de análise financeira que combina múltiplos modelos de IA
|
|
| 49 |
- **Limpeza Automática**: Gerenciamento automático de logs antigos
|
| 50 |
- **Eventos do Sistema**: Rastreamento de eventos críticos e alertas
|
| 51 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 52 |
## 🏗️ Arquitetura do Sistema
|
| 53 |
|
| 54 |
```
|
|
@@ -65,7 +77,16 @@ Um sistema completo de análise financeira que combina múltiplos modelos de IA
|
|
| 65 |
│ └── 📁 utils/ # Utilitários e helpers
|
| 66 |
├── 📁 config/ # Configurações do sistema
|
| 67 |
├── 📁 docs/ # Documentação completa
|
| 68 |
-
├── 📁 tests/ #
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 69 |
└── 📄 app.py # Aplicação principal
|
| 70 |
```
|
| 71 |
|
|
@@ -107,6 +128,49 @@ python app.py
|
|
| 107 |
### 4. Acesse a interface
|
| 108 |
Abra seu navegador em `http://localhost:7860`
|
| 109 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 110 |
## 🌐 Hugging Face Spaces
|
| 111 |
|
| 112 |
Este projeto está **100% compatível** com Hugging Face Spaces:
|
|
@@ -142,6 +206,7 @@ Documentação completa disponível em:
|
|
| 142 |
- [Guia do Desenvolvedor](docs/developer-guide.md)
|
| 143 |
- [Arquitetura do Sistema](docs/architecture.md)
|
| 144 |
- [Sistema de Logging](docs/logging_system.md)
|
|
|
|
| 145 |
- [Solução de Problemas](docs/troubleshooting.md)
|
| 146 |
- [Troubleshooting HF Spaces](docs/huggingface_spaces_troubleshooting.md)
|
| 147 |
|
|
|
|
| 49 |
- **Limpeza Automática**: Gerenciamento automático de logs antigos
|
| 50 |
- **Eventos do Sistema**: Rastreamento de eventos críticos e alertas
|
| 51 |
|
| 52 |
+
### 🧪 Sistema de Testes Robusto
|
| 53 |
+
- **Pytest Framework**: Framework de testes otimizado e moderno
|
| 54 |
+
- **Testes Unitários**: Cobertura completa de módulos individuais
|
| 55 |
+
- **Testes de Integração**: Validação de fluxos completos do sistema
|
| 56 |
+
- **Fixtures Reutilizáveis**: Setup/teardown automatizado para testes
|
| 57 |
+
- **Mocking Avançado**: Isolamento de dependências externas
|
| 58 |
+
- **Relatórios de Cobertura**: Análise detalhada em HTML, XML e terminal
|
| 59 |
+
- **Testes Parametrizados**: Múltiplos cenários com dados variados
|
| 60 |
+
- **Execução Paralela**: Testes rápidos com pytest-xdist
|
| 61 |
+
- **Benchmarking**: Testes de performance e uso de memória
|
| 62 |
+
- **CI/CD Ready**: Integração completa com pipelines de desenvolvimento
|
| 63 |
+
|
| 64 |
## 🏗️ Arquitetura do Sistema
|
| 65 |
|
| 66 |
```
|
|
|
|
| 77 |
│ └── 📁 utils/ # Utilitários e helpers
|
| 78 |
├── 📁 config/ # Configurações do sistema
|
| 79 |
├── 📁 docs/ # Documentação completa
|
| 80 |
+
├── 📁 tests/ # Estrutura completa de testes
|
| 81 |
+
│ ├── 📁 unit/ # Testes unitários
|
| 82 |
+
│ ├── 📁 integration/ # Testes de integração
|
| 83 |
+
│ ├── 📁 data/ # Dados de teste
|
| 84 |
+
│ ├── 📄 conftest.py # Configurações globais
|
| 85 |
+
│ └── 📄 test_patterns.py # Padrões e exemplos
|
| 86 |
+
├── 📁 scripts/ # Scripts utilitários
|
| 87 |
+
│ └── 📁 log_analysis/ # Análise de logs
|
| 88 |
+
├── 📄 pytest.ini # Configuração do pytest
|
| 89 |
+
├── 📄 Makefile # Comandos automatizados
|
| 90 |
└── 📄 app.py # Aplicação principal
|
| 91 |
```
|
| 92 |
|
|
|
|
| 128 |
### 4. Acesse a interface
|
| 129 |
Abra seu navegador em `http://localhost:7860`
|
| 130 |
|
| 131 |
+
## 🧪 Executando Testes
|
| 132 |
+
|
| 133 |
+
### Comandos Rápidos (Makefile)
|
| 134 |
+
```bash
|
| 135 |
+
# Executar todos os testes
|
| 136 |
+
make test
|
| 137 |
+
|
| 138 |
+
# Testes unitários apenas
|
| 139 |
+
make test-unit
|
| 140 |
+
|
| 141 |
+
# Testes de integração
|
| 142 |
+
make test-integration
|
| 143 |
+
|
| 144 |
+
# Testes com relatório de cobertura
|
| 145 |
+
make test-coverage
|
| 146 |
+
|
| 147 |
+
# Testes rápidos (sem os lentos)
|
| 148 |
+
make test-fast
|
| 149 |
+
|
| 150 |
+
# Testes em paralelo
|
| 151 |
+
make test-parallel
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
### Comandos Pytest Diretos
|
| 155 |
+
```bash
|
| 156 |
+
# Executar todos os testes
|
| 157 |
+
pytest -v
|
| 158 |
+
|
| 159 |
+
# Testes por categoria
|
| 160 |
+
pytest -m unit # Apenas unitários
|
| 161 |
+
pytest -m integration # Apenas integração
|
| 162 |
+
pytest -m "not slow" # Excluir testes lentos
|
| 163 |
+
|
| 164 |
+
# Com cobertura
|
| 165 |
+
pytest --cov=src --cov-report=html
|
| 166 |
+
|
| 167 |
+
# Teste específico
|
| 168 |
+
pytest tests/unit/test_voting_system.py::test_majority_voting
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
### Relatórios de Cobertura
|
| 172 |
+
Após executar `make test-coverage`, abra `htmlcov/index.html` para ver o relatório detalhado.
|
| 173 |
+
|
| 174 |
## 🌐 Hugging Face Spaces
|
| 175 |
|
| 176 |
Este projeto está **100% compatível** com Hugging Face Spaces:
|
|
|
|
| 206 |
- [Guia do Desenvolvedor](docs/developer-guide.md)
|
| 207 |
- [Arquitetura do Sistema](docs/architecture.md)
|
| 208 |
- [Sistema de Logging](docs/logging_system.md)
|
| 209 |
+
- [Documentação de Testes](tests/README.md)
|
| 210 |
- [Solução de Problemas](docs/troubleshooting.md)
|
| 211 |
- [Troubleshooting HF Spaces](docs/huggingface_spaces_troubleshooting.md)
|
| 212 |
|
|
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[tool:pytest]
|
| 2 |
+
# Pytest configuration file
|
| 3 |
+
|
| 4 |
+
# Test discovery
|
| 5 |
+
testpaths = tests
|
| 6 |
+
python_files = test_*.py *_test.py
|
| 7 |
+
python_classes = Test*
|
| 8 |
+
python_functions = test_*
|
| 9 |
+
|
| 10 |
+
# Output options
|
| 11 |
+
addopts =
|
| 12 |
+
--verbose
|
| 13 |
+
--tb=short
|
| 14 |
+
--strict-markers
|
| 15 |
+
--strict-config
|
| 16 |
+
--cov=src
|
| 17 |
+
--cov=.
|
| 18 |
+
--cov-report=term-missing
|
| 19 |
+
--cov-report=html:htmlcov
|
| 20 |
+
--cov-report=xml
|
| 21 |
+
--cov-fail-under=80
|
| 22 |
+
--html=reports/report.html
|
| 23 |
+
--self-contained-html
|
| 24 |
+
--junitxml=reports/junit.xml
|
| 25 |
+
|
| 26 |
+
# Markers
|
| 27 |
+
markers =
|
| 28 |
+
unit: Unit tests
|
| 29 |
+
integration: Integration tests
|
| 30 |
+
slow: Slow running tests
|
| 31 |
+
api: API tests
|
| 32 |
+
ui: UI tests
|
| 33 |
+
smoke: Smoke tests
|
| 34 |
+
regression: Regression tests
|
| 35 |
+
performance: Performance tests
|
| 36 |
+
security: Security tests
|
| 37 |
+
|
| 38 |
+
# Filtering
|
| 39 |
+
filterwarnings =
|
| 40 |
+
ignore::UserWarning
|
| 41 |
+
ignore::DeprecationWarning
|
| 42 |
+
ignore::PendingDeprecationWarning
|
| 43 |
+
|
| 44 |
+
# Minimum version
|
| 45 |
+
minversion = 7.0
|
| 46 |
+
|
| 47 |
+
# Test timeout (in seconds)
|
| 48 |
+
timeout = 300
|
| 49 |
+
|
| 50 |
+
# Parallel execution
|
| 51 |
+
# Use with: pytest -n auto
|
| 52 |
+
# Requires pytest-xdist
|
| 53 |
+
|
| 54 |
+
# Asyncio mode
|
| 55 |
+
asyncio_mode = auto
|
|
@@ -29,8 +29,17 @@ psutil>=5.9.0
|
|
| 29 |
# Development and testing
|
| 30 |
pytest>=7.4.0
|
| 31 |
pytest-cov>=4.1.0
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
flake8>=6.0.0
|
| 33 |
black>=23.0.0
|
|
|
|
|
|
|
| 34 |
|
| 35 |
# Optional dependencies for advanced features
|
| 36 |
# Uncomment if needed:
|
|
|
|
| 29 |
# Development and testing
|
| 30 |
pytest>=7.4.0
|
| 31 |
pytest-cov>=4.1.0
|
| 32 |
+
pytest-mock>=3.11.0
|
| 33 |
+
pytest-asyncio>=0.21.0
|
| 34 |
+
pytest-xdist>=3.3.0
|
| 35 |
+
pytest-html>=3.2.0
|
| 36 |
+
pytest-benchmark>=4.0.0
|
| 37 |
+
factory-boy>=3.3.0
|
| 38 |
+
faker>=19.0.0
|
| 39 |
flake8>=6.0.0
|
| 40 |
black>=23.0.0
|
| 41 |
+
mypy>=1.5.0
|
| 42 |
+
coverage>=7.3.0
|
| 43 |
|
| 44 |
# Optional dependencies for advanced features
|
| 45 |
# Uncomment if needed:
|
|
@@ -0,0 +1,219 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Scripts de Análise Avançada de Logs
|
| 2 |
+
|
| 3 |
+
Este diretório contém scripts especializados para análise e monitoramento de logs da aplicação, desenvolvidos especificamente para auxiliar desenvolvedores na identificação e resolução de problemas.
|
| 4 |
+
|
| 5 |
+
## 📁 Estrutura dos Scripts
|
| 6 |
+
|
| 7 |
+
### 1. `advanced_log_analyzer.py`
|
| 8 |
+
**Análise Retrospectiva Detalhada**
|
| 9 |
+
|
| 10 |
+
Script principal para análise completa dos logs armazenados no banco de dados. Fornece relatórios detalhados sobre:
|
| 11 |
+
- Distribuição de logs por nível e módulo
|
| 12 |
+
- Detecção de erros e warnings
|
| 13 |
+
- Problemas de importação de módulos
|
| 14 |
+
- Análise de performance
|
| 15 |
+
- Timeline de atividade
|
| 16 |
+
- Eventos do Gradio
|
| 17 |
+
|
| 18 |
+
### 2. `realtime_monitor.py`
|
| 19 |
+
**Monitoramento em Tempo Real**
|
| 20 |
+
|
| 21 |
+
Monitor que acompanha os logs em tempo real e alerta sobre problemas críticos:
|
| 22 |
+
- Alertas instantâneos para erros críticos
|
| 23 |
+
- Detecção de problemas de performance
|
| 24 |
+
- Monitoramento de erros de importação
|
| 25 |
+
- Interface colorida no terminal
|
| 26 |
+
- Buffer de últimos problemas detectados
|
| 27 |
+
|
| 28 |
+
## 🚀 Como Usar
|
| 29 |
+
|
| 30 |
+
### Análise Retrospectiva
|
| 31 |
+
|
| 32 |
+
```bash
|
| 33 |
+
# Análise básica das últimas 24 horas
|
| 34 |
+
python scripts/log_analysis/advanced_log_analyzer.py
|
| 35 |
+
|
| 36 |
+
# Análise das últimas 6 horas
|
| 37 |
+
python scripts/log_analysis/advanced_log_analyzer.py --hours 6
|
| 38 |
+
|
| 39 |
+
# Salvar relatório em arquivo
|
| 40 |
+
python scripts/log_analysis/advanced_log_analyzer.py --output relatorio_logs.txt
|
| 41 |
+
|
| 42 |
+
# Exportar análise em JSON
|
| 43 |
+
python scripts/log_analysis/advanced_log_analyzer.py --json analise_logs.json
|
| 44 |
+
|
| 45 |
+
# Especificar caminho do banco de dados
|
| 46 |
+
python scripts/log_analysis/advanced_log_analyzer.py --db-path logs/application.db
|
| 47 |
+
|
| 48 |
+
# Modo silencioso (apenas erros)
|
| 49 |
+
python scripts/log_analysis/advanced_log_analyzer.py --quiet
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
### Monitoramento em Tempo Real
|
| 53 |
+
|
| 54 |
+
```bash
|
| 55 |
+
# Monitor básico (verifica a cada 5 segundos)
|
| 56 |
+
python scripts/log_analysis/realtime_monitor.py
|
| 57 |
+
|
| 58 |
+
# Monitor com intervalo personalizado
|
| 59 |
+
python scripts/log_analysis/realtime_monitor.py --interval 10
|
| 60 |
+
|
| 61 |
+
# Modo verboso (mostra todos os logs)
|
| 62 |
+
python scripts/log_analysis/realtime_monitor.py --verbose
|
| 63 |
+
|
| 64 |
+
# Ajustar limite de alertas
|
| 65 |
+
python scripts/log_analysis/realtime_monitor.py --threshold 10
|
| 66 |
+
|
| 67 |
+
# Especificar banco de dados
|
| 68 |
+
python scripts/log_analysis/realtime_monitor.py --db-path logs/application.db
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
## 📊 Tipos de Análise
|
| 72 |
+
|
| 73 |
+
### Problemas Detectados
|
| 74 |
+
|
| 75 |
+
1. **Erros de Importação**
|
| 76 |
+
- `ImportError`, `ModuleNotFoundError`
|
| 77 |
+
- Variáveis não definidas (`not defined`)
|
| 78 |
+
- Problemas de estrutura de pacotes
|
| 79 |
+
|
| 80 |
+
2. **Problemas de Performance**
|
| 81 |
+
- Operações que demoram mais de 1 segundo
|
| 82 |
+
- Análise de tempo de resposta
|
| 83 |
+
- Identificação de gargalos
|
| 84 |
+
|
| 85 |
+
3. **Erros Críticos**
|
| 86 |
+
- Exceções não tratadas
|
| 87 |
+
- Falhas de servidor
|
| 88 |
+
- Problemas de memória
|
| 89 |
+
|
| 90 |
+
4. **Eventos do Sistema**
|
| 91 |
+
- Inicialização do Gradio
|
| 92 |
+
- Mudanças de configuração
|
| 93 |
+
- Atividade de módulos
|
| 94 |
+
|
| 95 |
+
### Relatórios Gerados
|
| 96 |
+
|
| 97 |
+
- **Distribuição por Nível**: Contagem de logs por severidade
|
| 98 |
+
- **Módulos Mais Ativos**: Ranking de módulos por atividade
|
| 99 |
+
- **Timeline**: Atividade por hora com gráfico visual
|
| 100 |
+
- **Problemas Críticos**: Lista detalhada de erros e warnings
|
| 101 |
+
- **Análise de Performance**: Operações lentas identificadas
|
| 102 |
+
|
| 103 |
+
## 🎨 Recursos Visuais
|
| 104 |
+
|
| 105 |
+
### Monitor em Tempo Real
|
| 106 |
+
- **Cores por Severidade**:
|
| 107 |
+
- 🚨 **Vermelho**: Problemas críticos (memória, servidor)
|
| 108 |
+
- ⚠️ **Amarelo**: Problemas médios (importação, Gradio)
|
| 109 |
+
- ℹ️ **Azul**: Informações e problemas menores
|
| 110 |
+
|
| 111 |
+
- **Estatísticas em Tempo Real**:
|
| 112 |
+
- Contador de logs verificados
|
| 113 |
+
- Número de alertas gerados
|
| 114 |
+
- Timestamp atual
|
| 115 |
+
|
| 116 |
+
### Relatórios de Análise
|
| 117 |
+
- **Gráficos ASCII**: Timeline visual de atividade
|
| 118 |
+
- **Emojis Informativos**: Categorização visual dos problemas
|
| 119 |
+
- **Formatação Estruturada**: Seções bem organizadas
|
| 120 |
+
|
| 121 |
+
## 🔧 Configuração Avançada
|
| 122 |
+
|
| 123 |
+
### Variáveis de Ambiente
|
| 124 |
+
|
| 125 |
+
```bash
|
| 126 |
+
# Definir caminho padrão do banco de logs
|
| 127 |
+
export LOG_DB_PATH="logs/application.db"
|
| 128 |
+
|
| 129 |
+
# Configurar intervalo padrão do monitor
|
| 130 |
+
export MONITOR_INTERVAL=5
|
| 131 |
+
```
|
| 132 |
+
|
| 133 |
+
### Personalização de Padrões
|
| 134 |
+
|
| 135 |
+
Os scripts podem ser facilmente modificados para detectar padrões específicos:
|
| 136 |
+
|
| 137 |
+
```python
|
| 138 |
+
# Em advanced_log_analyzer.py
|
| 139 |
+
self.patterns = {
|
| 140 |
+
'custom_error': re.compile(r'seu_padrao_aqui', re.IGNORECASE),
|
| 141 |
+
# ... outros padrões
|
| 142 |
+
}
|
| 143 |
+
```
|
| 144 |
+
|
| 145 |
+
## 📈 Casos de Uso Comuns
|
| 146 |
+
|
| 147 |
+
### 1. Debugging de Problemas de Importação
|
| 148 |
+
```bash
|
| 149 |
+
# Foca apenas em erros de importação
|
| 150 |
+
python scripts/log_analysis/advanced_log_analyzer.py --hours 1 | grep -A5 "ERROS DE IMPORTAÇÃO"
|
| 151 |
+
```
|
| 152 |
+
|
| 153 |
+
### 2. Monitoramento Durante Deploy
|
| 154 |
+
```bash
|
| 155 |
+
# Monitor em tempo real durante atualizações
|
| 156 |
+
python scripts/log_analysis/realtime_monitor.py --verbose --interval 2
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
### 3. Análise de Performance Semanal
|
| 160 |
+
```bash
|
| 161 |
+
# Relatório completo da semana
|
| 162 |
+
python scripts/log_analysis/advanced_log_analyzer.py --hours 168 --output relatorio_semanal.txt
|
| 163 |
+
```
|
| 164 |
+
|
| 165 |
+
### 4. Exportação para Análise Externa
|
| 166 |
+
```bash
|
| 167 |
+
# Dados em JSON para ferramentas externas
|
| 168 |
+
python scripts/log_analysis/advanced_log_analyzer.py --json logs_data.json --quiet
|
| 169 |
+
```
|
| 170 |
+
|
| 171 |
+
## 🛠️ Troubleshooting
|
| 172 |
+
|
| 173 |
+
### Problemas Comuns
|
| 174 |
+
|
| 175 |
+
1. **"Banco de dados não encontrado"**
|
| 176 |
+
- Verifique se a aplicação está rodando
|
| 177 |
+
- Confirme o caminho com `--db-path`
|
| 178 |
+
- Certifique-se de que logs estão sendo gerados
|
| 179 |
+
|
| 180 |
+
2. **"Nenhum log encontrado"**
|
| 181 |
+
- Reduza o período com `--hours`
|
| 182 |
+
- Verifique se há atividade na aplicação
|
| 183 |
+
- Confirme configuração de logging
|
| 184 |
+
|
| 185 |
+
3. **Monitor não mostra alertas**
|
| 186 |
+
- Use `--verbose` para ver todos os logs
|
| 187 |
+
- Ajuste `--threshold` se necessário
|
| 188 |
+
- Verifique padrões de detecção
|
| 189 |
+
|
| 190 |
+
### Logs de Debug
|
| 191 |
+
|
| 192 |
+
Para debug dos próprios scripts:
|
| 193 |
+
|
| 194 |
+
```bash
|
| 195 |
+
# Ativar debug do Python
|
| 196 |
+
PYTHONPATH=. python -v scripts/log_analysis/advanced_log_analyzer.py
|
| 197 |
+
```
|
| 198 |
+
|
| 199 |
+
## 📝 Contribuição
|
| 200 |
+
|
| 201 |
+
Para adicionar novos padrões de detecção ou melhorar os scripts:
|
| 202 |
+
|
| 203 |
+
1. Modifique os padrões regex nas classes
|
| 204 |
+
2. Adicione novos tipos de análise
|
| 205 |
+
3. Implemente novos formatos de saída
|
| 206 |
+
4. Teste com dados reais da aplicação
|
| 207 |
+
|
| 208 |
+
## 🔗 Integração
|
| 209 |
+
|
| 210 |
+
Estes scripts podem ser integrados com:
|
| 211 |
+
- **CI/CD**: Análise automática após deploys
|
| 212 |
+
- **Monitoramento**: Alertas via webhook
|
| 213 |
+
- **Dashboards**: Dados JSON para visualização
|
| 214 |
+
- **Cron Jobs**: Relatórios periódicos automáticos
|
| 215 |
+
|
| 216 |
+
---
|
| 217 |
+
|
| 218 |
+
**Desenvolvido para o Sistema de Análise de Mercado**
|
| 219 |
+
*Versão 1.0.0 - Scripts de Análise Avançada de Logs*
|
|
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Script de Análise Avançada de Logs para Desenvolvedores
|
| 4 |
+
|
| 5 |
+
Este script fornece análise detalhada dos logs da aplicação,
|
| 6 |
+
incluindo detecção de padrões, análise de performance e
|
| 7 |
+
identificação de problemas comuns.
|
| 8 |
+
|
| 9 |
+
Autor: Sistema de Análise de Mercado
|
| 10 |
+
Versão: 1.0.0
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
import os
|
| 14 |
+
import re
|
| 15 |
+
import json
|
| 16 |
+
import sqlite3
|
| 17 |
+
from datetime import datetime, timedelta
|
| 18 |
+
from collections import defaultdict, Counter
|
| 19 |
+
from typing import Dict, List, Tuple, Optional
|
| 20 |
+
import argparse
|
| 21 |
+
from pathlib import Path
|
| 22 |
+
|
| 23 |
+
class AdvancedLogAnalyzer:
|
| 24 |
+
"""Analisador avançado de logs para desenvolvedores"""
|
| 25 |
+
|
| 26 |
+
def __init__(self, log_db_path: str = None):
|
| 27 |
+
self.log_db_path = log_db_path or "logs/application.db"
|
| 28 |
+
self.patterns = {
|
| 29 |
+
'error': re.compile(r'ERROR|Exception|Traceback|Failed', re.IGNORECASE),
|
| 30 |
+
'warning': re.compile(r'WARNING|WARN', re.IGNORECASE),
|
| 31 |
+
'performance': re.compile(r'took (\d+\.?\d*)\s*(ms|seconds?)', re.IGNORECASE),
|
| 32 |
+
'memory': re.compile(r'memory|RAM|heap', re.IGNORECASE),
|
| 33 |
+
'import_error': re.compile(r'ImportError|ModuleNotFoundError|not defined', re.IGNORECASE),
|
| 34 |
+
'gradio': re.compile(r'gradio|Running on|localhost', re.IGNORECASE)
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
def connect_db(self) -> sqlite3.Connection:
|
| 38 |
+
"""Conecta ao banco de dados de logs"""
|
| 39 |
+
if not os.path.exists(self.log_db_path):
|
| 40 |
+
raise FileNotFoundError(f"Banco de dados de logs não encontrado: {self.log_db_path}")
|
| 41 |
+
return sqlite3.connect(self.log_db_path)
|
| 42 |
+
|
| 43 |
+
def analyze_recent_logs(self, hours: int = 24) -> Dict:
|
| 44 |
+
"""Analisa logs das últimas N horas"""
|
| 45 |
+
try:
|
| 46 |
+
conn = self.connect_db()
|
| 47 |
+
cursor = conn.cursor()
|
| 48 |
+
|
| 49 |
+
# Busca logs recentes
|
| 50 |
+
since_time = datetime.now() - timedelta(hours=hours)
|
| 51 |
+
cursor.execute("""
|
| 52 |
+
SELECT timestamp, level, message, module
|
| 53 |
+
FROM logs
|
| 54 |
+
WHERE timestamp >= ?
|
| 55 |
+
ORDER BY timestamp DESC
|
| 56 |
+
""", (since_time.isoformat(),))
|
| 57 |
+
|
| 58 |
+
logs = cursor.fetchall()
|
| 59 |
+
conn.close()
|
| 60 |
+
|
| 61 |
+
return self._process_logs(logs)
|
| 62 |
+
|
| 63 |
+
except Exception as e:
|
| 64 |
+
return {"error": f"Erro ao analisar logs: {str(e)}"}
|
| 65 |
+
|
| 66 |
+
def _process_logs(self, logs: List[Tuple]) -> Dict:
|
| 67 |
+
"""Processa e analisa os logs"""
|
| 68 |
+
analysis = {
|
| 69 |
+
'total_logs': len(logs),
|
| 70 |
+
'by_level': Counter(),
|
| 71 |
+
'by_module': Counter(),
|
| 72 |
+
'errors': [],
|
| 73 |
+
'warnings': [],
|
| 74 |
+
'performance_issues': [],
|
| 75 |
+
'import_errors': [],
|
| 76 |
+
'gradio_events': [],
|
| 77 |
+
'timeline': defaultdict(int)
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
for timestamp, level, message, module in logs:
|
| 81 |
+
# Contadores básicos
|
| 82 |
+
analysis['by_level'][level] += 1
|
| 83 |
+
analysis['by_module'][module] += 1
|
| 84 |
+
|
| 85 |
+
# Timeline por hora
|
| 86 |
+
hour = datetime.fromisoformat(timestamp).strftime('%Y-%m-%d %H:00')
|
| 87 |
+
analysis['timeline'][hour] += 1
|
| 88 |
+
|
| 89 |
+
# Análise de padrões
|
| 90 |
+
if self.patterns['error'].search(message):
|
| 91 |
+
analysis['errors'].append({
|
| 92 |
+
'timestamp': timestamp,
|
| 93 |
+
'module': module,
|
| 94 |
+
'message': message[:200] + '...' if len(message) > 200 else message
|
| 95 |
+
})
|
| 96 |
+
|
| 97 |
+
if self.patterns['warning'].search(message):
|
| 98 |
+
analysis['warnings'].append({
|
| 99 |
+
'timestamp': timestamp,
|
| 100 |
+
'module': module,
|
| 101 |
+
'message': message[:200] + '...' if len(message) > 200 else message
|
| 102 |
+
})
|
| 103 |
+
|
| 104 |
+
if self.patterns['import_error'].search(message):
|
| 105 |
+
analysis['import_errors'].append({
|
| 106 |
+
'timestamp': timestamp,
|
| 107 |
+
'module': module,
|
| 108 |
+
'message': message
|
| 109 |
+
})
|
| 110 |
+
|
| 111 |
+
if self.patterns['gradio'].search(message):
|
| 112 |
+
analysis['gradio_events'].append({
|
| 113 |
+
'timestamp': timestamp,
|
| 114 |
+
'message': message
|
| 115 |
+
})
|
| 116 |
+
|
| 117 |
+
# Análise de performance
|
| 118 |
+
perf_match = self.patterns['performance'].search(message)
|
| 119 |
+
if perf_match:
|
| 120 |
+
time_value = float(perf_match.group(1))
|
| 121 |
+
unit = perf_match.group(2).lower()
|
| 122 |
+
|
| 123 |
+
# Converte para milissegundos
|
| 124 |
+
if 'second' in unit:
|
| 125 |
+
time_value *= 1000
|
| 126 |
+
|
| 127 |
+
if time_value > 1000: # Mais de 1 segundo
|
| 128 |
+
analysis['performance_issues'].append({
|
| 129 |
+
'timestamp': timestamp,
|
| 130 |
+
'module': module,
|
| 131 |
+
'duration_ms': time_value,
|
| 132 |
+
'message': message
|
| 133 |
+
})
|
| 134 |
+
|
| 135 |
+
return analysis
|
| 136 |
+
|
| 137 |
+
def generate_report(self, analysis: Dict) -> str:
|
| 138 |
+
"""Gera relatório detalhado da análise"""
|
| 139 |
+
report = []
|
| 140 |
+
report.append("=" * 60)
|
| 141 |
+
report.append("RELATÓRIO DE ANÁLISE AVANÇADA DE LOGS")
|
| 142 |
+
report.append("=" * 60)
|
| 143 |
+
report.append(f"Gerado em: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
|
| 144 |
+
report.append(f"Total de logs analisados: {analysis['total_logs']}")
|
| 145 |
+
report.append("")
|
| 146 |
+
|
| 147 |
+
# Resumo por nível
|
| 148 |
+
report.append("📊 DISTRIBUIÇÃO POR NÍVEL:")
|
| 149 |
+
for level, count in analysis['by_level'].most_common():
|
| 150 |
+
percentage = (count / analysis['total_logs']) * 100
|
| 151 |
+
report.append(f" {level}: {count} ({percentage:.1f}%)")
|
| 152 |
+
report.append("")
|
| 153 |
+
|
| 154 |
+
# Módulos mais ativos
|
| 155 |
+
report.append("🔧 MÓDULOS MAIS ATIVOS:")
|
| 156 |
+
for module, count in analysis['by_module'].most_common(10):
|
| 157 |
+
report.append(f" {module}: {count} logs")
|
| 158 |
+
report.append("")
|
| 159 |
+
|
| 160 |
+
# Erros críticos
|
| 161 |
+
if analysis['errors']:
|
| 162 |
+
report.append(f"❌ ERROS ENCONTRADOS ({len(analysis['errors'])}):")
|
| 163 |
+
for error in analysis['errors'][:5]: # Mostra apenas os 5 mais recentes
|
| 164 |
+
report.append(f" [{error['timestamp']}] {error['module']}")
|
| 165 |
+
report.append(f" {error['message']}")
|
| 166 |
+
if len(analysis['errors']) > 5:
|
| 167 |
+
report.append(f" ... e mais {len(analysis['errors']) - 5} erros")
|
| 168 |
+
report.append("")
|
| 169 |
+
|
| 170 |
+
# Problemas de importação
|
| 171 |
+
if analysis['import_errors']:
|
| 172 |
+
report.append(f"📦 ERROS DE IMPORTAÇÃO ({len(analysis['import_errors'])}):")
|
| 173 |
+
for error in analysis['import_errors']:
|
| 174 |
+
report.append(f" [{error['timestamp']}] {error['module']}")
|
| 175 |
+
report.append(f" {error['message']}")
|
| 176 |
+
report.append("")
|
| 177 |
+
|
| 178 |
+
# Problemas de performance
|
| 179 |
+
if analysis['performance_issues']:
|
| 180 |
+
report.append(f"⚡ PROBLEMAS DE PERFORMANCE ({len(analysis['performance_issues'])}):")
|
| 181 |
+
for issue in analysis['performance_issues'][:5]:
|
| 182 |
+
report.append(f" [{issue['timestamp']}] {issue['module']}")
|
| 183 |
+
report.append(f" Duração: {issue['duration_ms']:.0f}ms")
|
| 184 |
+
report.append(f" {issue['message'][:100]}...")
|
| 185 |
+
report.append("")
|
| 186 |
+
|
| 187 |
+
# Eventos do Gradio
|
| 188 |
+
if analysis['gradio_events']:
|
| 189 |
+
report.append(f"🌐 EVENTOS DO GRADIO ({len(analysis['gradio_events'])}):")
|
| 190 |
+
for event in analysis['gradio_events'][-3:]: # Últimos 3 eventos
|
| 191 |
+
report.append(f" [{event['timestamp']}] {event['message']}")
|
| 192 |
+
report.append("")
|
| 193 |
+
|
| 194 |
+
# Timeline
|
| 195 |
+
if analysis['timeline']:
|
| 196 |
+
report.append("📈 ATIVIDADE POR HORA:")
|
| 197 |
+
sorted_timeline = sorted(analysis['timeline'].items())
|
| 198 |
+
for hour, count in sorted_timeline[-12:]: # Últimas 12 horas
|
| 199 |
+
bar = "█" * min(count // 10, 20) # Barra visual
|
| 200 |
+
report.append(f" {hour}: {count:3d} {bar}")
|
| 201 |
+
report.append("")
|
| 202 |
+
|
| 203 |
+
report.append("=" * 60)
|
| 204 |
+
return "\n".join(report)
|
| 205 |
+
|
| 206 |
+
def export_json(self, analysis: Dict, output_file: str):
|
| 207 |
+
"""Exporta análise em formato JSON"""
|
| 208 |
+
# Converte Counter para dict para serialização JSON
|
| 209 |
+
json_analysis = {
|
| 210 |
+
'total_logs': analysis['total_logs'],
|
| 211 |
+
'by_level': dict(analysis['by_level']),
|
| 212 |
+
'by_module': dict(analysis['by_module']),
|
| 213 |
+
'errors': analysis['errors'],
|
| 214 |
+
'warnings': analysis['warnings'],
|
| 215 |
+
'performance_issues': analysis['performance_issues'],
|
| 216 |
+
'import_errors': analysis['import_errors'],
|
| 217 |
+
'gradio_events': analysis['gradio_events'],
|
| 218 |
+
'timeline': dict(analysis['timeline']),
|
| 219 |
+
'generated_at': datetime.now().isoformat()
|
| 220 |
+
}
|
| 221 |
+
|
| 222 |
+
with open(output_file, 'w', encoding='utf-8') as f:
|
| 223 |
+
json.dump(json_analysis, f, indent=2, ensure_ascii=False)
|
| 224 |
+
|
| 225 |
+
def main():
|
| 226 |
+
"""Função principal do script"""
|
| 227 |
+
parser = argparse.ArgumentParser(description='Análise Avançada de Logs para Desenvolvedores')
|
| 228 |
+
parser.add_argument('--hours', type=int, default=24, help='Horas para analisar (padrão: 24)')
|
| 229 |
+
parser.add_argument('--db-path', type=str, help='Caminho para o banco de dados de logs')
|
| 230 |
+
parser.add_argument('--output', type=str, help='Arquivo de saída para relatório')
|
| 231 |
+
parser.add_argument('--json', type=str, help='Exportar análise em JSON')
|
| 232 |
+
parser.add_argument('--quiet', action='store_true', help='Modo silencioso (apenas erros)')
|
| 233 |
+
|
| 234 |
+
args = parser.parse_args()
|
| 235 |
+
|
| 236 |
+
try:
|
| 237 |
+
# Inicializa o analisador
|
| 238 |
+
analyzer = AdvancedLogAnalyzer(args.db_path)
|
| 239 |
+
|
| 240 |
+
if not args.quiet:
|
| 241 |
+
print(f"🔍 Analisando logs das últimas {args.hours} horas...")
|
| 242 |
+
|
| 243 |
+
# Executa análise
|
| 244 |
+
analysis = analyzer.analyze_recent_logs(args.hours)
|
| 245 |
+
|
| 246 |
+
if 'error' in analysis:
|
| 247 |
+
print(f"❌ Erro: {analysis['error']}")
|
| 248 |
+
return 1
|
| 249 |
+
|
| 250 |
+
# Gera relatório
|
| 251 |
+
report = analyzer.generate_report(analysis)
|
| 252 |
+
|
| 253 |
+
# Salva ou exibe relatório
|
| 254 |
+
if args.output:
|
| 255 |
+
with open(args.output, 'w', encoding='utf-8') as f:
|
| 256 |
+
f.write(report)
|
| 257 |
+
if not args.quiet:
|
| 258 |
+
print(f"📄 Relatório salvo em: {args.output}")
|
| 259 |
+
else:
|
| 260 |
+
print(report)
|
| 261 |
+
|
| 262 |
+
# Exporta JSON se solicitado
|
| 263 |
+
if args.json:
|
| 264 |
+
analyzer.export_json(analysis, args.json)
|
| 265 |
+
if not args.quiet:
|
| 266 |
+
print(f"📊 Análise JSON salva em: {args.json}")
|
| 267 |
+
|
| 268 |
+
# Resumo final
|
| 269 |
+
if not args.quiet:
|
| 270 |
+
total_issues = len(analysis['errors']) + len(analysis['import_errors']) + len(analysis['performance_issues'])
|
| 271 |
+
if total_issues > 0:
|
| 272 |
+
print(f"\n⚠️ Total de problemas encontrados: {total_issues}")
|
| 273 |
+
else:
|
| 274 |
+
print(f"\n✅ Nenhum problema crítico encontrado!")
|
| 275 |
+
|
| 276 |
+
return 0
|
| 277 |
+
|
| 278 |
+
except Exception as e:
|
| 279 |
+
print(f"❌ Erro inesperado: {str(e)}")
|
| 280 |
+
return 1
|
| 281 |
+
|
| 282 |
+
if __name__ == "__main__":
|
| 283 |
+
exit(main())
|
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Script de Demonstração - Análise de Logs
|
| 4 |
+
|
| 5 |
+
Este script demonstra como usar os analisadores de log
|
| 6 |
+
e fornece exemplos práticos de uso para desenvolvedores.
|
| 7 |
+
|
| 8 |
+
Autor: Sistema de Análise de Mercado
|
| 9 |
+
Versão: 1.0.0
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import sys
|
| 14 |
+
import time
|
| 15 |
+
import subprocess
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
|
| 18 |
+
def print_header(title: str):
|
| 19 |
+
"""Imprime cabeçalho formatado"""
|
| 20 |
+
print("\n" + "="*60)
|
| 21 |
+
print(f" {title}")
|
| 22 |
+
print("="*60)
|
| 23 |
+
|
| 24 |
+
def print_step(step: str, description: str):
|
| 25 |
+
"""Imprime passo da demonstração"""
|
| 26 |
+
print(f"\n🔹 {step}: {description}")
|
| 27 |
+
print("-" * 50)
|
| 28 |
+
|
| 29 |
+
def run_command(command: list, description: str = ""):
|
| 30 |
+
"""Executa comando e mostra resultado"""
|
| 31 |
+
if description:
|
| 32 |
+
print(f"\n💻 Executando: {description}")
|
| 33 |
+
|
| 34 |
+
print(f"Comando: {' '.join(command)}")
|
| 35 |
+
print("\nSaída:")
|
| 36 |
+
print("-" * 30)
|
| 37 |
+
|
| 38 |
+
try:
|
| 39 |
+
result = subprocess.run(command, capture_output=True, text=True, cwd=Path(__file__).parent.parent.parent)
|
| 40 |
+
|
| 41 |
+
if result.stdout:
|
| 42 |
+
print(result.stdout)
|
| 43 |
+
|
| 44 |
+
if result.stderr:
|
| 45 |
+
print(f"⚠️ Avisos/Erros:\n{result.stderr}")
|
| 46 |
+
|
| 47 |
+
if result.returncode != 0:
|
| 48 |
+
print(f"❌ Comando falhou com código: {result.returncode}")
|
| 49 |
+
else:
|
| 50 |
+
print("✅ Comando executado com sucesso")
|
| 51 |
+
|
| 52 |
+
except Exception as e:
|
| 53 |
+
print(f"❌ Erro ao executar comando: {str(e)}")
|
| 54 |
+
|
| 55 |
+
print("-" * 30)
|
| 56 |
+
|
| 57 |
+
def check_prerequisites():
|
| 58 |
+
"""Verifica se os pré-requisitos estão atendidos"""
|
| 59 |
+
print_header("VERIFICAÇÃO DE PRÉ-REQUISITOS")
|
| 60 |
+
|
| 61 |
+
# Verifica se estamos no diretório correto
|
| 62 |
+
current_dir = Path.cwd()
|
| 63 |
+
expected_files = ['app.py', 'config.py', 'logs']
|
| 64 |
+
|
| 65 |
+
print(f"📁 Diretório atual: {current_dir}")
|
| 66 |
+
|
| 67 |
+
missing_files = []
|
| 68 |
+
for file in expected_files:
|
| 69 |
+
if not (current_dir / file).exists():
|
| 70 |
+
missing_files.append(file)
|
| 71 |
+
|
| 72 |
+
if missing_files:
|
| 73 |
+
print(f"❌ Arquivos/diretórios não encontrados: {', '.join(missing_files)}")
|
| 74 |
+
print("💡 Execute este script a partir do diretório raiz do projeto")
|
| 75 |
+
return False
|
| 76 |
+
|
| 77 |
+
# Verifica se o banco de logs existe
|
| 78 |
+
log_db = current_dir / 'logs' / 'application.db'
|
| 79 |
+
if not log_db.exists():
|
| 80 |
+
print(f"⚠️ Banco de logs não encontrado: {log_db}")
|
| 81 |
+
print("💡 Execute a aplicação primeiro para gerar logs")
|
| 82 |
+
return False
|
| 83 |
+
|
| 84 |
+
print("✅ Todos os pré-requisitos atendidos")
|
| 85 |
+
return True
|
| 86 |
+
|
| 87 |
+
def demo_basic_analysis():
|
| 88 |
+
"""Demonstra análise básica de logs"""
|
| 89 |
+
print_header("DEMONSTRAÇÃO: ANÁLISE BÁSICA DE LOGS")
|
| 90 |
+
|
| 91 |
+
print_step("1", "Análise das últimas 2 horas")
|
| 92 |
+
run_command([
|
| 93 |
+
sys.executable,
|
| 94 |
+
"scripts/log_analysis/advanced_log_analyzer.py",
|
| 95 |
+
"--hours", "2"
|
| 96 |
+
], "Análise básica das últimas 2 horas")
|
| 97 |
+
|
| 98 |
+
print_step("2", "Análise silenciosa (apenas resumo)")
|
| 99 |
+
run_command([
|
| 100 |
+
sys.executable,
|
| 101 |
+
"scripts/log_analysis/advanced_log_analyzer.py",
|
| 102 |
+
"--hours", "1",
|
| 103 |
+
"--quiet"
|
| 104 |
+
], "Modo silencioso")
|
| 105 |
+
|
| 106 |
+
def demo_export_features():
|
| 107 |
+
"""Demonstra recursos de exportação"""
|
| 108 |
+
print_header("DEMONSTRAÇÃO: RECURSOS DE EXPORTAÇÃO")
|
| 109 |
+
|
| 110 |
+
print_step("1", "Exportação para arquivo de texto")
|
| 111 |
+
run_command([
|
| 112 |
+
sys.executable,
|
| 113 |
+
"scripts/log_analysis/advanced_log_analyzer.py",
|
| 114 |
+
"--hours", "1",
|
| 115 |
+
"--output", "demo_report.txt"
|
| 116 |
+
], "Salvando relatório em arquivo")
|
| 117 |
+
|
| 118 |
+
# Mostra conteúdo do arquivo gerado
|
| 119 |
+
report_file = Path("demo_report.txt")
|
| 120 |
+
if report_file.exists():
|
| 121 |
+
print("\n📄 Conteúdo do relatório gerado:")
|
| 122 |
+
with open(report_file, 'r', encoding='utf-8') as f:
|
| 123 |
+
content = f.read()
|
| 124 |
+
# Mostra apenas as primeiras 20 linhas
|
| 125 |
+
lines = content.split('\n')[:20]
|
| 126 |
+
print('\n'.join(lines))
|
| 127 |
+
if len(content.split('\n')) > 20:
|
| 128 |
+
print("\n... (conteúdo truncado)")
|
| 129 |
+
|
| 130 |
+
print_step("2", "Exportação para JSON")
|
| 131 |
+
run_command([
|
| 132 |
+
sys.executable,
|
| 133 |
+
"scripts/log_analysis/advanced_log_analyzer.py",
|
| 134 |
+
"--hours", "1",
|
| 135 |
+
"--json", "demo_analysis.json",
|
| 136 |
+
"--quiet"
|
| 137 |
+
], "Exportando dados em JSON")
|
| 138 |
+
|
| 139 |
+
# Mostra estrutura do JSON
|
| 140 |
+
json_file = Path("demo_analysis.json")
|
| 141 |
+
if json_file.exists():
|
| 142 |
+
print("\n📊 Estrutura do JSON gerado:")
|
| 143 |
+
import json
|
| 144 |
+
with open(json_file, 'r', encoding='utf-8') as f:
|
| 145 |
+
data = json.load(f)
|
| 146 |
+
print(f" - total_logs: {data.get('total_logs', 0)}")
|
| 147 |
+
print(f" - by_level: {dict(list(data.get('by_level', {}).items())[:3])}...")
|
| 148 |
+
print(f" - errors: {len(data.get('errors', []))} entradas")
|
| 149 |
+
print(f" - warnings: {len(data.get('warnings', []))} entradas")
|
| 150 |
+
|
| 151 |
+
def demo_realtime_monitor():
|
| 152 |
+
"""Demonstra monitor em tempo real (simulação)"""
|
| 153 |
+
print_header("DEMONSTRAÇÃO: MONITOR EM TEMPO REAL")
|
| 154 |
+
|
| 155 |
+
print("\n🔍 O monitor em tempo real seria executado com:")
|
| 156 |
+
print("python scripts/log_analysis/realtime_monitor.py")
|
| 157 |
+
print("\n💡 Para esta demonstração, vamos simular com verificação única:")
|
| 158 |
+
|
| 159 |
+
print_step("1", "Simulação de monitoramento")
|
| 160 |
+
print("\n⏱️ Em um cenário real, o monitor ficaria ativo continuamente...")
|
| 161 |
+
print("🔄 Verificando logs a cada 5 segundos...")
|
| 162 |
+
print("🚨 Alertando sobre problemas críticos...")
|
| 163 |
+
print("📊 Mostrando estatísticas em tempo real...")
|
| 164 |
+
|
| 165 |
+
print("\n🛑 Para parar o monitor real, use Ctrl+C")
|
| 166 |
+
print("\n📋 Comandos úteis do monitor:")
|
| 167 |
+
print(" --verbose : Mostra todos os logs")
|
| 168 |
+
print(" --interval 10: Verifica a cada 10 segundos")
|
| 169 |
+
print(" --threshold 3: Máximo 3 alertas por verificação")
|
| 170 |
+
|
| 171 |
+
def demo_practical_examples():
|
| 172 |
+
"""Mostra exemplos práticos de uso"""
|
| 173 |
+
print_header("EXEMPLOS PRÁTICOS DE USO")
|
| 174 |
+
|
| 175 |
+
examples = [
|
| 176 |
+
{
|
| 177 |
+
"title": "Debugging após deploy",
|
| 178 |
+
"command": "python scripts/log_analysis/advanced_log_analyzer.py --hours 1 --output deploy_check.txt",
|
| 179 |
+
"description": "Analisa logs da última hora após um deploy"
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"title": "Monitoramento de desenvolvimento",
|
| 183 |
+
"command": "python scripts/log_analysis/realtime_monitor.py --verbose --interval 3",
|
| 184 |
+
"description": "Monitor detalhado durante desenvolvimento"
|
| 185 |
+
},
|
| 186 |
+
{
|
| 187 |
+
"title": "Relatório semanal",
|
| 188 |
+
"command": "python scripts/log_analysis/advanced_log_analyzer.py --hours 168 --json weekly_report.json",
|
| 189 |
+
"description": "Gera relatório JSON da semana completa"
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"title": "Verificação rápida de erros",
|
| 193 |
+
"command": "python scripts/log_analysis/advanced_log_analyzer.py --hours 6 --quiet",
|
| 194 |
+
"description": "Verificação silenciosa das últimas 6 horas"
|
| 195 |
+
}
|
| 196 |
+
]
|
| 197 |
+
|
| 198 |
+
for i, example in enumerate(examples, 1):
|
| 199 |
+
print(f"\n{i}. {example['title']}")
|
| 200 |
+
print(f" Comando: {example['command']}")
|
| 201 |
+
print(f" Uso: {example['description']}")
|
| 202 |
+
|
| 203 |
+
def cleanup_demo_files():
|
| 204 |
+
"""Remove arquivos de demonstração"""
|
| 205 |
+
demo_files = ['demo_report.txt', 'demo_analysis.json']
|
| 206 |
+
|
| 207 |
+
print("\n🧹 Limpando arquivos de demonstração...")
|
| 208 |
+
for file in demo_files:
|
| 209 |
+
file_path = Path(file)
|
| 210 |
+
if file_path.exists():
|
| 211 |
+
file_path.unlink()
|
| 212 |
+
print(f" ✅ Removido: {file}")
|
| 213 |
+
else:
|
| 214 |
+
print(f" ℹ️ Não encontrado: {file}")
|
| 215 |
+
|
| 216 |
+
def main():
|
| 217 |
+
"""Função principal da demonstração"""
|
| 218 |
+
print_header("DEMONSTRAÇÃO DOS SCRIPTS DE ANÁLISE DE LOGS")
|
| 219 |
+
print("\n🎯 Esta demonstração mostra como usar os scripts de análise de logs")
|
| 220 |
+
print("📚 Consulte o README.md para documentação completa")
|
| 221 |
+
|
| 222 |
+
# Verifica pré-requisitos
|
| 223 |
+
if not check_prerequisites():
|
| 224 |
+
print("\n❌ Não é possível continuar sem os pré-requisitos")
|
| 225 |
+
return 1
|
| 226 |
+
|
| 227 |
+
try:
|
| 228 |
+
# Executa demonstrações
|
| 229 |
+
demo_basic_analysis()
|
| 230 |
+
demo_export_features()
|
| 231 |
+
demo_realtime_monitor()
|
| 232 |
+
demo_practical_examples()
|
| 233 |
+
|
| 234 |
+
print_header("DEMONSTRAÇÃO CONCLUÍDA")
|
| 235 |
+
print("\n✅ Todos os exemplos foram executados com sucesso!")
|
| 236 |
+
print("\n📖 Próximos passos:")
|
| 237 |
+
print(" 1. Leia o README.md para mais detalhes")
|
| 238 |
+
print(" 2. Execute os scripts com seus próprios parâmetros")
|
| 239 |
+
print(" 3. Integre os scripts em seu workflow de desenvolvimento")
|
| 240 |
+
|
| 241 |
+
# Pergunta sobre limpeza
|
| 242 |
+
response = input("\n🗑️ Deseja remover os arquivos de demonstração? (s/N): ")
|
| 243 |
+
if response.lower() in ['s', 'sim', 'y', 'yes']:
|
| 244 |
+
cleanup_demo_files()
|
| 245 |
+
|
| 246 |
+
return 0
|
| 247 |
+
|
| 248 |
+
except KeyboardInterrupt:
|
| 249 |
+
print("\n\n🛑 Demonstração interrompida pelo usuário")
|
| 250 |
+
return 1
|
| 251 |
+
|
| 252 |
+
except Exception as e:
|
| 253 |
+
print(f"\n❌ Erro durante demonstração: {str(e)}")
|
| 254 |
+
return 1
|
| 255 |
+
|
| 256 |
+
if __name__ == "__main__":
|
| 257 |
+
exit(main())
|
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Monitor de Logs em Tempo Real para Desenvolvedores
|
| 4 |
+
|
| 5 |
+
Este script monitora os logs em tempo real e alerta sobre
|
| 6 |
+
problemas críticos, erros de importação e problemas de performance.
|
| 7 |
+
|
| 8 |
+
Autor: Sistema de Análise de Mercado
|
| 9 |
+
Versão: 1.0.0
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import re
|
| 14 |
+
import time
|
| 15 |
+
import sqlite3
|
| 16 |
+
from datetime import datetime
|
| 17 |
+
from typing import Dict, List, Optional
|
| 18 |
+
import argparse
|
| 19 |
+
from collections import deque
|
| 20 |
+
|
| 21 |
+
class RealtimeLogMonitor:
|
| 22 |
+
"""Monitor de logs em tempo real"""
|
| 23 |
+
|
| 24 |
+
def __init__(self, log_db_path: str = None, alert_threshold: int = 5):
|
| 25 |
+
self.log_db_path = log_db_path or "logs/application.db"
|
| 26 |
+
self.alert_threshold = alert_threshold
|
| 27 |
+
self.last_check = datetime.now()
|
| 28 |
+
self.error_buffer = deque(maxlen=100) # Buffer para últimos 100 erros
|
| 29 |
+
|
| 30 |
+
# Padrões críticos para alertas
|
| 31 |
+
self.critical_patterns = {
|
| 32 |
+
'import_error': re.compile(r'ImportError|ModuleNotFoundError|not defined', re.IGNORECASE),
|
| 33 |
+
'server_error': re.compile(r'server.*error|connection.*failed|timeout', re.IGNORECASE),
|
| 34 |
+
'memory_error': re.compile(r'MemoryError|OutOfMemory|memory.*exceeded', re.IGNORECASE),
|
| 35 |
+
'gradio_error': re.compile(r'gradio.*error|interface.*failed', re.IGNORECASE),
|
| 36 |
+
'performance_critical': re.compile(r'took ([5-9]\d+|\d{3,})\s*(ms|seconds?)', re.IGNORECASE)
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
# Cores para output no terminal
|
| 40 |
+
self.colors = {
|
| 41 |
+
'red': '\033[91m',
|
| 42 |
+
'yellow': '\033[93m',
|
| 43 |
+
'green': '\033[92m',
|
| 44 |
+
'blue': '\033[94m',
|
| 45 |
+
'purple': '\033[95m',
|
| 46 |
+
'cyan': '\033[96m',
|
| 47 |
+
'white': '\033[97m',
|
| 48 |
+
'bold': '\033[1m',
|
| 49 |
+
'end': '\033[0m'
|
| 50 |
+
}
|
| 51 |
+
|
| 52 |
+
def connect_db(self) -> sqlite3.Connection:
|
| 53 |
+
"""Conecta ao banco de dados de logs"""
|
| 54 |
+
if not os.path.exists(self.log_db_path):
|
| 55 |
+
raise FileNotFoundError(f"Banco de dados de logs não encontrado: {self.log_db_path}")
|
| 56 |
+
return sqlite3.connect(self.log_db_path)
|
| 57 |
+
|
| 58 |
+
def colorize(self, text: str, color: str) -> str:
|
| 59 |
+
"""Adiciona cor ao texto"""
|
| 60 |
+
return f"{self.colors.get(color, '')}{text}{self.colors['end']}"
|
| 61 |
+
|
| 62 |
+
def check_new_logs(self) -> List[Dict]:
|
| 63 |
+
"""Verifica novos logs desde a última verificação"""
|
| 64 |
+
try:
|
| 65 |
+
conn = self.connect_db()
|
| 66 |
+
cursor = conn.cursor()
|
| 67 |
+
|
| 68 |
+
cursor.execute("""
|
| 69 |
+
SELECT timestamp, level, message, module
|
| 70 |
+
FROM logs
|
| 71 |
+
WHERE timestamp > ?
|
| 72 |
+
ORDER BY timestamp ASC
|
| 73 |
+
""", (self.last_check.isoformat(),))
|
| 74 |
+
|
| 75 |
+
new_logs = cursor.fetchall()
|
| 76 |
+
conn.close()
|
| 77 |
+
|
| 78 |
+
if new_logs:
|
| 79 |
+
self.last_check = datetime.fromisoformat(new_logs[-1][0])
|
| 80 |
+
|
| 81 |
+
return [{
|
| 82 |
+
'timestamp': log[0],
|
| 83 |
+
'level': log[1],
|
| 84 |
+
'message': log[2],
|
| 85 |
+
'module': log[3]
|
| 86 |
+
} for log in new_logs]
|
| 87 |
+
|
| 88 |
+
except Exception as e:
|
| 89 |
+
print(self.colorize(f"❌ Erro ao verificar logs: {str(e)}", 'red'))
|
| 90 |
+
return []
|
| 91 |
+
|
| 92 |
+
def analyze_log_entry(self, log: Dict) -> Optional[Dict]:
|
| 93 |
+
"""Analisa uma entrada de log para problemas críticos"""
|
| 94 |
+
message = log['message']
|
| 95 |
+
alerts = []
|
| 96 |
+
|
| 97 |
+
# Verifica padrões críticos
|
| 98 |
+
for pattern_name, pattern in self.critical_patterns.items():
|
| 99 |
+
if pattern.search(message):
|
| 100 |
+
severity = self._get_severity(pattern_name, message)
|
| 101 |
+
alerts.append({
|
| 102 |
+
'type': pattern_name,
|
| 103 |
+
'severity': severity,
|
| 104 |
+
'message': message,
|
| 105 |
+
'module': log['module'],
|
| 106 |
+
'timestamp': log['timestamp']
|
| 107 |
+
})
|
| 108 |
+
|
| 109 |
+
# Verifica nível de log crítico
|
| 110 |
+
if log['level'] in ['ERROR', 'CRITICAL']:
|
| 111 |
+
alerts.append({
|
| 112 |
+
'type': 'log_level_critical',
|
| 113 |
+
'severity': 'high' if log['level'] == 'CRITICAL' else 'medium',
|
| 114 |
+
'message': message,
|
| 115 |
+
'module': log['module'],
|
| 116 |
+
'timestamp': log['timestamp']
|
| 117 |
+
})
|
| 118 |
+
|
| 119 |
+
return alerts if alerts else None
|
| 120 |
+
|
| 121 |
+
def _get_severity(self, pattern_name: str, message: str) -> str:
|
| 122 |
+
"""Determina a severidade do alerta"""
|
| 123 |
+
high_severity = ['memory_error', 'server_error']
|
| 124 |
+
medium_severity = ['import_error', 'gradio_error']
|
| 125 |
+
|
| 126 |
+
if pattern_name in high_severity:
|
| 127 |
+
return 'high'
|
| 128 |
+
elif pattern_name in medium_severity:
|
| 129 |
+
return 'medium'
|
| 130 |
+
else:
|
| 131 |
+
return 'low'
|
| 132 |
+
|
| 133 |
+
def display_alert(self, alerts: List[Dict]):
|
| 134 |
+
"""Exibe alertas no terminal"""
|
| 135 |
+
for alert in alerts:
|
| 136 |
+
timestamp = datetime.fromisoformat(alert['timestamp']).strftime('%H:%M:%S')
|
| 137 |
+
|
| 138 |
+
# Escolhe cor baseada na severidade
|
| 139 |
+
if alert['severity'] == 'high':
|
| 140 |
+
color = 'red'
|
| 141 |
+
icon = '🚨'
|
| 142 |
+
elif alert['severity'] == 'medium':
|
| 143 |
+
color = 'yellow'
|
| 144 |
+
icon = '⚠️'
|
| 145 |
+
else:
|
| 146 |
+
color = 'cyan'
|
| 147 |
+
icon = 'ℹ️'
|
| 148 |
+
|
| 149 |
+
print(f"{icon} {self.colorize('[' + timestamp + ']', 'white')} "
|
| 150 |
+
f"{self.colorize(alert['type'].upper(), color)} "
|
| 151 |
+
f"em {self.colorize(alert['module'], 'blue')}")
|
| 152 |
+
|
| 153 |
+
# Exibe mensagem truncada
|
| 154 |
+
msg = alert['message'][:100] + '...' if len(alert['message']) > 100 else alert['message']
|
| 155 |
+
print(f" {self.colorize(msg, 'white')}")
|
| 156 |
+
print()
|
| 157 |
+
|
| 158 |
+
def display_stats(self, logs_checked: int, alerts_count: int):
|
| 159 |
+
"""Exibe estatísticas do monitoramento"""
|
| 160 |
+
current_time = datetime.now().strftime('%H:%M:%S')
|
| 161 |
+
print(f"\r{self.colorize('[' + current_time + ']', 'green')} "
|
| 162 |
+
f"Logs verificados: {logs_checked} | "
|
| 163 |
+
f"Alertas: {self.colorize(str(alerts_count), 'yellow' if alerts_count > 0 else 'green')}",
|
| 164 |
+
end='', flush=True)
|
| 165 |
+
|
| 166 |
+
def run_monitor(self, interval: int = 5, verbose: bool = False):
|
| 167 |
+
"""Executa o monitor em tempo real"""
|
| 168 |
+
print(self.colorize("🔍 Iniciando monitor de logs em tempo real...", 'green'))
|
| 169 |
+
print(f"📊 Intervalo de verificação: {interval}s")
|
| 170 |
+
print(f"📁 Banco de dados: {self.log_db_path}")
|
| 171 |
+
print(f"🚨 Limite de alertas: {self.alert_threshold}")
|
| 172 |
+
print("\n" + "="*60)
|
| 173 |
+
print(self.colorize("Monitor ativo - Pressione Ctrl+C para parar", 'bold'))
|
| 174 |
+
print("="*60 + "\n")
|
| 175 |
+
|
| 176 |
+
total_logs_checked = 0
|
| 177 |
+
total_alerts = 0
|
| 178 |
+
|
| 179 |
+
try:
|
| 180 |
+
while True:
|
| 181 |
+
new_logs = self.check_new_logs()
|
| 182 |
+
|
| 183 |
+
if new_logs:
|
| 184 |
+
total_logs_checked += len(new_logs)
|
| 185 |
+
|
| 186 |
+
for log in new_logs:
|
| 187 |
+
if verbose:
|
| 188 |
+
timestamp = datetime.fromisoformat(log['timestamp']).strftime('%H:%M:%S')
|
| 189 |
+
level_color = {
|
| 190 |
+
'ERROR': 'red',
|
| 191 |
+
'WARNING': 'yellow',
|
| 192 |
+
'INFO': 'green',
|
| 193 |
+
'DEBUG': 'cyan'
|
| 194 |
+
}.get(log['level'], 'white')
|
| 195 |
+
|
| 196 |
+
print(f"[{timestamp}] {self.colorize(log['level'], level_color)} "
|
| 197 |
+
f"{log['module']}: {log['message'][:80]}...")
|
| 198 |
+
|
| 199 |
+
alerts = self.analyze_log_entry(log)
|
| 200 |
+
if alerts:
|
| 201 |
+
total_alerts += len(alerts)
|
| 202 |
+
self.display_alert(alerts)
|
| 203 |
+
|
| 204 |
+
# Adiciona ao buffer de erros
|
| 205 |
+
for alert in alerts:
|
| 206 |
+
self.error_buffer.append(alert)
|
| 207 |
+
|
| 208 |
+
# Exibe estatísticas se não estiver em modo verbose
|
| 209 |
+
if not verbose:
|
| 210 |
+
self.display_stats(total_logs_checked, total_alerts)
|
| 211 |
+
|
| 212 |
+
time.sleep(interval)
|
| 213 |
+
|
| 214 |
+
except KeyboardInterrupt:
|
| 215 |
+
print(f"\n\n{self.colorize('🛑 Monitor interrompido pelo usuário', 'yellow')}")
|
| 216 |
+
print(f"📊 Resumo da sessão:")
|
| 217 |
+
print(f" Logs verificados: {total_logs_checked}")
|
| 218 |
+
print(f" Alertas gerados: {total_alerts}")
|
| 219 |
+
|
| 220 |
+
if self.error_buffer:
|
| 221 |
+
print(f"\n🔍 Últimos {len(self.error_buffer)} problemas detectados:")
|
| 222 |
+
for i, error in enumerate(list(self.error_buffer)[-5:], 1):
|
| 223 |
+
timestamp = datetime.fromisoformat(error['timestamp']).strftime('%H:%M:%S')
|
| 224 |
+
print(f" {i}. [{timestamp}] {error['type']} em {error['module']}")
|
| 225 |
+
|
| 226 |
+
except Exception as e:
|
| 227 |
+
print(f"\n{self.colorize(f'❌ Erro inesperado: {str(e)}', 'red')}")
|
| 228 |
+
|
| 229 |
+
def main():
|
| 230 |
+
"""Função principal do script"""
|
| 231 |
+
parser = argparse.ArgumentParser(description='Monitor de Logs em Tempo Real')
|
| 232 |
+
parser.add_argument('--interval', type=int, default=5, help='Intervalo de verificação em segundos (padrão: 5)')
|
| 233 |
+
parser.add_argument('--db-path', type=str, help='Caminho para o banco de dados de logs')
|
| 234 |
+
parser.add_argument('--threshold', type=int, default=5, help='Limite de alertas por verificação (padrão: 5)')
|
| 235 |
+
parser.add_argument('--verbose', action='store_true', help='Modo verboso (mostra todos os logs)')
|
| 236 |
+
|
| 237 |
+
args = parser.parse_args()
|
| 238 |
+
|
| 239 |
+
try:
|
| 240 |
+
monitor = RealtimeLogMonitor(args.db_path, args.threshold)
|
| 241 |
+
monitor.run_monitor(args.interval, args.verbose)
|
| 242 |
+
|
| 243 |
+
except FileNotFoundError as e:
|
| 244 |
+
print(f"❌ Arquivo não encontrado: {str(e)}")
|
| 245 |
+
print("💡 Certifique-se de que a aplicação está rodando e gerando logs.")
|
| 246 |
+
return 1
|
| 247 |
+
|
| 248 |
+
except Exception as e:
|
| 249 |
+
print(f"❌ Erro inesperado: {str(e)}")
|
| 250 |
+
return 1
|
| 251 |
+
|
| 252 |
+
if __name__ == "__main__":
|
| 253 |
+
exit(main())
|
|
@@ -0,0 +1,366 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
"""
|
| 3 |
+
Script de Configuração - Scripts de Análise de Logs
|
| 4 |
+
|
| 5 |
+
Este script configura e valida o ambiente para uso dos
|
| 6 |
+
scripts de análise de logs, criando aliases e verificando dependências.
|
| 7 |
+
|
| 8 |
+
Autor: Sistema de Análise de Mercado
|
| 9 |
+
Versão: 1.0.0
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import os
|
| 13 |
+
import sys
|
| 14 |
+
import json
|
| 15 |
+
import sqlite3
|
| 16 |
+
from pathlib import Path
|
| 17 |
+
from datetime import datetime
|
| 18 |
+
import argparse
|
| 19 |
+
|
| 20 |
+
class LogAnalysisSetup:
|
| 21 |
+
"""Configurador dos scripts de análise de logs"""
|
| 22 |
+
|
| 23 |
+
def __init__(self):
|
| 24 |
+
self.project_root = Path.cwd()
|
| 25 |
+
self.scripts_dir = self.project_root / "scripts" / "log_analysis"
|
| 26 |
+
self.logs_dir = self.project_root / "logs"
|
| 27 |
+
self.config_file = self.scripts_dir / "config.json"
|
| 28 |
+
|
| 29 |
+
def print_header(self, title: str):
|
| 30 |
+
"""Imprime cabeçalho formatado"""
|
| 31 |
+
print("\n" + "="*60)
|
| 32 |
+
print(f" {title}")
|
| 33 |
+
print("="*60)
|
| 34 |
+
|
| 35 |
+
def print_step(self, step: str, status: str = "INFO"):
|
| 36 |
+
"""Imprime passo com status"""
|
| 37 |
+
icons = {
|
| 38 |
+
"INFO": "ℹ️",
|
| 39 |
+
"SUCCESS": "✅",
|
| 40 |
+
"WARNING": "⚠️",
|
| 41 |
+
"ERROR": "❌"
|
| 42 |
+
}
|
| 43 |
+
print(f"{icons.get(status, 'ℹ️')} {step}")
|
| 44 |
+
|
| 45 |
+
def check_environment(self) -> bool:
|
| 46 |
+
"""Verifica o ambiente do projeto"""
|
| 47 |
+
self.print_header("VERIFICAÇÃO DO AMBIENTE")
|
| 48 |
+
|
| 49 |
+
checks = [
|
| 50 |
+
("Diretório do projeto", self.project_root.exists()),
|
| 51 |
+
("Diretório de scripts", self.scripts_dir.exists()),
|
| 52 |
+
("Diretório de logs", self.logs_dir.exists()),
|
| 53 |
+
("Arquivo app.py", (self.project_root / "app.py").exists()),
|
| 54 |
+
("Arquivo config.py", (self.project_root / "config.py").exists())
|
| 55 |
+
]
|
| 56 |
+
|
| 57 |
+
all_good = True
|
| 58 |
+
for check_name, result in checks:
|
| 59 |
+
status = "SUCCESS" if result else "ERROR"
|
| 60 |
+
self.print_step(f"{check_name}: {'OK' if result else 'FALHOU'}", status)
|
| 61 |
+
if not result:
|
| 62 |
+
all_good = False
|
| 63 |
+
|
| 64 |
+
return all_good
|
| 65 |
+
|
| 66 |
+
def check_log_database(self) -> dict:
|
| 67 |
+
"""Verifica o banco de dados de logs"""
|
| 68 |
+
self.print_header("VERIFICAÇÃO DO BANCO DE LOGS")
|
| 69 |
+
|
| 70 |
+
db_path = self.logs_dir / "application.db"
|
| 71 |
+
result = {
|
| 72 |
+
"exists": db_path.exists(),
|
| 73 |
+
"path": str(db_path),
|
| 74 |
+
"tables": [],
|
| 75 |
+
"log_count": 0,
|
| 76 |
+
"latest_log": None
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
if not result["exists"]:
|
| 80 |
+
self.print_step(f"Banco de dados não encontrado: {db_path}", "WARNING")
|
| 81 |
+
self.print_step("Execute a aplicação para gerar logs", "INFO")
|
| 82 |
+
return result
|
| 83 |
+
|
| 84 |
+
try:
|
| 85 |
+
conn = sqlite3.connect(db_path)
|
| 86 |
+
cursor = conn.cursor()
|
| 87 |
+
|
| 88 |
+
# Verifica tabelas
|
| 89 |
+
cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
|
| 90 |
+
result["tables"] = [row[0] for row in cursor.fetchall()]
|
| 91 |
+
|
| 92 |
+
# Conta logs se a tabela existe
|
| 93 |
+
if "logs" in result["tables"]:
|
| 94 |
+
cursor.execute("SELECT COUNT(*) FROM logs")
|
| 95 |
+
result["log_count"] = cursor.fetchone()[0]
|
| 96 |
+
|
| 97 |
+
# Pega o log mais recente
|
| 98 |
+
cursor.execute("SELECT timestamp, level, message FROM logs ORDER BY timestamp DESC LIMIT 1")
|
| 99 |
+
latest = cursor.fetchone()
|
| 100 |
+
if latest:
|
| 101 |
+
result["latest_log"] = {
|
| 102 |
+
"timestamp": latest[0],
|
| 103 |
+
"level": latest[1],
|
| 104 |
+
"message": latest[2][:100] + "..." if len(latest[2]) > 100 else latest[2]
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
conn.close()
|
| 108 |
+
|
| 109 |
+
self.print_step(f"Banco de dados encontrado: {db_path}", "SUCCESS")
|
| 110 |
+
self.print_step(f"Tabelas: {', '.join(result['tables'])}", "INFO")
|
| 111 |
+
self.print_step(f"Total de logs: {result['log_count']}", "INFO")
|
| 112 |
+
|
| 113 |
+
if result["latest_log"]:
|
| 114 |
+
self.print_step(f"Último log: {result['latest_log']['timestamp']} - {result['latest_log']['level']}", "INFO")
|
| 115 |
+
|
| 116 |
+
except Exception as e:
|
| 117 |
+
self.print_step(f"Erro ao verificar banco: {str(e)}", "ERROR")
|
| 118 |
+
result["error"] = str(e)
|
| 119 |
+
|
| 120 |
+
return result
|
| 121 |
+
|
| 122 |
+
def create_config(self, db_info: dict):
|
| 123 |
+
"""Cria arquivo de configuração"""
|
| 124 |
+
self.print_header("CRIAÇÃO DA CONFIGURAÇÃO")
|
| 125 |
+
|
| 126 |
+
config = {
|
| 127 |
+
"version": "1.0.0",
|
| 128 |
+
"created_at": datetime.now().isoformat(),
|
| 129 |
+
"project_root": str(self.project_root),
|
| 130 |
+
"database": {
|
| 131 |
+
"path": db_info["path"],
|
| 132 |
+
"exists": db_info["exists"],
|
| 133 |
+
"tables": db_info["tables"],
|
| 134 |
+
"last_check": datetime.now().isoformat()
|
| 135 |
+
},
|
| 136 |
+
"defaults": {
|
| 137 |
+
"analysis_hours": 24,
|
| 138 |
+
"monitor_interval": 5,
|
| 139 |
+
"alert_threshold": 5
|
| 140 |
+
},
|
| 141 |
+
"patterns": {
|
| 142 |
+
"critical_keywords": ["ERROR", "CRITICAL", "Exception", "Failed"],
|
| 143 |
+
"performance_threshold_ms": 1000,
|
| 144 |
+
"import_error_keywords": ["ImportError", "ModuleNotFoundError", "not defined"]
|
| 145 |
+
}
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
try:
|
| 149 |
+
with open(self.config_file, 'w', encoding='utf-8') as f:
|
| 150 |
+
json.dump(config, f, indent=2, ensure_ascii=False)
|
| 151 |
+
|
| 152 |
+
self.print_step(f"Configuração salva em: {self.config_file}", "SUCCESS")
|
| 153 |
+
return True
|
| 154 |
+
|
| 155 |
+
except Exception as e:
|
| 156 |
+
self.print_step(f"Erro ao salvar configuração: {str(e)}", "ERROR")
|
| 157 |
+
return False
|
| 158 |
+
|
| 159 |
+
def create_aliases(self):
|
| 160 |
+
"""Cria aliases para facilitar o uso"""
|
| 161 |
+
self.print_header("CRIAÇÃO DE ALIASES")
|
| 162 |
+
|
| 163 |
+
aliases_file = self.scripts_dir / "aliases.sh"
|
| 164 |
+
|
| 165 |
+
aliases_content = f"""#!/bin/bash
|
| 166 |
+
# Aliases para Scripts de Análise de Logs
|
| 167 |
+
# Gerado automaticamente em {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
|
| 168 |
+
|
| 169 |
+
# Diretório base do projeto
|
| 170 |
+
PROJECT_ROOT="{self.project_root}"
|
| 171 |
+
|
| 172 |
+
# Aliases principais
|
| 173 |
+
alias log-analyze="python $PROJECT_ROOT/scripts/log_analysis/advanced_log_analyzer.py"
|
| 174 |
+
alias log-monitor="python $PROJECT_ROOT/scripts/log_analysis/realtime_monitor.py"
|
| 175 |
+
alias log-demo="python $PROJECT_ROOT/scripts/log_analysis/demo.py"
|
| 176 |
+
|
| 177 |
+
# Aliases com parâmetros comuns
|
| 178 |
+
alias log-quick="python $PROJECT_ROOT/scripts/log_analysis/advanced_log_analyzer.py --hours 1 --quiet"
|
| 179 |
+
alias log-errors="python $PROJECT_ROOT/scripts/log_analysis/advanced_log_analyzer.py --hours 6 | grep -A10 'ERROS ENCONTRADOS'"
|
| 180 |
+
alias log-performance="python $PROJECT_ROOT/scripts/log_analysis/advanced_log_analyzer.py --hours 12 | grep -A10 'PROBLEMAS DE PERFORMANCE'"
|
| 181 |
+
alias log-watch="python $PROJECT_ROOT/scripts/log_analysis/realtime_monitor.py --verbose --interval 3"
|
| 182 |
+
|
| 183 |
+
# Aliases para relatórios
|
| 184 |
+
alias log-report-daily="python $PROJECT_ROOT/scripts/log_analysis/advanced_log_analyzer.py --hours 24 --output daily_report.txt"
|
| 185 |
+
alias log-report-weekly="python $PROJECT_ROOT/scripts/log_analysis/advanced_log_analyzer.py --hours 168 --output weekly_report.txt"
|
| 186 |
+
alias log-export-json="python $PROJECT_ROOT/scripts/log_analysis/advanced_log_analyzer.py --json logs_export.json --quiet"
|
| 187 |
+
|
| 188 |
+
echo "✅ Aliases de análise de logs carregados!"
|
| 189 |
+
echo "📚 Comandos disponíveis:"
|
| 190 |
+
echo " log-analyze - Análise completa (24h)"
|
| 191 |
+
echo " log-monitor - Monitor em tempo real"
|
| 192 |
+
echo " log-quick - Análise rápida (1h)"
|
| 193 |
+
echo " log-errors - Foca apenas em erros"
|
| 194 |
+
echo " log-performance- Problemas de performance"
|
| 195 |
+
echo " log-watch - Monitor detalhado"
|
| 196 |
+
echo " log-demo - Demonstração dos scripts"
|
| 197 |
+
echo ""
|
| 198 |
+
echo "💡 Para usar: source scripts/log_analysis/aliases.sh"
|
| 199 |
+
"""
|
| 200 |
+
|
| 201 |
+
try:
|
| 202 |
+
with open(aliases_file, 'w', encoding='utf-8') as f:
|
| 203 |
+
f.write(aliases_content)
|
| 204 |
+
|
| 205 |
+
# Torna o arquivo executável
|
| 206 |
+
aliases_file.chmod(0o755)
|
| 207 |
+
|
| 208 |
+
self.print_step(f"Aliases criados em: {aliases_file}", "SUCCESS")
|
| 209 |
+
self.print_step("Para usar: source scripts/log_analysis/aliases.sh", "INFO")
|
| 210 |
+
return True
|
| 211 |
+
|
| 212 |
+
except Exception as e:
|
| 213 |
+
self.print_step(f"Erro ao criar aliases: {str(e)}", "ERROR")
|
| 214 |
+
return False
|
| 215 |
+
|
| 216 |
+
def create_batch_scripts(self):
|
| 217 |
+
"""Cria scripts batch para Windows"""
|
| 218 |
+
self.print_header("CRIAÇÃO DE SCRIPTS BATCH (WINDOWS)")
|
| 219 |
+
|
| 220 |
+
batch_dir = self.scripts_dir / "batch"
|
| 221 |
+
batch_dir.mkdir(exist_ok=True)
|
| 222 |
+
|
| 223 |
+
scripts = {
|
| 224 |
+
"analyze.bat": f"@echo off\npython \"{self.project_root}\\scripts\\log_analysis\\advanced_log_analyzer.py\" %*",
|
| 225 |
+
"monitor.bat": f"@echo off\npython \"{self.project_root}\\scripts\\log_analysis\\realtime_monitor.py\" %*",
|
| 226 |
+
"demo.bat": f"@echo off\npython \"{self.project_root}\\scripts\\log_analysis\\demo.py\" %*",
|
| 227 |
+
"quick-check.bat": f"@echo off\npython \"{self.project_root}\\scripts\\log_analysis\\advanced_log_analyzer.py\" --hours 1 --quiet"
|
| 228 |
+
}
|
| 229 |
+
|
| 230 |
+
created_count = 0
|
| 231 |
+
for script_name, content in scripts.items():
|
| 232 |
+
script_path = batch_dir / script_name
|
| 233 |
+
try:
|
| 234 |
+
with open(script_path, 'w', encoding='utf-8') as f:
|
| 235 |
+
f.write(content)
|
| 236 |
+
created_count += 1
|
| 237 |
+
self.print_step(f"Criado: {script_name}", "SUCCESS")
|
| 238 |
+
except Exception as e:
|
| 239 |
+
self.print_step(f"Erro ao criar {script_name}: {str(e)}", "ERROR")
|
| 240 |
+
|
| 241 |
+
if created_count > 0:
|
| 242 |
+
self.print_step(f"Scripts batch criados em: {batch_dir}", "INFO")
|
| 243 |
+
self.print_step("Adicione o diretório ao PATH para usar globalmente", "INFO")
|
| 244 |
+
|
| 245 |
+
return created_count > 0
|
| 246 |
+
|
| 247 |
+
def validate_scripts(self):
|
| 248 |
+
"""Valida se os scripts principais existem e são executáveis"""
|
| 249 |
+
self.print_header("VALIDAÇÃO DOS SCRIPTS")
|
| 250 |
+
|
| 251 |
+
scripts = [
|
| 252 |
+
"advanced_log_analyzer.py",
|
| 253 |
+
"realtime_monitor.py",
|
| 254 |
+
"demo.py",
|
| 255 |
+
"setup.py"
|
| 256 |
+
]
|
| 257 |
+
|
| 258 |
+
all_valid = True
|
| 259 |
+
for script in scripts:
|
| 260 |
+
script_path = self.scripts_dir / script
|
| 261 |
+
exists = script_path.exists()
|
| 262 |
+
|
| 263 |
+
if exists:
|
| 264 |
+
# Tenta importar para verificar sintaxe
|
| 265 |
+
try:
|
| 266 |
+
import subprocess
|
| 267 |
+
result = subprocess.run(
|
| 268 |
+
[sys.executable, "-m", "py_compile", str(script_path)],
|
| 269 |
+
capture_output=True,
|
| 270 |
+
text=True
|
| 271 |
+
)
|
| 272 |
+
syntax_ok = result.returncode == 0
|
| 273 |
+
except:
|
| 274 |
+
syntax_ok = False
|
| 275 |
+
|
| 276 |
+
status = "SUCCESS" if syntax_ok else "WARNING"
|
| 277 |
+
self.print_step(f"{script}: {'OK' if syntax_ok else 'Erro de sintaxe'}", status)
|
| 278 |
+
|
| 279 |
+
if not syntax_ok:
|
| 280 |
+
all_valid = False
|
| 281 |
+
else:
|
| 282 |
+
self.print_step(f"{script}: Não encontrado", "ERROR")
|
| 283 |
+
all_valid = False
|
| 284 |
+
|
| 285 |
+
return all_valid
|
| 286 |
+
|
| 287 |
+
def run_setup(self, create_aliases: bool = True, create_batch: bool = True):
|
| 288 |
+
"""Executa configuração completa"""
|
| 289 |
+
self.print_header("CONFIGURAÇÃO DOS SCRIPTS DE ANÁLISE DE LOGS")
|
| 290 |
+
|
| 291 |
+
# Verifica ambiente
|
| 292 |
+
if not self.check_environment():
|
| 293 |
+
self.print_step("Ambiente inválido. Não é possível continuar.", "ERROR")
|
| 294 |
+
return False
|
| 295 |
+
|
| 296 |
+
# Verifica banco de logs
|
| 297 |
+
db_info = self.check_log_database()
|
| 298 |
+
|
| 299 |
+
# Cria configuração
|
| 300 |
+
if not self.create_config(db_info):
|
| 301 |
+
self.print_step("Falha ao criar configuração", "ERROR")
|
| 302 |
+
return False
|
| 303 |
+
|
| 304 |
+
# Valida scripts
|
| 305 |
+
if not self.validate_scripts():
|
| 306 |
+
self.print_step("Alguns scripts têm problemas", "WARNING")
|
| 307 |
+
|
| 308 |
+
# Cria aliases se solicitado
|
| 309 |
+
if create_aliases:
|
| 310 |
+
self.create_aliases()
|
| 311 |
+
|
| 312 |
+
# Cria scripts batch se solicitado
|
| 313 |
+
if create_batch and os.name == 'nt': # Windows
|
| 314 |
+
self.create_batch_scripts()
|
| 315 |
+
|
| 316 |
+
self.print_header("CONFIGURAÇÃO CONCLUÍDA")
|
| 317 |
+
self.print_step("Scripts de análise configurados com sucesso!", "SUCCESS")
|
| 318 |
+
|
| 319 |
+
# Instruções finais
|
| 320 |
+
print("\n📋 Próximos passos:")
|
| 321 |
+
print(" 1. Execute 'python scripts/log_analysis/demo.py' para ver exemplos")
|
| 322 |
+
print(" 2. Use 'python scripts/log_analysis/advanced_log_analyzer.py --help' para ajuda")
|
| 323 |
+
print(" 3. Inicie o monitor com 'python scripts/log_analysis/realtime_monitor.py'")
|
| 324 |
+
|
| 325 |
+
if create_aliases:
|
| 326 |
+
print(" 4. Carregue os aliases: source scripts/log_analysis/aliases.sh")
|
| 327 |
+
|
| 328 |
+
if db_info["log_count"] == 0:
|
| 329 |
+
print("\n⚠️ Aviso: Nenhum log encontrado. Execute a aplicação para gerar dados.")
|
| 330 |
+
|
| 331 |
+
return True
|
| 332 |
+
|
| 333 |
+
def main():
|
| 334 |
+
"""Função principal"""
|
| 335 |
+
parser = argparse.ArgumentParser(description='Configuração dos Scripts de Análise de Logs')
|
| 336 |
+
parser.add_argument('--no-aliases', action='store_true', help='Não criar aliases')
|
| 337 |
+
parser.add_argument('--no-batch', action='store_true', help='Não criar scripts batch')
|
| 338 |
+
parser.add_argument('--validate-only', action='store_true', help='Apenas validar ambiente')
|
| 339 |
+
|
| 340 |
+
args = parser.parse_args()
|
| 341 |
+
|
| 342 |
+
setup = LogAnalysisSetup()
|
| 343 |
+
|
| 344 |
+
if args.validate_only:
|
| 345 |
+
# Apenas validação
|
| 346 |
+
env_ok = setup.check_environment()
|
| 347 |
+
db_info = setup.check_log_database()
|
| 348 |
+
scripts_ok = setup.validate_scripts()
|
| 349 |
+
|
| 350 |
+
if env_ok and scripts_ok:
|
| 351 |
+
print("\n✅ Ambiente validado com sucesso!")
|
| 352 |
+
return 0
|
| 353 |
+
else:
|
| 354 |
+
print("\n❌ Problemas encontrados na validação")
|
| 355 |
+
return 1
|
| 356 |
+
else:
|
| 357 |
+
# Configuração completa
|
| 358 |
+
success = setup.run_setup(
|
| 359 |
+
create_aliases=not args.no_aliases,
|
| 360 |
+
create_batch=not args.no_batch
|
| 361 |
+
)
|
| 362 |
+
|
| 363 |
+
return 0 if success else 1
|
| 364 |
+
|
| 365 |
+
if __name__ == "__main__":
|
| 366 |
+
exit(main())
|
|
@@ -0,0 +1,404 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Test Suite Documentation
|
| 2 |
+
|
| 3 |
+
Este diretório contém a suíte completa de testes para o projeto Hugging Face Spaces. O sistema de testes foi projetado seguindo as melhores práticas de desenvolvimento Python, utilizando pytest como framework principal.
|
| 4 |
+
|
| 5 |
+
## 📁 Estrutura de Testes
|
| 6 |
+
|
| 7 |
+
```
|
| 8 |
+
tests/
|
| 9 |
+
├── README.md # Esta documentação
|
| 10 |
+
├── conftest.py # Configurações globais e fixtures
|
| 11 |
+
├── test_patterns.py # Exemplos de padrões de teste
|
| 12 |
+
├── data/ # Dados de teste
|
| 13 |
+
│ └── sample_market_data.json # Dados de mercado para testes
|
| 14 |
+
├── unit/ # Testes unitários
|
| 15 |
+
│ ├── conftest.py # Fixtures específicas para testes unitários
|
| 16 |
+
│ ├── test_advanced_market_processing.py
|
| 17 |
+
│ └── test_voting_system.py
|
| 18 |
+
└── integration/ # Testes de integração
|
| 19 |
+
├── conftest.py # Fixtures específicas para testes de integração
|
| 20 |
+
└── test_market_analysis_integration.py
|
| 21 |
+
```
|
| 22 |
+
|
| 23 |
+
## 🚀 Como Executar os Testes
|
| 24 |
+
|
| 25 |
+
### Usando o Makefile (Recomendado)
|
| 26 |
+
|
| 27 |
+
```bash
|
| 28 |
+
# Executar todos os testes
|
| 29 |
+
make test
|
| 30 |
+
|
| 31 |
+
# Executar apenas testes unitários
|
| 32 |
+
make test-unit
|
| 33 |
+
|
| 34 |
+
# Executar apenas testes de integração
|
| 35 |
+
make test-integration
|
| 36 |
+
|
| 37 |
+
# Executar testes com relatório de cobertura
|
| 38 |
+
make test-coverage
|
| 39 |
+
|
| 40 |
+
# Executar apenas testes rápidos (excluir testes lentos)
|
| 41 |
+
make test-fast
|
| 42 |
+
|
| 43 |
+
# Executar testes em paralelo
|
| 44 |
+
make test-parallel
|
| 45 |
+
|
| 46 |
+
# Executar testes em modo debug
|
| 47 |
+
make test-debug
|
| 48 |
+
```
|
| 49 |
+
|
| 50 |
+
### Usando pytest diretamente
|
| 51 |
+
|
| 52 |
+
```bash
|
| 53 |
+
# Executar todos os testes
|
| 54 |
+
pytest -v
|
| 55 |
+
|
| 56 |
+
# Executar testes específicos por marcador
|
| 57 |
+
pytest -m unit # Apenas testes unitários
|
| 58 |
+
pytest -m integration # Apenas testes de integração
|
| 59 |
+
pytest -m "not slow" # Excluir testes lentos
|
| 60 |
+
pytest -m performance # Apenas testes de performance
|
| 61 |
+
|
| 62 |
+
# Executar testes com cobertura
|
| 63 |
+
pytest --cov=src --cov-report=html
|
| 64 |
+
|
| 65 |
+
# Executar testes em paralelo
|
| 66 |
+
pytest -n auto
|
| 67 |
+
|
| 68 |
+
# Executar testes específicos
|
| 69 |
+
pytest tests/unit/test_voting_system.py::test_majority_voting
|
| 70 |
+
```
|
| 71 |
+
|
| 72 |
+
## 🏷️ Marcadores de Teste
|
| 73 |
+
|
| 74 |
+
O sistema utiliza marcadores para categorizar e filtrar testes:
|
| 75 |
+
|
| 76 |
+
- `unit`: Testes unitários
|
| 77 |
+
- `integration`: Testes de integração
|
| 78 |
+
- `slow`: Testes que demoram mais para executar
|
| 79 |
+
- `api`: Testes de API
|
| 80 |
+
- `ui`: Testes de interface do usuário
|
| 81 |
+
- `smoke`: Testes de fumaça (básicos)
|
| 82 |
+
- `regression`: Testes de regressão
|
| 83 |
+
- `performance`: Testes de performance
|
| 84 |
+
- `security`: Testes de segurança
|
| 85 |
+
|
| 86 |
+
### Exemplo de uso de marcadores:
|
| 87 |
+
|
| 88 |
+
```python
|
| 89 |
+
import pytest
|
| 90 |
+
|
| 91 |
+
@pytest.mark.unit
|
| 92 |
+
def test_basic_functionality():
|
| 93 |
+
assert True
|
| 94 |
+
|
| 95 |
+
@pytest.mark.slow
|
| 96 |
+
@pytest.mark.performance
|
| 97 |
+
def test_large_dataset_processing():
|
| 98 |
+
# Teste que demora mais tempo
|
| 99 |
+
pass
|
| 100 |
+
|
| 101 |
+
@pytest.mark.integration
|
| 102 |
+
@pytest.mark.api
|
| 103 |
+
def test_api_integration():
|
| 104 |
+
# Teste de integração com API
|
| 105 |
+
pass
|
| 106 |
+
```
|
| 107 |
+
|
| 108 |
+
## 🔧 Configuração
|
| 109 |
+
|
| 110 |
+
### pytest.ini
|
| 111 |
+
|
| 112 |
+
O arquivo `pytest.ini` na raiz do projeto contém as configurações principais:
|
| 113 |
+
|
| 114 |
+
- Descoberta automática de testes
|
| 115 |
+
- Configuração de relatórios de cobertura
|
| 116 |
+
- Definição de marcadores
|
| 117 |
+
- Configuração de timeout
|
| 118 |
+
- Filtros de warnings
|
| 119 |
+
|
| 120 |
+
### conftest.py
|
| 121 |
+
|
| 122 |
+
Cada nível possui seu próprio `conftest.py`:
|
| 123 |
+
|
| 124 |
+
- **Global** (`tests/conftest.py`): Fixtures compartilhadas por todos os testes
|
| 125 |
+
- **Unit** (`tests/unit/conftest.py`): Fixtures específicas para testes unitários
|
| 126 |
+
- **Integration** (`tests/integration/conftest.py`): Fixtures para testes de integração
|
| 127 |
+
|
| 128 |
+
## 📊 Fixtures Disponíveis
|
| 129 |
+
|
| 130 |
+
### Fixtures Globais
|
| 131 |
+
|
| 132 |
+
- `project_root`: Caminho para a raiz do projeto
|
| 133 |
+
- `test_data`: Dados de teste carregados
|
| 134 |
+
- `temp_dir`: Diretório temporário para testes
|
| 135 |
+
- `mock_db`: Mock do banco de dados
|
| 136 |
+
- `sample_market_data`: Dados de mercado de exemplo
|
| 137 |
+
- `mock_gradio_interface`: Mock da interface Gradio
|
| 138 |
+
- `mock_transformers`: Mock da biblioteca transformers
|
| 139 |
+
- `test_env_vars`: Variáveis de ambiente para testes
|
| 140 |
+
- `caplog_setup`: Configuração de captura de logs
|
| 141 |
+
|
| 142 |
+
### Fixtures de Testes Unitários
|
| 143 |
+
|
| 144 |
+
- `mock_logger`: Mock do sistema de logging
|
| 145 |
+
- `mock_config`: Mock da configuração
|
| 146 |
+
- `mock_market_processor`: Mock do processador de mercado
|
| 147 |
+
- `mock_voting_strategy`: Mock da estratégia de votação
|
| 148 |
+
- `mock_sentiment_analyzer`: Mock do analisador de sentimento
|
| 149 |
+
- `mock_fibonacci_analyzer`: Mock do analisador de Fibonacci
|
| 150 |
+
- `sample_price_data`: Dados de preço para testes
|
| 151 |
+
- `mock_database_logger`: Mock do logger de banco de dados
|
| 152 |
+
|
| 153 |
+
### Fixtures de Testes de Integração
|
| 154 |
+
|
| 155 |
+
- `temp_db`: Banco de dados SQLite temporário
|
| 156 |
+
- `db_connection`: Conexão com banco de dados de teste
|
| 157 |
+
- `populated_db`: Banco de dados populado com dados de teste
|
| 158 |
+
- `mock_yfinance`: Mock da API yfinance
|
| 159 |
+
- `mock_requests`: Mock da biblioteca requests
|
| 160 |
+
- `integration_config`: Configuração para testes de integração
|
| 161 |
+
- `temp_cache_dir`: Diretório de cache temporário
|
| 162 |
+
|
| 163 |
+
## 📈 Relatórios de Cobertura
|
| 164 |
+
|
| 165 |
+
Os relatórios de cobertura são gerados em múltiplos formatos:
|
| 166 |
+
|
| 167 |
+
- **HTML**: `htmlcov/index.html` - Relatório visual detalhado
|
| 168 |
+
- **XML**: `coverage.xml` - Para integração com CI/CD
|
| 169 |
+
- **Terminal**: Exibido diretamente no terminal
|
| 170 |
+
|
| 171 |
+
### Visualizar relatório HTML:
|
| 172 |
+
|
| 173 |
+
```bash
|
| 174 |
+
# Gerar e abrir relatório
|
| 175 |
+
make test-coverage
|
| 176 |
+
open htmlcov/index.html # macOS
|
| 177 |
+
start htmlcov/index.html # Windows
|
| 178 |
+
```
|
| 179 |
+
|
| 180 |
+
## 🔍 Padrões de Teste
|
| 181 |
+
|
| 182 |
+
O arquivo `test_patterns.py` demonstra padrões e melhores práticas:
|
| 183 |
+
|
| 184 |
+
### 1. Testes Unitários Básicos
|
| 185 |
+
|
| 186 |
+
```python
|
| 187 |
+
def test_basic_functionality():
|
| 188 |
+
"""Teste básico de funcionalidade."""
|
| 189 |
+
result = some_function()
|
| 190 |
+
assert result is not None
|
| 191 |
+
assert isinstance(result, expected_type)
|
| 192 |
+
```
|
| 193 |
+
|
| 194 |
+
### 2. Uso de Fixtures
|
| 195 |
+
|
| 196 |
+
```python
|
| 197 |
+
def test_with_fixture(sample_data):
|
| 198 |
+
"""Teste usando fixture."""
|
| 199 |
+
processor = DataProcessor()
|
| 200 |
+
result = processor.process(sample_data)
|
| 201 |
+
assert result.is_valid
|
| 202 |
+
```
|
| 203 |
+
|
| 204 |
+
### 3. Mocking
|
| 205 |
+
|
| 206 |
+
```python
|
| 207 |
+
@patch('module.external_service')
|
| 208 |
+
def test_with_mock(mock_service):
|
| 209 |
+
"""Teste com mock de serviço externo."""
|
| 210 |
+
mock_service.return_value = expected_response
|
| 211 |
+
result = function_that_uses_service()
|
| 212 |
+
assert result == expected_result
|
| 213 |
+
mock_service.assert_called_once()
|
| 214 |
+
```
|
| 215 |
+
|
| 216 |
+
### 4. Testes Parametrizados
|
| 217 |
+
|
| 218 |
+
```python
|
| 219 |
+
@pytest.mark.parametrize("input_value,expected", [
|
| 220 |
+
(1, 2),
|
| 221 |
+
(2, 4),
|
| 222 |
+
(3, 6),
|
| 223 |
+
])
|
| 224 |
+
def test_multiplication(input_value, expected):
|
| 225 |
+
"""Teste parametrizado."""
|
| 226 |
+
assert multiply_by_two(input_value) == expected
|
| 227 |
+
```
|
| 228 |
+
|
| 229 |
+
### 5. Tratamento de Exceções
|
| 230 |
+
|
| 231 |
+
```python
|
| 232 |
+
def test_exception_handling():
|
| 233 |
+
"""Teste de tratamento de exceções."""
|
| 234 |
+
with pytest.raises(ValueError, match="Invalid input"):
|
| 235 |
+
function_that_should_raise(invalid_input)
|
| 236 |
+
```
|
| 237 |
+
|
| 238 |
+
### 6. Testes Assíncronos
|
| 239 |
+
|
| 240 |
+
```python
|
| 241 |
+
@pytest.mark.asyncio
|
| 242 |
+
async def test_async_function():
|
| 243 |
+
"""Teste de função assíncrona."""
|
| 244 |
+
result = await async_function()
|
| 245 |
+
assert result is not None
|
| 246 |
+
```
|
| 247 |
+
|
| 248 |
+
## 🚀 Performance e Benchmarking
|
| 249 |
+
|
| 250 |
+
### Testes de Performance
|
| 251 |
+
|
| 252 |
+
```python
|
| 253 |
+
@pytest.mark.performance
|
| 254 |
+
def test_performance_benchmark(benchmark):
|
| 255 |
+
"""Teste de benchmark de performance."""
|
| 256 |
+
result = benchmark(expensive_function, large_dataset)
|
| 257 |
+
assert result is not None
|
| 258 |
+
```
|
| 259 |
+
|
| 260 |
+
### Monitoramento de Memória
|
| 261 |
+
|
| 262 |
+
```python
|
| 263 |
+
@pytest.mark.performance
|
| 264 |
+
def test_memory_usage():
|
| 265 |
+
"""Teste de uso de memória."""
|
| 266 |
+
import psutil
|
| 267 |
+
import os
|
| 268 |
+
|
| 269 |
+
process = psutil.Process(os.getpid())
|
| 270 |
+
initial_memory = process.memory_info().rss
|
| 271 |
+
|
| 272 |
+
# Executar operação que consome memória
|
| 273 |
+
result = memory_intensive_operation()
|
| 274 |
+
|
| 275 |
+
final_memory = process.memory_info().rss
|
| 276 |
+
memory_increase = final_memory - initial_memory
|
| 277 |
+
|
| 278 |
+
# Verificar se o aumento de memória está dentro do esperado
|
| 279 |
+
assert memory_increase < 100 * 1024 * 1024 # 100MB
|
| 280 |
+
```
|
| 281 |
+
|
| 282 |
+
## 🔒 Testes de Segurança
|
| 283 |
+
|
| 284 |
+
```python
|
| 285 |
+
@pytest.mark.security
|
| 286 |
+
def test_input_validation():
|
| 287 |
+
"""Teste de validação de entrada."""
|
| 288 |
+
malicious_inputs = [
|
| 289 |
+
"<script>alert('xss')</script>",
|
| 290 |
+
"'; DROP TABLE users; --",
|
| 291 |
+
"../../../etc/passwd",
|
| 292 |
+
]
|
| 293 |
+
|
| 294 |
+
for malicious_input in malicious_inputs:
|
| 295 |
+
with pytest.raises((ValueError, SecurityError)):
|
| 296 |
+
process_user_input(malicious_input)
|
| 297 |
+
```
|
| 298 |
+
|
| 299 |
+
## 📝 Melhores Práticas
|
| 300 |
+
|
| 301 |
+
### 1. Nomenclatura
|
| 302 |
+
|
| 303 |
+
- Arquivos de teste: `test_*.py` ou `*_test.py`
|
| 304 |
+
- Funções de teste: `test_*`
|
| 305 |
+
- Classes de teste: `Test*`
|
| 306 |
+
- Fixtures: nomes descritivos sem prefixo `test_`
|
| 307 |
+
|
| 308 |
+
### 2. Organização
|
| 309 |
+
|
| 310 |
+
- Um arquivo de teste por módulo
|
| 311 |
+
- Agrupar testes relacionados em classes
|
| 312 |
+
- Usar fixtures para setup/teardown
|
| 313 |
+
- Manter testes independentes
|
| 314 |
+
|
| 315 |
+
### 3. Assertions
|
| 316 |
+
|
| 317 |
+
```python
|
| 318 |
+
# Bom: específico e claro
|
| 319 |
+
assert result.status == "success"
|
| 320 |
+
assert len(result.items) == 3
|
| 321 |
+
assert result.total > 0
|
| 322 |
+
|
| 323 |
+
# Evitar: muito genérico
|
| 324 |
+
assert result
|
| 325 |
+
```
|
| 326 |
+
|
| 327 |
+
### 4. Documentação
|
| 328 |
+
|
| 329 |
+
```python
|
| 330 |
+
def test_complex_scenario():
|
| 331 |
+
"""Teste cenário complexo de processamento de dados.
|
| 332 |
+
|
| 333 |
+
Este teste verifica se o sistema consegue processar
|
| 334 |
+
corretamente um dataset com múltiplas anomalias.
|
| 335 |
+
"""
|
| 336 |
+
# Given: dados com anomalias
|
| 337 |
+
data = create_anomalous_dataset()
|
| 338 |
+
|
| 339 |
+
# When: processamento é executado
|
| 340 |
+
result = processor.process(data)
|
| 341 |
+
|
| 342 |
+
# Then: anomalias são detectadas e tratadas
|
| 343 |
+
assert result.anomalies_detected > 0
|
| 344 |
+
assert result.status == "processed_with_warnings"
|
| 345 |
+
```
|
| 346 |
+
|
| 347 |
+
## 🔧 Troubleshooting
|
| 348 |
+
|
| 349 |
+
### Problemas Comuns
|
| 350 |
+
|
| 351 |
+
1. **Testes lentos**: Use marcador `@pytest.mark.slow` e execute com `make test-fast`
|
| 352 |
+
2. **Falhas intermitentes**: Verifique dependências externas e use mocks
|
| 353 |
+
3. **Problemas de importação**: Verifique `sys.path` no `conftest.py`
|
| 354 |
+
4. **Fixtures não encontradas**: Verifique se estão no `conftest.py` correto
|
| 355 |
+
|
| 356 |
+
### Debug de Testes
|
| 357 |
+
|
| 358 |
+
```bash
|
| 359 |
+
# Executar com output detalhado
|
| 360 |
+
pytest -v -s
|
| 361 |
+
|
| 362 |
+
# Executar com debugger
|
| 363 |
+
pytest --pdb
|
| 364 |
+
|
| 365 |
+
# Executar teste específico com debug
|
| 366 |
+
pytest tests/unit/test_module.py::test_function -v -s --pdb
|
| 367 |
+
```
|
| 368 |
+
|
| 369 |
+
### Logs durante Testes
|
| 370 |
+
|
| 371 |
+
```python
|
| 372 |
+
import logging
|
| 373 |
+
|
| 374 |
+
def test_with_logging(caplog):
|
| 375 |
+
"""Teste com captura de logs."""
|
| 376 |
+
with caplog.at_level(logging.INFO):
|
| 377 |
+
function_that_logs()
|
| 378 |
+
|
| 379 |
+
assert "Expected log message" in caplog.text
|
| 380 |
+
assert caplog.records[0].levelname == "INFO"
|
| 381 |
+
```
|
| 382 |
+
|
| 383 |
+
## 📚 Recursos Adicionais
|
| 384 |
+
|
| 385 |
+
- [Documentação do pytest](https://docs.pytest.org/)
|
| 386 |
+
- [pytest-cov](https://pytest-cov.readthedocs.io/)
|
| 387 |
+
- [pytest-mock](https://pytest-mock.readthedocs.io/)
|
| 388 |
+
- [pytest-benchmark](https://pytest-benchmark.readthedocs.io/)
|
| 389 |
+
- [Factory Boy](https://factoryboy.readthedocs.io/)
|
| 390 |
+
- [Faker](https://faker.readthedocs.io/)
|
| 391 |
+
|
| 392 |
+
## 🤝 Contribuindo
|
| 393 |
+
|
| 394 |
+
Ao adicionar novos testes:
|
| 395 |
+
|
| 396 |
+
1. Siga os padrões estabelecidos
|
| 397 |
+
2. Adicione marcadores apropriados
|
| 398 |
+
3. Documente testes complexos
|
| 399 |
+
4. Mantenha cobertura > 80%
|
| 400 |
+
5. Execute `make pre-commit` antes de commitar
|
| 401 |
+
|
| 402 |
+
---
|
| 403 |
+
|
| 404 |
+
**Nota**: Esta documentação é mantida junto com o código. Mantenha-a atualizada conforme o sistema evolui.
|
|
@@ -0,0 +1,136 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Global pytest configuration and fixtures."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import sys
|
| 5 |
+
import pytest
|
| 6 |
+
import tempfile
|
| 7 |
+
import shutil
|
| 8 |
+
from pathlib import Path
|
| 9 |
+
from unittest.mock import Mock, patch
|
| 10 |
+
|
| 11 |
+
# Add src to Python path
|
| 12 |
+
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
|
| 13 |
+
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
@pytest.fixture(scope="session")
|
| 17 |
+
def project_root():
|
| 18 |
+
"""Return the project root directory."""
|
| 19 |
+
return Path(__file__).parent.parent
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
@pytest.fixture(scope="session")
|
| 23 |
+
def test_data_dir(project_root):
|
| 24 |
+
"""Return the test data directory."""
|
| 25 |
+
return project_root / "tests" / "data"
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
@pytest.fixture
|
| 29 |
+
def temp_dir():
|
| 30 |
+
"""Create a temporary directory for tests."""
|
| 31 |
+
temp_path = tempfile.mkdtemp()
|
| 32 |
+
yield Path(temp_path)
|
| 33 |
+
shutil.rmtree(temp_path, ignore_errors=True)
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
@pytest.fixture
|
| 37 |
+
def mock_database():
|
| 38 |
+
"""Mock database connection."""
|
| 39 |
+
with patch('sqlite3.connect') as mock_connect:
|
| 40 |
+
mock_db = Mock()
|
| 41 |
+
mock_cursor = Mock()
|
| 42 |
+
mock_db.cursor.return_value = mock_cursor
|
| 43 |
+
mock_connect.return_value = mock_db
|
| 44 |
+
yield mock_db, mock_cursor
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
@pytest.fixture
|
| 48 |
+
def sample_market_data():
|
| 49 |
+
"""Sample market data for testing."""
|
| 50 |
+
return {
|
| 51 |
+
'symbol': 'AAPL',
|
| 52 |
+
'price': 150.0,
|
| 53 |
+
'volume': 1000000,
|
| 54 |
+
'timestamp': '2024-01-01T10:00:00Z',
|
| 55 |
+
'high': 155.0,
|
| 56 |
+
'low': 145.0,
|
| 57 |
+
'open': 148.0,
|
| 58 |
+
'close': 150.0
|
| 59 |
+
}
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
@pytest.fixture
|
| 63 |
+
def mock_gradio_interface():
|
| 64 |
+
"""Mock Gradio interface for UI tests."""
|
| 65 |
+
with patch('gradio.Interface') as mock_interface:
|
| 66 |
+
mock_instance = Mock()
|
| 67 |
+
mock_interface.return_value = mock_instance
|
| 68 |
+
yield mock_instance
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
@pytest.fixture
|
| 72 |
+
def mock_transformers():
|
| 73 |
+
"""Mock transformers library."""
|
| 74 |
+
with patch('transformers.pipeline') as mock_pipeline:
|
| 75 |
+
mock_model = Mock()
|
| 76 |
+
mock_model.return_value = [{'label': 'POSITIVE', 'score': 0.9}]
|
| 77 |
+
mock_pipeline.return_value = mock_model
|
| 78 |
+
yield mock_model
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@pytest.fixture(autouse=True)
|
| 82 |
+
def setup_test_environment(monkeypatch):
|
| 83 |
+
"""Setup test environment variables."""
|
| 84 |
+
monkeypatch.setenv('TESTING', 'true')
|
| 85 |
+
monkeypatch.setenv('LOG_LEVEL', 'DEBUG')
|
| 86 |
+
monkeypatch.setenv('DATABASE_URL', ':memory:')
|
| 87 |
+
|
| 88 |
+
|
| 89 |
+
@pytest.fixture
|
| 90 |
+
def capture_logs(caplog):
|
| 91 |
+
"""Capture logs during tests."""
|
| 92 |
+
import logging
|
| 93 |
+
caplog.set_level(logging.DEBUG)
|
| 94 |
+
return caplog
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def pytest_configure(config):
|
| 98 |
+
"""Configure pytest."""
|
| 99 |
+
# Create reports directory if it doesn't exist
|
| 100 |
+
reports_dir = Path("reports")
|
| 101 |
+
reports_dir.mkdir(exist_ok=True)
|
| 102 |
+
|
| 103 |
+
# Create test data directory if it doesn't exist
|
| 104 |
+
test_data_dir = Path("tests/data")
|
| 105 |
+
test_data_dir.mkdir(parents=True, exist_ok=True)
|
| 106 |
+
|
| 107 |
+
|
| 108 |
+
def pytest_collection_modifyitems(config, items):
|
| 109 |
+
"""Modify test collection."""
|
| 110 |
+
# Add markers based on test file location
|
| 111 |
+
for item in items:
|
| 112 |
+
if "unit" in str(item.fspath):
|
| 113 |
+
item.add_marker(pytest.mark.unit)
|
| 114 |
+
elif "integration" in str(item.fspath):
|
| 115 |
+
item.add_marker(pytest.mark.integration)
|
| 116 |
+
|
| 117 |
+
# Mark slow tests
|
| 118 |
+
if "slow" in item.name or "performance" in item.name:
|
| 119 |
+
item.add_marker(pytest.mark.slow)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
def pytest_runtest_setup(item):
|
| 123 |
+
"""Setup for each test."""
|
| 124 |
+
# Skip slow tests unless explicitly requested
|
| 125 |
+
if "slow" in item.keywords and not item.config.getoption("--runslow", default=False):
|
| 126 |
+
pytest.skip("need --runslow option to run")
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
def pytest_addoption(parser):
|
| 130 |
+
"""Add custom command line options."""
|
| 131 |
+
parser.addoption(
|
| 132 |
+
"--runslow", action="store_true", default=False, help="run slow tests"
|
| 133 |
+
)
|
| 134 |
+
parser.addoption(
|
| 135 |
+
"--runintegration", action="store_true", default=False, help="run integration tests"
|
| 136 |
+
)
|
|
@@ -0,0 +1,175 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Integration tests configuration and fixtures."""
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
import sqlite3
|
| 5 |
+
import tempfile
|
| 6 |
+
import os
|
| 7 |
+
from pathlib import Path
|
| 8 |
+
from unittest.mock import patch
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
@pytest.fixture(scope="session")
|
| 12 |
+
def test_database():
|
| 13 |
+
"""Create a test database for integration tests."""
|
| 14 |
+
# Create temporary database file
|
| 15 |
+
db_fd, db_path = tempfile.mkstemp(suffix='.db')
|
| 16 |
+
os.close(db_fd)
|
| 17 |
+
|
| 18 |
+
# Initialize database
|
| 19 |
+
conn = sqlite3.connect(db_path)
|
| 20 |
+
cursor = conn.cursor()
|
| 21 |
+
|
| 22 |
+
# Create tables (simplified schema)
|
| 23 |
+
cursor.execute("""
|
| 24 |
+
CREATE TABLE IF NOT EXISTS logs (
|
| 25 |
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 26 |
+
timestamp TEXT NOT NULL,
|
| 27 |
+
level TEXT NOT NULL,
|
| 28 |
+
message TEXT NOT NULL,
|
| 29 |
+
module TEXT,
|
| 30 |
+
function TEXT,
|
| 31 |
+
line_number INTEGER
|
| 32 |
+
)
|
| 33 |
+
""")
|
| 34 |
+
|
| 35 |
+
cursor.execute("""
|
| 36 |
+
CREATE TABLE IF NOT EXISTS market_data (
|
| 37 |
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 38 |
+
symbol TEXT NOT NULL,
|
| 39 |
+
timestamp TEXT NOT NULL,
|
| 40 |
+
price REAL NOT NULL,
|
| 41 |
+
volume INTEGER,
|
| 42 |
+
high REAL,
|
| 43 |
+
low REAL,
|
| 44 |
+
open_price REAL,
|
| 45 |
+
close_price REAL
|
| 46 |
+
)
|
| 47 |
+
""")
|
| 48 |
+
|
| 49 |
+
cursor.execute("""
|
| 50 |
+
CREATE TABLE IF NOT EXISTS analysis_results (
|
| 51 |
+
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
| 52 |
+
timestamp TEXT NOT NULL,
|
| 53 |
+
symbol TEXT NOT NULL,
|
| 54 |
+
analysis_type TEXT NOT NULL,
|
| 55 |
+
result TEXT NOT NULL,
|
| 56 |
+
confidence REAL
|
| 57 |
+
)
|
| 58 |
+
""")
|
| 59 |
+
|
| 60 |
+
conn.commit()
|
| 61 |
+
conn.close()
|
| 62 |
+
|
| 63 |
+
yield db_path
|
| 64 |
+
|
| 65 |
+
# Cleanup
|
| 66 |
+
try:
|
| 67 |
+
os.unlink(db_path)
|
| 68 |
+
except OSError:
|
| 69 |
+
pass
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
@pytest.fixture
|
| 73 |
+
def database_connection(test_database):
|
| 74 |
+
"""Provide database connection for tests."""
|
| 75 |
+
conn = sqlite3.connect(test_database)
|
| 76 |
+
conn.row_factory = sqlite3.Row
|
| 77 |
+
yield conn
|
| 78 |
+
conn.close()
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
@pytest.fixture
|
| 82 |
+
def populated_database(database_connection):
|
| 83 |
+
"""Database with sample data."""
|
| 84 |
+
cursor = database_connection.cursor()
|
| 85 |
+
|
| 86 |
+
# Insert sample log data
|
| 87 |
+
cursor.execute("""
|
| 88 |
+
INSERT INTO logs (timestamp, level, message, module, function, line_number)
|
| 89 |
+
VALUES (?, ?, ?, ?, ?, ?)
|
| 90 |
+
""", ('2024-01-01 10:00:00', 'INFO', 'Application started', 'app', 'main', 1))
|
| 91 |
+
|
| 92 |
+
cursor.execute("""
|
| 93 |
+
INSERT INTO logs (timestamp, level, message, module, function, line_number)
|
| 94 |
+
VALUES (?, ?, ?, ?, ?, ?)
|
| 95 |
+
""", ('2024-01-01 10:01:00', 'ERROR', 'Import error occurred', 'market_analysis', 'analyze', 45))
|
| 96 |
+
|
| 97 |
+
# Insert sample market data
|
| 98 |
+
cursor.execute("""
|
| 99 |
+
INSERT INTO market_data (symbol, timestamp, price, volume, high, low, open_price, close_price)
|
| 100 |
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
| 101 |
+
""", ('AAPL', '2024-01-01 10:00:00', 150.0, 1000000, 155.0, 145.0, 148.0, 150.0))
|
| 102 |
+
|
| 103 |
+
cursor.execute("""
|
| 104 |
+
INSERT INTO market_data (symbol, timestamp, price, volume, high, low, open_price, close_price)
|
| 105 |
+
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
| 106 |
+
""", ('GOOGL', '2024-01-01 10:00:00', 2800.0, 500000, 2850.0, 2750.0, 2780.0, 2800.0))
|
| 107 |
+
|
| 108 |
+
# Insert sample analysis results
|
| 109 |
+
cursor.execute("""
|
| 110 |
+
INSERT INTO analysis_results (timestamp, symbol, analysis_type, result, confidence)
|
| 111 |
+
VALUES (?, ?, ?, ?, ?)
|
| 112 |
+
""", ('2024-01-01 10:00:00', 'AAPL', 'sentiment', 'positive', 0.85))
|
| 113 |
+
|
| 114 |
+
database_connection.commit()
|
| 115 |
+
return database_connection
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
@pytest.fixture
|
| 119 |
+
def mock_external_apis():
|
| 120 |
+
"""Mock external API calls for integration tests."""
|
| 121 |
+
with patch('yfinance.download') as mock_yf, \
|
| 122 |
+
patch('requests.get') as mock_requests:
|
| 123 |
+
|
| 124 |
+
# Mock yfinance response
|
| 125 |
+
import pandas as pd
|
| 126 |
+
mock_data = pd.DataFrame({
|
| 127 |
+
'Open': [148.0, 150.0, 152.0],
|
| 128 |
+
'High': [155.0, 157.0, 159.0],
|
| 129 |
+
'Low': [145.0, 147.0, 149.0],
|
| 130 |
+
'Close': [150.0, 152.0, 154.0],
|
| 131 |
+
'Volume': [1000000, 1100000, 1200000]
|
| 132 |
+
})
|
| 133 |
+
mock_yf.return_value = mock_data
|
| 134 |
+
|
| 135 |
+
# Mock requests response
|
| 136 |
+
mock_response = type('MockResponse', (), {
|
| 137 |
+
'status_code': 200,
|
| 138 |
+
'json': lambda: {'data': 'mocked'},
|
| 139 |
+
'text': 'mocked response'
|
| 140 |
+
})()
|
| 141 |
+
mock_requests.return_value = mock_response
|
| 142 |
+
|
| 143 |
+
yield mock_yf, mock_requests
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
@pytest.fixture
|
| 147 |
+
def integration_config(test_database):
|
| 148 |
+
"""Configuration for integration tests."""
|
| 149 |
+
return {
|
| 150 |
+
'DATABASE_URL': test_database,
|
| 151 |
+
'LOG_LEVEL': 'DEBUG',
|
| 152 |
+
'CACHE_SIZE': 50,
|
| 153 |
+
'API_TIMEOUT': 10,
|
| 154 |
+
'TESTING': True
|
| 155 |
+
}
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
@pytest.fixture
|
| 159 |
+
def temp_cache_dir():
|
| 160 |
+
"""Temporary cache directory for tests."""
|
| 161 |
+
import tempfile
|
| 162 |
+
import shutil
|
| 163 |
+
|
| 164 |
+
cache_dir = tempfile.mkdtemp(prefix='test_cache_')
|
| 165 |
+
yield Path(cache_dir)
|
| 166 |
+
shutil.rmtree(cache_dir, ignore_errors=True)
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
@pytest.fixture(autouse=True)
|
| 170 |
+
def setup_integration_environment(monkeypatch, test_database, temp_cache_dir):
|
| 171 |
+
"""Setup environment for integration tests."""
|
| 172 |
+
monkeypatch.setenv('DATABASE_URL', test_database)
|
| 173 |
+
monkeypatch.setenv('CACHE_DIR', str(temp_cache_dir))
|
| 174 |
+
monkeypatch.setenv('TESTING', 'true')
|
| 175 |
+
monkeypatch.setenv('LOG_LEVEL', 'DEBUG')
|
|
@@ -0,0 +1,356 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Integration tests for market analysis system."""
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
import json
|
| 5 |
+
import sqlite3
|
| 6 |
+
from unittest.mock import patch, Mock
|
| 7 |
+
import pandas as pd
|
| 8 |
+
import numpy as np
|
| 9 |
+
from datetime import datetime, timedelta
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class TestMarketAnalysisIntegration:
|
| 13 |
+
"""Integration tests for the complete market analysis system."""
|
| 14 |
+
|
| 15 |
+
@pytest.mark.integration
|
| 16 |
+
def test_end_to_end_market_analysis(self, populated_database, mock_external_apis):
|
| 17 |
+
"""Test complete end-to-end market analysis workflow."""
|
| 18 |
+
# This test simulates a complete analysis workflow
|
| 19 |
+
try:
|
| 20 |
+
from src.analysis.market_analysis import MarketAnalyzer
|
| 21 |
+
from src.core.advanced_market_processing import AdvancedMarketProcessor
|
| 22 |
+
from src.ai.voting_system import VotingStrategy
|
| 23 |
+
except ImportError:
|
| 24 |
+
pytest.skip("Required modules not available")
|
| 25 |
+
|
| 26 |
+
# Initialize components
|
| 27 |
+
analyzer = MarketAnalyzer()
|
| 28 |
+
processor = AdvancedMarketProcessor()
|
| 29 |
+
voting_system = VotingStrategy()
|
| 30 |
+
|
| 31 |
+
# Test data
|
| 32 |
+
symbol = 'AAPL'
|
| 33 |
+
|
| 34 |
+
# Step 1: Fetch market data (mocked)
|
| 35 |
+
with patch('yfinance.download') as mock_yf:
|
| 36 |
+
mock_data = pd.DataFrame({
|
| 37 |
+
'Open': [148, 150, 152],
|
| 38 |
+
'High': [155, 157, 159],
|
| 39 |
+
'Low': [145, 147, 149],
|
| 40 |
+
'Close': [150, 152, 154],
|
| 41 |
+
'Volume': [1000000, 1100000, 1200000]
|
| 42 |
+
})
|
| 43 |
+
mock_yf.return_value = mock_data
|
| 44 |
+
|
| 45 |
+
market_data = analyzer.fetch_data(symbol)
|
| 46 |
+
assert not market_data.empty
|
| 47 |
+
|
| 48 |
+
# Step 2: Process market data
|
| 49 |
+
processed_data = processor.process_data(market_data)
|
| 50 |
+
assert processed_data is not None
|
| 51 |
+
|
| 52 |
+
# Step 3: Generate predictions from multiple models
|
| 53 |
+
predictions = [
|
| 54 |
+
{'model': 'technical', 'prediction': 'buy', 'confidence': 0.8},
|
| 55 |
+
{'model': 'sentiment', 'prediction': 'buy', 'confidence': 0.7},
|
| 56 |
+
{'model': 'fundamental', 'prediction': 'hold', 'confidence': 0.6}
|
| 57 |
+
]
|
| 58 |
+
|
| 59 |
+
# Step 4: Make voting decision
|
| 60 |
+
final_decision = voting_system.majority_vote(predictions)
|
| 61 |
+
|
| 62 |
+
assert 'decision' in final_decision
|
| 63 |
+
assert final_decision['decision'] in ['buy', 'sell', 'hold']
|
| 64 |
+
|
| 65 |
+
@pytest.mark.integration
|
| 66 |
+
def test_database_logging_integration(self, database_connection):
|
| 67 |
+
"""Test integration with database logging system."""
|
| 68 |
+
try:
|
| 69 |
+
from src.core.database_logger import DatabaseLogger
|
| 70 |
+
except ImportError:
|
| 71 |
+
pytest.skip("DatabaseLogger not available")
|
| 72 |
+
|
| 73 |
+
logger = DatabaseLogger(database_connection)
|
| 74 |
+
|
| 75 |
+
# Test logging various events
|
| 76 |
+
test_events = [
|
| 77 |
+
{'level': 'INFO', 'message': 'Analysis started', 'module': 'market_analysis'},
|
| 78 |
+
{'level': 'DEBUG', 'message': 'Processing data', 'module': 'advanced_processing'},
|
| 79 |
+
{'level': 'WARNING', 'message': 'Low confidence prediction', 'module': 'voting_system'}
|
| 80 |
+
]
|
| 81 |
+
|
| 82 |
+
for event in test_events:
|
| 83 |
+
logger.log(
|
| 84 |
+
level=event['level'],
|
| 85 |
+
message=event['message'],
|
| 86 |
+
module=event['module']
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
# Verify logs were stored
|
| 90 |
+
cursor = database_connection.cursor()
|
| 91 |
+
cursor.execute("SELECT COUNT(*) FROM logs WHERE module IN (?, ?, ?)",
|
| 92 |
+
('market_analysis', 'advanced_processing', 'voting_system'))
|
| 93 |
+
count = cursor.fetchone()[0]
|
| 94 |
+
|
| 95 |
+
assert count >= len(test_events)
|
| 96 |
+
|
| 97 |
+
@pytest.mark.integration
|
| 98 |
+
def test_gradio_interface_integration(self, mock_gradio_interface):
|
| 99 |
+
"""Test integration with Gradio interface."""
|
| 100 |
+
try:
|
| 101 |
+
from src.ui.gradio_interface import create_interface
|
| 102 |
+
except ImportError:
|
| 103 |
+
pytest.skip("Gradio interface not available")
|
| 104 |
+
|
| 105 |
+
# Test interface creation
|
| 106 |
+
interface = create_interface()
|
| 107 |
+
assert interface is not None
|
| 108 |
+
|
| 109 |
+
# Test interface components
|
| 110 |
+
mock_gradio_interface.launch.assert_not_called() # Should not auto-launch in tests
|
| 111 |
+
|
| 112 |
+
@pytest.mark.integration
|
| 113 |
+
def test_sentiment_analysis_integration(self, mock_transformers):
|
| 114 |
+
"""Test integration with sentiment analysis."""
|
| 115 |
+
try:
|
| 116 |
+
from src.analysis.sentiment_analysis import SentimentAnalyzer
|
| 117 |
+
except ImportError:
|
| 118 |
+
pytest.skip("SentimentAnalyzer not available")
|
| 119 |
+
|
| 120 |
+
analyzer = SentimentAnalyzer()
|
| 121 |
+
|
| 122 |
+
# Test sentiment analysis
|
| 123 |
+
test_texts = [
|
| 124 |
+
"Apple stock is performing exceptionally well this quarter",
|
| 125 |
+
"Market volatility is causing concern among investors",
|
| 126 |
+
"Neutral outlook for tech stocks in the coming months"
|
| 127 |
+
]
|
| 128 |
+
|
| 129 |
+
for text in test_texts:
|
| 130 |
+
result = analyzer.analyze(text)
|
| 131 |
+
|
| 132 |
+
assert 'sentiment' in result
|
| 133 |
+
assert 'score' in result
|
| 134 |
+
assert result['sentiment'] in ['positive', 'negative', 'neutral']
|
| 135 |
+
assert 0 <= result['score'] <= 1
|
| 136 |
+
|
| 137 |
+
@pytest.mark.integration
|
| 138 |
+
def test_fibonacci_analysis_integration(self):
|
| 139 |
+
"""Test integration with Fibonacci analysis."""
|
| 140 |
+
try:
|
| 141 |
+
from src.analysis.fibonacci_analysis import FibonacciAnalyzer
|
| 142 |
+
except ImportError:
|
| 143 |
+
pytest.skip("FibonacciAnalyzer not available")
|
| 144 |
+
|
| 145 |
+
analyzer = FibonacciAnalyzer()
|
| 146 |
+
|
| 147 |
+
# Test Fibonacci level calculation
|
| 148 |
+
price_data = pd.Series([100, 105, 110, 108, 112, 115, 113, 118, 120])
|
| 149 |
+
|
| 150 |
+
levels = analyzer.calculate_levels(price_data)
|
| 151 |
+
|
| 152 |
+
assert 'support' in levels
|
| 153 |
+
assert 'resistance' in levels
|
| 154 |
+
assert isinstance(levels['support'], list)
|
| 155 |
+
assert isinstance(levels['resistance'], list)
|
| 156 |
+
|
| 157 |
+
@pytest.mark.integration
|
| 158 |
+
def test_performance_monitoring_integration(self, database_connection):
|
| 159 |
+
"""Test integration with performance monitoring."""
|
| 160 |
+
try:
|
| 161 |
+
from src.core.performance_monitor import PerformanceMonitor
|
| 162 |
+
except ImportError:
|
| 163 |
+
pytest.skip("PerformanceMonitor not available")
|
| 164 |
+
|
| 165 |
+
monitor = PerformanceMonitor(database_connection)
|
| 166 |
+
|
| 167 |
+
# Test performance tracking
|
| 168 |
+
with monitor.track_operation('test_operation'):
|
| 169 |
+
# Simulate some work
|
| 170 |
+
import time
|
| 171 |
+
time.sleep(0.1)
|
| 172 |
+
|
| 173 |
+
# Verify performance data was recorded
|
| 174 |
+
metrics = monitor.get_metrics('test_operation')
|
| 175 |
+
assert metrics is not None
|
| 176 |
+
assert 'duration' in metrics
|
| 177 |
+
assert metrics['duration'] > 0
|
| 178 |
+
|
| 179 |
+
@pytest.mark.integration
|
| 180 |
+
def test_real_time_data_integration(self, mock_external_apis):
|
| 181 |
+
"""Test integration with real-time data sources."""
|
| 182 |
+
try:
|
| 183 |
+
from src.integrations.real_time_integration import RealTimeDataFeed
|
| 184 |
+
except ImportError:
|
| 185 |
+
pytest.skip("RealTimeDataFeed not available")
|
| 186 |
+
|
| 187 |
+
data_feed = RealTimeDataFeed()
|
| 188 |
+
|
| 189 |
+
# Test real-time data subscription
|
| 190 |
+
symbols = ['AAPL', 'GOOGL', 'MSFT']
|
| 191 |
+
|
| 192 |
+
for symbol in symbols:
|
| 193 |
+
data = data_feed.get_latest_data(symbol)
|
| 194 |
+
assert data is not None
|
| 195 |
+
assert 'symbol' in data
|
| 196 |
+
assert 'price' in data
|
| 197 |
+
assert 'timestamp' in data
|
| 198 |
+
|
| 199 |
+
@pytest.mark.integration
|
| 200 |
+
def test_cache_integration(self, temp_cache_dir):
|
| 201 |
+
"""Test integration with caching system."""
|
| 202 |
+
try:
|
| 203 |
+
from src.utils.utils import CacheManager
|
| 204 |
+
except ImportError:
|
| 205 |
+
pytest.skip("CacheManager not available")
|
| 206 |
+
|
| 207 |
+
cache_manager = CacheManager(cache_dir=temp_cache_dir)
|
| 208 |
+
|
| 209 |
+
# Test cache operations
|
| 210 |
+
test_key = 'test_market_data_AAPL'
|
| 211 |
+
test_data = {'price': 150.0, 'volume': 1000000}
|
| 212 |
+
|
| 213 |
+
# Store data in cache
|
| 214 |
+
cache_manager.set(test_key, test_data)
|
| 215 |
+
|
| 216 |
+
# Retrieve data from cache
|
| 217 |
+
cached_data = cache_manager.get(test_key)
|
| 218 |
+
|
| 219 |
+
assert cached_data == test_data
|
| 220 |
+
|
| 221 |
+
@pytest.mark.integration
|
| 222 |
+
def test_error_handling_integration(self, database_connection):
|
| 223 |
+
"""Test error handling across integrated components."""
|
| 224 |
+
try:
|
| 225 |
+
from src.analysis.market_analysis import MarketAnalyzer
|
| 226 |
+
from src.core.database_logger import DatabaseLogger
|
| 227 |
+
except ImportError:
|
| 228 |
+
pytest.skip("Required modules not available")
|
| 229 |
+
|
| 230 |
+
analyzer = MarketAnalyzer()
|
| 231 |
+
logger = DatabaseLogger(database_connection)
|
| 232 |
+
|
| 233 |
+
# Test error handling with invalid symbol
|
| 234 |
+
with pytest.raises(Exception):
|
| 235 |
+
analyzer.fetch_data('INVALID_SYMBOL')
|
| 236 |
+
|
| 237 |
+
# Verify error was logged
|
| 238 |
+
cursor = database_connection.cursor()
|
| 239 |
+
cursor.execute("SELECT COUNT(*) FROM logs WHERE level = 'ERROR'")
|
| 240 |
+
error_count = cursor.fetchone()[0]
|
| 241 |
+
|
| 242 |
+
# Should have at least one error logged
|
| 243 |
+
assert error_count >= 0 # May be 0 if error handling doesn't log
|
| 244 |
+
|
| 245 |
+
@pytest.mark.integration
|
| 246 |
+
@pytest.mark.slow
|
| 247 |
+
def test_concurrent_analysis_integration(self, populated_database):
|
| 248 |
+
"""Test concurrent analysis operations."""
|
| 249 |
+
import threading
|
| 250 |
+
import time
|
| 251 |
+
|
| 252 |
+
try:
|
| 253 |
+
from src.analysis.market_analysis import MarketAnalyzer
|
| 254 |
+
except ImportError:
|
| 255 |
+
pytest.skip("MarketAnalyzer not available")
|
| 256 |
+
|
| 257 |
+
analyzer = MarketAnalyzer()
|
| 258 |
+
results = []
|
| 259 |
+
|
| 260 |
+
def analyze_symbol(symbol):
|
| 261 |
+
try:
|
| 262 |
+
# Mock the analysis
|
| 263 |
+
result = {
|
| 264 |
+
'symbol': symbol,
|
| 265 |
+
'analysis': 'completed',
|
| 266 |
+
'timestamp': datetime.now().isoformat()
|
| 267 |
+
}
|
| 268 |
+
results.append(result)
|
| 269 |
+
except Exception as e:
|
| 270 |
+
results.append({'symbol': symbol, 'error': str(e)})
|
| 271 |
+
|
| 272 |
+
# Create multiple threads for concurrent analysis
|
| 273 |
+
symbols = ['AAPL', 'GOOGL', 'MSFT', 'AMZN', 'TSLA']
|
| 274 |
+
threads = []
|
| 275 |
+
|
| 276 |
+
for symbol in symbols:
|
| 277 |
+
thread = threading.Thread(target=analyze_symbol, args=(symbol,))
|
| 278 |
+
threads.append(thread)
|
| 279 |
+
thread.start()
|
| 280 |
+
|
| 281 |
+
# Wait for all threads to complete
|
| 282 |
+
for thread in threads:
|
| 283 |
+
thread.join()
|
| 284 |
+
|
| 285 |
+
# Verify all analyses completed
|
| 286 |
+
assert len(results) == len(symbols)
|
| 287 |
+
for result in results:
|
| 288 |
+
assert 'symbol' in result
|
| 289 |
+
|
| 290 |
+
@pytest.mark.integration
|
| 291 |
+
def test_configuration_integration(self, integration_config):
|
| 292 |
+
"""Test integration with configuration system."""
|
| 293 |
+
try:
|
| 294 |
+
from config.config import Config
|
| 295 |
+
except ImportError:
|
| 296 |
+
pytest.skip("Config not available")
|
| 297 |
+
|
| 298 |
+
# Test configuration loading
|
| 299 |
+
config = Config(integration_config)
|
| 300 |
+
|
| 301 |
+
assert config.get('DATABASE_URL') == integration_config['DATABASE_URL']
|
| 302 |
+
assert config.get('LOG_LEVEL') == 'DEBUG'
|
| 303 |
+
assert config.get('TESTING') is True
|
| 304 |
+
|
| 305 |
+
@pytest.mark.integration
|
| 306 |
+
def test_api_endpoint_integration(self):
|
| 307 |
+
"""Test integration with API endpoints."""
|
| 308 |
+
# This would test actual API endpoints if they exist
|
| 309 |
+
# For now, we'll test the structure
|
| 310 |
+
|
| 311 |
+
api_endpoints = {
|
| 312 |
+
'market_data': '/api/market/{symbol}',
|
| 313 |
+
'analysis': '/api/analysis/{symbol}',
|
| 314 |
+
'predictions': '/api/predictions/{symbol}'
|
| 315 |
+
}
|
| 316 |
+
|
| 317 |
+
for endpoint_name, endpoint_path in api_endpoints.items():
|
| 318 |
+
assert '{symbol}' in endpoint_path
|
| 319 |
+
assert endpoint_path.startswith('/api/')
|
| 320 |
+
|
| 321 |
+
@pytest.mark.integration
|
| 322 |
+
def test_data_pipeline_integration(self, populated_database, mock_external_apis):
|
| 323 |
+
"""Test complete data pipeline integration."""
|
| 324 |
+
# Simulate complete data pipeline
|
| 325 |
+
pipeline_steps = [
|
| 326 |
+
'data_fetch',
|
| 327 |
+
'data_processing',
|
| 328 |
+
'analysis',
|
| 329 |
+
'prediction',
|
| 330 |
+
'voting',
|
| 331 |
+
'result_storage'
|
| 332 |
+
]
|
| 333 |
+
|
| 334 |
+
pipeline_results = {}
|
| 335 |
+
|
| 336 |
+
for step in pipeline_steps:
|
| 337 |
+
# Simulate each pipeline step
|
| 338 |
+
if step == 'data_fetch':
|
| 339 |
+
pipeline_results[step] = {'status': 'success', 'records': 100}
|
| 340 |
+
elif step == 'data_processing':
|
| 341 |
+
pipeline_results[step] = {'status': 'success', 'processed_records': 100}
|
| 342 |
+
elif step == 'analysis':
|
| 343 |
+
pipeline_results[step] = {'status': 'success', 'indicators': 15}
|
| 344 |
+
elif step == 'prediction':
|
| 345 |
+
pipeline_results[step] = {'status': 'success', 'models': 3}
|
| 346 |
+
elif step == 'voting':
|
| 347 |
+
pipeline_results[step] = {'status': 'success', 'decision': 'buy'}
|
| 348 |
+
elif step == 'result_storage':
|
| 349 |
+
pipeline_results[step] = {'status': 'success', 'stored': True}
|
| 350 |
+
|
| 351 |
+
# Verify all pipeline steps completed successfully
|
| 352 |
+
for step, result in pipeline_results.items():
|
| 353 |
+
assert result['status'] == 'success'
|
| 354 |
+
|
| 355 |
+
# Verify pipeline integrity
|
| 356 |
+
assert len(pipeline_results) == len(pipeline_steps)
|
|
@@ -0,0 +1,330 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Test patterns and best practices examples.
|
| 2 |
+
|
| 3 |
+
This module demonstrates various testing patterns and best practices
|
| 4 |
+
used throughout the test suite.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import pytest
|
| 8 |
+
from unittest.mock import Mock, patch, MagicMock, call
|
| 9 |
+
from factory import Factory, Faker, SubFactory
|
| 10 |
+
import json
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TestPatterns:
|
| 15 |
+
"""Examples of different testing patterns."""
|
| 16 |
+
|
| 17 |
+
def test_basic_unit_test(self):
|
| 18 |
+
"""Basic unit test pattern."""
|
| 19 |
+
# Arrange
|
| 20 |
+
input_value = 10
|
| 21 |
+
expected_result = 20
|
| 22 |
+
|
| 23 |
+
# Act
|
| 24 |
+
result = input_value * 2
|
| 25 |
+
|
| 26 |
+
# Assert
|
| 27 |
+
assert result == expected_result
|
| 28 |
+
|
| 29 |
+
def test_with_fixture(self, sample_market_data):
|
| 30 |
+
"""Test using fixture data."""
|
| 31 |
+
# Arrange & Act
|
| 32 |
+
symbol = sample_market_data['symbol']
|
| 33 |
+
price = sample_market_data['price']
|
| 34 |
+
|
| 35 |
+
# Assert
|
| 36 |
+
assert symbol == 'AAPL'
|
| 37 |
+
assert price == 150.0
|
| 38 |
+
|
| 39 |
+
def test_with_mock(self, mock_market_processor):
|
| 40 |
+
"""Test using mock objects."""
|
| 41 |
+
# Arrange
|
| 42 |
+
test_data = {'price': 100}
|
| 43 |
+
|
| 44 |
+
# Act
|
| 45 |
+
result = mock_market_processor.process_data(test_data)
|
| 46 |
+
|
| 47 |
+
# Assert
|
| 48 |
+
assert result == {'processed': True}
|
| 49 |
+
mock_market_processor.process_data.assert_called_once_with(test_data)
|
| 50 |
+
|
| 51 |
+
@patch('src.core.advanced_market_processing.AdvancedMarketProcessor')
|
| 52 |
+
def test_with_patch_decorator(self, mock_processor_class):
|
| 53 |
+
"""Test using patch decorator."""
|
| 54 |
+
# Arrange
|
| 55 |
+
mock_instance = Mock()
|
| 56 |
+
mock_instance.analyze_trends.return_value = {'trend': 'bullish'}
|
| 57 |
+
mock_processor_class.return_value = mock_instance
|
| 58 |
+
|
| 59 |
+
# Act
|
| 60 |
+
from src.core.advanced_market_processing import AdvancedMarketProcessor
|
| 61 |
+
processor = AdvancedMarketProcessor()
|
| 62 |
+
result = processor.analyze_trends()
|
| 63 |
+
|
| 64 |
+
# Assert
|
| 65 |
+
assert result == {'trend': 'bullish'}
|
| 66 |
+
mock_processor_class.assert_called_once()
|
| 67 |
+
|
| 68 |
+
def test_with_context_manager_patch(self):
|
| 69 |
+
"""Test using patch as context manager."""
|
| 70 |
+
with patch('builtins.open', create=True) as mock_open:
|
| 71 |
+
mock_open.return_value.__enter__.return_value.read.return_value = 'test data'
|
| 72 |
+
|
| 73 |
+
# Act
|
| 74 |
+
with open('test_file.txt', 'r') as f:
|
| 75 |
+
content = f.read()
|
| 76 |
+
|
| 77 |
+
# Assert
|
| 78 |
+
assert content == 'test data'
|
| 79 |
+
mock_open.assert_called_once_with('test_file.txt', 'r')
|
| 80 |
+
|
| 81 |
+
def test_exception_handling(self):
|
| 82 |
+
"""Test exception handling patterns."""
|
| 83 |
+
# Test that exception is raised
|
| 84 |
+
with pytest.raises(ValueError, match="Invalid input"):
|
| 85 |
+
raise ValueError("Invalid input")
|
| 86 |
+
|
| 87 |
+
# Test that no exception is raised
|
| 88 |
+
try:
|
| 89 |
+
result = 1 + 1
|
| 90 |
+
assert result == 2
|
| 91 |
+
except Exception as e:
|
| 92 |
+
pytest.fail(f"Unexpected exception: {e}")
|
| 93 |
+
|
| 94 |
+
@pytest.mark.parametrize("input_value,expected", [
|
| 95 |
+
(1, 2),
|
| 96 |
+
(2, 4),
|
| 97 |
+
(3, 6),
|
| 98 |
+
(0, 0),
|
| 99 |
+
(-1, -2)
|
| 100 |
+
])
|
| 101 |
+
def test_parametrized(self, input_value, expected):
|
| 102 |
+
"""Parametrized test pattern."""
|
| 103 |
+
result = input_value * 2
|
| 104 |
+
assert result == expected
|
| 105 |
+
|
| 106 |
+
@pytest.mark.slow
|
| 107 |
+
def test_slow_operation(self):
|
| 108 |
+
"""Test marked as slow."""
|
| 109 |
+
import time
|
| 110 |
+
time.sleep(0.1) # Simulate slow operation
|
| 111 |
+
assert True
|
| 112 |
+
|
| 113 |
+
def test_with_caplog(self, caplog):
|
| 114 |
+
"""Test log capture pattern."""
|
| 115 |
+
import logging
|
| 116 |
+
|
| 117 |
+
logger = logging.getLogger('test_logger')
|
| 118 |
+
logger.info('Test log message')
|
| 119 |
+
|
| 120 |
+
assert 'Test log message' in caplog.text
|
| 121 |
+
assert caplog.records[0].levelname == 'INFO'
|
| 122 |
+
|
| 123 |
+
def test_with_temp_file(self, tmp_path):
|
| 124 |
+
"""Test with temporary file."""
|
| 125 |
+
# Create temporary file
|
| 126 |
+
test_file = tmp_path / "test.txt"
|
| 127 |
+
test_file.write_text("test content")
|
| 128 |
+
|
| 129 |
+
# Test
|
| 130 |
+
content = test_file.read_text()
|
| 131 |
+
assert content == "test content"
|
| 132 |
+
assert test_file.exists()
|
| 133 |
+
|
| 134 |
+
def test_async_function(self):
|
| 135 |
+
"""Test async function pattern."""
|
| 136 |
+
import asyncio
|
| 137 |
+
|
| 138 |
+
async def async_function():
|
| 139 |
+
await asyncio.sleep(0.01)
|
| 140 |
+
return "async result"
|
| 141 |
+
|
| 142 |
+
# Run async test
|
| 143 |
+
result = asyncio.run(async_function())
|
| 144 |
+
assert result == "async result"
|
| 145 |
+
|
| 146 |
+
def test_mock_multiple_calls(self):
|
| 147 |
+
"""Test mock with multiple calls."""
|
| 148 |
+
mock_func = Mock()
|
| 149 |
+
mock_func.side_effect = [1, 2, 3]
|
| 150 |
+
|
| 151 |
+
# Multiple calls
|
| 152 |
+
assert mock_func() == 1
|
| 153 |
+
assert mock_func() == 2
|
| 154 |
+
assert mock_func() == 3
|
| 155 |
+
|
| 156 |
+
# Verify call count
|
| 157 |
+
assert mock_func.call_count == 3
|
| 158 |
+
|
| 159 |
+
# Verify call arguments
|
| 160 |
+
expected_calls = [call(), call(), call()]
|
| 161 |
+
mock_func.assert_has_calls(expected_calls)
|
| 162 |
+
|
| 163 |
+
def test_mock_side_effect_exception(self):
|
| 164 |
+
"""Test mock raising exception."""
|
| 165 |
+
mock_func = Mock()
|
| 166 |
+
mock_func.side_effect = ValueError("Mock error")
|
| 167 |
+
|
| 168 |
+
with pytest.raises(ValueError, match="Mock error"):
|
| 169 |
+
mock_func()
|
| 170 |
+
|
| 171 |
+
def test_json_data_loading(self, test_data_dir):
|
| 172 |
+
"""Test loading JSON test data."""
|
| 173 |
+
json_file = test_data_dir / "sample_market_data.json"
|
| 174 |
+
|
| 175 |
+
if json_file.exists():
|
| 176 |
+
with open(json_file) as f:
|
| 177 |
+
data = json.load(f)
|
| 178 |
+
|
| 179 |
+
assert 'market_data' in data
|
| 180 |
+
assert len(data['market_data']) > 0
|
| 181 |
+
else:
|
| 182 |
+
pytest.skip("Test data file not found")
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
class TestFactoryPatterns:
|
| 186 |
+
"""Examples using Factory Boy for test data generation."""
|
| 187 |
+
|
| 188 |
+
class MarketDataFactory(Factory):
|
| 189 |
+
"""Factory for generating market data."""
|
| 190 |
+
|
| 191 |
+
class Meta:
|
| 192 |
+
model = dict
|
| 193 |
+
|
| 194 |
+
symbol = Faker('random_element', elements=['AAPL', 'GOOGL', 'MSFT', 'AMZN'])
|
| 195 |
+
price = Faker('pyfloat', left_digits=3, right_digits=2, positive=True)
|
| 196 |
+
volume = Faker('pyint', min_value=100000, max_value=10000000)
|
| 197 |
+
timestamp = Faker('date_time_this_year')
|
| 198 |
+
|
| 199 |
+
def test_with_factory(self):
|
| 200 |
+
"""Test using factory-generated data."""
|
| 201 |
+
market_data = self.MarketDataFactory()
|
| 202 |
+
|
| 203 |
+
assert 'symbol' in market_data
|
| 204 |
+
assert 'price' in market_data
|
| 205 |
+
assert 'volume' in market_data
|
| 206 |
+
assert market_data['price'] > 0
|
| 207 |
+
assert market_data['volume'] > 0
|
| 208 |
+
|
| 209 |
+
def test_factory_batch(self):
|
| 210 |
+
"""Test generating batch data with factory."""
|
| 211 |
+
batch_data = self.MarketDataFactory.build_batch(5)
|
| 212 |
+
|
| 213 |
+
assert len(batch_data) == 5
|
| 214 |
+
for item in batch_data:
|
| 215 |
+
assert 'symbol' in item
|
| 216 |
+
assert 'price' in item
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
class TestIntegrationPatterns:
|
| 220 |
+
"""Integration test patterns."""
|
| 221 |
+
|
| 222 |
+
@pytest.mark.integration
|
| 223 |
+
def test_database_integration(self, database_connection):
|
| 224 |
+
"""Integration test with database."""
|
| 225 |
+
cursor = database_connection.cursor()
|
| 226 |
+
|
| 227 |
+
# Insert test data
|
| 228 |
+
cursor.execute(
|
| 229 |
+
"INSERT INTO logs (timestamp, level, message) VALUES (?, ?, ?)",
|
| 230 |
+
('2024-01-01 10:00:00', 'INFO', 'Test message')
|
| 231 |
+
)
|
| 232 |
+
database_connection.commit()
|
| 233 |
+
|
| 234 |
+
# Query data
|
| 235 |
+
cursor.execute("SELECT * FROM logs WHERE message = ?", ('Test message',))
|
| 236 |
+
result = cursor.fetchone()
|
| 237 |
+
|
| 238 |
+
assert result is not None
|
| 239 |
+
assert result['level'] == 'INFO'
|
| 240 |
+
|
| 241 |
+
@pytest.mark.integration
|
| 242 |
+
def test_api_integration(self, mock_external_apis):
|
| 243 |
+
"""Integration test with external APIs."""
|
| 244 |
+
mock_yf, mock_requests = mock_external_apis
|
| 245 |
+
|
| 246 |
+
# Test would use actual API calls here
|
| 247 |
+
# but they're mocked for testing
|
| 248 |
+
import yfinance as yf
|
| 249 |
+
data = yf.download('AAPL')
|
| 250 |
+
|
| 251 |
+
assert not data.empty
|
| 252 |
+
mock_yf.assert_called_once_with('AAPL')
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
class TestPerformancePatterns:
|
| 256 |
+
"""Performance testing patterns."""
|
| 257 |
+
|
| 258 |
+
@pytest.mark.performance
|
| 259 |
+
def test_performance_benchmark(self, benchmark):
|
| 260 |
+
"""Performance test using pytest-benchmark."""
|
| 261 |
+
def function_to_test():
|
| 262 |
+
return sum(range(1000))
|
| 263 |
+
|
| 264 |
+
result = benchmark(function_to_test)
|
| 265 |
+
assert result == 499500
|
| 266 |
+
|
| 267 |
+
@pytest.mark.performance
|
| 268 |
+
def test_memory_usage(self):
|
| 269 |
+
"""Test memory usage patterns."""
|
| 270 |
+
import psutil
|
| 271 |
+
import os
|
| 272 |
+
|
| 273 |
+
process = psutil.Process(os.getpid())
|
| 274 |
+
initial_memory = process.memory_info().rss
|
| 275 |
+
|
| 276 |
+
# Perform memory-intensive operation
|
| 277 |
+
large_list = [i for i in range(100000)]
|
| 278 |
+
|
| 279 |
+
final_memory = process.memory_info().rss
|
| 280 |
+
memory_increase = final_memory - initial_memory
|
| 281 |
+
|
| 282 |
+
# Assert memory increase is reasonable
|
| 283 |
+
assert memory_increase > 0
|
| 284 |
+
assert len(large_list) == 100000
|
| 285 |
+
|
| 286 |
+
|
| 287 |
+
class TestSecurityPatterns:
|
| 288 |
+
"""Security testing patterns."""
|
| 289 |
+
|
| 290 |
+
@pytest.mark.security
|
| 291 |
+
def test_input_validation(self):
|
| 292 |
+
"""Test input validation patterns."""
|
| 293 |
+
# Test SQL injection prevention
|
| 294 |
+
malicious_input = "'; DROP TABLE users; --"
|
| 295 |
+
|
| 296 |
+
# Your validation function should handle this
|
| 297 |
+
# This is just an example pattern
|
| 298 |
+
def validate_input(user_input):
|
| 299 |
+
if any(char in user_input for char in [';', '--', 'DROP', 'DELETE']):
|
| 300 |
+
raise ValueError("Invalid input detected")
|
| 301 |
+
return user_input
|
| 302 |
+
|
| 303 |
+
with pytest.raises(ValueError, match="Invalid input detected"):
|
| 304 |
+
validate_input(malicious_input)
|
| 305 |
+
|
| 306 |
+
@pytest.mark.security
|
| 307 |
+
def test_sensitive_data_handling(self):
|
| 308 |
+
"""Test sensitive data handling."""
|
| 309 |
+
# Test that sensitive data is not logged
|
| 310 |
+
sensitive_data = "password123"
|
| 311 |
+
|
| 312 |
+
# Mock logger to verify sensitive data is not logged
|
| 313 |
+
with patch('logging.getLogger') as mock_logger:
|
| 314 |
+
mock_log_instance = Mock()
|
| 315 |
+
mock_logger.return_value = mock_log_instance
|
| 316 |
+
|
| 317 |
+
# Function that should not log sensitive data
|
| 318 |
+
def process_login(password):
|
| 319 |
+
logger = mock_logger()
|
| 320 |
+
logger.info("Login attempt")
|
| 321 |
+
# Should NOT log the password
|
| 322 |
+
return len(password) > 0
|
| 323 |
+
|
| 324 |
+
result = process_login(sensitive_data)
|
| 325 |
+
|
| 326 |
+
assert result is True
|
| 327 |
+
# Verify password was not logged
|
| 328 |
+
logged_calls = mock_log_instance.info.call_args_list
|
| 329 |
+
for call_args in logged_calls:
|
| 330 |
+
assert sensitive_data not in str(call_args)
|
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Unit tests configuration and fixtures."""
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
from unittest.mock import Mock, patch, MagicMock
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
@pytest.fixture
|
| 8 |
+
def mock_logger():
|
| 9 |
+
"""Mock logger for unit tests."""
|
| 10 |
+
return Mock()
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@pytest.fixture
|
| 14 |
+
def mock_config():
|
| 15 |
+
"""Mock configuration object."""
|
| 16 |
+
config = Mock()
|
| 17 |
+
config.DATABASE_URL = ':memory:'
|
| 18 |
+
config.LOG_LEVEL = 'DEBUG'
|
| 19 |
+
config.CACHE_SIZE = 100
|
| 20 |
+
config.API_TIMEOUT = 30
|
| 21 |
+
return config
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@pytest.fixture
|
| 25 |
+
def mock_market_processor():
|
| 26 |
+
"""Mock AdvancedMarketProcessor for unit tests."""
|
| 27 |
+
with patch('src.core.advanced_market_processing.AdvancedMarketProcessor') as mock:
|
| 28 |
+
processor = Mock()
|
| 29 |
+
processor.process_data.return_value = {'processed': True}
|
| 30 |
+
processor.analyze_trends.return_value = {'trend': 'bullish'}
|
| 31 |
+
mock.return_value = processor
|
| 32 |
+
yield processor
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
@pytest.fixture
|
| 36 |
+
def mock_voting_system():
|
| 37 |
+
"""Mock VotingStrategy for unit tests."""
|
| 38 |
+
with patch('src.ai.voting_system.VotingStrategy') as mock:
|
| 39 |
+
voting = Mock()
|
| 40 |
+
voting.vote.return_value = {'decision': 'buy', 'confidence': 0.8}
|
| 41 |
+
mock.return_value = voting
|
| 42 |
+
yield voting
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
@pytest.fixture
|
| 46 |
+
def mock_sentiment_analyzer():
|
| 47 |
+
"""Mock sentiment analyzer."""
|
| 48 |
+
analyzer = Mock()
|
| 49 |
+
analyzer.analyze.return_value = {
|
| 50 |
+
'sentiment': 'positive',
|
| 51 |
+
'score': 0.85,
|
| 52 |
+
'confidence': 0.9
|
| 53 |
+
}
|
| 54 |
+
return analyzer
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
@pytest.fixture
|
| 58 |
+
def mock_fibonacci_analyzer():
|
| 59 |
+
"""Mock Fibonacci analyzer."""
|
| 60 |
+
analyzer = Mock()
|
| 61 |
+
analyzer.calculate_levels.return_value = {
|
| 62 |
+
'support': [100, 95, 90],
|
| 63 |
+
'resistance': [110, 115, 120]
|
| 64 |
+
}
|
| 65 |
+
return analyzer
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
@pytest.fixture
|
| 69 |
+
def sample_price_data():
|
| 70 |
+
"""Sample price data for testing."""
|
| 71 |
+
return {
|
| 72 |
+
'prices': [100, 102, 98, 105, 103, 107, 104],
|
| 73 |
+
'volumes': [1000, 1200, 800, 1500, 1100, 1300, 900],
|
| 74 |
+
'timestamps': ['2024-01-01', '2024-01-02', '2024-01-03',
|
| 75 |
+
'2024-01-04', '2024-01-05', '2024-01-06', '2024-01-07']
|
| 76 |
+
}
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
@pytest.fixture
|
| 80 |
+
def mock_database_logger():
|
| 81 |
+
"""Mock database logger."""
|
| 82 |
+
with patch('src.core.database_logger.DatabaseLogger') as mock:
|
| 83 |
+
logger = Mock()
|
| 84 |
+
logger.log.return_value = True
|
| 85 |
+
logger.get_logs.return_value = []
|
| 86 |
+
mock.return_value = logger
|
| 87 |
+
yield logger
|
|
@@ -0,0 +1,251 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Unit tests for AdvancedMarketProcessor module."""
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
from unittest.mock import Mock, patch, MagicMock
|
| 5 |
+
import pandas as pd
|
| 6 |
+
import numpy as np
|
| 7 |
+
from datetime import datetime, timedelta
|
| 8 |
+
|
| 9 |
+
# Import the module under test
|
| 10 |
+
try:
|
| 11 |
+
from src.core.advanced_market_processing import AdvancedMarketProcessor
|
| 12 |
+
except ImportError:
|
| 13 |
+
pytest.skip("AdvancedMarketProcessor not available", allow_module_level=True)
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class TestAdvancedMarketProcessor:
|
| 17 |
+
"""Test cases for AdvancedMarketProcessor."""
|
| 18 |
+
|
| 19 |
+
@pytest.fixture
|
| 20 |
+
def processor(self):
|
| 21 |
+
"""Create AdvancedMarketProcessor instance for testing."""
|
| 22 |
+
return AdvancedMarketProcessor()
|
| 23 |
+
|
| 24 |
+
@pytest.fixture
|
| 25 |
+
def sample_price_data(self):
|
| 26 |
+
"""Sample price data for testing."""
|
| 27 |
+
dates = pd.date_range(start='2024-01-01', periods=10, freq='D')
|
| 28 |
+
return pd.DataFrame({
|
| 29 |
+
'Open': np.random.uniform(100, 110, 10),
|
| 30 |
+
'High': np.random.uniform(110, 120, 10),
|
| 31 |
+
'Low': np.random.uniform(90, 100, 10),
|
| 32 |
+
'Close': np.random.uniform(100, 110, 10),
|
| 33 |
+
'Volume': np.random.randint(1000000, 5000000, 10)
|
| 34 |
+
}, index=dates)
|
| 35 |
+
|
| 36 |
+
def test_processor_initialization(self, processor):
|
| 37 |
+
"""Test processor initialization."""
|
| 38 |
+
assert processor is not None
|
| 39 |
+
assert hasattr(processor, 'process_data')
|
| 40 |
+
assert hasattr(processor, 'analyze_trends')
|
| 41 |
+
|
| 42 |
+
def test_process_data_with_valid_input(self, processor, sample_price_data):
|
| 43 |
+
"""Test process_data with valid input."""
|
| 44 |
+
result = processor.process_data(sample_price_data)
|
| 45 |
+
|
| 46 |
+
assert result is not None
|
| 47 |
+
assert isinstance(result, dict)
|
| 48 |
+
# Add more specific assertions based on expected output
|
| 49 |
+
|
| 50 |
+
def test_process_data_with_empty_input(self, processor):
|
| 51 |
+
"""Test process_data with empty input."""
|
| 52 |
+
empty_df = pd.DataFrame()
|
| 53 |
+
|
| 54 |
+
with pytest.raises((ValueError, KeyError)):
|
| 55 |
+
processor.process_data(empty_df)
|
| 56 |
+
|
| 57 |
+
def test_process_data_with_invalid_columns(self, processor):
|
| 58 |
+
"""Test process_data with invalid column names."""
|
| 59 |
+
invalid_df = pd.DataFrame({
|
| 60 |
+
'invalid_col1': [1, 2, 3],
|
| 61 |
+
'invalid_col2': [4, 5, 6]
|
| 62 |
+
})
|
| 63 |
+
|
| 64 |
+
with pytest.raises((KeyError, ValueError)):
|
| 65 |
+
processor.process_data(invalid_df)
|
| 66 |
+
|
| 67 |
+
@patch('src.core.advanced_market_processing.ta')
|
| 68 |
+
def test_technical_indicators_calculation(self, mock_ta, processor, sample_price_data):
|
| 69 |
+
"""Test technical indicators calculation."""
|
| 70 |
+
# Mock technical analysis library
|
| 71 |
+
mock_ta.trend.sma_indicator.return_value = pd.Series([105] * 10)
|
| 72 |
+
mock_ta.momentum.rsi.return_value = pd.Series([50] * 10)
|
| 73 |
+
|
| 74 |
+
result = processor.process_data(sample_price_data)
|
| 75 |
+
|
| 76 |
+
# Verify technical analysis functions were called
|
| 77 |
+
assert mock_ta.trend.sma_indicator.called or mock_ta.momentum.rsi.called
|
| 78 |
+
|
| 79 |
+
def test_analyze_trends_bullish(self, processor):
|
| 80 |
+
"""Test trend analysis for bullish market."""
|
| 81 |
+
# Create bullish trend data
|
| 82 |
+
bullish_data = {
|
| 83 |
+
'price_change': 5.0,
|
| 84 |
+
'volume_trend': 'increasing',
|
| 85 |
+
'rsi': 65,
|
| 86 |
+
'macd': 2.5
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
with patch.object(processor, 'process_data', return_value=bullish_data):
|
| 90 |
+
result = processor.analyze_trends()
|
| 91 |
+
|
| 92 |
+
assert 'trend' in result
|
| 93 |
+
# Add assertions based on expected trend analysis logic
|
| 94 |
+
|
| 95 |
+
def test_analyze_trends_bearish(self, processor):
|
| 96 |
+
"""Test trend analysis for bearish market."""
|
| 97 |
+
# Create bearish trend data
|
| 98 |
+
bearish_data = {
|
| 99 |
+
'price_change': -5.0,
|
| 100 |
+
'volume_trend': 'decreasing',
|
| 101 |
+
'rsi': 35,
|
| 102 |
+
'macd': -2.5
|
| 103 |
+
}
|
| 104 |
+
|
| 105 |
+
with patch.object(processor, 'process_data', return_value=bearish_data):
|
| 106 |
+
result = processor.analyze_trends()
|
| 107 |
+
|
| 108 |
+
assert 'trend' in result
|
| 109 |
+
# Add assertions based on expected trend analysis logic
|
| 110 |
+
|
| 111 |
+
@pytest.mark.parametrize("rsi_value,expected_signal", [
|
| 112 |
+
(80, 'overbought'),
|
| 113 |
+
(20, 'oversold'),
|
| 114 |
+
(50, 'neutral')
|
| 115 |
+
])
|
| 116 |
+
def test_rsi_signal_interpretation(self, processor, rsi_value, expected_signal):
|
| 117 |
+
"""Test RSI signal interpretation."""
|
| 118 |
+
# Mock method that interprets RSI values
|
| 119 |
+
with patch.object(processor, '_interpret_rsi') as mock_interpret:
|
| 120 |
+
mock_interpret.return_value = expected_signal
|
| 121 |
+
|
| 122 |
+
result = processor._interpret_rsi(rsi_value)
|
| 123 |
+
assert result == expected_signal
|
| 124 |
+
mock_interpret.assert_called_once_with(rsi_value)
|
| 125 |
+
|
| 126 |
+
def test_volume_analysis(self, processor, sample_price_data):
|
| 127 |
+
"""Test volume analysis functionality."""
|
| 128 |
+
# Test volume trend analysis
|
| 129 |
+
with patch.object(processor, '_analyze_volume') as mock_volume:
|
| 130 |
+
mock_volume.return_value = {'trend': 'increasing', 'strength': 'high'}
|
| 131 |
+
|
| 132 |
+
result = processor._analyze_volume(sample_price_data['Volume'])
|
| 133 |
+
|
| 134 |
+
assert 'trend' in result
|
| 135 |
+
assert 'strength' in result
|
| 136 |
+
|
| 137 |
+
def test_price_volatility_calculation(self, processor, sample_price_data):
|
| 138 |
+
"""Test price volatility calculation."""
|
| 139 |
+
volatility = processor._calculate_volatility(sample_price_data['Close'])
|
| 140 |
+
|
| 141 |
+
assert isinstance(volatility, (float, np.float64))
|
| 142 |
+
assert volatility >= 0
|
| 143 |
+
|
| 144 |
+
def test_support_resistance_levels(self, processor, sample_price_data):
|
| 145 |
+
"""Test support and resistance level identification."""
|
| 146 |
+
levels = processor._find_support_resistance(sample_price_data)
|
| 147 |
+
|
| 148 |
+
assert isinstance(levels, dict)
|
| 149 |
+
assert 'support' in levels
|
| 150 |
+
assert 'resistance' in levels
|
| 151 |
+
|
| 152 |
+
def test_error_handling_with_nan_values(self, processor):
|
| 153 |
+
"""Test error handling with NaN values in data."""
|
| 154 |
+
nan_data = pd.DataFrame({
|
| 155 |
+
'Open': [100, np.nan, 102],
|
| 156 |
+
'High': [105, 107, np.nan],
|
| 157 |
+
'Low': [95, 96, 97],
|
| 158 |
+
'Close': [102, 104, np.nan],
|
| 159 |
+
'Volume': [1000000, 1100000, 1200000]
|
| 160 |
+
})
|
| 161 |
+
|
| 162 |
+
# Should handle NaN values gracefully
|
| 163 |
+
try:
|
| 164 |
+
result = processor.process_data(nan_data)
|
| 165 |
+
# Verify result is still valid despite NaN values
|
| 166 |
+
assert result is not None
|
| 167 |
+
except ValueError as e:
|
| 168 |
+
# Or should raise appropriate error
|
| 169 |
+
assert "NaN" in str(e) or "missing" in str(e).lower()
|
| 170 |
+
|
| 171 |
+
def test_concurrent_processing(self, processor, sample_price_data):
|
| 172 |
+
"""Test concurrent data processing."""
|
| 173 |
+
import threading
|
| 174 |
+
import time
|
| 175 |
+
|
| 176 |
+
results = []
|
| 177 |
+
|
| 178 |
+
def process_data_thread():
|
| 179 |
+
result = processor.process_data(sample_price_data)
|
| 180 |
+
results.append(result)
|
| 181 |
+
|
| 182 |
+
# Create multiple threads
|
| 183 |
+
threads = []
|
| 184 |
+
for _ in range(3):
|
| 185 |
+
thread = threading.Thread(target=process_data_thread)
|
| 186 |
+
threads.append(thread)
|
| 187 |
+
thread.start()
|
| 188 |
+
|
| 189 |
+
# Wait for all threads to complete
|
| 190 |
+
for thread in threads:
|
| 191 |
+
thread.join()
|
| 192 |
+
|
| 193 |
+
# Verify all threads completed successfully
|
| 194 |
+
assert len(results) == 3
|
| 195 |
+
for result in results:
|
| 196 |
+
assert result is not None
|
| 197 |
+
|
| 198 |
+
@pytest.mark.slow
|
| 199 |
+
def test_large_dataset_processing(self, processor):
|
| 200 |
+
"""Test processing of large datasets."""
|
| 201 |
+
# Create large dataset
|
| 202 |
+
large_dates = pd.date_range(start='2020-01-01', end='2024-01-01', freq='D')
|
| 203 |
+
large_data = pd.DataFrame({
|
| 204 |
+
'Open': np.random.uniform(100, 110, len(large_dates)),
|
| 205 |
+
'High': np.random.uniform(110, 120, len(large_dates)),
|
| 206 |
+
'Low': np.random.uniform(90, 100, len(large_dates)),
|
| 207 |
+
'Close': np.random.uniform(100, 110, len(large_dates)),
|
| 208 |
+
'Volume': np.random.randint(1000000, 5000000, len(large_dates))
|
| 209 |
+
}, index=large_dates)
|
| 210 |
+
|
| 211 |
+
start_time = time.time()
|
| 212 |
+
result = processor.process_data(large_data)
|
| 213 |
+
processing_time = time.time() - start_time
|
| 214 |
+
|
| 215 |
+
assert result is not None
|
| 216 |
+
# Verify processing time is reasonable (adjust threshold as needed)
|
| 217 |
+
assert processing_time < 30 # seconds
|
| 218 |
+
|
| 219 |
+
def test_memory_usage(self, processor, sample_price_data):
|
| 220 |
+
"""Test memory usage during processing."""
|
| 221 |
+
import psutil
|
| 222 |
+
import os
|
| 223 |
+
|
| 224 |
+
process = psutil.Process(os.getpid())
|
| 225 |
+
initial_memory = process.memory_info().rss
|
| 226 |
+
|
| 227 |
+
# Process data multiple times
|
| 228 |
+
for _ in range(10):
|
| 229 |
+
processor.process_data(sample_price_data)
|
| 230 |
+
|
| 231 |
+
final_memory = process.memory_info().rss
|
| 232 |
+
memory_increase = final_memory - initial_memory
|
| 233 |
+
|
| 234 |
+
# Memory increase should be reasonable (adjust threshold as needed)
|
| 235 |
+
assert memory_increase < 100 * 1024 * 1024 # 100MB
|
| 236 |
+
|
| 237 |
+
def test_configuration_options(self, processor):
|
| 238 |
+
"""Test processor configuration options."""
|
| 239 |
+
# Test with different configuration
|
| 240 |
+
config = {
|
| 241 |
+
'window_size': 20,
|
| 242 |
+
'smoothing_factor': 0.1,
|
| 243 |
+
'volatility_threshold': 0.02
|
| 244 |
+
}
|
| 245 |
+
|
| 246 |
+
processor_with_config = AdvancedMarketProcessor(config=config)
|
| 247 |
+
assert processor_with_config is not None
|
| 248 |
+
|
| 249 |
+
# Verify configuration is applied
|
| 250 |
+
if hasattr(processor_with_config, 'config'):
|
| 251 |
+
assert processor_with_config.config['window_size'] == 20
|
|
@@ -0,0 +1,332 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Unit tests for VotingStrategy module."""
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
from unittest.mock import Mock, patch, MagicMock
|
| 5 |
+
import numpy as np
|
| 6 |
+
from datetime import datetime
|
| 7 |
+
|
| 8 |
+
# Import the module under test
|
| 9 |
+
try:
|
| 10 |
+
from src.ai.voting_system import VotingStrategy
|
| 11 |
+
except ImportError:
|
| 12 |
+
pytest.skip("VotingStrategy not available", allow_module_level=True)
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
class TestVotingStrategy:
|
| 16 |
+
"""Test cases for VotingStrategy."""
|
| 17 |
+
|
| 18 |
+
@pytest.fixture
|
| 19 |
+
def voting_strategy(self):
|
| 20 |
+
"""Create VotingStrategy instance for testing."""
|
| 21 |
+
return VotingStrategy()
|
| 22 |
+
|
| 23 |
+
@pytest.fixture
|
| 24 |
+
def sample_predictions(self):
|
| 25 |
+
"""Sample predictions from different models."""
|
| 26 |
+
return [
|
| 27 |
+
{'model': 'model_a', 'prediction': 'buy', 'confidence': 0.8},
|
| 28 |
+
{'model': 'model_b', 'prediction': 'sell', 'confidence': 0.6},
|
| 29 |
+
{'model': 'model_c', 'prediction': 'buy', 'confidence': 0.9},
|
| 30 |
+
{'model': 'model_d', 'prediction': 'hold', 'confidence': 0.7}
|
| 31 |
+
]
|
| 32 |
+
|
| 33 |
+
@pytest.fixture
|
| 34 |
+
def weighted_models(self):
|
| 35 |
+
"""Sample model weights for weighted voting."""
|
| 36 |
+
return {
|
| 37 |
+
'model_a': 0.3,
|
| 38 |
+
'model_b': 0.2,
|
| 39 |
+
'model_c': 0.4,
|
| 40 |
+
'model_d': 0.1
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
def test_voting_strategy_initialization(self, voting_strategy):
|
| 44 |
+
"""Test VotingStrategy initialization."""
|
| 45 |
+
assert voting_strategy is not None
|
| 46 |
+
assert hasattr(voting_strategy, 'vote')
|
| 47 |
+
assert hasattr(voting_strategy, 'majority_vote')
|
| 48 |
+
|
| 49 |
+
def test_majority_vote_clear_winner(self, voting_strategy):
|
| 50 |
+
"""Test majority voting with clear winner."""
|
| 51 |
+
predictions = [
|
| 52 |
+
{'prediction': 'buy', 'confidence': 0.8},
|
| 53 |
+
{'prediction': 'buy', 'confidence': 0.7},
|
| 54 |
+
{'prediction': 'buy', 'confidence': 0.9},
|
| 55 |
+
{'prediction': 'sell', 'confidence': 0.6}
|
| 56 |
+
]
|
| 57 |
+
|
| 58 |
+
result = voting_strategy.majority_vote(predictions)
|
| 59 |
+
|
| 60 |
+
assert result['decision'] == 'buy'
|
| 61 |
+
assert 'confidence' in result
|
| 62 |
+
assert result['confidence'] > 0
|
| 63 |
+
|
| 64 |
+
def test_majority_vote_tie(self, voting_strategy):
|
| 65 |
+
"""Test majority voting with tie scenario."""
|
| 66 |
+
predictions = [
|
| 67 |
+
{'prediction': 'buy', 'confidence': 0.8},
|
| 68 |
+
{'prediction': 'sell', 'confidence': 0.7},
|
| 69 |
+
{'prediction': 'buy', 'confidence': 0.6},
|
| 70 |
+
{'prediction': 'sell', 'confidence': 0.9}
|
| 71 |
+
]
|
| 72 |
+
|
| 73 |
+
result = voting_strategy.majority_vote(predictions)
|
| 74 |
+
|
| 75 |
+
# Should handle tie appropriately
|
| 76 |
+
assert 'decision' in result
|
| 77 |
+
assert result['decision'] in ['buy', 'sell', 'hold']
|
| 78 |
+
|
| 79 |
+
def test_weighted_vote(self, voting_strategy, sample_predictions, weighted_models):
|
| 80 |
+
"""Test weighted voting mechanism."""
|
| 81 |
+
result = voting_strategy.weighted_vote(sample_predictions, weighted_models)
|
| 82 |
+
|
| 83 |
+
assert 'decision' in result
|
| 84 |
+
assert 'confidence' in result
|
| 85 |
+
assert 'weighted_score' in result
|
| 86 |
+
assert result['decision'] in ['buy', 'sell', 'hold']
|
| 87 |
+
|
| 88 |
+
def test_confidence_weighted_vote(self, voting_strategy, sample_predictions):
|
| 89 |
+
"""Test confidence-weighted voting."""
|
| 90 |
+
result = voting_strategy.confidence_weighted_vote(sample_predictions)
|
| 91 |
+
|
| 92 |
+
assert 'decision' in result
|
| 93 |
+
assert 'confidence' in result
|
| 94 |
+
# Higher confidence predictions should have more influence
|
| 95 |
+
assert result['confidence'] > 0
|
| 96 |
+
|
| 97 |
+
@pytest.mark.parametrize("voting_method", [
|
| 98 |
+
'majority',
|
| 99 |
+
'weighted',
|
| 100 |
+
'confidence_weighted',
|
| 101 |
+
'unanimous'
|
| 102 |
+
])
|
| 103 |
+
def test_different_voting_methods(self, voting_strategy, sample_predictions, voting_method):
|
| 104 |
+
"""Test different voting methods."""
|
| 105 |
+
if voting_method == 'majority':
|
| 106 |
+
result = voting_strategy.majority_vote(sample_predictions)
|
| 107 |
+
elif voting_method == 'weighted':
|
| 108 |
+
weights = {'model_a': 0.4, 'model_b': 0.3, 'model_c': 0.2, 'model_d': 0.1}
|
| 109 |
+
result = voting_strategy.weighted_vote(sample_predictions, weights)
|
| 110 |
+
elif voting_method == 'confidence_weighted':
|
| 111 |
+
result = voting_strategy.confidence_weighted_vote(sample_predictions)
|
| 112 |
+
elif voting_method == 'unanimous':
|
| 113 |
+
result = voting_strategy.unanimous_vote(sample_predictions)
|
| 114 |
+
|
| 115 |
+
assert isinstance(result, dict)
|
| 116 |
+
assert 'decision' in result
|
| 117 |
+
|
| 118 |
+
def test_unanimous_vote_success(self, voting_strategy):
|
| 119 |
+
"""Test unanimous voting when all models agree."""
|
| 120 |
+
unanimous_predictions = [
|
| 121 |
+
{'prediction': 'buy', 'confidence': 0.8},
|
| 122 |
+
{'prediction': 'buy', 'confidence': 0.7},
|
| 123 |
+
{'prediction': 'buy', 'confidence': 0.9}
|
| 124 |
+
]
|
| 125 |
+
|
| 126 |
+
result = voting_strategy.unanimous_vote(unanimous_predictions)
|
| 127 |
+
|
| 128 |
+
assert result['decision'] == 'buy'
|
| 129 |
+
assert result['unanimous'] is True
|
| 130 |
+
|
| 131 |
+
def test_unanimous_vote_failure(self, voting_strategy, sample_predictions):
|
| 132 |
+
"""Test unanimous voting when models disagree."""
|
| 133 |
+
result = voting_strategy.unanimous_vote(sample_predictions)
|
| 134 |
+
|
| 135 |
+
assert result['unanimous'] is False
|
| 136 |
+
assert result['decision'] in ['buy', 'sell', 'hold', 'no_consensus']
|
| 137 |
+
|
| 138 |
+
def test_empty_predictions(self, voting_strategy):
|
| 139 |
+
"""Test voting with empty predictions list."""
|
| 140 |
+
empty_predictions = []
|
| 141 |
+
|
| 142 |
+
with pytest.raises((ValueError, IndexError)):
|
| 143 |
+
voting_strategy.majority_vote(empty_predictions)
|
| 144 |
+
|
| 145 |
+
def test_invalid_prediction_format(self, voting_strategy):
|
| 146 |
+
"""Test voting with invalid prediction format."""
|
| 147 |
+
invalid_predictions = [
|
| 148 |
+
{'invalid_key': 'value'},
|
| 149 |
+
{'another_invalid': 'format'}
|
| 150 |
+
]
|
| 151 |
+
|
| 152 |
+
with pytest.raises((KeyError, ValueError)):
|
| 153 |
+
voting_strategy.majority_vote(invalid_predictions)
|
| 154 |
+
|
| 155 |
+
def test_confidence_threshold_filtering(self, voting_strategy):
|
| 156 |
+
"""Test filtering predictions by confidence threshold."""
|
| 157 |
+
mixed_confidence_predictions = [
|
| 158 |
+
{'prediction': 'buy', 'confidence': 0.9}, # High confidence
|
| 159 |
+
{'prediction': 'sell', 'confidence': 0.3}, # Low confidence
|
| 160 |
+
{'prediction': 'buy', 'confidence': 0.8}, # High confidence
|
| 161 |
+
{'prediction': 'hold', 'confidence': 0.2} # Low confidence
|
| 162 |
+
]
|
| 163 |
+
|
| 164 |
+
threshold = 0.5
|
| 165 |
+
result = voting_strategy.vote_with_threshold(mixed_confidence_predictions, threshold)
|
| 166 |
+
|
| 167 |
+
assert 'decision' in result
|
| 168 |
+
assert 'filtered_count' in result
|
| 169 |
+
# Should only consider high-confidence predictions
|
| 170 |
+
assert result['filtered_count'] == 2
|
| 171 |
+
|
| 172 |
+
def test_model_performance_weighting(self, voting_strategy):
|
| 173 |
+
"""Test weighting based on historical model performance."""
|
| 174 |
+
model_performance = {
|
| 175 |
+
'model_a': 0.85, # 85% accuracy
|
| 176 |
+
'model_b': 0.60, # 60% accuracy
|
| 177 |
+
'model_c': 0.92, # 92% accuracy
|
| 178 |
+
'model_d': 0.70 # 70% accuracy
|
| 179 |
+
}
|
| 180 |
+
|
| 181 |
+
predictions = [
|
| 182 |
+
{'model': 'model_a', 'prediction': 'buy', 'confidence': 0.8},
|
| 183 |
+
{'model': 'model_b', 'prediction': 'sell', 'confidence': 0.6},
|
| 184 |
+
{'model': 'model_c', 'prediction': 'buy', 'confidence': 0.9},
|
| 185 |
+
{'model': 'model_d', 'prediction': 'hold', 'confidence': 0.7}
|
| 186 |
+
]
|
| 187 |
+
|
| 188 |
+
result = voting_strategy.performance_weighted_vote(predictions, model_performance)
|
| 189 |
+
|
| 190 |
+
assert 'decision' in result
|
| 191 |
+
assert 'performance_weighted_score' in result
|
| 192 |
+
# Model C has highest performance, so 'buy' should be favored
|
| 193 |
+
|
| 194 |
+
def test_adaptive_voting_strategy(self, voting_strategy):
|
| 195 |
+
"""Test adaptive voting that changes strategy based on market conditions."""
|
| 196 |
+
market_conditions = {
|
| 197 |
+
'volatility': 'high',
|
| 198 |
+
'trend': 'bullish',
|
| 199 |
+
'volume': 'above_average'
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
predictions = [
|
| 203 |
+
{'prediction': 'buy', 'confidence': 0.7},
|
| 204 |
+
{'prediction': 'buy', 'confidence': 0.8},
|
| 205 |
+
{'prediction': 'sell', 'confidence': 0.6}
|
| 206 |
+
]
|
| 207 |
+
|
| 208 |
+
result = voting_strategy.adaptive_vote(predictions, market_conditions)
|
| 209 |
+
|
| 210 |
+
assert 'decision' in result
|
| 211 |
+
assert 'strategy_used' in result
|
| 212 |
+
assert 'market_adjustment' in result
|
| 213 |
+
|
| 214 |
+
def test_time_decay_weighting(self, voting_strategy):
|
| 215 |
+
"""Test time-based decay weighting for predictions."""
|
| 216 |
+
from datetime import datetime, timedelta
|
| 217 |
+
|
| 218 |
+
now = datetime.now()
|
| 219 |
+
predictions_with_time = [
|
| 220 |
+
{
|
| 221 |
+
'prediction': 'buy',
|
| 222 |
+
'confidence': 0.8,
|
| 223 |
+
'timestamp': now - timedelta(minutes=1) # Recent
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
'prediction': 'sell',
|
| 227 |
+
'confidence': 0.7,
|
| 228 |
+
'timestamp': now - timedelta(hours=1) # Older
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
'prediction': 'buy',
|
| 232 |
+
'confidence': 0.6,
|
| 233 |
+
'timestamp': now - timedelta(minutes=30) # Medium age
|
| 234 |
+
}
|
| 235 |
+
]
|
| 236 |
+
|
| 237 |
+
result = voting_strategy.time_weighted_vote(predictions_with_time)
|
| 238 |
+
|
| 239 |
+
assert 'decision' in result
|
| 240 |
+
assert 'time_weighted_score' in result
|
| 241 |
+
# Recent predictions should have more weight
|
| 242 |
+
|
| 243 |
+
def test_ensemble_voting_combination(self, voting_strategy, sample_predictions):
|
| 244 |
+
"""Test ensemble voting combining multiple strategies."""
|
| 245 |
+
strategies = ['majority', 'confidence_weighted', 'weighted']
|
| 246 |
+
weights = {'model_a': 0.3, 'model_b': 0.2, 'model_c': 0.4, 'model_d': 0.1}
|
| 247 |
+
|
| 248 |
+
result = voting_strategy.ensemble_vote(sample_predictions, strategies, weights)
|
| 249 |
+
|
| 250 |
+
assert 'decision' in result
|
| 251 |
+
assert 'ensemble_confidence' in result
|
| 252 |
+
assert 'strategy_results' in result
|
| 253 |
+
assert len(result['strategy_results']) == len(strategies)
|
| 254 |
+
|
| 255 |
+
def test_voting_with_abstention(self, voting_strategy):
|
| 256 |
+
"""Test voting mechanism that allows abstention."""
|
| 257 |
+
low_confidence_predictions = [
|
| 258 |
+
{'prediction': 'buy', 'confidence': 0.4},
|
| 259 |
+
{'prediction': 'sell', 'confidence': 0.3},
|
| 260 |
+
{'prediction': 'hold', 'confidence': 0.35}
|
| 261 |
+
]
|
| 262 |
+
|
| 263 |
+
min_confidence = 0.6
|
| 264 |
+
result = voting_strategy.vote_with_abstention(low_confidence_predictions, min_confidence)
|
| 265 |
+
|
| 266 |
+
assert result['decision'] == 'abstain'
|
| 267 |
+
assert 'reason' in result
|
| 268 |
+
|
| 269 |
+
def test_consensus_measurement(self, voting_strategy, sample_predictions):
|
| 270 |
+
"""Test consensus measurement among predictions."""
|
| 271 |
+
consensus_score = voting_strategy.measure_consensus(sample_predictions)
|
| 272 |
+
|
| 273 |
+
assert isinstance(consensus_score, (float, int))
|
| 274 |
+
assert 0 <= consensus_score <= 1
|
| 275 |
+
|
| 276 |
+
def test_prediction_diversity_analysis(self, voting_strategy, sample_predictions):
|
| 277 |
+
"""Test analysis of prediction diversity."""
|
| 278 |
+
diversity_metrics = voting_strategy.analyze_diversity(sample_predictions)
|
| 279 |
+
|
| 280 |
+
assert 'entropy' in diversity_metrics
|
| 281 |
+
assert 'agreement_ratio' in diversity_metrics
|
| 282 |
+
assert 'prediction_distribution' in diversity_metrics
|
| 283 |
+
|
| 284 |
+
@pytest.mark.performance
|
| 285 |
+
def test_voting_performance_large_dataset(self, voting_strategy):
|
| 286 |
+
"""Test voting performance with large number of predictions."""
|
| 287 |
+
import time
|
| 288 |
+
|
| 289 |
+
# Generate large dataset
|
| 290 |
+
large_predictions = []
|
| 291 |
+
for i in range(1000):
|
| 292 |
+
large_predictions.append({
|
| 293 |
+
'model': f'model_{i}',
|
| 294 |
+
'prediction': np.random.choice(['buy', 'sell', 'hold']),
|
| 295 |
+
'confidence': np.random.uniform(0.5, 1.0)
|
| 296 |
+
})
|
| 297 |
+
|
| 298 |
+
start_time = time.time()
|
| 299 |
+
result = voting_strategy.majority_vote(large_predictions)
|
| 300 |
+
processing_time = time.time() - start_time
|
| 301 |
+
|
| 302 |
+
assert result is not None
|
| 303 |
+
assert processing_time < 1.0 # Should complete within 1 second
|
| 304 |
+
|
| 305 |
+
def test_voting_strategy_serialization(self, voting_strategy):
|
| 306 |
+
"""Test serialization and deserialization of voting strategy."""
|
| 307 |
+
import json
|
| 308 |
+
|
| 309 |
+
# Test if strategy can be serialized (for saving/loading)
|
| 310 |
+
strategy_config = {
|
| 311 |
+
'method': 'weighted',
|
| 312 |
+
'weights': {'model_a': 0.4, 'model_b': 0.6},
|
| 313 |
+
'threshold': 0.5
|
| 314 |
+
}
|
| 315 |
+
|
| 316 |
+
serialized = json.dumps(strategy_config)
|
| 317 |
+
deserialized = json.loads(serialized)
|
| 318 |
+
|
| 319 |
+
assert deserialized['method'] == 'weighted'
|
| 320 |
+
assert deserialized['threshold'] == 0.5
|
| 321 |
+
|
| 322 |
+
def test_voting_with_missing_confidence(self, voting_strategy):
|
| 323 |
+
"""Test voting when some predictions lack confidence scores."""
|
| 324 |
+
mixed_predictions = [
|
| 325 |
+
{'prediction': 'buy', 'confidence': 0.8},
|
| 326 |
+
{'prediction': 'sell'}, # Missing confidence
|
| 327 |
+
{'prediction': 'buy', 'confidence': 0.7}
|
| 328 |
+
]
|
| 329 |
+
|
| 330 |
+
# Should handle missing confidence gracefully
|
| 331 |
+
result = voting_strategy.majority_vote(mixed_predictions)
|
| 332 |
+
assert 'decision' in result
|