Update app.py
app.py
CHANGED
@@ -1,12 +1,16 @@
 
-
 from dataclasses import dataclass
 from typing import List, Dict, Tuple
 from datetime import datetime
 from pathlib import Path
-import
 
-# ========== MG360 core (24 items) ==========
 @dataclass(frozen=True)
 class Item:
     code: str

@@ -14,6 +18,7 @@ class Item:
     dimension: str  # "COG", "EMO", "REL", "EJE"
     reverse: bool
 
 ITEMS: List[Item] = [
     # Cognitive (COG)
     Item("COG1", "Antes de decidir, evalúo cómo una acción afecta a otras áreas.", "COG", False),
@@ -54,24 +59,25 @@ DIMENSION_LABELS = {
 }
 
 def invert_if_needed(value: float, reverse: bool) -> float:
     return 6 - value if reverse else value
 
 def score_responses(responses: Dict[str, float]) -> Dict[str, float]:
     dim_values = {d: [] for d in DIMENSIONS}
-    for code, v in responses.items():
-        # Validate 1..5
-        fv = float(v)
-        if not (1 <= fv <= 5):
-            raise ValueError(f"Respuesta fuera de 1-5 en {code}: {fv}")
-    # Computation
     for it in ITEMS:
         v = float(responses[it.code])
         dim_values[it.dimension].append(invert_if_needed(v, it.reverse))
     dim_avg = {d: sum(vals)/len(vals) for d, vals in dim_values.items()}
     vals = list(dim_avg.values())
     avg = sum(vals)/len(vals)
     var = sum((x-avg)**2 for x in vals) / len(vals)
-    stdev = (var
     balance_360 = 1 - (stdev / avg) if avg > 0 else 0.0
     return {**dim_avg, "BALANCE_360": balance_360}
 
@@ -80,6 +86,7 @@ def dominant_axis(dim_scores: Dict[str, float]) -> Tuple[str, float]:
     return best_dim, dim_scores[best_dim]
 
 def interpret(dim_scores: Dict[str, float]) -> Dict[str, str]:
     bal = dim_scores["BALANCE_360"]
     if bal > 0.85:
         eq = "Mentalidad 360 desarrollada"
@@ -94,45 +101,68 @@ def interpret(dim_scores: Dict[str, float]) -> Dict[str, str]:
         "REL": "Conector colaborativo (Relacional)",
         "EJE": "Gestor ejecutor (Ejecucional)",
     }
-    return {
 
-# ========== Clean, symmetric radar ==========
 def radar_plot(dim_scores: Dict[str, float], title: str, out_png: str) -> str:
     import numpy as np
     import matplotlib.pyplot as plt
 
     DIM_ORDER = ["COG", "EMO", "REL", "EJE"]
     labels = [DIMENSION_LABELS[d] for d in DIM_ORDER]
     vals = [float(dim_scores[d]) for d in DIM_ORDER]
 
     angles = np.linspace(0, 2*np.pi, len(labels), endpoint=False)
     angles_cycle = np.concatenate([angles, [angles[0]]])
     vals_cycle = np.concatenate([vals, [vals[0]]])
 
     fig = plt.figure(figsize=(8, 8))
     ax = plt.subplot(111, polar=True)
     ax.set_facecolor("white")
-    fig.subplots_adjust(bottom=0.18)
 
     ax.set_theta_offset(np.pi / 2)
     ax.set_theta_direction(-1)
 
     ax.set_xticks(angles)
     ax.set_xticklabels(labels, fontsize=14, fontweight="bold")
 
     ax.set_ylim(0, 5)
     ax.set_yticks([1, 2, 3, 4, 5])
     ax.set_yticklabels(["1", "2", "3", "4", "5"], fontsize=11)
     ax.yaxis.grid(True, linewidth=0.8, alpha=0.6)
     ax.xaxis.grid(True, linewidth=0.8, alpha=0.6)
 
     ax.plot(angles_cycle, vals_cycle, linewidth=2.2)
     ax.fill(angles_cycle, vals_cycle, alpha=0.18)
 
     for ang, v in zip(angles, vals):
         ax.plot([ang], [v], marker="o", markersize=6)
-        ax.text(ang, min(5, v + 0.22), f"{v:.2f}",
 
     ax.set_title(title, fontsize=22, fontweight="bold", pad=18)
     bal = float(dim_scores.get("BALANCE_360", 0.0))
     dom = max(DIM_ORDER, key=lambda d: dim_scores[d])
@@ -144,94 +174,17 @@ def radar_plot(dim_scores: Dict[str, float], title: str, out_png: str) -> str:
     plt.close(fig)
     return out_png
 
-# ========== Prompt + LLM (Phi, InferenceClient) ==========
-from huggingface_hub import InferenceClient
 
-
-
-
-def build_prompt(scores: Dict[str, float], inter: Dict[str, str]) -> str:
-    lines = [
-        "Eres un coach ejecutivo experto en ICB4 (People, Practice, Perspective).",
-        "Con base en el siguiente diagnóstico MG360 (1–5), genera un plan de desarrollo práctico, accionable y medible.",
-        "",
-        "Resultados por eje:",
-        f"- Cognitiva: {scores['COG']:.2f}/5",
-        f"- Emocional: {scores['EMO']:.2f}/5",
-        f"- Relacional: {scores['REL']:.2f}/5",
-        f"- Ejecucional:{scores['EJE']:.2f}/5",
-        f"- Balance 360: {scores['BALANCE_360']:.3f}",
-        f"- Interpretación: {inter['equilibrio']} | {inter['eje_dominante']}",
-        "",
-        "Instrucciones de salida:",
-        "1) Empieza con un resumen de 2–3 líneas del perfil.",
-        "2) Para cada eje (Cognitiva, Emocional, Relacional, Ejecucional):",
-        "   - Da 3 acciones SMART (específicas, medibles, con plazos).",
-        "   - Incluye 1 indicador sugerido y 1 hábito semanal.",
-        "3) Cierra con un plan de 30 días (semanas 1–4) y riesgos comunes.",
-    ]
-    return "\n".join(lines)
-
-def generate_plan_with_phi(prompt: str) -> str:
-    # No token: return a notice (the user can set HF_TOKEN in the Space Secrets)
-    if not HF_TOKEN:
-        return ("[Aviso] Falta HF_TOKEN. Configúralo como variable de entorno "
-                "en tu Space (Settings → Variables & secrets).")
-
-    # 1) First attempt: InferenceClient
-    try:
-        client = InferenceClient(HF_MODEL_ID, token=HF_TOKEN, timeout=180)
-        out = client.text_generation(
-            prompt,
-            max_new_tokens=600,
-            do_sample=True,
-            temperature=0.7,
-            top_p=0.9,
-            repetition_penalty=1.05,
-            return_full_text=False,
-        )
-        return out.strip()
-    except Exception as e:
-        err1 = f"[InferenceClient] {type(e).__name__}: {e}"
-
-    # 2) Fallback via REST
-    try:
-        url = f"https://api-inference.huggingface.co/models/{HF_MODEL_ID}"
-        headers = {"Authorization": f"Bearer {HF_TOKEN}"}
-        payload = {
-            "inputs": prompt,
-            "parameters": {
-                "max_new_tokens": 600,
-                "do_sample": True,
-                "temperature": 0.7,
-                "top_p": 0.9,
-                "repetition_penalty": 1.05,
-                "return_full_text": False
-            }
-        }
-        r = requests.post(url, headers=headers, json=payload, timeout=180)
-        if r.status_code == 503:
-            return (f"[HF 503] El modelo {HF_MODEL_ID} está cargando o sin recurso. "
-                    f"Detalle: {r.text}")
-        r.raise_for_status()
-        data = r.json()
-        if isinstance(data, list) and data and "generated_text" in data[0]:
-            return data[0]["generated_text"].strip()
-        if isinstance(data, dict) and "generated_text" in data:
-            return data["generated_text"].strip()
-        if isinstance(data, dict) and "error" in data:
-            return f"[HF error] {data.get('error')}"
-        return f"[HF respuesta inesperada] {json.dumps(data)[:400]}..."
-    except Exception as e2:
-        err2 = f"[REST] {type(e2).__name__}: {e2}"
-        return f"[Error al llamar al modelo {HF_MODEL_ID}]\n{err1}\n{err2}"
-
-# ========== Gradio utilities ==========
 def items_schema() -> List[Dict[str, str]]:
     return [{"code": it.code, "text": it.text, "dimension": DIMENSION_LABELS[it.dimension], "reverse": it.reverse} for it in ITEMS]
 
 def _ensure_outdir() -> Path:
-    out_dir = Path("mg360_resultados")
 
 def _evaluate_internal(res_vals: List[int]):
     schema = items_schema()
@@ -248,7 +201,7 @@ def _evaluate_internal(res_vals: List[int]):
     with open(out_json, "w", encoding="utf-8") as f:
         json.dump({"responses": responses, "scores": scores, "interpretation": inter}, f, ensure_ascii=False, indent=2)
 
-    #
     md = [
         "**Resultados**",
         *(f"- {DIMENSION_LABELS[d]}: {scores[d]:.2f}/5" for d in ["COG","EMO","REL","EJE"]),
@@ -258,47 +211,37 @@ def _evaluate_internal(res_vals: List[int]):
         f"- Equilibrio: {inter['equilibrio']}",
         f"- Eje dominante: {inter['eje_dominante']}",
     ]
 
-    # Development plan (AI)
-    plan_prompt = build_prompt(scores, inter)
-    plan_text = generate_plan_with_phi(plan_prompt)
-
-    return str(out_png), "\n".join(md), json.dumps({"responses": responses, "scores": scores, "interpretation": inter}, ensure_ascii=False, indent=2), plan_text
-
-# ========== Gradio app ==========
-import gradio as gr
-
-schema = items_schema()
-with gr.Blocks() as demo:
-    gr.Markdown("# Test MG360 (24 ítems) — Versión Avanzada + Plan IA")
-    gr.Markdown("**Escala 1–5:** 1=**Nunca**, 2=**Rara vez**, 3=**A veces**, 4=**Frecuente**, 5=**Siempre**.")
-
-    with gr.Accordion("Cuestionario (24 ítems)", open=True):
-        gr.Markdown("### Guía de escala: 1=**Nunca** · 2=**Rara vez** · 3=**A veces** · 4=**Frecuente** · 5=**Siempre**")
-        sliders = [
-            gr.Slider(1, 5, step=1, value=3,
-                      label=f"{it['code']} — {it['text']} (1 Nunca · 2 Rara vez · 3 A veces · 4 Frecuente · 5 Siempre)")
-            for it in schema
-        ]
-
-    with gr.Row():
-        btn = gr.Button("Evaluar", scale=1)
-        model_id = gr.Textbox(value=HF_MODEL_ID, label="HF_MODEL_ID (opcional)", scale=3)
-    img = gr.Image(type="filepath", label="Radar 4D (1–5)")
-    md = gr.Markdown()
-    js = gr.Code(language="json", label="Reporte (JSON)")
-    plan= gr.Markdown(label="Plan de desarrollo (IA)")
-
-    def evaluate(*vals):
-        # Allow overriding the model from the UI (session-only, not persisted)
-        global HF_MODEL_ID
-        vals=list(vals)
-        HF_MODEL_ID = vals.pop() or HF_MODEL_ID
-        return _evaluate_internal(vals)
-
-    inputs = sliders + [model_id]
-    btn.click(fn=evaluate, inputs=inputs, outputs=[img, md, js, plan])
-
-# For local execution:
-if __name__ == "__main__":
     demo.launch()
Updated app.py (new side of the diff; added lines are marked with +):

 from dataclasses import dataclass
 from typing import List, Dict, Tuple
 from datetime import datetime
 from pathlib import Path
+import math
+import json
+
+# ============================
+# MG360 core (24 items)
+# ============================
 
 @dataclass(frozen=True)
 class Item:
     code: str
...
     dimension: str  # "COG", "EMO", "REL", "EJE"
     reverse: bool
 
+# 24 items: 6 per axis (3 direct + 3 reverse-keyed)
 ITEMS: List[Item] = [
     # Cognitive (COG)
     Item("COG1", "Antes de decidir, evalúo cómo una acción afecta a otras áreas.", "COG", False),
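
As a quick check of the item-bank layout described by the new comment, the list can be inspected directly. This is a minimal sketch, not part of the commit, and assumes the file is saved as app.py and importable:

from collections import Counter
from app import ITEMS

print(Counter(it.dimension for it in ITEMS))   # expected: 6 items per dimension
print(sum(1 for it in ITEMS if it.reverse))    # expected: 12 reverse-keyed items (3 per axis)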
 }
 
 def invert_if_needed(value: float, reverse: bool) -> float:
+    """Invert a 1-5 Likert value when the item is reverse-keyed (6 - v)."""
     return 6 - value if reverse else value
 
 def score_responses(responses: Dict[str, float]) -> Dict[str, float]:
+    """Compute per-dimension averages and BALANCE_360."""
     dim_values = {d: [] for d in DIMENSIONS}
     for it in ITEMS:
+        if it.code not in responses:
+            raise ValueError(f"Falta respuesta para {it.code}")
         v = float(responses[it.code])
+        if not (1 <= v <= 5):
+            raise ValueError(f"Respuesta fuera de rango 1-5 en {it.code}: {v}")
         dim_values[it.dimension].append(invert_if_needed(v, it.reverse))
+
     dim_avg = {d: sum(vals)/len(vals) for d, vals in dim_values.items()}
     vals = list(dim_avg.values())
     avg = sum(vals)/len(vals)
     var = sum((x-avg)**2 for x in vals) / len(vals)
+    stdev = math.sqrt(var)
     balance_360 = 1 - (stdev / avg) if avg > 0 else 0.0
     return {**dim_avg, "BALANCE_360": balance_360}
 
...
     return best_dim, dim_scores[best_dim]
 
 def interpret(dim_scores: Dict[str, float]) -> Dict[str, str]:
+    """Interpret the balance score and the dominant axis."""
     bal = dim_scores["BALANCE_360"]
     if bal > 0.85:
         eq = "Mentalidad 360 desarrollada"
...
         "REL": "Conector colaborativo (Relacional)",
         "EJE": "Gestor ejecutor (Ejecucional)",
     }
+    return {
+        "equilibrio": eq,
+        "eje_dominante": f"{perfiles[best]} — {DIMENSION_LABELS[best]} ({val:.2f}/5)"
+    }
+
+# ============================
+# Radar PRO (matplotlib)
+# ============================
 
 def radar_plot(dim_scores: Dict[str, float], title: str, out_png: str) -> str:
+    """
+    Clean, symmetric 4D radar (stable pattern):
+    - full circle (0..2π)
+    - Cognitive at the top, clockwise
+    - rings 1..5 with clear labels
+    - numeric value on every axis
+    """
     import numpy as np
     import matplotlib.pyplot as plt
 
+    # Fixed dimension order (matches the per-dimension averages)
     DIM_ORDER = ["COG", "EMO", "REL", "EJE"]
     labels = [DIMENSION_LABELS[d] for d in DIM_ORDER]
     vals = [float(dim_scores[d]) for d in DIM_ORDER]
 
+    # Close the polygon (repeat the first point at the end)
     angles = np.linspace(0, 2*np.pi, len(labels), endpoint=False)
+    angles = np.roll(angles, -0)  # keep the base order unrotated
     angles_cycle = np.concatenate([angles, [angles[0]]])
     vals_cycle = np.concatenate([vals, [vals[0]]])
 
+    # Polar canvas
     fig = plt.figure(figsize=(8, 8))
     ax = plt.subplot(111, polar=True)
     ax.set_facecolor("white")
 
+    # Cognitive at the top (90 degrees), clockwise direction
     ax.set_theta_offset(np.pi / 2)
     ax.set_theta_direction(-1)
 
+    # Category ticks
     ax.set_xticks(angles)
     ax.set_xticklabels(labels, fontsize=14, fontweight="bold")
 
+    # Radial scale 0-5 with visible rings
     ax.set_ylim(0, 5)
     ax.set_yticks([1, 2, 3, 4, 5])
     ax.set_yticklabels(["1", "2", "3", "4", "5"], fontsize=11)
     ax.yaxis.grid(True, linewidth=0.8, alpha=0.6)
     ax.xaxis.grid(True, linewidth=0.8, alpha=0.6)
 
+    # Polygon (outline + soft fill), no explicit colors
     ax.plot(angles_cycle, vals_cycle, linewidth=2.2)
     ax.fill(angles_cycle, vals_cycle, alpha=0.18)
 
+    # Markers plus a value label on each axis
     for ang, v in zip(angles, vals):
         ax.plot([ang], [v], marker="o", markersize=6)
+        ax.text(ang, min(5, v + 0.22), f"{v:.2f}",
+                ha="center", va="center", fontsize=11, fontweight="bold")
 
+    # Titles
     ax.set_title(title, fontsize=22, fontweight="bold", pad=18)
     bal = float(dim_scores.get("BALANCE_360", 0.0))
     dom = max(DIM_ORDER, key=lambda d: dim_scores[d])
...
     plt.close(fig)
     return out_png
 
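
The scoring, interpretation and radar pieces above can be exercised end to end. The following is a minimal sketch, not part of the commit; it assumes the file is saved as app.py, that the hidden hunk lines save the figure to out_png before closing it, and that numpy and matplotlib are installed. The answers are synthetic (every item set to 3):

from app import ITEMS, score_responses, interpret, radar_plot

responses = {it.code: 3 for it in ITEMS}   # synthetic answers, one per item
scores = score_responses(responses)
# With identical answers all four averages are 3.0, the standard deviation is 0,
# so BALANCE_360 = 1 - 0/3.0 = 1.0 (maximum balance).
print({k: round(v, 2) for k, v in scores.items()})
print(interpret(scores))
print(radar_plot(scores, "MG360 Ejemplo", "radar_demo.png"))  # returns the PNG path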
 
+# ============================
+# Gradio App
+# ============================
 def items_schema() -> List[Dict[str, str]]:
     return [{"code": it.code, "text": it.text, "dimension": DIMENSION_LABELS[it.dimension], "reverse": it.reverse} for it in ITEMS]
 
 def _ensure_outdir() -> Path:
+    out_dir = Path("mg360_resultados")
+    out_dir.mkdir(parents=True, exist_ok=True)
+    return out_dir
 
 def _evaluate_internal(res_vals: List[int]):
     schema = items_schema()
...
     with open(out_json, "w", encoding="utf-8") as f:
         json.dump({"responses": responses, "scores": scores, "interpretation": inter}, f, ensure_ascii=False, indent=2)
 
+    # Results as Markdown
     md = [
         "**Resultados**",
         *(f"- {DIMENSION_LABELS[d]}: {scores[d]:.2f}/5" for d in ["COG","EMO","REL","EJE"]),
...
         f"- Equilibrio: {inter['equilibrio']}",
         f"- Eje dominante: {inter['eje_dominante']}",
     ]
+    return str(out_png), "\n".join(md), json.dumps({"responses": responses, "scores": scores, "interpretation": inter}, ensure_ascii=False, indent=2)
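
The evaluation helper can also be driven without the UI. A sketch, not part of the commit: _evaluate_internal's middle section is not shown in this diff, so this assumes it takes one value per item in ITEMS order, exactly as the sliders supply them:

from app import ITEMS, _evaluate_internal

png_path, results_md, report_json = _evaluate_internal([3] * len(ITEMS))
print(png_path)           # radar PNG written under mg360_resultados/
print(results_md)         # Markdown summary shown in the UI
print(report_json[:200])  # JSON report (truncated here for display)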
+
+# ============ Gradio launcher ============
+def main_gradio():
+    import gradio as gr
+    schema = items_schema()
+
+    with gr.Blocks() as demo:
+        gr.Markdown("# Test MG360 (24 ítems) — Versión Avanzada")
+        gr.Markdown("**Escala 1–5:** 1=**Nunca**, 2=**Rara vez**, 3=**A veces**, 4=**Frecuente**, 5=**Siempre**.")
+
+        with gr.Accordion("Cuestionario (24 ítems)", open=True):
+            gr.Markdown("### Guía de escala: 1=**Nunca** · 2=**Rara vez** · 3=**A veces** · 4=**Frecuente** · 5=**Siempre**")
+            sliders = [
+                gr.Slider(1, 5, step=1, value=3,
+                          label=f"{it['code']} — {it['text']} (1 Nunca · 2 Rara vez · 3 A veces · 4 Frecuente · 5 Siempre)")
+                for it in schema
+            ]
+
+        btn = gr.Button("Evaluar")
+        img = gr.Image(type="filepath", label="Radar 4D (1–5)")
+        md = gr.Markdown()
+        js = gr.Code(language="json", label="Reporte (JSON)")
+
+        # Important: the handler takes multiple parameters (one per slider)
+        def evaluate(*vals):
+            return _evaluate_internal(list(vals))
+
+        btn.click(fn=evaluate, inputs=sliders, outputs=[img, md, js])
 
     demo.launch()
+
+if __name__ == "__main__":
+    main_gradio()
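
A usage note rather than part of the diff: on a Hugging Face Space this app.py is started automatically, while a local run would be "python app.py", which reaches the __main__ guard above and calls main_gradio(); gradio, numpy and matplotlib need to be installed, typically via the Space's requirements.txt.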