Update app.py
app.py CHANGED
@@ -19,6 +19,10 @@ def gradio_predict(input_text):
         max_length=512  # Ensure the sequence doesn't exceed the model's max length
     )
 
+    # Check that we have all necessary inputs
+    if "input_ids" not in tokenized_input or "attention_mask" not in tokenized_input:
+        return "Error: Missing required tokenizer outputs."
+
     # Convert tokenized inputs to numpy arrays and ensure correct shape
     input_ids = np.array(tokenized_input["input_ids"], dtype=np.int64)  # Shape should be [1, 512]
     attention_mask = np.array(tokenized_input["attention_mask"], dtype=np.int64)  # Shape should be [1, 512]
@@ -30,6 +34,9 @@ def gradio_predict(input_text):
     # Prepare decoder input ids if required by your model
     decoder_input_ids = input_ids  # Adjust as needed based on model requirements
 
+    # Print the shapes to check if they are correct
+    print(f"input_ids shape: {input_ids.shape}, attention_mask shape: {attention_mask.shape}")
+
     # Perform inference with ONNX model
     try:
         outputs = session.run(
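For reference, below is a minimal sketch of how the tokenized arrays from this diff are typically fed to onnxruntime. It assumes an encoder-decoder ONNX graph whose input names are "input_ids", "attention_mask" and "decoder_input_ids"; the model path ("model.onnx") and tokenizer checkpoint ("t5-small") are placeholders, not taken from this Space's actual files.

# Sketch only: placeholder paths/checkpoint, assumed ONNX input names.
import numpy as np
import onnxruntime as ort
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")  # hypothetical checkpoint
session = ort.InferenceSession("model.onnx")           # hypothetical model path

text = "example input text"
tokenized_input = tokenizer(
    text,
    return_tensors="np",
    padding="max_length",
    truncation=True,
    max_length=512,  # matches the max_length used in the diff
)

# Cast to int64, as the diff does; expected shape is [1, 512]
input_ids = np.array(tokenized_input["input_ids"], dtype=np.int64)
attention_mask = np.array(tokenized_input["attention_mask"], dtype=np.int64)

# session.run takes the list of output names (None = all outputs)
# and a dict mapping graph input names to numpy arrays.
outputs = session.run(
    None,
    {
        "input_ids": input_ids,
        "attention_mask": attention_mask,
        "decoder_input_ids": input_ids,  # reused as in the diff; adjust per model
    },
)
print(f"first output shape: {outputs[0].shape}")

The added shape check and the print statement in this commit are exactly the kind of guard that catches mismatches between these arrays and the shapes the exported ONNX graph expects.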