hackergeek committed
Commit 5738620 · verified · 1 Parent(s): c506ad9

Update app.py

Files changed (1)
  1. app.py +92 -13
app.py CHANGED
@@ -1,26 +1,105 @@
- 📦 RADIOCAP13 — HuggingFace Space

- Below is a complete multi-file project layout for deploying your image-captioning model as a HuggingFace Space. You can copy/paste these into your repository.

- app.py import gradio as gr import torch from transformers import ViTModel from PIL import Image from torchvision import transforms import json IMG_SIZE = 224 SEQ_LEN = 32 VOCAB_SIZE = 75460 device = torch.device("cuda" if torch.cuda.is_available() else "cpu") transform = transforms.Compose([ transforms.Resize((IMG_SIZE, IMG_SIZE)), transforms.ToTensor(), ]) def preprocess_image(img): if img is None: raise ValueError("Image is None") if not isinstance(img, Image.Image): img = Image.fromarray(img) if img.mode != "RGB": img = img.convert("RGB") return transform(img) class SimpleTokenizer: def __init__(self, word2idx=None): self.word2idx = word2idx or {} self.idx2word = {v: k for k, v in self.word2idx.items()} @classmethod def load(cls, path): with open(f"{path}/vocab.json", "r") as f: word2idx = json.load(f) return cls(word2idx) class BiasDecoder(torch.nn.Module): def __init__(self, feature_dim=768, vocab_size=VOCAB_SIZE): super().__init__() self.token_emb = torch.nn.Embedding(vocab_size, feature_dim) self.pos_emb = torch.nn.Embedding(SEQ_LEN-1, feature_dim) self.final_layer = torch.nn.Linear(feature_dim, vocab_size) def forward(self, img_feat, target_seq): x = self.token_emb(target_seq) pos = torch.arange(x.size(1), device=x.device).clamp(max=self.pos_emb.num_embeddings - 1) x = x + self.pos_emb(pos) x = x + img_feat.unsqueeze(1) return self.final_layer(x) # Load models decoder = BiasDecoder().to(device) decoder.load_state_dict(torch.load("pytorch_model.bin", map_location=device)) decoder.eval() vit = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k").to(device) vit.eval() tokenizer = SimpleTokenizer.load("./") pad_idx = tokenizer.word2idx["<PAD>"] @torch.no_grad() def generate_caption(img): img_tensor = preprocess_image(img).unsqueeze(0).to(device) img_feat = vit(pixel_values=img_tensor).pooler_output beams = [([tokenizer.word2idx["<SOS>"]], 0.0)] beam_size = 3 for _ in range(SEQ_LEN - 1): candidates = [] for seq, score in beams: inp = torch.tensor(seq + [pad_idx] * (SEQ_LEN - len(seq)), device=device).unsqueeze(0) logits = decoder(img_feat, inp) probs = torch.nn.functional.log_softmax(logits[0, len(seq)-1], dim=-1) top_p, top_i = torch.topk(probs, beam_size) for i in range(beam_size): candidates.append((seq + [top_i[i].item()], score + top_p[i].item())) beams = sorted(candidates, key=lambda x: x[1], reverse=True)[:beam_size] if all(s[-1] == tokenizer.word2idx["<EOS>"] for s, _ in beams): break words = [tokenizer.idx2word.get(i, "<UNK>") for i in beams[0][0][1:] if i != pad_idx] return " ".join(words) with gr.Blocks() as demo: gr.Markdown("# RADIOCAP13 — Image Captioning Demo") img_in = gr.Image(type="pil", label="Upload an Image") out = gr.Textbox(label="Generated Caption") btn = gr.Button("Generate Caption") btn.click(generate_caption, inputs=img_in, outputs=out) if __name__ == "__main__": demo.launch() requirements.txt gradio transformers torch torchvision Pillow README.md # RADIOCAP13 — Visual Captioning Model This Space runs a custom image captioning pipeline using: - ViT Base (Google) - Custom BiasDecoder - Custom 75k vocabulary ## 🚀 Running Locally

- pip install -r requirements.txt python app.py

- ## 🖼️ Usage Upload an image and the model will generate a caption using beam search. ## 📁 Files - `app.py` — Gradio interface + model code - `vocab.json` — tokenizer vocabulary - `pytorch_model.bin` — trained decoder weights

- If you'd like, I can add:

- example.jpg automatic CPU/GPU selector badge HuggingFace Space metadata (.huggingface.yaml) progress bars / image preview UI .huggingface.yaml (Space Metadata) # RADIOCAP13 Space Metadata sdk: gradio sdk_version: 4.0 python_version: 3.10 runtime: python3 # Enable GPU if available grants: - gpu: true app_file: app.py # Display information on the HF Space page space_description: | RADIOCAP13 — A custom image captioning model powered by ViT and a BiasDecoder. Upload an image and the model generates a caption using beam search. space_tags: - image-captioning - vision - vit - pytorch - gradio Example Image (example.jpg)

- Include a placeholder. You can drag an actual sample image later.

- (example.jpg placeholder — include any sample image in your repo) Enhanced UI with GPU Status + Progress Bar

- Below is an improved Gradio UI you can swap into app.py.

- with gr.Blocks() as demo: gr.Markdown("# RADIOCAP13 — Image Captioning Demo") gr.Markdown( f"**Device:** {'GPU 🚀' if torch.cuda.is_available() else 'CPU 🐒'}" ) with gr.Row(): img_in = gr.Image(type="pil", label="Upload an Image") out = gr.Textbox(label="Generated Caption") btn = gr.Button("Generate Caption") progress = gr.Markdown("Ready.") def wrapped_generate(img): progress.update("Generating… please wait ⏳") caption = generate_caption(img) progress.update("Done ✔️") return caption btn.click(wrapped_generate, inputs=img_in, outputs=out) All enhancements added ✔️ Metadata file for HF Space GPU badge / indicator Progress bar Support for example image Fully structured Space project

- If you want: 🔥 Auto-download model weights from HuggingFace Hub 🔥 Separate tokenizer/model modules 🔥 Add image thumbnails / gallery samples 🔥 Add beam size slider, temperature, etc.

- Just say “add more” or tell me specific features!

+ # 📦 RADIOCAP13 — HuggingFace Space

+ Below is a complete multi-file project layout for deploying your image-captioning model as a HuggingFace Space.
+ You can copy/paste these into your repository.

+ ---

+ ## **app.py**
+ ```python
+ import gradio as gr
+ import torch
+ from transformers import ViTModel
+ from PIL import Image
+ from torchvision import transforms
+ import json

+ IMG_SIZE = 224
+ SEQ_LEN = 32
+ VOCAB_SIZE = 75460

+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

+ transform = transforms.Compose([
+     transforms.Resize((IMG_SIZE, IMG_SIZE)),
+     transforms.ToTensor(),
+ ])

+ def preprocess_image(img):
+     if img is None:
+         raise ValueError("Image is None")
+     if not isinstance(img, Image.Image):
+         img = Image.fromarray(img)
+     if img.mode != "RGB":
+         img = img.convert("RGB")
+     return transform(img)

+ class SimpleTokenizer:
+     def __init__(self, word2idx=None):
+         self.word2idx = word2idx or {}
+         self.idx2word = {v: k for k, v in self.word2idx.items()}

+     @classmethod
+     def load(cls, path):
+         with open(f"{path}/vocab.json", "r") as f:
+             word2idx = json.load(f)
+         return cls(word2idx)

+ class BiasDecoder(torch.nn.Module):
+     def __init__(self, feature_dim=768, vocab_size=VOCAB_SIZE):
+         super().__init__()
+         self.token_emb = torch.nn.Embedding(vocab_size, feature_dim)
+         self.pos_emb = torch.nn.Embedding(SEQ_LEN-1, feature_dim)
+         self.final_layer = torch.nn.Linear(feature_dim, vocab_size)

+     def forward(self, img_feat, target_seq):
+         x = self.token_emb(target_seq)
+         pos = torch.arange(x.size(1), device=x.device).clamp(max=self.pos_emb.num_embeddings - 1)
+         x = x + self.pos_emb(pos)
+         x = x + img_feat.unsqueeze(1)
+         return self.final_layer(x)

+ # Load models
+ decoder = BiasDecoder().to(device)
+ decoder.load_state_dict(torch.load("pytorch_model.bin", map_location=device))
+ decoder.eval()

+ vit = ViTModel.from_pretrained("google/vit-base-patch16-224-in21k").to(device)
+ vit.eval()
+
+ tokenizer = SimpleTokenizer.load("./")
+ pad_idx = tokenizer.word2idx["<PAD>"]
+
+ @torch.no_grad()
+ def generate_caption(img):
+     img_tensor = preprocess_image(img).unsqueeze(0).to(device)
+     img_feat = vit(pixel_values=img_tensor).pooler_output
+
+     beams = [([tokenizer.word2idx["<SOS>"]], 0.0)]
+     beam_size = 3
+
+     for _ in range(SEQ_LEN - 1):
+         candidates = []
+         for seq, score in beams:
+             inp = torch.tensor(seq + [pad_idx] * (SEQ_LEN - len(seq)), device=device).unsqueeze(0)
+             logits = decoder(img_feat, inp)
+             probs = torch.nn.functional.log_softmax(logits[0, len(seq)-1], dim=-1)
+             top_p, top_i = torch.topk(probs, beam_size)
+             for i in range(beam_size):
+                 candidates.append((seq + [top_i[i].item()], score + top_p[i].item()))
+         beams = sorted(candidates, key=lambda x: x[1], reverse=True)[:beam_size]
+         if all(s[-1] == tokenizer.word2idx["<EOS>"] for s, _ in beams):
+             break
+
+     words = [tokenizer.idx2word.get(i, "<UNK>") for i in beams[0][0][1:] if i != pad_idx]
+     return " ".join(words)
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# RADIOCAP13 — Image Captioning Demo")
+     img_in = gr.Image(type="pil", label="Upload an Image")
+     out = gr.Textbox(label="Generated Caption")
+     btn = gr.Button("Generate Caption")
+     btn.click(generate_caption, inputs=img_in, outputs=out)
+
+ if __name__ == "__main__":
+     demo.launch()
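
For quick verification before pushing, a minimal local smoke test might look like the sketch below. It assumes the Markdown preamble at the top of the new app.py (everything above `import gradio as gr`, including the unclosed code fence) has been stripped so the file is importable Python, that `pytorch_model.bin` and `vocab.json` sit in the working directory, and that `example.jpg` is a placeholder name for any sample image.

```python
# Minimal smoke test for the committed captioning pipeline (sketch, not part of the commit).
# Assumes app.py contains only the Python code shown above and that pytorch_model.bin,
# vocab.json and a sample image are available locally.
from PIL import Image

import app  # importing loads the ViT encoder, decoder weights and vocab; demo.launch() stays behind __main__

img = Image.open("example.jpg")   # placeholder file name for any test image
print(app.generate_caption(img))  # runs beam search and prints the caption
```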