#!/usr/bin/env python
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from transformers import CodeLlamaTokenizer, LlamaForCausalLM, PipelineTool

CODE_PROMPT = """Here is an example of how I want my code to be: '''{text}'''. Can you generate code for this prompt: '{question}'"""


class CodeGeneratingTool(PipelineTool):
    default_checkpoint = "codellama/CodeLlama-7b-Instruct-hf"
    description = (
        "This is a tool that generates code from a prompt. It takes two arguments: `text`, a template showing "
        "how the user wants their code to be written, and `question`, the prompt describing the code to generate. "
        "It returns the generated code."
    )

    name = "code_generator"
    pre_processor_class = CodeLlamaTokenizer
    model_class = LlamaForCausalLM

    inputs = ["text", "text"]
    outputs = ["text"]

    def encode(self, text: str, question: str):
        # Fill the prompt template and tokenize it into model-ready tensors.
        prompt = CODE_PROMPT.format(text=text, question=question)
        return self.pre_processor(prompt, return_tensors="pt")

    def forward(self, inputs):
        output_ids = self.model.generate(**inputs)

        # Reshape to (batch, num_return_sequences, seq_len) and keep the first
        # generated sequence for the first (and only) input.
        in_b, _ = inputs["input_ids"].shape
        out_b = output_ids.shape[0]
        return output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])[0][0]

    def decode(self, outputs):
        # Convert the generated token ids back into text.
        return self.pre_processor.decode(outputs, skip_special_tokens=True, clean_up_tokenization_spaces=True)
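

# A minimal usage sketch, not part of the tool itself. It assumes enough memory
# to load the 7B checkpoint locally and a `transformers` version that still
# exports the `PipelineTool` (agents/tools) API; the example inputs are made up.
if __name__ == "__main__":
    tool = CodeGeneratingTool()
    generated = tool(
        text="def add(a: int, b: int) -> int:\n    return a + b",
        question="Write a function that multiplies two integers.",
    )
    print(generated)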