Text Generation
Transformers
Safetensors
llama
datadreamer
datadreamer-0.28.0
Synthetic
text-generation-inference
Instructions to use dagger-realms/dagger_to_narrative with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- Transformers
How to use dagger-realms/dagger_to_narrative with Transformers:
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("text-generation", model="dagger-realms/dagger_to_narrative")

# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("dagger-realms/dagger_to_narrative")
model = AutoModelForCausalLM.from_pretrained("dagger-realms/dagger_to_narrative")
- Notebooks
- Google Colab
- Kaggle
- Local Apps
- vLLM
How to use dagger-realms/dagger_to_narrative with vLLM:
Install from pip and serve model
# Install vLLM from pip:
pip install vllm

# Start the vLLM server:
vllm serve "dagger-realms/dagger_to_narrative"

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:8000/v1/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "dagger-realms/dagger_to_narrative",
    "prompt": "Once upon a time,",
    "max_tokens": 512,
    "temperature": 0.5
  }'
Use Docker
docker model run hf.co/dagger-realms/dagger_to_narrative
- SGLang
How to use dagger-realms/dagger_to_narrative with SGLang:
Install from pip and serve model
# Install SGLang from pip:
pip install sglang

# Start the SGLang server:
python3 -m sglang.launch_server \
  --model-path "dagger-realms/dagger_to_narrative" \
  --host 0.0.0.0 \
  --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "dagger-realms/dagger_to_narrative",
    "prompt": "Once upon a time,",
    "max_tokens": 512,
    "temperature": 0.5
  }'
Use Docker images
docker run --gpus all \
  --shm-size 32g \
  -p 30000:30000 \
  -v ~/.cache/huggingface:/root/.cache/huggingface \
  --env "HF_TOKEN=<secret>" \
  --ipc=host \
  lmsysorg/sglang:latest \
  python3 -m sglang.launch_server \
    --model-path "dagger-realms/dagger_to_narrative" \
    --host 0.0.0.0 \
    --port 30000

# Call the server using curl (OpenAI-compatible API):
curl -X POST "http://localhost:30000/v1/completions" \
  -H "Content-Type: application/json" \
  --data '{
    "model": "dagger-realms/dagger_to_narrative",
    "prompt": "Once upon a time,",
    "max_tokens": 512,
    "temperature": 0.5
  }'
- Docker Model Runner
How to use dagger-realms/dagger_to_narrative with Docker Model Runner:
docker model run hf.co/dagger-realms/dagger_to_narrative
{
  "output_dir": "./.datadreamer/train-a-dagger-json--fictional-story-model/_checkpoints",
  "overwrite_output_dir": false,
  "do_train": false,
  "do_eval": true,
  "do_predict": false,
  "evaluation_strategy": "epoch",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 1,
  "per_device_eval_batch_size": 1,
  "per_gpu_train_batch_size": null,
  "per_gpu_eval_batch_size": null,
  "gradient_accumulation_steps": 32,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "learning_rate": 0.001,
  "weight_decay": 0.01,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "num_train_epochs": 500,
  "max_steps": -1,
  "lr_scheduler_type": "linear",
  "lr_scheduler_kwargs": {},
  "warmup_ratio": 0.0,
  "warmup_steps": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "./.datadreamer/train-a-dagger-json--fictional-story-model/_checkpoints/runs/Apr07_00-04-53_nlpgpu07.seas.upenn.edu",
  "logging_strategy": "steps",
  "logging_first_step": false,
  "logging_steps": 1,
  "logging_nan_inf_filter": true,
  "save_strategy": "epoch",
  "save_steps": 500,
  "save_total_limit": 1,
  "save_safetensors": true,
  "save_on_each_node": false,
  "save_only_model": false,
  "no_cuda": false,
  "use_cpu": false,
  "use_mps_device": false,
  "seed": 42,
  "data_seed": null,
  "jit_mode_eval": false,
  "use_ipex": false,
  "bf16": false,
  "fp16": false,
  "fp16_opt_level": "O1",
  "half_precision_backend": "auto",
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": null,
  "local_rank": 0,
  "ddp_backend": null,
  "tpu_num_cores": null,
  "tpu_metrics_debug": false,
  "debug": [],
  "dataloader_drop_last": false,
  "eval_steps": null,
  "dataloader_num_workers": 0,
  "dataloader_prefetch_factor": null,
  "past_index": -1,
  "run_name": "DataDreamer - Train a DAGGER JSON => Fictional Story Model",
  "disable_tqdm": true,
  "remove_unused_columns": true,
  "label_names": null,
  "load_best_model_at_end": true,
  "metric_for_best_model": "eval_perplexity",
  "greater_is_better": false,
  "ignore_data_skip": false,
  "fsdp": [],
  "fsdp_min_num_params": 0,
  "fsdp_config": {
    "min_num_params": 0,
    "xla": false,
    "xla_fsdp_v2": false,
    "xla_fsdp_grad_ckpt": false
  },
  "fsdp_transformer_layer_cls_to_wrap": null,
  "accelerator_config": {
    "split_batches": false,
    "dispatch_batches": null,
    "even_batches": true,
    "use_seedable_sampler": true
  },
  "deepspeed": null,
  "label_smoothing_factor": 0.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "adafactor": false,
  "group_by_length": false,
  "length_column_name": "length",
  "report_to": [],
  "ddp_find_unused_parameters": null,
  "ddp_bucket_cap_mb": null,
  "ddp_broadcast_buffers": null,
  "dataloader_pin_memory": true,
  "dataloader_persistent_workers": false,
  "skip_memory_metrics": true,
  "use_legacy_prediction_loop": false,
  "push_to_hub": false,
  "resume_from_checkpoint": null,
  "hub_model_id": null,
  "hub_strategy": "every_save",
  "hub_token": "<HUB_TOKEN>",
  "hub_private_repo": false,
  "hub_always_push": false,
  "gradient_checkpointing": false,
  "gradient_checkpointing_kwargs": null,
  "include_inputs_for_metrics": false,
  "fp16_backend": "auto",
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>",
  "mp_parameters": "",
  "auto_find_batch_size": false,
  "full_determinism": false,
  "torchdynamo": null,
  "ray_scope": "last",
  "ddp_timeout": 1800,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "dispatch_batches": null,
  "split_batches": null,
  "include_tokens_per_second": false,
  "include_num_input_tokens_seen": false,
  "neftune_noise_alpha": null,
  "optim_target_modules": null
}