basic.py
# SPDX-License-Identifier: Apache-2.0

from vllm import LLM, SamplingParams

# Sample prompts.
prompts = [
    "Hello, my name is",
    "The president of the United States is",
    "The capital of France is",
    "The future of AI is",
]
# Create a sampling params object.
sampling_params = SamplingParams(temperature=0.8, top_p=0.95)
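
# Note (illustrative addition, not part of the original example): SamplingParams
# exposes further knobs such as max_tokens and seed. For instance, a
# deterministic, length-capped variant could look like:
#   sampling_params = SamplingParams(temperature=0.0, max_tokens=64)
# where temperature=0.0 selects greedy decoding in vLLM.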
    
    
def main():
    # Create an LLM.
    llm = LLM(model="facebook/opt-125m")
    # Generate texts from the prompts.
    # The output is a list of RequestOutput objects
    # that contain the prompt, generated text, and other information.
    outputs = llm.generate(prompts, sampling_params)
    # Print the outputs.
    print("\nGenerated Outputs:\n" + "-" * 60)
    for output in outputs:
        prompt = output.prompt
        generated_text = output.outputs[0].text
        print(f"Prompt:    {prompt!r}")
        print(f"Output:    {generated_text!r}")
        print("-" * 60)


if __name__ == "__main__":
    main()
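
# Usage sketch (assumes vLLM is installed, e.g. via `pip install vllm`, and
# that the facebook/opt-125m weights can be fetched from the Hugging Face Hub):
#   python basic.py
# Each prompt is printed back alongside its sampled continuation.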