```python
# env4hf
from transformers import AutoTokenizer, AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained("Xwin-LM/Xwin-LM-7B-V0.1")
tokenizer = AutoTokenizer.from_pretrained("Xwin-LM/Xwin-LM-7B-V0.1")

prompt = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions. "
    "USER: Hello, can you help me? "
    "ASSISTANT:"
)
inputs = tokenizer(prompt, return_tensors="pt")
# do_sample=True is required for temperature to actually affect generation
samples = model.generate(**inputs, max_new_tokens=4096, do_sample=True, temperature=0.7)
# decode only the newly generated tokens, skipping the prompt
output = tokenizer.decode(samples[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(output)
# Of course! I'm here to help. Please feel free to ask your question or describe
# the issue you're having, and I'll do my best to assist you.
```
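The prompt above follows the Vicuna-style chat template that Xwin-LM expects. For multi-turn use, each completed exchange is appended back into the same template before generating again; here is a minimal sketch of that pattern (the `build_prompt` helper and the `</s>` turn separator are my assumptions, not shown in the original):

```python
# Sketch only: rebuild the Vicuna-style prompt from a list of (user, assistant) turns.
# The last turn's assistant reply is None, signalling the model should continue there.
SYSTEM = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions."
)

def build_prompt(turns):
    parts = [SYSTEM]
    for user, assistant in turns:
        parts.append(f"USER: {user}")
        # Assumed convention: completed assistant turns end with the </s> separator.
        parts.append(f"ASSISTANT: {assistant}</s>" if assistant else "ASSISTANT:")
    return " ".join(parts)

# Reproduces the single-turn prompt used above:
print(build_prompt([("Hello, can you help me?", None)]))
```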
```
Me: a
Spark: I'm sorry, I am an AI language model and I do not understand what you are trying to ask. Can you please provide more context or clarify your question?
[{'role': 'user', 'content': 'a'}, {'role': 'assistant', 'content': "I'm sorry, I am an AI language model and I do not understand what you are trying to ask. Can you please provide more context or clarify your question?"}]

Me: can you speak chinese
Spark: 是的,我可以说中文。有什么我可以帮你的吗? ("Yes, I can speak Chinese. Is there anything I can help you with?")
[{'role': 'user', 'content': 'a'}, {'role': 'assistant', 'content': "I'm sorry, I am an AI language model and I do not understand what you are trying to ask. Can you please provide more context or clarify your question?"}, {'role': 'user', 'content': 'can you speak chinese'}, {'role': 'assistant', 'content': '是的,我可以说中文。有什么我可以帮你的吗?'}]
```
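The printed lists show what the log is really doing: the full history of `{'role', 'content'}` dicts grows by two entries per turn and is resent to the model each time, which is how Spark keeps context across turns. A hypothetical reconstruction of that loop follows; the `send` callable stands in for the actual Spark API call, which is not shown in the original:

```python
# Hypothetical sketch of the chat loop behind the log above; `send` is a placeholder
# for the real Spark client call and is assumed to return the assistant's reply text.
history = []

def ask(question, send):
    """Append the user turn, query the model with the full history, record the reply."""
    history.append({"role": "user", "content": question})
    reply = send(history)  # assumption: takes the message list, returns a string
    history.append({"role": "assistant", "content": reply})
    print(f"Me: {question}")
    print(f"Spark: {reply}")
    print(history)
```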