|
|
|
""" |
|
DeepSeek Terminal Chat Interface |
|
|
|
A live terminal chat interface for your DeepSeek MCP server. |
|
Use this for interactive conversations anytime! |
|
""" |
|
|
|
import asyncio |
|
import sys |
|
import os |
|
import signal |
|
from pathlib import Path |
|
from datetime import datetime |
|
|
|
|
|
sys.path.insert(0, str(Path(__file__).parent)) |
|
|
|
from mcp_interface import MCPLLMInterface |
|
from config import config |
|
|
|
class DeepSeekChat:
    """Interactive terminal chat session backed by a local DeepSeek model.

    Wraps an ``MCPLLMInterface`` and drives a read-eval-print loop: user
    input is sent to the model, responses are echoed back, and a short
    rolling transcript of the exchange is retained.
    """

    def __init__(self):
        # MCPLLMInterface instance; created lazily in initialize().
        self.interface = None
        # Rolling transcript of "User: ..." / "DeepSeek: ..." lines,
        # capped at 20 entries by chat_loop().
        self.conversation_history = []
        # Main-loop flag; cleared by the /quit command.
        self.running = True

    async def initialize(self):
        """Create the MCP interface, load the model, and print the banner.

        Returns:
            bool: True on success, False if any setup step raised.
        """
        print("DeepSeek Terminal Chat")
        print("=" * 50)
        print("Initializing...")

        try:
            self.interface = MCPLLMInterface(config.model_path)

            print("Loading DeepSeek model...")
            # NOTE(review): relies on a private method of MCPLLMInterface;
            # consider exposing a public load() on the interface instead.
            await self.interface._load_model()

            print("Model loaded successfully!")
            print(f"GPU Layers: {config.n_gpu_layers}")
            print(f"Context Size: {config.n_ctx}")
            print("=" * 50)
            print("Chat started! Type your message and press Enter.")
            print("Commands:")
            print("  /help - Show help")
            print("  /clear - Clear conversation history")
            print("  /config - Show current configuration")
            print("  /quit - Exit chat")
            print("=" * 50)

            return True

        except Exception as e:
            # Setup boundary: report the failure and let the caller decide
            # whether to exit (main() exits with status 1).
            print(f"Error initializing: {e}")
            return False

    async def chat_loop(self):
        """Run the interactive loop until self.running is cleared.

        Reads a line per iteration, dispatches slash commands to
        handle_command(), and forwards everything else to the model.
        """
        while self.running:
            try:
                # NOTE(review): input() blocks the event loop; fine for this
                # single-task CLI, but switch to asyncio.to_thread(input, ...)
                # if other tasks ever need to run concurrently.
                user_input = input("\nYou: ").strip()

                if not user_input:
                    continue

                # Slash-prefixed input is a command, not a chat message.
                if user_input.startswith('/'):
                    await self.handle_command(user_input)
                    continue

                self.conversation_history.append(f"User: {user_input}")

                print("DeepSeek: Thinking...", end="", flush=True)

                chat_args = {
                    "message": user_input,
                    "max_tokens": config.default_max_tokens,
                    "temperature": config.default_temperature
                }

                # NOTE(review): _handle_chat is private to MCPLLMInterface;
                # presumably returns a list of content items with a .text
                # attribute — confirm against the interface's contract.
                response = await self.interface._handle_chat(chat_args)
                response_text = response[0].text.strip()

                # \r rewinds over the "Thinking..." placeholder before
                # printing the real response.
                print(f"\rDeepSeek: {response_text}")

                self.conversation_history.append(f"DeepSeek: {response_text}")

                # Bound memory: keep only the 20 most recent entries.
                if len(self.conversation_history) > 20:
                    self.conversation_history = self.conversation_history[-20:]

            except KeyboardInterrupt:
                print("\n\nChat interrupted. Type /quit to exit or continue chatting.")
                continue
            except Exception as e:
                # Loop-level boundary: report and keep the session alive.
                print(f"\nError: {e}")
                continue

    async def handle_command(self, command):
        """Dispatch a slash command (see /help for the list).

        Args:
            command: Raw user input beginning with '/'; matched
                case-insensitively.
        """
        cmd = command.lower().strip()

        if cmd == '/help':
            print("\nAvailable Commands:")
            print("  /help - Show this help message")
            print("  /clear - Clear conversation history")
            print("  /config - Show current configuration")
            print("  /history - Show conversation history")
            print("  /temp <value> - Set temperature (0.1-2.0)")
            print("  /tokens <value> - Set max tokens (50-2048)")
            print("  /quit - Exit chat")

        elif cmd == '/clear':
            self.conversation_history.clear()
            print("Conversation history cleared.")

        elif cmd == '/config':
            print("\nCurrent Configuration:")
            print(f"  Model: {Path(config.model_path).name}")
            print(f"  GPU Layers: {config.n_gpu_layers}")
            print(f"  Context Size: {config.n_ctx}")
            print(f"  Max Tokens: {config.default_max_tokens}")
            print(f"  Temperature: {config.default_temperature}")
            print(f"  Top-p: {config.default_top_p}")

        elif cmd == '/history':
            if self.conversation_history:
                print("\nConversation History:")
                # Show at most the 10 most recent exchanges.
                for msg in self.conversation_history[-10:]:
                    print(f"  {msg}")
            else:
                print("No conversation history.")

        elif cmd.startswith('/temp '):
            try:
                temp = float(cmd.split()[1])
                if 0.1 <= temp <= 2.0:
                    config.default_temperature = temp
                    print(f"Temperature set to {temp}")
                else:
                    print("Temperature must be between 0.1 and 2.0")
            # Narrowed from a bare except: only a missing or non-numeric
            # argument is expected here.
            except (ValueError, IndexError):
                print("Invalid temperature value. Use: /temp 0.7")

        elif cmd.startswith('/tokens '):
            try:
                tokens = int(cmd.split()[1])
                if 50 <= tokens <= 2048:
                    config.default_max_tokens = tokens
                    print(f"Max tokens set to {tokens}")
                else:
                    print("Max tokens must be between 50 and 2048")
            # Narrowed from a bare except: only a missing or non-numeric
            # argument is expected here.
            except (ValueError, IndexError):
                print("Invalid token value. Use: /tokens 512")

        elif cmd == '/quit':
            print("Goodbye!")
            self.running = False

        else:
            print(f"Unknown command: {command}")
            print("Type /help for available commands.")
|
|
|
async def main():
    """Entry point: build the chat session, initialize it, and run the loop.

    Exits the process with status 1 if initialization fails.
    """
    chat = DeepSeekChat()

    # Let Ctrl+C raise KeyboardInterrupt normally: chat_loop() catches it and
    # tells the user to type /quit, and the __main__ guard catches it on exit.
    # A custom SIGINT handler here previously replaced that default delivery,
    # which made both of those handlers unreachable — and because input()
    # blocks, merely setting chat.running = False could not end the loop
    # until the user pressed Enter anyway.

    if await chat.initialize():
        await chat.chat_loop()
    else:
        print("Failed to initialize chat.")
        sys.exit(1)
|
|
|
if __name__ == "__main__":
    # Run the async entry point, translating the two expected terminal
    # outcomes (Ctrl+C and an unexpected failure) into friendly output.
    exit_code = 0
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\nChat terminated.")
    except Exception as e:
        print(f"Error: {e}")
        exit_code = 1
    if exit_code:
        sys.exit(exit_code)
|
|