File size: 6,971 Bytes
ba18ff2 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 |
#!/usr/bin/env python3
"""
DeepSeek Terminal Chat Interface
A live terminal chat interface for your DeepSeek MCP server.
Use this for interactive conversations anytime!
"""
import asyncio
import sys
import os
import signal
from pathlib import Path
from datetime import datetime
# Add current directory to path for imports
sys.path.insert(0, str(Path(__file__).parent))
from mcp_interface import MCPLLMInterface
from config import config
class DeepSeekChat:
    """Interactive terminal chat with DeepSeek.

    Wraps an MCPLLMInterface: loads the model once, then runs a blocking
    read-eval-print loop.  Slash-commands (/help, /clear, ...) tweak the
    session; everything else is sent to the model.
    """

    # Length of the transient status line we print while waiting on the
    # model; used to blank it out before printing the real response.
    _THINKING = "DeepSeek: Thinking..."

    def __init__(self):
        # MCP/LLM interface; created in initialize().
        self.interface = None
        # Rolling transcript of "User: ..." / "DeepSeek: ..." lines.
        # NOTE(review): kept locally but never passed to the model, so each
        # request is stateless — confirm whether that is intentional.
        self.conversation_history = []
        # chat_loop() runs until this is cleared (by /quit or SIGINT handler).
        self.running = True

    async def initialize(self):
        """Create the MCP interface and load the model.

        Returns:
            bool: True on success; False (with a message printed) on any
            failure, so the caller can exit cleanly.
        """
        print("DeepSeek Terminal Chat")
        print("=" * 50)
        print("Initializing...")
        try:
            # Create interface
            self.interface = MCPLLMInterface(config.model_path)
            # Load model (private API of the interface; may take a while).
            print("Loading DeepSeek model...")
            await self.interface._load_model()
            print("Model loaded successfully!")
            print(f"GPU Layers: {config.n_gpu_layers}")
            print(f"Context Size: {config.n_ctx}")
            print("=" * 50)
            print("Chat started! Type your message and press Enter.")
            print("Commands:")
            print("  /help - Show help")
            print("  /clear - Clear conversation history")
            print("  /config - Show current configuration")
            print("  /quit - Exit chat")
            print("=" * 50)
            return True
        except Exception as e:
            # Broad by design: any init failure (bad path, OOM, ...) should
            # surface as a friendly message, not a traceback.
            print(f"Error initializing: {e}")
            return False

    async def chat_loop(self):
        """Main chat loop: read user input, dispatch commands or chat turns.

        Runs until self.running is cleared.  Note: input() blocks the event
        loop, which is acceptable for this single-task CLI.
        """
        while self.running:
            try:
                # Get user input
                user_input = input("\nYou: ").strip()
                if not user_input:
                    continue
                # Handle commands
                if user_input.startswith('/'):
                    await self.handle_command(user_input)
                    continue
                # Add to conversation history
                self.conversation_history.append(f"User: {user_input}")
                # Show thinking indicator while the model works.
                print(self._THINKING, end="", flush=True)
                # Get response from model
                chat_args = {
                    "message": user_input,
                    "max_tokens": config.default_max_tokens,
                    "temperature": config.default_temperature
                }
                response = await self.interface._handle_chat(chat_args)
                response_text = response[0].text.strip()
                # Blank out the indicator first; a plain "\r" overwrite
                # would leave stale characters when the response line is
                # shorter than the indicator text.
                print("\r" + " " * len(self._THINKING) + "\r", end="")
                print(f"DeepSeek: {response_text}")
                # Add to conversation history
                self.conversation_history.append(f"DeepSeek: {response_text}")
                # Keep conversation history manageable
                if len(self.conversation_history) > 20:
                    self.conversation_history = self.conversation_history[-20:]
            except KeyboardInterrupt:
                print("\n\nChat interrupted. Type /quit to exit or continue chatting.")
                continue
            except Exception as e:
                # Keep the session alive on per-turn failures.
                print(f"\nError: {e}")
                continue

    async def handle_command(self, command):
        """Handle a slash-command entered by the user.

        Args:
            command: Raw user input starting with '/'.  Matching is
                case-insensitive; unknown commands print a hint.
        """
        cmd = command.lower().strip()
        if cmd == '/help':
            print("\nAvailable Commands:")
            print("  /help - Show this help message")
            print("  /clear - Clear conversation history")
            print("  /config - Show current configuration")
            print("  /history - Show conversation history")
            print("  /temp <value> - Set temperature (0.1-2.0)")
            print("  /tokens <value> - Set max tokens (50-2048)")
            print("  /quit - Exit chat")
        elif cmd == '/clear':
            self.conversation_history.clear()
            print("Conversation history cleared.")
        elif cmd == '/config':
            print(f"\nCurrent Configuration:")
            print(f"  Model: {Path(config.model_path).name}")
            print(f"  GPU Layers: {config.n_gpu_layers}")
            print(f"  Context Size: {config.n_ctx}")
            print(f"  Max Tokens: {config.default_max_tokens}")
            print(f"  Temperature: {config.default_temperature}")
            print(f"  Top-p: {config.default_top_p}")
        elif cmd == '/history':
            if self.conversation_history:
                print("\nConversation History:")
                for msg in self.conversation_history[-10:]:  # Show last 10 messages
                    print(f"  {msg}")
            else:
                print("No conversation history.")
        elif cmd.startswith('/temp '):
            try:
                temp = float(cmd.split()[1])
                if 0.1 <= temp <= 2.0:
                    config.default_temperature = temp
                    print(f"Temperature set to {temp}")
                else:
                    print("Temperature must be between 0.1 and 2.0")
            # ValueError: non-numeric value; IndexError: no value given.
            # (Was a bare except, which also swallowed KeyboardInterrupt.)
            except (ValueError, IndexError):
                print("Invalid temperature value. Use: /temp 0.7")
        elif cmd.startswith('/tokens '):
            try:
                tokens = int(cmd.split()[1])
                if 50 <= tokens <= 2048:
                    config.default_max_tokens = tokens
                    print(f"Max tokens set to {tokens}")
                else:
                    print("Max tokens must be between 50 and 2048")
            except (ValueError, IndexError):
                print("Invalid token value. Use: /tokens 512")
        elif cmd == '/quit':
            print("Goodbye!")
            self.running = False
        else:
            print(f"Unknown command: {command}")
            print("Type /help for available commands.")
async def main():
    """Entry point: install a SIGINT handler, then initialize and run the chat."""
    chat = DeepSeekChat()

    def _on_sigint(signum, frame):
        # Ctrl+C requests a graceful stop rather than killing mid-response.
        print("\n\nExiting chat...")
        chat.running = False

    signal.signal(signal.SIGINT, _on_sigint)

    # Guard clause: bail out with a non-zero exit code if init fails.
    ok = await chat.initialize()
    if not ok:
        print("Failed to initialize chat.")
        sys.exit(1)
    await chat.chat_loop()
if __name__ == "__main__":
try:
asyncio.run(main())
except KeyboardInterrupt:
print("\nChat terminated.")
except Exception as e:
print(f"Error: {e}")
sys.exit(1)
|