#!/usr/bin/env python3
"""
DeepSeek MCP Server Startup Script

This script provides an easy way to start the MCP server with proper configuration
and error handling.
"""

import os
import sys
import argparse
import asyncio
import logging
from pathlib import Path

def setup_logging(verbose: bool = False):
    """Setup logging configuration"""
    level = logging.DEBUG if verbose else logging.INFO
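    # Log to stderr (plus mcp_server.log in the current working directory);
    # stdout is presumably reserved for MCP stdio protocol messages.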
    logging.basicConfig(
        level=level,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[
            logging.StreamHandler(sys.stderr),
            logging.FileHandler('mcp_server.log')
        ]
    )

def check_model_file(model_path: Path) -> bool:
    """Check if the model file exists and is accessible"""
    if not model_path.exists():
        logging.error(f"Model file not found: {model_path}")
        logging.error("Please ensure the DeepSeek model file is placed in the models/ directory")
        return False
    
    if not model_path.is_file():
        logging.error(f"Model path is not a file: {model_path}")
        return False
    
    # Check file size (should be several GB for the model)
    size_gb = model_path.stat().st_size / (1024**3)
    if size_gb < 1:
        logging.warning(f"Model file seems small ({size_gb:.2f} GB). This might not be a valid model file.")
    else:
        logging.info(f"Model file found: {model_path} ({size_gb:.2f} GB)")
    
    return True

def main():
    """Main startup function"""
    parser = argparse.ArgumentParser(description="Start DeepSeek MCP Server")
    parser.add_argument(
        "--model-path", 
        type=str, 
        help="Path to the model file (default: ./models/deepseek-llm-7b-chat-Q6_K.gguf)"
    )
    parser.add_argument(
        "--verbose", 
        action="store_true", 
        help="Enable verbose logging"
    )
    parser.add_argument(
        "--gpu-layers", 
        type=int, 
        default=35, 
        help="Number of GPU layers to use (default: 35)"
    )
    
    args = parser.parse_args()
    
    # Setup logging
    setup_logging(args.verbose)
    logger = logging.getLogger(__name__)
    
    # Determine model path
    if args.model_path:
        model_path = Path(args.model_path)
    else:
        model_path = Path(__file__).parent / "models" / "deepseek-llm-7b-chat-Q6_K.gguf"
    
    # Check model file
    if not check_model_file(model_path):
        sys.exit(1)
    
    # Set environment variable for GPU layers
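    # (MCPLLMInterface is assumed to read MCP_GPU_LAYERS from the environment;
    # the value is not passed to its constructor below.)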
    os.environ['MCP_GPU_LAYERS'] = str(args.gpu_layers)
    
    logger.info("Starting DeepSeek MCP Server...")
    logger.info(f"Model path: {model_path}")
    logger.info(f"GPU layers: {args.gpu_layers}")
    
    # Import and start the server
    try:
        from mcp_interface import MCPLLMInterface
        
        interface = MCPLLMInterface(str(model_path))
        asyncio.run(interface.run())
        
    except KeyboardInterrupt:
        logger.info("Server stopped by user")
    except ImportError as e:
        logger.error(f"Import error: {e}")
        logger.error("Please install required dependencies: pip install -r requirements.txt")
        sys.exit(1)
    except Exception as e:
        logger.exception(f"Error starting server: {e}")
        sys.exit(1)

if __name__ == "__main__":
    main()