import sys
import os
import ccxt
import pandas as pd
import numpy as np
from datetime import datetime
import ta
import argparse
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pickle
import warnings

# Suppress warnings
warnings.filterwarnings('ignore')

# Configuration
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.expand_frame_repr', True)

class MLTechnicalScanner:
    def __init__(self, training_mode=False):
        self.training_mode = training_mode
        self.model = None
        self.model_file = "technical_ml_model.pkl"
        self.training_data_file = "training_data.csv"
        self.min_training_samples = 100
        self.load_ml_model()
        
        # Initialize exchange clients (instantiates every exchange class ccxt exposes;
        # the one requested on the command line is looked up from this dict later)
        self.exchanges = {}
        for exchange_id in ccxt.exchanges:
            exchange_class = getattr(ccxt, exchange_id)
            self.exchanges[exchange_id] = exchange_class()
        
        # ML features configuration
        self.feature_columns = [
            'rsi', 'macd', 'bollinger_upper', 'bollinger_lower', 
            'volume_ma', 'ema_20', 'ema_50', 'adx'
        ]
        
        # Performance tracking
        self.performance_history = pd.DataFrame(columns=[
            'timestamp', 'symbol', 'prediction', 'actual', 'profit'
        ])
        
        # Training data collection
        self.training_data = pd.DataFrame(columns=self.feature_columns + ['target'])
        
    def load_ml_model(self):
        """Load trained ML model if exists"""
        if os.path.exists(self.model_file):
            with open(self.model_file, 'rb') as f:
                self.model = pickle.load(f)
            print("Loaded trained model from file")
        else:
            print("Initializing new model")
            self.model = RandomForestClassifier(n_estimators=100, random_state=42)
    
    def save_ml_model(self):
        """Save trained ML model"""
        with open(self.model_file, 'wb') as f:
            pickle.dump(self.model, f)
        print("Saved model to file")
    
    def load_training_data(self):
        """Load existing training data if available"""
        if os.path.exists(self.training_data_file):
            self.training_data = pd.read_csv(self.training_data_file)
            print(f"Loaded {len(self.training_data)} training samples")
    
    def save_training_data(self):
        """Save training data to file"""
        self.training_data.to_csv(self.training_data_file, index=False)
        print(f"Saved {len(self.training_data)} training samples")
    
    def calculate_features(self, df):
        """Calculate technical indicators"""
        try:
            close = df['close'].astype(float)
            high = df['high'].astype(float)
            low = df['low'].astype(float)
            volume = df['volume'].astype(float)
            
            # Momentum Indicators
            df['rsi'] = ta.momentum.rsi(close, window=14)
            df['macd'] = ta.trend.macd_diff(close)
            
            # Volatility Indicators
            bollinger = ta.volatility.BollingerBands(close)
            df['bollinger_upper'] = bollinger.bollinger_hband()
            df['bollinger_lower'] = bollinger.bollinger_lband()
            
            # Volume Indicators
            df['volume_ma'] = volume.rolling(window=20).mean()
            
            # Trend Indicators
            df['ema_20'] = ta.trend.ema_indicator(close, window=20)
            df['ema_50'] = ta.trend.ema_indicator(close, window=50)
            df['adx'] = ta.trend.adx(high, low, close, window=14)
            
            return df
        except Exception as e:
            print(f"Error calculating features: {str(e)}")
            return None
    
    def train_initial_model(self):
        """Train initial model if we have enough data"""
        self.load_training_data()
        
        if len(self.training_data) >= self.min_training_samples:
            X = self.training_data[self.feature_columns]
            y = self.training_data['target']
            
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=0.2, random_state=42
            )
            
            self.model.fit(X_train, y_train)
            
            # Evaluate model
            preds = self.model.predict(X_test)
            accuracy = accuracy_score(y_test, preds)
            print(f"Initial model trained with accuracy: {accuracy:.2f}")
            
            self.save_ml_model()
            return True
        else:
            print(f"Not enough training data ({len(self.training_data)} samples). Need at least {self.min_training_samples}.")
            return False
    
    def predict_direction(self, features):
        """Predict price direction using ML model"""
        try:
            if self.model is None or not hasattr(self.model, 'classes_'):
                return 0  # Neutral if no model
            
            features = features[self.feature_columns].values.reshape(1, -1)
            return self.model.predict(features)[0]
        except Exception as e:
            print(f"Prediction error: {str(e)}")
            return 0
    
    def collect_training_sample(self, symbol, exchange, timeframe='1h'):
        """Collect data sample for training"""
        try:
            ohlcv = exchange.fetch_ohlcv(symbol, timeframe, limit=100)
            if len(ohlcv) < 50:
                return
                
            df = pd.DataFrame(ohlcv, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
            df = self.calculate_features(df)
            if df is None:
                return
                
            # Label the second-to-last bar with the direction of the move into the
            # most recent bar: 1 = up, -1 = down, 0 = flat.
            current_price = df['close'].iloc[-2]
            future_price = df['close'].iloc[-1]

            price_change = future_price - current_price
            target = 1 if price_change > 0 else (-1 if price_change < 0 else 0)

            features = df[self.feature_columns].iloc[-2].copy()
            features['target'] = target
            
            new_row = pd.DataFrame([features])
            self.training_data = pd.concat([self.training_data, new_row], ignore_index=True)
            print(f"Collected training sample for {symbol}")
            
            if len(self.training_data) % 10 == 0:
                self.save_training_data()
                
        except Exception as e:
            print(f"Error collecting training sample: {str(e)}")
    
    def scan_symbol(self, symbol, exchange, timeframes):
        """Scan symbol for trading opportunities"""
        try:
            primary_tf = timeframes[0]
            ohlcv = exchange.fetch_ohlcv(symbol, primary_tf, limit=100)
            if len(ohlcv) < 50:
                return
                
            df = pd.DataFrame(ohlcv, columns=['timestamp', 'open', 'high', 'low', 'close', 'volume'])
            df = self.calculate_features(df)
            if df is None:
                return
            
            latest = df.iloc[-1].copy()
            features = pd.DataFrame([latest[self.feature_columns]])
            
            if self.training_mode:
                self.collect_training_sample(symbol, exchange, primary_tf)
                return
                
            prediction = self.predict_direction(features)
            
            # Simplified trend detection using EMA crossover
            ema_20 = df['ema_20'].iloc[-1]
            ema_50 = df['ema_50'].iloc[-1]
            price = df['close'].iloc[-1]
            
            uptrend = (ema_20 > ema_50) and (price > ema_20)
            downtrend = (ema_20 < ema_50) and (price < ema_20)
            
            if uptrend and prediction == 1:
                self.alert(symbol, "STRONG UPTREND", timeframes)
            elif downtrend and prediction == -1:
                self.alert(symbol, "STRONG DOWNTREND", timeframes)
            elif uptrend:
                self.alert(symbol, "UPTREND", timeframes)
            elif downtrend:
                self.alert(symbol, "DOWNTREND", timeframes)
                
        except Exception as e:
            print(f"Error scanning {symbol}: {str(e)}")
    
    def alert(self, symbol, trend_type, timeframes):
        """Generate alert for detected trend"""
        message = f"({trend_type}) detected for {symbol} on {timeframes} at {datetime.now()}"
        print(message)

# Main execution
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--exchange", help="Exchange name", required=True)
    parser.add_argument("-f", "--filter", help="Asset filter", required=True)
    parser.add_argument("-tf", "--timeframes", help="Timeframes to scan (comma separated)", required=True)
    parser.add_argument("--train", help="Run in training mode", action="store_true")
    args = parser.parse_args()
    
    scanner = MLTechnicalScanner(training_mode=args.train)
    
    exchange = scanner.exchanges.get(args.exchange.lower())
    if not exchange:
        print(f"Exchange {args.exchange} not supported")
        sys.exit(1)
    
    try:
        markets = exchange.fetch_markets()
    except Exception as e:
        print(f"Error fetching markets: {str(e)}")
        sys.exit(1)
    
    symbols = [
        m['id'] for m in markets 
        if m['active'] and args.filter in m['id']
    ]
    
    if not symbols:
        print(f"No symbols found matching filter {args.filter}")
        sys.exit(1)
    
    if args.train:
        print(f"Running in training mode for {len(symbols)} symbols")
        for symbol in symbols:
            scanner.collect_training_sample(symbol, exchange, args.timeframes.split(',')[0])
        
        if scanner.train_initial_model():
            print("Training completed successfully")
        else:
            print("Not enough data collected for training")
        sys.exit(0)
    
    if not hasattr(scanner.model, 'classes_'):
        print("Warning: No trained model available. Running with basic scanning only.")
    
    timeframes = args.timeframes.split(',')
    print(f"Scanning {len(symbols)} symbols on timeframes {timeframes}")
    
    for symbol in symbols:
        scanner.scan_symbol(symbol, exchange, timeframes)
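
# Example usage (hypothetical script name and argument values; adapt to your setup):
#   python ml_technical_scanner.py --exchange binance --filter USDT --timeframes 1h,4h --train
#   python ml_technical_scanner.py --exchange binance --filter USDT --timeframes 1h,4h
# The first invocation collects labeled samples and trains the model; the second
# scans the matching symbols on the requested timeframes and prints trend alerts.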