Improvement of AI model
parent cc60f570bd
commit 85f5dbe075
@@ -317,7 +317,7 @@ async def process_symbol(ticker, con, start_date, end_date):
     split_size = int(len(df) * (1-test_size))
     test_data = df.iloc[split_size:]
     #selected_features = [col for col in df.columns if col not in ['date','price','Target']]
-    best_features = ['freeCashFlowYield', 'cci', 'daily_return', 'cashAndCashEquivalents_to_cashAndShortTermInvestments', 'longTermDebt_to_totalLiabilitiesAndStockholdersEquity', 'longTermDebt_to_totalAssets', 'totalStockholdersEquity_to_totalLiabilitiesAndStockholdersEquity', 'totalStockholdersEquity_to_totalAssets']
+    best_features = ['williams', 'stoch', 'fdi', 'revenue_to_cashAndCashEquivalents', 'revenue_to_cashAndShortTermInvestments', 'costOfRevenue_to_cashAndCashEquivalents', 'costOfRevenue_to_cashAndShortTermInvestments', 'ebitda_to_cashAndShortTermInvestments', 'incomeTaxExpense_to_cashAndCashEquivalents', 'incomeTaxExpense_to_cashAndShortTermInvestments', 'capitalExpenditure_to_cashAndCashEquivalents', 'capitalExpenditure_to_cashAndShortTermInvestments', 'totalCurrentLiabilities_to_cashAndShortTermInvestments', 'netDebt_to_cashAndShortTermInvestments', 'inventory_to_cashAndShortTermInvestments']
     print(f"For the Ticker: {ticker}")
     data = predictor.evaluate_model(test_data[best_features], test_data['Target'])
 
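The evaluation in process_symbol runs on the chronological tail of each ticker's DataFrame. A minimal sketch of that split, assuming a date-sorted pandas DataFrame and a test_size fraction as in the surrounding code (the default of 0.2 below is an assumption, not taken from this repo):

import pandas as pd

def chronological_split(df: pd.DataFrame, test_size: float = 0.2):
    # Keep the earliest (1 - test_size) fraction for training and hold out
    # the most recent rows for out-of-sample evaluation, as in the hunk above.
    split_size = int(len(df) * (1 - test_size))
    train_data = df.iloc[:split_size]
    test_data = df.iloc[split_size:]
    return train_data, test_data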
@@ -391,7 +391,7 @@ async def train_process(tickers, con):
     predictor = ScorePredictor()
     #print(selected_features)
     selected_features = [col for col in df_train if col not in ['price','date','Target']]
-    best_features = predictor.feature_selection(df_train[selected_features], df_train['Target'],k=8)
+    best_features = predictor.feature_selection(df_train[selected_features], df_train['Target'],k=15)
     print(best_features)
     predictor.train_model(df_train[best_features], df_train['Target'])
     predictor.evaluate_model(df_test[best_features], df_test['Target'])
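ScorePredictor.feature_selection itself is not part of this diff; below is a hypothetical sketch of a top-k selector that returns column names, using scikit-learn's SelectKBest. The actual implementation in the repo may work differently.

from sklearn.feature_selection import SelectKBest, f_classif
import pandas as pd

def feature_selection(X: pd.DataFrame, y: pd.Series, k: int = 15) -> list:
    # Score every candidate column against the Target labels and keep the k
    # best; returning names lets the caller slice df_train[best_features].
    selector = SelectKBest(score_func=f_classif, k=k)
    selector.fit(X, y)
    return X.columns[selector.get_support()].tolist()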
@@ -406,7 +406,7 @@ async def run():
 
     if train_mode:
         #Train first model
-        cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap >= 50E9 AND symbol NOT LIKE '%.%'")
+        cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap >= 500E9 AND symbol NOT LIKE '%.%'")
         stock_symbols = [row[0] for row in cursor.fetchall()]
         print('Number of Stocks')
         print(len(stock_symbols))
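The raised cutoff narrows the training universe to mega-cap names (marketCap >= 500E9, i.e. $500B). A small sketch of that query against a SQLite connection; the database path is a placeholder, while the table and column names come from the hunk above:

import sqlite3

con = sqlite3.connect("stocks.db")  # placeholder path, not from this repo
cursor = con.cursor()
cursor.execute(
    "SELECT DISTINCT symbol FROM stocks "
    "WHERE marketCap >= 500E9 AND symbol NOT LIKE '%.%'"
)
stock_symbols = [row[0] for row in cursor.fetchall()]
print(f"Number of Stocks: {len(stock_symbols)}")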
Binary file not shown.
@@ -38,24 +38,24 @@ class ScorePredictor:
         clear_session()
 
         # Input layer
-        inputs = Input(shape=(8,))
+        inputs = Input(shape=(15,))
 
         # First dense layer
-        x = Dense(512, activation='relu')(inputs)
-        x = Dropout(0.5)(x)
+        x = Dense(64, activation='leaky_relu')(inputs)
+        x = Dropout(0.3)(x)
         x = BatchNormalization()(x)
 
         # Additional dense layers
-        for units in [256,128]:
-            x = Dense(units, activation='relu')(x)
-            x = Dropout(0.5)(x)
+        for units in [64,32]:
+            x = Dense(units, activation='leaky_relu')(x)
+            x = Dropout(0.3)(x)
             x = BatchNormalization()(x)
 
         # Reshape for attention mechanism
-        x = Reshape((128, 1))(x)
+        x = Reshape((32, 1))(x)
 
         # Attention mechanism
-        attention = Dense(128, activation='relu')(x)
+        attention = Dense(32, activation='leaky_relu')(x)
         attention = Dense(1, activation='softmax')(attention)
 
         # Apply attention
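Taken together with the optimizer and compile hunk below, the network converges on the shape sketched here. This is a reconstruction for reference only: the imports, the Multiply/Flatten attention application and the two-class softmax head are assumptions (those lines fall outside the visible hunks), and it presumes a Keras version that accepts 'leaky_relu' as a string activation; the layer widths, dropout rate, learning rate and loss are taken from the diff.

from tensorflow.keras.layers import (Input, Dense, Dropout, BatchNormalization,
                                     Reshape, Multiply, Flatten)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

def build_model(num_features=15, num_classes=2):
    inputs = Input(shape=(num_features,))

    # First dense block: narrowed to 64 units, leaky ReLU, lighter 0.3 dropout
    x = Dense(64, activation='leaky_relu')(inputs)
    x = Dropout(0.3)(x)
    x = BatchNormalization()(x)

    # Additional dense blocks, shrunk from [256, 128] to [64, 32]
    for units in [64, 32]:
        x = Dense(units, activation='leaky_relu')(x)
        x = Dropout(0.3)(x)
        x = BatchNormalization()(x)

    # Reshape the 32-unit vector to (steps=32, channels=1) for the attention block
    x = Reshape((32, 1))(x)
    attention = Dense(32, activation='leaky_relu')(x)
    attention = Dense(1, activation='softmax')(attention)

    # Assumed attention application (not visible in the diff): scale each step
    # by its attention weight, then flatten back to a vector
    x = Multiply()([x, attention])
    x = Flatten()(x)

    # Assumed two-class softmax head, matching sparse_categorical_crossentropy
    outputs = Dense(num_classes, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(optimizer=Adam(learning_rate=0.01, clipnorm=1.0),
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model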
@@ -71,7 +71,7 @@ class ScorePredictor:
         model = Model(inputs=inputs, outputs=outputs)
 
         # Optimizer with a lower learning rate
-        optimizer = Adam(learning_rate=0.001, clipnorm=1.0)
+        optimizer = Adam(learning_rate=0.01, clipnorm=1.0)
 
         # Compile the model
         model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy'])
@@ -95,8 +95,8 @@ class ScorePredictor:
         checkpoint = ModelCheckpoint('ml_models/weights/ai-score/weights.keras',
                                      save_best_only=True, save_freq = 1,
                                      monitor='val_loss', mode='min')
-        early_stopping = EarlyStopping(monitor='val_loss', patience=50, restore_best_weights=True)
-        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=30, min_lr=0.001)
+        early_stopping = EarlyStopping(monitor='val_loss', patience=20, restore_best_weights=True)
+        reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, min_lr=0.001)
 
         self.model.fit(X_train, y_train, epochs=100_000, batch_size=32,
                        validation_split=0.1, callbacks=[checkpoint, early_stopping, reduce_lr])
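A small worked sketch of how the tightened callback settings interact, under the assumption that val_loss simply stops improving: ReduceLROnPlateau halves the rate roughly every 10 stagnant epochs (clipped at min_lr), while EarlyStopping ends the run after about 20 stagnant epochs, so only a couple of reductions can fire before training halts. The loop below approximates Keras behaviour rather than reproducing it exactly.

# Approximate LR decay path under the new settings (assumes no further
# improvement in val_loss after some epoch).
initial_lr, factor, min_lr = 0.01, 0.5, 0.001
reduce_patience, stop_patience = 10, 20

lr = initial_lr
for stagnant_epochs in range(1, stop_patience + 1):
    if stagnant_epochs % reduce_patience == 0:
        lr = max(lr * factor, min_lr)  # 0.01 -> 0.005 at epoch 10, -> 0.0025 at epoch 20
print(lr)  # 0.0025; EarlyStopping then halts training (restore_best_weights=True)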
@@ -112,7 +112,7 @@ class ScorePredictor:
 
         # Get the model's predictions
         test_predictions = self.model.predict(X_test)
-        #print(test_predictions)
+        print(test_predictions)
 
         # Extract the probabilities for class 1 (index 1 in the softmax output)
         class_1_probabilities = test_predictions[:, 1]
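evaluate_model scores the held-out rows by taking the class-1 column of the softmax output. A minimal sketch of that step with simple metrics layered on top; the roc_auc_score call and the 0.5 threshold are assumptions for illustration, not code from this repo.

import numpy as np
from sklearn.metrics import accuracy_score, roc_auc_score

def score_predictions(test_predictions: np.ndarray, y_test: np.ndarray) -> dict:
    # Column 1 of the softmax output is the probability of the positive class.
    class_1_probabilities = test_predictions[:, 1]
    predicted_labels = (class_1_probabilities >= 0.5).astype(int)
    return {
        "accuracy": accuracy_score(y_test, predicted_labels),
        "roc_auc": roc_auc_score(y_test, class_1_probabilities),
    }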