update share statistics

This commit is contained in:
MuslemRahimi 2024-08-18 16:34:34 +02:00
parent 6be02b5760
commit 622bc8d9a1
3 changed files with 14 additions and 10 deletions

View File

@@ -85,20 +85,24 @@ async def run():
cursor = con.cursor()
cursor.execute("PRAGMA journal_mode = wal")
cursor.execute("SELECT DISTINCT symbol FROM stocks") cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol NOT LIKE '%.%'")
stock_symbols = [row[0] for row in cursor.fetchall()]
counter = 0
for ticker in tqdm(stock_symbols):
forward_pe_dict, short_dict = await get_data(ticker, con) try:
if forward_pe_dict.keys() and short_dict.keys(): forward_pe_dict, short_dict = await get_data(ticker, con)
await save_as_json(ticker, forward_pe_dict, short_dict) if forward_pe_dict.keys() and short_dict.keys():
await save_as_json(ticker, forward_pe_dict, short_dict)
counter += 1
if counter % 20 == 0:
print(f"Processed {counter} tickers, waiting for 5 seconds...")
await asyncio.sleep(5)
except Exception as e:
print(ticker, e)
counter += 1
if counter % 100 == 0:
print(f"Processed {counter} tickers, waiting for 10 seconds...")
await asyncio.sleep(10)
con.close()

View File

@@ -144,7 +144,7 @@ class FundamentalPredictor:
# Second LSTM layer with dropout and batch normalization
model.add(LSTM(128, return_sequences=True, kernel_regularizer=regularizers.l2(0.01))) model.add(LSTM(256, return_sequences=True, kernel_regularizer=regularizers.l2(0.01)))
model.add(Dropout(0.5))
model.add(BatchNormalization())
@@ -153,7 +153,7 @@ class FundamentalPredictor:
model.add(Dropout(0.5))
model.add(BatchNormalization())
model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(0.01))) model.add(Dense(64, activation='relu', kernel_regularizer=regularizers.l2(0.01)))
model.add(Dropout(0.2))
model.add(BatchNormalization())