refactor most shorted stocks
parent e93bc9b780
commit 329028705a
@@ -7,9 +7,23 @@ from selenium.webdriver.chrome.service import Service
 from webdriver_manager.chrome import ChromeDriverManager
 from selenium.webdriver.chrome.options import Options
 from dotenv import load_dotenv
-import sqlite3
 from datetime import datetime
 
+quote_cache = {}
+
+def get_quote_data(symbol):
+    """Get quote data for a symbol from JSON file"""
+    if symbol in quote_cache:
+        return quote_cache[symbol]
+    else:
+        try:
+            with open(f"json/quote/{symbol}.json") as file:
+                quote_data = orjson.loads(file.read())
+                quote_cache[symbol] = quote_data  # Cache the loaded data
+                return quote_data
+        except:
+            return None
+
 def load_json(file_path):
     """Load existing JSON data from file."""
     if os.path.exists(file_path):
@@ -55,14 +69,7 @@ def save_latest_ratings(combined_data, json_file_path, limit=700):
     except Exception as e:
         print(f"An error occurred: {e}")
 
-query_template = """
-    SELECT
-        name
-    FROM
-        stocks
-    WHERE
-        symbol = ?
-"""
-
 SENTIMENT_MAP = {
     "Bullish": "Strong Buy",
@@ -108,7 +115,6 @@ def format_date(date_str):
 
 def main():
     # Load environment variables
-    con = sqlite3.connect('stocks.db')
     load_dotenv()
     url = os.getenv('CRAMER_WEBSITE')
 
@@ -160,13 +166,15 @@ def main():
             if not item['date']:
                 continue  # Skip if date parsing fails
 
-            # Check if the data is already in the file
-            if (item['ticker'], item['date']) not in existing_keys:
-                db_data = pd.read_sql_query(query_template, con, params=(symbol,))
-                res.append({
-                    **item,
-                    'name': db_data['name'].iloc[0]
+            quote_data = get_quote_data(symbol)
+            if quote_data:
+                res.append({**item,
+                    'name': quote_data.get('name'),
+                    'price': round(quote_data.get('price'), 2) if quote_data.get('price') is not None else None,
+                    'changesPercentage': round(quote_data.get('changesPercentage'), 2) if quote_data.get('changesPercentage') is not None else None,
                 })
 
     except Exception as e:
         print(f"Error processing {symbol}: {e}")
@@ -181,7 +189,6 @@ def main():
     finally:
         # Ensure the WebDriver is closed
         driver.quit()
-        con.close()
 
 if __name__ == '__main__':
     main()
@@ -637,6 +637,50 @@ async def get_most_ftd_shares():
     with open("json/stocks-list/list/most-ftd-shares.json", 'wb') as file:
         file.write(orjson.dumps(res_list))
 
+async def get_most_shorted_stocks():
+    with sqlite3.connect('stocks.db') as con:
+        cursor = con.cursor()
+        cursor.execute("PRAGMA journal_mode = wal")
+        cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol NOT LIKE '%.%' AND symbol NOT LIKE '%-%'")
+        symbols = [row[0] for row in cursor.fetchall()]
+
+    res_list = []
+    for symbol in symbols:
+        try:
+            # Look up the short float percentage in the stock screener data
+            short_percent_float = stock_screener_data_dict[symbol].get('shortFloatPercent', None)
+            if short_percent_float is not None and short_percent_float > 10:
+                # Load quote data from JSON file
+                quote_data = await get_quote_data(symbol)
+                if quote_data:
+                    price = round(quote_data.get('price', None), 2)
+                    changesPercentage = round(quote_data.get('changesPercentage'), 2)
+                    market_cap = round(quote_data.get('marketCap', None), 2)
+                    name = quote_data.get('name')
+
+                    # Append stock data to res_list if it meets the criteria
+                    if changesPercentage != 0:
+                        res_list.append({
+                            'symbol': symbol,
+                            'name': name,
+                            'price': price,
+                            'changesPercentage': changesPercentage,
+                            'shortFloatPercent': short_percent_float,
+                        })
+        except:
+            pass
+
+    if res_list:
+        # Sort by short float percentage in descending order and keep the top 100
+        res_list = sorted(res_list, key=lambda x: x['shortFloatPercent'], reverse=True)[:100]
+
+        # Assign rank to each stock
+        for rank, item in enumerate(res_list, start=1):
+            item['rank'] = rank
+
+        # Write the filtered and ranked most-shorted stocks to a JSON file
+        with open("json/stocks-list/list/most-shorted-stocks.json", 'wb') as file:
+            file.write(orjson.dumps(res_list))
+
 async def etf_bitcoin_list():
@@ -872,6 +916,7 @@ async def run():
         get_highest_income_tax(),
         get_most_employees(),
         get_most_ftd_shares(),
+        get_most_shorted_stocks(),
     )
 
@@ -3968,7 +3968,7 @@ async def get_statistics(data: FilterStockList, api_key: str = Security(get_api_
         category_type = 'sector'
     elif filter_list == 'reits':
         category_type = 'industry'
-    elif filter_list in ['most-ftd-shares','highest-income-tax','most-employees','highest-revenue','top-rated-dividend-stocks','penny-stocks','overbought-stocks','oversold-stocks','faang','magnificent-seven','ca','cn','de','gb','il','in','jp','nyse','nasdaq','amex','dowjones','sp500','nasdaq100','all-stock-tickers']:
+    elif filter_list in ['most-shorted-stocks','most-ftd-shares','highest-income-tax','most-employees','highest-revenue','top-rated-dividend-stocks','penny-stocks','overbought-stocks','oversold-stocks','faang','magnificent-seven','ca','cn','de','gb','il','in','jp','nyse','nasdaq','amex','dowjones','sp500','nasdaq100','all-stock-tickers']:
         category_type = 'stocks-list'
     elif filter_list in ['dividend-kings','dividend-aristocrats']:
         category_type = 'dividends'
@@ -1705,68 +1705,6 @@ async def get_ipo_calendar(con, symbols):
 
     return res_sorted
 
-async def get_most_shorted_stocks(con):
-    directory_path = 'json/share-statistics/*.json'
-
-    def filename_has_no_dot(file_path):
-        filename = os.path.basename(file_path)
-        if filename.endswith('.json'):
-            base_name = filename[:-5]  # Remove the .json part
-            # Return True only if there is no dot in the base name
-            if '.' not in base_name:
-                return True
-        return False
-
-    async def read_json_files(directory_path):
-        for file_path in glob.glob(directory_path):
-            if filename_has_no_dot(file_path):
-                try:
-                    async with aiofiles.open(file_path, 'r') as file:
-                        data = await file.read()
-                        json_data = json.loads(data)
-                        yield file_path, json_data
-                except (json.JSONDecodeError, IOError) as e:
-                    print(f"Error reading {file_path}: {e}")
-
-    def extract_elements(file_path, data):
-        symbol = os.path.basename(file_path).rsplit('.', 1)[0]
-        return {
-            "symbol": symbol,
-            "sharesShort": data.get("sharesShort"),
-            "shortRatio": data.get("shortRatio"),
-            "shortOutStandingPercent": data.get("shortOutStandingPercent"),
-            "shortFloatPercent": data.get("shortFloatPercent"),
-            #"sharesShortPriorMonth": data.get("sharesShortPriorMonth"),
-            #"latestOutstandingShares": data.get("latestOutstandingShares"),
-            #"latestFloatShares": data.get("latestFloatShares")
-        }
-
-    # Initialize a list to hold the extracted data
-    extracted_data = []
-
-    # Read and process JSON files
-    async for file_path, json_data in read_json_files(directory_path):
-        element = extract_elements(file_path, json_data)
-        short_outstanding_percent = element.get("shortOutStandingPercent")
-
-        # Check if shortOutStandingPercent is at least 20
-        if short_outstanding_percent is not None and float(short_outstanding_percent) >= 20 and float(short_outstanding_percent) < 100:
-            extracted_data.append(element)
-
-    sorted_list = sorted(extracted_data, key=lambda x: x['shortOutStandingPercent'], reverse=True)
-
-    for index, item in enumerate(sorted_list, start=1):
-        try:
-            symbol = item['symbol']
-            with open(f"json/quote/{symbol}.json") as file:
-                data = orjson.loads(file.read())
-                item['name'] = data['name']
-                item['rank'] = index
-                #item['sector'] = data['sector'].iloc[0]
-        except Exception as e:
-            print(e)
-
-    return sorted_list
-
 async def save_json_files():
     con = sqlite3.connect('stocks.db')
@@ -1809,11 +1747,6 @@ async def save_json_files():
         ujson.dump(dividends_list, file)
 
 
-    data = await get_most_shorted_stocks(con)
-    with open(f"json/most-shorted-stocks/data.json", 'w') as file:
-        ujson.dump(data, file)
-
-
     data = await get_congress_rss_feed(symbols, etf_symbols, crypto_symbols)
     with open(f"json/congress-trading/rss-feed/data.json", 'w') as file:
         ujson.dump(data, file)