bugfixing earnings calendar and top stocks
This commit is contained in:
parent 33e40e793d
commit e4e0389ae2
@@ -1,5 +1,5 @@
 import requests
-from datetime import datetime
+from datetime import datetime, timedelta
 import numpy as np
 from scipy.stats import norm
 import time
@@ -12,6 +12,8 @@ import pandas as pd
 from collections import Counter
 import aiohttp
 import asyncio
+import statistics
+
 
 load_dotenv()
 api_key = os.getenv('BENZINGA_API_KEY')
@@ -121,28 +123,35 @@ def get_top_stocks():
     with open(f"json/analyst/all-analyst-data.json", 'r') as file:
         analyst_stats_list = ujson.load(file)
 
-    filtered_data = [item for item in analyst_stats_list if item['analystScore'] >= 5]
+    filtered_data = [item for item in analyst_stats_list if item['analystScore'] >= 4]
 
-    res_list = []
+    # Define the date range for the past 12 months
+    end_date = datetime.now().date()
+    start_date = end_date - timedelta(days=365)
+
+    res_list = []
     for item in filtered_data:
         ticker_list = item['ratingsList']
-        ticker_list = [{'ticker': i['ticker'], 'pt_current': i['pt_current']} for i in ticker_list if i['rating_current'] == 'Strong Buy']
+        # Filter by 'Strong Buy' and ensure the rating is within the last 12 months
+        ticker_list = [{'ticker': i['ticker'], 'adjusted_pt_current': i['adjusted_pt_current'], 'date': i['date']}
+                       for i in ticker_list
+                       if i['rating_current'] == 'Strong Buy'
+                       and start_date <= datetime.strptime(i['date'], '%Y-%m-%d').date() <= end_date]
         if len(ticker_list) > 0:
             #res_list += list(set(ticker_list))
             res_list += ticker_list
 
     # Create a dictionary to store ticker occurrences and corresponding pt_current values
     ticker_data = {}
     for item in res_list:
         ticker = item['ticker']
-        pt_current_str = item['pt_current']
+        pt_current_str = item['adjusted_pt_current']
         if pt_current_str:  # Skip empty strings
             pt_current = float(pt_current_str)
             if ticker in ticker_data:
-                ticker_data[ticker]['sum'] += pt_current
-                ticker_data[ticker]['counter'] += 1
+                ticker_data[ticker]['pt_list'].append(pt_current)
             else:
-                ticker_data[ticker] = {'sum': pt_current, 'counter': 1}
+                ticker_data[ticker] = {'pt_list': [pt_current]}
 
     for ticker, info in ticker_data.items():
         try:
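The new 12-month window calls datetime.strptime(i['date'], '%Y-%m-%d') on every rating, which raises ValueError on an empty or malformed date string and would abort get_top_stocks(). A minimal defensive variant of that check, as a sketch (the within_window helper name and the skip-on-bad-date behavior are assumptions, not part of the commit):

from datetime import datetime, date, timedelta

def within_window(date_str, days=365):
    # Hypothetical guard: True only if date_str parses and falls within the past `days` days.
    try:
        d = datetime.strptime(date_str, '%Y-%m-%d').date()
    except (TypeError, ValueError):
        return False  # skip ratings with missing or malformed dates
    end_date = date.today()
    return end_date - timedelta(days=days) <= d <= end_date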
@@ -156,13 +165,22 @@ def get_top_stocks():
             info['name'] = None
             info['marketCap'] = None
 
-    # Calculate average pt_current for each ticker
+    # Calculate median pt_current for each ticker
     for ticker, info in ticker_data.items():
-        info['average'] = round(info['sum'] / info['counter'],2)
+        if info['pt_list']:
+            info['median'] = round(statistics.median(info['pt_list']), 2)
 
     # Convert the dictionary back to a list format
-    result = [{'ticker': ticker, 'upside': round((info['average']/info.get('price')-1)*100, 2) if info.get('price') else None, 'priceTarget': info['average'], 'price': info['price'], 'counter': info['counter'], 'name': info['name'], 'marketCap': info['marketCap']} for ticker, info in ticker_data.items()]
-    result = [item for item in result if item['upside'] is not None and item['upside'] >= 5 and item['upside'] <= 250] #filter outliners
+    result = [{'ticker': ticker,
+               'upside': round((info['median']/info.get('price')-1)*100, 2) if info.get('price') else None,
+               'priceTarget': info['median'],
+               'price': info['price'],
+               'counter': len(info['pt_list']),
+               'name': info['name'],
+               'marketCap': info['marketCap']}
+              for ticker, info in ticker_data.items()]
+
+    result = [item for item in result if item['upside'] is not None and item['upside'] >= 5 and item['upside'] <= 250] # Filter outliers
 
     result_sorted = sorted(result, key=lambda x: x['counter'] if x['counter'] is not None else float('-inf'), reverse=True)
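Switching the aggregated price target from a running mean to statistics.median makes it robust to a single extreme analyst call, which is likely why the upside outlier filter after it could stay unchanged. A quick illustration with made-up numbers:

import statistics

price_targets = [12.0, 14.0, 13.5, 95.0]  # one outlier analyst call
print(round(sum(price_targets) / len(price_targets), 2))  # 33.62 -- the mean is dragged up
print(round(statistics.median(price_targets), 2))         # 13.75 -- the median is not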
@@ -423,3 +441,4 @@ async def run():
 
 if __name__ == "__main__":
     asyncio.run(run())
+
@@ -1,132 +1,9 @@
-# -*- coding: utf-8 -*-
-"""
+from edgar import *
+
-SEC Filing Scraper
-
-@author: AdamGetbags
+# Tell the SEC who you are
+set_identity("Michael Mccallum mike.mccalum@indigo.com")
+
-
-"""
-
-# import modules
-import requests
-import pandas as pd
+filings = Company("NVDA").get_filings(form="10-Q").latest(3)
+
-
-# create request header
-headers = {'User-Agent': "email@address.com"}
-
-# get all companies data
-companyTickers = requests.get(
-    "https://www.sec.gov/files/company_tickers.json",
-    headers=headers
-    )
-
-# review response / keys
-print(companyTickers.json().keys())
-
-# format response to dictionary and get first key/value
-firstEntry = companyTickers.json()['0']
-
-# parse CIK // without leading zeros
-directCik = companyTickers.json()['0']['cik_str']
-
-# dictionary to dataframe
-companyData = pd.DataFrame.from_dict(companyTickers.json(),
-                                     orient='index')
-
-# add leading zeros to CIK
-companyData['cik_str'] = companyData['cik_str'].astype(
-    str).str.zfill(10)
-
-# review data
-print(companyData[:1])
-
-cik = companyData[0:1].cik_str[0]
-
-# get company specific filing metadata
-filingMetadata = requests.get(
-    f'https://data.sec.gov/submissions/CIK{cik}.json',
-    headers=headers
-    )
-
-# review json
-print(filingMetadata.json().keys())
-filingMetadata.json()['filings']
-filingMetadata.json()['filings'].keys()
-filingMetadata.json()['filings']['recent']
-filingMetadata.json()['filings']['recent'].keys()
-
-# dictionary to dataframe
-allForms = pd.DataFrame.from_dict(
-    filingMetadata.json()['filings']['recent']
-    )
-
-# review columns
-allForms.columns
-allForms[['accessionNumber', 'reportDate', 'form']].head(50)
-
-# 10-Q metadata
-allForms.iloc[11]
-
-# get company facts data
-companyFacts = requests.get(
-    f'https://data.sec.gov/api/xbrl/companyfacts/CIK{cik}.json',
-    headers=headers
-    )
-
-#review data
-companyFacts.json().keys()
-companyFacts.json()['facts']
-companyFacts.json()['facts'].keys()
-
-# filing metadata
-companyFacts.json()['facts']['dei'][
-    'EntityCommonStockSharesOutstanding']
-companyFacts.json()['facts']['dei'][
-    'EntityCommonStockSharesOutstanding'].keys()
-companyFacts.json()['facts']['dei'][
-    'EntityCommonStockSharesOutstanding']['units']
-companyFacts.json()['facts']['dei'][
-    'EntityCommonStockSharesOutstanding']['units']['shares']
-companyFacts.json()['facts']['dei'][
-    'EntityCommonStockSharesOutstanding']['units']['shares'][0]
-
-# concept data // financial statement line items
-companyFacts.json()['facts']['us-gaap']
-companyFacts.json()['facts']['us-gaap'].keys()
-
-# different amounts of data available per concept
-companyFacts.json()['facts']['us-gaap']['AccountsPayable']
-companyFacts.json()['facts']['us-gaap']['Revenues']
-companyFacts.json()['facts']['us-gaap']['Assets']
-
-# get company concept data
-companyConcept = requests.get(
-    (
-    f'https://data.sec.gov/api/xbrl/companyconcept/CIK{cik}'
-    f'/us-gaap/Assets.json'
-    ),
-    headers=headers
-    )
-
-# review data
-companyConcept.json().keys()
-companyConcept.json()['units']
-companyConcept.json()['units'].keys()
-companyConcept.json()['units']['USD']
-companyConcept.json()['units']['USD'][0]
-
-# parse assets from single filing
-companyConcept.json()['units']['USD'][0]['val']
-
-# get all filings data
-assetsData = pd.DataFrame.from_dict((
-    companyConcept.json()['units']['USD']))
-
-# review data
-assetsData.columns
-assetsData.form
-
-# get assets from 10Q forms and reset index
-assets10Q = assetsData[assetsData.form == '10-Q']
-assets10Q = assets10Q.reset_index(drop=True)
-
-print(assets10Q)
+print(filings.search("Revenue by Geography"))
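This hunk replaces the hand-rolled SEC EDGAR scraper with the edgar package (edgartools), whose set_identity, Company, get_filings, and latest calls appear in the new file. A self-contained sketch of the same flow, assuming edgartools is installed; the placeholder identity and the attributes printed in the loop are illustrative, not taken from the commit:

from edgar import Company, set_identity

# The SEC requires a declared contact identity for programmatic access
set_identity("Jane Doe jane.doe@example.com")  # placeholder, not the committer's address

# Fetch the three most recent 10-Q filings for NVDA
filings = Company("NVDA").get_filings(form="10-Q").latest(3)

for filing in filings:
    print(filing.form, filing.filing_date, filing.accession_no)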
@@ -819,6 +819,28 @@ async def get_earnings_calendar(con, stock_symbols):
 
         start_date += timedelta(days=1)  # Increment date by one day
 
+    seen_symbols = set()
+    unique_data = []
+
+    for item in res_list:
+        symbol = item.get('symbol')
+        try:
+            with open(f"json/quote/{symbol}.json", 'r') as file:
+                quote = ujson.load(file)
+            try:
+                earnings_date = datetime.strptime(quote['earningsAnnouncement'].split('T')[0], '%Y-%m-%d').strftime('%Y-%m-%d')
+            except:
+                earnings_date = '-'
+        except Exception as e:
+            earnings_date = '-'
+            print(e)
+
+        if symbol is None or symbol not in seen_symbols:
+            # bug in fmp endpoint. Double check that earnings date is the same as in quote endpoint
+            if item['date'] == earnings_date:
+                #print(symbol, item['date'], earnings_date)
+                unique_data.append(item)
+                seen_symbols.add(symbol)
+
     return res_list
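The added block deduplicates the FMP calendar by symbol and cross-checks each entry's date against the cached quote endpoint, since the calendar endpoint can report stale earnings dates; note the unchanged return res_list context line still hands back the unfiltered list rather than unique_data. A standalone sketch of the same step that does return the deduplicated list (the load_quote parameter is a hypothetical stand-in for the json/quote/{symbol}.json file read):

from datetime import datetime

def dedupe_earnings(res_list, load_quote):
    # Keep the first occurrence of each symbol whose calendar date matches
    # the quote endpoint's earningsAnnouncement date.
    seen_symbols, unique_data = set(), []
    for item in res_list:
        symbol = item.get('symbol')
        try:
            raw = load_quote(symbol)['earningsAnnouncement']  # e.g. '2024-05-22T20:00:00.000+0000'
            earnings_date = datetime.strptime(raw.split('T')[0], '%Y-%m-%d').strftime('%Y-%m-%d')
        except Exception:
            earnings_date = '-'
        if symbol not in seen_symbols and item['date'] == earnings_date:
            unique_data.append(item)
            seen_symbols.add(symbol)
    return unique_data  # assumption: the caller wants the deduplicated list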