update bulk download endpoint
parent 027028199a
commit 41e6fb236f

app/main.py: 99 lines changed
@@ -302,6 +302,7 @@ class HistoricalPrice(BaseModel):
 
 class BulkDownload(BaseModel):
     tickers: list
+    bulkData: list
 
 class AnalystId(BaseModel):
     analystId: str
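With bulkData on the model, a request now names which data sets to export alongside the tickers. Below is a minimal sketch of a payload the updated endpoint would accept; the "name" and "selected" keys mirror how the handler reads data.bulkData in the second hunk, and the ticker symbols are placeholders:

# Hypothetical payload for POST /bulk-download; symbols are placeholders.
payload = {
    "tickers": ["aapl", "msft"],  # the handler uppercases these
    "bulkData": [
        {"name": "Price Data", "selected": True},       # included in the zip
        {"name": "Dividends Data", "selected": False},  # filtered out
    ],
}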
@@ -4387,43 +4388,89 @@ async def get_stock_data(data: BulkList, api_key: str = Security(get_api_key)):
 
 
 @app.post("/bulk-download")
-async def get_stock(data: BulkDownload, api_key: str = Security(get_api_key)):
-    # Ensure tickers is a list
+async def get_data(data: BulkDownload, api_key: str = Security(get_api_key)):
+    # Ensure tickers are uppercase and unique if needed.
     tickers = [ticker.upper() for ticker in data.tickers]
+    selected_data_items = [item for item in data.bulkData if item.get("selected") is True]
 
-    # Create an in-memory binary stream for the zip archive
+    DATA_TYPE_PATHS = {
+        "Price Data": "json/historical-price/max/{ticker}.json",
+        "Dividends Data": "json/dividends/companies/{ticker}.json",
+        "Options Data": "json/options-historical-data/companies/{ticker}.json"
+    }
+
+    # Create an in-memory binary stream for the zip archive.
     memory_file = io.BytesIO()
 
-    # Open a zipfile for writing into that memory stream
+    # Open the zipfile for writing into the memory stream.
     with zipfile.ZipFile(memory_file, mode="w", compression=zipfile.ZIP_DEFLATED) as zf:
         for ticker in tickers:
-            try:
-                with open(f"json/historical-price/max/{ticker}.json", 'rb') as file:
-                    # Load the JSON data for the ticker
-                    data_list = orjson.loads(file.read())
-            except Exception:
-                data_list = []
-
-            # Convert the JSON data to CSV format. Adjust the keys as needed.
-            csv_buffer = io.StringIO()
-            csv_writer = csv.writer(csv_buffer)
-
-            if data_list:
-                # Write headers using keys from the first record
-                headers = list(data_list[0].keys())
-                csv_writer.writerow(headers)
-                # Write each row
-                for row in data_list:
-                    csv_writer.writerow([row.get(key, "") for key in headers])
-            else:
-                csv_writer.writerow(["No data available"])
-
-            zf.writestr(f"{ticker}.csv", csv_buffer.getvalue())
+            for data_item in selected_data_items:
+                data_type_name = data_item.get("name")
+
+                # For Price Data, we need to merge the adjusted price data.
+                if data_type_name == "Price Data":
+                    # Read historical price data.
+                    try:
+                        with open(f"json/historical-price/max/{ticker}.json", 'rb') as file:
+                            res = orjson.loads(file.read())
+                    except Exception as e:
+                        res = []
+
+                    # Read adjusted price data.
+                    try:
+                        with open(f"json/historical-price/adj/{ticker}.json", 'rb') as file:
+                            adj_res = orjson.loads(file.read())
+                    except Exception as e:
+                        adj_res = []
+
+                    # Create a dictionary mapping date (or time) to the corresponding adjusted price entry.
+                    # We assume "date" in adj_res corresponds to "time" in res.
+                    adj_by_date = {entry["date"]: entry for entry in adj_res if "date" in entry}
+
+                    # Loop over the historical price records and add the adjusted prices if available.
+                    for record in res:
+                        date_key = record.get("time")
+                        if date_key in adj_by_date:
+                            adj_entry = adj_by_date[date_key]
+                            record["adjOpen"] = adj_entry.get("adjOpen")
+                            record["adjHigh"] = adj_entry.get("adjHigh")
+                            record["adjLow"] = adj_entry.get("adjLow")
+                            record["adjClose"] = adj_entry.get("adjClose")
+
+                    json_data = res  # Use the merged result.
+                else:
+                    # Fall back to the mapped file path for other data types.
+                    file_path_template = DATA_TYPE_PATHS.get(data_type_name)
+                    if not file_path_template:
+                        continue  # Skip if the data type is not mapped.
+                    try:
+                        with open(file_path_template.format(ticker=ticker), 'rb') as file:
+                            json_data = orjson.loads(file.read())
+                        if data_type_name == 'Dividends Data':
+                            json_data = json_data['history']
+                            json_data = sorted(json_data, key=lambda item: item['date'])
+                    except:
+                        json_data = []
+
+                # Convert the JSON data to CSV.
+                csv_buffer = io.StringIO()
+                csv_writer = csv.writer(csv_buffer)
+
+                if json_data and isinstance(json_data, list) and len(json_data) > 0:
+                    # Write headers based on the keys of the first record.
+                    headers = list(json_data[0].keys())
+                    csv_writer.writerow(headers)
+                    for row in json_data:
+                        csv_writer.writerow([row.get(key, "") for key in headers])
+                else:
+                    csv_writer.writerow(["No data available"])
+
+                # Write the CSV content to the zip file under a folder named after the data type.
+                zip_csv_path = f"{data_type_name}/{ticker}.csv"
+                zf.writestr(zip_csv_path, csv_buffer.getvalue())
 
-    # Seek back to the start of the BytesIO stream before returning it.
     memory_file.seek(0)
 
-    # Return the zip file as a StreamingResponse.
     return StreamingResponse(
         memory_file,
        media_type="application/zip",
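The heart of the change is the date-keyed merge of adjusted prices into the raw price records. A standalone sketch of that technique with made-up records (values are illustrative, not real market data):

res = [{"time": "2024-01-02", "open": 10.0, "close": 10.5}]
adj_res = [{"date": "2024-01-02", "adjOpen": 9.8, "adjClose": 10.3}]

# Index adjusted entries by date so each raw record is matched in O(1)
# instead of rescanning adj_res once per record.
adj_by_date = {entry["date"]: entry for entry in adj_res if "date" in entry}

for record in res:
    adj_entry = adj_by_date.get(record.get("time"))
    if adj_entry is not None:
        record["adjOpen"] = adj_entry.get("adjOpen")
        record["adjClose"] = adj_entry.get("adjClose")

print(res)
# [{'time': '2024-01-02', 'open': 10.0, 'close': 10.5, 'adjOpen': 9.8, 'adjClose': 10.3}]

Keying adj_res by its "date" field while looking records up by their "time" field is exactly the assumption the commit's comments call out; if the two files ever disagree on date format, the lookup silently misses and the adj columns are simply omitted.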
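On the client side, the response is a zip whose entries are grouped into one folder per data type. A sketch of consuming it with requests; the base URL and the API-key header name are assumptions, since the actual auth scheme depends on get_api_key:

import io
import zipfile

import requests

resp = requests.post(
    "http://localhost:8000/bulk-download",  # assumed host/port
    json=payload,                           # the payload sketched above
    headers={"X-API-KEY": "secret"},        # hypothetical header name
)
resp.raise_for_status()

with zipfile.ZipFile(io.BytesIO(resp.content)) as zf:
    print(zf.namelist())  # e.g. ['Price Data/AAPL.csv', 'Price Data/MSFT.csv']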