Initial commit with existing files
Commit a262e2b0a2

.gitattributes (vendored): Normal file, 1 line added
@@ -0,0 +1 @@
*.db filter=lfs diff=lfs merge=lfs -text

.gitignore (vendored): Normal file, 13 lines added
@@ -0,0 +1,13 @@
app/__pycache__
app/saved_models
app/backup_db
app/stocks.*
app/institute.*
app/etf.*
app/crypto.*
app/*.sh
app/.env*
app/ml_models/weights
app/json/*
fastify/node_modules
pocketbase/*

LICENSE: Normal file, 21 lines added
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 stocknear

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

README.md: Normal file, 44 lines added
@@ -0,0 +1,44 @@
<div align="center">

# **Open Source Stock Analysis & Community Platform for Small Investors.**

<h3>

[Homepage](https://stocknear.com/) | [Discord](https://discord.com/invite/hCwZMMZ2MT)

</h3>

[](https://github.com/stocknear/backend/stargazers)

</div>

# Techstack

This is the codebase that powers [stocknear's](https://stocknear.com/) backend, an open-source stock analysis & community platform.

Built with:
- [FastAPI](https://fastapi.tiangolo.com/): Python backend
- [Fastify](https://fastify.dev/): Node.js backend
- [Pocketbase](https://pocketbase.io/): Database
- [Redis](https://redis.io/): Data caching
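
To give a rough sense of how these pieces fit together, here is a minimal sketch, not the actual stocknear code: the endpoint path, cache key, TTL, and local Redis instance are all assumptions for illustration, of a FastAPI route that serves data out of Redis:

```python
# Illustrative sketch only; names and values are hypothetical.
import json

import redis
from fastapi import FastAPI

app = FastAPI()
# Synchronous client kept for brevity; assumes Redis running on localhost:6379.
cache = redis.Redis(host="localhost", port=6379, decode_responses=True)

@app.get("/quote/{symbol}")
async def get_quote(symbol: str):
    cached = cache.get(f"quote:{symbol}")
    if cached:
        return json.loads(cached)  # cache hit: return the stored payload
    data = {"symbol": symbol, "price": None}  # placeholder for the real data lookup
    cache.set(f"quote:{symbol}", json.dumps(data), ex=60)  # cache for 60 seconds
    return data
```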

# Getting started

A guide to running stocknear locally on your machine is coming soon.
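
In the meantime, the data scripts under `app/` already read their API keys from a local `.env` file (ignored via the `app/.env*` rule in `.gitignore`). A minimal sketch of the pattern they use follows; the variable names appear in the scripts, everything else is up to you:

```python
# Sketch of the configuration loading used by the app/ data scripts.
import os

from dotenv import load_dotenv

load_dotenv()  # reads the .env file in the working directory
fmp_api_key = os.getenv("FMP_API_KEY")              # used by the FMP-based scripts
coingecko_api_key = os.getenv("COINGECKO_API_KEY")  # used by create_crypto_db.py
```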

# Contributing

Stocknear is open-source software and you're welcome to contribute to its development.

The core ideas of stocknear shall always be: ***Simplicity***, ***Maintainability***, ***Readability*** & ***Speed***, in this order.

If you want to contribute to the codebase, please follow these guidelines:
- Reducing complexity and increasing readability is a huge plus!
- Anything you claim is a "speedup" must be benchmarked. In general, the goal is simplicity, so even if your PR makes things marginally faster, you have to weigh the tradeoff against maintainability and readability.
- If your PR looks "complex", is a big diff, or adds lots of lines, it won't be reviewed or merged. Consider breaking it up into smaller PRs that are individually clear wins. A common pattern I see is prerequisite refactors before adding new functionality. If you can (cleanly) refactor to the point that the feature is a 3-line change, this is great and easy for us to review.

# Support ❤️

If you love the idea of stocknear and want to support our mission, you can help us in two ways:
- Become a [Pro Member](https://stocknear.com/pricing) of stocknear to get unlimited feature access and enjoy the platform to the fullest.
- Donate via [Ko-fi](https://ko-fi.com/stocknear) to help us pay for the servers & data providers that keep everything running!

app/GetStartEndDate.py: Normal file, 93 lines added
@@ -0,0 +1,93 @@
from datetime import datetime, timedelta
import pytz


class GetStartEndDate:
    def __init__(self):
        self.new_york_tz = pytz.timezone('America/New_York')
        self.current_datetime = datetime.now(self.new_york_tz)

    def check_if_holiday(self):
        holiday_dates = {
            datetime(2023, 5, 29): 'memorial_day',
            datetime(2023, 6, 19): 'independence_day',
            datetime(2023, 6, 20): 'independence_day+1',
            datetime(2023, 9, 4): 'labor_day',
            datetime(2023, 9, 5): 'labor_day+1',
            datetime(2023, 11, 23): 'thanks_giving',
            datetime(2023, 12, 25): 'christmas',
            datetime(2024, 1, 1): 'new_year',
            datetime(2024, 1, 15): 'martin_luther_king',
            datetime(2024, 2, 19): 'washington_birthday',
        }

        for date, name in holiday_dates.items():
            if date.date() == self.current_datetime.date():
                return name
        return None

    def correct_1d_interval(self, holiday):
        if holiday == 'memorial_day':
            start_date_1d = datetime(2023, 5, 26)
        elif holiday in ('independence_day', 'independence_day+1'):
            start_date_1d = datetime(2023, 6, 16)
        elif holiday in ('labor_day', 'labor_day+1'):
            start_date_1d = datetime(2023, 9, 1)
        elif holiday == 'thanks_giving':
            start_date_1d = datetime(2023, 11, 22)
        elif holiday == 'new_year':
            start_date_1d = datetime(2023, 12, 29)
        elif holiday == 'martin_luther_king':
            start_date_1d = datetime(2024, 1, 12)  # last trading day before MLK Day 2024 (was 2023 in the original, a typo)
        elif holiday == 'washington_birthday':
            start_date_1d = datetime(2024, 2, 16)
        else:
            current_time_new_york = datetime.now(self.new_york_tz)
            current_weekday = current_time_new_york.weekday()
            is_afternoon = current_time_new_york.hour > 9 or (current_time_new_york.hour == 9 and current_time_new_york.minute >= 30)

            if current_weekday == 0:  # Monday
                start_date_1d = current_time_new_york if is_afternoon else current_time_new_york - timedelta(days=3)
            elif current_weekday in (5, 6):  # Saturday or Sunday
                start_date_1d = current_time_new_york - timedelta(days=current_weekday % 5 + 1)
            else:
                start_date_1d = current_time_new_york if is_afternoon else current_time_new_york - timedelta(days=1)
        return start_date_1d

    def run(self):
        holiday = self.check_if_holiday()
        start_date_1d = self.correct_1d_interval(holiday)

        current_time_new_york = datetime.now(self.new_york_tz)
        is_afternoon = current_time_new_york.hour > 9 or (current_time_new_york.hour == 9 and current_time_new_york.minute >= 30)
        if holiday:
            holiday_dates = {
                'memorial_day': datetime(2023, 5, 26),
                'independence_day': datetime(2023, 6, 16),
                'independence_day+1': datetime(2023, 6, 16),
                'labor_day': datetime(2023, 9, 1),
                'labor_day+1': datetime(2023, 9, 1),
                'thanks_giving': datetime(2023, 11, 22),
                'christmas': datetime(2023, 12, 22),
                'new_year': datetime(2023, 12, 29),
                'martin_luther_king': datetime(2024, 1, 12),
                'washington_birthday': datetime(2024, 2, 16),
            }

            if holiday in holiday_dates:
                end_date_1d = holiday_dates[holiday]
            elif holiday in ['independence_day+1', 'labor_day+1', 'christmas_day+1'] and not is_afternoon:
                end_date_1d = holiday_dates[holiday]
            else:
                end_date_1d = self.current_datetime
        elif current_time_new_york.weekday() == 0:
            end_date_1d = current_time_new_york if is_afternoon else current_time_new_york - timedelta(days=3)
        else:
            end_date_1d = current_time_new_york

        return start_date_1d, end_date_1d


# Test Mode
#start, end = GetStartEndDate().run()
#print(start, end)

app/create_crypto_db.py: Normal file, 371 lines added
@@ -0,0 +1,371 @@
import aiohttp
import asyncio
import sqlite3
import certifi
import json
import ujson
import pandas as pd
import os
import re
import subprocess
import time
import warnings
import requests
from tqdm import tqdm
from datetime import datetime
from dotenv import load_dotenv

load_dotenv()
api_key = os.getenv('FMP_API_KEY')
coingecko_api_key = os.getenv('COINGECKO_API_KEY')

# Filter out the specific RuntimeWarning
warnings.filterwarnings("ignore", category=RuntimeWarning, message="invalid value encountered in scalar divide")


start_date = datetime(2015, 1, 1).strftime("%Y-%m-%d")
end_date = datetime.today().strftime("%Y-%m-%d")


# Start from a fresh backup database on every run.
if os.path.exists("backup_db/crypto.db"):
    os.remove('backup_db/crypto.db')


def get_jsonparsed_data(data):
    try:
        return json.loads(data)
    except json.JSONDecodeError:
        return {}


def gecko_crypto_id(symbol):
    symbol_map = {
        'BTCUSD': 'bitcoin',
        'DASHUSD': 'dash',
        'ETCUSD': 'ethereum-classic',
        'SHIBUSD': 'shiba-inu',  # CoinGecko's id for Shiba Inu is 'shiba-inu'
        'USDCUSD': 'usd-coin',
        'LINKUSD': 'chainlink',
        'BNBUSD': 'binancecoin',
        'ETHUSD': 'ethereum',
        'LTCUSD': 'litecoin',
        'SOLUSD': 'solana',
        'DOGEUSD': 'dogecoin',
        'XRPUSD': 'ripple',
        'XMRUSD': 'monero',
        'USDTUSD': 'tether',
        'ADAUSD': 'cardano',
        'AVAXUSD': 'avalanche-2',
        'LUNAUSD': 'terra-luna-2',
        'BCHUSD': 'bitcoin-cash',
        'TRXUSD': 'tron',
        'DOTUSD': 'polkadot',
        'ALGOUSD': 'algorand'
    }
    return symbol_map.get(symbol, None)


def get_description(symbol):
|
||||||
|
symbol_map = {
|
||||||
|
'BTCUSD': 'A brief history Bitcoin was created in 2009 by Satoshi Nakamoto, a pseudonymous developer. Bitcoin is designed to be completely decentralized and not controlled by any single authority. With a total supply of 21 million, its scarcity and decentralized nature make it almost impossible to inflate or manipulate. For this reason, many consider bitcoin to be the ultimate store of value or ‘Digital Gold’. Bitcoin is fully open-source and operates on a proof-of-work blockchain, a shared public ledger and history of transactions organized into "blocks" that are "chained" together to prevent tampering. This technology creates a permanent record of each transaction. Users on the Bitcoin network verify transactions through a process known as mining, which is designed to confirm new transactions are consistent with older transactions that have been confirmed in the past, ensuring users can not spend a Bitcoin they don’t have or attempt to double-spend coins.',
|
||||||
|
'DASHUSD': 'Dash was launched in 2014 as a fork of Litecoin (which itself is a fork of Bitcoin). The founder, Evan Duffield, wanted to increase privacy and security in crypto transactions. The project was originally launched under the name "Darkcoin" before it got rebranded to Dash (Digital Cash) in 2015. Although the cryptocurrency still has robust encryption, the primary objective of the project has gone through readjustment. Its current aim is to provide an affordable and convenient means to make day-to-day payments for a wide range of products and services- a practical alternative to bank cards and hard cash. Dash distinguishes itself through its unique mining algorithms and its system for handling transactions. Dash uses the X11 algorithm, a modification of the proof-of-stake algorithm. It also uses CoinJoin mixing to scramble transactions and make privacy possible on its blockchain. Additionally, Dash is run by a subset of its users, which are called "masternodes." Masternodes simplify the verification and validation of transactions- reducing the number of nodes needed to successfully approve a transaction.',
|
||||||
|
'ETCUSD': 'Ethereum Classic came into existence on July 20, 2016, as a continuation of the original Ethereum blockchain following a compromise in the original protocol leading to a fork of the protocol. Ethereum Classic is dedicated to enabling decentralized, immutable, and unstoppable applications. Like the original Ethereum network, the blockchain relies on "proof of work" mining, meaning that users run hardware and software to validate transactions on the network and keep it secure- earning ETC in return. However, Ethereum Classic differs from Ethereum in that the platform does not plan to move away from Proof-of-Work, while Ethereum is trying to make the transition to Proof-of-Stake It should also be noted that the Ethereum Classic ecosystem is not as active as the Ethereum network. These relatively low rates of use have caused problems for the networks security since blockchains rely on having a distributed group of users running the network; when there are not enough people actively doing so, it leaves the blockchain vulnerable. However, Ethereum Classic has been actively making updates to address this issue in its network.',
|
||||||
|
'LINKUSD': 'Chainlink was created by Sergey Nazarov and Steve Ellis, who authored a 2017 white paper with Ari Juels. Launched in 2019, Chainlink is described as a decentralized "oracle" network which aims to bring real-world data onto the blockchain. Oracles are entities that connect blockchains to external systems. Though traditional oracles are centralized, Chainlink decentralizes the process of moving data on and off blockchains through the use of "hybrid smart contracts." These hybrid smart contracts create access to off-chain resources, letting them react to real-world events and execute agreements that would otherwise need external proof of performance. As a result, Chainlink has been used to distribute non-fungible tokens (NFTs), gamify personal savings, and facilitate recalibrations of cryptocurrency token supplies, among other applications.',
|
||||||
|
'USDCUSD': 'USD Coin (USDC) was launched in September of 2018 by Center — a joint venture between Coinbase and Circle. USDC first launched on the Ethereum blockchain as an ERC-20 token, but has since expanded to other blockchains including Solana, Stellar, and Algorand, and can be purchased on both centralized and decentralized exchanges (DEXs). As a stablecoin, it provides all the benefits of cryptocurrencies––faster, cheaper, permissionless transactions––without the price volatility.',
|
||||||
|
'SHIBUSD': 'Launched in August 2020 by a founder called Ryoshi, Shiba Inu (SHIB) was created as an Ethereum-based meme coin inspired by Dogecoin. According to the project`s “woofpaper” (whitepaper), Shiba Inu was developed as the answer to a simple question: What would happen if a cryptocurrency project was 100% run by its community? Its founder Ryoshi attributes its origins to an "experiment in decentralized spontaneous community building. Since its founding, it has evolved into a decentralized ecosystem supporting projects such as an NFT art incubator and a decentralized exchange called Shibaswap.',
|
||||||
|
'BNBUSD': 'Binance was founded in 2017 by Changpeng Zhao, a developer who had previously created a high-frequency trading software called Fusion Systems. Binance was initially based in China but later moved its headquarters following the Chinese government`s increasing regulation of cryptocurrency. Binance offers crypto-to-crypto trading in more than 500 cryptocurrencies and virtual tokens, with a strong focus on altcoin trading. Additionally, Binance has among the lowest transaction fees for cryptocurrency exchanges thanks to its commission structure. Fees generally start low, and then only move lower. Binance uses a volume-based pricing model and even gives you further discounts if you use its proprietary cryptocurrency to buy and sell.',
|
||||||
|
'ETHUSD': 'The original Ethereum concept was introduced in 2013 by Vitalik Buterin with the release of the Ethereum whitepaper and in 2015 the Ethereum platform was launched by Buterin and Joseph Lubin along with several other co-founders. Ethereum is described as “the world’s programmable blockchain,” positioning itself as an electronic, programmable network that anyone can build on to launch cryptocurrencies and decentralized applications. Unlike Bitcoin which has a maximum circulation of 21 million coins, the amount of ETH that can be created is unlimited, although the time that it takes to process a block of ETH limits how much ether can be minted each year. Another difference between Ethereum and Bitcoin is how the networks treat transaction processing fees. These fees are known as “gas” on the Ethereum network and are paid by the participants in Ethereum transactions. The fees associated with Bitcoin transactions, however, are absorbed by the broader Bitcoin network. Additionally, although both Bitcoin and Ethereum currently use Proof-of-Work consensus mechanisms, Ethereum is in the process of gradually transitioning to a different consensus algorithm known as Proof-of-Stake, which uses significantly less energy.',
|
||||||
|
'LTCUSD': 'Litecoin was launched in 2011 by Charlie Lee, a former Google employee. It aims to be a "lite" version of Bitcoin in that it features many of the same properties as Bitcoin–albeit lighter weight. It is commonly often referred to as digital silver to Bitcoins digital gold and is often used as a pseudo testnet for Bitcoin, adopting new protocol changes before they are deployed on Bitcoin. Like Bitcoin, Litecoin uses a form of proof-of-work mining to enable anyone who dedicates their computing resources to add new blocks to its blockchain and earn the new Litecoin it creates. Where Litecoin differs is in its mining algorithm called Scrypt PoW. Scrypt allows the platform to finalize transactions faster. On Litecoin, new blocks are added to the blockchain roughly every 2.5 minutes (as opposed to 10 minutes on Bitcoin).',
|
||||||
|
'SOLUSD': 'Solana was created in 2017 by Anatoly Yakovenko and Raj Gokal. Yakovenko, who is also the CEO of Solana Labs, came from a background in system design and wanted to apply this knowledge and create a brand new blockchain that could scale to global adoption. Solana boasts a theoretical peak capacity of 65,000 transactions per second and has become one of the most highly used blockchains due to its speed and low transaction costs. Solana runs on a hybrid protocol of proof-of-stake (PoS) and a concept Solana calls proof-of-history (PoH). Solana is also said to be an “Ethereum competitor,” due to its distinct advantage over Ethereum in terms of transaction processing speed and transaction costs. Solana can process as many as 50,000 transactions per second (TPS), and its average cost per transaction is $0.00025. In contrast, Ethereum can only handle less than 15 TPS, while transaction fees reached a record of $70 in 2021.',
|
||||||
|
'DOGEUSD': 'Founded in 2013 by software engineers Billy Markus and Jackson Palmer, Dogecoin was created as a marketing experiment based on the popular "Doge" Internet meme and as a lighthearted alternative to traditional cryptocurrencies. Despite its origins as a “joke,” Dogecoin’s scrypt technology (a hashing function that uses SHA-256 but includes much higher memory requirements for proof-of-work) and an unlimited supply of coins set it apart as a faster, more adaptable, and consumer-friendly version of Bitcoin. Like other cryptocurrencies, Dogecoin is mined by the decentralized network of computers that runs the currency. But unlike other coins, Dogecoin does not have a cap on the total number of coins that can be mined- making it an inflationary rather than deflationary coin. In 2014 due to network security issues, Dogecoin agreed to merge mine its network with Litecoin (LTC).',
|
||||||
|
'XRPUSD': 'RippleNet was founded in 2012 by Chris Larsen and Jed McCaleb and is based on the work of Ryan Fugger, who created the XRP Ledger- an open-source cryptographic ledger powered by a peer-to-peer network of nodes. XRP’s main aim is to increase the speed and reduce the cost of transferring money between financial institutions. XRP does this through an open-source and peer-to-peer decentralized platform that allows for a seamless transfer of money in any form. XRP is a global network and counts major banks and financial services among its customers. Ripple uses a medium, known as a gateway, as the link in the trust chain between two parties wanting to make a transaction. Usually, in the form of banks, the gateway acts as a trusted intermediary to help two parties complete a transaction by providing a channel to transfer funds in fiat and cryptocurrencies. It should also be noted that XRP runs a federated consensus algorithm which differs from both Proof-of-Work and Proof-of-Stake mechanisms. Essentially, the mechanism allows participating nodes to validate transactions by conducting a poll, enabling almost instant confirmations without a central authority.',
|
||||||
|
'XMRUSD': 'Monero, originally called Bitmonero, was launched in 2014 after a hard fork from Bytecoin. Monero (XMR) is a cryptocurrency focused on privacy. It aims to allow payments to be made quickly and inexpensively without fear of censorship. Monero is unique in that it’s designed to keep wallets and transactions completely anonymous, including network members, developers, and miners. Monero alleviates privacy concerns using the concepts of ring signatures and stealth addresses. Ring signatures enable a sender to conceal their identity from other participants in a group. To generate a ring signature, the Monero platform uses a combination of a sender’s account keys and combines it with public keys on the blockchain, making it unique as well as private. It hides the senders identity, as it is computationally impossible to ascertain which of the group members keys was used to produce the complex signature.',
|
||||||
|
'USDTUSD': 'Originally known as “Realcoin,” Tether was founded in July 2014 by Brock Pierce, Craig Sellars, and Reeve Collins. Tether aims to solve two major issues with existing cryptocurrencies: high volatility and convertibility between fiat currencies and cryptocurrencies. To address these perceived issues Tether created a cryptocurrency that is fully backed 1:1 by deposits of fiat currencies like the US dollar, the euro, or the yen. This makes Tether a fiat-based stablecoin, which differs from other stablecoins such as crypto-collateralized stablecoins, which use cryptocurrency reserves as collateral. Tether relies on a Proof-of-Reserve to ensure that reserve assets match circulating USTD tokens. Doing this requires a third party to audit Tether’s bank accounts on a regular basis to show that the reserves are held in an amount equal to the outstanding tokens. Tether uses an IOU model where each USDT represents a claim for $1.00 held in Tether’s reserves.',
|
||||||
|
'ADAUSD': 'Cardano is a blockchain founded on peer-reviewed research by Charles Hoskinson, a co-founder of the Ethereum project. He began developing Cardano in 2015, launching the platform and the ADA token in 2017. Positioned as an alternative to Ethereum, Cardano aims to offer greater security, scalability, and energy efficiency than its peers. Currently, Cardano has released three products: Atala PRISM, Atala SCAN, and Atala Trace. The first product is marketed as an identity management tool that can be used to provide access to services, while the other two products are being used to trace a product’s journey through a supply chain. Additionally, Cardano utilizes Ouroboros, an algorithm that uses proof-of-stake (PoS) protocol to mine blocks. The protocol is designed to reduce energy expenditure during the block production process to a minimum by eliminating the need for massive computing resources that are more central to the functioning of the proof-of-work (PoW) protocol. In Cardanos PoS system, staking determines a nodes capability to create blocks, and a nodes stake is equal to the amount of ADA held by it over the long term.',
|
||||||
|
'AVAXUSD': 'Launched in 2020 by the Ava Labs team, Avalanche quickly ascended the cryptocurrency rankings while aiming to be the fastest, lowest cost, and most environmentally-friendly blockchain. Although Avalanche’s platform is complex, there are three primary aspects of its design that distinguish it from other blockchain projects. First, it uses a novel consensus mechanism that builds off of PoS. When a transaction is received by a validator node that node then samples a random set of other validators (which then randomly samples another set of validators) and checks for agreement until consensus is reached. Second, Avalanche users can launch specialized chains called sub-nets that operate using their own sets of rules- comparable to Polkadot’s parachains and Ethereum 2.0’s shards. Lastly, Avalanche is built using three different blockchains called the X-Chain, C-Chain, and P-Chain. Digital assets can be moved across each of these chains to accomplish different functions within the ecosystem.',
|
||||||
|
'BCHUSD': 'Bitcoin Cash came about in 2017 and was created to address concerns over Bitcoins scalability while staying as close to its original vision as a form of digital cash. It’s a hard fork of the Bitcoin blockchain, meaning the network “split” in two at a certain block as decided on by various miners and developers within the Bitcoin network. Bitcoin Cash uses an increased block size with an adjustable level of difficulty to ensure fast transactions as its user base scales. At a technical level, Bitcoin Cash works exactly the same as Bitcoin. Both platforms have a hard cap of 21 million assets, use nodes to validate transactions, and use a PoW consensus algorithm. However, BCH operates faster and has lower transaction fees than its predecessor, thanks to the aforementioned larger block size. Bitcoin Cash can support 25,000 transactions per block compared with Bitcoin’s 1,000 to 1,500 per block. Additionally, as of March 2022, the maximum block size for BCH was increased fourfold to 32 MB.',
|
||||||
|
'TRXUSD': 'Founded in 2017 by a Singapore non-profit organization, the Tron Foundation, Tron aims to host a global entertainment system and to be the infrastructure of the decentralized web. It powers an ecosystem of decentralized applications (DApps) by offering high throughput, high scalability, and high availability. The Tron network relies on a Delegated-Proof-of-Stake (DPoS) consensus mechanism to secure the blockchain. Similar to a proof-of-stake consensus mechanism, DPoS allows users to earn passive income whenever they stake their holdings in a network wallet. However, unlike a PoS system, only elected nodes can approve blocks of transactions on Tron. In the Tron ecosystem, the community elects 27 Super Representatives every six hours for this vital task. These representatives are chosen by the community of users staking their TRX. In addition to super representatives, users can operate as witness nodes, full nodes and Solidity nodes. Witness nodes propose blocks and vote on protocol decisions, full nodes broadcast transactions and blocks, and Solidity nodes sync blocks from full nodes and provide APIs. It should also be noted that there are no transaction fees for TRX, and the network is able to support 2000 of these feeless transactions every second.',
|
||||||
|
'DOTUSD': 'Polkadot was launched on May 26, 2020, by Ethereum co-creator Gavin Wood, with the nonprofit Web3 Foundation serving as the primary research organization that maintains Polkadot’s open-source code. The main goal of the Polkadot platforms is to connect blockchains and enable cross-blockchain transfers of data and assets. Polkadot does this through an innovative "parallel chain," or para chain, system to enable interoperability between blockchains. The Polkadot platform allows for the creation of three types of blockchains. At the centre is the Relay chain- the main Polkadot blockchain- which is where transactions are finalized. On top of that, there are Parachains -custom blockchains- that use the relay chain’s computing resources to confirm that transactions are accurate. Lastly, there are Bridges which allow the Polkadot network to interact with other blockchains.',
|
||||||
|
'ALGOUSD': 'Algorand was founded by Silvio Micali, a professor of computer science at the Massachusetts Institute of Technology and the 2012 recipient of the Turing Award. Launched in 2019, Algorand is a protocol that is designed to solve the blockchain trilemma of achieving speed, security, and decentralization simultaneously. In doing so, it aims to power the future of finance (FutureFi) by accelerating the convergence of decentralized and traditional finance (DeFi and TradFi). Additionally, the Algorand blockchain uses a modified version of the proof-of-stake consensus mechanism called pure-proof-of-stake (PPoS). The PPoS consensus mechanism uses randomness to increase the security of the blockchain network. While the traditional proof-of-stake method enables cryptocurrency holders to opt into validating transactions, the pure-proof-of-stake method recruits validators from the entire pool of ALGO holders.',
|
||||||
|
}
|
||||||
|
return symbol_map.get(symbol, 'No description available')
|
||||||
|
|
||||||
|
def get_website(symbol):
    symbol_map = {
        'BTCUSD': 'https://bitcoin.org',
        'DASHUSD': 'https://www.dash.org/',
        'ETCUSD': 'https://ethereumclassic.org/',
        'LINKUSD': 'https://chain.link/',
        'USDCUSD': 'https://www.centre.io/usdc',
        'SHIBUSD': 'https://shibatoken.com/',
        'BNBUSD': 'https://www.bnbchain.org',
        'ETHUSD': 'https://ethereum.org',
        'LTCUSD': 'https://litecoin.org/',
        'SOLUSD': 'https://solana.com/',
        'DOGEUSD': 'https://dogecoin.com/',
        'XRPUSD': 'https://xrpl.org/',
        'XMRUSD': 'https://www.getmonero.org/',
        'USDTUSD': 'https://tether.to',
        'ADAUSD': 'https://cardano.org/',
        'AVAXUSD': 'https://avax.network/',
        'LUNAUSD': '/',
        'BCHUSD': 'https://bch.info',
        'TRXUSD': 'https://tron.network/',
        'DOTUSD': 'https://polkadot.network/',
        'ALGOUSD': 'https://algorandtechnologies.com/'
    }
    return symbol_map.get(symbol, '/')


class CryptoDatabase:
    def __init__(self, db_path):
        self.db_path = db_path
        self.conn = sqlite3.connect(db_path)
        self.cursor = self.conn.cursor()
        self.cursor.execute("PRAGMA journal_mode = wal")
        self.conn.commit()
        self._create_table()

    def close_connection(self):
        self.cursor.close()
        self.conn.close()

    def _create_table(self):
        self.cursor.execute("""
        CREATE TABLE IF NOT EXISTS cryptos (
            symbol TEXT PRIMARY KEY,
            name TEXT,
            exchange TEXT,
            type TEXT
        )
        """)

    def get_column_type(self, value):
        if isinstance(value, str):
            column_type = "TEXT"
        elif isinstance(value, int):
            column_type = "INTEGER"
        elif isinstance(value, float):
            column_type = "REAL"
        else:
            # Handle other data types or customize based on your specific needs
            column_type = "TEXT"

        return column_type

    def remove_null(self, value):
        # Replace missing values so the SQLite columns never receive NULL.
        # (The original isinstance checks could never match, since None is not a str/int/float.)
        if value is None:
            value = 'n/a'
        return value

async def save_fundamental_data(self, session, symbol):
|
||||||
|
try:
|
||||||
|
crypto_id = gecko_crypto_id(symbol)
|
||||||
|
|
||||||
|
urls = [
|
||||||
|
f"https://financialmodelingprep.com/api/v3/quote/{symbol}?apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/crypto_news?tickers={symbol}&limit=50&apikey={api_key}",
|
||||||
|
f"https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd&ids={crypto_id}"
|
||||||
|
]
|
||||||
|
|
||||||
|
fundamental_data = {}
|
||||||
|
|
||||||
|
|
||||||
|
for url in urls:
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
parsed_data = get_jsonparsed_data(data)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if isinstance(parsed_data, list) and "quote" in url:
|
||||||
|
fundamental_data['quote'] = ujson.dumps(parsed_data)
|
||||||
|
data_dict = {
|
||||||
|
'price': parsed_data[0]['price'],
|
||||||
|
'changesPercentage': round(parsed_data[0]['changesPercentage'],2),
|
||||||
|
'marketCap': parsed_data[0]['marketCap'],
|
||||||
|
'previousClose': parsed_data[0]['previousClose'],
|
||||||
|
}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
|
||||||
|
elif isinstance(parsed_data, list) and "crypto_news" in url:
|
||||||
|
fundamental_data['crypto_news'] = ujson.dumps(parsed_data)
|
||||||
|
elif "coingecko" in url:
|
||||||
|
headers = {
|
||||||
|
"accept": "application/json",
|
||||||
|
"x-cg-demo-api-key": coingecko_api_key
|
||||||
|
}
|
||||||
|
response = requests.get(url, headers=headers)
|
||||||
|
|
||||||
|
gecko_data = ujson.loads(response.text)[0]
|
||||||
|
gecko_data['description'] = get_description(symbol)
|
||||||
|
gecko_data['website'] = get_website(symbol)
|
||||||
|
fundamental_data['profile'] = ujson.dumps(gecko_data)
|
||||||
|
|
||||||
|
max_supply = gecko_data.get('max_supply')
|
||||||
|
if max_supply is None:
|
||||||
|
max_supply = "Uncapped"
|
||||||
|
|
||||||
|
data_dict = {
|
||||||
|
'circulatingSupply': gecko_data['circulating_supply'],
|
||||||
|
'maxSupply': max_supply,
|
||||||
|
}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# Check if columns already exist in the table
|
||||||
|
self.cursor.execute("PRAGMA table_info(cryptos)")
|
||||||
|
columns = {column[1]: column[2] for column in self.cursor.fetchall()}
|
||||||
|
|
||||||
|
# Update column definitions with keys from fundamental_data
|
||||||
|
column_definitions = {
|
||||||
|
key: (self.get_column_type(fundamental_data.get(key, None)), self.remove_null(fundamental_data.get(key, None)))
|
||||||
|
for key in fundamental_data
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
for column, (column_type, value) in column_definitions.items():
|
||||||
|
if column not in columns and column_type:
|
||||||
|
self.cursor.execute(f"ALTER TABLE cryptos ADD COLUMN {column} {column_type}")
|
||||||
|
|
||||||
|
self.cursor.execute(f"UPDATE cryptos SET {column} = ? WHERE symbol = ?", (value, symbol))
|
||||||
|
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to fetch fundamental data for symbol {symbol}: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
async def save_cryptos(self, cryptos):
|
||||||
|
symbols = []
|
||||||
|
names = []
|
||||||
|
ticker_data = []
|
||||||
|
|
||||||
|
for item in cryptos:
|
||||||
|
symbol = item.get('symbol', '')
|
||||||
|
name = item.get('name', '').replace('USDt','').replace('USD','')
|
||||||
|
exchange = item.get('exchangeShortName', '')
|
||||||
|
ticker_type = 'crypto'
|
||||||
|
|
||||||
|
if name and '.' not in symbol and not re.search(r'\d', symbol):
|
||||||
|
symbols.append(symbol)
|
||||||
|
names.append(name)
|
||||||
|
ticker_data.append((symbol, name, exchange, ticker_type))
|
||||||
|
|
||||||
|
|
||||||
|
self.cursor.execute("BEGIN TRANSACTION") # Begin a transaction
|
||||||
|
|
||||||
|
for data in ticker_data:
|
||||||
|
symbol, name, exchange, ticker_type = data
|
||||||
|
|
||||||
|
self.cursor.execute("""
|
||||||
|
INSERT OR IGNORE INTO cryptos (symbol, name, exchange, type)
|
||||||
|
VALUES (?, ?, ?, ?)
|
||||||
|
""", (symbol, name, exchange, ticker_type))
|
||||||
|
self.cursor.execute("""
|
||||||
|
UPDATE cryptos SET name = ?, exchange = ?, type= ?
|
||||||
|
WHERE symbol = ?
|
||||||
|
""", (name, exchange, ticker_type, symbol))
|
||||||
|
|
||||||
|
self.cursor.execute("COMMIT") # Commit the transaction
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Save OHLC data for each ticker using aiohttp
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
tasks = []
|
||||||
|
i = 0
|
||||||
|
for item in tqdm(ticker_data):
|
||||||
|
symbol, name, exchange, ticker_type = item
|
||||||
|
symbol = symbol.replace("-", "")
|
||||||
|
tasks.append(self.save_ohlc_data(session, symbol))
|
||||||
|
tasks.append(self.save_fundamental_data(session, symbol))
|
||||||
|
|
||||||
|
i += 1
|
||||||
|
if i % 150 == 0:
|
||||||
|
await asyncio.gather(*tasks)
|
||||||
|
tasks = []
|
||||||
|
print('sleeping mode: ', i)
|
||||||
|
await asyncio.sleep(60) # Pause for 60 seconds
|
||||||
|
|
||||||
|
#tasks.append(self.save_ohlc_data(session, "%5EGSPC"))
|
||||||
|
|
||||||
|
if tasks:
|
||||||
|
await asyncio.gather(*tasks)
|
||||||
|
|
||||||
|
|
||||||
|
    def _create_ticker_table(self, symbol):
        # Sanitize the symbol before using it as a table name.
        cleaned_symbol = re.sub(r'[^a-zA-Z0-9_]', '_', symbol)
        # Check if table exists
        self.cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{cleaned_symbol}'")
        table_exists = self.cursor.fetchone() is not None

        if not table_exists:
            query = f"""
                CREATE TABLE '{cleaned_symbol}' (
                    date TEXT,
                    open FLOAT,
                    high FLOAT,
                    low FLOAT,
                    close FLOAT,
                    volume INT,
                    change_percent FLOAT
                );
            """
            self.cursor.execute(query)

async def save_ohlc_data(self, session, symbol):
|
||||||
|
try:
|
||||||
|
#self._create_ticker_table(symbol) # Create table for the symbol
|
||||||
|
|
||||||
|
url = f"https://financialmodelingprep.com/api/v3/historical-price-full/{symbol}?serietype=bar&from={start_date}&to={end_date}&apikey={api_key}"
|
||||||
|
|
||||||
|
try:
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
|
||||||
|
ohlc_data = get_jsonparsed_data(data)
|
||||||
|
if 'historical' in ohlc_data:
|
||||||
|
ohlc_values = [(item['date'], item['open'], item['high'], item['low'], item['close'], item['volume'], item['changePercent']) for item in ohlc_data['historical'][::-1]]
|
||||||
|
|
||||||
|
df = pd.DataFrame(ohlc_values, columns=['date', 'open', 'high', 'low', 'close', 'volume', 'change_percent'])
|
||||||
|
|
||||||
|
# Perform bulk insert
|
||||||
|
df.to_sql(symbol, self.conn, if_exists='append', index=False)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to fetch OHLC data for symbol {symbol}: {str(e)}")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to create table for symbol {symbol}: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
url = f"https://financialmodelingprep.com/api/v3/symbol/available-cryptocurrencies?apikey={api_key}"
|
||||||
|
|
||||||
|
async def fetch_tickers():
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
return get_jsonparsed_data(data)
|
||||||
|
|
||||||
|
|
||||||
|
db = CryptoDatabase('backup_db/crypto.db')
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
all_tickers = [item for item in loop.run_until_complete(fetch_tickers()) if item['symbol'] in ['DASHUSD','ETCUSD','LINKUSD','USDCUSD','SHIBUSD','BNBUSD','BTCUSD', 'ETHUSD', 'LTCUSD', 'SOLUSD','DOGEUSD','XRPUSD','XMRUSD','USDTUSD','ADAUSD','AVAXUSD','BCHUSD','TRXUSD','DOTUSD','ALGOUSD']]
|
||||||
|
|
||||||
|
loop.run_until_complete(db.save_cryptos(all_tickers))
|
||||||
|
db.close_connection()
|
||||||

app/create_etf_db.py: Normal file, 398 lines added
@@ -0,0 +1,398 @@
import aiohttp
import asyncio
import sqlite3
import certifi
import json
import ujson
import pandas as pd
import os
import re
import subprocess
import time
import warnings
from tqdm import tqdm
from datetime import datetime
from dotenv import load_dotenv

load_dotenv()
api_key = os.getenv('FMP_API_KEY')

# Filter out the specific RuntimeWarning
warnings.filterwarnings("ignore", category=RuntimeWarning, message="invalid value encountered in scalar divide")


start_date = datetime(2015, 1, 1).strftime("%Y-%m-%d")
end_date = datetime.today().strftime("%Y-%m-%d")


# Start from a fresh backup database on every run.
if os.path.exists("backup_db/etf.db"):
    os.remove('backup_db/etf.db')


def get_jsonparsed_data(data):
    try:
        return json.loads(data)
    except json.JSONDecodeError:
        return {}


def get_etf_provider(etf_name):
|
||||||
|
provider_mapping = {
|
||||||
|
'first-trust': {'FT', 'First Trust'},
|
||||||
|
'blackrock': {'IShares', 'iShares', 'ishares', 'Ishares'},
|
||||||
|
'vanguard': {'Vanguard'},
|
||||||
|
'state-street': {'SPDR'},
|
||||||
|
'invesco': {'Invesco'},
|
||||||
|
'charles-schwab': {'Schwab'},
|
||||||
|
'jpmorgan-chase': {'JPMorgan Chase', 'J.P.', 'JP Morgan'},
|
||||||
|
'dimensional': {'Dimensional'},
|
||||||
|
'wisdom-tree': {'Wisdom Tree', 'WisdomTree', 'Wisdom'},
|
||||||
|
'proshares': {'ProShares', 'Proshares'},
|
||||||
|
'vaneck': {'VanEck'},
|
||||||
|
'fidelity': {'Fidelity'},
|
||||||
|
'global-x': {'Global X'},
|
||||||
|
'american-century-investments': {'Avantis', 'American Century'},
|
||||||
|
'direxion': {'Direxion'},
|
||||||
|
'goldman-sachs': {'Goldman Sachs'},
|
||||||
|
'pimco': {'PIMCO'},
|
||||||
|
'flexshares': {'FlexShares'},
|
||||||
|
'xtrackers': {'Xtrackers'},
|
||||||
|
'capital-group': {'Capital Group'},
|
||||||
|
'innovator': {'Innovator'},
|
||||||
|
'ark': {'ARK', '3D Printing'},
|
||||||
|
'franklin-templeton': {'Franklin', 'Western', 'Royce', 'ClearBridge', 'Martin Currie'},
|
||||||
|
'janus-henderson': {'Janus'},
|
||||||
|
'ssc': {'Alerian', 'ALPS', 'Alps', 'Riverfront', 'Level Four'},
|
||||||
|
'sprott': {'Sprott'},
|
||||||
|
'nuveen': {'Nuveen'},
|
||||||
|
'victory-shares': {'VictoryShares'},
|
||||||
|
'abrdn': {'abrdn'},
|
||||||
|
'krane-shares': {'KraneShares'},
|
||||||
|
'pgim': {'PGIM'},
|
||||||
|
'john-hancock': {'John Hancock'},
|
||||||
|
'alpha-architect': {'EA Bridgeway', 'Strive U.S.', 'Freedom 100', 'Alpha Architect', 'Strive', 'Burney', 'Euclidean', 'Gadsden', 'Argent', 'Guru', 'Sparkline', 'Relative Sentiment', 'Altrius Global'},
|
||||||
|
'bny-mellon': {'BNY'},
|
||||||
|
'amplify-investments': {'Amplify'},
|
||||||
|
'the-hartford': {'Hartford'},
|
||||||
|
'index-iq': {'IQ', 'IndexIQ'},
|
||||||
|
'exchange-traded-concepts': {'ROBO', 'ETC', 'EMQQ', 'Cabana', 'Saba', 'Bitwise', 'NETLease', 'Hull', 'Vesper', 'Corbett', 'FMQQ', 'India Internet', 'QRAFT', 'Capital Link', 'Armor US', 'ETFB Green', 'Nifty India', 'Blue Horizon', 'LG Qraft', 'KPOP', 'Optica Rare', 'Akros', 'BTD Capital'},
|
||||||
|
'fm-investments': {'US Treasury', 'F/m'},
|
||||||
|
'principal': {'Principal'},
|
||||||
|
'etf-mg': {'ETFMG', 'Etho Climate', 'AI Powered Equity', 'Bluestar Israel', 'Breakwave Dry', 'Wedbush'},
|
||||||
|
'simplify': {'Simplify'},
|
||||||
|
'marygold': {'USCF', 'United States'},
|
||||||
|
't-rowe-price': {'T.Rowe Price'},
|
||||||
|
'bondbloxx': {'BondBloxx'},
|
||||||
|
'columbia-threadneedle': {'Columbia'},
|
||||||
|
'tidal': {'RPAR', 'Gotham', 'Adasina', 'UPAR', 'Blueprint Chesapeake', 'Nicholas Fixed', 'FolioBeyond', 'God Bless America', 'Zega Buy', 'Leatherback', 'SonicShares', 'Aztian', 'Unlimited HFND', 'Return Stacked', 'Meet Kevin', 'Sound Enhanced', 'Carbon Collective', 'Pinnacle Focused', 'Robinson Alternative', 'Ionic Inflation', 'ATAC', 'CNIC', 'REIT', 'Newday Ocean'},
|
||||||
|
'cambria': {'Cambria'},
|
||||||
|
'main-management': {'Main'},
|
||||||
|
'allianz': {'AllianzIM'},
|
||||||
|
'putnam': {'Putnam'},
|
||||||
|
'aptus-capital-advisors': {'Aptus'},
|
||||||
|
'yieldmax': {'YieldMax'},
|
||||||
|
'graniteshares': {'GraniteShares'},
|
||||||
|
'us-global-investors': {'U.S. Global'},
|
||||||
|
'the-motley-fool': {'Motley Fool'},
|
||||||
|
'inspire': {'Inspire'},
|
||||||
|
'defiance': {'Defiance'},
|
||||||
|
'harbor': {'Harbor'},
|
||||||
|
'advisorshares': {'AdvisorShares'},
|
||||||
|
'virtus-investment-partners': {'Virtus'},
|
||||||
|
'strategy-shares': {'Strategy Shares'},
|
||||||
|
'redwood': {'LeaderShares'},
|
||||||
|
'morgan-stanley': {'Calvert', 'Morgan Stanley'},
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
for provider, keywords in provider_mapping.items():
|
||||||
|
if any(keyword in etf_name for keyword in keywords):
|
||||||
|
return provider
|
||||||
|
|
||||||
|
return 'other'
|
||||||
|
|
||||||
|
|
||||||
|
class ETFDatabase:
|
||||||
|
def __init__(self, db_path):
|
||||||
|
self.db_path = db_path
|
||||||
|
self.conn = sqlite3.connect(db_path)
|
||||||
|
self.cursor = self.conn.cursor()
|
||||||
|
self.cursor.execute("PRAGMA journal_mode = wal")
|
||||||
|
self.conn.commit()
|
||||||
|
self._create_table()
|
||||||
|
|
||||||
|
def close_connection(self):
|
||||||
|
self.cursor.close()
|
||||||
|
self.conn.close()
|
||||||
|
|
||||||
|
def _create_table(self):
|
||||||
|
self.cursor.execute("""
|
||||||
|
CREATE TABLE IF NOT EXISTS etfs (
|
||||||
|
symbol TEXT PRIMARY KEY,
|
||||||
|
name TEXT,
|
||||||
|
exchange TEXT,
|
||||||
|
exchangeShortName TEXT,
|
||||||
|
type TEXT
|
||||||
|
)
|
||||||
|
""")
|
||||||
|
|
||||||
|
def get_column_type(self, value):
|
||||||
|
column_type = ""
|
||||||
|
|
||||||
|
if isinstance(value, str):
|
||||||
|
column_type = "TEXT"
|
||||||
|
elif isinstance(value, int):
|
||||||
|
column_type = "INTEGER"
|
||||||
|
elif isinstance(value, float):
|
||||||
|
column_type = "REAL"
|
||||||
|
else:
|
||||||
|
# Handle other data types or customize based on your specific needs
|
||||||
|
column_type = "TEXT"
|
||||||
|
|
||||||
|
return column_type
|
||||||
|
|
||||||
|
    def remove_null(self, value):
        # Replace missing values so the SQLite columns never receive NULL.
        # (The original isinstance checks could never match, since None is not a str/int/float.)
        if value is None:
            value = 'n/a'
        return value

def delete_data_if_condition(self, condition, symbol):
|
||||||
|
# Get a list of all tables in the database
|
||||||
|
self.cursor.execute("SELECT name FROM sqlite_master WHERE type='table'")
|
||||||
|
tables = [table[0] for table in self.cursor.fetchall()]
|
||||||
|
|
||||||
|
for table in tables:
|
||||||
|
# Check if the table name is not 'etfs' (the main table)
|
||||||
|
if table != 'etfs':
|
||||||
|
# Construct a DELETE query to delete data from the table based on the condition
|
||||||
|
delete_query = f"DELETE FROM {table} WHERE {condition}"
|
||||||
|
|
||||||
|
# Execute the DELETE query with the symbol as a parameter
|
||||||
|
self.cursor.execute(delete_query, (symbol,))
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
async def save_fundamental_data(self, session, symbol):
|
||||||
|
try:
|
||||||
|
urls = [
|
||||||
|
f"https://financialmodelingprep.com/api/v4/etf-info?symbol={symbol}&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/etf-holder/{symbol}?apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/etf-country-weightings/{symbol}?apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/quote/{symbol}?apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/historical-price-full/stock_dividend/{symbol}?apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/stock_news?tickers={symbol}&limit=50&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/institutional-ownership/institutional-holders/symbol-ownership-percent?date=2023-09-30&symbol={symbol}&page=0&apikey={api_key}",
|
||||||
|
]
|
||||||
|
|
||||||
|
fundamental_data = {}
|
||||||
|
|
||||||
|
|
||||||
|
for url in urls:
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
parsed_data = get_jsonparsed_data(data)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if isinstance(parsed_data, list) and "etf-info" in url:
|
||||||
|
fundamental_data['profile'] = ujson.dumps(parsed_data)
|
||||||
|
etf_name = parsed_data[0]['name']
|
||||||
|
etf_provider = get_etf_provider(etf_name)
|
||||||
|
|
||||||
|
data_dict = {
|
||||||
|
'inceptionDate': parsed_data[0]['inceptionDate'],
|
||||||
|
'etfProvider': etf_provider,
|
||||||
|
'expenseRatio': round(parsed_data[0]['expenseRatio'],2),
|
||||||
|
'totalAssets': parsed_data[0]['aum'],
|
||||||
|
}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
|
||||||
|
elif isinstance(parsed_data, list) and "quote" in url:
|
||||||
|
fundamental_data['quote'] = ujson.dumps(parsed_data)
|
||||||
|
data_dict = {
|
||||||
|
'price': parsed_data[0]['price'],
|
||||||
|
'changesPercentage': round(parsed_data[0]['changesPercentage'],2),
|
||||||
|
'marketCap': parsed_data[0]['marketCap'],
|
||||||
|
'eps': round(parsed_data[0]['eps'],2),
|
||||||
|
'pe': round(parsed_data[0]['pe'],2),
|
||||||
|
'previousClose': parsed_data[0]['previousClose'],
|
||||||
|
}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
|
||||||
|
elif isinstance(parsed_data, list) and "stock_news" in url:
|
||||||
|
fundamental_data['etf_news'] = ujson.dumps(parsed_data)
|
||||||
|
elif isinstance(parsed_data, list) and "etf-holder" in url:
|
||||||
|
fundamental_data['holding'] = ujson.dumps(parsed_data)
|
||||||
|
data_dict = {'numberOfHoldings': len(json.loads(fundamental_data['holding']))}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
elif isinstance(parsed_data, list) and "etf-country-weightings" in url:
|
||||||
|
fundamental_data['country_weightings'] = ujson.dumps(parsed_data)
|
||||||
|
|
||||||
|
elif "stock_dividend" in url:
|
||||||
|
fundamental_data['etf_dividend'] = ujson.dumps(parsed_data)
|
||||||
|
|
||||||
|
elif "institutional-ownership/institutional-holders" in url:
|
||||||
|
fundamental_data['shareholders'] = ujson.dumps(parsed_data)
|
||||||
|
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# Check if columns already exist in the table
|
||||||
|
self.cursor.execute("PRAGMA table_info(etfs)")
|
||||||
|
columns = {column[1]: column[2] for column in self.cursor.fetchall()}
|
||||||
|
|
||||||
|
# Update column definitions with keys from fundamental_data
|
||||||
|
column_definitions = {
|
||||||
|
key: (self.get_column_type(fundamental_data.get(key, None)), self.remove_null(fundamental_data.get(key, None)))
|
||||||
|
for key in fundamental_data
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(json.loads(fundamental_data['holding'])) == 0:
|
||||||
|
self.cursor.execute("DELETE FROM etfs WHERE symbol = ?", (symbol,))
|
||||||
|
#self.cursor.execute("DELETE FROM symbol WHERE symbol = ?", (symbol,))
|
||||||
|
self.conn.commit()
|
||||||
|
print(f"Delete {symbol}")
|
||||||
|
return
|
||||||
|
|
||||||
|
for column, (column_type, value) in column_definitions.items():
|
||||||
|
if column not in columns and column_type:
|
||||||
|
self.cursor.execute(f"ALTER TABLE etfs ADD COLUMN {column} {column_type}")
|
||||||
|
|
||||||
|
self.cursor.execute(f"UPDATE etfs SET {column} = ? WHERE symbol = ?", (value, symbol))
|
||||||
|
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to fetch fundamental data for symbol {symbol}: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
async def save_etfs(self, etfs):
|
||||||
|
symbols = []
|
||||||
|
names = []
|
||||||
|
ticker_data = []
|
||||||
|
|
||||||
|
for etf in etfs:
|
||||||
|
exchange_short_name = etf.get('exchangeShortName', '')
|
||||||
|
ticker_type = etf.get('type', '')
|
||||||
|
symbol = etf.get('symbol', '')
|
||||||
|
name = etf.get('name', '')
|
||||||
|
exchange = etf.get('exchange', '')
|
||||||
|
|
||||||
|
if (name and '.' not in symbol and not re.search(r'\d', symbol)) or symbol == 'QDVE.DE':
|
||||||
|
symbols.append(symbol)
|
||||||
|
names.append(name)
|
||||||
|
ticker_data.append((symbol, name, exchange, exchange_short_name, ticker_type))
|
||||||
|
|
||||||
|
|
||||||
|
self.cursor.execute("BEGIN TRANSACTION") # Begin a transaction
|
||||||
|
|
||||||
|
for data in ticker_data:
|
||||||
|
symbol, name, exchange, exchange_short_name, ticker_type = data
|
||||||
|
self.cursor.execute("""
|
||||||
|
INSERT OR IGNORE INTO etfs (symbol, name, exchange, exchangeShortName, type)
|
||||||
|
VALUES (?, ?, ?, ?, ?)
|
||||||
|
""", (symbol, name, exchange, exchange_short_name, ticker_type))
|
||||||
|
self.cursor.execute("""
|
||||||
|
UPDATE etfs SET name = ?, exchange = ?, exchangeShortName = ?, type = ?
|
||||||
|
WHERE symbol = ?
|
||||||
|
""", (name, exchange, exchange_short_name, ticker_type, symbol))
|
||||||
|
|
||||||
|
self.cursor.execute("COMMIT") # Commit the transaction
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Save OHLC data for each ticker using aiohttp
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
tasks = []
|
||||||
|
i = 0
|
||||||
|
for etf_data in tqdm(ticker_data):
|
||||||
|
symbol, name, exchange, exchange_short_name, ticker_type = etf_data
|
||||||
|
symbol = symbol.replace("-", "")
|
||||||
|
tasks.append(self.save_ohlc_data(session, symbol))
|
||||||
|
tasks.append(self.save_fundamental_data(session, symbol))
|
||||||
|
|
||||||
|
i += 1
|
||||||
|
if i % 150 == 0:
|
||||||
|
await asyncio.gather(*tasks)
|
||||||
|
tasks = []
|
||||||
|
print('sleeping mode: ', i)
|
||||||
|
await asyncio.sleep(60) # Pause for 60 seconds
|
||||||
|
|
||||||
|
#tasks.append(self.save_ohlc_data(session, "%5EGSPC"))
|
||||||
|
|
||||||
|
if tasks:
|
||||||
|
await asyncio.gather(*tasks)
|
||||||
|
|
||||||
|
|
||||||
|
def _create_ticker_table(self, symbol):
|
||||||
|
#cleaned_symbol = re.sub(r'[^a-zA-Z0-9_]', '_', symbol)
|
||||||
|
# Check if table exists
|
||||||
|
self.cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{symbol}'")
|
||||||
|
table_exists = self.cursor.fetchone() is not None
|
||||||
|
|
||||||
|
if not table_exists:
|
||||||
|
query = f"""
|
||||||
|
CREATE TABLE '{cleaned_symbol}' (
|
||||||
|
date TEXT,
|
||||||
|
open FLOAT,
|
||||||
|
high FLOAT,
|
||||||
|
low FLOAT,
|
||||||
|
close FLOAT,
|
||||||
|
volume INT,
|
||||||
|
change_percent FLOAT
|
||||||
|
);
|
||||||
|
"""
|
||||||
|
self.cursor.execute(query)
|
||||||
|
|
||||||
|
async def save_ohlc_data(self, session, symbol):
|
||||||
|
try:
|
||||||
|
#self._create_ticker_table(symbol) # Create table for the symbol
|
||||||
|
|
||||||
|
url = f"https://financialmodelingprep.com/api/v3/historical-price-full/{symbol}?serietype=bar&from={start_date}&to={end_date}&apikey={api_key}"
|
||||||
|
|
||||||
|
try:
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
|
||||||
|
ohlc_data = get_jsonparsed_data(data)
|
||||||
|
if 'historical' in ohlc_data:
|
||||||
|
ohlc_values = [(item['date'], item['open'], item['high'], item['low'], item['close'], item['volume'], item['changePercent']) for item in ohlc_data['historical'][::-1]]
|
||||||
|
|
||||||
|
df = pd.DataFrame(ohlc_values, columns=['date', 'open', 'high', 'low', 'close', 'volume', 'change_percent'])
|
||||||
|
|
||||||
|
# Perform bulk insert
|
||||||
|
df.to_sql(symbol, self.conn, if_exists='append', index=False)
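# Note: pandas' to_sql with if_exists='append' creates the per-symbol table on
# first write, which is presumably why the _create_ticker_table call above is
# commented out; column types are then inferred by pandas instead of coming
# from an explicit CREATE TABLE statement.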
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to fetch OHLC data for symbol {symbol}: {str(e)}")
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to create table for symbol {symbol}: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
url = f"https://financialmodelingprep.com/api/v3/etf/list?apikey={api_key}"
|
||||||
|
|
||||||
|
|
||||||
|
async def fetch_tickers():
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
return get_jsonparsed_data(data)
|
||||||
|
|
||||||
|
|
||||||
|
db = ETFDatabase('backup_db/etf.db')
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
all_tickers = loop.run_until_complete(fetch_tickers())
|
||||||
|
loop.run_until_complete(db.save_etfs(all_tickers))
|
||||||
|
db.close_connection()
261
app/create_institute_db.py
Normal file
@ -0,0 +1,261 @@
import aiohttp
|
||||||
|
import asyncio
|
||||||
|
import sqlite3
|
||||||
|
import certifi
|
||||||
|
import json
|
||||||
|
import pandas as pd
|
||||||
|
from tqdm import tqdm
|
||||||
|
import re
|
||||||
|
import pandas as pd
|
||||||
|
from datetime import datetime
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
import warnings
|
||||||
|
from dotenv import load_dotenv
|
||||||
|
import os
|
||||||
|
|
||||||
|
# Filter out the specific RuntimeWarning
|
||||||
|
warnings.filterwarnings("ignore", category=RuntimeWarning, message="invalid value encountered in scalar divide")
|
||||||
|
|
||||||
|
conn = sqlite3.connect('stocks.db')
|
||||||
|
cursor = conn.cursor()
|
||||||
|
|
||||||
|
# Execute the SQL query
|
||||||
|
cursor.execute("SELECT symbol FROM stocks")
|
||||||
|
|
||||||
|
# Fetch all the results into a list
|
||||||
|
symbol_list = [row[0] for row in cursor.fetchall()]
|
||||||
|
conn.close()
|
||||||
|
|
||||||
|
|
||||||
|
load_dotenv()
|
||||||
|
api_key = os.getenv('FMP_API_KEY')
|
||||||
|
quarter_date = '2024-3-31'
|
||||||
|
|
||||||
|
|
||||||
|
if os.path.exists("backup_db/institute.db"):
|
||||||
|
os.remove('backup_db/institute.db')
|
||||||
|
|
||||||
|
|
||||||
|
def get_jsonparsed_data(data):
|
||||||
|
try:
|
||||||
|
return json.loads(data)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class InstituteDatabase:
|
||||||
|
def __init__(self, db_path):
|
||||||
|
self.db_path = db_path
|
||||||
|
self.conn = sqlite3.connect(db_path)
|
||||||
|
self.cursor = self.conn.cursor()
|
||||||
|
self.cursor.execute("PRAGMA journal_mode = wal")
|
||||||
|
self.conn.commit()
|
||||||
|
self._create_table()
|
||||||
|
|
||||||
|
def close_connection(self):
|
||||||
|
self.cursor.close()
|
||||||
|
self.conn.close()
|
||||||
|
|
||||||
|
def _create_table(self):
|
||||||
|
self.cursor.execute("""
|
||||||
|
CREATE TABLE IF NOT EXISTS institutes (
|
||||||
|
cik TEXT PRIMARY KEY,
|
||||||
|
name TEXT
|
||||||
|
)
|
||||||
|
""")
|
||||||
|
|
||||||
|
|
||||||
|
def get_column_type(self, value):
|
||||||
|
column_type = ""
|
||||||
|
|
||||||
|
if isinstance(value, str):
|
||||||
|
column_type = "TEXT"
|
||||||
|
elif isinstance(value, int):
|
||||||
|
column_type = "INTEGER"
|
||||||
|
elif isinstance(value, float):
|
||||||
|
column_type = "REAL"
|
||||||
|
else:
|
||||||
|
# Handle other data types or customize based on your specific needs
|
||||||
|
column_type = "TEXT"
|
||||||
|
|
||||||
|
return column_type
|
||||||
|
|
||||||
|
def remove_null(self, value):
|
||||||
|
# Replace missing values with a text placeholder (None must be tested for directly,
# since it is never an instance of str/int/float).
if value is None:
value = 'n/a'
|
||||||
|
|
||||||
|
return value
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
async def save_portfolio_data(self, session, cik):
|
||||||
|
try:
|
||||||
|
urls = [
|
||||||
|
f"https://financialmodelingprep.com/api/v4/institutional-ownership/industry/portfolio-holdings-summary?cik={cik}&date={quarter_date}&page=0&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/institutional-ownership/portfolio-holdings?cik={cik}&date={quarter_date}&page=0&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/institutional-ownership/portfolio-holdings-summary?cik={cik}&date={quarter_date}&page=0&apikey={api_key}"
|
||||||
|
]
|
||||||
|
|
||||||
|
portfolio_data = {}
|
||||||
|
|
||||||
|
for url in urls:
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
parsed_data = get_jsonparsed_data(data)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if isinstance(parsed_data, list) and "industry/portfolio-holdings-summary" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
portfolio_data['industry'] = json.dumps(parsed_data)
|
||||||
|
if isinstance(parsed_data, list) and "https://financialmodelingprep.com/api/v4/institutional-ownership/portfolio-holdings?cik=" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
|
||||||
|
parsed_data = [item for item in parsed_data if 'symbol' in item and item['symbol'] is not None and item['symbol'] in symbol_list] #symbol must be included in the database
|
||||||
|
portfolio_data['holdings'] = json.dumps(parsed_data)
|
||||||
|
|
||||||
|
|
||||||
|
number_of_stocks = len(parsed_data)
|
||||||
|
total_market_value = sum(item['marketValue'] for item in parsed_data)
|
||||||
|
avg_performance_percentage = sum(item['performancePercentage'] for item in parsed_data) / len(parsed_data)
|
||||||
|
|
||||||
|
performance_percentages = [item.get("performancePercentage", 0) for item in parsed_data]
|
||||||
|
positive_performance_count = sum(1 for percentage in performance_percentages if percentage > 0)
|
||||||
|
win_rate = round(positive_performance_count / len(performance_percentages) * 100,2)
|
||||||
|
data_dict = {
|
||||||
|
'winRate': win_rate,
|
||||||
|
'numberOfStocks': number_of_stocks,
|
||||||
|
'marketValue': total_market_value,
|
||||||
|
'avgPerformancePercentage': avg_performance_percentage,
|
||||||
|
}
|
||||||
|
|
||||||
|
portfolio_data.update(data_dict)
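# Worked example for the stats above (illustrative numbers only): with
# performancePercentage values [2.0, -1.0, 3.0], positive_performance_count is 2,
# so win_rate = round(2 / 3 * 100, 2) = 66.67, numberOfStocks is 3 and
# marketValue is simply the sum of the three marketValue fields.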
|
||||||
|
|
||||||
|
elif isinstance(parsed_data, list) and "https://financialmodelingprep.com/api/v4/institutional-ownership/portfolio-holdings-summary" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
data_dict = {
|
||||||
|
#'numberOfStocks': parsed_data[0]['portfolioSize'],
|
||||||
|
#'marketValue': parsed_data[0]['marketValue'],
|
||||||
|
'averageHoldingPeriod': parsed_data[0]['averageHoldingPeriod'],
|
||||||
|
'turnover': parsed_data[0]['turnover'],
|
||||||
|
#'performancePercentage': parsed_data[0]['performancePercentage']
|
||||||
|
}
|
||||||
|
portfolio_data.update(data_dict)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Check if columns already exist in the table
|
||||||
|
self.cursor.execute("PRAGMA table_info(institutes)")
|
||||||
|
columns = {column[1]: column[2] for column in self.cursor.fetchall()}
|
||||||
|
|
||||||
|
holdings_list = json.loads(portfolio_data['holdings'])
|
||||||
|
|
||||||
|
symbols_to_check = {holding['symbol'] for holding in holdings_list[:3]} # Extract the first three symbols
|
||||||
|
symbols_not_in_list = not any(symbol in symbol_list for symbol in symbols_to_check)
|
||||||
|
|
||||||
|
|
||||||
|
if symbols_not_in_list or 'industry' not in portfolio_data or len(json.loads(portfolio_data['industry'])) == 0:
|
||||||
|
# If 'industry' is not a list, delete the row and return
|
||||||
|
#print(f"Deleting row for cik {cik} because 'industry' is not a list.")
|
||||||
|
self.cursor.execute("DELETE FROM institutes WHERE cik = ?", (cik,))
|
||||||
|
self.conn.commit()
|
||||||
|
return
|
||||||
|
|
||||||
|
# Update column definitions with keys from portfolio_data
|
||||||
|
column_definitions = {
|
||||||
|
key: (self.get_column_type(portfolio_data.get(key, None)), self.remove_null(portfolio_data.get(key, None)))
|
||||||
|
for key in portfolio_data
|
||||||
|
}
|
||||||
|
|
||||||
|
for column, (column_type, value) in column_definitions.items():
|
||||||
|
if column not in columns and column_type:
|
||||||
|
self.cursor.execute(f"ALTER TABLE institutes ADD COLUMN {column} {column_type}")
|
||||||
|
|
||||||
|
self.cursor.execute(f"UPDATE institutes SET {column} = ? WHERE cik = ?", (value, cik))
|
||||||
|
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to fetch portfolio data for cik {cik}: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
async def save_institute(self, institutes):
|
||||||
|
|
||||||
|
institute_data = []
|
||||||
|
|
||||||
|
for item in institutes:
|
||||||
|
cik = item.get('cik', '')
|
||||||
|
name = item.get('name', '')
|
||||||
|
|
||||||
|
|
||||||
|
institute_data.append((cik, name))
|
||||||
|
|
||||||
|
|
||||||
|
self.cursor.execute("BEGIN TRANSACTION") # Begin a transaction
|
||||||
|
|
||||||
|
for data in institute_data:
|
||||||
|
cik, name = data
|
||||||
|
self.cursor.execute("""
|
||||||
|
INSERT OR IGNORE INTO institutes (cik, name)
|
||||||
|
VALUES (?, ?)
|
||||||
|
""", (cik, name))
|
||||||
|
self.cursor.execute("""
|
||||||
|
UPDATE institutes SET name = ?
|
||||||
|
WHERE cik = ?
|
||||||
|
""", (name, cik))
|
||||||
|
|
||||||
|
self.cursor.execute("COMMIT") # Commit the transaction
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Save OHLC data for each ticker using aiohttp
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
tasks = []
|
||||||
|
i = 0
|
||||||
|
for item in tqdm(institute_data):
|
||||||
|
cik, name = item
|
||||||
|
tasks.append(self.save_portfolio_data(session, cik))
|
||||||
|
|
||||||
|
i += 1
|
||||||
|
if i % 700 == 0:
|
||||||
|
await asyncio.gather(*tasks)
|
||||||
|
tasks = []
|
||||||
|
print('sleeping mode: ', i)
|
||||||
|
await asyncio.sleep(60) # Pause for 60 seconds
|
||||||
|
|
||||||
|
|
||||||
|
if tasks:
|
||||||
|
await asyncio.gather(*tasks)
|
||||||
|
|
||||||
|
|
||||||
|
url = f"https://financialmodelingprep.com/api/v4/institutional-ownership/list?apikey={api_key}"
|
||||||
|
|
||||||
|
|
||||||
|
async def fetch_tickers():
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
return get_jsonparsed_data(data)
|
||||||
|
|
||||||
|
|
||||||
|
db = InstituteDatabase('backup_db/institute.db')
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
all_tickers = loop.run_until_complete(fetch_tickers())
|
||||||
|
loop.run_until_complete(db.save_institute(all_tickers))
|
||||||
|
db.close_connection()
517
app/create_stock_db.py
Normal file
@ -0,0 +1,517 @@
import aiohttp
|
||||||
|
import asyncio
|
||||||
|
import sqlite3
|
||||||
|
import certifi
|
||||||
|
import json
|
||||||
|
import ujson
|
||||||
|
import pandas as pd
|
||||||
|
import os
|
||||||
|
from tqdm import tqdm
|
||||||
|
import re
|
||||||
|
import pandas as pd
|
||||||
|
from datetime import datetime
|
||||||
|
from ta.utils import *
|
||||||
|
from ta.volatility import *
|
||||||
|
from ta.momentum import *
|
||||||
|
from ta.trend import *
|
||||||
|
from ta.volume import *
|
||||||
|
import subprocess
|
||||||
|
import time
|
||||||
|
import warnings
|
||||||
|
|
||||||
|
from dotenv import load_dotenv
|
||||||
|
import os
|
||||||
|
load_dotenv()
|
||||||
|
api_key = os.getenv('FMP_API_KEY')
|
||||||
|
|
||||||
|
# Filter out the specific RuntimeWarning
|
||||||
|
warnings.filterwarnings("ignore", category=RuntimeWarning, message="invalid value encountered in scalar divide")
|
||||||
|
|
||||||
|
|
||||||
|
start_date = datetime(2015, 1, 1).strftime("%Y-%m-%d")
|
||||||
|
end_date = datetime.today().strftime("%Y-%m-%d")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
if os.path.exists("backup_db/stocks.db"):
|
||||||
|
os.remove('backup_db/stocks.db')
|
||||||
|
|
||||||
|
|
||||||
|
def get_jsonparsed_data(data):
|
||||||
|
try:
|
||||||
|
return json.loads(data)
|
||||||
|
except json.JSONDecodeError:
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class StockDatabase:
|
||||||
|
def __init__(self, db_path):
|
||||||
|
self.db_path = db_path
|
||||||
|
self.conn = sqlite3.connect(db_path)
|
||||||
|
self.cursor = self.conn.cursor()
|
||||||
|
self.cursor.execute("PRAGMA journal_mode = wal")
|
||||||
|
self.conn.commit()
|
||||||
|
self._create_table()
|
||||||
|
|
||||||
|
def close_connection(self):
|
||||||
|
self.cursor.close()
|
||||||
|
self.conn.close()
|
||||||
|
|
||||||
|
def _create_table(self):
|
||||||
|
self.cursor.execute("""
|
||||||
|
CREATE TABLE IF NOT EXISTS stocks (
|
||||||
|
symbol TEXT PRIMARY KEY,
|
||||||
|
name TEXT,
|
||||||
|
exchange TEXT,
|
||||||
|
exchangeShortName TEXT,
|
||||||
|
type TEXT
|
||||||
|
)
|
||||||
|
""")
|
||||||
|
|
||||||
|
|
||||||
|
def get_column_type(self, value):
|
||||||
|
column_type = ""
|
||||||
|
|
||||||
|
if isinstance(value, str):
|
||||||
|
column_type = "TEXT"
|
||||||
|
elif isinstance(value, int):
|
||||||
|
column_type = "INTEGER"
|
||||||
|
elif isinstance(value, float):
|
||||||
|
column_type = "REAL"
|
||||||
|
else:
|
||||||
|
# Handle other data types or customize based on your specific needs
|
||||||
|
column_type = "TEXT"
|
||||||
|
|
||||||
|
return column_type
|
||||||
|
|
||||||
|
def remove_null(self, value):
|
||||||
|
# Replace missing values with a text placeholder (None must be tested for directly,
# since it is never an instance of str/int/float).
if value is None:
value = 'n/a'
|
||||||
|
|
||||||
|
return value
|
||||||
|
|
||||||
|
|
||||||
|
async def save_fundamental_data(self, session, symbol):
|
||||||
|
try:
|
||||||
|
urls = [
|
||||||
|
f"https://financialmodelingprep.com/api/v3/profile/{symbol}?apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/quote/{symbol}?apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/income-statement/{symbol}?period=quarter&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/income-statement-growth/{symbol}?period=quarter&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/stock_news?tickers={symbol}&limit=50&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/esg-environmental-social-governance-data-ratings?symbol={symbol}&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/esg-environmental-social-governance-data?symbol={symbol}&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/historical-price-full/stock_dividend/{symbol}?limit=400&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/historical/employee_count?symbol={symbol}&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/balance-sheet-statement/{symbol}?period=quarter&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/balance-sheet-statement-growth/{symbol}?period=quarter&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/cash-flow-statement/{symbol}?period=quarter&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/cash-flow-statement-growth/{symbol}?period=quarter&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/ratios/{symbol}?period=quarter&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/historical-price-full/stock_split/{symbol}?apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/stock_peers?symbol={symbol}&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/institutional-ownership/institutional-holders/symbol-ownership-percent?date=2023-09-30&symbol={symbol}&page=0&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/revenue-product-segmentation?symbol={symbol}&structure=flat&period=annual&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v4/revenue-geographic-segmentation?symbol={symbol}&structure=flat&apikey={api_key}",
|
||||||
|
f"https://financialmodelingprep.com/api/v3/analyst-estimates/{symbol}?apikey={api_key}",
|
||||||
|
]
|
||||||
|
|
||||||
|
fundamental_data = {}
|
||||||
|
|
||||||
|
# Check if 'income' and 'income_growth' data already exist for the symbol
|
||||||
|
try:
|
||||||
|
self.cursor.execute("SELECT income, income_growth, balance, balance_growth, cashflow, cashflow_growth, ratios, stock_peers, esg_data, esg_ratings FROM stocks WHERE symbol = ?", (symbol,))
|
||||||
|
existing_data = self.cursor.fetchone()
|
||||||
|
income_exists = existing_data and existing_data[0] is not None
|
||||||
|
income_growth_exists = existing_data and existing_data[1] is not None
|
||||||
|
balance_exists = existing_data and existing_data[2] is not None
balance_growth_exists = existing_data and existing_data[3] is not None
cashflow_exists = existing_data and existing_data[4] is not None
cashflow_growth_exists = existing_data and existing_data[5] is not None
ratios_exists = existing_data and existing_data[6] is not None
stock_peers_exists = existing_data and existing_data[7] is not None
esg_data_exists = existing_data and existing_data[8] is not None
esg_ratings_exists = existing_data and existing_data[9] is not None
|
||||||
|
except:
|
||||||
|
income_exists = False
|
||||||
|
income_growth_exists = False
|
||||||
|
balance_exists = False
|
||||||
|
balance_growth_exists = False
|
||||||
|
cashflow_exists = False
|
||||||
|
cashflow_growth_exists = False
|
||||||
|
ratios_exists = False
|
||||||
|
stock_peers_exists = False
|
||||||
|
esg_data_exists = False
|
||||||
|
esg_ratings_exists = False
|
||||||
|
|
||||||
|
for url in urls:
|
||||||
|
|
||||||
|
# Skip the API calls if the data already exists
|
||||||
|
if 'income-statement/' in url and income_exists:
|
||||||
|
print(f"Skipping income-statement for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
elif 'income-statement-growth/' in url and income_growth_exists:
|
||||||
|
print(f"Skipping income-statement-growth for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
elif 'balance-sheet-statement/' in url and balance_exists:
|
||||||
|
print(f"Skipping balance-statement for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
elif 'balance-sheet-statement-growth/' in url and balance_growth_exists:
|
||||||
|
print(f"Skipping balance-statement-growth for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
elif 'cash-flow-statement/' in url and cashflow_exists:
|
||||||
|
print(f"Skipping cashflow-statement for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
elif 'cash-flow-statement-growth/' in url and cashflow_growth_exists:
|
||||||
|
print(f"Skipping cashflow-statement-growth for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
elif '/v3/ratios/' in url and ratios_exists:
|
||||||
|
print(f"Skipping ratios for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
elif 'stock_peers?' in url and stock_peers_exists:
|
||||||
|
print(f"Skipping stock_peers for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
elif 'esg-environmental-social-governance-data?' in url and esg_data_exists:
|
||||||
|
print(f"Skipping esg_data for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
elif 'esg-environmental-social-governance-data-ratings?' in url and esg_ratings_exists:
|
||||||
|
print(f"Skipping esg_ratings for {symbol} as it already exists.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
parsed_data = get_jsonparsed_data(data)
|
||||||
|
|
||||||
|
try:
|
||||||
|
if isinstance(parsed_data, list) and "profile" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['profile'] = ujson.dumps(parsed_data)
|
||||||
|
data_dict = {
|
||||||
|
'beta': parsed_data[0]['beta'],
|
||||||
|
'country': parsed_data[0]['country'],
|
||||||
|
'sector': parsed_data[0]['sector'],
|
||||||
|
'industry': parsed_data[0]['industry'],
|
||||||
|
'discounted_cash_flow': round(parsed_data[0]['dcf'],2),
|
||||||
|
}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
|
||||||
|
elif isinstance(parsed_data, list) and "quote" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['quote'] = ujson.dumps(parsed_data)
|
||||||
|
data_dict = {
|
||||||
|
'price': parsed_data[0]['price'],
|
||||||
|
'changesPercentage': round(parsed_data[0]['changesPercentage'],2),
|
||||||
|
'marketCap': parsed_data[0]['marketCap'],
|
||||||
|
'avgVolume': parsed_data[0]['avgVolume'],
|
||||||
|
'eps': parsed_data[0]['eps'],
|
||||||
|
'pe': parsed_data[0]['pe'],
|
||||||
|
}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
|
||||||
|
elif isinstance(parsed_data, list) and "income-statement/" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['income'] = ujson.dumps(parsed_data)
|
||||||
|
data_dict = {'revenue': parsed_data[0]['revenue'],
|
||||||
|
'netIncome': parsed_data[0]['netIncome'],
|
||||||
|
'grossProfit': parsed_data[0]['grossProfit'],
|
||||||
|
'costOfRevenue':parsed_data[0]['costOfRevenue'],
|
||||||
|
'costAndExpenses':parsed_data[0]['costAndExpenses'],
|
||||||
|
'interestIncome':parsed_data[0]['interestIncome'],
|
||||||
|
'interestExpense':parsed_data[0]['interestExpense'],
|
||||||
|
'researchAndDevelopmentExpenses':parsed_data[0]['researchAndDevelopmentExpenses'],
|
||||||
|
'ebitda':parsed_data[0]['ebitda'],
|
||||||
|
'ebitdaratio':parsed_data[0]['ebitdaratio'],
|
||||||
|
'depreciationAndAmortization':parsed_data[0]['depreciationAndAmortization'],
|
||||||
|
'operatingIncome':parsed_data[0]['operatingIncome'],
|
||||||
|
'operatingExpenses':parsed_data[0]['operatingExpenses']
|
||||||
|
}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
elif isinstance(parsed_data, list) and "/v3/ratios/" in url:
|
||||||
|
fundamental_data['ratios'] = ujson.dumps(parsed_data)
|
||||||
|
data_dict = {'payoutRatio': parsed_data[0]['payoutRatio'],
|
||||||
|
'priceToBookRatio': parsed_data[0]['priceToBookRatio'],
|
||||||
|
'dividendPayoutRatio': parsed_data[0]['dividendPayoutRatio'],
|
||||||
|
'priceToSalesRatio':parsed_data[0]['priceToSalesRatio'],
|
||||||
|
'priceEarningsRatio':parsed_data[0]['priceEarningsRatio'],
|
||||||
|
'priceCashFlowRatio':parsed_data[0]['priceCashFlowRatio'],
|
||||||
|
'priceSalesRatio':parsed_data[0]['priceSalesRatio'],
|
||||||
|
'dividendYield':parsed_data[0]['dividendYield'],
|
||||||
|
'cashFlowToDebtRatio':parsed_data[0]['cashFlowToDebtRatio'],
|
||||||
|
'freeCashFlowPerShare':parsed_data[0]['freeCashFlowPerShare'],
|
||||||
|
'cashPerShare':parsed_data[0]['cashPerShare'],
|
||||||
|
}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
|
||||||
|
elif isinstance(parsed_data, list) and "balance-sheet-statement/" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['balance'] = ujson.dumps(parsed_data)
|
||||||
|
elif isinstance(parsed_data, list) and "cash-flow-statement/" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['cashflow'] = ujson.dumps(parsed_data)
|
||||||
|
|
||||||
|
elif isinstance(parsed_data, list) and "sector-benchmark" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['esg_sector_benchmark'] = ujson.dumps(parsed_data)
|
||||||
|
elif isinstance(parsed_data, list) and "income-statement-growth/" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['income_growth'] = ujson.dumps(parsed_data)
|
||||||
|
data_dict = {'growthRevenue': parsed_data[0]['growthRevenue']*100,
|
||||||
|
'growthNetIncome': parsed_data[0]['growthNetIncome']*100,
|
||||||
|
'growthGrossProfit': parsed_data[0]['growthGrossProfit']*100,
|
||||||
|
'growthCostOfRevenue':parsed_data[0]['growthCostOfRevenue']*100,
|
||||||
|
'growthCostAndExpenses':parsed_data[0]['growthCostAndExpenses']*100,
|
||||||
|
'growthInterestExpense':parsed_data[0]['growthInterestExpense']*100,
|
||||||
|
'growthResearchAndDevelopmentExpenses':parsed_data[0]['growthResearchAndDevelopmentExpenses']*100,
|
||||||
|
'growthEBITDA':parsed_data[0]['growthEBITDA']*100,
|
||||||
|
'growthEBITDARatio':parsed_data[0]['growthEBITDARatio']*100,
|
||||||
|
'growthDepreciationAndAmortization':parsed_data[0]['growthDepreciationAndAmortization']*100,
|
||||||
|
'growthEPS':parsed_data[0]['growthEPS']*100,
|
||||||
|
'growthOperatingIncome':parsed_data[0]['growthOperatingIncome']*100,
|
||||||
|
'growthOperatingExpenses':parsed_data[0]['growthOperatingExpenses']*100
|
||||||
|
}
|
||||||
|
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
elif isinstance(parsed_data, list) and "balance-sheet-statement-growth/" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['balance_growth'] = ujson.dumps(parsed_data)
|
||||||
|
elif isinstance(parsed_data, list) and "cash-flow-statement-growth/" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['cashflow_growth'] = ujson.dumps(parsed_data)
|
||||||
|
elif isinstance(parsed_data, list) and "stock_news" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['stock_news'] = ujson.dumps(parsed_data)
|
||||||
|
elif isinstance(parsed_data, list) and "esg-environmental-social-governance-data?" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['esg_data'] = ujson.dumps(parsed_data[0])
|
||||||
|
data_dict = {'ESGScore': parsed_data[0]['ESGScore']}
|
||||||
|
fundamental_data.update(data_dict)
|
||||||
|
|
||||||
|
elif isinstance(parsed_data, list) and "esg-environmental-social-governance-data-ratings?" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['esg_ratings'] = ujson.dumps(parsed_data[0])
|
||||||
|
|
||||||
|
elif "stock_dividend" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['stock_dividend'] = ujson.dumps(parsed_data)
|
||||||
|
elif "employee_count" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['history_employee_count'] = ujson.dumps(parsed_data)
|
||||||
|
elif "stock_split" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['stock_split'] = ujson.dumps(parsed_data['historical'])
|
||||||
|
elif "stock_peers" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['stock_peers'] = ujson.dumps([item for item in parsed_data[0]['peersList'] if item != ""])
|
||||||
|
elif "institutional-ownership/institutional-holders" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['shareholders'] = ujson.dumps(parsed_data)
|
||||||
|
elif "revenue-product-segmentation" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['revenue_product_segmentation'] = ujson.dumps(parsed_data)
|
||||||
|
elif "revenue-geographic-segmentation" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['revenue_geographic_segmentation'] = ujson.dumps(parsed_data)
|
||||||
|
elif "analyst-estimates" in url:
|
||||||
|
# Handle list response, save as JSON object
|
||||||
|
fundamental_data['analyst_estimates'] = ujson.dumps(parsed_data)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
# Check if columns already exist in the table
|
||||||
|
self.cursor.execute("PRAGMA table_info(stocks)")
|
||||||
|
columns = {column[1]: column[2] for column in self.cursor.fetchall()}
|
||||||
|
|
||||||
|
# Update column definitions with keys from fundamental_data
|
||||||
|
column_definitions = {
|
||||||
|
key: (self.get_column_type(fundamental_data.get(key, None)), self.remove_null(fundamental_data.get(key, None)))
|
||||||
|
for key in fundamental_data
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
for column, (column_type, value) in column_definitions.items():
|
||||||
|
if column not in columns and column_type:
|
||||||
|
self.cursor.execute(f"ALTER TABLE stocks ADD COLUMN {column} {column_type}")
|
||||||
|
|
||||||
|
self.cursor.execute(f"UPDATE stocks SET {column} = ? WHERE symbol = ?", (value, symbol))
|
||||||
|
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to fetch fundamental data for symbol {symbol}: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
async def save_stocks(self, stocks):
|
||||||
|
symbols = []
|
||||||
|
names = []
|
||||||
|
ticker_data = []
|
||||||
|
|
||||||
|
for stock in stocks:
|
||||||
|
exchange_short_name = stock.get('exchangeShortName', '')
|
||||||
|
ticker_type = stock.get('type', '')
|
||||||
|
if exchange_short_name in ['XETRA','NYSE', 'NASDAQ','AMEX', 'PNK','EURONEXT'] and ticker_type in ['stock']:
|
||||||
|
symbol = stock.get('symbol', '')
|
||||||
|
if exchange_short_name == 'PNK' and symbol not in ['DRSHF','NTDOY','OTGLF','TCEHY', 'KRKNF','BYDDY','XIACY','NSRGY']:
|
||||||
|
pass
|
||||||
|
elif exchange_short_name == 'EURONEXT' and symbol not in ['ALEUP.PA','ALNEV.PA','ALGAU.PA','ALDRV.PA','ALHYG.PA','ALVMG.PA']:
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
name = stock.get('name', '')
|
||||||
|
exchange = stock.get('exchange', '')
|
||||||
|
|
||||||
|
#if name and '-' not in symbol:
|
||||||
|
if name:
|
||||||
|
symbols.append(symbol)
|
||||||
|
names.append(name)
|
||||||
|
|
||||||
|
ticker_data.append((symbol, name, exchange, exchange_short_name, ticker_type))
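# To recap the filtering above: only tickers from XETRA/NYSE/NASDAQ/AMEX plus a
# small whitelist of PNK and EURONEXT symbols end up in ticker_data; the two bare
# "pass" branches silently drop every other PNK/EURONEXT symbol before the
# insert/update step below.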
|
||||||
|
|
||||||
|
|
||||||
|
self.cursor.execute("BEGIN TRANSACTION") # Begin a transaction
|
||||||
|
|
||||||
|
for data in ticker_data:
|
||||||
|
symbol, name, exchange, exchange_short_name, ticker_type = data
|
||||||
|
|
||||||
|
# Check if the symbol already exists
|
||||||
|
self.cursor.execute("SELECT symbol FROM stocks WHERE symbol = ?", (symbol,))
|
||||||
|
exists = self.cursor.fetchone()
|
||||||
|
|
||||||
|
# If it doesn't exist, insert it
|
||||||
|
if not exists:
|
||||||
|
self.cursor.execute("""
|
||||||
|
INSERT INTO stocks (symbol, name, exchange, exchangeShortName, type)
|
||||||
|
VALUES (?, ?, ?, ?, ?)
|
||||||
|
""", (symbol, name, exchange, exchange_short_name, ticker_type))
|
||||||
|
|
||||||
|
# Update the existing row
|
||||||
|
else:
|
||||||
|
self.cursor.execute("""
|
||||||
|
UPDATE stocks SET name = ?, exchange = ?, exchangeShortName = ?, type = ?
|
||||||
|
WHERE symbol = ?
|
||||||
|
""", (name, exchange, exchange_short_name, ticker_type, symbol))
|
||||||
|
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
# Save OHLC data for each ticker using aiohttp
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
tasks = []
|
||||||
|
i = 0
|
||||||
|
for stock_data in tqdm(ticker_data):
|
||||||
|
symbol, name, exchange, exchange_short_name, ticker_type = stock_data
|
||||||
|
#symbol = symbol.replace("-", "") # Remove "-" from symbol
|
||||||
|
tasks.append(self.save_ohlc_data(session, symbol))
|
||||||
|
tasks.append(self.save_fundamental_data(session, symbol))
|
||||||
|
|
||||||
|
i += 1
|
||||||
|
if i % 60 == 0:
|
||||||
|
await asyncio.gather(*tasks)
|
||||||
|
tasks = []
|
||||||
|
print('sleeping mode: ', i)
|
||||||
|
await asyncio.sleep(60) # Pause for 60 seconds
|
||||||
|
|
||||||
|
|
||||||
|
if tasks:
|
||||||
|
await asyncio.gather(*tasks)
|
||||||
|
|
||||||
|
|
||||||
|
def _create_ticker_table(self, symbol):
|
||||||
|
cleaned_symbol = symbol # NOTE: the symbol is used verbatim as the table name; it is not sanitized here
|
||||||
|
self.cursor.execute(f"""
|
||||||
|
CREATE TABLE IF NOT EXISTS '{cleaned_symbol}' (
|
||||||
|
date TEXT UNIQUE,
|
||||||
|
open FLOAT,
|
||||||
|
high FLOAT,
|
||||||
|
low FLOAT,
|
||||||
|
close FLOAT,
|
||||||
|
volume INT,
|
||||||
|
change_percent FLOAT
|
||||||
|
);
|
||||||
|
""")
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
async def save_ohlc_data(self, session, symbol):
|
||||||
|
try:
|
||||||
|
self._create_ticker_table(symbol) # Ensure the table exists
|
||||||
|
|
||||||
|
# Fetch OHLC data from the API
|
||||||
|
url = f"https://financialmodelingprep.com/api/v3/historical-price-full/{symbol}?serietype=bar&from={start_date}&to={end_date}&apikey={api_key}"
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
|
||||||
|
ohlc_data = get_jsonparsed_data(data)
|
||||||
|
if 'historical' in ohlc_data:
|
||||||
|
historical_data = ohlc_data['historical'][::-1]
|
||||||
|
|
||||||
|
for entry in historical_data:
|
||||||
|
# Prepare the data for each entry
|
||||||
|
date = entry.get('date')
|
||||||
|
open_price = entry.get('open')
|
||||||
|
high = entry.get('high')
|
||||||
|
low = entry.get('low')
|
||||||
|
close = entry.get('close')
|
||||||
|
volume = entry.get('volume')
|
||||||
|
change_percent = entry.get('changePercent')
|
||||||
|
|
||||||
|
# Check if this date's data already exists
|
||||||
|
self.cursor.execute(f"SELECT date FROM '{symbol}' WHERE date = ?", (date,))
|
||||||
|
exists = self.cursor.fetchone()
|
||||||
|
|
||||||
|
# If it doesn't exist, insert the new data
|
||||||
|
if not exists:
|
||||||
|
self.cursor.execute(f"""
|
||||||
|
INSERT INTO '{symbol}' (date, open, high, low, close, volume, change_percent)
|
||||||
|
VALUES (?, ?, ?, ?, ?, ?, ?)
|
||||||
|
""", (date, open_price, high, low, close, volume, change_percent))
|
||||||
|
|
||||||
|
# Commit all changes to the database
|
||||||
|
self.conn.commit()
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed to fetch or insert OHLC data for symbol {symbol}: {str(e)}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
url = f"https://financialmodelingprep.com/api/v3/available-traded/list?apikey={api_key}"
|
||||||
|
|
||||||
|
|
||||||
|
async def fetch_tickers():
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
async with session.get(url) as response:
|
||||||
|
data = await response.text()
|
||||||
|
return get_jsonparsed_data(data)
|
||||||
|
|
||||||
|
|
||||||
|
db = StockDatabase('backup_db/stocks.db')
|
||||||
|
loop = asyncio.get_event_loop()
|
||||||
|
all_tickers = loop.run_until_complete(fetch_tickers())
|
||||||
|
#all_tickers = [item for item in all_tickers if item['symbol'] == 'KRKNF']
|
||||||
|
'''
|
||||||
|
existing_names = set()
|
||||||
|
|
||||||
|
filtered_data = []
|
||||||
|
for item in all_tickers:
|
||||||
|
if '.' not in item['symbol'] and item['name'] not in existing_names:
|
||||||
|
filtered_data.append(item)
|
||||||
|
existing_names.add(item['name'])
|
||||||
|
|
||||||
|
print(len(filtered_data))
|
||||||
|
|
||||||
|
for item in filtered_data:
|
||||||
|
if 'RHM.DE' in item['symbol']:
|
||||||
|
print(item)
|
||||||
|
|
||||||
|
time.sleep(1000)
|
||||||
|
'''
|
||||||
|
|
||||||
|
loop.run_until_complete(db.save_stocks(all_tickers))
|
||||||
|
db.close_connection()
295
app/cron_analyst_db.py
Normal file
@ -0,0 +1,295 @@
from benzinga import financial_data
|
||||||
|
import requests
|
||||||
|
from datetime import datetime, timedelta, date
|
||||||
|
from collections import defaultdict
|
||||||
|
import numpy as np
|
||||||
|
from scipy.stats import norm
|
||||||
|
import time
|
||||||
|
import sqlite3
|
||||||
|
import ujson
|
||||||
|
import os
|
||||||
|
from dotenv import load_dotenv
|
||||||
|
from tqdm import tqdm
|
||||||
|
import pandas as pd
|
||||||
|
from collections import Counter
|
||||||
|
|
||||||
|
load_dotenv()
|
||||||
|
api_key = os.getenv('BENZINGA_API_KEY')
|
||||||
|
|
||||||
|
fin = financial_data.Benzinga(api_key)
|
||||||
|
|
||||||
|
headers = {"accept": "application/json"}
|
||||||
|
|
||||||
|
|
||||||
|
# Define a function to remove duplicates based on a key
|
||||||
|
def remove_duplicates(data, key):
|
||||||
|
seen = set()
|
||||||
|
new_data = []
|
||||||
|
for item in data:
|
||||||
|
if item[key] not in seen:
|
||||||
|
seen.add(item[key])
|
||||||
|
new_data.append(item)
|
||||||
|
return new_data
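# Usage example (illustrative): remove_duplicates([{'id': 1}, {'id': 1}, {'id': 2}], 'id')
# keeps the first occurrence per key and returns [{'id': 1}, {'id': 2}].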
|
||||||
|
|
||||||
|
def extract_sector(ticker, con):
|
||||||
|
|
||||||
|
query_template = f"""
|
||||||
|
SELECT
|
||||||
|
sector
|
||||||
|
FROM
|
||||||
|
stocks
|
||||||
|
WHERE
|
||||||
|
symbol = ?
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
df = pd.read_sql_query(query_template, con, params=(ticker,))
|
||||||
|
sector = df['sector'].iloc[0]
|
||||||
|
except:
|
||||||
|
sector = None
|
||||||
|
|
||||||
|
return sector
|
||||||
|
|
||||||
|
def calculate_rating(data):
|
||||||
|
overall_average_return = float(data['avgReturn'])
|
||||||
|
overall_success_rate = float(data['successRate'])
|
||||||
|
total_ratings = int(data['totalRatings'])
|
||||||
|
last_rating = data['lastRating']
|
||||||
|
|
||||||
|
try:
|
||||||
|
last_rating_date = datetime.strptime(last_rating, "%Y-%m-%d")
|
||||||
|
difference = (datetime.now() - last_rating_date).days
|
||||||
|
except:
|
||||||
|
difference = 1000 # In case of None
|
||||||
|
|
||||||
|
if total_ratings == 0 or difference >= 600:
|
||||||
|
return 0
|
||||||
|
else:
|
||||||
|
# Define weights for each factor
|
||||||
|
weight_return = 0.4
|
||||||
|
weight_success_rate = 0.3
|
||||||
|
weight_total_ratings = 0.1
|
||||||
|
weight_difference = 0.2 # Reduced weight for difference
|
||||||
|
|
||||||
|
# Calculate weighted sum
|
||||||
|
weighted_sum = (weight_return * overall_average_return +
|
||||||
|
weight_success_rate * overall_success_rate +
|
||||||
|
weight_total_ratings * total_ratings +
|
||||||
|
weight_difference * (1 / (1 + difference))) # Adjusted weight for difference
|
||||||
|
|
||||||
|
# Normalize the weighted sum to get a rating between 0 and 5
|
||||||
|
min_rating = 0
|
||||||
|
max_rating = 5
|
||||||
|
normalized_rating = min(max(weighted_sum / (weight_return + weight_success_rate + weight_total_ratings + weight_difference), min_rating), max_rating)
|
||||||
|
|
||||||
|
if normalized_rating >= 4:
|
||||||
|
if total_ratings < 10:
|
||||||
|
normalized_rating -= 2.4
|
||||||
|
elif total_ratings < 15:
|
||||||
|
normalized_rating -= 2.5
|
||||||
|
elif total_ratings < 20:
|
||||||
|
normalized_rating -= 0.75
|
||||||
|
elif total_ratings < 30:
|
||||||
|
normalized_rating -= 1
|
||||||
|
elif overall_average_return <=10:
|
||||||
|
normalized_rating -=1.1
|
||||||
|
'''
|
||||||
|
if overall_average_return <= 0 and overall_average_return >= -5:
|
||||||
|
normalized_rating = min(normalized_rating - 2, 0)
|
||||||
|
elif overall_average_return < -5 and overall_average_return >= -10:
|
||||||
|
normalized_rating = min(normalized_rating - 3, 0)
|
||||||
|
else:
|
||||||
|
normalized_rating = min(normalized_rating - 4, 0)
|
||||||
|
'''
|
||||||
|
if overall_average_return <= 0:
|
||||||
|
normalized_rating = min(normalized_rating - 2, 0)
|
||||||
|
|
||||||
|
normalized_rating = max(normalized_rating, 0)
|
||||||
|
|
||||||
|
return round(normalized_rating, 2)
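# Worked example for the score above (illustrative numbers only): with
# avgReturn = 12.0, successRate = 0.8, totalRatings = 25 and a last rating 30 days
# old, weighted_sum = 0.4*12.0 + 0.3*0.8 + 0.1*25 + 0.2*(1/31) ~= 7.55; dividing by
# the weight total (1.0) and clamping to [0, 5] gives 5, then the totalRatings < 30
# branch subtracts 1, so the analyst ends up with a score of 4.0.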
|
||||||
|
|
||||||
|
def get_analyst_ratings(analyst_id):
|
||||||
|
|
||||||
|
url = "https://api.benzinga.com/api/v2.1/calendar/ratings"
|
||||||
|
res_list = []
|
||||||
|
|
||||||
|
for page in range(0,5):
|
||||||
|
try:
|
||||||
|
querystring = {"token":api_key,"parameters[analyst_id]": analyst_id, "page": str(page), "pagesize":"1000"}
|
||||||
|
response = requests.request("GET", url, headers=headers, params=querystring)
|
||||||
|
data = ujson.loads(response.text)['ratings']
|
||||||
|
res_list +=data
|
||||||
|
time.sleep(2)
|
||||||
|
except:
|
||||||
|
break
|
||||||
|
|
||||||
|
return res_list
|
||||||
|
|
||||||
|
def get_all_analyst_stats():
|
||||||
|
url = "https://api.benzinga.com/api/v2.1/calendar/ratings/analysts"
|
||||||
|
res_list = []
|
||||||
|
for _ in range(0,20): # Run the API several times because a single pass does not return all analysts (Benzinga bug)
|
||||||
|
for page in range(0,100):
|
||||||
|
try:
|
||||||
|
querystring = {"token":api_key,"page": f"{page}", 'pagesize': "1000"}
|
||||||
|
response = requests.request("GET", url, headers=headers, params=querystring)
|
||||||
|
|
||||||
|
data = ujson.loads(response.text)['analyst_ratings_analyst']
|
||||||
|
res_list+=data
|
||||||
|
except:
|
||||||
|
break
|
||||||
|
time.sleep(5)
|
||||||
|
|
||||||
|
res_list = remove_duplicates(res_list, 'id') # remove duplicates of analyst
|
||||||
|
res_list = [item for item in res_list if item.get('ratings_accuracy', {}).get('total_ratings', 0) != 0]
|
||||||
|
|
||||||
|
final_list = []
|
||||||
|
for item in res_list:
|
||||||
|
analyst_dict = {
|
||||||
|
'analystName': item['name_full'],
|
||||||
|
'companyName': item['firm_name'],
|
||||||
|
'analystId': item['id'],
|
||||||
|
'firmId': item['firm_id']
|
||||||
|
}
|
||||||
|
|
||||||
|
stats_dict = {
|
||||||
|
'avgReturn': item['ratings_accuracy'].get('overall_average_return', 0),
|
||||||
|
'successRate': item['ratings_accuracy'].get('overall_success_rate', 0),
|
||||||
|
'totalRatings': item['ratings_accuracy'].get('total_ratings', 0),
|
||||||
|
}
|
||||||
|
|
||||||
|
final_list.append({**analyst_dict,**stats_dict})
|
||||||
|
|
||||||
|
|
||||||
|
return final_list
|
||||||
|
|
||||||
|
def get_top_stocks():
|
||||||
|
with open(f"json/analyst/all-analyst-data.json", 'r') as file:
|
||||||
|
analyst_stats_list = ujson.load(file)
|
||||||
|
|
||||||
|
filtered_data = [item for item in analyst_stats_list if item['analystScore'] >= 5]
|
||||||
|
|
||||||
|
res_list = []
|
||||||
|
for item in filtered_data:
|
||||||
|
ticker_list = item['ratingsList']
|
||||||
|
ticker_list = [{'ticker': i['ticker'], 'pt_current': i['pt_current']} for i in ticker_list if i['rating_current'] == 'Strong Buy']
|
||||||
|
if len(ticker_list) > 0:
|
||||||
|
#res_list += list(set(ticker_list))
|
||||||
|
res_list += ticker_list
|
||||||
|
|
||||||
|
# Create a dictionary to store ticker occurrences and corresponding pt_current values
|
||||||
|
ticker_data = {}
|
||||||
|
for item in res_list:
|
||||||
|
ticker = item['ticker']
|
||||||
|
pt_current_str = item['pt_current']
|
||||||
|
if pt_current_str: # Skip empty strings
|
||||||
|
pt_current = float(pt_current_str)
|
||||||
|
if ticker in ticker_data:
|
||||||
|
ticker_data[ticker]['sum'] += pt_current
|
||||||
|
ticker_data[ticker]['counter'] += 1
|
||||||
|
else:
|
||||||
|
ticker_data[ticker] = {'sum': pt_current, 'counter': 1}
|
||||||
|
|
||||||
|
for ticker, info in ticker_data.items():
|
||||||
|
try:
|
||||||
|
with open(f"json/quote/{ticker}.json", 'r') as file:
|
||||||
|
res = ujson.load(file)
|
||||||
|
info['price'] = res.get('price', None)
|
||||||
|
info['name'] = res.get('name', None)
|
||||||
|
info['marketCap'] = res.get('marketCap', None)
|
||||||
|
except:
|
||||||
|
info['price'] = None
|
||||||
|
info['name'] = None
|
||||||
|
info['marketCap'] = None
|
||||||
|
|
||||||
|
# Calculate average pt_current for each ticker
|
||||||
|
for ticker, info in ticker_data.items():
|
||||||
|
info['average'] = round(info['sum'] / info['counter'],2)
|
||||||
|
|
||||||
|
# Convert the dictionary back to a list format
|
||||||
|
result = [{'ticker': ticker, 'upside': round((info['average']/info.get('price')-1)*100, 2) if info.get('price') else None, 'priceTarget': info['average'], 'price': info['price'], 'counter': info['counter'], 'name': info['name'], 'marketCap': info['marketCap']} for ticker, info in ticker_data.items()]
|
||||||
|
result = [item for item in result if item['upside'] is not None and item['upside'] >= 5 and item['upside'] <= 250] # filter outliers
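# Example of the upside formula above (illustrative numbers only): an average price
# target of 150 against a current price of 120 gives round((150/120 - 1) * 100, 2)
# = 25.0, i.e. a 25% implied upside, which passes the 5-250 filter.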
|
||||||
|
|
||||||
|
result_sorted = sorted(result, key=lambda x: x['counter'] if x['counter'] is not None else float('-inf'), reverse=True)
|
||||||
|
|
||||||
|
for rank, item in enumerate(result_sorted):
|
||||||
|
item['rank'] = rank+1
|
||||||
|
|
||||||
|
with open(f"json/analyst/top-stocks.json", 'w') as file:
|
||||||
|
ujson.dump(result_sorted, file)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
#Step 1: get all analyst IDs and stats
|
||||||
|
analyst_list = get_all_analyst_stats()
|
||||||
|
print('Number of analysts:', len(analyst_list))
|
||||||
|
#Step 2: get the rating history for each analyst and compute the analyst score
|
||||||
|
for item in tqdm(analyst_list):
|
||||||
|
data = get_analyst_ratings(item['analystId'])
|
||||||
|
item['ratingsList'] = data
|
||||||
|
item['totalRatings'] = len(data) #true total ratings, which is important for the score
|
||||||
|
item['lastRating'] = data[0]['date'] if len(data) > 0 else None
|
||||||
|
item['numOfStocks'] = len({item['ticker'] for item in data})
|
||||||
|
stats_dict = {
|
||||||
|
'avgReturn': item.get('avgReturn', 0),
|
||||||
|
'successRate': item.get('successRate', 0),
|
||||||
|
'totalRatings': item.get('totalRatings', 0),
|
||||||
|
'lastRating': item.get('lastRating', None),
|
||||||
|
}
|
||||||
|
item['analystScore'] = calculate_rating(stats_dict)
|
||||||
|
|
||||||
|
try:
|
||||||
|
con = sqlite3.connect('stocks.db')
|
||||||
|
print('Start extracting main sectors')
|
||||||
|
for item in tqdm(analyst_list):
|
||||||
|
ticker_list = [entry['ticker'] for entry in item['ratingsList']]
|
||||||
|
sector_list = []
|
||||||
|
for ticker in ticker_list:
|
||||||
|
sector = extract_sector(ticker, con)
|
||||||
|
sector_list.append(sector)
|
||||||
|
|
||||||
|
sector_counts = Counter(sector_list)
|
||||||
|
main_sectors = sector_counts.most_common(3)
|
||||||
|
main_sectors = [item[0] for item in main_sectors if item[0] is not None]
|
||||||
|
item['mainSectors'] = main_sectors
|
||||||
|
con.close()
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
analyst_list = sorted(analyst_list, key=lambda x: float(x['analystScore']), reverse=True)
|
||||||
|
number_of_all_analysts = len(analyst_list)
|
||||||
|
|
||||||
|
for rank, item in enumerate(analyst_list):
|
||||||
|
item['rank'] = rank+1
|
||||||
|
item['numOfAnalysts'] = number_of_all_analysts
|
||||||
|
item['avgReturn'] = round(float(item['avgReturn']),2)
|
||||||
|
item['successRate'] = round(float(item['successRate']),2)
|
||||||
|
with open(f"json/analyst/analyst-db/{item['analystId']}.json", 'w') as file:
|
||||||
|
ujson.dump(item, file)
|
||||||
|
|
||||||
|
|
||||||
|
#Save top 100 analysts
|
||||||
|
top_analysts_list = []
|
||||||
|
#Drop the element ratingsList for the top 100 analysts list
|
||||||
|
for item in analyst_list[0:100]:
|
||||||
|
top_analysts_list.append({
|
||||||
|
'analystName': item['analystName'],
|
||||||
|
'analystId': item['analystId'],
|
||||||
|
'rank': item['rank'],
|
||||||
|
'analystScore': item['analystScore'],
|
||||||
|
'companyName': item['companyName'],
|
||||||
|
'successRate': item['successRate'],
|
||||||
|
'avgReturn': item['avgReturn'],
|
||||||
|
'totalRatings': item['totalRatings'],
|
||||||
|
'lastRating': item['lastRating'],
|
||||||
|
'mainSectors': item['mainSectors']
|
||||||
|
})
|
||||||
|
|
||||||
|
with open(f"json/analyst/top-analysts.json", 'w') as file:
|
||||||
|
ujson.dump(top_analysts_list, file)
|
||||||
|
|
||||||
|
#Save all analyst data in raw form for the next step
|
||||||
|
with open(f"json/analyst/all-analyst-data.json", 'w') as file:
|
||||||
|
ujson.dump(analyst_list, file)
|
||||||
|
|
||||||
|
#Save top stocks with strong buys from 5 star analysts
|
||||||
|
get_top_stocks()
241
app/cron_analyst_ticker.py
Normal file
@ -0,0 +1,241 @@
from benzinga import financial_data
|
||||||
|
import requests
|
||||||
|
from datetime import datetime, timedelta, date
|
||||||
|
from collections import defaultdict
|
||||||
|
import numpy as np
|
||||||
|
from scipy.stats import norm
|
||||||
|
import time
|
||||||
|
import sqlite3
|
||||||
|
import ujson
|
||||||
|
import math
|
||||||
|
import os
|
||||||
|
from dotenv import load_dotenv
|
||||||
|
|
||||||
|
load_dotenv()
|
||||||
|
api_key = os.getenv('BENZINGA_API_KEY')
|
||||||
|
|
||||||
|
fin = financial_data.Benzinga(api_key)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Define a function to remove duplicates based on a key
|
||||||
|
def remove_duplicates(data, key):
|
||||||
|
seen = set()
|
||||||
|
new_data = []
|
||||||
|
for item in data:
|
||||||
|
if item[key] not in seen:
|
||||||
|
seen.add(item[key])
|
||||||
|
new_data.append(item)
|
||||||
|
return new_data
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def get_summary(res_list):
|
||||||
|
#Get Latest Summary of ratings from the last 12 months
|
||||||
|
# -Number of Analyst, -Price Target, -Consensus Rating
|
||||||
|
end_date = date.today()
|
||||||
|
start_date = end_date - timedelta(days=365) #end_date is today
|
||||||
|
filtered_data = [item for item in res_list if start_date <= datetime.strptime(item['date'], '%Y-%m-%d').date() <= end_date]
|
||||||
|
|
||||||
|
#Compute Average Price Target
|
||||||
|
latest_pt_current = defaultdict(int)
|
||||||
|
# Iterate through the data to update the latest pt_current for each analyst
|
||||||
|
for item in filtered_data:
|
||||||
|
if 'adjusted_pt_current' in item and item['adjusted_pt_current']:
|
||||||
|
analyst_name = item['analyst_name']
|
||||||
|
latest_pt_current[analyst_name] = max(latest_pt_current[analyst_name], float(item['pt_current']))
|
||||||
|
|
||||||
|
# Compute the average pt_current based on the latest values
|
||||||
|
pt_current_values = list(latest_pt_current.values())
|
||||||
|
average_pt_current = sum(pt_current_values) / len(pt_current_values) if pt_current_values else 0
|
||||||
|
|
||||||
|
#print("Average pt_current:", round(average_pt_current, 2))
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Compute Consensus Rating
|
||||||
|
consensus_ratings = defaultdict(str)
|
||||||
|
# Define the rating hierarchy
|
||||||
|
rating_hierarchy = {'Strong Sell': 0, 'Sell': 1, 'Hold': 2, 'Buy': 3, 'Strong Buy': 4}
|
||||||
|
|
||||||
|
# Iterate through the data to update the consensus rating for each analyst
|
||||||
|
for item in filtered_data:
|
||||||
|
if 'rating_current' in item and item['rating_current'] and 'analyst_name' in item and item['analyst_name']:
|
||||||
|
try:
|
||||||
|
analyst_name = item['analyst_name']
|
||||||
|
current_rating = item['rating_current']
|
||||||
|
if current_rating in rating_hierarchy:
|
||||||
|
consensus_ratings[analyst_name] = current_rating
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Compute the consensus rating based on the most frequent rating among analysts
|
||||||
|
consensus_rating_counts = defaultdict(int)
|
||||||
|
for rating in consensus_ratings.values():
|
||||||
|
consensus_rating_counts[rating] += 1
|
||||||
|
|
||||||
|
consensus_rating = max(consensus_rating_counts, key=consensus_rating_counts.get)
|
||||||
|
#print("Consensus Rating:", consensus_rating)
|
||||||
|
|
||||||
|
#Sum up all Buy,Sell,Hold for the progress bar in sveltekit
|
||||||
|
# Convert defaultdict to regular dictionary
|
||||||
|
data_dict = dict(consensus_rating_counts)
|
||||||
|
|
||||||
|
# Sum up 'Strong Buy' and 'Buy'
|
||||||
|
buy_total = data_dict.get('Strong Buy', 0) + data_dict.get('Buy', 0)
|
||||||
|
|
||||||
|
# Sum up 'Strong Sell' and 'Sell'
|
||||||
|
sell_total = data_dict.get('Strong Sell', 0) + data_dict.get('Sell', 0)
|
||||||
|
hold_total = data_dict.get('Hold', 0)
|
||||||
|
|
||||||
|
|
||||||
|
unique_analyst_names = set()
|
||||||
|
numOfAnalyst = 0
|
||||||
|
|
||||||
|
for item in filtered_data:
|
||||||
|
if item['analyst_name'] not in unique_analyst_names:
|
||||||
|
unique_analyst_names.add(item['analyst_name'])
|
||||||
|
numOfAnalyst += 1
|
||||||
|
#print("Number of unique analyst names:", numOfAnalyst)
|
||||||
|
|
||||||
|
stats = {'numOfAnalyst': numOfAnalyst, 'consensusRating': consensus_rating, 'priceTarget': round(average_pt_current, 2)}
|
||||||
|
categorical_ratings = {'Buy': buy_total, 'Sell': sell_total, 'Hold': hold_total}
|
||||||
|
|
||||||
|
res = {**stats, **categorical_ratings}
|
||||||
|
return res
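# The returned dict merges the summary stats with the rating buckets, e.g.
# (illustrative values): {'numOfAnalyst': 12, 'consensusRating': 'Buy',
# 'priceTarget': 187.5, 'Buy': 8, 'Sell': 1, 'Hold': 3}.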
|
||||||
|
|
||||||
|
def run(chunk,analyst_list):
|
||||||
|
end_date = date.today()
|
||||||
|
start_date = datetime(2015,1,1)
|
||||||
|
end_date_str = end_date.strftime('%Y-%m-%d')
|
||||||
|
start_date_str = start_date.strftime('%Y-%m-%d')
|
||||||
|
|
||||||
|
|
||||||
|
company_tickers = ','.join(chunk)
|
||||||
|
res_list = []
|
||||||
|
for page in range(0, 500):
|
||||||
|
try:
|
||||||
|
data = fin.ratings(company_tickers=company_tickers, page=page, pagesize=1000, date_from=start_date_str, date_to=end_date_str)
|
||||||
|
data = ujson.loads(fin.output(data))['ratings']
|
||||||
|
res_list += data
|
||||||
|
except:
|
||||||
|
break
|
||||||
|
|
||||||
|
res_list = [item for item in res_list if item.get('analyst_name')]
|
||||||
|
#print(res_list[-15])
|
||||||
|
for ticker in chunk:
|
||||||
|
try:
|
||||||
|
ticker_filtered_data = [item for item in res_list if item['ticker'] == ticker]
|
||||||
|
if len(ticker_filtered_data) != 0:
|
||||||
|
for item in ticker_filtered_data:
|
||||||
|
if item['rating_current'] == 'Strong Sell' or item['rating_current'] == 'Strong Buy':
|
||||||
|
pass
|
||||||
|
elif item['rating_current'] == 'Neutral':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Equal-Weight' or item['rating_current'] == 'Sector Weight' or item['rating_current'] == 'Sector Perform':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'In-Line':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Outperform' and item['action_company'] == 'Downgrades':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Negative':
|
||||||
|
item['rating_current'] = 'Sell'
|
||||||
|
elif (item['rating_current'] == 'Outperform' or item['rating_current'] == 'Overweight') and (item['action_company'] == 'Reiterates' or item['action_company'] == 'Initiates Coverage On'):
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
item['action_company'] = 'Initiates'
|
||||||
|
elif item['rating_current'] == 'Market Outperform' and (item['action_company'] == 'Maintains' or item['action_company'] == 'Reiterates'):
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
elif item['rating_current'] == 'Outperform' and (item['action_company'] == 'Maintains' or item['action_pt'] == 'Announces' or item['action_company'] == 'Upgrades'):
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
elif item['rating_current'] == 'Buy' and (item['action_company'] == 'Raises' or item['action_pt'] == 'Raises'):
|
||||||
|
item['rating_current'] = 'Strong Buy'
|
||||||
|
elif item['rating_current'] == 'Overweight' and (item['action_company'] == 'Maintains' or item['action_company'] == 'Upgrades' or item['action_company'] == 'Reiterates' or item['action_pt'] == 'Raises'):
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
elif item['rating_current'] == 'Positive' or item['rating_current'] == 'Sector Outperform':
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
elif item['rating_current'] == 'Underperform' or item['rating_current'] == 'Underweight':
|
||||||
|
item['rating_current'] = 'Sell'
|
||||||
|
elif item['rating_current'] == 'Reduce' and (item['action_company'] == 'Downgrades' or item['action_pt'] == 'Lowers'):
|
||||||
|
item['rating_current'] = 'Sell'
|
||||||
|
elif item['rating_current'] == 'Sell' and item['action_pt'] == 'Announces':
|
||||||
|
item['rating_current'] = 'Strong Sell'
|
||||||
|
elif item['rating_current'] == 'Market Perform':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_prior'] == 'Outperform' and item['action_company'] == 'Downgrades':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Peer Perform' and item['rating_prior'] == 'Peer Perfrom':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Peer Perform' and item['action_pt'] == 'Announces':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
item['action_comapny'] = 'Initiates'
|
||||||
|
|
||||||
|
summary = get_summary(ticker_filtered_data)
|
||||||
|
|
||||||
|
#get ratings of each analyst
|
||||||
|
with open(f"json/analyst/summary/{ticker}.json", 'w') as file:
|
||||||
|
ujson.dump(summary, file)
|
||||||
|
|
||||||
|
for item1 in ticker_filtered_data:
|
||||||
|
#item1['analystId'] = ''
|
||||||
|
#item1['analystScore'] = 0
|
||||||
|
#item1['adjusted_pt_current'] = 0
|
||||||
|
#item1['adjusted_pt_prior'] = 0
|
||||||
|
for item2 in analyst_stats_list:
|
||||||
|
if item1['analyst'] == item2['companyName'] and item1['analyst_name'] == item2['analystName']:
|
||||||
|
item1['analystId'] = item2['analystId']
|
||||||
|
item1['analystScore'] = item2['analystScore']
|
||||||
|
break
|
||||||
|
elif item1['analyst_name'] == item2['analystName']:
|
||||||
|
item1['analystId'] = item2['analystId']
|
||||||
|
item1['analystScore'] = item2['analystScore']
|
||||||
|
break
|
||||||
|
#Bug: Benzinga does not give me reliable all analyst names and hence score.
|
||||||
|
# Compute in those cases the analyst score separately for each analyst
|
||||||
|
|
||||||
|
'''
|
||||||
|
if 'analystScore' not in item1: #or item1['analystScore'] == 0:
|
||||||
|
one_sample_list = get_one_sample_analyst_data(item1['analyst_name'], item1['analyst'])
|
||||||
|
item1['analystId'] = one_sample_list[0]['id']
|
||||||
|
item1['analystScore'] = one_sample_list[0]['analystScore']
|
||||||
|
'''
|
||||||
|
|
||||||
|
desired_keys = ['date', 'action_company', 'rating_current', 'adjusted_pt_current', 'adjusted_pt_prior', 'analystId', 'analystScore', 'analyst', 'analyst_name']
|
||||||
|
|
||||||
|
ticker_filtered_data = [
|
||||||
|
{key: item[key] if key in item else None for key in desired_keys}
|
||||||
|
for item in ticker_filtered_data
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
#print(ticker_filtered_data[0])
|
||||||
|
#time.sleep(10000)
|
||||||
|
with open(f"json/analyst/history/{ticker}.json", 'w') as file:
|
||||||
|
ujson.dump(ticker_filtered_data, file)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
stock_con = sqlite3.connect('stocks.db')
|
||||||
|
stock_cursor = stock_con.cursor()
|
||||||
|
stock_cursor.execute("SELECT DISTINCT symbol FROM stocks")
|
||||||
|
stock_symbols = [row[0] for row in stock_cursor.fetchall()]
|
||||||
|
|
||||||
|
stock_con.close()
|
||||||
|
|
||||||
|
#Save all analyst data in raw form for the next step
|
||||||
|
with open(f"json/analyst/all-analyst-data.json", 'r') as file:
|
||||||
|
analyst_stats_list = ujson.load(file)
|
||||||
|
|
||||||
|
chunk_size = len(stock_symbols) // 40 # Divide the list into N chunks
|
||||||
|
chunks = [stock_symbols[i:i + chunk_size] for i in range(0, len(stock_symbols), chunk_size)]
|
||||||
|
#chunks = [['AMD','NVDA','MSFT']]
|
||||||
|
for chunk in chunks:
|
||||||
|
run(chunk, analyst_stats_list)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
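The elif chain above normalizes Benzinga's rating labels; the action-independent part of that mapping could equally be kept as a lookup table. A minimal sketch of such a refactor (not the code that runs here, just the same mappings expressed as data):

RATING_ALIASES = {
    'Neutral': 'Hold', 'Equal-Weight': 'Hold', 'Sector Weight': 'Hold',
    'Sector Perform': 'Hold', 'In-Line': 'Hold', 'Market Perform': 'Hold',
    'Positive': 'Buy', 'Sector Outperform': 'Buy',
    'Negative': 'Sell', 'Underperform': 'Sell', 'Underweight': 'Sell',
}

def normalize_rating(rating):
    # Action-dependent cases (e.g. Outperform + Downgrades) would still need the explicit branches.
    return RATING_ALIASES.get(rating, rating)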
50
app/cron_bull_bear_say.py
Normal file
@@ -0,0 +1,50 @@

import aiohttp
import aiofiles
import ujson
import sqlite3
import asyncio
import os
from dotenv import load_dotenv
from datetime import datetime


load_dotenv()
api_key = os.getenv('BENZINGA_API_KEY')

async def get_endpoint(session, symbol):
    url = "https://api.benzinga.com/api/v1/bulls_bears_say"
    querystring = {"token": api_key, "symbols": symbol}

    formatted_data = {}  # default, in case the response contains no bulls/bears entries
    try:
        async with session.get(url, params=querystring) as response:
            res = ujson.loads(await response.text())
            try:
                for item in res['bulls_say_bears_say']:
                    date = datetime.fromtimestamp(item['updated'])
                    date = date.strftime("%B %d, %Y")
                    formatted_data = {'date': date, 'bearSays': item['bear_case'], 'bullSays': item['bull_case']}
            except:
                formatted_data = {}
    except Exception as e:
        formatted_data = {}
        print(e)
    with open(f"json/bull_bear_say/{symbol}.json", 'w') as file:
        ujson.dump(formatted_data, file)

async def run():
    con = sqlite3.connect('stocks.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol != ?", ('%5EGSPC',))
    stocks_symbols = [row[0] for row in cursor.fetchall()]
    #stocks_symbols = ['NVDA']
    con.close()

    async with aiohttp.ClientSession() as session:
        await asyncio.gather(*(get_endpoint(session, symbol) for symbol in stocks_symbols))

try:
    asyncio.run(run())
except Exception as e:
    print(e)
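The gather call above fires one request per symbol all at once. A minimal sketch of how the same fan-out could be throttled with an asyncio.Semaphore, if the API ever needs it (the limit of 10 and the wrapper names are assumptions, not part of this commit):

async def get_endpoint_throttled(session, symbol, sem):
    # At most `limit` requests run concurrently; each task waits for a slot.
    async with sem:
        await get_endpoint(session, symbol)

async def run_throttled(session, symbols, limit=10):  # `limit` is an assumed value
    sem = asyncio.Semaphore(limit)
    await asyncio.gather(*(get_endpoint_throttled(session, s, sem) for s in symbols))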
334
app/cron_congress_trading.py
Normal file
@@ -0,0 +1,334 @@

import ujson
import asyncio
import aiohttp
import aiofiles
import sqlite3
import pandas as pd
import time
import hashlib
from collections import defaultdict

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')


async def save_json_data(symbol, data):
    async with aiofiles.open(f"json/congress-trading/company/{symbol}.json", 'w') as file:
        await file.write(ujson.dumps(data))

async def get_congress_data(symbols, session):
    tasks = []
    politician_list = []
    for symbol in symbols:
        task = asyncio.create_task(get_endpoints(symbol, session))
        tasks.append(task)
    responses = await asyncio.gather(*tasks)

    for symbol, response in zip(symbols, responses):
        if len(response) > 0:
            await save_json_data(symbol, response)
            politician_list += response

    return politician_list

def generate_id(name):
    hashed = hashlib.sha256(name.encode()).hexdigest()
    return hashed[:10]

def replace_representative(office):
    replacements = {
        'Carper, Thomas R. (Senator)': 'Tom Carper',
        'Thomas R. Carper': 'Tom Carper',
        'Tuberville, Tommy (Senator)': 'Tommy Tuberville',
        'Ricketts, Pete (Senator)': 'John Ricketts',
        'Pete Ricketts': 'John Ricketts',
        'Moran, Jerry (Senator)': 'Jerry Moran',
        'Fischer, Deb (Senator)': 'Deb Fischer',
        'Mullin, Markwayne (Senator)': 'Markwayne Mullin',
        'Whitehouse, Sheldon (Senator)': 'Sheldon Whitehouse',
        'Toomey, Pat (Senator)': 'Pat Toomey',
        'Sullivan, Dan (Senator)': 'Dan Sullivan',
        'Capito, Shelley Moore (Senator)': 'Shelley Moore Capito',
        'Roberts, Pat (Senator)': 'Pat Roberts',
        'King, Angus (Senator)': 'Angus King',
        'Hoeven, John (Senator)': 'John Hoeven',
        'Duckworth, Tammy (Senator)': 'Tammy Duckworth',
        'Perdue, David (Senator)': 'David Perdue',
        'Inhofe, James M. (Senator)': 'James M. Inhofe',
        'Murray, Patty (Senator)': 'Patty Murray',
        'Boozman, John (Senator)': 'John Boozman',
        'Loeffler, Kelly (Senator)': 'Kelly Loeffler',
        'Reed, John F. (Senator)': 'John F. Reed',
        'Collins, Susan M. (Senator)': 'Susan M. Collins',
        'Cassidy, Bill (Senator)': 'Bill Cassidy',
        'Wyden, Ron (Senator)': 'Ron Wyden',
        'Hickenlooper, John (Senator)': 'John Hickenlooper',
        'Booker, Cory (Senator)': 'Cory Booker',
        'Donald Beyer, (Senator).': 'Donald Sternoff Beyer',
        'Peters, Gary (Senator)': 'Gary Peters',
        'Donald Sternoff Beyer, (Senator).': 'Donald Sternoff Beyer',
        'Donald S. Beyer, Jr.': 'Donald Sternoff Beyer',
        'Donald Sternoff Honorable Beyer': 'Donald Sternoff Beyer',
        'K. Michael Conaway': 'Michael Conaway',
        'C. Scott Franklin': 'Scott Franklin',
        'Robert C. "Bobby" Scott': 'Bobby Scott',
        'Madison Cawthorn': 'David Madison Cawthorn',
        'Cruz, Ted (Senator)': 'Ted Cruz',
        'Smith, Tina (Senator)': 'Tina Smith',
        'Graham, Lindsey (Senator)': 'Lindsey Graham',
        'Hagerty, Bill (Senator)': 'Bill Hagerty',
        'Scott, Rick (Senator)': 'Rick Scott',
        'Warner, Mark (Senator)': 'Mark Warner',
        'McConnell, A. Mitchell Jr. (Senator)': 'Mitch McConnell',
        'Mitchell McConnell': 'Mitch McConnell',
        'Charles J. "Chuck" Fleischmann': 'Chuck Fleischmann',
        'Vance, J.D. (Senator)': 'James Vance',
        'Neal Patrick MD, Facs Dunn': 'Neal Dunn',
        'Neal Patrick MD, Facs Dunn (Senator)': 'Neal Dunn',
        'Tillis, Thom (Senator)': 'Thom Tillis',
        'W. Gregory Steube': 'Greg Steube',
        'W. Grego Steube': 'Greg Steube',
        'David David Madison Cawthorn': 'David Madison Cawthorn',
        'Blunt, Roy (Senator)': 'Roy Blunt',
        'Thune, John (Senator)': 'John Thune',
        'Rosen, Jacky (Senator)': 'Jacky Rosen',
        'James Costa': 'Jim Costa',
        'Lummis, Cynthia (Senator)': 'Cynthia Lummis',
        'Coons, Chris (Senator)': 'Chris Coons',
        'Udall, Tom (Senator)': 'Tom Udall',
        'Kennedy, John (Senator)': 'John Kennedy',
        'Bennet, Michael (Senator)': 'Michael Bennet',
        'Casey, Robert P. Jr. (Senator)': 'Robert Casey',
        'Van Hollen, Chris (Senator)': 'Chris Van Hollen',
        'Manchin, Joe (Senator)': 'Joe Manchin',
        'Cornyn, John (Senator)': 'John Cornyn',
        'Enzy, Michael (Senator)': 'Michael Enzy',
        'Cardin, Benjamin (Senator)': 'Benjamin Cardin',
        'Kaine, Tim (Senator)': 'Tim Kaine',
        'Joseph P. Kennedy III': 'Joe Kennedy',
        'James E Hon Banks': 'Jim Banks',
        'Michael F. Q. San Nicolas': 'Michael San Nicolas',
        'Barbara J Honorable Comstock': 'Barbara Comstock',
        'Mr ': '',
        'Mr. ': '',
        'Dr ': '',
        'Dr. ': '',
        'Mrs ': '',
        'Mrs. ': '',
        '(Senator)': '',
    }

    for old, new in replacements.items():
        office = office.replace(old, new)
    office = ' '.join(office.split())
    return office

async def get_endpoints(symbol, session):
    res_list = []
    amount_mapping = {
        '$1,001 -': '$1K-$15K',
        '$1,001 - $15,000': '$1K-$15K',
        '$15,001 - $50,000': '$15K-$50K',
        '$15,001 -': '$15K-$50K',
        '$50,001 - $100,000': '$50K-$100K',
        '$100,001 - $250,000': '$100K-$250K',
        '$100,001 - $500,000': '$100K-$500K',
        '$250,001 - $500,000': '$250K-$500K',
        '$500,001 - $1,000,000': '$500K-$1M',
        '$1,000,001 - $5,000,000': '$1M-$5M',
        'Spouse/DC Over $1,000,000': 'Over $1M'
    }

    congressional_districts = {
        "UT": "Utah", "CA": "California", "NY": "New York", "TX": "Texas", "FL": "Florida", "IL": "Illinois",
        "PA": "Pennsylvania", "OH": "Ohio", "GA": "Georgia", "MI": "Michigan", "NC": "North Carolina",
        "AZ": "Arizona", "WA": "Washington", "CO": "Colorado", "OR": "Oregon", "VA": "Virginia",
        "NJ": "New Jersey", "TN": "Tennessee", "MA": "Massachusetts", "WI": "Wisconsin", "SC": "South Carolina",
        "KY": "Kentucky", "LA": "Louisiana", "AR": "Arkansas", "AL": "Alabama", "MS": "Mississippi",
        "NDAL": "North Dakota", "SDAL": "South Dakota", "MN": "Minnesota", "IA": "Iowa", "OK": "Oklahoma",
        "ID": "Idaho", "NH": "New Hampshire", "NE": "Nebraska", "MTAL": "Montana", "WYAL": "Wyoming",
        "WV": "West Virginia", "VTAL": "Vermont", "DEAL": "Delaware", "RI": "Rhode Island", "ME": "Maine",
        "HI": "Hawaii", "AKAL": "Alaska", "NM": "New Mexico", "KS": "Kansas", "CT": "Connecticut",
        "MD": "Maryland", "NV": "Nevada",
    }

    try:
        # Form API request URLs
        url_senate = f"https://financialmodelingprep.com/api/v4/senate-trading?symbol={symbol}&apikey={api_key}"
        url_house = f"https://financialmodelingprep.com/api/v4/senate-disclosure?symbol={symbol}&apikey={api_key}"

        async with session.get(url_senate) as response_senate, session.get(url_house) as response_house:
            data = []
            for count, response in enumerate([response_senate, response_house]):
                data = await response.json()
                for item in data:
                    if count == 0:
                        item['congress'] = 'Senate'
                    elif count == 1:
                        item['congress'] = 'House'

                    item['amount'] = amount_mapping.get(item['amount'], item['amount'])
                    if any('sale' in word.lower() for word in item['type'].split()):
                        item['type'] = 'Sold'
                    if any('purchase' in word.lower() for word in item['type'].split()):
                        item['type'] = 'Bought'
                    if any('exchange' in word.lower() for word in item['type'].split()):
                        item['type'] = 'Exchange'

                    if 'representative' in item:
                        item['representative'] = replace_representative(item['representative'])

                    if 'office' in item:
                        item['representative'] = replace_representative(item['office'])

                    item['id'] = generate_id(item['representative'])

                    if 'district' in item:
                        # Extract the state code from the 'district' value
                        state_code = item['district'][:2]

                        # Replace the 'district' value with the corresponding state name
                        item['district'] = f"{congressional_districts.get(state_code, state_code)}"
                    if 'dateRecieved' in item:
                        item['disclosureDate'] = item['dateRecieved']

                res_list += data

        res_list = sorted(res_list, key=lambda x: x['transactionDate'], reverse=True)

    except Exception as e:
        print(f"Failed to fetch data for {symbol}: {e}")

    return res_list


def create_politician_db(data, stock_symbols, stock_raw_data, etf_symbols, etf_raw_data, crypto_symbols, crypto_raw_data):
    grouped_data = defaultdict(list)
    # Group elements by id
    for item in data:
        #Bug: the data provider does not always return 'ticker'; in edge cases the key is 'symbol' instead.
        if ('ticker' in item and item['ticker'] in stock_symbols) or ('symbol' in item and item['symbol'] in stock_symbols):
            for j in stock_raw_data:
                if (item.get('ticker') or item.get('symbol')) == j['symbol']:
                    item['ticker'] = j['symbol']
                    item['name'] = j['name']
                    item['assetType'] = 'stock'
                    break
        elif ('ticker' in item and item['ticker'] in etf_symbols) or ('symbol' in item and item['symbol'] in etf_symbols):
            for j in etf_raw_data:
                if (item.get('ticker') or item.get('symbol')) == j['symbol']:
                    item['ticker'] = j['symbol']
                    item['name'] = j['name']
                    item['assetType'] = 'etf'
                    break
        elif ('ticker' in item and item['ticker'] in crypto_symbols) or ('symbol' in item and item['symbol'] in crypto_symbols):
            for j in crypto_raw_data:
                if (item.get('ticker') or item.get('symbol')) == j['symbol']:
                    item['ticker'] = j['symbol']
                    item['name'] = j['name']
                    item['assetType'] = 'crypto'
                    break

        grouped_data[item['id']].append(item)

    # Convert defaultdict to list
    grouped_data_list = list(grouped_data.values())
    for item in grouped_data_list:
        item = sorted(item, key=lambda x: x['transactionDate'], reverse=True)
        with open(f"json/congress-trading/politician-db/{item[0]['id']}.json", 'w') as file:
            ujson.dump(item, file)


def create_search_list():
    folder_path = 'json/congress-trading/politician-db/'
    # Loop through all files in the folder
    search_politician_list = []
    for filename in os.listdir(folder_path):
        # Check if the file is a JSON file
        if filename.endswith('.json'):
            file_path = os.path.join(folder_path, filename)
            # Open and read the JSON file
            with open(file_path, 'r') as file:
                data = ujson.load(file)
                first_item = data[0]
                if 'Senator' in first_item['representative']:
                    pass
                else:
                    search_politician_list.append({
                        'representative': first_item['representative'],
                        'id': first_item['id'],
                        'totalTrades': len(data),
                        'district': first_item['district'] if 'district' in first_item else '',
                        'lastTrade': first_item['transactionDate'],
                    })

    search_politician_list = sorted(search_politician_list, key=lambda x: x['lastTrade'], reverse=True)
    with open('json/congress-trading/search_list.json', 'w') as file:
        ujson.dump(search_politician_list, file)

async def run():
    try:
        con = sqlite3.connect('stocks.db')
        cursor = con.cursor()
        cursor.execute("PRAGMA journal_mode = wal")
        cursor.execute("SELECT symbol, name FROM stocks WHERE symbol NOT LIKE '%.%'")
        stock_raw_data = cursor.fetchall()
        stock_raw_data = [{
            'symbol': row[0],
            'name': row[1],
        } for row in stock_raw_data]

        stock_symbols = [item['symbol'] for item in stock_raw_data]

        con.close()

        etf_con = sqlite3.connect('etf.db')
        etf_cursor = etf_con.cursor()
        etf_cursor.execute("PRAGMA journal_mode = wal")
        etf_cursor.execute("SELECT DISTINCT symbol, name FROM etfs")
        etf_raw_data = etf_cursor.fetchall()
        etf_raw_data = [{
            'symbol': row[0],
            'name': row[1],
        } for row in etf_raw_data]
        etf_symbols = [item['symbol'] for item in etf_raw_data]
        etf_con.close()

        crypto_con = sqlite3.connect('crypto.db')
        crypto_cursor = crypto_con.cursor()
        crypto_cursor.execute("PRAGMA journal_mode = wal")
        crypto_cursor.execute("SELECT DISTINCT symbol, name FROM cryptos")
        crypto_raw_data = crypto_cursor.fetchall()
        crypto_raw_data = [{
            'symbol': row[0],
            'name': row[1],
        } for row in crypto_raw_data]
        crypto_symbols = [item['symbol'] for item in crypto_raw_data]
        crypto_con.close()

        total_symbols = crypto_symbols + etf_symbols + stock_symbols
        total_raw_data = stock_raw_data + etf_raw_data + crypto_raw_data
        chunk_size = 250
        politician_list = []

    except Exception as e:
        print(f"Failed to fetch symbols: {e}")
        return

    try:
        connector = aiohttp.TCPConnector(limit=100)  # Adjust the limit as needed
        async with aiohttp.ClientSession(connector=connector) as session:
            for i in range(0, len(total_symbols), chunk_size):
                symbols_chunk = total_symbols[i:i + chunk_size]
                data = await get_congress_data(symbols_chunk, session)
                politician_list += data
                print('sleeping for 60 sec')
                await asyncio.sleep(60)  # Wait for 60 seconds between chunks

        create_politician_db(politician_list, stock_symbols, stock_raw_data, etf_symbols, etf_raw_data, crypto_symbols, crypto_raw_data)
        create_search_list()

    except Exception as e:
        print(f"Failed to run fetch and save data: {e}")

try:
    asyncio.run(run())
except Exception as e:
    print(e)
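Because generate_id hashes the cleaned representative name, normalizing the name first is what keeps one politician under a single id across the Senate and House feeds. A quick illustration of that pairing (the sample strings below are chosen for demonstration, not pulled from live data):

raw_names = ['Tuberville, Tommy (Senator)', 'Tommy Tuberville']  # two spellings of the same person
ids = {generate_id(replace_representative(n)) for n in raw_names}
assert len(ids) == 1  # both spellings collapse to the same 10-character id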
93
app/cron_enterprise_values.py
Normal file
@@ -0,0 +1,93 @@

import ujson
import asyncio
import aiohttp
import aiofiles
import sqlite3
import pandas as pd

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')


async def save_json_data(symbol, data):
    async with aiofiles.open(f"json/enterprise-values/{symbol}.json", 'w') as file:
        await file.write(ujson.dumps(data))

async def get_data(symbols, session):
    tasks = []
    for symbol in symbols:
        task = asyncio.create_task(get_endpoints(symbol, session))
        tasks.append(task)
    responses = await asyncio.gather(*tasks)

    if len(responses) > 0:
        for symbol, response in zip(symbols, responses):
            await save_json_data(symbol, response)

async def replace_date_with_fiscal_year(data):
    res_list = []
    for entry in data[-10:]:
        # Extract the year from the date
        year = entry["date"].split("-")[0]
        # Convert it to a fiscal year label (e.g., 2022 -> FY23)
        fiscal_year = "FY" + str(int(year[-2:]) + 1)
        # Update the "date" key with the fiscal year
        entry["date"] = fiscal_year
        res_list.append(entry)
    return res_list

async def get_endpoints(symbol, session):
    data = []
    try:
        # Form the API request URL
        url = f"https://financialmodelingprep.com/api/v3/enterprise-values/{symbol}/?period=annual&apikey={api_key}"

        async with session.get(url) as response:
            data = await response.json()
            data = sorted(data, key=lambda x: x['date'])
            data = await replace_date_with_fiscal_year(data)

    except Exception as e:
        print(f"Failed to fetch data for {symbol}: {e}")

    return data


async def run():
    try:
        con = sqlite3.connect('stocks.db')
        cursor = con.cursor()
        cursor.execute("PRAGMA journal_mode = wal")
        cursor.execute("SELECT DISTINCT symbol FROM stocks")
        stock_symbols = [row[0] for row in cursor.fetchall()]
        con.close()

        total_symbols = stock_symbols
        chunk_size = 1000

    except Exception as e:
        print(f"Failed to fetch symbols: {e}")
        return

    try:
        connector = aiohttp.TCPConnector(limit=100)  # Adjust the limit as needed
        async with aiohttp.ClientSession(connector=connector) as session:
            for i in range(0, len(total_symbols), chunk_size):
                symbols_chunk = total_symbols[i:i + chunk_size]
                await get_data(symbols_chunk, session)
                print('sleeping for 60 sec')
                await asyncio.sleep(60)  # Wait for 60 seconds between chunks
    except Exception as e:
        print(f"Failed to run fetch and save data: {e}")

try:
    asyncio.run(run())
except Exception as e:
    print(e)
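A quick check of the fiscal-year relabeling above, run standalone; the sample rows are made up for illustration:

sample = [{'date': '2022-09-30', 'enterpriseValue': 1}, {'date': '2023-09-30', 'enterpriseValue': 2}]
relabeled = asyncio.run(replace_date_with_fiscal_year(sample))
print([e['date'] for e in relabeled])  # ['FY23', 'FY24'] — the label is the report's calendar year plus one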
105
app/cron_executive.py
Normal file
@@ -0,0 +1,105 @@

from datetime import datetime, timedelta
import ujson
import time
import sqlite3
import pandas as pd
import numpy as np
from collections import defaultdict
import asyncio
import aiohttp
from faker import Faker
from tqdm import tqdm

from dotenv import load_dotenv
import os
load_dotenv()
api_key = os.getenv('FMP_API_KEY')


def generate_female_names(num_names):
    fake = Faker()
    female_names = []

    for _ in range(num_names):
        female_names.append(fake.first_name_female())

    return female_names

def generate_male_names(num_names):
    fake = Faker()
    male_names = []
    for _ in range(num_names):
        male_names.append(fake.first_name_male())

    return male_names


# Specify the number of names you want in each list
number_of_names = 20_000
female_names_list = generate_female_names(number_of_names)
male_names_list = generate_male_names(number_of_names)


def custom_sort(entry):
    # Ensure "Chief Executive Officer" appears first, then sort by name
    if "Chief Executive Officer" in entry['title']:
        return (0, entry['name'])
    else:
        return (1, entry['name'])


async def save_executives(session, symbol):
    url = f"https://financialmodelingprep.com/api/v3/key-executives/{symbol}?apikey={api_key}"
    async with session.get(url) as response:
        data = await response.json()
        unique_names = set()
        filtered_data = []

        for entry in sorted(data, key=custom_sort):
            name = entry['name']
            if name not in unique_names:
                unique_names.add(name)
                filtered_data.append(entry)

        for entry in filtered_data:
            if entry['gender'] == '' or entry['gender'] is None:
                if any(substring.lower() in entry['name'].lower() for substring in female_names_list):
                    #print(entry['name'])
                    entry['gender'] = 'female'
                elif any(substring.lower() in entry['name'].lower() for substring in male_names_list):
                    #print(entry['name'])
                    entry['gender'] = 'male'

        if len(filtered_data) > 0:
            with open(f"json/executives/{symbol}.json", 'w') as file:
                ujson.dump(filtered_data, file)

async def run():
    con = sqlite3.connect('stocks.db')
    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol NOT LIKE '%.%'")
    symbols = [row[0] for row in cursor.fetchall()]
    con.close()

    async with aiohttp.ClientSession() as session:
        tasks = []
        i = 0
        for symbol in tqdm(symbols):
            tasks.append(save_executives(session, symbol))

            i += 1
            if i % 800 == 0:
                await asyncio.gather(*tasks)
                tasks = []
                print('sleeping mode: ', i)
                await asyncio.sleep(60)  # Pause for 60 seconds

        #tasks.append(self.save_ohlc_data(session, "%5EGSPC"))

        if tasks:
            await asyncio.gather(*tasks)

loop = asyncio.get_event_loop()
loop.run_until_complete(run())
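The gender fallback above scans two 20,000-entry name lists with substring checks for every executive. A minimal sketch of the same idea with exact first-name lookups against sets, which is faster and avoids false positives on partial matches (a hypothetical refactor under those assumptions, not part of this commit):

female_first_names = {n.lower() for n in female_names_list}
male_first_names = {n.lower() for n in male_names_list}
TITLES = {'mr', 'ms', 'mrs', 'dr'}  # assumed honorifics to skip

def guess_gender(full_name):
    # Compare the first non-title token of the name against the generated name sets.
    parts = [p.rstrip('.').lower() for p in full_name.split()]
    first = next((p for p in parts if p not in TITLES), '')
    if first in female_first_names:
        return 'female'
    if first in male_first_names:
        return 'male'
    return ''  # leave unknown genders empty, as the cron job does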
167
app/cron_fundamental_predictor.py
Normal file
@@ -0,0 +1,167 @@

import ujson
import asyncio
import aiohttp
import sqlite3
from datetime import datetime
from ml_models.fundamental_predictor import FundamentalPredictor
import yfinance as yf
from collections import defaultdict
import pandas as pd
from tqdm import tqdm
import concurrent.futures
import re


async def save_json(symbol, data):
    with open(f"json/fundamental-predictor-analysis/{symbol}.json", 'w') as file:
        ujson.dump(data, file)

async def download_data(ticker, con, start_date, end_date):
    try:
        query_template = """
            SELECT
                income, income_growth, balance, balance_growth, cashflow, cashflow_growth, ratios
            FROM
                stocks
            WHERE
                symbol = ?
        """

        query_df = pd.read_sql_query(query_template, con, params=(ticker,))

        # Metadata columns that are not useful as model features
        excluded_keys = ["symbol", "reportedCurrency", "calendarYear", "fillingDate", "acceptedDate", "period", "cik", "link", "finalLink"]

        income = ujson.loads(query_df['income'].iloc[0])
        #Only consider companies with at least 10 years' worth of quarterly statements
        if len(income) < 40:
            raise ValueError("Income data length is too small.")

        income = [{k: v for k, v in item.items() if k not in excluded_keys} for item in income if int(item["date"][:4]) >= 2000]
        income_growth = ujson.loads(query_df['income_growth'].iloc[0])
        income_growth = [{k: v for k, v in item.items() if k not in excluded_keys} for item in income_growth if int(item["date"][:4]) >= 2000]

        balance = ujson.loads(query_df['balance'].iloc[0])
        balance = [{k: v for k, v in item.items() if k not in excluded_keys} for item in balance if int(item["date"][:4]) >= 2000]
        balance_growth = ujson.loads(query_df['balance_growth'].iloc[0])
        balance_growth = [{k: v for k, v in item.items() if k not in excluded_keys} for item in balance_growth if int(item["date"][:4]) >= 2000]

        cashflow = ujson.loads(query_df['cashflow'].iloc[0])
        cashflow = [{k: v for k, v in item.items() if k not in excluded_keys} for item in cashflow if int(item["date"][:4]) >= 2000]
        cashflow_growth = ujson.loads(query_df['cashflow_growth'].iloc[0])
        cashflow_growth = [{k: v for k, v in item.items() if k not in excluded_keys} for item in cashflow_growth if int(item["date"][:4]) >= 2000]

        ratios = ujson.loads(query_df['ratios'].iloc[0])
        ratios = [{k: v for k, v in item.items() if k not in excluded_keys} for item in ratios if int(item["date"][:4]) >= 2000]

        combined_data = defaultdict(dict)
        # Iterate over all lists simultaneously
        for entries in zip(income, income_growth, balance, balance_growth, cashflow, cashflow_growth, ratios):
            # Iterate over each entry in the current set of entries
            for entry in entries:
                date = entry['date']
                # Merge entry data into combined_data, skipping duplicate keys
                for key, value in entry.items():
                    if key not in combined_data[date]:
                        combined_data[date][key] = value

        combined_data = list(combined_data.values())

        df = yf.download(ticker, start=start_date, end=end_date, interval="1d").reset_index()
        df = df.rename(columns={'Adj Close': 'close', 'Date': 'date'})
        #print(df[['date','close']])
        df['date'] = df['date'].dt.strftime('%Y-%m-%d')

        for item in combined_data:
            # Find the close price for the statement date or the closest available date prior to it
            target_date = item['date']
            counter = 0
            max_attempts = 10

            while target_date not in df['date'].values and counter < max_attempts:
                # If the target date doesn't exist, move one day back
                target_date = (pd.to_datetime(target_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
                counter += 1
            if counter == max_attempts:
                break

            # Get the close price for the found or closest date
            close_price = round(df[df['date'] == target_date]['close'].values[0], 2)
            item['price'] = close_price

            #print(f"Close price for {target_date}: {close_price}")

        combined_data = sorted(combined_data, key=lambda x: x['date'])

        df_income = pd.DataFrame(combined_data).dropna()

        df_income['Target'] = ((df_income['price'].shift(-1) - df_income['price']) / df_income['price'] > 0).astype(int)

        df_copy = df_income.copy()
        #print(df_copy)

        return df_copy

    except Exception as e:
        print(e)


async def process_symbol(ticker, con, start_date, end_date):
    try:
        test_size = 0.4
        start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
        end_date = datetime.today().strftime("%Y-%m-%d")
        predictor = FundamentalPredictor(path="ml_models/weights")
        df = await download_data(ticker, con, start_date, end_date)
        split_size = int(len(df) * (1 - test_size))
        test_data = df.iloc[split_size:]
        selected_features = ['shortTermCoverageRatios', 'netProfitMargin', 'debtRepayment', 'totalDebt', 'interestIncome', 'researchAndDevelopmentExpenses', 'priceEarningsToGrowthRatio', 'priceCashFlowRatio', 'cashPerShare', 'debtRatio', 'growthRevenue', 'revenue', 'growthNetIncome', 'ebitda', 'priceEarningsRatio', 'priceToBookRatio', 'epsdiluted', 'priceToSalesRatio', 'growthOtherCurrentLiabilities', 'receivablesTurnover', 'totalLiabilitiesAndStockholdersEquity', 'totalLiabilitiesAndTotalEquity', 'totalAssets', 'growthOtherCurrentAssets', 'retainedEarnings', 'totalEquity']
        data, prediction_list = predictor.evaluate_model(test_data[selected_features], test_data['Target'])

        '''
        output_list = [{'date': date, 'price': price, 'prediction': prediction, 'target': target}
                       for (date, price, target), prediction in zip(test_data[['date', 'price', 'Target']].iloc[-6:].values, prediction_list[-6:])]
        '''
        #print(output_list)

        if len(data) != 0:
            if data['precision'] >= 50 and data['accuracy'] >= 50:
                await save_json(ticker, data)

    except Exception as e:
        print(e)

async def run():
    con = sqlite3.connect('stocks.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap >= 1E9")
    stock_symbols = [row[0] for row in cursor.fetchall()]

    total_symbols = stock_symbols

    print(f"Total tickers: {len(total_symbols)}")
    start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
    end_date = datetime.today().strftime("%Y-%m-%d")

    chunk_size = len(total_symbols)  #// 70 # Divide the list into N chunks
    chunks = [total_symbols[i:i + chunk_size] for i in range(0, len(total_symbols), chunk_size)]
    for chunk in chunks:
        tasks = []
        for ticker in tqdm(chunk):
            tasks.append(process_symbol(ticker, con, start_date, end_date))

        await asyncio.gather(*tasks)

    con.close()

try:
    asyncio.run(run())
except Exception as e:
    print(e)
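The Target column above labels each statement period 1 when the next period's price is higher and 0 otherwise. A tiny illustration with made-up prices:

import pandas as pd

toy = pd.DataFrame({'price': [10.0, 12.0, 11.0, 11.5]})  # assumed sample prices
toy['Target'] = ((toy['price'].shift(-1) - toy['price']) / toy['price'] > 0).astype(int)
print(toy['Target'].tolist())  # [1, 0, 1, 0] — the last row has no "next" price, so its NaN comparison falls back to 0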
74
app/cron_heatmap.py
Normal file
@@ -0,0 +1,74 @@

from mixpanel_utils import MixpanelUtils
import ujson
import asyncio
import aiohttp
from datetime import datetime, timedelta
from collections import Counter, OrderedDict

from dotenv import load_dotenv
import os
load_dotenv()
api_key = os.getenv('FMP_API_KEY')


async def get_quote_of_stocks(ticker_list):
    ticker_str = ','.join(ticker_list)
    async with aiohttp.ClientSession() as session:
        url = f"https://financialmodelingprep.com/api/v3/quote/{ticker_str}?apikey={api_key}"
        async with session.get(url) as response:
            if response.status == 200:
                return await response.json()
            else:
                return []

async def run():
    index_list = ['sp500', 'nasdaq', 'dowjones']
    for index in index_list:
        url = f"https://financialmodelingprep.com/api/v3/{index}_constituent?apikey={api_key}"
        res_list = []

        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                data = await response.json()

                for item in data:
                    res_list.append({'symbol': item['symbol'], 'sector': item['sector']})

        ticker_list = [item['symbol'] for item in res_list]
        latest_quote = await get_quote_of_stocks(ticker_list)
        for quote in latest_quote:
            symbol = quote['symbol']
            for item in res_list:
                if item['symbol'] == symbol:
                    item['changesPercentage'] = round(quote['changesPercentage'], 2)
                    item['marketCap'] = quote['marketCap']

        # Create a dictionary to store sectors and their corresponding symbols and percentages
        sector_dict = {}

        for item in res_list:
            sector = item['sector']
            symbol = item['symbol']
            percentage = item['changesPercentage']
            marketCap = item['marketCap']

            if sector not in sector_dict:
                sector_dict[sector] = {'name': sector, 'value': 0, 'children': []}

            sector_dict[sector]['value'] += marketCap
            sector_dict[sector]['children'].append({'name': symbol, 'value': marketCap, 'changesPercentage': percentage})

        # Convert the dictionary to a list
        result_list = list(sector_dict.values())

        # Round the accumulated market-cap 'value' of each sector
        for sector in result_list:
            sector['value'] = round(sector['value'], 2)
        #print(result_list)

        with open(f"json/heatmaps/{index}.json", 'w') as file:
            ujson.dump(result_list, file)


asyncio.run(run())
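For reference, the heatmap JSON written above is a two-level tree of sectors and their constituents; a made-up example of the shape (values are illustrative, not real data):

example = [
    {
        'name': 'Technology',            # sector node
        'value': 5.1e12,                 # summed market cap of its children
        'children': [
            {'name': 'AAPL', 'value': 2.8e12, 'changesPercentage': 1.23},
            {'name': 'MSFT', 'value': 2.3e12, 'changesPercentage': -0.45},
        ],
    },
]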
148
app/cron_historical_price.py
Normal file
@@ -0,0 +1,148 @@

import ujson
import asyncio
import aiohttp
import aiofiles
import sqlite3
from datetime import datetime, timedelta, time
import pytz
import pandas as pd

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')

# Define a function to remove duplicates based on a key
def remove_duplicates(data, key):
    seen = set()
    new_data = []
    for item in data:
        if item[key] not in seen:
            seen.add(item[key])
            new_data.append(item)
    return new_data

async def save_price_data(symbol, data):
    async with aiofiles.open(f"json/historical-price/{symbol}.json", 'w') as file:
        await file.write(ujson.dumps(data))

async def fetch_and_save_symbols_data(symbols, etf_symbols, crypto_symbols, session):
    tasks = []
    for symbol in symbols:
        if symbol in etf_symbols:
            query_con = etf_con
        elif symbol in crypto_symbols:
            query_con = crypto_con
        else:
            query_con = con

        task = asyncio.create_task(get_historical_data(symbol, query_con, session))
        tasks.append(task)
    responses = await asyncio.gather(*tasks)

    for symbol, response in zip(symbols, responses):
        await save_price_data(symbol, response)

async def get_historical_data(ticker, query_con, session):
    try:
        # Form API request URLs
        url_1w = f"https://financialmodelingprep.com/api/v3/historical-chart/30min/{ticker}?from={start_date_1w}&to={end_date}&apikey={api_key}"
        url_1m = f"https://financialmodelingprep.com/api/v3/historical-chart/1hour/{ticker}?from={start_date_1m}&to={end_date}&apikey={api_key}"

        async with session.get(url_1w) as response_1w, session.get(url_1m) as response_1m:
            data = []
            for response in [response_1w, response_1m]:
                json_data = await response.json()
                df = pd.DataFrame(json_data).iloc[::-1].reset_index(drop=True)
                df = df.drop(['volume'], axis=1)
                df = df.round(2).rename(columns={"date": "time"})
                data.append(df.to_json(orient="records"))

        # Database read for 6M, 1Y, MAX data
        query_template = """
            SELECT date, open, high, low, close
            FROM "{ticker}"
            WHERE date BETWEEN ? AND ?
        """
        query = query_template.format(ticker=ticker)
        df_6m = pd.read_sql_query(query, query_con, params=(start_date_6m, end_date)).round(2).rename(columns={"date": "time"})
        df_1y = pd.read_sql_query(query, query_con, params=(start_date_1y, end_date)).round(2).rename(columns={"date": "time"})
        df_max = pd.read_sql_query(query, query_con, params=(start_date_max, end_date)).round(2).rename(columns={"date": "time"})

        res = {
            '1W': ujson.loads(data[0]) if data else [],
            '1M': ujson.loads(data[1]) if len(data) > 1 else [],
            '6M': ujson.loads(df_6m.to_json(orient="records")),
            '1Y': ujson.loads(df_1y.to_json(orient="records")),
            'MAX': ujson.loads(df_max.to_json(orient="records"))
        }
    except Exception as e:
        print(f"Failed to fetch data for {ticker}: {e}")
        res = {
            '1W': [],
            '1M': [],
            '6M': [],
            '1Y': [],
            'MAX': []
        }
    return res

async def run():
    total_symbols = []
    try:
        cursor = con.cursor()
        cursor.execute("PRAGMA journal_mode = wal")
        cursor.execute("SELECT DISTINCT symbol FROM stocks")
        stock_symbols = [row[0] for row in cursor.fetchall()]

        etf_cursor = etf_con.cursor()
        etf_cursor.execute("PRAGMA journal_mode = wal")
        etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
        etf_symbols = [row[0] for row in etf_cursor.fetchall()]

        crypto_cursor = crypto_con.cursor()
        crypto_cursor.execute("PRAGMA journal_mode = wal")
        crypto_cursor.execute("SELECT DISTINCT symbol FROM cryptos")
        crypto_symbols = [row[0] for row in crypto_cursor.fetchall()]

        total_symbols = stock_symbols + etf_symbols + crypto_symbols
    except Exception as e:
        print(f"Failed to fetch symbols: {e}")
        return

    try:
        connector = aiohttp.TCPConnector(limit=100)  # Adjust the limit as needed
        async with aiohttp.ClientSession(connector=connector) as session:
            for i in range(0, len(total_symbols), chunk_size):
                symbols_chunk = total_symbols[i:i + chunk_size]
                await fetch_and_save_symbols_data(symbols_chunk, etf_symbols, crypto_symbols, session)
                print('sleeping for 60 sec')
                await asyncio.sleep(60)  # Wait for 60 seconds between chunks
    except Exception as e:
        print(f"Failed to run fetch and save data: {e}")

try:
    con = sqlite3.connect('stocks.db')
    etf_con = sqlite3.connect('etf.db')
    crypto_con = sqlite3.connect('crypto.db')

    berlin_tz = pytz.timezone('Europe/Berlin')
    end_date = datetime.now(berlin_tz)
    start_date_1w = (end_date - timedelta(days=7)).strftime("%Y-%m-%d")
    start_date_1m = (end_date - timedelta(days=30)).strftime("%Y-%m-%d")
    start_date_6m = (end_date - timedelta(days=180)).strftime("%Y-%m-%d")
    start_date_1y = (end_date - timedelta(days=365)).strftime("%Y-%m-%d")
    start_date_max = datetime(1970, 1, 1).strftime("%Y-%m-%d")
    end_date = end_date.strftime("%Y-%m-%d")

    print(start_date_max, end_date)

    chunk_size = 250
    asyncio.run(run())
    con.close()
    etf_con.close()
    crypto_con.close()
except Exception as e:
    print(e)
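remove_duplicates above is a generic helper; a quick illustration of how it drops records that repeat a key (the sample candles are made up):

candles = [
    {'time': '2024-02-16 15:30:00', 'close': 10.0},
    {'time': '2024-02-16 15:30:00', 'close': 10.1},  # duplicate timestamp
    {'time': '2024-02-16 15:31:00', 'close': 10.2},
]
print(remove_duplicates(candles, 'time'))  # keeps the first record seen for each 'time'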
113
app/cron_insider_trading.py
Normal file
@@ -0,0 +1,113 @@

import ujson
import asyncio
import aiohttp
import sqlite3
from datetime import datetime
import pytz
from aiofiles import open as async_open

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')

# Function to check if the year is at least 2015
def is_at_least_2015(date_string):
    year = datetime.strptime(date_string, "%Y-%m-%d").year
    return year >= 2015

async def get_statistics_endpoints(session, symbol):
    url = f"https://financialmodelingprep.com/api/v4/insider-roaster-statistic?symbol={symbol}&apikey={api_key}"
    async with session.get(url) as response:
        if response.status == 200:
            return symbol, await response.json()
        else:
            return symbol, []

async def get_insider_trading_endpoints(session, symbol):
    aggregated_data = []
    for page in range(101):  # Pages from 0 to 100
        url = f"https://financialmodelingprep.com/api/v4/insider-trading?symbol={symbol}&page={page}&apikey={api_key}"
        async with session.get(url) as response:
            if response.status == 200:
                data = await response.json()
                if not data:
                    break  # Break if the result is empty
                aggregated_data.extend(data)
            else:
                break  # Break if the response status is not 200
    filtered_data = [item for item in aggregated_data if is_at_least_2015(item["transactionDate"][:10])]

    if len(filtered_data) > 0:
        await save_insider_trading_as_json(symbol, filtered_data)


async def save_statistics_as_json(symbol, data):
    async with async_open(f"json/insider-trading/statistics/{symbol}.json", 'w') as file:
        await file.write(ujson.dumps(data))

async def save_insider_trading_as_json(symbol, data):
    async with async_open(f"json/insider-trading/history/{symbol}.json", 'w') as file:
        await file.write(ujson.dumps(data))

async def aggregate_statistics(symbol, data):
    aggregated_data = {}
    for entry in data:
        year = entry['year']
        if year >= 2015:
            if year not in aggregated_data:
                aggregated_data[year] = {
                    'year': year,
                    'totalBought': 0,
                    'totalSold': 0,
                    'purchases': 0,
                    'sales': 0
                }
            aggregated_data[year]['totalBought'] += entry['totalBought']
            aggregated_data[year]['totalSold'] += entry['totalSold']
            aggregated_data[year]['purchases'] += entry['purchases']
            aggregated_data[year]['sales'] += entry['sales']

    res = list(aggregated_data.values())

    await save_statistics_as_json(symbol, res)


async def process_symbols(session, symbols):
    #History
    tasks = [get_insider_trading_endpoints(session, symbol) for symbol in symbols]
    await asyncio.gather(*tasks)

    #Statistics
    tasks = [get_statistics_endpoints(session, symbol) for symbol in symbols]
    results = await asyncio.gather(*tasks)
    for symbol, data in results:
        if data:
            await aggregate_statistics(symbol, data)


async def run():
    con = sqlite3.connect('stocks.db')
    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol NOT LIKE '%.%'")
    stock_symbols = [row[0] for row in cursor.fetchall()]
    con.close()

    chunk_size = max(1, len(stock_symbols) // 70)  # Divide the list into N chunks; guard against a zero step
    chunks = [stock_symbols[i:i + chunk_size] for i in range(0, len(stock_symbols), chunk_size)]

    async with aiohttp.ClientSession() as session:
        for chunk in chunks:
            await process_symbols(session, chunk)
            await asyncio.sleep(60)
            print('sleep')


try:
    asyncio.run(run())
except Exception as e:
    print(e)
346
app/cron_market_movers.py
Normal file
@@ -0,0 +1,346 @@

from datetime import date, datetime, timedelta, time
import ujson
import sqlite3
import pandas as pd
import numpy as np
from collections import defaultdict
import asyncio
import aiohttp
import pytz

#Update the market movers' price, changesPercentage, volume and marketCap regularly
berlin_tz = pytz.timezone('Europe/Berlin')

from dotenv import load_dotenv
import os
load_dotenv()
api_key = os.getenv('FMP_API_KEY')

def check_if_holiday():
    hol1_date = datetime(2023, 5, 29)
    hol2_date = datetime(2023, 6, 19)
    hol2_next_day_date = datetime(2023, 6, 20)
    hol3_date = datetime(2023, 9, 4)
    hol3_next_day_date = datetime(2023, 9, 5)
    hol4_date = datetime(2023, 11, 23)
    hol5_date = datetime(2023, 12, 25)
    hol6_date = datetime(2024, 1, 1)
    hol7_date = datetime(2024, 1, 15)
    hol8_date = datetime(2024, 2, 19)

    current_datetime = datetime.now(berlin_tz)
    if current_datetime.year == hol1_date.year and current_datetime.month == hol1_date.month and current_datetime.day == hol1_date.day:
        holiday = 'memorial_day'
    elif current_datetime.year == hol2_date.year and current_datetime.month == hol2_date.month and current_datetime.day == hol2_date.day:
        holiday = 'independence_day'
    elif current_datetime.year == hol2_next_day_date.year and current_datetime.month == hol2_next_day_date.month and current_datetime.day == hol2_next_day_date.day:
        holiday = 'independence_day+1'
    elif current_datetime.year == hol3_date.year and current_datetime.month == hol3_date.month and current_datetime.day == hol3_date.day:
        holiday = 'labor_day'
    elif current_datetime.year == hol3_next_day_date.year and current_datetime.month == hol3_next_day_date.month and current_datetime.day == hol3_next_day_date.day:
        holiday = 'labor_day+1'
    elif current_datetime.year == hol4_date.year and current_datetime.month == hol4_date.month and current_datetime.day == hol4_date.day:
        holiday = 'thanks_giving'
    elif current_datetime.year == hol5_date.year and current_datetime.month == hol5_date.month and current_datetime.day == hol5_date.day:
        holiday = 'christmas'
    elif current_datetime.year == hol6_date.year and current_datetime.month == hol6_date.month and current_datetime.day == hol6_date.day:
        holiday = 'new_year'
    elif current_datetime.year == hol7_date.year and current_datetime.month == hol7_date.month and current_datetime.day == hol7_date.day:
        holiday = 'martin_luther_king'
    elif current_datetime.year == hol8_date.year and current_datetime.month == hol8_date.month and current_datetime.day == hol8_date.day:
        holiday = 'washington_birthday'
    else:
        holiday = None
    return holiday

holiday = check_if_holiday()
holiday_dates = {
    'memorial_day': datetime(2023, 5, 26),
    'independence_day': datetime(2023, 6, 16),
    'independence_day+1': datetime(2023, 6, 16),
    'labor_day': datetime(2023, 9, 1),
    'labor_day+1': datetime(2023, 9, 1),
    'thanks_giving': datetime(2023, 11, 22),
    'christmas': datetime(2023, 12, 22),
    'new_year': datetime(2023, 12, 29),
    'martin_luther_king': datetime(2024, 1, 12),
    'washington_birthday': datetime(2024, 2, 16)
}

def correct_1d_interval():

    if holiday == 'memorial_day':
        start_date_1d = datetime(2023, 5, 26)
    elif holiday == 'independence_day' or holiday == 'independence_day+1':
        start_date_1d = datetime(2023, 6, 16)
    elif holiday == 'labor_day' or holiday == 'labor_day+1':
        start_date_1d = datetime(2023, 9, 1)
    elif holiday == 'thanks_giving':
        start_date_1d = datetime(2023, 11, 22)
    elif holiday == 'new_year':
        start_date_1d = datetime(2023, 12, 29)
    elif holiday == 'martin_luther_king':
        start_date_1d = datetime(2024, 1, 12)
    elif holiday == 'washington_birthday':
        start_date_1d = datetime(2024, 2, 16)
    else:
        current_time_berlin = datetime.now(berlin_tz)

        # Get the current weekday (Monday is 0 and Sunday is 6)
        current_weekday = current_time_berlin.weekday()
        is_afternoon = current_time_berlin.hour > 15 or (current_time_berlin.hour == 15 and current_time_berlin.minute >= 30)

        if current_weekday == 0:
            # It's Monday: use today after the 15:30 open, otherwise fall back to last Friday
            start_date_1d = current_time_berlin if is_afternoon else current_time_berlin - timedelta(days=3)
        elif current_weekday in (5, 6):  # Saturday or Sunday
            start_date_1d = current_time_berlin - timedelta(days=current_weekday % 5 + 1)
        else:
            start_date_1d = current_time_berlin if is_afternoon else current_time_berlin - timedelta(days=1)

    return start_date_1d

async def get_todays_data(ticker):

    end_date = datetime.now(berlin_tz)
    current_weekday = end_date.weekday()
    current_time_berlin = datetime.now(berlin_tz)
    is_afternoon = current_time_berlin.hour > 15 or (current_time_berlin.hour == 15 and current_time_berlin.minute >= 30)

    start_date_1d = correct_1d_interval()
    if holiday in holiday_dates:
        if holiday in ['independence_day+1', 'labor_day+1', 'christmas_day+1'] and not is_afternoon:
            end_date_1d = holiday_dates[holiday]
        else:
            end_date_1d = holiday_dates[holiday]
    elif current_weekday == 0:
        # It's Monday: use today after the 15:30 open, otherwise fall back to last Friday
        end_date_1d = current_time_berlin if is_afternoon else current_time_berlin - timedelta(days=3)
    else:
        end_date_1d = end_date

    start_date_1d = start_date_1d.strftime("%Y-%m-%d")
    end_date_1d = end_date_1d.strftime("%Y-%m-%d")

    url = f"https://financialmodelingprep.com/api/v3/historical-chart/1min/{ticker}?from={start_date_1d}&to={end_date_1d}&apikey={api_key}"

    df_1d = pd.DataFrame()

    current_date = correct_1d_interval()
    target_time = time(15, 30)
    extract_date = current_date.strftime('%Y-%m-%d')

    async with aiohttp.ClientSession() as session:
        responses = await asyncio.gather(session.get(url))

        for response in responses:
            try:
                json_data = await response.json()
                df_1d = pd.DataFrame(json_data).iloc[::-1].reset_index(drop=True)
                opening_price = df_1d['open'].iloc[0]
                df_1d = df_1d.drop(['open', 'high', 'low', 'volume'], axis=1)
                df_1d = df_1d.round(2).rename(columns={"date": "time", "close": "value"})

                if current_weekday == 5 or current_weekday == 6:
                    pass
                else:
                    if current_date.time() < target_time:
                        pass
                    else:
                        # Pad the intraday series with empty rows up to the 16:00 close
                        end_time = pd.to_datetime(f'{extract_date} 16:00:00')
                        new_index = pd.date_range(start=df_1d['time'].iloc[-1], end=end_time, freq='1min')

                        remaining_df = pd.DataFrame(index=new_index, columns=['value'])
                        remaining_df = remaining_df.reset_index().rename(columns={"index": "time"})
                        remaining_df['time'] = remaining_df['time'].dt.strftime('%Y-%m-%d %H:%M:%S')

                        df_1d = pd.concat([df_1d, remaining_df[1:]])
                        #To-do: FutureWarning — concatenating with empty or all-NA entries is deprecated;
                        #exclude the all-NA columns before the concat to keep the current behavior.

                df_1d = ujson.loads(df_1d.to_json(orient="records"))
            except:
                df_1d = []
    return df_1d

async def get_jsonparsed_data(session, url):
    async with session.get(url) as response:
        data = await response.json()
        return data

async def get_quote_of_stocks(ticker_list):
    ticker_str = ','.join(ticker_list)
    async with aiohttp.ClientSession() as session:
        url = f"https://financialmodelingprep.com/api/v3/quote/{ticker_str}?apikey={api_key}"
|
||||||
|
async with session.get(url) as response:
|
||||||
|
df = await response.json()
|
||||||
|
return df
|
||||||
|
|
||||||
|
|
||||||
|
async def get_gainer_loser_active_stocks():
|
||||||
|
|
||||||
|
#Database read 1y and 3y data
|
||||||
|
query_fundamental_template = """
|
||||||
|
SELECT
|
||||||
|
marketCap
|
||||||
|
FROM
|
||||||
|
stocks
|
||||||
|
WHERE
|
||||||
|
symbol = ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
volume
|
||||||
|
FROM
|
||||||
|
"{ticker}"
|
||||||
|
ORDER BY
|
||||||
|
rowid DESC
|
||||||
|
LIMIT 1
|
||||||
|
"""
|
||||||
|
|
||||||
|
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl=False)) as session:
|
||||||
|
gainer_url = f"https://financialmodelingprep.com/api/v3/stock_market/gainers?apikey={api_key}"
|
||||||
|
loser_url = f"https://financialmodelingprep.com/api/v3/stock_market/losers?apikey={api_key}"
|
||||||
|
active_url = f"https://financialmodelingprep.com/api/v3/stock_market/actives?apikey={api_key}"
|
||||||
|
|
||||||
|
# Gather all the HTTP requests concurrently
|
||||||
|
tasks = [
|
||||||
|
get_jsonparsed_data(session, gainer_url),
|
||||||
|
get_jsonparsed_data(session, loser_url),
|
||||||
|
get_jsonparsed_data(session, active_url)
|
||||||
|
]
|
||||||
|
|
||||||
|
gainer_json, loser_json, active_json = await asyncio.gather(*tasks)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
gainer_json = [{k: v for k, v in stock.items() if stock['symbol'] in symbols} for stock in gainer_json]
|
||||||
|
gainer_json = [entry for entry in gainer_json if entry]
|
||||||
|
|
||||||
|
loser_json = [{k: v for k, v in stock.items() if stock['symbol'] in symbols} for stock in loser_json]
|
||||||
|
loser_json = [entry for entry in loser_json if entry]
|
||||||
|
|
||||||
|
active_json = [{k: v for k, v in stock.items() if stock['symbol'] in symbols} for stock in active_json]
|
||||||
|
active_json = [entry for entry in active_json if entry]
|
||||||
|
for entry in active_json:
|
||||||
|
try:
|
||||||
|
symbol = entry['symbol']
|
||||||
|
query = query_template.format(ticker=symbol)
|
||||||
|
fundamental_data = pd.read_sql_query(query_fundamental_template, con, params=(symbol,))
|
||||||
|
volume = pd.read_sql_query(query, con)
|
||||||
|
entry['marketCap'] = int(fundamental_data['marketCap'].iloc[0])
|
||||||
|
entry['volume'] = int(volume['volume'].iloc[0])
|
||||||
|
except:
|
||||||
|
entry['marketCap'] = None
|
||||||
|
entry['volume'] = None
|
||||||
|
|
||||||
|
active_json = sorted(active_json, key=lambda x: (x['marketCap'] >= 10**9, x['volume']), reverse=True)
|
||||||
|
|
||||||
|
|
||||||
|
stocks = gainer_json[:20] + loser_json[:20] + active_json[:20]
|
||||||
|
|
||||||
|
#remove change key element
|
||||||
|
stocks = [{k: v for k, v in stock.items() if k != "change"} for stock in stocks]
|
||||||
|
|
||||||
|
|
||||||
|
for entry in stocks:
|
||||||
|
try:
|
||||||
|
symbol = entry['symbol']
|
||||||
|
query = query_template.format(ticker=symbol)
|
||||||
|
fundamental_data = pd.read_sql_query(query_fundamental_template, con, params=(symbol,))
|
||||||
|
volume = pd.read_sql_query(query, con)
|
||||||
|
entry['marketCap'] = int(fundamental_data['marketCap'].iloc[0])
|
||||||
|
entry['volume'] = int(volume['volume'].iloc[0])
|
||||||
|
except:
|
||||||
|
entry['marketCap'] = None
|
||||||
|
entry['volume'] = None
|
||||||
|
|
||||||
|
|
||||||
|
day_gainer_json = stocks[:20]
|
||||||
|
day_loser_json = stocks[20:40]
|
||||||
|
day_active_json = stocks[40:60]
|
||||||
|
|
||||||
|
query_market_movers = """
|
||||||
|
SELECT
|
||||||
|
gainer,loser,most_active
|
||||||
|
FROM
|
||||||
|
market_movers
|
||||||
|
"""
|
||||||
|
past_gainer = pd.read_sql_query(query_market_movers, con)
|
||||||
|
|
||||||
|
gainer_json = eval(past_gainer['gainer'].iloc[0])
|
||||||
|
loser_json = eval(past_gainer['loser'].iloc[0])
|
||||||
|
active_json = eval(past_gainer['most_active'].iloc[0])
|
||||||
|
|
||||||
|
gainer_json['1D'] = day_gainer_json
|
||||||
|
loser_json['1D'] = day_loser_json
|
||||||
|
active_json['1D'] = day_active_json #sorted(day_active_json, key=lambda x: x.get('volume', 0) if x.get('volume') is not None else 0, reverse=True)
|
||||||
|
|
||||||
|
|
||||||
|
data = {'gainers': gainer_json, 'losers': loser_json, 'active': active_json}
|
||||||
|
#Extract all unique symbols from gainer,loser, active
|
||||||
|
unique_symbols = set()
|
||||||
|
|
||||||
|
# Iterate through time periods, categories, and symbols
|
||||||
|
for time_period in data.keys():
|
||||||
|
for category in data[time_period].keys():
|
||||||
|
for stock_data in data[time_period][category]:
|
||||||
|
symbol = stock_data["symbol"]
|
||||||
|
unique_symbols.add(symbol)
|
||||||
|
|
||||||
|
# Convert the set to a list if needed
|
||||||
|
unique_symbols_list = list(unique_symbols)
|
||||||
|
|
||||||
|
#Get the latest quote of all unique symbol and map it back to the original data list to update all values
|
||||||
|
|
||||||
|
latest_quote = await get_quote_of_stocks(unique_symbols_list)
|
||||||
|
# Updating values in the data list based on matching symbols from the quote list
|
||||||
|
for time_period in data.keys():
|
||||||
|
for category in data[time_period].keys():
|
||||||
|
for stock_data in data[time_period][category]:
|
||||||
|
symbol = stock_data["symbol"]
|
||||||
|
quote_stock = next((item for item in latest_quote if item["symbol"] == symbol), None)
|
||||||
|
if quote_stock:
|
||||||
|
stock_data['price'] = quote_stock['price']
|
||||||
|
stock_data['changesPercentage'] = quote_stock['changesPercentage']
|
||||||
|
stock_data['marketCap'] = quote_stock['marketCap']
|
||||||
|
stock_data['volume'] = quote_stock['volume']
|
||||||
|
|
||||||
|
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
async def get_historical_data():
|
||||||
|
res_list = []
|
||||||
|
ticker_list = ['SPY', 'QQQ', 'DIA', 'IWM', 'IVV']
|
||||||
|
latest_quote = await get_quote_of_stocks(ticker_list)
|
||||||
|
|
||||||
|
for quote in latest_quote:
|
||||||
|
ticker = quote['symbol']
|
||||||
|
df = await get_todays_data(ticker)
|
||||||
|
|
||||||
|
res_list.append({'symbol': ticker, 'priceData': df, 'changesPercentage': round(quote['changesPercentage'],2), 'previousClose': round(quote['previousClose'],2)})
|
||||||
|
|
||||||
|
return res_list
|
||||||
|
|
||||||
|
|
||||||
|
try:
|
||||||
|
con = sqlite3.connect('stocks.db')
|
||||||
|
cursor = con.cursor()
|
||||||
|
cursor.execute("PRAGMA journal_mode = wal")
|
||||||
|
cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol != ?", ('%5EGSPC',))
|
||||||
|
symbols = [row[0] for row in cursor.fetchall()]
|
||||||
|
|
||||||
|
data = asyncio.run(get_historical_data())
|
||||||
|
with open(f"json/mini-plots-index/data.json", 'w') as file:
|
||||||
|
ujson.dump(data, file)
|
||||||
|
|
||||||
|
data = asyncio.run(get_gainer_loser_active_stocks())
|
||||||
|
with open(f"json/market-movers/data.json", 'w') as file:
|
||||||
|
ujson.dump(data, file)
|
||||||
|
con.close()
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
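A minimal sketch, not taken from the repo, of one way to address the pandas FutureWarning flagged in the to-do above: give the all-NA padding frame an explicit dtype so concat no longer has to infer result dtypes from all-NA object columns. Names and values here are illustrative only.

import numpy as np
import pandas as pd

padding = pd.DataFrame({"time": ["2023-12-29 15:59:00", "2023-12-29 16:00:00"],
                        "value": np.nan})
padding = padding.astype({"value": "float64"})   # explicit dtype instead of object NaNs

observed = pd.DataFrame({"time": ["2023-12-29 15:58:00"], "value": [475.31]})
combined = pd.concat([observed, padding], ignore_index=True)
print(combined.dtypes["value"])   # float64, no dtype inference from all-NA data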
66
app/cron_market_news.py
Normal file
@ -0,0 +1,66 @@
import ujson
import asyncio
import aiohttp
import finnhub
from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')
finnhub_api_key = os.getenv('FINNHUB_API_KEY')
finnhub_client = finnhub.Client(api_key=finnhub_api_key)


'''
async def run():
    limit = 200
    urls = [
        f'https://financialmodelingprep.com/api/v3/stock_news?limit={limit}&apikey={api_key}',
        f"https://financialmodelingprep.com/api/v4/general_news?limit={limit}&apikey={api_key}",
        f"https://financialmodelingprep.com/api/v4/crypto_news?limit={limit}&apikey={api_key}",
    ]
    for url in urls:
        res_list = []

        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                data = await response.json()
                if "stock_news" in url:
                    data_name = 'stock-news'
                elif "general_news" in url:
                    data_name = 'general-news'
                elif "crypto_news" in url:
                    data_name = 'crypto-news'

                with open(f"json/market-news/{data_name}.json", 'w') as file:
                    ujson.dump(data, file)
'''

# Finnhub data
async def run():
    limit = 200
    urls = [
        f'https://financialmodelingprep.com/api/v3/stock_news?limit={limit}&apikey={api_key}',
        f'https://financialmodelingprep.com/api/v4/crypto_news?limit={limit}&apikey={api_key}',
    ]
    for url in urls:
        res_list = []

        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                data = await response.json()
                if "stock_news" in url:
                    data_name = 'stock-news'
                elif "crypto_news" in url:
                    data_name = 'crypto-news'
                with open(f"json/market-news/{data_name}.json", 'w') as file:
                    ujson.dump(data, file)

    general_news = finnhub_client.general_news('general')
    general_news = [item for item in general_news if item["source"] != "" and item["image"] != ""]
    with open(f"json/market-news/general-news.json", 'w') as file:
        ujson.dump(general_news, file)


try:
    asyncio.run(run())
except Exception as e:
    print(e)
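A hypothetical refactoring sketch (the helper and mapping names are illustrative, not from the repo): mapping each feed URL fragment to its output file name keeps the run() loop free of if/elif chains when more feeds are added.

URL_TO_NAME = {
    "stock_news": "stock-news",
    "crypto_news": "crypto-news",
}

def output_name(url):
    # Return the output file stem for the first matching fragment, else None
    for fragment, name in URL_TO_NAME.items():
        if fragment in url:
            return name
    return None

print(output_name("https://example.com/api/v3/stock_news?limit=200"))  # stock-news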
141
app/cron_one_day_price.py
Normal file
@ -0,0 +1,141 @@
import ujson
import asyncio
import aiohttp
import sqlite3
from datetime import datetime, timedelta, time
import pytz
import pandas as pd

from GetStartEndDate import GetStartEndDate

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')


async def save_price_data(symbol, data):
    with open(f"json/one-day-price/{symbol}.json", 'w') as file:
        ujson.dump(data, file)


async def fetch_and_save_symbols_data(symbols):
    tasks = []
    for symbol in symbols:
        task = asyncio.create_task(get_todays_data(symbol))
        tasks.append(task)
    responses = await asyncio.gather(*tasks)

    for symbol, response in zip(symbols, responses):
        await save_price_data(symbol, response)


async def get_todays_data(ticker):

    start_date_1d, end_date_1d = GetStartEndDate().run()

    current_weekday = end_date_1d.weekday()

    start_date = start_date_1d.strftime("%Y-%m-%d")
    end_date = end_date_1d.strftime("%Y-%m-%d")

    url = f"https://financialmodelingprep.com/api/v3/historical-chart/1min/{ticker}?from={start_date}&to={end_date}&apikey={api_key}"

    df_1d = pd.DataFrame()

    current_date = start_date_1d
    target_time = time(9, 30)

    extract_date = current_date.strftime('%Y-%m-%d')

    async with aiohttp.ClientSession() as session:
        responses = await asyncio.gather(session.get(url))

        for response in responses:
            try:
                json_data = await response.json()
                df_1d = pd.DataFrame(json_data).iloc[::-1].reset_index(drop=True)
                df_1d = df_1d.drop(['volume'], axis=1)
                df_1d = df_1d.round(2).rename(columns={"date": "time"})
                try:
                    # Anchor the first close at the stored previous close for this ticker
                    with open(f"json/quote/{ticker}.json", 'r') as file:
                        res = ujson.load(file)
                    df_1d['close'].iloc[0] = res['previousClose']
                except Exception:
                    pass

                if current_weekday == 5 or current_weekday == 6:
                    pass
                else:
                    if current_date.time() < target_time:
                        pass
                    else:
                        # Pad the series with empty one-minute rows up to the 16:00 close
                        end_time = pd.to_datetime(f'{extract_date} 16:00:00')
                        new_index = pd.date_range(start=df_1d['time'].iloc[-1], end=end_time, freq='1min')

                        remaining_df = pd.DataFrame(index=new_index, columns=['open', 'high', 'low', 'close'])
                        remaining_df = remaining_df.reset_index().rename(columns={"index": "time"})
                        remaining_df['time'] = remaining_df['time'].dt.strftime('%Y-%m-%d %H:%M:%S')

                        df_1d = pd.concat([df_1d, remaining_df[1:]])
                        # To-do FutureWarning: DataFrame concatenation with empty or all-NA entries is deprecated;
                        # exclude the relevant entries before the concat operation to retain the old behavior.

                df_1d = ujson.loads(df_1d.to_json(orient="records"))
            except Exception as e:
                print(e)
                df_1d = []

    res = df_1d

    return res


async def run():
    con = sqlite3.connect('stocks.db')
    etf_con = sqlite3.connect('etf.db')
    crypto_con = sqlite3.connect('crypto.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks")
    stocks_symbols = [row[0] for row in cursor.fetchall()]

    etf_cursor = etf_con.cursor()
    etf_cursor.execute("PRAGMA journal_mode = wal")
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    crypto_cursor = crypto_con.cursor()
    crypto_cursor.execute("PRAGMA journal_mode = wal")
    crypto_cursor.execute("SELECT DISTINCT symbol FROM cryptos")
    crypto_symbols = [row[0] for row in crypto_cursor.fetchall()]

    con.close()
    etf_con.close()
    crypto_con.close()

    new_york_tz = pytz.timezone('America/New_York')
    current_time_ny = datetime.now(new_york_tz)
    market_open = (current_time_ny.hour == 9 and current_time_ny.minute >= 30) or \
                  (current_time_ny.hour > 9 and current_time_ny.hour < 17) or \
                  (current_time_ny.hour == 17 and current_time_ny.minute == 0)

    total_symbols = stocks_symbols + etf_symbols + crypto_symbols

    if market_open:
        chunk_size = 1000
        for i in range(0, len(total_symbols), chunk_size):
            symbols_chunk = total_symbols[i:i+chunk_size]
            await fetch_and_save_symbols_data(symbols_chunk)
            print('sleeping for 45 sec')
            await asyncio.sleep(45)  # Wait 45 seconds between chunks
    else:
        print('Market Closed')


try:
    asyncio.run(run())
except Exception as e:
    print(e)
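A small, self-contained sketch of the chunk-then-sleep throttling pattern used in run() above; the symbols, chunk size, and delay here are placeholders, not values from the repo.

import asyncio

async def handle(symbol):
    await asyncio.sleep(0)          # stand-in for the real fetch-and-save call
    return symbol

async def run_in_chunks(symbols, chunk_size=3, pause=1):
    for i in range(0, len(symbols), chunk_size):
        chunk = symbols[i:i + chunk_size]
        await asyncio.gather(*(handle(s) for s in chunk))
        await asyncio.sleep(pause)  # throttle between chunks to respect API limits

asyncio.run(run_in_chunks(["AAPL", "MSFT", "NVDA", "AMD", "TSLA"]))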
139
app/cron_options_bubble.py
Normal file
@ -0,0 +1,139 @@
import sqlite3
from datetime import datetime, timedelta, date
import ujson
import asyncio
import os
from dotenv import load_dotenv
from benzinga import financial_data
import time


load_dotenv()
api_key = os.getenv('BENZINGA_API_KEY')

fin = financial_data.Benzinga(api_key)


def calculate_dte(date_expiration):
    expiration_date = datetime.strptime(date_expiration, "%Y-%m-%d")
    return (expiration_date - datetime.today()).days


def calculate_avg_dte(data):
    active_options = [entry for entry in data if calculate_dte(entry['date_expiration']) >= 0]

    if active_options:
        total_dte = sum(entry['dte'] for entry in active_options)
        return int(total_dte / len(active_options))
    else:
        return 0


def calculate_put_call_volumes(data):
    put_volume = sum(int(entry['volume']) for entry in data if entry['put_call'] == 'PUT')
    call_volume = sum(int(entry['volume']) for entry in data if entry['put_call'] == 'CALL')
    return put_volume, call_volume


def options_bubble_data(chunk):
    try:
        company_tickers = ','.join(chunk)
        end_date = date.today()
        start_date = end_date - timedelta(90)

        end_date_str = end_date.strftime('%Y-%m-%d')
        start_date_str = start_date.strftime('%Y-%m-%d')

        res_list = []
        for page in range(0, 100):
            try:
                data = fin.options_activity(company_tickers=company_tickers, page=page, pagesize=500, date_from=start_date_str, date_to=end_date_str)
                data = ujson.loads(fin.output(data))['option_activity']
                res_list += data
            except Exception:
                break

        res_filtered = [{key: value for key, value in item.items() if key in ['ticker', 'date', 'date_expiration', 'put_call', 'volume', 'open_interest']} for item in res_list]

        for option_type in ['CALL', 'PUT']:
            for item in res_filtered:
                if item['put_call'].upper() == option_type:
                    item['dte'] = calculate_dte(item['date_expiration'])
                    if item['ticker'] in ['BRK.A', 'BRK.B']:
                        item['ticker'] = f"BRK-{item['ticker'][-1]}"

        # Save raw data for each ticker for the options page stacked bar chart
        for ticker in chunk:
            ticker_filtered_data = [entry for entry in res_filtered if entry['ticker'] == ticker]
            if len(ticker_filtered_data) != 0:
                # Sum up calls and puts for each day for the plot
                summed_data = {}
                for entry in ticker_filtered_data:
                    volume = int(entry['volume'])
                    open_interest = int(entry['open_interest'])
                    put_call = entry['put_call']

                    if entry['date'] not in summed_data:
                        summed_data[entry['date']] = {'CALL': {'volume': 0, 'open_interest': 0}, 'PUT': {'volume': 0, 'open_interest': 0}}

                    summed_data[entry['date']][put_call]['volume'] += volume
                    summed_data[entry['date']][put_call]['open_interest'] += open_interest

                result_list = [{'date': date, 'CALL': summed_data[date]['CALL'], 'PUT': summed_data[date]['PUT']} for date in summed_data]
                # Reverse the list
                result_list = result_list[::-1]
                with open(f"json/options-flow/company/{ticker}.json", 'w') as file:
                    ujson.dump(result_list, file)

        # Save bubble data for each ticker for the overview page
        for ticker in chunk:

            bubble_data = {}
            for time_period, days in {'oneDay': 1, 'oneWeek': 7, 'oneMonth': 30, 'threeMonth': 90}.items():
                start_date = end_date - timedelta(days=days)  # end_date is today

                filtered_data = [item for item in res_filtered if start_date <= datetime.strptime(item['date'], '%Y-%m-%d').date() <= end_date]

                ticker_filtered_data = [entry for entry in filtered_data if entry['ticker'] == ticker]
                put_volume, call_volume = calculate_put_call_volumes(ticker_filtered_data)
                avg_dte = calculate_avg_dte(ticker_filtered_data)
                bubble_data[time_period] = {'putVolume': put_volume, 'callVolume': call_volume, 'avgDTE': avg_dte}

            if all(all(value == 0 for value in data.values()) for data in bubble_data.values()):
                bubble_data = {}
                # don't save the json
            else:
                with open(f"json/options-bubble/{ticker}.json", 'w') as file:
                    ujson.dump(bubble_data, file)

    except ValueError as ve:
        print(ve)
    except Exception as e:
        print(e)


try:
    stock_con = sqlite3.connect('stocks.db')
    stock_cursor = stock_con.cursor()
    stock_cursor.execute("SELECT DISTINCT symbol FROM stocks")
    stock_symbols = [row[0] for row in stock_cursor.fetchall()]

    etf_con = sqlite3.connect('etf.db')
    etf_cursor = etf_con.cursor()
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    stock_con.close()
    etf_con.close()

    total_symbols = stock_symbols + etf_symbols
    total_symbols = [item.replace("BRK-B", "BRK.B") for item in total_symbols]

    chunk_size = len(total_symbols) // 20  # Divide the list into N chunks
    chunks = [total_symbols[i:i + chunk_size] for i in range(0, len(total_symbols), chunk_size)]

    for chunk in chunks:
        options_bubble_data(chunk)

except Exception as e:
    print(e)
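A tiny worked example (hand-made data, not from the Benzinga API) of the quantities the helpers above compute for one ticker: put and call volumes are summed separately, and the average days-to-expiration is taken over the still-active contracts.

from datetime import datetime, timedelta

future = (datetime.today() + timedelta(days=10)).strftime("%Y-%m-%d")
sample = [
    {"date_expiration": future, "put_call": "CALL", "volume": "120", "dte": 10},
    {"date_expiration": future, "put_call": "PUT",  "volume": "80",  "dte": 10},
]
put_volume = sum(int(e["volume"]) for e in sample if e["put_call"] == "PUT")    # 80
call_volume = sum(int(e["volume"]) for e in sample if e["put_call"] == "CALL")  # 120
avg_dte = int(sum(e["dte"] for e in sample) / len(sample))                      # 10
print(put_volume, call_volume, avg_dte)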
100
app/cron_options_flow.py
Normal file
@ -0,0 +1,100 @@
import time
from benzinga import financial_data
import ujson
import numpy as np
import sqlite3
import asyncio
from datetime import datetime, timedelta
import concurrent.futures
from GetStartEndDate import GetStartEndDate

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('BENZINGA_API_KEY')

fin = financial_data.Benzinga(api_key)

stock_con = sqlite3.connect('stocks.db')
stock_cursor = stock_con.cursor()
stock_cursor.execute("SELECT DISTINCT symbol FROM stocks")
stock_symbols = [row[0] for row in stock_cursor.fetchall()]

etf_con = sqlite3.connect('etf.db')
etf_cursor = etf_con.cursor()
etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
etf_symbols = [row[0] for row in etf_cursor.fetchall()]

start_date_1d, end_date_1d = GetStartEndDate().run()
start_date = start_date_1d.strftime("%Y-%m-%d")
end_date = end_date_1d.strftime("%Y-%m-%d")

#print(start_date, end_date)


def process_page(page):
    try:
        data = fin.options_activity(date_from=start_date, date_to=end_date, page=page, pagesize=1000)
        data = ujson.loads(fin.output(data))['option_activity']
        filtered_data = [{key: value for key, value in item.items() if key in ['ticker', 'time', 'id', 'sentiment', 'underlying_price', 'cost_basis', 'option_activity_type', 'date', 'date_expiration', 'open_interest', 'price', 'put_call', 'strike_price', 'volume']} for item in data]
        time.sleep(1)
        page_list = []
        for item in filtered_data:
            if item['underlying_price'] != '':
                ticker = item['ticker']
                if ticker == 'BRK.A':
                    ticker = 'BRK-A'
                elif ticker == 'BRK.B':
                    ticker = 'BRK-B'

                put_call = 'Calls' if item['put_call'] == 'CALL' else 'Puts'

                asset_type = 'stock' if ticker in stock_symbols else ('etf' if ticker in etf_symbols else '')

                item['assetType'] = asset_type
                item['put_call'] = put_call
                item['ticker'] = ticker
                item['price'] = round(float(item['price']), 2)
                item['strike_price'] = round(float(item['strike_price']), 2)
                item['cost_basis'] = round(float(item['cost_basis']), 2)
                item['underlying_price'] = round(float(item['underlying_price']), 2)
                item['type'] = item['option_activity_type'].capitalize()
                item['sentiment'] = item['sentiment'].capitalize()

                page_list.append(item)

        return page_list
    except Exception as e:
        print(f"Error processing page {page}: {e}")
        return []


# fin, stock_symbols, and etf_symbols are defined above
res_list = []

# Adjust max_workers to control the degree of parallelism
max_workers = 6

# Fetch pages concurrently
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
    future_to_page = {executor.submit(process_page, page): page for page in range(20)}
    for future in concurrent.futures.as_completed(future_to_page):
        page = future_to_page[future]
        try:
            page_list = future.result()
            res_list += page_list
        except Exception as e:
            print(f"Exception occurred: {e}")
            break

# res_list now contains the aggregated results from all pages
#print(res_list)
def custom_key(item):
    return item['time']

res_list = sorted(res_list, key=custom_key, reverse=True)

with open(f"json/options-flow/feed/data.json", 'w') as file:
    ujson.dump(res_list, file)

stock_con.close()
etf_con.close()
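A short check (illustrative values only) of why sorting on the raw 'time' strings works here: as long as the feed reports times in zero-padded HH:MM:SS form, the strings sort lexicographically in chronological order, so no datetime parsing is needed in custom_key.

entries = [{"time": "09:31:05"}, {"time": "15:59:59"}, {"time": "10:02:11"}]
ordered = sorted(entries, key=lambda item: item["time"], reverse=True)
print([e["time"] for e in ordered])   # ['15:59:59', '10:02:11', '09:31:05']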
101
app/cron_options_zero_dte.py
Normal file
@ -0,0 +1,101 @@
import time
from benzinga import financial_data
import ujson
import numpy as np
import sqlite3
import asyncio
from datetime import datetime, timedelta
import concurrent.futures
from GetStartEndDate import GetStartEndDate

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('BENZINGA_API_KEY')

fin = financial_data.Benzinga(api_key)

stock_con = sqlite3.connect('stocks.db')
stock_cursor = stock_con.cursor()
stock_cursor.execute("SELECT DISTINCT symbol FROM stocks")
stock_symbols = [row[0] for row in stock_cursor.fetchall()]

etf_con = sqlite3.connect('etf.db')
etf_cursor = etf_con.cursor()
etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
etf_symbols = [row[0] for row in etf_cursor.fetchall()]

start_date_1d, end_date_1d = GetStartEndDate().run()
start_date = start_date_1d.strftime("%Y-%m-%d")
end_date = end_date_1d.strftime("%Y-%m-%d")

#print(start_date, end_date)


def process_page(page):
    try:
        data = fin.options_activity(date_from=start_date, date_to=end_date, page=page, pagesize=1000)
        data = ujson.loads(fin.output(data))['option_activity']
        filtered_data = [{key: value for key, value in item.items() if key in ['ticker', 'time', 'id', 'sentiment', 'underlying_price', 'cost_basis', 'option_activity_type', 'date', 'date_expiration', 'open_interest', 'price', 'put_call', 'strike_price', 'volume']} for item in data]
        time.sleep(1)
        page_list = []
        for item in filtered_data:
            if item['underlying_price'] != '':
                ticker = item['ticker']
                if ticker == 'BRK.A':
                    ticker = 'BRK-A'
                elif ticker == 'BRK.B':
                    ticker = 'BRK-B'

                put_call = 'Calls' if item['put_call'] == 'CALL' else 'Puts'

                asset_type = 'stock' if ticker in stock_symbols else ('etf' if ticker in etf_symbols else '')

                item['assetType'] = asset_type
                item['put_call'] = put_call
                item['ticker'] = ticker
                item['price'] = round(float(item['price']), 2)
                item['strike_price'] = round(float(item['strike_price']), 2)
                item['cost_basis'] = round(float(item['cost_basis']), 2)
                item['underlying_price'] = round(float(item['underlying_price']), 2)
                item['type'] = item['option_activity_type'].capitalize()
                item['sentiment'] = item['sentiment'].capitalize()

                # Keep only contracts that expire today (zero days to expiration)
                if item['date_expiration'] == start_date:
                    page_list.append(item)
        return page_list
    except Exception as e:
        print(f"Error processing page {page}: {e}")
        return []


# fin, stock_symbols, and etf_symbols are defined above
res_list = []

# Adjust max_workers to control the degree of parallelism
max_workers = 6

# Fetch pages concurrently
with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
    future_to_page = {executor.submit(process_page, page): page for page in range(20)}
    for future in concurrent.futures.as_completed(future_to_page):
        page = future_to_page[future]
        try:
            page_list = future.result()
            res_list += page_list
        except Exception as e:
            print(f"Exception occurred: {e}")

# res_list now contains the aggregated results from all pages
#print(res_list)
#print(len(res_list))

# Define a custom key function to extract the time and convert it to a sortable format
def custom_key(item):
    return item['time']

res_list = sorted(res_list, key=custom_key, reverse=True)
with open(f"json/options-flow/zero-dte/data.json", 'w') as file:
    ujson.dump(res_list, file)

stock_con.close()
etf_con.close()
189
app/cron_portfolio.py
Normal file
@ -0,0 +1,189 @@
import pytz
from datetime import datetime, timedelta
from urllib.request import urlopen
import certifi
import json
import ujson
import schedule
import time
import subprocess
from pocketbase import PocketBase  # Client also works the same
import asyncio
import aiohttp
import pandas as pd
import numpy as np

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')
pb_admin_email = os.getenv('POCKETBASE_ADMIN_EMAIL')
pb_password = os.getenv('POCKETBASE_PASSWORD')

#berlin_tz = pytz.timezone('Europe/Berlin')
new_york_tz = pytz.timezone('America/New_York')
pb = PocketBase('http://127.0.0.1:8090')
admin_data = pb.admins.auth_with_password(pb_admin_email, pb_password)

# Set the system's timezone to Berlin at the beginning
subprocess.run(["timedatectl", "set-timezone", "Europe/Berlin"])


async def get_quote_of_stocks(ticker_list):
    ticker_str = ','.join(ticker_list)
    async with aiohttp.ClientSession() as session:
        url = f"https://financialmodelingprep.com/api/v3/quote/{ticker_str}?apikey={api_key}"
        async with session.get(url) as response:
            df = await response.json()
            return df


def check_number_of_shares(holdings, trading_history):
    # Create a dictionary to track the current number of shares for each symbol
    share_count = {}
    # Update share count based on history
    for transaction in trading_history:
        symbol = transaction["symbol"]
        num_shares = transaction["numberOfShares"]
        if transaction["type"] == "buy":
            # Increment the share count for the symbol
            share_count[symbol] = share_count.get(symbol, 0) + num_shares
        elif transaction["type"] == "sell":
            # Decrement the share count for the symbol
            share_count[symbol] = share_count.get(symbol, 0) - num_shares

    # Update the holdings list based on the share count
    for holding in holdings:
        symbol = holding["symbol"]
        if symbol in share_count:
            holding["numberOfShares"] = share_count[symbol]

    return holdings


def compute_available_cash(transactions):
    available_cash = 100000  # Initial available cash
    for transaction in transactions:
        if transaction['type'] == 'buy':
            shares_bought = transaction['numberOfShares']
            price_per_share = transaction['price']
            total_cost = shares_bought * price_per_share
            available_cash -= total_cost
        elif transaction['type'] == 'sell':
            shares_sold = transaction['numberOfShares']
            price_per_share = transaction['price']
            total_gain = shares_sold * price_per_share
            available_cash += total_gain

    return available_cash


def compute_overall_return(initial_budget, transactions):
    current_budget = initial_budget

    for transaction in transactions:
        if transaction["type"] == "buy":
            current_budget -= transaction["numberOfShares"] * transaction["price"]
        elif transaction["type"] == "sell":
            current_budget += transaction["numberOfShares"] * transaction["price"]

    overall_return = (current_budget - initial_budget) / initial_budget * 100
    #print('overall return: ', overall_return)
    return overall_return


async def update_portfolio():
    current_time = datetime.now(new_york_tz)
    current_weekday = current_time.weekday()

    initial_budget = 100000

    opening_hour = 9
    opening_minute = 30
    closing_hour = 17

    is_market_open = (current_time.hour > opening_hour or (current_time.hour == opening_hour and current_time.minute >= opening_minute)) and current_time.hour < closing_hour
    if current_weekday <= 5 and is_market_open:
        # Get the current date
        current_month = datetime.today()
        # Set the day to 1 to get the beginning of the current month
        beginning_of_month = current_month.replace(day=1)
        # Format it as a string if needed
        formatted_date = beginning_of_month.strftime("%Y-%m-%d")

        result = pb.collection("portfolios").get_full_list(query_params={"filter": f'created >= "{formatted_date}"'})

        ranking_list = []
        ticker_list = []
        if len(result) != 0:
            # Get all tickers from all portfolios
            for port in result:
                if len(port.holdings) != 0:
                    ticker_list += [i['symbol'] for i in port.holdings]
            ticker_list = list(set(ticker_list))
            # Unique ticker_list
            data = await get_quote_of_stocks(ticker_list)
            # Get all quotes in bulk to save api calls

            for x in result:
                if len(x.trading_history) > 0:
                    try:
                        if len(x.holdings) != 0:

                            # Compute the correct available cash to avoid bugs
                            x.available_cash = compute_available_cash(x.trading_history)

                            account_value = x.available_cash
                            quote_data_dict = {dd['symbol']: dd for dd in data}
                            # Compute the correct number of shares to avoid bugs
                            x.holdings = check_number_of_shares(x.holdings, x.trading_history)

                            for item in x.holdings:
                                dd = quote_data_dict.get(item['symbol'])
                                if dd:
                                    current_price = dd['price']
                                    since_bought_change = round((current_price / item['boughtPrice'] - 1) * 100, 2)
                                    account_value += current_price * item['numberOfShares']

                                    # Update holdings_list
                                    item['currentPrice'] = current_price
                                    item['sinceBoughtChange'] = since_bought_change

                            overall_return = round((account_value / initial_budget - 1) * 100, 2)
                            # Update Pocketbase with new values

                            pb.collection("portfolios").update(x.id, {
                                "accountValue": account_value,
                                "overallReturn": overall_return,
                                "availableCash": x.available_cash,
                                "holdings": x.holdings,
                            })
                        else:
                            #overall_return = x.overall_return
                            overall_return = compute_overall_return(initial_budget, x.trading_history)
                            account_value = round(initial_budget * (1 + overall_return / 100), 2)
                            available_cash = account_value

                            pb.collection("portfolios").update(x.id, {
                                "accountValue": account_value,
                                "overallReturn": overall_return,
                                "availableCash": available_cash,
                            })

                        ranking_list.append({'userId': x.id, 'overallReturn': overall_return})

                    except Exception as e:
                        print(e)
                else:
                    pass

        # Apply ranking to each user
        sorted_ranking_list = sorted(ranking_list, key=lambda x: x['overallReturn'], reverse=True)
        for rank, item in enumerate(sorted_ranking_list):
            pb.collection("portfolios").update(item['userId'], {
                "rank": rank + 1,
            })
        print('Done')
    else:
        print('Market Closed')


asyncio.run(update_portfolio())
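A hand-made example (numbers invented for illustration) of how the two helpers above interact: cash falls on buys, rises on sells, and the overall return compares the realized budget against the initial 100,000.

transactions = [
    {"type": "buy",  "symbol": "AAPL", "numberOfShares": 10, "price": 150.0},
    {"type": "sell", "symbol": "AAPL", "numberOfShares": 10, "price": 165.0},
]
initial_budget = 100000
cash = initial_budget
for t in transactions:
    amount = t["numberOfShares"] * t["price"]
    cash += amount if t["type"] == "sell" else -amount
overall_return = (cash - initial_budget) / initial_budget * 100
print(cash, round(overall_return, 2))   # 100150.0 0.15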
155
app/cron_price_alert.py
Normal file
@ -0,0 +1,155 @@
import pytz
from datetime import datetime, timedelta
from urllib.request import urlopen
import certifi
import json
import ujson
import schedule
import time
import subprocess
from pocketbase import PocketBase  # Client also works the same
import asyncio
import aiohttp
import pandas as pd
import numpy as np

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import boto3
from botocore.exceptions import NoCredentialsError
from bs4 import BeautifulSoup

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')
pb_admin_email = os.getenv('POCKETBASE_ADMIN_EMAIL')
pb_password = os.getenv('POCKETBASE_PASSWORD')

aws_access_key_id = os.getenv('AWS_ACCESS_KEY_ID')
aws_secret_access_key = os.getenv('AWS_SECRET_ACCESS_KEY')

berlin_tz = pytz.timezone('Europe/Berlin')
pb = PocketBase('http://127.0.0.1:8090')
admin_data = pb.admins.auth_with_password(pb_admin_email, pb_password)


# Send price alert via email
def send_email(recipient, symbol, asset_type, current_price, target_price, condition):
    # Replace the placeholders with your AWS SES credentials
    region_name = 'eu-north-1'  # email-smtp.eu-north-1.amazonaws.com

    # Replace the placeholders with your sender email and password
    sender_email = 'mrahimi@stocknear.com'

    to_email = recipient  # user email address
    subject = f'Price Alert triggered for ${symbol}'

    # Read the HTML template
    with open('html_template/price_alert.html', 'r') as file:
        html_content = file.read()

    # Parse the HTML content
    soup = BeautifulSoup(html_content, 'html.parser')

    # Extract the body element
    html_body = str(soup.body)
    # Get the current date
    current_date = datetime.now()
    # Define the format string
    date_format = "%A - %B %d, %Y"
    # Format the date
    formatted_date = current_date.strftime(date_format)

    if asset_type == 'stock':
        asset_type = 'stocks'
    elif asset_type == 'etf':
        asset_type = 'etf'
    elif asset_type == 'crypto':
        asset_type = 'crypto'

    html_body = html_body.replace('currentDate', formatted_date)
    html_body = html_body.replace('addingSentence', f'The price of ${current_price} is {condition} your target price of ${target_price}')
    html_body = html_body.replace('symbol', symbol)
    html_body = html_body.replace('asset-link', f'/{asset_type}/{symbol}')

    # Create a MIMEMultipart object
    message = MIMEMultipart('alternative')
    message['Subject'] = subject
    message['From'] = sender_email
    message['To'] = to_email

    # Preheader text
    preheader = MIMEText("This is a price alert notification.", 'plain')

    message.attach(MIMEText(html_body, 'html'))

    # Use Amazon SES to send the email
    ses_client = boto3.client(
        'ses',
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        region_name=region_name
    )

    try:
        # Send the email
        response = ses_client.send_raw_email(
            Source=message['From'],
            Destinations=[message['To']],
            RawMessage={'Data': message.as_string()},
        )
        print("Email sent successfully!")
    except NoCredentialsError:
        print("AWS credentials not available")
    except Exception as e:
        print(f"Error sending email: {e}")


async def run():
    result = pb.collection("priceAlert").get_full_list(query_params={"filter": 'triggered=false'})
    if len(result) != 0:
        for item in result:
            symbol = item.symbol
            with open(f"json/quote/{symbol}.json", 'r') as file:
                data = ujson.load(file)
            current_price = round(data['price'], 2)
            target_price = round(item.target_price, 2)
            if (item.condition == 'below') and target_price >= current_price:
                #print('below true', symbol, target_price)
                pb.collection("priceAlert").update(item.id, {"triggered": True})

                newNotification = {
                    'opUser': item.user,
                    'user': '9ncz4wunmhk0k52',  # stocknear bot id
                    'notifyType': 'priceAlert',
                    'priceAlert': item.id,
                    'liveResults': {'symbol': symbol, 'assetType': item.asset_type, 'condition': item.condition, 'targetPrice': target_price, 'currentPrice': current_price},
                }
                pb.collection('notifications').create(newNotification)
                # Send alert via email
                recipient = (pb.collection('users').get_one(item.user)).email
                send_email(recipient, symbol, item.asset_type, current_price, target_price, item.condition)

            elif (item.condition == 'above') and target_price <= current_price:
                #print('above true', symbol, target_price)
                pb.collection("priceAlert").update(item.id, {"triggered": True})

                newNotification = {
                    'opUser': item.user,
                    'user': '9ncz4wunmhk0k52',  # stocknear bot id
                    'notifyType': 'priceAlert',
                    'priceAlert': item.id,
                    'liveResults': {'symbol': symbol, 'assetType': item.asset_type, 'condition': item.condition, 'targetPrice': target_price, 'currentPrice': current_price},
                }
                pb.collection('notifications').create(newNotification)
                # Send alert via email
                recipient = (pb.collection('users').get_one(item.user)).email
                send_email(recipient, symbol, item.asset_type, current_price, target_price, item.condition)

try:
    asyncio.run(run())
except Exception as e:
    print(e)
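A minimal pure-function sketch (names invented, not from the repo) of the trigger rule used in run() above: a 'below' alert fires once the price is at or under the target, an 'above' alert once it is at or over it.

def is_triggered(condition, target_price, current_price):
    if condition == "below":
        return current_price <= target_price
    if condition == "above":
        return current_price >= target_price
    return False

print(is_triggered("below", 100.0, 99.5))   # True
print(is_triggered("above", 100.0, 99.5))   # False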
113
app/cron_price_analysis.py
Normal file
@ -0,0 +1,113 @@
import ujson
import asyncio
import aiohttp
import sqlite3
from datetime import datetime
from ml_models.prophet_model import PricePredictor
import yfinance as yf
import pandas as pd
from tqdm import tqdm
import concurrent.futures


def convert_symbols(symbol_list):
    """
    Converts the symbols in the given list from 'BTCUSD' and 'USDTUSD' format to 'BTC-USD' and 'USDT-USD' format.

    Args:
        symbol_list (list): A list of strings representing the symbols to be converted.

    Returns:
        list: A new list with the symbols converted to the desired format.
    """
    converted_symbols = []
    for symbol in symbol_list:
        # Determine the base and quote currencies
        base_currency = symbol[:-3]
        quote_currency = symbol[-3:]

        # Construct the new symbol in the desired format
        new_symbol = f"{base_currency}-{quote_currency}"
        converted_symbols.append(new_symbol)

    return converted_symbols


async def save_json(symbol, data):
    with open(f"json/price-analysis/{symbol}.json", 'w') as file:
        ujson.dump(data, file)


async def download_data(ticker, start_date, end_date):
    try:
        df = yf.download(ticker, start=start_date, end=end_date, interval="1d")
        df = df.reset_index()
        df = df[['Date', 'Adj Close']]
        df = df.rename(columns={"Date": "ds", "Adj Close": "y"})
        if len(df) > 252 * 2:  # At least 2 years of history is necessary
            q_high = df["y"].quantile(0.99)
            q_low = df["y"].quantile(0.05)
            df = df[(df["y"] > q_low)]
            df = df[(df["y"] < q_high)]
            #df['y'] = df['y'].rolling(window=10).mean()
            #df = df.dropna()
            return df
    except Exception as e:
        print(e)


async def process_symbol(ticker, start_date, end_date, crypto_symbols):
    try:
        df = await download_data(ticker, start_date, end_date)
        data = PricePredictor().run(df)

        if ticker in crypto_symbols:
            ticker = ticker.replace('-', '')  # convert back from BTC-USD to BTCUSD
        await save_json(ticker, data)

    except Exception as e:
        print(e)


async def run():
    con = sqlite3.connect('stocks.db')
    etf_con = sqlite3.connect('etf.db')
    crypto_con = sqlite3.connect('crypto.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    #cursor.execute("SELECT DISTINCT symbol FROM stocks")
    #cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap > 10E9 AND symbol NOT LIKE '%.%'")
    cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap > 1E9")
    stock_symbols = [row[0] for row in cursor.fetchall()]

    etf_cursor = etf_con.cursor()
    etf_cursor.execute("PRAGMA journal_mode = wal")
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs WHERE totalAssets > 5E9")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    crypto_cursor = crypto_con.cursor()
    crypto_cursor.execute("PRAGMA journal_mode = wal")
    crypto_cursor.execute("SELECT DISTINCT symbol FROM cryptos")
    crypto_symbols = [row[0] for row in crypto_cursor.fetchall()]
    crypto_symbols = convert_symbols(crypto_symbols)  # Convert BTCUSD to BTC-USD for yfinance

    con.close()
    etf_con.close()
    crypto_con.close()

    total_symbols = stock_symbols + etf_symbols + crypto_symbols
    print(f"Total tickers: {len(total_symbols)}")
    start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
    end_date = datetime.today().strftime("%Y-%m-%d")

    chunk_size = len(total_symbols) // 70  # Divide the list into N chunks
    chunks = [total_symbols[i:i + chunk_size] for i in range(0, len(total_symbols), chunk_size)]
    for chunk in chunks:
        tasks = []
        for ticker in tqdm(chunk):
            tasks.append(process_symbol(ticker, start_date, end_date, crypto_symbols))

        await asyncio.gather(*tasks)


try:
    asyncio.run(run())
except Exception as e:
    print(e)
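A compressed restatement of what convert_symbols does, for quick reference; the helper name to_yf is illustrative, not part of the repo. The last three characters are treated as the quote currency, so 'BTCUSD' becomes 'BTC-USD' in the format yfinance expects.

def to_yf(symbol):
    return f"{symbol[:-3]}-{symbol[-3:]}"

print([to_yf(s) for s in ["BTCUSD", "ETHUSD", "USDTUSD"]])
# ['BTC-USD', 'ETH-USD', 'USDT-USD']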
112  app/cron_quote.py  Normal file
@@ -0,0 +1,112 @@
import ujson
import asyncio
import aiohttp
import sqlite3
from datetime import datetime
import pytz

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')


async def get_quote_of_stocks(ticker_list):
    ticker_str = ','.join(ticker_list)
    async with aiohttp.ClientSession() as session:
        url = f"https://financialmodelingprep.com/api/v3/quote/{ticker_str}?apikey={api_key}"
        async with session.get(url) as response:
            if response.status == 200:
                return await response.json()
            else:
                return {}


async def get_pre_post_quote_of_stocks(ticker_list):
    ticker_str = ','.join(ticker_list)
    async with aiohttp.ClientSession() as session:
        url = f"https://financialmodelingprep.com/api/v4/batch-pre-post-market/{ticker_str}?apikey={api_key}"
        async with session.get(url) as response:
            if response.status == 200:
                return await response.json()
            else:
                return {}


async def save_quote_as_json(symbol, data):
    with open(f"json/quote/{symbol}.json", 'w') as file:
        ujson.dump(data, file)


async def save_pre_post_quote_as_json(symbol, data):
    try:
        with open(f"json/quote/{symbol}.json", 'r') as file:
            previous_close = (ujson.load(file))['price']
        changes_percentage = round((data['ask']/previous_close-1)*100, 2)
        with open(f"json/pre-post-quote/{symbol}.json", 'w') as file:
            res = {'symbol': symbol, 'price': round(data['ask'], 2), 'changesPercentage': changes_percentage, 'time': data['timestamp']}
            ujson.dump(res, file)
    except:
        pass


async def run():
    con = sqlite3.connect('stocks.db')
    etf_con = sqlite3.connect('etf.db')
    crypto_con = sqlite3.connect('crypto.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol != ?", ('%5EGSPC',))
    stocks_symbols = [row[0] for row in cursor.fetchall()]

    etf_cursor = etf_con.cursor()
    etf_cursor.execute("PRAGMA journal_mode = wal")
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    crypto_cursor = crypto_con.cursor()
    crypto_cursor.execute("PRAGMA journal_mode = wal")
    crypto_cursor.execute("SELECT DISTINCT symbol FROM cryptos")
    crypto_symbols = [row[0] for row in crypto_cursor.fetchall()]

    con.close()
    etf_con.close()
    crypto_con.close()

    new_york_tz = pytz.timezone('America/New_York')
    current_time_new_york = datetime.now(new_york_tz)
    is_market_closed = (current_time_new_york.hour < 9 or
                        (current_time_new_york.hour == 9 and current_time_new_york.minute < 30) or
                        current_time_new_york.hour >= 16)

    # Crypto quotes
    latest_quote = await get_quote_of_stocks(crypto_symbols)
    for item in latest_quote:
        symbol = item['symbol']
        await save_quote_as_json(symbol, item)

    # Stock and ETF quotes
    total_symbols = stocks_symbols + etf_symbols

    chunk_size = len(total_symbols) // 10  # Divide the list into 10 chunks
    chunks = [total_symbols[i:i + chunk_size] for i in range(0, len(total_symbols), chunk_size)]
    for chunk in chunks:
        if is_market_closed == False:
            latest_quote = await get_quote_of_stocks(chunk)
            for item in latest_quote:
                symbol = item['symbol']
                await save_quote_as_json(symbol, item)
                #print(f"Saved data for {symbol}.")

        if is_market_closed == True:
            latest_quote = await get_pre_post_quote_of_stocks(chunk)
            for item in latest_quote:
                symbol = item['symbol']
                await save_pre_post_quote_as_json(symbol, item)
                #print(f"Saved data for {symbol}.")


try:
    asyncio.run(run())
except Exception as e:
    print(e)
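For reference, the chunking in run() slices the combined symbol list into batches and each batch is sent as one comma-separated quote request. A small, self-contained sanity check of that slicing (the symbol names below are invented, not real tickers):

# Illustration only: how cron_quote.py sizes its batch requests.
symbols = [f"SYM{i}" for i in range(105)]      # hypothetical list of 105 tickers
chunk_size = len(symbols) // 10                # -> 10
chunks = [symbols[i:i + chunk_size] for i in range(0, len(symbols), chunk_size)]
print(len(chunks))                             # 11 batches; the last one holds the remainder
print(','.join(chunks[0][:3]))                 # "SYM0,SYM1,SYM2", the format passed to the API
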
72  app/cron_sec_filings.py  Normal file
@@ -0,0 +1,72 @@
from datetime import datetime, timedelta
import ujson
import time
import sqlite3
import pandas as pd
import numpy as np
from collections import defaultdict
import time
import asyncio
import aiohttp
from faker import Faker
from tqdm import tqdm

from dotenv import load_dotenv
import os

load_dotenv()
api_key = os.getenv('FMP_API_KEY')


async def fetch_sec_filings(session, symbol, filing_type):
    url = f"https://financialmodelingprep.com/api/v3/sec_filings/{symbol}?type={filing_type}&page=0&apikey={api_key}"
    async with session.get(url) as response:
        data = await response.json()
        return [{'date': entry['fillingDate'], 'link': entry['finalLink']} for entry in data]


async def save_sec_filings(session, symbol):
    tasks = [
        fetch_sec_filings(session, symbol, '8-k'),
        fetch_sec_filings(session, symbol, '10-k'),
        fetch_sec_filings(session, symbol, '10-q')
    ]

    res_eight_k, res_ten_k, res_ten_q = await asyncio.gather(*tasks)

    if len(res_eight_k) == 0 and len(res_ten_k) == 0 and len(res_ten_q) == 0:
        pass
    else:
        res = {'eightK': res_eight_k, 'tenK': res_ten_k, 'tenQ': res_ten_q}
        with open(f"json/sec-filings/{symbol}.json", 'w') as file:
            ujson.dump(res, file)


async def run():
    con = sqlite3.connect('stocks.db')
    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol NOT LIKE '%.%'")
    symbols = [row[0] for row in cursor.fetchall()]
    con.close()

    async with aiohttp.ClientSession() as session:
        tasks = []
        i = 0
        for symbol in tqdm(symbols):
            tasks.append(save_sec_filings(session, symbol))

            i += 1
            if i % 300 == 0:
                await asyncio.gather(*tasks)
                tasks = []
                print('sleeping mode: ', i)
                await asyncio.sleep(60)  # Pause for 60 seconds

        #tasks.append(self.save_ohlc_data(session, "%5EGSPC"))

        if tasks:
            await asyncio.gather(*tasks)


loop = asyncio.get_event_loop()
loop.run_until_complete(run())
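The run() loop above gathers requests in batches of 300 and then sleeps for a minute, which is one way of staying under the data provider's rate limit. A stripped-down sketch of that throttling pattern as a reusable helper (the batch size and pause here mirror the values in the script but are otherwise arbitrary, not limits confirmed by the API documentation):

import asyncio

async def throttled_gather(coros, batch_size=300, pause_seconds=60):
    """Run coroutines in fixed-size batches, pausing between batches."""
    results = []
    for start in range(0, len(coros), batch_size):
        batch = coros[start:start + batch_size]
        results.extend(await asyncio.gather(*batch))
        if start + batch_size < len(coros):
            await asyncio.sleep(pause_seconds)   # back off before the next batch
    return results
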
205  app/cron_sentiment_analysis.py  Normal file
@@ -0,0 +1,205 @@
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from textblob import TextBlob
from tqdm import tqdm
from datetime import datetime, timedelta
import asyncio
import aiohttp
import sqlite3
import ujson
import time
import random
from dotenv import load_dotenv
import os
import re

'''
import nltk
nltk.download('vader_lexicon')
'''

load_dotenv()
api_key = os.getenv('FMP_API_KEY')
sid = SentimentIntensityAnalyzer()


def convert_symbols(symbol_list):
    """
    Converts the symbols in the given list from 'BTCUSD' and 'USDTUSD' format to 'BTC-USD' and 'USDT-USD' format.

    Args:
        symbol_list (list): A list of strings representing the symbols to be converted.

    Returns:
        list: A new list with the symbols converted to the desired format.
    """
    converted_symbols = []
    for symbol in symbol_list:
        # Determine the base and quote currencies
        base_currency = symbol[:-3]
        quote_currency = symbol[-3:]

        # Construct the new symbol in the desired format
        new_symbol = f"{base_currency}-{quote_currency}"
        converted_symbols.append(new_symbol)

    return converted_symbols


async def get_news_of_stocks(ticker_list, page):
    ticker_str = ','.join(ticker_list)
    async with aiohttp.ClientSession() as session:
        url = f"https://financialmodelingprep.com/api/v3/stock_news?tickers={ticker_str}&page={page}&limit=2000&apikey={api_key}"
        async with session.get(url) as response:
            if response.status == 200:
                return await response.json()
            else:
                return []


async def get_news_of_cryptos(ticker_list, page):
    ticker_str = ','.join(ticker_list)
    async with aiohttp.ClientSession() as session:
        url = f"https://financialmodelingprep.com/api/v4/crypto_news?tickers={ticker_str}&page={page}&limit=2000&apikey={api_key}"
        async with session.get(url) as response:
            if response.status == 200:
                return await response.json()
            else:
                return []


def remove_duplicates(data, key):
    seen = set()
    new_data = []
    for item in data:
        if item[key] not in seen:
            seen.add(item[key])
            new_data.append(item)
    return new_data


def adjust_scaled_score(scaled_score):
    adjustment = random.choice([-2, -1, 0, 1, 2])
    # Add the adjustment to the scaled_score
    scaled_score += adjustment

    # Ensure the scaled_score stays within the range of 0 to 10
    scaled_score = max(0, min(10, scaled_score))

    return scaled_score


def compute_sentiment_score(sentence):
    # Compute sentiment score using VADER
    #sentiment_score = sid.polarity_scores(sentence)['compound']
    sentiment_score = TextBlob(sentence).sentiment.polarity
    # Scale the sentiment score to range from 0 to 10
    scaled_score = (sentiment_score + 1) * 5  # Map from [-1, 1] to [0, 10]
    return scaled_score


def get_sentiment(symbol, res_list, is_crypto=False):
    if is_crypto == True:
        time_format = '%Y-%m-%dT%H:%M:%S.%fZ'
    else:
        time_format = '%Y-%m-%d %H:%M:%S'

    end_date = datetime.now().date()
    end_date_datetime = datetime.combine(end_date, datetime.min.time())  # Convert end_date to datetime

    sentiment_scores_by_period = {}

    for time_period, days in {'oneWeek': 10, 'oneMonth': 30, 'threeMonth': 90, 'sixMonth': 180, 'oneYear': 365}.items():
        start_date = end_date - timedelta(days=days)
        title_data = [item['title'] for item in res_list if start_date <= datetime.strptime(item['publishedDate'], time_format).date() <= end_date_datetime.date()]
        text_data = [item['text'] for item in res_list if start_date <= datetime.strptime(item['publishedDate'], time_format).date() <= end_date_datetime.date()]

        sentiment_scores_title = [compute_sentiment_score(sentence) for sentence in title_data]
        if sentiment_scores_title:  # Handle case when sentiment_scores is empty
            average_sentiment_title_score = round(sum(sentiment_scores_title) / len(sentiment_scores_title))
        else:
            average_sentiment_title_score = 0

        sentiment_scores_text = [compute_sentiment_score(sentence) for sentence in text_data]
        if sentiment_scores_text:  # Handle case when sentiment_scores is empty
            average_sentiment_text_score = round(sum(sentiment_scores_text) / len(sentiment_scores_text))
        else:
            average_sentiment_text_score = 0

        sentiment_scores_by_period[time_period] = adjust_scaled_score(round((average_sentiment_title_score + average_sentiment_text_score) / 2))

    label_mapping = {'oneWeek': '1W', 'oneMonth': '1M', 'threeMonth': '3M', 'sixMonth': '6M', 'oneYear': '1Y'}
    result = [{'label': label_mapping[key], 'value': value} for key, value in sentiment_scores_by_period.items()]

    if any(item['value'] != 0 for item in result):

        if is_crypto == True:
            symbol = symbol.replace('-', '')  #convert back from BTC-USD to BTCUSD

        with open(f"json/sentiment-analysis/{symbol}.json", 'w') as file:
            ujson.dump(result, file)


async def run():
    con = sqlite3.connect('stocks.db')
    etf_con = sqlite3.connect('etf.db')
    crypto_con = sqlite3.connect('crypto.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks")
    stocks_symbols = [row[0] for row in cursor.fetchall()]

    etf_cursor = etf_con.cursor()
    etf_cursor.execute("PRAGMA journal_mode = wal")
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    crypto_cursor = crypto_con.cursor()
    crypto_cursor.execute("PRAGMA journal_mode = wal")
    crypto_cursor.execute("SELECT DISTINCT symbol FROM cryptos")
    crypto_symbols = [row[0] for row in crypto_cursor.fetchall()]

    con.close()
    etf_con.close()
    crypto_con.close()

    #chunk not necessary at the moment

    res_list = []
    for page in tqdm(range(0, 100)):
        data = await get_news_of_cryptos(crypto_symbols, page)
        if len(data) == 0:
            break
        else:
            res_list += data

    crypto_symbols = convert_symbols(crypto_symbols)  #The news articles use the symbol format BTC-USD

    for symbol in crypto_symbols:
        filtered_ticker = [item for item in res_list if item['symbol'] == symbol]
        filtered_ticker = remove_duplicates(filtered_ticker, 'publishedDate')
        get_sentiment(symbol, filtered_ticker, is_crypto=True)

    total_symbols = stocks_symbols + etf_symbols

    chunk_size = len(total_symbols) // 70  # Divide the list into N chunks
    chunks = [total_symbols[i:i + chunk_size] for i in range(0, len(total_symbols), chunk_size)]
    for chunk in tqdm(chunks):
        res_list = []
        for page in tqdm(range(0, 100)):
            data = await get_news_of_stocks(chunk, page)
            if len(data) == 0:
                break
            else:
                res_list += data
        for symbol in chunk:
            filtered_ticker = [item for item in res_list if item['symbol'] == symbol]
            filtered_ticker = remove_duplicates(filtered_ticker, 'publishedDate')
            get_sentiment(symbol, filtered_ticker, is_crypto=False)


try:
    asyncio.run(run())
except Exception as e:
    print(e)
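For reference, compute_sentiment_score() maps TextBlob's polarity in [-1, 1] onto a 0 to 10 scale via (polarity + 1) * 5, so neutral text lands at 5. A minimal check of that mapping (the headline below is made up for illustration):

from textblob import TextBlob

headline = "Company beats earnings expectations"      # hypothetical headline
polarity = TextBlob(headline).sentiment.polarity       # somewhere in [-1, 1]
scaled = (polarity + 1) * 5                             # same mapping as compute_sentiment_score()
print(round(scaled, 2))                                 # 0 = very negative, 5 = neutral, 10 = very positive
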
69  app/cron_similar_stocks.py  Normal file
@@ -0,0 +1,69 @@
import ujson
import asyncio
import aiohttp
import sqlite3
from datetime import datetime
from rating import rating_model
import pandas as pd
from tqdm import tqdm


async def save_similar_stocks(symbol, data):
    with open(f"json/similar-stocks/{symbol}.json", 'w') as file:
        ujson.dump(data, file)


query_template = """
    SELECT
        quote, stock_peers
    FROM
        stocks
    WHERE
        symbol = ?
"""


async def run():
    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol != ?", ('%5EGSPC',))
    stocks_symbols = [row[0] for row in cursor.fetchall()]
    #stocks_symbols = ['AMD']
    for ticker in stocks_symbols:
        filtered_df = []
        df = pd.read_sql_query(query_template, con, params=(ticker,))
        try:
            df = ujson.loads(df['stock_peers'].iloc[0])
        except:
            df = []
        if len(df) > 0:
            df = [stock for stock in df if stock in stocks_symbols]
            for symbol in df:
                try:
                    df = pd.read_sql_query(query_template, con, params=(symbol,))
                    df_dict = df.to_dict()
                    quote_dict = eval(df_dict['quote'][0])[0]
                    filtered_df.append(quote_dict)  # Add the modified result to the combined list
                except:
                    pass

            filtered_df = [
                {
                    "symbol": entry["symbol"],
                    "name": entry["name"],
                    "marketCap": entry["marketCap"],
                    "avgVolume": entry["avgVolume"]
                }
                for entry in filtered_df
            ]

            sorted_df = sorted(filtered_df, key=lambda x: x['marketCap'], reverse=True)

            await save_similar_stocks(ticker, sorted_df)


try:
    con = sqlite3.connect('stocks.db')
    asyncio.run(run())
    con.close()
except Exception as e:
    print(e)
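A small aside on the quote column: elsewhere in this file the stored JSON is parsed with ujson.loads, and the same approach would also work for the quote field, which avoids running eval on database contents. A sketch under the assumption that the column holds a JSON array with one quote object (the values below are invented):

import ujson

# Hypothetical contents of the `quote` column.
raw_quote = '[{"symbol": "AMD", "name": "Advanced Micro Devices", "marketCap": 250000000000, "avgVolume": 60000000}]'
quote_dict = ujson.loads(raw_quote)[0]     # same shape as eval(df_dict['quote'][0])[0]
print(quote_dict['symbol'])                # AMD
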
128  app/cron_stockdeck.py  Normal file
@@ -0,0 +1,128 @@
import ujson
import asyncio
import aiohttp
import sqlite3
from datetime import datetime
from rating import rating_model
import pandas as pd
from tqdm import tqdm


async def save_stockdeck(symbol, data):
    with open(f"json/stockdeck/{symbol}.json", 'w') as file:
        ujson.dump(data, file)


def clean_financial_data(self, list1, list2):
    #combine income_statement with income_growth_statement
    combined_list = []
    for item1 in list1:
        for item2 in list2:
            if item1["date"] == item2["date"]:
                combined_item = {**item1, **item2}  # Combine the dictionaries
                combined_list.append(combined_item)
                break
    return combined_list


query_template = """
    SELECT
        profile, quote,
        esg_ratings, esg_data, stock_split
    FROM
        stocks
    WHERE
        symbol = ?
"""


async def get_data(ticker):
    try:
        df = pd.read_sql_query(query_template, con, params=(ticker,))
        if df.empty:
            final_res = [{}]
            return final_res
        else:
            data = df.to_dict(orient='records')
            data = data[0]

            company_profile = ujson.loads(data['profile'])
            #company_quote = ujson.loads(data['quote'])
            try:
                with open(f"json/quote/{ticker}.json", 'r') as file:
                    company_quote = ujson.load(file)
            except:
                company_quote = {}

            if data['esg_data'] == None:
                company_esg_score = {'ESGScore': 'n/a', 'socialScore': 'n/a', 'environmentalScore': 'n/a', 'governanceScore': 'n/a'}
                company_esg_rating = {'ESGRiskRating': 'n/a', 'industry': 'n/a'}
            else:
                company_esg_score = ujson.loads(data['esg_data'])
                if data['esg_ratings'] == None:
                    company_esg_rating = {'ESGRiskRating': 'n/a', 'industry': 'n/a'}
                else:
                    company_esg_rating = ujson.loads(data['esg_ratings'])

            if data['stock_split'] == None:
                company_stock_split = []
            else:
                company_stock_split = ujson.loads(data['stock_split'])

            res_profile = [
                {
                    'ceoName': company_profile[0]['ceo'],
                    'companyName': company_profile[0]['companyName'],
                    'industry': company_profile[0]['industry'],
                    'image': company_profile[0]['image'],
                    'sector': company_profile[0]['sector'],
                    'beta': company_profile[0]['beta'],
                    'marketCap': company_profile[0]['mktCap'],
                    'avgVolume': company_profile[0]['volAvg'],
                    'country': company_profile[0]['country'],
                    'exchange': company_profile[0]['exchangeShortName'],
                    'earning': company_quote['earningsAnnouncement'],
                    'previousClose': company_quote['price'],  #This is true because I update my db before the market opens, hence the price will be the previousClose price.
                    'website': company_profile[0]['website'],
                    'description': company_profile[0]['description'],
                    'esgScore': company_esg_score['ESGScore'],
                    'socialScore': company_esg_score['socialScore'],
                    'environmentalScore': company_esg_score['environmentalScore'],
                    'governanceScore': company_esg_score['governanceScore'],
                    'esgRiskRating': company_esg_rating['ESGRiskRating'],
                    'fullTimeEmployees': company_profile[0]['fullTimeEmployees'],
                    'stockSplits': company_stock_split,
                }
            ]

            if data['esg_data'] == None:
                company_esg_score = {'ESGScore': 'n/a', 'socialScore': 'n/a', 'environmentalScore': 'n/a', 'governanceScore': 'n/a'}
                company_esg_rating = {'ESGRiskRating': 'n/a', 'industry': 'n/a'}
            else:
                company_esg_score = ujson.loads(data['esg_data'])
                if data['esg_ratings'] == None:
                    company_esg_rating = {'ESGRiskRating': 'n/a', 'industry': 'n/a'}
                else:
                    company_esg_rating = ujson.loads(data['esg_ratings'])

            final_res = {k: v for d in [res_profile] for dict in d for k, v in dict.items()}

            return final_res
    except Exception as e:
        print(e)
        final_res = [{}]
        return final_res


async def run():
    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks")
    stocks_symbols = [row[0] for row in cursor.fetchall()]
    for ticker in stocks_symbols:
        res = await get_data(ticker)
        await save_stockdeck(ticker, [res])


try:
    con = sqlite3.connect('stocks.db')
    asyncio.run(run())
    con.close()
except Exception as e:
    print(e)
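The final comprehension in get_data() collapses a list of single-entry dicts into one flat dict. A minimal illustration with made-up values (this sketch names the inner loop variable item rather than dict so the builtin is not shadowed; the behaviour is the same):

res_profile = [{'ceoName': 'Jane Doe', 'sector': 'Technology'}]   # hypothetical values
final_res = {k: v for d in [res_profile] for item in d for k, v in item.items()}
print(final_res)   # {'ceoName': 'Jane Doe', 'sector': 'Technology'}
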
75  app/cron_ta_rating.py  Normal file
@@ -0,0 +1,75 @@
import ujson
import asyncio
import aiohttp
import sqlite3
from datetime import datetime
from rating import rating_model
import pandas as pd
from tqdm import tqdm


async def save_ta_rating(symbol, data):
    with open(f"json/ta-rating/{symbol}.json", 'w') as file:
        ujson.dump(data, file)


async def run():
    start_date = "2022-01-01"
    end_date = datetime.today().strftime("%Y-%m-%d")

    con = sqlite3.connect('stocks.db')
    etf_con = sqlite3.connect('etf.db')
    crypto_con = sqlite3.connect('crypto.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks")
    stocks_symbols = [row[0] for row in cursor.fetchall()]

    etf_cursor = etf_con.cursor()
    etf_cursor.execute("PRAGMA journal_mode = wal")
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    crypto_cursor = crypto_con.cursor()
    crypto_cursor.execute("PRAGMA journal_mode = wal")
    crypto_cursor.execute("SELECT DISTINCT symbol FROM cryptos")
    crypto_symbols = [row[0] for row in crypto_cursor.fetchall()]

    total_symbols = stocks_symbols + etf_symbols + crypto_symbols

    for symbol in tqdm(total_symbols):
        table_name = None
        if symbol in etf_symbols:  # Fixed variable name from symbols to symbol
            query_con = etf_con
        elif symbol in crypto_symbols:
            query_con = crypto_con
        elif symbol in stocks_symbols:
            query_con = con

        query_template = """
            SELECT
                date, open, high, low, close, volume
            FROM
                "{symbol}"
            WHERE
                date BETWEEN ? AND ?
        """
        query = query_template.format(symbol=symbol)
        df = pd.read_sql_query(query, query_con, params=(start_date, end_date))

        try:
            # Assuming rating_model and save_quote_as_json are defined elsewhere
            res_dict = rating_model(df).ta_rating()
            await save_ta_rating(symbol, res_dict)
        except Exception as e:
            print(e)

    con.close()
    etf_con.close()
    crypto_con.close()


try:
    asyncio.run(run())
except Exception as e:
    print(e)
95  app/cron_top_etf_holder.py  Normal file
@@ -0,0 +1,95 @@
import ujson
import asyncio
from tqdm import tqdm
import pandas as pd
import sqlite3
from concurrent.futures import ThreadPoolExecutor, as_completed
import time


async def save_json_file(symbol, data):
    with open(f"json/top-etf-ticker-holder/{symbol}.json", 'w') as file:
        ujson.dump(data, file)


# Fetch all ETF data in one go
def fetch_all_etf_data(etf_symbols):
    etf_data = {}
    for etf_ticker in etf_symbols:
        try:
            df = pd.read_sql_query(query_template, etf_con, params=(etf_ticker,))
            etf_data[etf_ticker] = df
        except Exception as e:
            print(f"Error fetching data for {etf_ticker}: {e}")
    return etf_data


def process_etf(etf_ticker, stock_ticker, df):
    etf_weight_percentages = []
    try:
        for index, row in df.iterrows():
            holdings = ujson.loads(row['holding'])
            total_assets = int(row['totalAssets'])
            name = row['name']
            for holding in holdings:
                if holding['asset'] == stock_ticker:
                    etf_weight_percentages.append({
                        'symbol': etf_ticker,
                        'name': name,
                        'totalAssets': total_assets,
                        'weightPercentage': holding['weightPercentage']
                    })
                    break  # No need to continue checking if found
    except Exception as e:
        print(e)
    return etf_weight_percentages


async def save_and_process(stock_ticker, etf_data):
    etf_weight_percentages = []
    with ThreadPoolExecutor(max_workers=14) as executor:
        futures = [executor.submit(process_etf, etf_ticker, stock_ticker, df) for etf_ticker, df in etf_data.items()]
        for future in as_completed(futures):
            etf_weight_percentages.extend(future.result())

    # Keep only the ETFs where totalAssets > 0
    etf_weight_percentages = [etf for etf in etf_weight_percentages if etf['totalAssets'] > 0]

    data = sorted(etf_weight_percentages, key=lambda x: x['weightPercentage'], reverse=True)[:5]
    if len(data) > 0:
        await save_json_file(stock_ticker, data)


async def run():
    # Main loop
    etf_data = fetch_all_etf_data(etf_symbols)
    for stock_ticker in tqdm(stocks_symbols):
        await save_and_process(stock_ticker, etf_data)


try:
    con = sqlite3.connect('stocks.db')
    etf_con = sqlite3.connect('etf.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks")
    stocks_symbols = [row[0] for row in cursor.fetchall()]

    etf_cursor = etf_con.cursor()
    etf_cursor.execute("PRAGMA journal_mode = wal")
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    query_template = """
        SELECT
            name, totalAssets, holding
        FROM
            etfs
        WHERE
            symbol = ?
    """

    asyncio.run(run())
    con.close()
    etf_con.close()
except Exception as e:
    print(e)
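save_and_process() drops ETFs that report no assets and then keeps the five largest positions by weight. A tiny self-contained check of that selection (every number below is invented):

etf_weight_percentages = [
    {'symbol': 'ETF_A', 'totalAssets': 1_000_000, 'weightPercentage': 2.1},   # hypothetical rows
    {'symbol': 'ETF_B', 'totalAssets': 0,         'weightPercentage': 9.9},   # dropped: no assets
    {'symbol': 'ETF_C', 'totalAssets': 500_000,   'weightPercentage': 4.7},
]
kept = [etf for etf in etf_weight_percentages if etf['totalAssets'] > 0]
top5 = sorted(kept, key=lambda x: x['weightPercentage'], reverse=True)[:5]
print([etf['symbol'] for etf in top5])   # ['ETF_C', 'ETF_A']
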
134  app/cron_trend_analysis.py  Normal file
@@ -0,0 +1,134 @@
import ujson
import asyncio
import aiohttp
import sqlite3
from datetime import datetime
from ml_models.classification import TrendPredictor
import yfinance as yf
import pandas as pd
from tqdm import tqdm
import concurrent.futures
import re


async def save_json(symbol, data):
    with open(f"json/trend-analysis/{symbol}.json", 'w') as file:
        ujson.dump(data, file)


async def download_data(ticker, start_date, end_date):
    try:
        df = yf.download(ticker, start=start_date, end=end_date, interval="1d")
        df = df.rename(columns={'Adj Close': 'close', 'Open': 'open', 'High': 'high', 'Low': 'low', 'Volume': 'volume', 'Date': 'date'})
        return df
    except Exception as e:
        print(e)


def convert_symbols(symbol_list):
    """
    Converts the symbols in the given list from 'BTCUSD' and 'USDTUSD' format to 'BTC-USD' and 'USDT-USD' format.

    Args:
        symbol_list (list): A list of strings representing the symbols to be converted.

    Returns:
        list: A new list with the symbols converted to the desired format.
    """
    converted_symbols = []
    for symbol in symbol_list:
        # Determine the base and quote currencies
        base_currency = symbol[:-3]
        quote_currency = symbol[-3:]

        # Construct the new symbol in the desired format
        new_symbol = f"{base_currency}-{quote_currency}"
        converted_symbols.append(new_symbol)

    return converted_symbols


async def process_symbol(ticker, start_date, end_date, crypto_symbols):
    try:
        best_features = ['close', 'williams', 'fi', 'emv', 'adi', 'cmf', 'bb_hband', 'bb_lband', 'vpt', 'stoch', 'stoch_rsi', 'rsi', 'nvi', 'macd', 'mfi', 'cci', 'obv', 'adx', 'adx_pos', 'adx_neg']
        test_size = 0.2
        df = await download_data(ticker, start_date, end_date)

        async def process_nth_day(nth_day):
            try:
                predictor = TrendPredictor(nth_day=nth_day, path="ml_models/weights")
                df_copy = df.copy()
                df_copy["Target"] = ((df_copy["close"].shift(-nth_day) > df_copy["close"])).astype(int)
                predictors = predictor.generate_features(df_copy)
                df_copy = df_copy.dropna(subset=df_copy.columns[df_copy.columns != "nth_day"])
                split_size = int(len(df_copy) * (1 - test_size))
                test_data = df_copy.iloc[split_size:]

                res_dict = predictor.evaluate_model(test_data[best_features], test_data['Target'])

                if nth_day == 5:
                    time_period = 'oneWeek'
                elif nth_day == 20:
                    time_period = 'oneMonth'
                elif nth_day == 60:
                    time_period = 'threeMonth'

                return {'label': time_period, **res_dict}

            except Exception as e:
                print(e)
                return None

        tasks = [process_nth_day(nth_day) for nth_day in [5, 20, 60]]
        results = await asyncio.gather(*tasks)
        res_list = [r for r in results if r is not None]

        if ticker in crypto_symbols:
            ticker = ticker.replace('-', '')  #convert back from BTC-USD to BTCUSD

        await save_json(ticker, res_list)

    except Exception as e:
        print(e)


async def run():
    con = sqlite3.connect('stocks.db')
    etf_con = sqlite3.connect('etf.db')
    crypto_con = sqlite3.connect('crypto.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    #cursor.execute("SELECT DISTINCT symbol FROM stocks")
    cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap > 1E9 AND symbol NOT LIKE '%.%'")
    stock_symbols = [row[0] for row in cursor.fetchall()]

    etf_cursor = etf_con.cursor()
    etf_cursor.execute("PRAGMA journal_mode = wal")
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs WHERE totalAssets > 5E9")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    crypto_cursor = crypto_con.cursor()
    crypto_cursor.execute("PRAGMA journal_mode = wal")
    crypto_cursor.execute("SELECT DISTINCT symbol FROM cryptos")
    crypto_symbols = [row[0] for row in crypto_cursor.fetchall()]
    crypto_symbols = convert_symbols(crypto_symbols)

    con.close()
    etf_con.close()
    crypto_con.close()

    total_symbols = stock_symbols + etf_symbols + crypto_symbols
    print(f"Total tickers: {len(total_symbols)}")
    start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
    end_date = datetime.today().strftime("%Y-%m-%d")

    chunk_size = len(total_symbols) // 70  # Divide the list into N chunks
    chunks = [total_symbols[i:i + chunk_size] for i in range(0, len(total_symbols), chunk_size)]
    for chunk in chunks:
        tasks = []
        for ticker in tqdm(chunk):
            tasks.append(process_symbol(ticker, start_date, end_date, crypto_symbols))

        await asyncio.gather(*tasks)


try:
    asyncio.run(run())
except Exception as e:
    print(e)
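process_nth_day() labels each bar as 1 when the close nth_day bars later is higher than today's close; the forward-looking shift leaves the last nth_day rows without a valid label, which is why they end up as NaN features and are dropped. A minimal illustration of the labelling with made-up prices:

import pandas as pd

nth_day = 2                                                     # horizon used for the label
df = pd.DataFrame({'close': [10.0, 11.0, 10.5, 12.0, 11.5]})    # hypothetical closes
df['Target'] = (df['close'].shift(-nth_day) > df['close']).astype(int)
print(df['Target'].tolist())    # [1, 1, 1, 0, 0]; the final nth_day labels are not meaningful
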
138  app/cron_var.py  Normal file
@@ -0,0 +1,138 @@
import pandas as pd
from datetime import datetime
#import yfinance as yf
import numpy as np
import ujson
import asyncio
import aiohttp
import sqlite3
from tqdm import tqdm


async def save_json(symbol, data):
    with open(f"json/var/{symbol}.json", 'w') as file:
        ujson.dump(data, file)


# Define risk rating scale
def assign_risk_rating(var):
    if var >= 25:  # This threshold can be adjusted based on your specific criteria
        return 1
    elif var >= 20:
        return 2
    elif var >= 15:
        return 3
    elif var >= 10:
        return 4
    elif var >= 8:
        return 5
    elif var >= 6:
        return 6
    elif var >= 4:
        return 7
    elif var >= 2:
        return 8
    elif var >= 1:
        return 9
    else:
        return 10


def compute_var(df):
    # Calculate daily returns
    df['Returns'] = df['close'].pct_change()
    df = df.dropna()
    # Calculate VaR at 95% confidence level
    confidence_level = 0.95
    var = abs(np.percentile(df['Returns'], 100 * (1 - confidence_level)))
    var_N_days = round(var * np.sqrt(5) * 100, 2)  # N days

    # Assign risk rating
    risk_rating = assign_risk_rating(var_N_days)
    outlook = 'Neutral'
    if risk_rating < 5:
        outlook = 'Risky'
    elif risk_rating > 5:
        outlook = 'Minimum Risk'

    return {'rating': risk_rating, 'var': -var_N_days, 'outlook': outlook}

    #print(f"The Value at a 95% confidence level is: {var_N_days}%")
    #print(f"The risk rating based on the Value at Risk is: {risk_rating}")


async def run():
    start_date = "2015-01-01"
    end_date = datetime.today().strftime("%Y-%m-%d")

    con = sqlite3.connect('stocks.db')
    etf_con = sqlite3.connect('etf.db')
    crypto_con = sqlite3.connect('crypto.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks")
    stocks_symbols = [row[0] for row in cursor.fetchall()]

    etf_cursor = etf_con.cursor()
    etf_cursor.execute("PRAGMA journal_mode = wal")
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    crypto_cursor = crypto_con.cursor()
    crypto_cursor.execute("PRAGMA journal_mode = wal")
    crypto_cursor.execute("SELECT DISTINCT symbol FROM cryptos")
    crypto_symbols = [row[0] for row in crypto_cursor.fetchall()]

    total_symbols = stocks_symbols + etf_symbols + crypto_symbols

    for symbol in tqdm(total_symbols):
        if symbol in etf_symbols:  # Fixed variable name from symbols to symbol
            query_con = etf_con
        elif symbol in crypto_symbols:
            query_con = crypto_con
        elif symbol in stocks_symbols:
            query_con = con

        query_template = """
            SELECT
                date, open, high, low, close, volume
            FROM
                "{symbol}"
            WHERE
                date BETWEEN ? AND ?
        """
        query = query_template.format(symbol=symbol)
        df = pd.read_sql_query(query, query_con, params=(start_date, end_date))

        try:
            res_dict = compute_var(df)
            await save_json(symbol, res_dict)
        except Exception as e:
            print(e)

    con.close()
    etf_con.close()
    crypto_con.close()


try:
    asyncio.run(run())
except Exception as e:
    print(e)


#Test mode
'''
# Download data
ticker = 'TCON'
start_date = datetime(2015, 1, 1)
end_date = datetime.today()

df = yf.download(ticker, start=start_date, end=end_date, interval="1d")
df = df.reset_index()
df = df[['Date', 'Close']]

# Calculate daily returns
df['Returns'] = df['Close'].pct_change()
df = df.dropna()
'''
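compute_var() takes the absolute 5th-percentile daily return, scales it to roughly a one-week horizon with the square-root-of-time rule (sqrt of 5 trading days), and expresses it in percent. A small numeric check with synthetic returns (the volatility and sample size here are invented):

import numpy as np

np.random.seed(0)
daily_returns = np.random.normal(0, 0.02, 500)           # hypothetical daily returns with 2% volatility
confidence_level = 0.95
var_daily = abs(np.percentile(daily_returns, 100 * (1 - confidence_level)))
var_one_week = round(var_daily * np.sqrt(5) * 100, 2)     # same scaling as compute_var()
print(var_one_week)                                       # somewhere around 7% for this synthetic sample
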
159  app/cron_wiim.py  Normal file
@@ -0,0 +1,159 @@
import aiohttp
import aiofiles
import ujson
import sqlite3
import pandas as pd
import asyncio
import pytz
import time
import os
from dotenv import load_dotenv
from datetime import datetime, timedelta

date_format = "%a, %d %b %Y %H:%M:%S %z"

load_dotenv()
api_key = os.getenv('BENZINGA_API_KEY')

headers = {"accept": "application/json"}

query_template = """
    SELECT
        close
    FROM
        "{symbol}"
    WHERE
        date BETWEEN ? AND ?
"""

# List of holidays when the stock market is closed
holidays = [
    "2024-01-01",
    "2024-03-29",
    "2024-12-25",
]


def is_holiday(date):
    """Check if the given date is a holiday"""
    str_date = date.strftime("%Y-%m-%d")
    return str_date in holidays


def correct_weekday(selected_date):
    # Monday is 0 and Sunday is 6
    if selected_date.weekday() == 0:
        selected_date -= timedelta(3)
    elif selected_date.weekday() <= 4:
        selected_date -= timedelta(1)
    elif selected_date.weekday() == 5:
        selected_date -= timedelta(1)
    elif selected_date.weekday() == 6:
        selected_date -= timedelta(2)

    # Check if the selected date is a holiday and adjust if necessary
    while is_holiday(selected_date):
        selected_date -= timedelta(1)

    # Adjust again if the resulting date is a Saturday or Sunday
    if selected_date.weekday() >= 5:
        selected_date -= timedelta(selected_date.weekday() - 4)

    return selected_date


async def get_endpoint(session, symbol, con):
    url = "https://api.benzinga.com/api/v2/news"
    querystring = {"token": api_key, "tickers": symbol, "channels": "WIIM", "pageSize": "20", "displayOutput": "full"}
    async with session.get(url, params=querystring, headers=headers) as response:
        res_list = []
        res = ujson.loads(await response.text())

        for item in res:
            date_obj = datetime.strptime(item['created'], date_format)
            date_obj_utc = date_obj.astimezone(pytz.utc)

            new_date_obj_utc = date_obj_utc

            start_date_obj_utc = correct_weekday(date_obj_utc)

            start_date = start_date_obj_utc.strftime("%Y-%m-%d")
            end_date = new_date_obj_utc.strftime("%Y-%m-%d")

            new_date_str = new_date_obj_utc.strftime("%b %d, %Y")
            query = query_template.format(symbol=symbol)

            try:
                df = pd.read_sql_query(query, con, params=(start_date, end_date))
                if not df.empty:
                    change_percent = round((df['close'].iloc[1]/df['close'].iloc[0] - 1)*100, 2)
                else:
                    change_percent = '-'
            except Exception as e:
                change_percent = '-'

            res_list.append({'date': new_date_str, 'text': item['title'], 'changesPercentage': change_percent})
        with open(f"json/wiim/company/{symbol}.json", 'w') as file:
            ujson.dump(res_list, file)

        '''
        current_date = datetime.now(pytz.utc)
        date_difference = current_date - new_date_obj_utc
        if date_difference.days < 2:
            new_date_str = new_date_obj_utc.strftime("%b %d, %Y")
            formatted_data = {'wiim': res[0]['title'], 'updated': new_date_str}

            with open(f"json/wiim/{symbol}.json", 'w') as file:
                ujson.dump(formatted_data, file)
        '''


async def get_latest_wiim(session, stock_symbols, etf_symbols):
    url = "https://api.benzinga.com/api/v2/news"
    querystring = {"token": api_key, "channels": "WIIM", "pageSize": "20", "displayOutput": "full"}

    try:
        async with session.get(url, params=querystring, headers=headers) as response:
            res_list = []
            res = ujson.loads(await response.text())
            for item in res:
                for el in item['stocks']:
                    # Update the 'name' key to 'ticker'
                    if 'name' in el:
                        el['ticker'] = el.pop('name')
                    if el['ticker'] in stock_symbols:
                        el['assetType'] = 'stock'
                    elif el['ticker'] in etf_symbols:
                        el['assetType'] = 'etf'
                res_list.append({'date': item['created'], 'text': item['title'], 'stocks': item['stocks']})
            with open(f"json/wiim/rss-feed/data.json", 'w') as file:
                ujson.dump(res_list, file)

    except Exception as e:
        #pass
        print(e)


async def run():
    con = sqlite3.connect('stocks.db')

    cursor = con.cursor()
    cursor.execute("PRAGMA journal_mode = wal")
    cursor.execute("SELECT DISTINCT symbol FROM stocks")
    stock_symbols = [row[0] for row in cursor.fetchall()]

    etf_con = sqlite3.connect('etf.db')

    etf_cursor = etf_con.cursor()
    etf_cursor.execute("PRAGMA journal_mode = wal")
    etf_cursor.execute("SELECT DISTINCT symbol FROM etfs")
    etf_symbols = [row[0] for row in etf_cursor.fetchall()]

    async with aiohttp.ClientSession() as session:
        await get_latest_wiim(session, stock_symbols, etf_symbols)
        await asyncio.gather(*(get_endpoint(session, symbol, con) for symbol in stock_symbols))
        await asyncio.gather(*(get_endpoint(session, symbol, etf_con) for symbol in etf_symbols))

    con.close()
    etf_con.close()
try:
    asyncio.run(run())
except Exception as e:
    print(e)
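correct_weekday() walks an article's date back to the previous trading day: Mondays step back three days to Friday, other weekdays one day, weekends to Friday, and any listed holiday keeps stepping back. A condensed trace of the Monday branch, using the Good Friday entry from the holiday list above (the article date itself is hypothetical):

from datetime import datetime, timedelta

holidays = ["2024-03-29"]                       # Good Friday, as in the list above

selected_date = datetime(2024, 4, 1)            # a Monday
selected_date -= timedelta(3)                   # Monday -> previous Friday (2024-03-29)
while selected_date.strftime("%Y-%m-%d") in holidays:
    selected_date -= timedelta(1)               # holiday -> keep stepping back
print(selected_date.strftime("%Y-%m-%d"))       # 2024-03-28, the prior trading day
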
106  app/financial_modeling_prep.py  Normal file
@@ -0,0 +1,106 @@
import pandas as pd
import time
import ujson
import aiohttp
import asyncio


query_template = """
    SELECT
        profile, quote,
        esg_ratings, esg_data, stock_split
    FROM
        stocks
    WHERE
        symbol = ?
"""


class FinancialModelingPrep:
    def __init__(self, ticker, con):
        #self.url = url
        self.ticker = ticker
        self.con = con

    def clean_financial_data(self, list1, list2):
        #combine income_statement with income_growth_statement
        combined_list = []
        for item1 in list1:
            for item2 in list2:
                if item1["date"] == item2["date"]:
                    combined_item = {**item1, **item2}  # Combine the dictionaries
                    combined_list.append(combined_item)
                    break
        return combined_list

    async def company_info(self):
        df = pd.read_sql_query(query_template, self.con, params=(self.ticker,))

        #con.close()
        if df.empty:
            final_res = [{}]
            return final_res
        else:
            data = df.to_dict(orient='records')
            data = data[0]

            company_profile = ujson.loads(data['profile'])
            company_quote = ujson.loads(data['quote'])
            company_tier_list = data['rating']

            if data['esg_data'] == None:
                company_esg_score = {'ESGScore': 'n/a', 'socialScore': 'n/a', 'environmentalScore': 'n/a', 'governanceScore': 'n/a'}
                company_esg_rating = {'ESGRiskRating': 'n/a', 'industry': 'n/a'}
            else:
                company_esg_score = ujson.loads(data['esg_data'])
                if data['esg_ratings'] == None:
                    company_esg_rating = {'ESGRiskRating': 'n/a', 'industry': 'n/a'}
                else:
                    company_esg_rating = ujson.loads(data['esg_ratings'])

            if data['stock_split'] == None:
                company_stock_split = []
            else:
                company_stock_split = ujson.loads(data['stock_split'])

            res_profile = [
                {
                    'ceoName': company_profile[0]['ceo'],
                    'companyName': company_profile[0]['companyName'],
                    'industry': company_profile[0]['industry'],
                    'image': company_profile[0]['image'],
                    'sector': company_profile[0]['sector'],
                    'beta': company_profile[0]['beta'],
                    'marketCap': company_profile[0]['mktCap'],
                    'avgVolume': company_profile[0]['volAvg'],
                    'country': company_profile[0]['country'],
                    'exchange': company_profile[0]['exchangeShortName'],
                    'earning': company_quote[0]['earningsAnnouncement'],
                    'previousClose': company_quote[0]['price'],  #This is true because I update my db before the market opens, hence the price will be the previousClose price.
                    'website': company_profile[0]['website'],
                    'description': company_profile[0]['description'],
                    'esgScore': company_esg_score['ESGScore'],
                    'socialScore': company_esg_score['socialScore'],
                    'environmentalScore': company_esg_score['environmentalScore'],
                    'governanceScore': company_esg_score['governanceScore'],
                    'esgRiskRating': company_esg_rating['ESGRiskRating'],
                    'fullTimeEmployees': company_profile[0]['fullTimeEmployees'],
                    'stockSplits': company_stock_split,
                }
            ]

            if data['esg_data'] == None:
                company_esg_score = {'ESGScore': 'n/a', 'socialScore': 'n/a', 'environmentalScore': 'n/a', 'governanceScore': 'n/a'}
                company_esg_rating = {'ESGRiskRating': 'n/a', 'industry': 'n/a'}
            else:
                company_esg_score = ujson.loads(data['esg_data'])
                if data['esg_ratings'] == None:
                    company_esg_rating = {'ESGRiskRating': 'n/a', 'industry': 'n/a'}
                else:
                    company_esg_rating = ujson.loads(data['esg_ratings'])

            final_res = {k: v for d in [res_profile] for dict in d for k, v in dict.items()}

            return final_res
17  app/html_template/price_alert.html  Normal file
@@ -0,0 +1,17 @@
<!DOCTYPE html>
<html lang="en" class="bg-[#fff]">


<body data-sveltekit-preload-data="hover" class="bg-[#fff] overflow-x-hidden">
<div> <body data-svelte-h="svelte-t9a30e"><div><div><center style="width:100%;table-layout:fixed;background-color:#ffffff;padding-bottom:60px"><table role="presentation" width="100%" style="background-color:#ffffff;margin:0 auto;width:100%;max-width:600px;border-spacing:0;font-family:Arial,Helvetica,sans-serif;color:#171a1b"><tbody><tr><td height="6" style="padding:0"></td></tr> <tr><td style="padding:0;border:1px solid #d1d5db;border-radius:10px;padding:15px 10px 15px 10px"><table role="presentation" width="100%" style="border-spacing:0"><tbody><tr><td align="center" style="padding:0;font-size:13px;color:#545454;text-decoration:none">currentDate</td></tr> <tr><td height="8" style="padding:0"></td></tr> <tr><td style="padding:0" align="center"><a href="https://stocknear.com" target="_blank" rel="noopener" style="color:inherit; text-decoration:none"><img src="https://stocknear-pocketbase.s3.amazonaws.com/logo/app_logo.png" width="60" alt="Logo" style="border:0; margin-right: 10px; border-radius: 50%;" data-bit="iit"> <h1 style="font-weight: 1000; font-size: 1.3rem">Price Alert Activated</h1></a></td></tr></tbody></table></td></tr> <tr><td height="20" style="padding:0"></td></tr> <tr><td style="padding:0;border:1px solid #d1d5db;border-radius:10px;padding:15px 10px 15px 10px"><table role="presentation" width="100%" style="border-spacing:0"><tbody><tr><td height="10" style="padding:0"></td></tr> <tr><td style="padding:0"><table role="presentation" width="100%" style="border-spacing:0;"><tbody><tr><td style="padding:0"><ul style="padding-left: 15px;"><li style="list-style-type: none; color:#000;font-size:16px;line-height:22px;margin-bottom:20px; "><strong>Price alert triggered for</strong> <a href="https://stocknear.com/asset-link" target="_blank" rel="noopener" style="color:#1e73ba;text-decoration:underline" data-saferedirecturl="https://stocknear.com/asset-link">symbol</a>:
addingSentence</li></ul></td></tr></tbody></table></td></tr></tbody></table></td></tr> <tr><td height="20" style="padding:0"></td></tr></tbody></table> <table role="presentation" width="100%" style="border-spacing:0"><tbody><tr><td style="padding-top:10px;width:3%"></td> <td align="center" style="padding:0"><a href="https://stocknear.com" target="_blank" rel="noopener" style="color:#1e73ba;text-decoration:underline;color:#434343!important;font-size:16px;margin:0 3px;line-height:2em" data-saferedirecturl="https://stocknear.com">Home Page</a> |
<a href="https://stocknear.com/price-alert" target="_blank" rel="noopener" style="color:#1e73ba;text-decoration:underline;color:#434343!important;font-size:16px;margin:0 3px;line-height:2em">Your Price Alerts</a> |
<a href="https://stocknear.com/about" target="_blank" rel="noopener" style="color:#1e73ba;text-decoration:underline;color:#434343!important;font-size:16px;margin:0 3px;line-height:2em" data-saferedirecturl="https://stocknear.com/about">About Us</a></td> <td style="padding:0;width:3%"></td></tr></tbody></table></center></div></div></body>


</div>
</body>
</html>
2922  app/main.py  Normal file
File diff suppressed because it is too large
152
app/market_movers.py
Normal file
152
app/market_movers.py
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
import sqlite3
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
import pandas as pd
|
||||||
|
import json
|
||||||
|
import time
|
||||||
|
|
||||||
|
class Past_Market_Movers:
|
||||||
|
def __init__(self):
|
||||||
|
self.con = sqlite3.connect('backup_db/stocks.db')
|
||||||
|
self.cursor = self.con.cursor()
|
||||||
|
self.cursor.execute("PRAGMA journal_mode = wal")
|
||||||
|
self.symbols = self.get_stock_symbols()
|
||||||
|
|
||||||
|
def get_stock_symbols(self):
|
||||||
|
self.cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE symbol != ?", ('%5EGSPC',))
|
||||||
|
return [row[0] for row in self.cursor.fetchall()]
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def check_if_holiday():
|
||||||
|
holidays = {
|
||||||
|
datetime(2023, 5, 29): 'memorial_day',
|
||||||
|
datetime(2023, 6, 19): 'independence_day',
|
||||||
|
datetime(2023, 6, 20): 'independence_day+1',
|
||||||
|
datetime(2023, 9, 4): 'labor_day',
|
||||||
|
}
|
||||||
|
|
||||||
|
current_datetime = datetime.today()
|
||||||
|
for holiday_date, holiday_name in holidays.items():
|
||||||
|
if current_datetime == holiday_date:
|
||||||
|
return holiday_name
|
||||||
|
return None
|
||||||
|
|
||||||
|
def correct_weekday_interval(self, prev_day):
|
||||||
|
holiday = self.check_if_holiday()
|
||||||
|
if holiday:
|
||||||
|
if holiday == 'memorial_day':
|
||||||
|
start_date = datetime(2023, 5, 26)
|
||||||
|
elif holiday in ('independence_day', 'independence_day+1'):
|
||||||
|
start_date = datetime(2023, 6, 16)
|
||||||
|
else:
|
||||||
|
start_date = datetime(2023, 9, 1)
|
||||||
|
else:
|
||||||
|
current_date = datetime.today() - timedelta(prev_day)
|
||||||
|
current_weekday = current_date.weekday()
|
||||||
|
if current_weekday in (5, 6): # Saturday or Sunday
|
||||||
|
start_date = current_date - timedelta(days=current_weekday % 5 + 1)
|
||||||
|
else:
|
||||||
|
start_date = current_date
|
||||||
|
return start_date.strftime("%Y-%m-%d")
|
||||||
|
|
||||||
|
def run(self, time_periods=[7,30,90,180]):
|
||||||
|
performance_data = []
|
||||||
|
query_template = """
|
||||||
|
SELECT date, close, volume FROM "{ticker}" WHERE date >= ?
|
||||||
|
"""
|
||||||
|
query_fundamental_template = """
|
||||||
|
SELECT marketCap, name FROM stocks WHERE symbol = ?
|
||||||
|
"""
|
||||||
|
gainer_json = {}
|
||||||
|
loser_json = {}
|
||||||
|
active_json = {}
|
||||||
|
|
||||||
|
for time_period in time_periods:
|
||||||
|
performance_data = []
|
||||||
|
high_volume = []
|
||||||
|
gainer_data = []
|
||||||
|
loser_data = []
|
||||||
|
active_data = []
|
||||||
|
|
||||||
|
start_date = self.correct_weekday_interval(time_period)
|
||||||
|
for ticker in self.symbols:
|
||||||
|
try:
|
||||||
|
query = query_template.format(ticker=ticker)
|
||||||
|
df = pd.read_sql_query(query, self.con, params=(start_date,))
|
||||||
|
if not df.empty:
|
||||||
|
fundamental_data = pd.read_sql_query(query_fundamental_template, self.con, params=(ticker,))
|
||||||
|
avg_volume = df['volume'].mean()
|
||||||
|
if avg_volume > 1E6 and df['close'].mean() > 1:
|
||||||
|
changes_percentage = ((df['close'].iloc[-1] - df['close'].iloc[0]) / df['close'].iloc[0]) * 100
|
||||||
|
performance_data.append((ticker, fundamental_data['name'].iloc[0], df['close'].iloc[-1], changes_percentage, avg_volume, int(fundamental_data['marketCap'].iloc[0])))
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
# Sort the stocks by percentage change in descending order
|
||||||
|
performance_data.sort(key=lambda x: x[3], reverse=True)
|
||||||
|
high_volume = sorted(performance_data, key=lambda x: x[4], reverse=True)
|
||||||
|
|
||||||
|
for symbol, name, price, changes_percentage, volume, market_cap in performance_data[:20]:
|
||||||
|
gainer_data.append({'symbol': symbol, 'name': name, 'price': price, 'changesPercentage': changes_percentage, 'volume': volume, 'marketCap': market_cap})
|
||||||
|
for symbol, name, price, changes_percentage, volume, market_cap in performance_data[-20:]:
|
||||||
|
loser_data.append({'symbol': symbol, 'name': name, 'price': price, 'changesPercentage': changes_percentage, 'volume': volume, 'marketCap': market_cap})
|
||||||
|
for symbol, name, price, changes_percentage, volume, market_cap in high_volume[:20]:
|
||||||
|
active_data.append({'symbol': symbol, 'name': name, 'price': price, 'changesPercentage': changes_percentage, 'volume': volume, 'marketCap': market_cap})
|
||||||
|
|
||||||
|
|
||||||
|
if time_period == 7:
|
||||||
|
gainer_json['1W'] = gainer_data
|
||||||
|
loser_json['1W'] = loser_data
|
||||||
|
active_json['1W'] = active_data
|
||||||
|
elif time_period == 30:
|
||||||
|
gainer_json['1M'] = gainer_data
|
||||||
|
loser_json['1M'] = loser_data
|
||||||
|
active_json['1M'] = active_data
|
||||||
|
elif time_period == 90:
|
||||||
|
gainer_json['3M'] = gainer_data
|
||||||
|
loser_json['3M'] = loser_data
|
||||||
|
active_json['3M'] = active_data
|
||||||
|
elif time_period == 180:
|
||||||
|
gainer_json['6M'] = gainer_data
|
||||||
|
loser_json['6M'] = loser_data
|
||||||
|
active_json['6M'] = active_data
|
||||||
|
|
||||||
|
return gainer_json, loser_json, active_json
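# Editor's note: only tickers with an average volume above 1M shares and an average close above $1
# over the window are ranked, so thinly traded and sub-dollar symbols never reach the top-20 lists.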
|
||||||
|
|
||||||
|
|
||||||
|
def create_table(self):
|
||||||
|
"""
|
||||||
|
Recreate the 'market_movers' table (dropping any existing one) with 'gainer', 'loser', and 'most_active' columns.
|
||||||
|
"""
|
||||||
|
query_drop = "DROP TABLE IF EXISTS market_movers"
|
||||||
|
self.con.execute(query_drop)
|
||||||
|
query_create = """
|
||||||
|
CREATE TABLE IF NOT EXISTS market_movers (
|
||||||
|
gainer TEXT,
|
||||||
|
loser TEXT,
|
||||||
|
most_active TEXT
|
||||||
|
)
|
||||||
|
"""
|
||||||
|
self.con.execute(query_create)
|
||||||
|
self.con.commit()
|
||||||
|
|
||||||
|
|
||||||
|
def update_database(self, gainer_json, loser_json, active_json):
|
||||||
|
"""
|
||||||
|
Update the 'gainer', 'loser', and 'most_active' columns in the 'market_movers' table with the provided JSON data.
|
||||||
|
"""
|
||||||
|
query = "INSERT INTO market_movers (gainer, loser, most_active) VALUES (?, ?, ?)"
|
||||||
|
gainer_json_str = json.dumps(gainer_json)
|
||||||
|
loser_json_str = json.dumps(loser_json)
|
||||||
|
active_json_str = json.dumps(active_json)
|
||||||
|
self.con.execute(query, (gainer_json_str, loser_json_str, active_json_str))
|
||||||
|
self.con.commit()
|
||||||
|
|
||||||
|
def close_database_connection(self):
|
||||||
|
self.con.close()
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
analyzer = Past_Market_Movers()
|
||||||
|
analyzer.create_table() # Create the 'market_movers' table with the 'gainer', 'loser', and 'most_active' columns
|
||||||
|
gainer_json, loser_json, active_json = analyzer.run() # Retrieve the gainer_json, loser_json, and active_json data
|
||||||
|
analyzer.update_database(gainer_json, loser_json, active_json) # Update the 'gainer', 'loser', and 'most_active' columns with the respective data
|
||||||
|
analyzer.close_database_connection()
|
||||||
209
app/mc.py
Normal file
@ -0,0 +1,209 @@
|
|||||||
|
import numpy as np
|
||||||
|
import pandas as pd
|
||||||
|
from scipy.stats import norm
|
||||||
|
from datetime import datetime, date, timedelta
|
||||||
|
import time
|
||||||
|
import sqlite3
|
||||||
|
import concurrent.futures
|
||||||
|
import json
|
||||||
|
from tqdm import tqdm
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
#source https://medium.com/analytics-vidhya/monte-carlo-simulations-for-predicting-stock-prices-python-a64f53585662
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
parser = argparse.ArgumentParser(description='Process stock or ETF data.')
|
||||||
|
parser.add_argument('--db', choices=['stocks', 'etf'], required=True, help='Database name (stocks or etf)')
|
||||||
|
parser.add_argument('--table', choices=['stocks', 'etfs'], required=True, help='Table name (stocks or etfs)')
|
||||||
|
return parser.parse_args()
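# Editor's note, a hedged usage sketch: given the choices above, this script is presumably invoked
# from the app/ directory as
#   python mc.py --db stocks --table stocks
# or
#   python mc.py --db etf --table etfs
# so that db_name/table_name below point at backup_db/stocks.db or backup_db/etf.db.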
|
||||||
|
|
||||||
|
class GeometricBrownianMotion:
|
||||||
|
def __init__(self, data, pred_ndays):
|
||||||
|
#self.start = start
|
||||||
|
#self.end = end
|
||||||
|
#self.ticker = ticker
|
||||||
|
self.data = data
|
||||||
|
self.days = pred_ndays
|
||||||
|
self.num_sim = 1000
|
||||||
|
|
||||||
|
self.percentile = 0.01
|
||||||
|
|
||||||
|
|
||||||
|
np.random.seed(42)
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
|
||||||
|
self.data['date'] = pd.to_datetime(self.data['date'])
|
||||||
|
dt = self.days/self.num_sim
|
||||||
|
lr = np.log(1+self.data['close'].pct_change())
|
||||||
|
u = lr.mean()
|
||||||
|
sigma = lr.std()
|
||||||
|
drift = u -sigma**2.0 / 2.0
|
||||||
|
Z = norm.ppf(np.random.rand(self.days, self.num_sim)) #days, trials
|
||||||
|
dr = np.exp(drift *dt + sigma * Z * np.sqrt(dt))
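# Editor's note: this is the discretised geometric Brownian motion step,
#   S_t = S_{t-1} * exp((u - sigma^2 / 2) * dt + sigma * sqrt(dt) * Z),  Z ~ N(0, 1),
# so each column of dr holds the per-step multiplicative returns of one simulated path.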
|
||||||
|
|
||||||
|
#Calculating the stock price for every trial
|
||||||
|
new_prediction = np.zeros_like(dr)
|
||||||
|
new_prediction[0] = self.data['close'].iloc[-1]
|
||||||
|
for t in range(1, self.days):
|
||||||
|
new_prediction[t] = new_prediction[t-1]*dr[t]
|
||||||
|
|
||||||
|
|
||||||
|
#future_dates = pd.DataFrame([self.data['date'].iloc[-1] + timedelta(days=d) for d in range(0, self.days)])
|
||||||
|
#future_dates = future_dates.reset_index()
|
||||||
|
#future_dates['date'] = future_dates[0]
|
||||||
|
|
||||||
|
|
||||||
|
#new_prediction=pd.concat([future_dates['Date'], pd.DataFrame(new_prediction)],axis=1)
|
||||||
|
|
||||||
|
new_prediction = pd.DataFrame(new_prediction)
|
||||||
|
|
||||||
|
percentile_price = pd.DataFrame()
|
||||||
|
# For every simulated day, compute the 1st, 50th and (100 - self.percentile)th percentiles of the
# simulated prices across all trials: an approximate worst-case, median and best-case outcome.
# The 'min' value can be read as: with roughly 99% likelihood the price does not drop below it within the horizon.
for i in range(len(new_prediction)):
next_price = new_prediction.iloc[i, :]
# np.percentile does not require sorted input, so no explicit sort is needed here.
pp = np.percentile(next_price, [1, 50, 100 - self.percentile])
|
||||||
|
|
||||||
|
# Concatenate the new data to the existing DataFrame
|
||||||
|
df_temp = pd.DataFrame({'min': pp[0], 'mean': pp[1], 'max': pp[2]}, index=[0])
|
||||||
|
percentile_price = pd.concat([percentile_price, df_temp], ignore_index=True)
|
||||||
|
|
||||||
|
#percentile_price = pd.concat([future_dates['date'],percentile_price],axis=1)
|
||||||
|
#dates_formatted =future_dates['date'].dt.strftime("%Y-%m-%d").tolist()
|
||||||
|
dict_price = {
|
||||||
|
#'date': dates_formatted,
|
||||||
|
'min': percentile_price['min'].tolist()[-1],
|
||||||
|
'mean': percentile_price['mean'].tolist()[-1],
|
||||||
|
'max': percentile_price['max'].tolist()[-1]
|
||||||
|
}
|
||||||
|
|
||||||
|
'''
|
||||||
|
fig,ax = plt.subplots()
|
||||||
|
ax.plot(self.data['date'],self.data['date'],color='purple')
|
||||||
|
ax.plot(percentile_price['date'],percentile_price['brown_mean'],color='black')
|
||||||
|
ax.plot(percentile_price['date'],percentile_price['brown_max'],color='green')
|
||||||
|
ax.plot(percentile_price['date'],percentile_price['brown_min'],color='red')
|
||||||
|
|
||||||
|
plt.fill_between(percentile_price['date'],percentile_price['brown_max'],percentile_price['brown_mean'],alpha=0.3,color='green')
|
||||||
|
#plt.fill_between(percentile_price['date'],percentile_price['brown_mean'],percentile_price['brown_min'],alpha=0.3,color='red')
|
||||||
|
plt.xlabel('%s days in the future' % self.days)
|
||||||
|
plt.ylabel('Stock price prediction')
|
||||||
|
plt.show()
|
||||||
|
'''
|
||||||
|
|
||||||
|
#return percentile_price[['date','mean']], percentile_price[['Date','max']], percentile_price[['Date','min']]
|
||||||
|
|
||||||
|
return dict_price
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def create_column(con):
|
||||||
|
"""
|
||||||
|
Create the 'pricePrediction' column if it doesn't exist in the db table.
|
||||||
|
"""
|
||||||
|
query_check = f"PRAGMA table_info({table_name})"
|
||||||
|
cursor = con.execute(query_check)
|
||||||
|
columns = [col[1] for col in cursor.fetchall()]
|
||||||
|
|
||||||
|
if 'pricePrediction' not in columns:
|
||||||
|
print(f"Adding 'pricePrediction' column to {table_name}")
|
||||||
|
query = f"ALTER TABLE {table_name} ADD COLUMN pricePrediction TEXT"
|
||||||
|
con.execute(query)
|
||||||
|
con.commit()
|
||||||
|
|
||||||
|
def update_database(pred_dict, symbol, con):
|
||||||
|
query = f"UPDATE {table_name} SET pricePrediction = ? WHERE symbol = ?"
|
||||||
|
pred_json = json.dumps(pred_dict) # Convert the pred dictionary to JSON string
|
||||||
|
con.execute(query, (pred_json, symbol))
|
||||||
|
con.commit()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def process_symbol(ticker):
|
||||||
|
try:
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
date, close
|
||||||
|
FROM
|
||||||
|
"{ticker}"
|
||||||
|
WHERE
|
||||||
|
date BETWEEN ? AND ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
query = query_template.format(ticker=ticker)
|
||||||
|
df = pd.read_sql_query(query, con, params=(start_date, end_date))
|
||||||
|
time_list = [7,30,90,180]
|
||||||
|
|
||||||
|
pred_dict = {}
|
||||||
|
try:
|
||||||
|
for time_period in time_list:
|
||||||
|
if time_period == 7:
|
||||||
|
pred_dict['1W'] = GeometricBrownianMotion(df, time_period).run()
|
||||||
|
elif time_period == 30:
|
||||||
|
pred_dict['1M'] = GeometricBrownianMotion(df, time_period).run()
|
||||||
|
elif time_period == 90:
|
||||||
|
pred_dict['3M'] = GeometricBrownianMotion(df, time_period).run()
|
||||||
|
elif time_period == 180:
|
||||||
|
pred_dict['6M'] = GeometricBrownianMotion(df, time_period).run()
|
||||||
|
|
||||||
|
except:
|
||||||
|
pred_dict = {'1W': {'min': 0, 'mean': 0, 'max': 0}, '1M': {'min': 0, 'mean': 0, 'max': 0}, '3M': {'min': 0, 'mean': 0, 'max': 0}, '6M': {'min': 0, 'mean': 0, 'max': 0}}
|
||||||
|
|
||||||
|
create_column(con)
|
||||||
|
update_database(pred_dict, ticker, con)
|
||||||
|
|
||||||
|
except:
|
||||||
|
print(f"Failed create price prediction for {ticker}")
|
||||||
|
|
||||||
|
|
||||||
|
args = parse_args()
|
||||||
|
db_name = args.db
|
||||||
|
table_name = args.table
|
||||||
|
|
||||||
|
con = sqlite3.connect(f'backup_db/{db_name}.db')
|
||||||
|
|
||||||
|
symbol_query = f"SELECT DISTINCT symbol FROM {table_name}"
|
||||||
|
|
||||||
|
symbol_cursor = con.execute(symbol_query)
|
||||||
|
symbols = [symbol[0] for symbol in symbol_cursor.fetchall()]
|
||||||
|
|
||||||
|
start_date = datetime(1970, 1, 1)
|
||||||
|
end_date = datetime.today()
|
||||||
|
|
||||||
|
# Number of concurrent workers
|
||||||
|
num_processes = 4 # You can adjust this based on your system's capabilities
|
||||||
|
futures = []
|
||||||
|
|
||||||
|
with concurrent.futures.ProcessPoolExecutor(max_workers=num_processes) as executor:
|
||||||
|
for symbol in symbols:
|
||||||
|
futures.append(executor.submit(process_symbol, symbol))
|
||||||
|
|
||||||
|
# Use tqdm to wrap around the futures for progress tracking
|
||||||
|
for future in tqdm(concurrent.futures.as_completed(futures), total=len(symbols), desc="Processing"):
|
||||||
|
pass
|
||||||
|
con.close()
|
||||||
|
|
||||||
|
|
||||||
|
'''
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
date, close
|
||||||
|
FROM
|
||||||
|
{ticker}
|
||||||
|
WHERE
|
||||||
|
date BETWEEN ? AND ?
|
||||||
|
"""
|
||||||
|
ticker = 'AMD'
|
||||||
|
start_date = datetime(2020,1,1)
|
||||||
|
end_date = datetime.today()
|
||||||
|
con = sqlite3.connect('stocks.db')
|
||||||
|
query = query_template.format(ticker=ticker)
|
||||||
|
df = pd.read_sql_query(query, con, params=(start_date, end_date))
|
||||||
|
#Compute the logarithmic returns
|
||||||
|
GeometricBrownianMotion(df).run()
|
||||||
|
'''
|
||||||
BIN
app/ml_models/__pycache__/backtesting.cpython-310.pyc
Normal file
Binary file not shown.
BIN
app/ml_models/__pycache__/classification.cpython-310.pyc
Normal file
Binary file not shown.
BIN
app/ml_models/__pycache__/fundamental_predictor.cpython-310.pyc
Normal file
Binary file not shown.
BIN
app/ml_models/__pycache__/prophet.cpython-310.pyc
Normal file
Binary file not shown.
BIN
app/ml_models/__pycache__/prophet_model.cpython-310.pyc
Normal file
Binary file not shown.
57
app/ml_models/backtesting.py
Normal file
@ -0,0 +1,57 @@
|
|||||||
|
from sklearn.metrics import explained_variance_score,r2_score
|
||||||
|
#from sklearn.metrics import mean_squared_error
|
||||||
|
import pandas as pd
|
||||||
|
|
||||||
|
class Backtesting:
|
||||||
|
def __init__(self, original_ytrain, train_predict, original_ytest, test_predict):
|
||||||
|
|
||||||
|
self.original_ytrain = original_ytrain
|
||||||
|
self.train_predict = train_predict
|
||||||
|
|
||||||
|
self.original_ytest = original_ytest
|
||||||
|
self.test_predict = test_predict
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
'''
|
||||||
|
Explained variance regression score:
|
||||||
|
The explained variance score explains the dispersion of errors of a given dataset,
|
||||||
|
and the formula is written as follows:
explained_variance = 1 - Var(y - y_hat) / Var(y)
Here, Var(y - y_hat) and Var(y) are the variance of the prediction errors and of the actual values respectively.
Scores close to 1.0 are highly desired, indicating that the prediction errors have a small variance relative to the data.
|
||||||
|
|
||||||
|
print('Variance score:')
|
||||||
|
print("Train data explained variance regression score:", explained_variance_score(self.original_ytrain, self.train_predict))
|
||||||
|
print("Test data explained variance regression score:", explained_variance_score(self.original_ytest, self.test_predict))
|
||||||
|
print('=================')
|
||||||
|
|
||||||
|
|
||||||
|
R2 score for regression
|
||||||
|
R-squared (R2) is a statistical measure that represents the proportion of the variance
|
||||||
|
for a dependent variable that's explained by an independent variable or variables in
|
||||||
|
a regression model
|
||||||
|
1 = Best
|
||||||
|
0 or < 0 = worse
|
||||||
|
|
||||||
|
print('R2 score:')
|
||||||
|
print("Train data R2 score:", r2_score(self.original_ytrain, self.train_predict))
|
||||||
|
print("Test data R2 score:", r2_score(self.original_ytest, self.test_predict))
|
||||||
|
print('=================')
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
print('Mean squared error:')
|
||||||
|
print("Train data accuracy regression score:", mean_squared_error(self.original_ytrain, self.train_predict))
|
||||||
|
print("Test data accuracy regression score:", mean_squared_error(self.original_ytest, self.test_predict))
|
||||||
|
print('=================')
|
||||||
|
'''
|
||||||
|
res = pd.DataFrame(index=['metrics'])
|
||||||
|
res['train_variance_score'] = explained_variance_score(self.original_ytrain, self.train_predict)
|
||||||
|
res['test_variance_score'] = explained_variance_score(self.original_ytest, self.test_predict)
|
||||||
|
|
||||||
|
res['train_r2_score'] = r2_score(self.original_ytrain, self.train_predict)
|
||||||
|
res['test_r2_score'] = r2_score(self.original_ytest, self.test_predict)
|
||||||
|
|
||||||
|
|
||||||
|
res=res.reset_index(drop=True)
|
||||||
|
|
||||||
|
return res
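# Editor's usage sketch (hypothetical toy arrays, not from the repo):
#   import numpy as np
#   bt = Backtesting(np.array([1.0, 2.0, 3.0]), np.array([1.1, 1.9, 3.2]),
#                    np.array([4.0, 5.0]), np.array([3.8, 5.3]))
#   print(bt.run())  # one-row DataFrame with train/test variance and R2 scores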
|
||||||
199
app/ml_models/classification.py
Normal file
@ -0,0 +1,199 @@
|
|||||||
|
import yfinance as yf
|
||||||
|
import pandas as pd
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from sklearn.ensemble import RandomForestClassifier
|
||||||
|
#from sklearn.model_selection import GridSearchCV
|
||||||
|
#from sklearn.linear_model import LogisticRegression
|
||||||
|
from xgboost import XGBClassifier
|
||||||
|
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score
|
||||||
|
from sklearn.model_selection import train_test_split
|
||||||
|
from sklearn.preprocessing import MinMaxScaler
|
||||||
|
from ta.utils import *
|
||||||
|
from ta.volatility import *
|
||||||
|
from ta.momentum import *
|
||||||
|
from ta.trend import *
|
||||||
|
from ta.volume import *
|
||||||
|
from tqdm import tqdm
|
||||||
|
from sklearn.feature_selection import SelectKBest, f_classif
|
||||||
|
import asyncio
|
||||||
|
import aiohttp
|
||||||
|
import pickle
|
||||||
|
import time
|
||||||
|
|
||||||
|
|
||||||
|
async def download_data(ticker, start_date, end_date, nth_day):
|
||||||
|
try:
|
||||||
|
df = yf.download(ticker, start=start_date, end=end_date, interval="1d")
|
||||||
|
df = df.rename(columns={'Adj Close': 'close', 'Open': 'open', 'High': 'high', 'Low': 'low', 'Volume': 'volume', 'Date': 'date'})
|
||||||
|
df["Target"] = ((df["close"].shift(-nth_day) > df["close"])).astype(int)
|
||||||
|
df_copy = df.copy()
|
||||||
|
if len(df_copy) > 252*2: #At least 2 years of history is necessary
|
||||||
|
return df_copy
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
|
||||||
|
class TrendPredictor:
|
||||||
|
def __init__(self, nth_day, path="weights"):
|
||||||
|
self.model = RandomForestClassifier(n_estimators=500, max_depth = 10, min_samples_split=10, random_state=42, n_jobs=10)
|
||||||
|
self.scaler = MinMaxScaler()
|
||||||
|
self.nth_day = nth_day
|
||||||
|
self.path = path
|
||||||
|
|
||||||
|
def generate_features(self, df):
|
||||||
|
new_predictors = []
|
||||||
|
|
||||||
|
df['macd'] = macd(df['close'])
|
||||||
|
df['macd_signal'] = macd_signal(df['close'])
|
||||||
|
df['macd_hist'] = 2*macd_diff(df['close'])
|
||||||
|
df['adx'] = adx(df['high'],df['low'],df['close'])
|
||||||
|
df["adx_pos"] = adx_pos(df['high'],df['low'],df['close'])
|
||||||
|
df["adx_neg"] = adx_neg(df['high'],df['low'],df['close'])
|
||||||
|
df['cci'] = CCIIndicator(high=df['high'], low=df['low'], close=df['close']).cci()
|
||||||
|
df['mfi'] = MFIIndicator(high=df['high'], low=df['low'], close=df['close'], volume=df['volume']).money_flow_index()
|
||||||
|
|
||||||
|
df['nvi'] = NegativeVolumeIndexIndicator(close=df['close'], volume=df['volume']).negative_volume_index()
|
||||||
|
df['obv'] = OnBalanceVolumeIndicator(close=df['close'], volume=df['volume']).on_balance_volume()
|
||||||
|
df['vpt'] = VolumePriceTrendIndicator(close=df['close'], volume=df['volume']).volume_price_trend()
|
||||||
|
|
||||||
|
df['rsi'] = rsi(df["close"], window=14)
|
||||||
|
df['stoch_rsi'] = stochrsi_k(df['close'], window=14, smooth1=3, smooth2=3)
|
||||||
|
df['bb_hband'] = bollinger_hband(df['close'], window=14)/df['close']
|
||||||
|
df['bb_lband'] = bollinger_lband(df['close'], window=14)/df['close']
|
||||||
|
|
||||||
|
df['adi'] = acc_dist_index(high=df['high'],low=df['low'],close=df['close'],volume=df['volume'])
|
||||||
|
df['cmf'] = chaikin_money_flow(high=df['high'],low=df['low'],close=df['close'],volume=df['volume'], window=20)
|
||||||
|
df['emv'] = ease_of_movement(high=df['high'],low=df['low'],volume=df['volume'], window=20)
|
||||||
|
df['fi'] = force_index(close=df['close'], volume=df['volume'], window= 13)
|
||||||
|
|
||||||
|
#df['atr'] = average_true_range(df['high'], df['low'], df['close'], window=20)
|
||||||
|
#df['roc'] = roc(df['close'], window=20)
|
||||||
|
df['williams'] = WilliamsRIndicator(high=df['high'], low=df['low'], close=df['close']).williams_r()
|
||||||
|
#df['vwap'] = VolumeWeightedAveragePrice(high=df['high'],low=df['low'],close=df['close'], volume=df['volume'],window=14).volume_weighted_average_price()
|
||||||
|
#df['sma_cross'] = (sma_indicator(df['close'], window=10) -sma_indicator(df['close'], window=50)).fillna(0).astype(int)
|
||||||
|
#df['ema_cross'] = (ema_indicator(df['close'], window=10) -ema_indicator(df['close'], window=50)).fillna(0).astype(int)
|
||||||
|
#df['wma_cross'] = (wma_indicator(df['close'], window=10) -wma_indicator(df['close'], window=50)).fillna(0).astype(int)
|
||||||
|
#each of the commented-out indicators above was found to reduce accuracy
|
||||||
|
|
||||||
|
df['stoch'] = stoch(df['high'], df['low'], df['close'], window=14)
|
||||||
|
|
||||||
|
new_predictors+=['williams','fi','emv','cmf','adi','bb_hband','bb_lband','vpt','stoch','stoch_rsi','rsi','nvi','obv','macd','macd_signal','macd_hist','adx','adx_pos','adx_neg','cci','mfi']
|
||||||
|
return new_predictors
|
||||||
|
|
||||||
|
def feature_selection(self, df, predictors):
|
||||||
|
X = df[predictors]
|
||||||
|
y = df['Target']
|
||||||
|
|
||||||
|
selector = SelectKBest(score_func=f_classif, k=15)
|
||||||
|
selector.fit(X, y)
|
||||||
|
|
||||||
|
selector.transform(X)
|
||||||
|
selected_features = [col for i, col in enumerate(X.columns) if selector.get_support()[i]]
|
||||||
|
|
||||||
|
return selected_features
|
||||||
|
|
||||||
|
def train_model(self, X_train, y_train):
|
||||||
|
X_train = np.where(np.isinf(X_train), np.nan, X_train)
|
||||||
|
X_train = np.nan_to_num(X_train)
|
||||||
|
|
||||||
|
X_train = self.scaler.fit_transform(X_train)
|
||||||
|
self.model.fit(X_train, y_train)
|
||||||
|
pickle.dump(self.model, open(f'{self.path}/model_weights_{self.nth_day}.pkl', 'wb'))
|
||||||
|
|
||||||
|
def evaluate_model(self, X_test, y_test):
|
||||||
|
X_test = np.where(np.isinf(X_test), np.nan, X_test)
|
||||||
|
X_test = np.nan_to_num(X_test)
|
||||||
|
|
||||||
|
X_test = self.scaler.fit_transform(X_test)
|
||||||
|
|
||||||
|
with open(f'{self.path}/model_weights_{self.nth_day}.pkl', 'rb') as f:
|
||||||
|
self.model = pickle.load(f)
|
||||||
|
|
||||||
|
test_predictions = self.model.predict(X_test)
|
||||||
|
#test_predictions[test_predictions >=.55] = 1
|
||||||
|
#test_predictions[test_predictions <.55] = 0
|
||||||
|
|
||||||
|
|
||||||
|
test_precision = precision_score(y_test, test_predictions)
|
||||||
|
test_accuracy = accuracy_score(y_test, test_predictions)
|
||||||
|
#test_recall = recall_score(y_test, test_predictions)
|
||||||
|
#test_f1 = f1_score(y_test, test_predictions)
|
||||||
|
#test_roc_auc = roc_auc_score(y_test, test_predictions)
|
||||||
|
|
||||||
|
|
||||||
|
#print("Test Set Metrics:")
|
||||||
|
print(f"Precision: {round(test_precision * 100)}%")
|
||||||
|
print(f"Accuracy: {round(test_accuracy * 100)}%")
|
||||||
|
#print(f"Recall: {round(test_recall * 100)}%")
|
||||||
|
#print(f"F1-Score: {round(test_f1 * 100)}%")
|
||||||
|
#print(f"ROC-AUC: {round(test_roc_auc * 100)}%")
|
||||||
|
#print("Number of value counts in the test set")
|
||||||
|
#print(pd.DataFrame(test_predictions).value_counts())
|
||||||
|
|
||||||
|
next_value_prediction = 1 if test_predictions[-1] >= 0.5 else 0
|
||||||
|
return {'accuracy': round(test_accuracy*100), 'precision': round(test_precision*100), 'sentiment': 'Bullish' if next_value_prediction == 1 else 'Bearish'}
|
||||||
|
|
||||||
|
|
||||||
|
#Train mode
|
||||||
|
|
||||||
|
async def train_process(nth_day):
|
||||||
|
tickers =['KO','WMT','BA','PLD','AZN','LLY','INFN','GRMN','VVX','EPD','PII','WY','BLMN','AAP','ON','TGT','SMG','EL','EOG','ULTA','DV','PLNT','GLOB','LKQ','CWH','PSX','SO','TGT','GD','MU','NKE','AMGN','BX','CAT','PEP','LIN','ABBV','COST','MRK','HD','JNJ','PG','SPCB','CVX','SHEL','MS','GS','MA','V','JPM','XLF','DPZ','CMG','MCD','ALTM','PDD','MNST','SBUX','AMAT','ZS','IBM','SMCI','ORCL','XLK','VUG','VTI','VOO','IWM','IEFA','PEP','WMT','XOM','V','AVGO','BIDU','GOOGL','SNAP','DASH','SPOT','NVO','META','MSFT','ADBE','DIA','PFE','BAC','RIVN','NIO','CISS','INTC','AAPL','BYND','MSFT','HOOD','MARA','SHOP','CRM','PYPL','UBER','SAVE','QQQ','IVV','SPY','EVOK','GME','F','NVDA','AMD','AMZN','TSM','TSLA']
|
||||||
|
tickers = list(set(tickers))
|
||||||
|
print(len(tickers))
|
||||||
|
|
||||||
|
df_train = pd.DataFrame()
|
||||||
|
df_test = pd.DataFrame()
|
||||||
|
best_features = ['close','williams','fi','emv','adi','cmf','bb_hband','bb_lband','vpt','stoch','stoch_rsi','rsi','nvi','macd','mfi','cci','obv','adx','adx_pos','adx_neg']
|
||||||
|
test_size = 0.2
|
||||||
|
start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
|
||||||
|
end_date = datetime.today().strftime("%Y-%m-%d")
|
||||||
|
predictor = TrendPredictor(nth_day=nth_day)
|
||||||
|
|
||||||
|
tasks = [download_data(ticker, start_date, end_date, nth_day) for ticker in tickers]
|
||||||
|
dfs = await asyncio.gather(*tasks)
|
||||||
|
|
||||||
|
for df in dfs:
|
||||||
|
try:
|
||||||
|
predictors = predictor.generate_features(df)
|
||||||
|
predictors = [pred for pred in predictors if pred in df.columns]
|
||||||
|
df = df.dropna(subset=df.columns[df.columns != "nth_day"])
|
||||||
|
split_size = int(len(df) * (1-test_size))
|
||||||
|
train_data = df.iloc[:split_size]
|
||||||
|
test_data = df.iloc[split_size:]
|
||||||
|
df_train = pd.concat([df_train, train_data], ignore_index=True)
|
||||||
|
df_test = pd.concat([df_test, test_data], ignore_index=True)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
df_train = df_train.sample(frac=1).reset_index(drop=True)
|
||||||
|
#df_train.to_csv('train_set.csv')
|
||||||
|
#df_test.to_csv('test_set.csv')
|
||||||
|
predictor.train_model(df_train[best_features], df_train['Target'])
|
||||||
|
predictor.evaluate_model(df_test[best_features], df_test['Target'])
|
||||||
|
|
||||||
|
async def test_process(nth_day):
|
||||||
|
best_features = ['close','williams','fi','emv','adi','cmf','bb_hband','bb_lband','vpt','stoch','stoch_rsi','rsi','nvi','macd','mfi','cci','obv','adx','adx_pos','adx_neg']
|
||||||
|
test_size = 0.2
|
||||||
|
start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
|
||||||
|
end_date = datetime.today().strftime("%Y-%m-%d")
|
||||||
|
predictor = TrendPredictor(nth_day=nth_day)
|
||||||
|
|
||||||
|
df = await download_data('BTC-USD', start_date, end_date, nth_day)
|
||||||
|
predictors = predictor.generate_features(df)
|
||||||
|
df = df.dropna(subset=df.columns[df.columns != "nth_day"])
|
||||||
|
split_size = int(len(df) * (1-test_size))
|
||||||
|
test_data = df.iloc[split_size:]
|
||||||
|
|
||||||
|
predictor.evaluate_model(test_data[best_features], test_data['Target'])
|
||||||
|
|
||||||
|
|
||||||
|
async def main():
|
||||||
|
|
||||||
|
for nth_day in [5,20,60]:
|
||||||
|
await train_process(nth_day)
|
||||||
|
|
||||||
|
await test_process(nth_day=5)
|
||||||
|
|
||||||
|
# Run the main function
|
||||||
|
#asyncio.run(main())
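# Editor's note (hedged): uncommenting asyncio.run(main()) above presumably trains one RandomForest per
# horizon (5, 20 and 60 trading days), saves the weights to weights/model_weights_<nth_day>.pkl,
# and then evaluates the 5-day model on BTC-USD via test_process.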
|
||||||
295
app/ml_models/fundamental_predictor.py
Normal file
@ -0,0 +1,295 @@
|
|||||||
|
import yfinance as yf
|
||||||
|
import pandas as pd
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from sklearn.ensemble import RandomForestClassifier
|
||||||
|
import numpy as np
|
||||||
|
from xgboost import XGBClassifier
|
||||||
|
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score
|
||||||
|
from sklearn.model_selection import train_test_split
|
||||||
|
from sklearn.preprocessing import MinMaxScaler, StandardScaler
|
||||||
|
from keras.models import Sequential
|
||||||
|
from keras.layers import LSTM, Dense, Conv1D, Bidirectional, Attention,Dropout, BatchNormalization
|
||||||
|
from keras.optimizers import Adam
|
||||||
|
from keras.callbacks import EarlyStopping, ModelCheckpoint
|
||||||
|
from keras.models import load_model
|
||||||
|
from sklearn.feature_selection import SelectKBest, f_classif
|
||||||
|
from tensorflow.keras.backend import clear_session
|
||||||
|
from keras import regularizers
|
||||||
|
|
||||||
|
from tqdm import tqdm
|
||||||
|
from collections import defaultdict
|
||||||
|
import asyncio
|
||||||
|
import aiohttp
|
||||||
|
import pickle
|
||||||
|
import time
|
||||||
|
import sqlite3
|
||||||
|
import ujson
|
||||||
|
|
||||||
|
|
||||||
|
#Based on the paper: https://arxiv.org/pdf/1603.00751
|
||||||
|
|
||||||
|
|
||||||
|
async def download_data(ticker, con, start_date, end_date):
|
||||||
|
try:
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
income, income_growth, balance, balance_growth, cashflow, cashflow_growth, ratios
|
||||||
|
FROM
|
||||||
|
stocks
|
||||||
|
WHERE
|
||||||
|
symbol = ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
query_df = pd.read_sql_query(query_template, con, params=(ticker,))
|
||||||
|
|
||||||
|
income = ujson.loads(query_df['income'].iloc[0])
|
||||||
|
|
||||||
|
#Only consider companies with at least 10 years' worth of data (at least 40 statements)
|
||||||
|
if len(income) < 40:
|
||||||
|
raise ValueError("Income data length is too small.")
|
||||||
|
|
||||||
|
income = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in income if int(item["date"][:4]) >= 2000]
|
||||||
|
income_growth = ujson.loads(query_df['income_growth'].iloc[0])
|
||||||
|
income_growth = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in income_growth if int(item["date"][:4]) >= 2000]
|
||||||
|
|
||||||
|
balance = ujson.loads(query_df['balance'].iloc[0])
|
||||||
|
balance = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in balance if int(item["date"][:4]) >= 2000]
|
||||||
|
balance_growth = ujson.loads(query_df['balance_growth'].iloc[0])
|
||||||
|
balance_growth = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in balance_growth if int(item["date"][:4]) >= 2000]
|
||||||
|
|
||||||
|
cashflow = ujson.loads(query_df['cashflow'].iloc[0])
|
||||||
|
cashflow = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in cashflow if int(item["date"][:4]) >= 2000]
|
||||||
|
cashflow_growth = ujson.loads(query_df['cashflow_growth'].iloc[0])
|
||||||
|
cashflow_growth = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in cashflow_growth if int(item["date"][:4]) >= 2000]
|
||||||
|
|
||||||
|
|
||||||
|
ratios = ujson.loads(query_df['ratios'].iloc[0])
|
||||||
|
ratios = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in ratios if int(item["date"][:4]) >= 2000]
|
||||||
|
|
||||||
|
combined_data = defaultdict(dict)
|
||||||
|
# Iterate over all lists simultaneously
|
||||||
|
for entries in zip(income, income_growth, balance, balance_growth, cashflow, cashflow_growth, ratios):
|
||||||
|
# Iterate over each entry in the current set of entries
|
||||||
|
for entry in entries:
|
||||||
|
date = entry['date']
|
||||||
|
# Merge entry data into combined_data, skipping duplicate keys
|
||||||
|
for key, value in entry.items():
|
||||||
|
if key not in combined_data[date]:
|
||||||
|
combined_data[date][key] = value
|
||||||
|
|
||||||
|
combined_data = list(combined_data.values())
|
||||||
|
|
||||||
|
df = yf.download(ticker, start=start_date, end=end_date, interval="1d").reset_index()
|
||||||
|
df = df.rename(columns={'Adj Close': 'close', 'Date': 'date'})
|
||||||
|
#print(df[['date','close']])
|
||||||
|
df['date'] = df['date'].dt.strftime('%Y-%m-%d')
|
||||||
|
|
||||||
|
|
||||||
|
for item in combined_data:
|
||||||
|
# Find the close price for the statement date, or the closest available trading day prior to it
|
||||||
|
target_date = item['date']
|
||||||
|
counter = 0
|
||||||
|
max_attempts = 10
|
||||||
|
|
||||||
|
while target_date not in df['date'].values and counter < max_attempts:
|
||||||
|
# If the target date doesn't exist, move one day back
|
||||||
|
target_date = (pd.to_datetime(target_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
|
||||||
|
counter += 1
|
||||||
|
if counter == max_attempts:
|
||||||
|
break
|
||||||
|
|
||||||
|
|
||||||
|
# Get the close price for the found or closest date
|
||||||
|
close_price = round(df[df['date'] == target_date]['close'].values[0],2)
|
||||||
|
item['price'] = close_price
|
||||||
|
#print(f"Close price for {target_date}: {close_price}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
combined_data = sorted(combined_data, key=lambda x: x['date'])
|
||||||
|
|
||||||
|
|
||||||
|
df_income = pd.DataFrame(combined_data).dropna()
|
||||||
|
|
||||||
|
df_income['Target'] = ((df_income['price'].shift(-1) - df_income['price']) / df_income['price'] > 0).astype(int)
|
||||||
|
|
||||||
|
df_copy = df_income.copy()
|
||||||
|
|
||||||
|
return df_copy
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
|
||||||
|
class FundamentalPredictor:
|
||||||
|
def __init__(self, path='weights'):
|
||||||
|
self.model = self.build_model() #RandomForestClassifier(n_estimators=1000, max_depth = 20, min_samples_split=10, random_state=42, n_jobs=10)
|
||||||
|
self.scaler = MinMaxScaler()
|
||||||
|
self.path = path
|
||||||
|
|
||||||
|
def build_model(self):
|
||||||
|
clear_session()
|
||||||
|
model = Sequential()
|
||||||
|
|
||||||
|
model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu', input_shape=(None, 1)))
|
||||||
|
|
||||||
|
# First LSTM layer with dropout and batch normalization
|
||||||
|
model.add(LSTM(256, return_sequences=True, kernel_regularizer=regularizers.l2(0.01)))
|
||||||
|
model.add(Dropout(0.5))
|
||||||
|
model.add(BatchNormalization())
|
||||||
|
|
||||||
|
|
||||||
|
# Second LSTM layer with dropout and batch normalization
|
||||||
|
model.add(LSTM(128, return_sequences=True, kernel_regularizer=regularizers.l2(0.01)))
|
||||||
|
model.add(Dropout(0.5))
|
||||||
|
model.add(BatchNormalization())
|
||||||
|
|
||||||
|
# Third LSTM layer with dropout and batch normalization
|
||||||
|
model.add(LSTM(128, kernel_regularizer=regularizers.l2(0.01)))
|
||||||
|
model.add(Dropout(0.5))
|
||||||
|
model.add(BatchNormalization())
|
||||||
|
|
||||||
|
model.add(Dense(32, activation='relu', kernel_regularizer=regularizers.l2(0.01)))
|
||||||
|
model.add(Dropout(0.2))
|
||||||
|
model.add(BatchNormalization())
|
||||||
|
|
||||||
|
# Dense layer with sigmoid activation for binary classification
|
||||||
|
model.add(Dense(1, activation='sigmoid'))
|
||||||
|
|
||||||
|
|
||||||
|
# Adam optimizer with a learning rate of 0.001
|
||||||
|
optimizer = Adam(learning_rate=0.01)
|
||||||
|
|
||||||
|
# Compile model with binary crossentropy loss and accuracy metric
|
||||||
|
model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
|
||||||
|
|
||||||
|
return model
|
||||||
|
|
||||||
|
def preprocess_data(self, X):
|
||||||
|
#X = X.applymap(lambda x: 9999 if x == 0 else x) # Replace 0 with 9999 as suggested in the paper
|
||||||
|
X = np.where(np.isinf(X), np.nan, X)
|
||||||
|
X = np.nan_to_num(X)
|
||||||
|
X = self.scaler.fit_transform(X)
|
||||||
|
return X
|
||||||
|
|
||||||
|
def reshape_for_lstm(self, X):
|
||||||
|
return X.reshape((X.shape[0], X.shape[1], 1))
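# Editor's note: this feeds each fundamental feature to the Conv1D/LSTM stack as its own "timestep"
# of length 1, i.e. input shape (samples, n_features, 1), matching input_shape=(None, 1) above.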
|
||||||
|
|
||||||
|
def train_model(self, X_train, y_train):
|
||||||
|
X_train = self.preprocess_data(X_train)
|
||||||
|
X_train = self.reshape_for_lstm(X_train)
|
||||||
|
|
||||||
|
checkpoint = ModelCheckpoint(f'{self.path}/fundamental_weights/weights.keras', save_best_only=True, monitor='val_loss', mode='min')
|
||||||
|
early_stopping = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
|
||||||
|
|
||||||
|
self.model.fit(X_train, y_train, epochs=250, batch_size=32, validation_split=0.2, callbacks=[checkpoint, early_stopping])
|
||||||
|
self.model.save(f'{self.path}/fundamental_weights/weights.keras')
|
||||||
|
|
||||||
|
def evaluate_model(self, X_test, y_test):
|
||||||
|
X_test = self.preprocess_data(X_test)
|
||||||
|
X_test = self.reshape_for_lstm(X_test)
|
||||||
|
|
||||||
|
self.model = self.build_model()
|
||||||
|
self.model = load_model(f'{self.path}/fundamental_weights/weights.keras')
|
||||||
|
|
||||||
|
test_predictions = self.model.predict(X_test).flatten()
|
||||||
|
|
||||||
|
test_predictions[test_predictions >= 0.5] = 1
|
||||||
|
test_predictions[test_predictions < 0.5] = 0
|
||||||
|
|
||||||
|
test_precision = precision_score(y_test, test_predictions)
|
||||||
|
test_accuracy = accuracy_score(y_test, test_predictions)
|
||||||
|
|
||||||
|
print("Test Set Metrics:")
|
||||||
|
print(f"Precision: {round(test_precision * 100)}%")
|
||||||
|
print(f"Accuracy: {round(test_accuracy * 100)}%")
|
||||||
|
|
||||||
|
next_value_prediction = 1 if test_predictions[-1] >= 0.5 else 0
|
||||||
|
return {'accuracy': round(test_accuracy*100), 'precision': round(test_precision*100), 'sentiment': 'Bullish' if next_value_prediction == 1 else 'Bearish'}, test_predictions
|
||||||
|
|
||||||
|
def feature_selection(self, X_train, y_train,k=8):
|
||||||
|
'''
|
||||||
|
selector = SelectKBest(score_func=f_classif, k=8)
|
||||||
|
selector.fit(X_train, y_train)
|
||||||
|
|
||||||
|
selector.transform(X_train)
|
||||||
|
selected_features = [col for i, col in enumerate(X_train.columns) if selector.get_support()[i]]
|
||||||
|
|
||||||
|
return selected_features
|
||||||
|
'''
|
||||||
|
# Calculate the variance of each feature with respect to the target
|
||||||
|
variances = {}
|
||||||
|
for col in X_train.columns:
|
||||||
|
grouped_variance = X_train.groupby(y_train)[col].var().mean()
|
||||||
|
variances[col] = grouped_variance
|
||||||
|
|
||||||
|
# Sort features by variance and select top k features
|
||||||
|
sorted_features = sorted(variances, key=variances.get, reverse=True)[:k]
|
||||||
|
return sorted_features
|
||||||
|
|
||||||
|
#Train mode
|
||||||
|
async def train_process(tickers, con):
|
||||||
|
tickers = list(set(tickers))
|
||||||
|
df_train = pd.DataFrame()
|
||||||
|
df_test = pd.DataFrame()
|
||||||
|
test_size = 0.4
|
||||||
|
start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
|
||||||
|
end_date = datetime.today().strftime("%Y-%m-%d")
|
||||||
|
predictor = FundamentalPredictor()
|
||||||
|
df_train = pd.DataFrame()
|
||||||
|
df_test = pd.DataFrame()
|
||||||
|
|
||||||
|
|
||||||
|
tasks = [download_data(ticker, con, start_date, end_date) for ticker in tickers]
|
||||||
|
dfs = await asyncio.gather(*tasks)
|
||||||
|
for df in dfs:
|
||||||
|
try:
|
||||||
|
split_size = int(len(df) * (1-test_size))
|
||||||
|
train_data = df.iloc[:split_size]
|
||||||
|
test_data = df.iloc[split_size:]
|
||||||
|
df_train = pd.concat([df_train, train_data], ignore_index=True)
|
||||||
|
df_test = pd.concat([df_test, test_data], ignore_index=True)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
best_features = [col for col in df_train.columns if col not in ['date','price','Target']]
|
||||||
|
|
||||||
|
df_train = df_train.sample(frac=1).reset_index(drop=True)
|
||||||
|
print('======Train Set Datapoints======')
|
||||||
|
print(len(df_train))
|
||||||
|
#selected_features = predictor.feature_selection(df_train[best_features], df_train['Target'],k=10)
|
||||||
|
#print(selected_features)
|
||||||
|
#selected_features = [col for col in df_train if col not in ['price','date','Target']]
|
||||||
|
selected_features = ['shortTermCoverageRatios','netProfitMargin','debtRepayment','totalDebt','interestIncome','researchAndDevelopmentExpenses','priceEarningsToGrowthRatio','priceCashFlowRatio','cashPerShare','debtRatio','growthRevenue','revenue','growthNetIncome','ebitda','priceEarningsRatio','priceToBookRatio','epsdiluted','priceToSalesRatio','growthOtherCurrentLiabilities', 'receivablesTurnover', 'totalLiabilitiesAndStockholdersEquity', 'totalLiabilitiesAndTotalEquity', 'totalAssets', 'growthOtherCurrentAssets', 'retainedEarnings', 'totalEquity']
|
||||||
|
|
||||||
|
predictor.train_model(df_train[selected_features], df_train['Target'])
|
||||||
|
predictor.evaluate_model(df_test[selected_features], df_test['Target'])
|
||||||
|
|
||||||
|
|
||||||
|
async def test_process(con):
|
||||||
|
test_size = 0.4
|
||||||
|
start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
|
||||||
|
end_date = datetime.today().strftime("%Y-%m-%d")
|
||||||
|
predictor = FundamentalPredictor()
|
||||||
|
df = await download_data('GME', con, start_date, end_date)
|
||||||
|
split_size = int(len(df) * (1-test_size))
|
||||||
|
test_data = df.iloc[split_size:]
|
||||||
|
#selected_features = [col for col in test_data if col not in ['price','date','Target']]
|
||||||
|
selected_features = ['shortTermCoverageRatios','netProfitMargin','debtRepayment','totalDebt','interestIncome','researchAndDevelopmentExpenses','priceEarningsToGrowthRatio','priceCashFlowRatio','cashPerShare','debtRatio','growthRevenue','revenue','growthNetIncome','ebitda','priceEarningsRatio','priceToBookRatio','epsdiluted','priceToSalesRatio','growthOtherCurrentLiabilities', 'receivablesTurnover', 'totalLiabilitiesAndStockholdersEquity', 'totalLiabilitiesAndTotalEquity', 'totalAssets', 'growthOtherCurrentAssets', 'retainedEarnings', 'totalEquity']
|
||||||
|
predictor.evaluate_model(test_data[selected_features], test_data['Target'])
|
||||||
|
|
||||||
|
async def main():
|
||||||
|
con = sqlite3.connect('../stocks.db')
|
||||||
|
cursor = con.cursor()
|
||||||
|
cursor.execute("PRAGMA journal_mode = wal")
|
||||||
|
cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap >= 500E9")
|
||||||
|
stock_symbols = [row[0] for row in cursor.fetchall()]
|
||||||
|
print(len(stock_symbols))
|
||||||
|
await train_process(stock_symbols, con)
|
||||||
|
await test_process(con)
|
||||||
|
|
||||||
|
con.close()
|
||||||
|
|
||||||
|
# Run the main function
|
||||||
|
asyncio.run(main())
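# Editor's note (hedged): as written, main() trains only on symbols with marketCap >= 500E9 from
# ../stocks.db, then evaluates the saved weights on GME's fundamentals via test_process.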
|
||||||
219
app/ml_models/lstm.py
Normal file
@ -0,0 +1,219 @@
|
|||||||
|
import yfinance as yf
|
||||||
|
import pandas as pd
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from sklearn.ensemble import RandomForestClassifier
|
||||||
|
from xgboost import XGBClassifier
|
||||||
|
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score
|
||||||
|
from sklearn.preprocessing import MinMaxScaler
|
||||||
|
from ta.utils import *
|
||||||
|
from ta.volatility import *
|
||||||
|
from ta.momentum import *
|
||||||
|
from ta.trend import *
|
||||||
|
from ta.volume import *
|
||||||
|
from tqdm import tqdm
|
||||||
|
from sklearn.feature_selection import SelectKBest, f_classif
|
||||||
|
from keras.models import Sequential
|
||||||
|
from keras.layers import LSTM, Dense, Dropout, BatchNormalization, Bidirectional
|
||||||
|
from keras.optimizers import Adam
|
||||||
|
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
|
||||||
|
from keras.regularizers import l2
|
||||||
|
|
||||||
|
class StockPredictor:
|
||||||
|
def __init__(self, ticker, start_date, end_date):
|
||||||
|
self.ticker = ticker
|
||||||
|
self.start_date = start_date
|
||||||
|
self.end_date = end_date
|
||||||
|
self.nth_day = 60
|
||||||
|
self.model = None #RandomForestClassifier(n_estimators=3500, min_samples_split=100, random_state=42, n_jobs=-1) #XGBClassifier(n_estimators=200, max_depth=2, learning_rate=1, objective='binary:logistic')
|
||||||
|
self.horizons = [3,5,10, 15, 20]
|
||||||
|
self.test_size = 0.2
|
||||||
|
|
||||||
|
def download_data(self):
|
||||||
|
df_original = yf.download(self.ticker, start=self.start_date, end=self.end_date, interval="1d")
|
||||||
|
df_original.index = pd.to_datetime(df_original.index)
|
||||||
|
return df_original
|
||||||
|
|
||||||
|
def preprocess_data(self, df):
|
||||||
|
df['Target'] = (df['Close'].shift(-self.nth_day) > df['Close']).astype(int)
|
||||||
|
df.dropna(inplace=True)
|
||||||
|
return df
|
||||||
|
|
||||||
|
|
||||||
|
def generate_features(self, df):
|
||||||
|
new_predictors = []
|
||||||
|
for horizon in self.horizons:
|
||||||
|
rolling_averages = df.rolling(horizon).mean()
|
||||||
|
|
||||||
|
ratio_column = f"Close_Ratio_{horizon}"
|
||||||
|
df[ratio_column] = df["Close"] / rolling_averages["Close"]
|
||||||
|
new_predictors.append(ratio_column)
|
||||||
|
|
||||||
|
trend_column = f"Trend_{horizon}"
|
||||||
|
df[trend_column] = df["Close"].pct_change(periods=horizon)
|
||||||
|
new_predictors.append(trend_column)
|
||||||
|
|
||||||
|
volatility_column = f"Volatility_{horizon}"
|
||||||
|
df[volatility_column] = df["Close"].pct_change().rolling(horizon).std()
|
||||||
|
new_predictors.append(volatility_column)
|
||||||
|
|
||||||
|
volatility_mean_column = f"Volatility_Mean_{horizon}"
|
||||||
|
df[volatility_mean_column] = df["Close"].pct_change().rolling(horizon).mean()
|
||||||
|
new_predictors.append(volatility_mean_column)
|
||||||
|
|
||||||
|
sma_column = f"SMA_{horizon}"
|
||||||
|
df[sma_column] = sma_indicator(df['Close'], window=horizon)
|
||||||
|
|
||||||
|
ema_column = f"EMA_{horizon}"
|
||||||
|
df[ema_column] = ema_indicator(df['Close'], window=horizon)
|
||||||
|
|
||||||
|
rsi_column = f"RSI_{horizon}"
|
||||||
|
df[rsi_column] = rsi(df["Close"], window=horizon)
|
||||||
|
new_predictors.append(rsi_column)
|
||||||
|
|
||||||
|
stoch_rsi_column = f"STOCH_RSI_{horizon}"
|
||||||
|
df[stoch_rsi_column] = stochrsi_k(df['Close'], window=horizon, smooth1=3, smooth2=3)
|
||||||
|
new_predictors.append(stoch_rsi_column)
|
||||||
|
|
||||||
|
stoch_column = f"STOCH_{horizon}"
|
||||||
|
df[stoch_column] = stoch(df['High'], df['Low'], df['Close'], window=horizon)
|
||||||
|
new_predictors.append(stoch_column)
|
||||||
|
|
||||||
|
roc_column = f"ROC_{horizon}"
|
||||||
|
df[roc_column] = roc(df['Close'], window=horizon)
|
||||||
|
new_predictors.append(roc_column)
|
||||||
|
|
||||||
|
wma_column = f"WMA_{horizon}"
|
||||||
|
df[wma_column] = wma_indicator(df['Close'], window=horizon)
|
||||||
|
new_predictors.append(wma_column)
|
||||||
|
|
||||||
|
# Additional features
|
||||||
|
atr_column = f"ATR_{horizon}"
|
||||||
|
df[atr_column] = average_true_range(df['High'], df['Low'], df['Close'], window=horizon)
|
||||||
|
new_predictors.append(atr_column)
|
||||||
|
|
||||||
|
|
||||||
|
adx_column = f"ADX_{horizon}"
|
||||||
|
df[adx_column] = adx(df['High'], df['Low'], df['Close'], window=horizon)
|
||||||
|
new_predictors.append(adx_column)
|
||||||
|
|
||||||
|
bb_bands_column = f"BB_{horizon}"
|
||||||
|
df[bb_bands_column] = bollinger_hband(df['Close'], window=horizon) / df['Close']
|
||||||
|
new_predictors.append(bb_bands_column)
|
||||||
|
|
||||||
|
|
||||||
|
df['macd'] = macd(df['Close'])
|
||||||
|
df['macd_signal'] = macd_signal(df['Close'])
|
||||||
|
df['macd_hist'] = 2*macd_diff(df['Close'])
|
||||||
|
new_predictors.append('macd')
|
||||||
|
new_predictors.append('macd_signal')
|
||||||
|
new_predictors.append('macd_hist')
|
||||||
|
return new_predictors
|
||||||
|
|
||||||
|
def feature_selection(self, df, predictors):
|
||||||
|
X = df[predictors]
|
||||||
|
y = df['Target']
|
||||||
|
|
||||||
|
selector = SelectKBest(score_func=f_classif, k=5)
|
||||||
|
selector.fit(X, y)
|
||||||
|
|
||||||
|
selector.transform(X)
|
||||||
|
selected_features = [col for i, col in enumerate(X.columns) if selector.get_support()[i]]
|
||||||
|
|
||||||
|
return selected_features
|
||||||
|
|
||||||
|
def build_lstm_model(self,input_shape):
|
||||||
|
model = Sequential()
|
||||||
|
model.add(Bidirectional(LSTM(units=1024, return_sequences=True, kernel_regularizer=l2(0.01)), input_shape=input_shape))
|
||||||
|
model.add(BatchNormalization())
|
||||||
|
model.add(Dropout(0.5))
|
||||||
|
model.add(Bidirectional(LSTM(units=128, return_sequences=True, kernel_regularizer=l2(0.01))))
|
||||||
|
model.add(BatchNormalization())
|
||||||
|
model.add(Dropout(0.25))
|
||||||
|
model.add(Bidirectional(LSTM(units=64, kernel_regularizer=l2(0.01))))
|
||||||
|
model.add(BatchNormalization())
|
||||||
|
model.add(Dropout(0.2))
|
||||||
|
model.add(Dense(units=1, activation='sigmoid'))
|
||||||
|
|
||||||
|
# Learning rate scheduler
|
||||||
|
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.001)
|
||||||
|
# Early stopping
|
||||||
|
early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)
|
||||||
|
|
||||||
|
model.compile(optimizer=Adam(learning_rate=0.001), loss='binary_crossentropy', metrics=['accuracy'])
|
||||||
|
|
||||||
|
return model, [reduce_lr, early_stop]
|
||||||
|
|
||||||
|
|
||||||
|
def train_model(self, X_train, y_train):
|
||||||
|
self.model, callbacks = self.build_lstm_model((X_train.shape[1], X_train.shape[2]))
|
||||||
|
history = self.model.fit(X_train, y_train, epochs=500, batch_size=32, validation_split=0.1, callbacks=callbacks)
|
||||||
|
|
||||||
|
def evaluate_model(self, X_test, y_test):
|
||||||
|
# Reshape X_test to remove the extra dimension
|
||||||
|
X_test_reshaped = X_test.reshape(X_test.shape[0], X_test.shape[2])
|
||||||
|
|
||||||
|
X_test_df = pd.DataFrame(X_test_reshaped, columns=predictors)
|
||||||
|
y_test_df = pd.DataFrame(y_test, columns=['Target'])
|
||||||
|
|
||||||
|
test_df = X_test_df.join(y_test_df)
|
||||||
|
test_df = test_df.iloc[int(len(test_df) * (1 - self.test_size)):]
|
||||||
|
|
||||||
|
# Implement the rest of your evaluation logic here
|
||||||
|
test_predictions = self.model.predict(X_test)
|
||||||
|
test_predictions = (test_predictions > 0.5).astype(int)
|
||||||
|
print(test_predictions)
|
||||||
|
# Assuming you have the model already defined and trained
|
||||||
|
# Perform evaluation metrics on test_predictions and y_test
|
||||||
|
|
||||||
|
test_precision = precision_score(y_test, test_predictions)
|
||||||
|
test_accuracy = accuracy_score(y_test, test_predictions)
|
||||||
|
test_recall = recall_score(y_test, test_predictions)
|
||||||
|
test_f1 = f1_score(y_test, test_predictions)
|
||||||
|
test_roc_auc = roc_auc_score(y_test, test_predictions)
|
||||||
|
|
||||||
|
print("Test Set Metrics:")
|
||||||
|
print(f"Precision: {round(test_precision * 100)}%")
|
||||||
|
print(f"Accuracy: {round(test_accuracy * 100)}%")
|
||||||
|
print(f"Recall: {round(test_recall * 100)}%")
|
||||||
|
print(f"F1-Score: {round(test_f1 * 100)}%")
|
||||||
|
print(f"ROC-AUC: {round(test_roc_auc * 100)}%")
|
||||||
|
|
||||||
|
def predict_next_value(self, df, predictors):
|
||||||
|
# Keras models expose no predict_proba; the sigmoid output itself is the probability.
# Note: the features should be scaled the same way as the training data before prediction.
latest_data_point = df.iloc[-1][predictors].values.reshape(1, 1, len(predictors))
next_value_probability = float(self.model.predict(latest_data_point)[0][0])
next_value_prediction = int(next_value_probability >= 0.5)
|
||||||
|
print("Predicted next value:", next_value_prediction)
|
||||||
|
print("Probability of predicted next value:", round(next_value_probability * 100, 2), "%")
|
||||||
|
latest_date_index = df.index[-1]
|
||||||
|
next_prediction_date = latest_date_index + pd.DateOffset(days=self.nth_day)
|
||||||
|
print("Corresponding date for the next prediction:", next_prediction_date)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
ticker = 'AAPL'
|
||||||
|
start_date = datetime(2000, 1, 1)
|
||||||
|
end_date = datetime.today()
|
||||||
|
|
||||||
|
predictor = StockPredictor(ticker, start_date, end_date)
|
||||||
|
df = predictor.download_data()
|
||||||
|
|
||||||
|
predictors = predictor.generate_features(df)
|
||||||
|
df = predictor.preprocess_data(df)
|
||||||
|
|
||||||
|
X = df[predictors].values
|
||||||
|
y = df['Target'].values
|
||||||
|
|
||||||
|
# Normalize features
|
||||||
|
scaler = MinMaxScaler(feature_range=(0, 1))
|
||||||
|
X = scaler.fit_transform(X)
|
||||||
|
|
||||||
|
# Reshape data for LSTM
|
||||||
|
X = X.reshape((X.shape[0], 1, X.shape[1]))
|
||||||
|
|
||||||
|
train_size = int(len(X) * (1 - predictor.test_size))
|
||||||
|
X_train, X_test = X[:train_size], X[train_size:]
|
||||||
|
y_train, y_test = y[:train_size], y[train_size:]
|
||||||
|
|
||||||
|
predictor.train_model(X_train, y_train)
|
||||||
|
predictor.evaluate_model(X_test, y_test)
|
||||||
|
predictor.predict_next_value(df, predictors)
|
||||||
94
app/ml_models/prophet_model.py
Normal file
@ -0,0 +1,94 @@
|
|||||||
|
import pandas as pd
|
||||||
|
from prophet import Prophet
|
||||||
|
from datetime import datetime
|
||||||
|
import yfinance as yf
|
||||||
|
import numpy as np
|
||||||
|
import asyncio
|
||||||
|
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
|
||||||
|
#import matplotlib.pyplot as plt
|
||||||
|
|
||||||
|
|
||||||
|
async def download_data(ticker, start_date, end_date):
|
||||||
|
try:
|
||||||
|
df = yf.download(ticker, start=start_date, end=end_date, interval="1d")
|
||||||
|
df = df.reset_index()
|
||||||
|
df = df[['Date', 'Adj Close']]
|
||||||
|
df = df.rename(columns={"Date": "ds", "Adj Close": "y"})
|
||||||
|
if len(df) > 252*2: #At least 2 years of history is necessary
|
||||||
|
#df['y'] = df['y'].rolling(window=200).mean()
|
||||||
|
#df = df.dropna()
|
||||||
|
return df
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
|
||||||
|
class PricePredictor:
|
||||||
|
def __init__(self, predict_ndays=365):
|
||||||
|
self.predict_ndays = predict_ndays
|
||||||
|
self.model = Prophet(
|
||||||
|
interval_width = 0.8,
|
||||||
|
daily_seasonality=True,
|
||||||
|
yearly_seasonality = True,
|
||||||
|
)
|
||||||
|
|
||||||
|
def run(self, df):
|
||||||
|
self.model.fit(df)
|
||||||
|
future = self.model.make_future_dataframe(periods=self.predict_ndays)
|
||||||
|
forecast = self.model.predict(future)
|
||||||
|
|
||||||
|
# Apply rolling average to smooth the upper bound
|
||||||
|
rolling_window = 200
|
||||||
|
forecast['smoothed_upper'] = round(forecast['yhat_upper'].rolling(window=rolling_window, min_periods=1).mean(),2)
|
||||||
|
forecast['smoothed_lower'] = round(forecast['yhat_lower'].rolling(window=rolling_window, min_periods=1).mean(),2)
|
||||||
|
forecast['smoothed_mean'] = round(forecast['yhat'].rolling(window=rolling_window, min_periods=1).mean(),2)
|
||||||
|
|
||||||
|
actual_values = df['y'].values
|
||||||
|
predicted_values = forecast['yhat'].values[:-self.predict_ndays]
|
||||||
|
|
||||||
|
rmse = round(np.sqrt(mean_squared_error(actual_values, predicted_values)),2)
|
||||||
|
mape = round(np.mean(np.abs((actual_values - predicted_values) / actual_values)) * 100)
|
||||||
|
r2 = round(r2_score(actual_values, predicted_values)*100)
|
||||||
|
|
||||||
|
print("RMSE:", rmse)
|
||||||
|
print("MAPE:", mape)
|
||||||
|
print("R2 Score:", r2)
|
||||||
|
pred_date_list = forecast['ds'][-1200-self.predict_ndays:].dt.strftime('%Y-%m-%d').tolist()
|
||||||
|
upper_list = forecast['smoothed_upper'][-1200-self.predict_ndays:].tolist()
|
||||||
|
lower_list = forecast['smoothed_lower'][-1200-self.predict_ndays:].tolist()
|
||||||
|
mean_list = forecast['smoothed_mean'][-1200-self.predict_ndays:].tolist()
|
||||||
|
|
||||||
|
historical_date_list = df['ds'][-1200:].dt.strftime('%Y-%m-%d').tolist()
|
||||||
|
historical_price_list = round(df['y'][-1200:],2).tolist()
|
||||||
|
|
||||||
|
return {'rmse': rmse,'mape': mape,'r2Score':r2, 'historicalPrice': historical_price_list, 'predictionDate': pred_date_list, 'upperBand': upper_list, 'lowerBand': lower_list, 'meanResult': mean_list}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#Test Mode
|
||||||
|
async def main():
|
||||||
|
for ticker in ['TSLA']:
|
||||||
|
start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
|
||||||
|
end_date = datetime.today().strftime("%Y-%m-%d")
|
||||||
|
df = await download_data(ticker, start_date, end_date)
|
||||||
|
data = PricePredictor().run(df)
|
||||||
|
|
||||||
|
# Run the main function
|
||||||
|
#asyncio.run(main())
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Plotting
|
||||||
|
'''
|
||||||
|
fig, ax = plt.subplots(figsize=(10, 6))
|
||||||
|
|
||||||
|
ax.plot(forecast['ds'][-1200-predict_ndays:], forecast['smoothed_mean'][-1200-predict_ndays:], color='blue', label='Predicted')
|
||||||
|
ax.fill_between(forecast['ds'][-1200-predict_ndays:], forecast['smoothed_lower'][-1200-predict_ndays:], forecast['smoothed_upper'][-1200-predict_ndays:], color='gray', alpha=0.5, label='Confidence Interval')
|
||||||
|
ax.plot(df['ds'][-1200:], df['y'][-1200:], color='black', label='Actual')
|
||||||
|
ax.set_xlabel('Date')
|
||||||
|
ax.set_ylabel('Price')
|
||||||
|
ax.set_title('Forecasted Prices for {}'.format(ticker))
|
||||||
|
ax.legend()
|
||||||
|
ax.grid(True)
|
||||||
|
plt.show()
|
||||||
|
'''
|
||||||
152
app/ml_models/regression.py
Normal file
@ -0,0 +1,152 @@
|
|||||||
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler

from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor

from keras.models import Sequential, load_model
from keras.layers import LSTM, Dense, Dropout, BatchNormalization, Bidirectional
from keras.regularizers import l2
import time
from datetime import datetime, timedelta
from xgboost import XGBRegressor
from backtesting import Backtesting
import yfinance as yf


class regression_model:
    def __init__(self, model_name, data, test_size, time_step, nth_day):
        self.model_name = model_name
        self.data = data
        self.test_size = test_size
        self.time_step = time_step
        self.nth_day = nth_day

    def correct_weekday(self, select_date):
        # Monday is 0 and Sunday is 6
        if select_date.weekday() > 4:
            select_date = select_date - timedelta(select_date.weekday() - 4)
        else:
            pass
        return select_date

    def run(self):
        dates = self.data['Date']
        df = self.data['Close']
        scaler = MinMaxScaler(feature_range=(0, 1))
        df = scaler.fit_transform(np.array(df).reshape(-1, 1))

        test_split_idx = int(df.shape[0] * (1 - self.test_size))

        train_data = df[:test_split_idx].copy()
        test_data = df[test_split_idx:].copy()

        # convert an array of values into a dataset matrix
        def create_dataset(dataset):
            dataX, dataY = [], []
            for i in range(len(dataset) - self.time_step - 1 - self.nth_day):
                a = dataset[i:(i + self.time_step), 0]
                dataX.append(a)
                dataY.append(dataset[i + self.time_step + self.nth_day, 0])
            return np.array(dataX), np.array(dataY)

        def create_date_dataset(dataset):
            dataX = []
            for i in range(len(dataset) - self.time_step - 1 - self.nth_day):
                a = dataset[i:(i + self.time_step)].iloc[-1]
                dataX.append(a)
            return pd.DataFrame(dataX)

        X_train, y_train = create_dataset(train_data)
        X_test, y_test = create_dataset(test_data)

        def fit_model(model, X_train, y_train):
            if self.model_name == 'LSTM':
                model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.1)
            else:
                model.fit(X_train, y_train)

        if self.model_name == 'LinearRegression':
            model = LinearRegression(n_jobs=-1)
        elif self.model_name == "XGBoost":
            model = XGBRegressor(max_depth=10)
        elif self.model_name == "SVR":
            model = SVR()
        elif self.model_name == 'RandomForestRegressor':
            model = RandomForestRegressor()
        elif self.model_name == 'KNeighborsRegressor':
            model = KNeighborsRegressor()
        elif self.model_name == 'LSTM':
            model = Sequential()
            model.add(Bidirectional(LSTM(units=100, return_sequences=True, kernel_regularizer=l2(0.01)), input_shape=(self.time_step, 1)))
            model.add(BatchNormalization())
            model.add(Dropout(0.5))
            model.add(Bidirectional(LSTM(units=50, return_sequences=True, kernel_regularizer=l2(0.01))))
            model.add(BatchNormalization())
            model.add(Dropout(0.25))
            model.add(Bidirectional(LSTM(units=10, kernel_regularizer=l2(0.01))))
            model.add(BatchNormalization())
            model.add(Dropout(0.2))
            model.add(Dense(units=1))
            model.compile(optimizer='sgd', loss='mean_squared_error')
        else:
            model = LinearRegression()

        fit_model(model, X_train, y_train)

        train_predict = model.predict(X_train)
        train_predict = train_predict.reshape(-1, 1)
        test_predict = model.predict(X_test)
        test_predict = test_predict.reshape(-1, 1)

        train_predict = scaler.inverse_transform(train_predict)
        test_predict = scaler.inverse_transform(test_predict)
        original_ytrain = scaler.inverse_transform(y_train.reshape(-1, 1))
        original_ytest = scaler.inverse_transform(y_test.reshape(-1, 1))

        performance = Backtesting(original_ytrain, train_predict, original_ytest, test_predict).run()

        train_dates = dates[:test_split_idx].copy()
        test_dates = dates[test_split_idx:].copy()

        train_dates = create_date_dataset(train_dates)
        test_dates = create_date_dataset(test_dates)

        train_res = pd.DataFrame()
        train_res['Date'] = train_dates
        train_res['train'] = pd.DataFrame(train_predict)

        test_res = pd.DataFrame()
        test_res['Date'] = test_dates
        test_res['test'] = pd.DataFrame(test_predict)

        # Predict nth_day
        x_input = test_data[len(test_data) - self.time_step:].reshape(1, -1)
        yhat = model.predict(x_input)
        new_pred_df = pd.DataFrame(scaler.inverse_transform(yhat.reshape(-1, 1)).reshape(1, -1).tolist()[0])

        pred_res = pd.DataFrame()
        pred_res['yhat'] = new_pred_df
        print(performance)
        print(pred_res)

        return performance, train_res, test_res, pred_res


ticker = 'AMD'
start_date = datetime(2000, 1, 1)
end_date = datetime(2024,2,1) #datetime.today()
df = yf.download(ticker, start=start_date, end=end_date, interval="1d")
df = df.reset_index()
model_name = 'LinearRegression'
test_size = 0.2
time_step = 1
nth_day = 20 # Change this value to the desired nth_day
metric, train_df, test_df, pred_df = regression_model(model_name, df, test_size=test_size, \
                                                      time_step=time_step, nth_day=nth_day).run()
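The `create_dataset` helper above turns the scaled price series into supervised pairs: each sample is `time_step` consecutive values and its label is the value `nth_day` steps past the end of the window. A standalone sketch of the same windowing on a toy array (the `make_windows` name and the toy data are illustrative, not part of the module):

```python
import numpy as np

def make_windows(series, time_step, nth_day):
    # Same indexing as create_dataset above: X is a window of `time_step`
    # values, y is the value `nth_day` steps past the end of the window.
    X, y = [], []
    for i in range(len(series) - time_step - 1 - nth_day):
        X.append(series[i:i + time_step])
        y.append(series[i + time_step + nth_day])
    return np.array(X), np.array(y)

prices = np.arange(10.0)                       # toy "prices" 0..9
X, y = make_windows(prices, time_step=3, nth_day=2)
print(X[0], y[0])                              # [0. 1. 2.] 5.0
```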
257
app/ml_models/test.py
Normal file
257
app/ml_models/test.py
Normal file
@ -0,0 +1,257 @@
|
|||||||
|
import yfinance as yf
|
||||||
|
import pandas as pd
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from sklearn.ensemble import RandomForestClassifier
|
||||||
|
import numpy as np
|
||||||
|
from xgboost import XGBClassifier
|
||||||
|
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score
|
||||||
|
from sklearn.model_selection import train_test_split
|
||||||
|
from sklearn.preprocessing import MinMaxScaler, StandardScaler
|
||||||
|
from tqdm import tqdm
|
||||||
|
from sklearn.feature_selection import SelectKBest, f_classif
|
||||||
|
from collections import defaultdict
|
||||||
|
import asyncio
|
||||||
|
import aiohttp
|
||||||
|
import pickle
|
||||||
|
import time
|
||||||
|
import sqlite3
|
||||||
|
import ujson
|
||||||
|
|
||||||
|
|
||||||
|
#Based on the paper: https://arxiv.org/pdf/1603.00751
|
||||||
|
|
||||||
|
|
||||||
|
async def download_data(ticker, con, start_date, end_date):
|
||||||
|
try:
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
income, income_growth, balance, balance_growth, cashflow, cashflow_growth, ratios
|
||||||
|
FROM
|
||||||
|
stocks
|
||||||
|
WHERE
|
||||||
|
symbol = ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
query_df = pd.read_sql_query(query_template, con, params=(ticker,))
|
||||||
|
|
||||||
|
income = ujson.loads(query_df['income'].iloc[0])
|
||||||
|
|
||||||
|
#Only consider companies with at least 10 years' worth of data
|
||||||
|
if len(income) < 40:
|
||||||
|
raise ValueError("Income data length is too small.")
|
||||||
|
|
||||||
|
income = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in income if int(item["date"][:4]) >= 2000]
|
||||||
|
income_growth = ujson.loads(query_df['income_growth'].iloc[0])
|
||||||
|
income_growth = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in income_growth if int(item["date"][:4]) >= 2000]
|
||||||
|
|
||||||
|
balance = ujson.loads(query_df['balance'].iloc[0])
|
||||||
|
balance = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in balance if int(item["date"][:4]) >= 2000]
|
||||||
|
balance_growth = ujson.loads(query_df['balance_growth'].iloc[0])
|
||||||
|
balance_growth = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in balance_growth if int(item["date"][:4]) >= 2000]
|
||||||
|
|
||||||
|
cashflow = ujson.loads(query_df['cashflow'].iloc[0])
|
||||||
|
cashflow = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in cashflow if int(item["date"][:4]) >= 2000]
|
||||||
|
cashflow_growth = ujson.loads(query_df['cashflow_growth'].iloc[0])
|
||||||
|
cashflow_growth = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in cashflow_growth if int(item["date"][:4]) >= 2000]
|
||||||
|
|
||||||
|
|
||||||
|
ratios = ujson.loads(query_df['ratios'].iloc[0])
|
||||||
|
ratios = [{k: v for k, v in item.items() if k not in ["symbol","reportedCurrency","calendarYear","fillingDate","acceptedDate","period","cik","link", "finalLink"]} for item in ratios if int(item["date"][:4]) >= 2000]
|
||||||
|
|
||||||
|
combined_data = defaultdict(dict)
|
||||||
|
# Iterate over all lists simultaneously
|
||||||
|
for entries in zip(income, income_growth, balance, balance_growth, cashflow, cashflow_growth, ratios):
|
||||||
|
# Iterate over each entry in the current set of entries
|
||||||
|
for entry in entries:
|
||||||
|
date = entry['date']
|
||||||
|
# Merge entry data into combined_data, skipping duplicate keys
|
||||||
|
for key, value in entry.items():
|
||||||
|
if key not in combined_data[date]:
|
||||||
|
combined_data[date][key] = value
|
||||||
|
|
||||||
|
combined_data = list(combined_data.values())
|
||||||
|
|
||||||
|
df = yf.download(ticker, start=start_date, end=end_date, interval="1d").reset_index()
|
||||||
|
df = df.rename(columns={'Adj Close': 'close', 'Date': 'date'})
|
||||||
|
#print(df[['date','close']])
|
||||||
|
df['date'] = df['date'].dt.strftime('%Y-%m-%d')
|
||||||
|
|
||||||
|
|
||||||
|
for item in combined_data:
|
||||||
|
# Find close price for '2023-09-30' or the closest available date prior to it
|
||||||
|
target_date = item['date']
|
||||||
|
counter = 0
|
||||||
|
max_attempts = 10
|
||||||
|
|
||||||
|
while target_date not in df['date'].values and counter < max_attempts:
|
||||||
|
# If the target date doesn't exist, move one day back
|
||||||
|
target_date = (pd.to_datetime(target_date) - pd.Timedelta(days=1)).strftime('%Y-%m-%d')
|
||||||
|
counter += 1
|
||||||
|
if counter == max_attempts:
|
||||||
|
break
|
||||||
|
|
||||||
|
|
||||||
|
# Get the close price for the found or closest date
|
||||||
|
close_price = round(df[df['date'] == target_date]['close'].values[0],2)
|
||||||
|
item['price'] = close_price
|
||||||
|
#print(f"Close price for {target_date}: {close_price}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
combined_data = sorted(combined_data, key=lambda x: x['date'])
|
||||||
|
|
||||||
|
|
||||||
|
df_income = pd.DataFrame(combined_data).dropna()
|
||||||
|
|
||||||
|
df_income['Target'] = ((df_income['price'].shift(-1) - df_income['price']) / df_income['price'] > 0).astype(int)
|
||||||
|
|
||||||
|
df_copy = df_income.copy()
|
||||||
|
|
||||||
|
return df_copy
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
|
|
||||||
|
|
||||||
|
class FundamentalPredictor:
|
||||||
|
def __init__(self, path='weights'):
|
||||||
|
self.model = XGBClassifier() #RandomForestClassifier(n_estimators=1000, max_depth = 20, min_samples_split=10, random_state=42, n_jobs=10)
|
||||||
|
self.scaler = StandardScaler()
|
||||||
|
self.path = path
|
||||||
|
|
||||||
|
def feature_selection(self, X_train, y_train,k=8):
|
||||||
|
'''
|
||||||
|
selector = SelectKBest(score_func=f_classif, k=8)
|
||||||
|
selector.fit(X_train, y_train)
|
||||||
|
|
||||||
|
selector.transform(X_train)
|
||||||
|
selected_features = [col for i, col in enumerate(X_train.columns) if selector.get_support()[i]]
|
||||||
|
|
||||||
|
return selected_features
|
||||||
|
'''
|
||||||
|
# Calculate the variance of each feature with respect to the target
|
||||||
|
variances = {}
|
||||||
|
for col in X_train.columns:
|
||||||
|
grouped_variance = X_train.groupby(y_train)[col].var().mean()
|
||||||
|
variances[col] = grouped_variance
|
||||||
|
|
||||||
|
# Sort features by variance and select top k features
|
||||||
|
sorted_features = sorted(variances, key=variances.get, reverse=True)[:k]
|
||||||
|
return sorted_features
|
||||||
|
|
||||||
|
|
||||||
|
def train_model(self, X_train, y_train):
|
||||||
|
X_train = X_train.applymap(lambda x: 1 if x == 0 else x) #Replace 0 with 1 as suggested in the paper
|
||||||
|
X_train = np.where(np.isinf(X_train), np.nan, X_train)
|
||||||
|
X_train = np.nan_to_num(X_train)
|
||||||
|
|
||||||
|
X_train = self.scaler.fit_transform(X_train)
|
||||||
|
self.model.fit(X_train, y_train)
|
||||||
|
pickle.dump(self.model, open(f'{self.path}/fundamental_weights/weights.pkl', 'wb'))
|
||||||
|
|
||||||
|
def evaluate_model(self, X_test, y_test):
|
||||||
|
X_test = X_test.applymap(lambda x: 1 if x == 0 else x) #Replace 0 with 1 as suggested in the paper
|
||||||
|
X_test = np.where(np.isinf(X_test), np.nan, X_test)
|
||||||
|
X_test = np.nan_to_num(X_test)
|
||||||
|
|
||||||
|
X_test = self.scaler.fit_transform(X_test)
|
||||||
|
|
||||||
|
with open(f'{self.path}/fundamental_weights/weights.pkl', 'rb') as f:
|
||||||
|
self.model = pickle.load(f)
|
||||||
|
|
||||||
|
#test_predictions = self.model.predict(X_test)
|
||||||
|
test_predictions = self.model.predict_proba(X_test)[:,1]
|
||||||
|
|
||||||
|
test_predictions[test_predictions >=.5] = 1
|
||||||
|
test_predictions[test_predictions <.5] = 0
|
||||||
|
|
||||||
|
#print(y_test)
|
||||||
|
|
||||||
|
test_precision = precision_score(y_test, test_predictions)
|
||||||
|
test_accuracy = accuracy_score(y_test, test_predictions)
|
||||||
|
#test_recall = recall_score(y_test, test_predictions)
|
||||||
|
#test_f1 = f1_score(y_test, test_predictions)
|
||||||
|
#test_roc_auc = roc_auc_score(y_test, test_predictions)
|
||||||
|
|
||||||
|
|
||||||
|
print("Test Set Metrics:")
|
||||||
|
print(f"Precision: {round(test_precision * 100)}%")
|
||||||
|
print(f"Accuracy: {round(test_accuracy * 100)}%")
|
||||||
|
#print(f"Recall: {round(test_recall * 100)}%")
|
||||||
|
#print(f"F1-Score: {round(test_f1 * 100)}%")
|
||||||
|
#print(f"ROC-AUC: {round(test_roc_auc * 100)}%")
|
||||||
|
#print("Number of value counts in the test set")
|
||||||
|
#print(pd.DataFrame(test_predictions).value_counts())
|
||||||
|
|
||||||
|
next_value_prediction = 1 if test_predictions[-1] >= 0.5 else 0
|
||||||
|
return {'accuracy': round(test_accuracy*100), 'precision': round(test_precision*100), 'sentiment': 'Bullish' if next_value_prediction == 1 else 'Bearish'}, test_predictions
|
||||||
|
|
||||||
|
|
||||||
|
#Train mode
|
||||||
|
async def train_process(tickers, con):
|
||||||
|
tickers = list(set(tickers))
|
||||||
|
|
||||||
|
df_train = pd.DataFrame()
|
||||||
|
df_test = pd.DataFrame()
|
||||||
|
test_size = 0.4
|
||||||
|
start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
|
||||||
|
end_date = datetime.today().strftime("%Y-%m-%d")
|
||||||
|
predictor = FundamentalPredictor()
|
||||||
|
df_train = pd.DataFrame()
|
||||||
|
df_test = pd.DataFrame()
|
||||||
|
|
||||||
|
|
||||||
|
tasks = [download_data(ticker, con, start_date, end_date) for ticker in tickers]
|
||||||
|
dfs = await asyncio.gather(*tasks)
|
||||||
|
for df in dfs:
|
||||||
|
try:
|
||||||
|
split_size = int(len(df) * (1-test_size))
|
||||||
|
train_data = df.iloc[:split_size]
|
||||||
|
test_data = df.iloc[split_size:]
|
||||||
|
df_train = pd.concat([df_train, train_data], ignore_index=True)
|
||||||
|
df_test = pd.concat([df_test, test_data], ignore_index=True)
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
best_features = [col for col in df_train.columns if col not in ['date','price','Target']]
|
||||||
|
|
||||||
|
df_train = df_train.sample(frac=1).reset_index(drop=True)
|
||||||
|
print('======Train Set Datapoints======')
|
||||||
|
print(len(df_train))
|
||||||
|
#selected_features = predictor.feature_selection(df_train[best_features], df_train['Target'],k=10)
|
||||||
|
#print(selected_features)
|
||||||
|
#selected_features = [col for col in df_train if col not in ['price','date','Target']]
|
||||||
|
selected_features = ['growthRevenue','ebitda','priceToBookRatio','eps','priceToSalesRatio','growthOtherCurrentLiabilities', 'receivablesTurnover', 'totalLiabilitiesAndStockholdersEquity', 'totalLiabilitiesAndTotalEquity', 'totalAssets', 'growthOtherCurrentAssets', 'retainedEarnings', 'totalEquity', 'totalStockholdersEquity', 'totalNonCurrentAssets']
|
||||||
|
|
||||||
|
predictor.train_model(df_train[selected_features], df_train['Target'])
|
||||||
|
predictor.evaluate_model(df_test[selected_features], df_test['Target'])
|
||||||
|
|
||||||
|
|
||||||
|
async def test_process(con):
|
||||||
|
test_size = 0.4
|
||||||
|
start_date = datetime(2000, 1, 1).strftime("%Y-%m-%d")
|
||||||
|
end_date = datetime.today().strftime("%Y-%m-%d")
|
||||||
|
predictor = FundamentalPredictor()
|
||||||
|
df = await download_data('GME', con, start_date, end_date)
|
||||||
|
split_size = int(len(df) * (1-test_size))
|
||||||
|
test_data = df.iloc[split_size:]
|
||||||
|
selected_features = ['growthRevenue','ebitda','priceToBookRatio','eps','priceToSalesRatio','growthOtherCurrentLiabilities', 'receivablesTurnover', 'totalLiabilitiesAndStockholdersEquity', 'totalLiabilitiesAndTotalEquity', 'totalAssets', 'growthOtherCurrentAssets', 'retainedEarnings', 'totalEquity', 'totalStockholdersEquity', 'totalNonCurrentAssets']
|
||||||
|
#selected_features = [col for col in test_data if col not in ['price','date','Target']]
|
||||||
|
predictor.evaluate_model(test_data[selected_features], test_data['Target'])
|
||||||
|
|
||||||
|
async def main():
|
||||||
|
con = sqlite3.connect('../stocks.db')
|
||||||
|
cursor = con.cursor()
|
||||||
|
cursor.execute("PRAGMA journal_mode = wal")
|
||||||
|
cursor.execute("SELECT DISTINCT symbol FROM stocks WHERE marketCap >= 500E9")
|
||||||
|
stock_symbols = [row[0] for row in cursor.fetchall()]
|
||||||
|
print(len(stock_symbols))
|
||||||
|
#selected_features = ['operatingIncomeRatio','growthRevenue','revenue','netIncome','priceToSalesRatio']
|
||||||
|
await train_process(stock_symbols, con)
|
||||||
|
await test_process(con)
|
||||||
|
|
||||||
|
con.close()
|
||||||
|
|
||||||
|
# Run the main function
|
||||||
|
asyncio.run(main())
|
||||||
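The classifier above is trained on the binary `Target` column built in `download_data`: a row is labeled 1 when the price at the next report date is higher than at the current one. A tiny pandas sketch of that labeling step on made-up prices (illustrative only):

```python
import pandas as pd

# Toy quarterly close prices; Target = 1 when the *next* report's price is higher,
# mirroring the labeling in download_data above. The last row has no future price,
# so the NaN comparison evaluates to False and it is labeled 0.
price = pd.Series([10.0, 12.0, 11.0, 11.5])
target = ((price.shift(-1) - price) / price > 0).astype(int)
print(target.tolist())   # [1, 0, 1, 0]
```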
518
app/ml_models/test2.py
Normal file
518
app/ml_models/test2.py
Normal file
@ -0,0 +1,518 @@
|
|||||||
|
data = [
|
||||||
|
{
|
||||||
|
"year": "2028",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 114629191658,
|
||||||
|
"revenuePercentage": 38.27,
|
||||||
|
"ebitda": 17326854891,
|
||||||
|
"ebitdaPercentage": 15.12,
|
||||||
|
"ebit": 9902916035,
|
||||||
|
"ebitPercentage": 8.64,
|
||||||
|
"depreciation": 7423938855,
|
||||||
|
"depreciationPercentage": 6.48,
|
||||||
|
"totalCash": 27053022289,
|
||||||
|
"totalCashPercentage": 23.6,
|
||||||
|
"receivables": 24437715130,
|
||||||
|
"receivablesPercentage": 21.32,
|
||||||
|
"inventories": 17418440963,
|
||||||
|
"inventoriesPercentage": 15.2,
|
||||||
|
"payable": 10805895396,
|
||||||
|
"payablePercentage": 9.43,
|
||||||
|
"capitalExpenditure": -2838432889,
|
||||||
|
"capitalExpenditurePercentage": -2.48,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.08,
|
||||||
|
"taxRateCash": 4430363,
|
||||||
|
"ebiat": 9464180956,
|
||||||
|
"ufcf": 5455551112,
|
||||||
|
"sumPvUfcf": 11958854466,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39721249234,
|
||||||
|
"enterpriseValue": 51680103700,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52610103700,
|
||||||
|
"equityValuePerShare": 32.38,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"year": "2027",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 82901959141,
|
||||||
|
"revenuePercentage": 38.27,
|
||||||
|
"ebitda": 12531103076,
|
||||||
|
"ebitdaPercentage": 15.12,
|
||||||
|
"ebit": 7161972693,
|
||||||
|
"ebitPercentage": 8.64,
|
||||||
|
"depreciation": 5369130382,
|
||||||
|
"depreciationPercentage": 6.48,
|
||||||
|
"totalCash": 19565247874,
|
||||||
|
"totalCashPercentage": 23.6,
|
||||||
|
"receivables": 17673809192,
|
||||||
|
"receivablesPercentage": 21.32,
|
||||||
|
"inventories": 12597339823,
|
||||||
|
"inventoriesPercentage": 15.2,
|
||||||
|
"payable": 7815024128,
|
||||||
|
"payablePercentage": 9.43,
|
||||||
|
"capitalExpenditure": -2052807352,
|
||||||
|
"capitalExpenditurePercentage": -2.48,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.08,
|
||||||
|
"taxRateCash": 4430363,
|
||||||
|
"ebiat": 6844671340,
|
||||||
|
"ufcf": 3945555829,
|
||||||
|
"sumPvUfcf": 0,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39721249234,
|
||||||
|
"enterpriseValue": 51680103700,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52610103700,
|
||||||
|
"equityValuePerShare": 32.38,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"year": "2026",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 59956235668,
|
||||||
|
"revenuePercentage": 38.27,
|
||||||
|
"ebitda": 9062726345,
|
||||||
|
"ebitdaPercentage": 15.12,
|
||||||
|
"ebit": 5179671591,
|
||||||
|
"ebitPercentage": 8.64,
|
||||||
|
"depreciation": 3883054753,
|
||||||
|
"depreciationPercentage": 6.48,
|
||||||
|
"totalCash": 14149950430,
|
||||||
|
"totalCashPercentage": 23.6,
|
||||||
|
"receivables": 12782026867,
|
||||||
|
"receivablesPercentage": 21.32,
|
||||||
|
"inventories": 9110629990,
|
||||||
|
"inventoriesPercentage": 15.2,
|
||||||
|
"payable": 5651970511,
|
||||||
|
"payablePercentage": 9.43,
|
||||||
|
"capitalExpenditure": -1484628381,
|
||||||
|
"capitalExpenditurePercentage": -2.48,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.08,
|
||||||
|
"taxRateCash": 4430363,
|
||||||
|
"ebiat": 4950193363,
|
||||||
|
"ufcf": 2853499211,
|
||||||
|
"sumPvUfcf": 0,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39721249234,
|
||||||
|
"enterpriseValue": 51680103700,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52610103700,
|
||||||
|
"equityValuePerShare": 32.38,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"year": "2025",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 43361462536,
|
||||||
|
"revenuePercentage": 38.27,
|
||||||
|
"ebitda": 6554331914,
|
||||||
|
"ebitdaPercentage": 15.12,
|
||||||
|
"ebit": 3746034639,
|
||||||
|
"ebitPercentage": 8.64,
|
||||||
|
"depreciation": 2808297274,
|
||||||
|
"depreciationPercentage": 6.48,
|
||||||
|
"totalCash": 10233506801,
|
||||||
|
"totalCashPercentage": 23.6,
|
||||||
|
"receivables": 9244199088,
|
||||||
|
"receivablesPercentage": 21.32,
|
||||||
|
"inventories": 6588976719,
|
||||||
|
"inventoriesPercentage": 15.2,
|
||||||
|
"payable": 4087609985,
|
||||||
|
"payablePercentage": 9.43,
|
||||||
|
"capitalExpenditure": -1073710802,
|
||||||
|
"capitalExpenditurePercentage": -2.48,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.08,
|
||||||
|
"taxRateCash": 4430363,
|
||||||
|
"ebiat": 3580071725,
|
||||||
|
"ufcf": 2063703597,
|
||||||
|
"sumPvUfcf": 0,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39721249234,
|
||||||
|
"enterpriseValue": 51680103700,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52610103700,
|
||||||
|
"equityValuePerShare": 32.38,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"year": "2024",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 31359814577,
|
||||||
|
"revenuePercentage": 38.27,
|
||||||
|
"ebitda": 4740214501,
|
||||||
|
"ebitdaPercentage": 15.12,
|
||||||
|
"ebit": 2709201784,
|
||||||
|
"ebitPercentage": 8.64,
|
||||||
|
"depreciation": 2031012716,
|
||||||
|
"depreciationPercentage": 6.48,
|
||||||
|
"totalCash": 7401062072,
|
||||||
|
"totalCashPercentage": 23.6,
|
||||||
|
"receivables": 6685576370,
|
||||||
|
"receivablesPercentage": 21.32,
|
||||||
|
"inventories": 4765270267,
|
||||||
|
"inventoriesPercentage": 15.2,
|
||||||
|
"payable": 2956235415,
|
||||||
|
"payablePercentage": 9.43,
|
||||||
|
"capitalExpenditure": -776527582,
|
||||||
|
"capitalExpenditurePercentage": -2.48,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.08,
|
||||||
|
"taxRateCash": 4430363,
|
||||||
|
"ebiat": 2589174324,
|
||||||
|
"ufcf": 3030048236,
|
||||||
|
"sumPvUfcf": 0,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39721249234,
|
||||||
|
"enterpriseValue": 51680103700,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52610103700,
|
||||||
|
"equityValuePerShare": 32.38,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"year": "2023",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 22680000000,
|
||||||
|
"revenuePercentage": -3.9,
|
||||||
|
"ebitda": 4149000000,
|
||||||
|
"ebitdaPercentage": 18.29,
|
||||||
|
"ebit": 598000000,
|
||||||
|
"ebitPercentage": 2.64,
|
||||||
|
"depreciation": 3551000000,
|
||||||
|
"depreciationPercentage": 15.66,
|
||||||
|
"totalCash": 5773000000,
|
||||||
|
"totalCashPercentage": 25.45,
|
||||||
|
"receivables": 5385000000,
|
||||||
|
"receivablesPercentage": 23.74,
|
||||||
|
"inventories": 4351000000,
|
||||||
|
"inventoriesPercentage": 19.18,
|
||||||
|
"payable": 2055000000,
|
||||||
|
"payablePercentage": 9.06,
|
||||||
|
"capitalExpenditure": -546000000,
|
||||||
|
"capitalExpenditurePercentage": -2.41,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.08,
|
||||||
|
"taxRateCash": -73577235,
|
||||||
|
"ebiat": 1037991869,
|
||||||
|
"ufcf": 1767991869,
|
||||||
|
"sumPvUfcf": 0,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39717576725,
|
||||||
|
"enterpriseValue": 51676431191,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52606431191,
|
||||||
|
"equityValuePerShare": 32.37,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"year": "2022",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 23601000000,
|
||||||
|
"revenuePercentage": 43.61,
|
||||||
|
"ebitda": 3372000000,
|
||||||
|
"ebitdaPercentage": 14.29,
|
||||||
|
"ebit": 1264000000,
|
||||||
|
"ebitPercentage": 5.36,
|
||||||
|
"depreciation": 2108000000,
|
||||||
|
"depreciationPercentage": 8.93,
|
||||||
|
"totalCash": 5855000000,
|
||||||
|
"totalCashPercentage": 24.81,
|
||||||
|
"receivables": 4128000000,
|
||||||
|
"receivablesPercentage": 17.49,
|
||||||
|
"inventories": 3771000000,
|
||||||
|
"inventoriesPercentage": 15.98,
|
||||||
|
"payable": 2493000000,
|
||||||
|
"payablePercentage": 10.56,
|
||||||
|
"capitalExpenditure": -450000000,
|
||||||
|
"capitalExpenditurePercentage": -1.91,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.08,
|
||||||
|
"taxRateCash": -11486486,
|
||||||
|
"ebiat": 1409189189,
|
||||||
|
"ufcf": 1003189189,
|
||||||
|
"sumPvUfcf": 0,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39717576725,
|
||||||
|
"enterpriseValue": 51676431191,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52606431191,
|
||||||
|
"equityValuePerShare": 32.37,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"year": "2021",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 16434000000,
|
||||||
|
"revenuePercentage": 68.33,
|
||||||
|
"ebitda": 3703000000,
|
||||||
|
"ebitdaPercentage": 22.53,
|
||||||
|
"ebit": 3648000000,
|
||||||
|
"ebitPercentage": 22.2,
|
||||||
|
"depreciation": 55000000,
|
||||||
|
"depreciationPercentage": 0.33467,
|
||||||
|
"totalCash": 3608000000,
|
||||||
|
"totalCashPercentage": 21.95,
|
||||||
|
"receivables": 2708000000,
|
||||||
|
"receivablesPercentage": 16.48,
|
||||||
|
"inventories": 1955000000,
|
||||||
|
"inventoriesPercentage": 11.9,
|
||||||
|
"payable": 1321000000,
|
||||||
|
"payablePercentage": 8.04,
|
||||||
|
"capitalExpenditure": -301000000,
|
||||||
|
"capitalExpenditurePercentage": -1.83,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.07,
|
||||||
|
"taxRateCash": 13818480,
|
||||||
|
"ebiat": 3143901880,
|
||||||
|
"ufcf": 2562901880,
|
||||||
|
"sumPvUfcf": 0,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39729032775,
|
||||||
|
"enterpriseValue": 51687887242,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52617887242,
|
||||||
|
"equityValuePerShare": 32.38,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"year": "2020",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 9763000000,
|
||||||
|
"revenuePercentage": 45.05,
|
||||||
|
"ebitda": 1322000000,
|
||||||
|
"ebitdaPercentage": 13.54,
|
||||||
|
"ebit": 968000000,
|
||||||
|
"ebitPercentage": 9.91,
|
||||||
|
"depreciation": 354000000,
|
||||||
|
"depreciationPercentage": 3.63,
|
||||||
|
"totalCash": 2290000000,
|
||||||
|
"totalCashPercentage": 23.46,
|
||||||
|
"receivables": 2076000000,
|
||||||
|
"receivablesPercentage": 21.26,
|
||||||
|
"inventories": 1399000000,
|
||||||
|
"inventoriesPercentage": 14.33,
|
||||||
|
"payable": 468000000,
|
||||||
|
"payablePercentage": 4.79,
|
||||||
|
"capitalExpenditure": -294000000,
|
||||||
|
"capitalExpenditurePercentage": -3.01,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.08,
|
||||||
|
"taxRateCash": -95294117,
|
||||||
|
"ebiat": 1890447058,
|
||||||
|
"ufcf": 796447058,
|
||||||
|
"sumPvUfcf": 0,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39717576725,
|
||||||
|
"enterpriseValue": 51676431191,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52606431191,
|
||||||
|
"equityValuePerShare": 32.37,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"year": "2019",
|
||||||
|
"symbol": "AMD",
|
||||||
|
"revenue": 6731000000,
|
||||||
|
"revenuePercentage": 0,
|
||||||
|
"ebitda": 466000000,
|
||||||
|
"ebitdaPercentage": 6.92,
|
||||||
|
"ebit": 208000000,
|
||||||
|
"ebitPercentage": 3.09,
|
||||||
|
"depreciation": 258000000,
|
||||||
|
"depreciationPercentage": 3.83,
|
||||||
|
"totalCash": 1503000000,
|
||||||
|
"totalCashPercentage": 22.33,
|
||||||
|
"receivables": 1859000000,
|
||||||
|
"receivablesPercentage": 27.62,
|
||||||
|
"inventories": 982000000,
|
||||||
|
"inventoriesPercentage": 14.59,
|
||||||
|
"payable": 988000000,
|
||||||
|
"payablePercentage": 14.68,
|
||||||
|
"capitalExpenditure": -217000000,
|
||||||
|
"capitalExpenditurePercentage": -3.22,
|
||||||
|
"price": 170.78,
|
||||||
|
"beta": 1.651,
|
||||||
|
"dilutedSharesOutstanding": 1625000000,
|
||||||
|
"costofDebt": 4.37,
|
||||||
|
"taxRate": -73.58,
|
||||||
|
"afterTaxCostOfDebt": 4.37,
|
||||||
|
"riskFreeRate": 4.37,
|
||||||
|
"marketRiskPremium": 4.72,
|
||||||
|
"costOfEquity": 12.16,
|
||||||
|
"totalDebt": 3003000000,
|
||||||
|
"totalEquity": 277517500000,
|
||||||
|
"totalCapital": 280520500000,
|
||||||
|
"debtWeighting": 1.07,
|
||||||
|
"equityWeighting": 98.93,
|
||||||
|
"wacc": 12.08,
|
||||||
|
"taxRateCash": 8333334,
|
||||||
|
"ebiat": 190666666,
|
||||||
|
"ufcf": -1621333333,
|
||||||
|
"sumPvUfcf": 0,
|
||||||
|
"longTermGrowthRate": 4,
|
||||||
|
"terminalValue": 70244084511,
|
||||||
|
"presentTerminalValue": 39724484904,
|
||||||
|
"enterpriseValue": 51683339370,
|
||||||
|
"netDebt": -930000000,
|
||||||
|
"equityValue": 52613339370,
|
||||||
|
"equityValuePerShare": 32.38,
|
||||||
|
"freeCashFlowT1": 5673773157
|
||||||
|
}
|
||||||
|
]
|
||||||
|
|
||||||
|
# Function to calculate present value
def present_value(cash_flow, discount_rate, time_period):
    return cash_flow / ((1 + discount_rate) ** time_period)

# Initialize variables
total_present_value = 0
wacc_sum = 0

# Iterate over each year's data
for i, year_data in enumerate(data):
    ufcf = year_data['ufcf']
    wacc = year_data['wacc'] / 100  # wacc is stated in percent above, convert to a decimal rate
    time_period = i + 1

    # Discounting free cash flow to its present value
    present_value_ufcf = present_value(ufcf, wacc, time_period)

    total_present_value += present_value_ufcf
    wacc_sum += wacc

# Calculate DCF
average_wacc = wacc_sum / len(data)
dcf = total_present_value + (data[-1]['presentTerminalValue'] / ((1 + average_wacc) ** len(data)))

print("Discounted Cash Flow (DCF) for AMD:", dcf)
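The discounting step above is just PV = CF / (1 + r)^t with r as a decimal rate, which is why the loop converts the percentage `wacc` values. A standalone sanity check on a single cash flow, reusing the `freeCashFlowT1` and `wacc` figures from the data above:

```python
def present_value(cash_flow, discount_rate, time_period):
    return cash_flow / ((1 + discount_rate) ** time_period)

# About $5.67B of free cash flow one year out, discounted at a 12.08% WACC.
pv = present_value(5_673_773_157, 0.1208, 1)
print(f"{pv:,.0f}")   # roughly 5.06e9, i.e. about $5.06B in today's terms
```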
292
app/primary_cron_job.py
Normal file
292
app/primary_cron_job.py
Normal file
@ -0,0 +1,292 @@
|
|||||||
|
import pytz
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from urllib.request import urlopen
|
||||||
|
import certifi
|
||||||
|
import json
|
||||||
|
import ujson
|
||||||
|
import schedule
|
||||||
|
import time
|
||||||
|
import subprocess
|
||||||
|
from pocketbase import PocketBase # Client also works the same
|
||||||
|
import asyncio
|
||||||
|
import aiohttp
|
||||||
|
|
||||||
|
import sqlite3
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
import threading # Import threading module for parallel execution
|
||||||
|
|
||||||
|
|
||||||
|
from dotenv import load_dotenv
|
||||||
|
import os
|
||||||
|
load_dotenv()
|
||||||
|
|
||||||
|
api_key = os.getenv('FMP_API_KEY')
|
||||||
|
useast_ip_address = os.getenv('USEAST_IP_ADDRESS')
|
||||||
|
pb_admin_email = os.getenv('POCKETBASE_ADMIN_EMAIL')
|
||||||
|
pb_admin_password = os.getenv('POCKETBASE_PASSWORD')
|
||||||
|
|
||||||
|
pb = PocketBase('http://127.0.0.1:8090')
|
||||||
|
admin_data = pb.admins.auth_with_password(pb_admin_email, pb_admin_password)
|
||||||
|
|
||||||
|
# Set the system's timezone to Berlin at the beginning
|
||||||
|
subprocess.run(["timedatectl", "set-timezone", "Europe/Berlin"])
|
||||||
|
|
||||||
|
async def get_quote_of_stocks(ticker_list):
|
||||||
|
ticker_str = ','.join(ticker_list)
|
||||||
|
async with aiohttp.ClientSession() as session:
|
||||||
|
url = f"https://financialmodelingprep.com/api/v3/quote/{ticker_str}?apikey={api_key}"
|
||||||
|
async with session.get(url) as response:
|
||||||
|
df = await response.json()
|
||||||
|
return df
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def run_json_job():
|
||||||
|
# Run the asynchronous function inside an asyncio loop
|
||||||
|
subprocess.run(["python3", "restart_json.py"])
|
||||||
|
subprocess.run(["pm2", "restart","fastapi"])
|
||||||
|
|
||||||
|
def run_cron_insider_trading():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["python3", "cron_insider_trading.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/insider-trading",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_congress_trading():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["python3", "cron_congress_trading.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/congress-trading",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_cron_var():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["python3", "cron_var.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/var",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_cron_market_movers():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 4:
|
||||||
|
subprocess.run(["python3", "cron_market_movers.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/market-movers",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
|
||||||
|
def run_cron_market_news():
|
||||||
|
subprocess.run(["python3", "cron_market_news.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/market-news",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_cron_heatmap():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 4:
|
||||||
|
subprocess.run(["python3", "cron_heatmap.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/heatmaps",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_cron_quote():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 6:
|
||||||
|
subprocess.run(["python3", "cron_quote.py"])
|
||||||
|
command = ["sudo", "rsync", "-avz", "-e", "ssh", "/root/backend_stocknear/app/json/quote", f"root@{useast_ip_address}:/root/backend_stocknear/app/json"]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_cron_price_alert():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 4:
|
||||||
|
subprocess.run(["python3", "cron_price_alert.py"])
|
||||||
|
|
||||||
|
def run_cron_portfolio():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 4:
|
||||||
|
subprocess.run(["python3", "cron_portfolio.py"])
|
||||||
|
|
||||||
|
def run_cron_options_flow():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 4:
|
||||||
|
subprocess.run(["python3", "cron_options_flow.py"])
|
||||||
|
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/options-flow/feed/",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json/options-flow/feed/"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
|
||||||
|
def run_cron_options_zero_dte():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 4:
|
||||||
|
subprocess.run(["python3", "cron_options_zero_dte.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/options-flow/zero-dte/",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json/options-flow/zero-dte/"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
|
||||||
|
def run_ta_rating():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["python3", "cron_ta_rating.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/ta-rating",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_stockdeck():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["python3", "cron_stockdeck.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/stockdeck",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_similar_stocks():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["python3", "cron_similar_stocks.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/similar-stocks",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/app/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_historical_price():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["python3", "cron_historical_price.py"])
|
||||||
|
command = [
|
||||||
|
"sudo", "rsync", "-avz", "-e", "ssh",
|
||||||
|
"/root/backend_stocknear/app/json/historical-price",
|
||||||
|
f"root@{useast_ip_address}:/root/backend_stocknear/json"
|
||||||
|
]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_one_day_price():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 6:
|
||||||
|
subprocess.run(["python3", "cron_one_day_price.py"])
|
||||||
|
command = ["sudo", "rsync", "-avz", "-e", "ssh", "/root/backend_stocknear/app/json/one-day-price/", f"root@{useast_ip_address}:/root/backend_stocknear/app/json/one-day-price/"]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_options_bubble_ticker():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 4:
|
||||||
|
subprocess.run(["python3", "cron_options_bubble.py"])
|
||||||
|
|
||||||
|
command = ["sudo", "rsync", "-avz", "-e", "ssh", "/root/backend_stocknear/app/json/options-bubble/", f"root@{useast_ip_address}:/root/backend_stocknear/app/json/options-bubble/"]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
command = ["sudo", "rsync", "-avz", "-e", "ssh", "/root/backend_stocknear/app/json/options-flow/company/", f"root@{useast_ip_address}:/root/backend_stocknear/app/json/options-flow/company/"]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_analyst_rating():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["python3", "cron_analyst_db.py"])
|
||||||
|
subprocess.run(["python3", "cron_analyst_ticker.py"])
|
||||||
|
command = ["sudo", "rsync", "-avz", "-e", "ssh", "/root/backend_stocknear/app/json/analyst", f"root@{useast_ip_address}:/root/backend_stocknear/app/json"]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
def run_market_moods():
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["python3", "cron_bull_bear_say.py"])
|
||||||
|
subprocess.run(["python3", "cron_wiim.py"])
|
||||||
|
command = ["sudo", "rsync", "-avz", "-e", "ssh", "/root/backend_stocknear/app/json/bull_bear_say", f"root@{useast_ip_address}:/root/backend_stocknear/app/json"]
|
||||||
|
subprocess.run(command)
|
||||||
|
command = ["sudo", "rsync", "-avz", "-e", "ssh", "/root/backend_stocknear/app/json/wiim", f"root@{useast_ip_address}:/root/backend_stocknear/app/json"]
|
||||||
|
subprocess.run(command)
|
||||||
|
|
||||||
|
|
||||||
|
def run_db_schedule_job():
|
||||||
|
#update db daily
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["bash", "run_universe.sh"])
|
||||||
|
|
||||||
|
def run_restart_cache():
|
||||||
|
#restart fastapi/fastify daily to refresh the cache
|
||||||
|
week = datetime.today().weekday()
|
||||||
|
if week <= 5:
|
||||||
|
subprocess.run(["pm2", "restart","fastapi"])
|
||||||
|
subprocess.run(["pm2", "restart","fastify"])
|
||||||
|
#subprocess.run(["python3", "cache_endpoints.py"])
|
||||||
|
|
||||||
|
# Create functions to run each schedule in a separate thread
|
||||||
|
def run_threaded(job_func):
|
||||||
|
job_thread = threading.Thread(target=job_func)
|
||||||
|
job_thread.start()
|
||||||
|
|
||||||
|
# Schedule the job to run
|
||||||
|
|
||||||
|
schedule.every().day.at("01:00").do(run_threaded, run_options_bubble_ticker).tag('options_ticker_job')
|
||||||
|
schedule.every().day.at("02:00").do(run_threaded, run_db_schedule_job)
|
||||||
|
schedule.every().day.at("06:00").do(run_threaded, run_historical_price).tag('historical_job')
|
||||||
|
schedule.every().day.at("07:00").do(run_threaded, run_ta_rating).tag('ta_rating_job')
|
||||||
|
schedule.every().day.at("08:00").do(run_threaded, run_cron_insider_trading).tag('insider_trading_job')
|
||||||
|
schedule.every().day.at("09:00").do(run_threaded, run_congress_trading).tag('congress_job')
|
||||||
|
|
||||||
|
schedule.every().day.at("13:30").do(run_threaded, run_stockdeck).tag('stockdeck_job')
|
||||||
|
schedule.every().day.at("13:45").do(run_threaded, run_similar_stocks).tag('similar_stocks_job')
|
||||||
|
schedule.every().day.at("14:00").do(run_threaded, run_cron_var).tag('var_job')
|
||||||
|
|
||||||
|
|
||||||
|
schedule.every().day.at("15:45").do(run_threaded, run_restart_cache)
|
||||||
|
schedule.every(1).minutes.do(run_threaded, run_cron_portfolio).tag('portfolio_job')
|
||||||
|
schedule.every(5).minutes.do(run_threaded, run_cron_market_movers).tag('market_movers_job')
|
||||||
|
|
||||||
|
schedule.every(15).minutes.do(run_threaded, run_cron_market_news).tag('market_news_job')
|
||||||
|
schedule.every(10).minutes.do(run_threaded, run_one_day_price).tag('one_day_price_job')
|
||||||
|
schedule.every(5).minutes.do(run_threaded, run_cron_heatmap).tag('heatmap_job')
|
||||||
|
|
||||||
|
schedule.every(1).minutes.do(run_threaded, run_cron_quote).tag('quote_job')
|
||||||
|
schedule.every(1).minutes.do(run_threaded, run_cron_price_alert).tag('price_alert_job')
|
||||||
|
schedule.every(15).minutes.do(run_threaded, run_market_moods).tag('market_moods_job')
|
||||||
|
schedule.every(3).hours.do(run_threaded, run_json_job).tag('json_job')
|
||||||
|
schedule.every(6).hours.do(run_threaded, run_analyst_rating).tag('analyst_job')
|
||||||
|
|
||||||
|
schedule.every(10).seconds.do(run_threaded, run_cron_options_flow).tag('options_flow_job')
|
||||||
|
schedule.every(10).seconds.do(run_threaded, run_cron_options_zero_dte).tag('options_zero_dte_job')
|
||||||
|
|
||||||
|
|
||||||
|
# Run the scheduled jobs indefinitely
|
||||||
|
while True:
|
||||||
|
schedule.run_pending()
|
||||||
|
time.sleep(3)
|
||||||
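Every job above is dispatched through `run_threaded`, so a slow cron script never blocks the polling loop that calls `schedule.run_pending()`. A self-contained sketch of that pattern (the `heartbeat` job and the bounded loop are placeholders for illustration; the real script loops forever):

```python
import schedule
import threading
import time

def run_threaded(job_func):
    # Fire the job on its own thread so schedule.run_pending() returns immediately.
    threading.Thread(target=job_func, daemon=True).start()

def heartbeat():
    # Placeholder job standing in for the subprocess-based cron scripts above.
    print("tick", time.strftime("%H:%M:%S"))

schedule.every(2).seconds.do(run_threaded, heartbeat)

for _ in range(5):            # bounded loop for the example
    schedule.run_pending()
    time.sleep(1)
```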
251
app/rating.py
Normal file
251
app/rating.py
Normal file
@ -0,0 +1,251 @@
|
|||||||
|
import pandas as pd
|
||||||
|
from datetime import datetime
|
||||||
|
from ta.utils import *
|
||||||
|
from ta.volatility import *
|
||||||
|
from ta.momentum import *
|
||||||
|
from ta.trend import *
|
||||||
|
from ta.volume import *
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
class rating_model:
|
||||||
|
def __init__(self, df):
|
||||||
|
#Results are in the form of
|
||||||
|
# Strong Sell => 0
|
||||||
|
# Sell => 1
|
||||||
|
# Neutral => 2
|
||||||
|
# Buy => 3
|
||||||
|
# Strong Buy => 4
|
||||||
|
|
||||||
|
self.data = df
|
||||||
|
|
||||||
|
def compute_overall_signal(self, data):
|
||||||
|
ratingMap = {
|
||||||
|
'Strong Sell': 0,
|
||||||
|
'Sell': 1,
|
||||||
|
'Neutral': 2,
|
||||||
|
'Buy': 3,
|
||||||
|
'Strong Buy': 4
|
||||||
|
}
|
||||||
|
|
||||||
|
# Extract overall ratings from the data
|
||||||
|
overallRating = {item['name']: item['signal'] for item in data}
|
||||||
|
|
||||||
|
# Compute mean overall rating
|
||||||
|
mean_overall_rating = sum(ratingMap[val] for val in overallRating.values()) / len(overallRating)
|
||||||
|
mean_overall_rating /= 4.0
|
||||||
|
|
||||||
|
# Determine overall signal based on mean rating
|
||||||
|
if 0 < mean_overall_rating <= 0.15:
|
||||||
|
overall_signal = "Strong Sell"
|
||||||
|
elif 0.15 < mean_overall_rating <= 0.45:
|
||||||
|
overall_signal = "Sell"
|
||||||
|
elif 0.45 < mean_overall_rating <= 0.55:
|
||||||
|
overall_signal = 'Neutral'
|
||||||
|
elif 0.55 < mean_overall_rating <= 0.8:
|
||||||
|
overall_signal = 'Buy'
|
||||||
|
elif 0.8 < mean_overall_rating <= 1.0:
|
||||||
|
overall_signal = "Strong Buy"
|
||||||
|
else:
|
||||||
|
overall_signal = 'n/a'
|
||||||
|
|
||||||
|
return overall_signal
|
||||||
|
|
||||||
|
def ta_rating(self):
|
||||||
|
df = pd.DataFrame()
|
||||||
|
df['sma_20'] = sma_indicator(self.data['close'], window=20)
|
||||||
|
df['sma_50'] = sma_indicator(self.data['close'], window=50)
|
||||||
|
df['ema_20'] = ema_indicator(self.data['close'], window=20)
|
||||||
|
df['ema_50'] = ema_indicator(self.data['close'], window=50)
|
||||||
|
df['wma'] = wma_indicator(self.data['close'], window=20)
|
||||||
|
df['adx'] = adx(self.data['high'],self.data['low'],self.data['close'])
|
||||||
|
df["adx_pos"] = adx_pos(self.data['high'],self.data['low'],self.data['close'])
|
||||||
|
df["adx_neg"] = adx_neg(self.data['high'],self.data['low'],self.data['close'])
|
||||||
|
df['williams'] = WilliamsRIndicator(high=self.data['high'], low=self.data['low'], close=self.data['close']).williams_r()
|
||||||
|
|
||||||
|
# Assign ratings based on SMA values
|
||||||
|
df['sma_rating'] = 'Neutral'
|
||||||
|
if self.data['close'].iloc[-1] < df['sma_50'].iloc[-1] and df['sma_20'].iloc[-1] < df['sma_50'].iloc[-1]:
|
||||||
|
df['sma_rating'] = 'Strong Sell'
|
||||||
|
elif self.data['close'].iloc[-1] < df['sma_20'].iloc[-1] and df['sma_20'].iloc[-1] < df['sma_50'].iloc[-1]:
|
||||||
|
df['sma_rating'] = 'Sell'
|
||||||
|
elif df['sma_20'].iloc[-1] <= self.data['close'].iloc[-1] <= df['sma_50'].iloc[-1]:
|
||||||
|
df['sma_rating'] = 'Neutral'
|
||||||
|
elif self.data['close'].iloc[-1] > df['sma_20'].iloc[-1] and df['sma_20'].iloc[-1] > df['sma_50'].iloc[-1]:
|
||||||
|
df['sma_rating'] = 'Buy'
|
||||||
|
elif self.data['close'].iloc[-1] > df['sma_50'].iloc[-1] and df['sma_20'].iloc[-1] > df['sma_50'].iloc[-1]:
|
||||||
|
df['sma_rating'] = 'Strong Buy'
|
||||||
|
|
||||||
|
# Assign ratings for ema
|
||||||
|
df['ema_rating'] = 'Neutral'
|
||||||
|
|
||||||
|
if self.data['close'].iloc[-1] < df['ema_50'].iloc[-1] and df['ema_20'].iloc[-1] < df['ema_50'].iloc[-1]:
|
||||||
|
df['ema_rating'] = 'Strong Sell'
|
||||||
|
elif self.data['close'].iloc[-1] < df['ema_20'].iloc[-1] and df['ema_20'].iloc[-1] < df['ema_50'].iloc[-1]:
|
||||||
|
df['ema_rating'] = 'Sell'
|
||||||
|
elif df['ema_20'].iloc[-1] <= self.data['close'].iloc[-1] <= df['ema_50'].iloc[-1]:
|
||||||
|
df['ema_rating'] = 'Neutral'
|
||||||
|
elif self.data['close'].iloc[-1] > df['ema_20'].iloc[-1] and df['ema_20'].iloc[-1] > df['ema_50'].iloc[-1]:
|
||||||
|
df['ema_rating'] = 'Buy'
|
||||||
|
elif self.data['close'].iloc[-1] > df['ema_50'].iloc[-1] and df['ema_20'].iloc[-1] > df['ema_50'].iloc[-1]:
|
||||||
|
df['ema_rating'] = 'Strong Buy'
|
||||||
|
|
||||||
|
# Assign ratings based on wma
|
||||||
|
df['wma_rating'] = pd.cut(self.data['close'] - df['wma'],
|
||||||
|
bins=[float('-inf'), -10, -5, 0, 5, 10],
|
||||||
|
labels=['Strong Sell', 'Sell', 'Neutral', 'Buy', 'Strong Buy'])
|
||||||
|
|
||||||
|
# Assign ratings based on adx
|
||||||
|
if df['adx'].iloc[-1] > 50 and df['adx_neg'].iloc[-1] > df['adx_pos'].iloc[-1]:
|
||||||
|
df['adx_rating'] = 'Strong Sell'
|
||||||
|
elif df['adx'].iloc[-1] >=25 and df['adx'].iloc[-1] <=50 and df['adx_neg'].iloc[1] > df['adx_pos'].iloc[-1]:
|
||||||
|
df['adx_rating'] = 'Sell'
|
||||||
|
elif df['adx'].iloc[-1] < 25:
|
||||||
|
df['adx_rating'] = 'Neutral'
|
||||||
|
elif df['adx'].iloc[-1] >=25 and df['adx'].iloc[-1] <=50 and df['adx_pos'].iloc[-1] > df['adx_neg'].iloc[-1]:
|
||||||
|
df['adx_rating'] = 'Buy'
|
||||||
|
elif df['adx'].iloc[-1] > 50 and df['adx_pos'].iloc[-1] > df['adx_neg'].iloc[-1]:
|
||||||
|
df['adx_rating'] = 'Strong Buy'
|
||||||
|
else:
|
||||||
|
df['adx_rating'] = 'Neutral'
|
||||||
|
|
||||||
|
|
||||||
|
# Assign ratings based on williams
|
||||||
|
df['williams_rating'] = 'Neutral'
|
||||||
|
df.loc[df["williams"] < -80, 'williams_rating'] = "Strong Sell"
|
||||||
|
df.loc[(df["williams"] >= -80) & (df["williams"] < -50), 'williams_rating'] = "Sell"
|
||||||
|
df.loc[(df["williams"] >= -50) & (df["williams"] <= -20), 'williams_rating'] = "Buy"
|
||||||
|
df.loc[df["williams"] > -20, 'williams_rating'] = "Strong Buy"
|
||||||
|
|
||||||
|
#=========Momentum Indicators ============#

# Note: `aroon` is instantiated here but its up/down values are not referenced in the ratings below.
aroon = AroonIndicator(self.data['close'], low=self.data['low'], window=14)
df['rsi'] = rsi(self.data['close'], window=14)
df['stoch_rsi'] = stochrsi_k(self.data['close'], window=14, smooth1=3, smooth2=3) * 100

df['macd'] = macd(self.data['close'])
df['macd_signal'] = macd_signal(self.data['close'])
df['macd_hist'] = 2 * macd_diff(self.data['close'])
df['roc'] = roc(self.data['close'], window=14)
df['cci'] = CCIIndicator(high=self.data['high'], low=self.data['low'], close=self.data['close']).cci()
df['mfi'] = MFIIndicator(high=self.data['high'], low=self.data['low'], close=self.data['close'], volume=self.data['volume']).money_flow_index()

# Assign ratings based on MFI values
df['mfi_rating'] = pd.cut(df['mfi'],
                          bins=[-1, 20, 40, 60, 80, 101],
                          labels=['Strong Buy', 'Buy', 'Neutral', 'Sell', 'Strong Sell'])

# Assign ratings based on RSI values
df['rsi_rating'] = pd.cut(df['rsi'],
                          bins=[-1, 30, 50, 60, 70, 101],
                          labels=['Strong Buy', 'Buy', 'Neutral', 'Sell', 'Strong Sell'])

# Assign ratings based on Stoch RSI values
df['stoch_rsi_rating'] = pd.cut(df['stoch_rsi'],
                                bins=[-1, 30, 50, 60, 70, 101],
                                labels=['Strong Buy', 'Buy', 'Neutral', 'Sell', 'Strong Sell'])

# Assign ratings for macd
if df['macd'].iloc[-1] < df['macd_signal'].iloc[-1] and df['macd_hist'].iloc[-1] < 0 \
        and df['macd_hist'].iloc[-1] > df['macd_hist'].iloc[-2]:
    df['macd_rating'] = 'Strong Sell'
elif df['macd'].iloc[-1] < df['macd_signal'].iloc[-1] and df['macd_hist'].iloc[-1] < 0 \
        and df['macd_hist'].iloc[-1] < df['macd_hist'].iloc[-2]:
    df['macd_rating'] = 'Sell'
elif abs(df['macd'].iloc[-1] - df['macd_signal'].iloc[-1]) < 0.01 and abs(df['macd_hist'].iloc[-1]) < 0.01:
    df['macd_rating'] = 'Neutral'
elif df['macd'].iloc[-1] > df['macd_signal'].iloc[-1] and df['macd_hist'].iloc[-1] > 0 and df['macd_hist'].iloc[-1] < df['macd_hist'].iloc[-2]:
    df['macd_rating'] = 'Buy'
elif df['macd'].iloc[-1] > df['macd_signal'].iloc[-1] and df['macd_hist'].iloc[-1] > 0 and df['macd_hist'].iloc[-1] > df['macd_hist'].iloc[-2]:
    df['macd_rating'] = 'Strong Buy'
else:
    df['macd_rating'] = 'Neutral'

# Assign ratings for roc
if df['roc'].iloc[-1] < -10:
    df['roc_rating'] = 'Strong Sell'
elif df['roc'].iloc[-1] > -10 and df['roc'].iloc[-1] <= -5:
    df['roc_rating'] = 'Sell'
elif df['roc'].iloc[-1] > -5 and df['roc'].iloc[-1] < 5:
    df['roc_rating'] = 'Neutral'
elif df['roc'].iloc[-1] >= 5 and df['roc'].iloc[-1] < 10:
    df['roc_rating'] = 'Buy'
elif df['roc'].iloc[-1] >= 10:
    df['roc_rating'] = 'Strong Buy'
else:
    df['roc_rating'] = 'Neutral'

# Define CCI threshold values for signals
cci_strong_sell_threshold = -100
cci_sell_threshold = -50
cci_buy_threshold = 50
cci_strong_buy_threshold = 100

# Assign signals based on CCI values
if df['cci'].iloc[-1] < cci_strong_sell_threshold:
    df['cci_rating'] = 'Strong Sell'
elif cci_strong_sell_threshold <= df['cci'].iloc[-1] < cci_sell_threshold:
    df['cci_rating'] = 'Sell'
elif cci_sell_threshold <= df['cci'].iloc[-1] < cci_buy_threshold:
    df['cci_rating'] = 'Neutral'
elif cci_buy_threshold <= df['cci'].iloc[-1] < cci_strong_buy_threshold:
    df['cci_rating'] = 'Buy'
else:
    df['cci_rating'] = 'Strong Buy'
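# Note on the pd.cut buckets above: bins are right-closed by default, so for example an RSI of 25
# falls in (-1, 30] and is labelled 'Strong Buy', while an RSI of 75 falls in (70, 101]
# and is labelled 'Strong Sell'.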
res_list = [
    {'name': 'Relative Strength Index (14)', 'value': round(df['rsi'].iloc[-1], 2), 'signal': df['rsi_rating'].iloc[-1]},
    {'name': 'Stochastic RSI Fast (3,3,14,14)', 'value': round(df['stoch_rsi'].iloc[-1], 2), 'signal': df['stoch_rsi_rating'].iloc[-1]},
    {'name': 'Money Flow Index (14)', 'value': round(df['mfi'].iloc[-1], 2), 'signal': df['mfi_rating'].iloc[-1]},
    {'name': 'Simple Moving Average (20)', 'value': round(df['sma_20'].iloc[-1], 2), 'signal': df['sma_rating'].iloc[-1]},
    {'name': 'Exponential Moving Average (20)', 'value': round(df['ema_20'].iloc[-1], 2), 'signal': df['ema_rating'].iloc[-1]},
    {'name': 'Weighted Moving Average (20)', 'value': round(df['wma'].iloc[-1], 2), 'signal': df['wma_rating'].iloc[-1]},
    {'name': 'Average Directional Index (14)', 'value': round(df['adx'].iloc[-1], 2), 'signal': df['adx_rating'].iloc[-1]},
    {'name': 'Commodity Channel Index (14)', 'value': round(df['cci'].iloc[-1], 2), 'signal': df['cci_rating'].iloc[-1]},
    {'name': 'Rate of Change (14)', 'value': round(df['roc'].iloc[-1], 2), 'signal': df['roc_rating'].iloc[-1]},
    {'name': 'Moving Average Convergence Divergence (12, 26)', 'value': round(df['macd'].iloc[-1], 2), 'signal': df['macd_rating'].iloc[-1]},
    {'name': 'Williams %R (14)', 'value': round(df['williams'].iloc[-1], 2), 'signal': df['williams_rating'].iloc[-1]}
]

overall_signal = self.compute_overall_signal(res_list)

res_dict = {'overallSignal': overall_signal, 'signalList': res_list}
return res_dict


# Load the historical stock price data

#Testing mode
# Load the data
'''
import sqlite3
start_date = "2015-01-01"
end_date = datetime.today().strftime("%Y-%m-%d")
con = sqlite3.connect('stocks.db')
symbol = 'ZTS'

query_template = """
    SELECT
        date, open, high, low, close, volume
    FROM
        "{symbol}"
    WHERE
        date BETWEEN ? AND ?
"""
query = query_template.format(symbol=symbol)
df = pd.read_sql_query(query, con, params=(start_date, end_date))

test = rating_model(df).ta_rating()
print(test)
con.close()
'''
1164
app/restart_json.py
Normal file
File diff suppressed because one or more lines are too long
54
app/secondary_cron_job.py
Normal file
@ -0,0 +1,54 @@
import pytz
from datetime import datetime, timedelta
from urllib.request import urlopen
import certifi
import json
import ujson
import schedule
import time
import subprocess
import asyncio
import aiohttp
import sqlite3
import pandas as pd
import numpy as np
import threading  # Import threading module for parallel execution

from dotenv import load_dotenv
import os
load_dotenv()
api_key = os.getenv('FMP_API_KEY')

berlin_tz = pytz.timezone('Europe/Berlin')

# Set the system's timezone to Berlin at the beginning
subprocess.run(["timedatectl", "set-timezone", "Europe/Berlin"])


def run_restart_cache():
    # Restart the backend services once per day to pick up the refreshed database
    # (weekday() <= 5 covers Monday through Saturday)
    week = datetime.today().weekday()
    if week <= 5:
        subprocess.run(["pm2", "restart", "fastapi"])
        subprocess.run(["pm2", "restart", "fastify"])


def run_json_job():
    # Rebuild the cached JSON data in a subprocess, then restart the FastAPI service
    subprocess.run(["python3", "restart_json.py"])
    subprocess.run(["pm2", "restart", "fastapi"])


# Run each scheduled job in its own thread so a long job does not block the scheduler loop
def run_threaded(job_func):
    job_thread = threading.Thread(target=job_func)
    job_thread.start()


schedule.every().day.at("15:45").do(run_threaded, run_restart_cache)
schedule.every(3).hours.do(run_threaded, run_json_job).tag('json_job')

while True:
    schedule.run_pending()
    time.sleep(3)
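# Note (deployment assumption, not shown in this diff): the loop above blocks forever, so this
# script is presumably kept alive by a process manager, e.g.
#   pm2 start secondary_cron_job.py --interpreter python3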
337
app/stats.py
Normal file
@ -0,0 +1,337 @@
|
|||||||
|
import quantstats as qs
|
||||||
|
from datetime import datetime
|
||||||
|
import pandas as pd
|
||||||
|
import sqlite3
|
||||||
|
from math import sqrt, ceil
|
||||||
|
from dateutil.relativedelta import relativedelta
|
||||||
|
import time
|
||||||
|
import json
|
||||||
|
from tqdm import tqdm
|
||||||
|
import concurrent.futures
|
||||||
|
import numpy as np
|
||||||
|
import argparse
|
||||||
|
|
||||||
|
|
||||||
|
pd.set_option('display.max_rows', 150)
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
parser = argparse.ArgumentParser(description='Process stock, etf or crypto data.')
|
||||||
|
parser.add_argument('--db', choices=['stocks', 'etf', 'crypto'], required=True, help='Database name (stocks, etf or crypto)')
|
||||||
|
parser.add_argument('--table', choices=['stocks', 'etfs', 'cryptos'], required=True, help='Table name (stocks, etfs or cryptos)')
|
||||||
|
return parser.parse_args()
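# Example invocation (hypothetical, using the flags defined above):
#   python3 stats.py --db stocks --table stocks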
|
||||||
|
|
||||||
|
# Define a function to get the ticker from the database
|
||||||
|
def get_ticker_data_from_database(database_path, sp500_ticker, start_date, end_date):
|
||||||
|
con_etf = sqlite3.connect(database_path)
|
||||||
|
|
||||||
|
# Fetch data for the selected ticker (SPY or another ticker)
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
date, close
|
||||||
|
FROM
|
||||||
|
"{ticker}"
|
||||||
|
WHERE
|
||||||
|
date BETWEEN ? AND ?
|
||||||
|
"""
|
||||||
|
query = query_template.format(ticker=sp500_ticker)
|
||||||
|
df = pd.read_sql_query(query, con_etf, params=(start_date, end_date))
|
||||||
|
df['date'] = pd.to_datetime(df['date'])
|
||||||
|
df = df.rename(columns={'date': 'Date'})
|
||||||
|
df[sp500_ticker] = df['close'].pct_change()
|
||||||
|
df.set_index("Date", inplace=True)
|
||||||
|
df.drop(columns=['close'], inplace=True)
|
||||||
|
con_etf.close()
|
||||||
|
|
||||||
|
return sp500_ticker, df
|
||||||
|
|
||||||
|
class Quant_Stats:
|
||||||
|
def __init__(self):
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_trading_periods(self):
|
||||||
|
periods_per_year = 252
|
||||||
|
half_year = ceil(periods_per_year / 2)
|
||||||
|
return periods_per_year, half_year
|
||||||
|
|
||||||
|
|
||||||
|
def get_data(self, df, ticker):
|
||||||
|
benchmark = "SPY"
|
||||||
|
compounded = True
|
||||||
|
rf = 0
|
||||||
|
today = df.index[-1]
|
||||||
|
comp_func = qs.stats.comp
|
||||||
|
win_year, win_half_year = self.get_trading_periods()
|
||||||
|
|
||||||
|
metrics = pd.DataFrame()
|
||||||
|
|
||||||
|
metrics['Expected Daily %'] = round(qs.stats.expected_return(df, compounded=True)*100,2)
|
||||||
|
metrics['Expected Monthly %'] = round(qs.stats.expected_return(df, compounded=True, aggregate="M")*100,2)
|
||||||
|
metrics['Expected Yearly %'] = round(qs.stats.expected_return(df, compounded=True, aggregate="A")*100,2)
|
||||||
|
metrics["Cumulative Return %"] = round(qs.stats.comp(df) * 100, 2)
|
||||||
|
metrics["CAGR %"] = round(qs.stats.cagr(df, rf, compounded) * 100, 2)
|
||||||
|
metrics["Sharpe"] = qs.stats.sharpe(df, rf, win_year, compounded)
|
||||||
|
metrics["Sortino"] = qs.stats.sortino(df, rf, win_year, True)
|
||||||
|
metrics["Volatility (ann.) %"] = round(qs.stats.volatility(df, win_year, True)* 100, 2)
|
||||||
|
metrics["Calmar"] = round(qs.stats.calmar(df),2)
|
||||||
|
metrics["Skew"] = qs.stats.skew(df, prepare_returns=False)
|
||||||
|
metrics["Kurtosis"] = qs.stats.kurtosis(df, prepare_returns=False)
|
||||||
|
metrics["Kelly Criterion %"] = round(qs.stats.kelly_criterion(df, prepare_returns=False) * 100, 2)
|
||||||
|
metrics["Risk of Ruin %"] = round(qs.stats.risk_of_ruin(df, prepare_returns=False), 2)
|
||||||
|
metrics["Daily Value-at-Risk %"] = -abs(qs.stats.var(df, prepare_returns=False) * 100)
|
||||||
|
metrics["Expected Shortfall (cVaR) %"] = -abs(qs.stats.cvar(df, prepare_returns=False) * 100)
|
||||||
|
metrics["Max Consecutive Wins"] = qs.stats.consecutive_wins(df)
|
||||||
|
metrics["Max Consecutive Losses"] = qs.stats.consecutive_losses(df)
|
||||||
|
metrics["Gain/Pain Ratio"] = qs.stats.gain_to_pain_ratio(df, rf)
|
||||||
|
metrics["Gain/Pain (1M)"] = qs.stats.gain_to_pain_ratio(df, rf, "M")
|
||||||
|
metrics["Payoff Ratio"] = qs.stats.payoff_ratio(df, prepare_returns=False)
|
||||||
|
metrics["Profit Factor"] = qs.stats.profit_factor(df, prepare_returns=False)
|
||||||
|
metrics["Common Sense Ratio"] = qs.stats.common_sense_ratio(df, prepare_returns=False)
|
||||||
|
metrics["CPC Index"] = qs.stats.cpc_index(df, prepare_returns=False)
|
||||||
|
metrics["Tail Ratio"] = qs.stats.tail_ratio(df, prepare_returns=False)
|
||||||
|
metrics["Outlier Win Ratio"] = qs.stats.outlier_win_ratio(df, prepare_returns=False)
|
||||||
|
metrics["Outlier Loss Ratio"] = qs.stats.outlier_loss_ratio(df, prepare_returns=False)
|
||||||
|
|
||||||
|
#Yearly return is included since eoy = end of the year True
|
||||||
|
ticker_monthly_returns = round(qs.stats.monthly_returns(df[ticker], eoy = True, compounded = True) * 100,2)
|
||||||
|
benchmark_monthly_returns = round(qs.stats.monthly_returns(df[benchmark], eoy = True, compounded = True) * 100,2)
|
||||||
|
metrics['Monthly Return'] = [ticker_monthly_returns.T.to_dict('list'), benchmark_monthly_returns.T.to_dict('list')]
|
||||||
|
|
||||||
|
|
||||||
|
metrics["MTD %"] = round(comp_func(df[df.index >= datetime(today.year, today.month, 1)]) * 100,2)
|
||||||
|
|
||||||
|
d = today - relativedelta(months=3)
|
||||||
|
metrics["3M %"] = comp_func(df[df.index >= d]) * 100
|
||||||
|
|
||||||
|
d = today - relativedelta(months=6)
|
||||||
|
metrics["6M %"] = comp_func(df[df.index >= d]) * 100
|
||||||
|
|
||||||
|
metrics["YTD %"] = comp_func(df[df.index >= datetime(today.year, 1, 1)]) * 100
|
||||||
|
|
||||||
|
d = today - relativedelta(years=1)
|
||||||
|
metrics["1Y %"] = comp_func(df[df.index >= d]) * 100
|
||||||
|
|
||||||
|
d = today - relativedelta(months=35)
|
||||||
|
metrics["3Y (ann.) %"] = qs.stats.cagr(df[df.index >= d], 0.0, compounded) * 100
|
||||||
|
|
||||||
|
d = today - relativedelta(months=59)
|
||||||
|
metrics["5Y (ann.) %"] = qs.stats.cagr(df[df.index >= d], 0.0, compounded) * 100
|
||||||
|
|
||||||
|
d = today - relativedelta(years=10)
|
||||||
|
metrics["10Y (ann.) %"] = qs.stats.cagr(df[df.index >= d], 0.0, compounded) * 100
|
||||||
|
metrics["All-time (ann.) %"] = qs.stats.cagr(df, 0.0, compounded) * 100
|
||||||
|
|
||||||
|
metrics["Best Day %"] = qs.stats.best(df, compounded=compounded, prepare_returns=False) * 100
|
||||||
|
metrics["Worst Day %"] = qs.stats.worst(df, prepare_returns=False) * 100
|
||||||
|
metrics["Best Month %"] = (qs.stats.best(df, compounded=compounded, aggregate="M", prepare_returns=False) * 100)
|
||||||
|
metrics["Worst Month %"] = (qs.stats.worst(df, aggregate="M", prepare_returns=False) * 100)
|
||||||
|
metrics["Best Year %"] = (qs.stats.best(df, compounded=compounded, aggregate="A", prepare_returns=False) * 100)
|
||||||
|
metrics["Worst Year %"] = (qs.stats.worst(df, compounded=compounded, aggregate="A", prepare_returns=False) * 100)
|
||||||
|
|
||||||
|
avg_dd_list = []
|
||||||
|
avg_dd_days_list = []
|
||||||
|
max_dd_list = []
|
||||||
|
longest_dd_days_list = []
|
||||||
|
|
||||||
|
for tt in [ticker, benchmark]:
|
||||||
|
dd = qs.stats.to_drawdown_series(df[tt])
|
||||||
|
dd_info = qs.stats.drawdown_details(dd).sort_values(by="max drawdown", ascending = True)
|
||||||
|
dd_info = dd_info[["start", "end", "max drawdown", "days"]]
|
||||||
|
dd_info.columns = ["Started", "Recovered", "Drawdown", "Days"]
|
||||||
|
|
||||||
|
avg_dd_list.append(round(dd_info['Drawdown'].mean(),2))
|
||||||
|
max_dd_list.append(round(dd_info['Drawdown'].min(),2))
|
||||||
|
|
||||||
|
avg_dd_days_list.append(round(dd_info['Days'].mean()))
|
||||||
|
longest_dd_days_list.append(round(dd_info['Days'].max()))
|
||||||
|
|
||||||
|
metrics["Max Drawdown"] = max_dd_list
|
||||||
|
metrics["Avg. Drawdown"] = avg_dd_list
|
||||||
|
|
||||||
|
metrics["Longest DD Days"] = longest_dd_days_list
|
||||||
|
metrics["Avg. Drawdown Days"] = avg_dd_days_list
|
||||||
|
|
||||||
|
worst_dd_list = []
|
||||||
|
dd = qs.stats.to_drawdown_series(df[ticker])
|
||||||
|
dd_info = qs.stats.drawdown_details(dd).sort_values(by="max drawdown", ascending = True)[0:10]
|
||||||
|
dd_info = dd_info[["start", "end", "max drawdown", "days"]]
|
||||||
|
dd_info.columns = ["Started", "Recovered", "Drawdown", "Days"]
|
||||||
|
|
||||||
|
for key, value in dd_info.T.to_dict().items():
|
||||||
|
worst_dd_list.append(value)
|
||||||
|
metrics['Worst 10 Drawdowns'] = [worst_dd_list, '-']
|
||||||
|
|
||||||
|
|
||||||
|
metrics["Recovery Factor"] = qs.stats.recovery_factor(df)
|
||||||
|
metrics["Ulcer Index"] = qs.stats.ulcer_index(df)
|
||||||
|
metrics["Serenity Index"] = qs.stats.serenity_index(df, rf)
|
||||||
|
|
||||||
|
metrics["Avg. Up Month %"] = (qs.stats.avg_win(df, compounded=compounded, aggregate="M", prepare_returns=False) * 100)
|
||||||
|
metrics["Avg. Down Month %"] = (qs.stats.avg_loss(df, compounded=compounded, aggregate="M", prepare_returns=False) * 100)
|
||||||
|
metrics["Win Days %"] = qs.stats.win_rate(df, prepare_returns=False) * 100
|
||||||
|
metrics["Win Month %"] = (qs.stats.win_rate(df, compounded=compounded, aggregate="M", prepare_returns=False) * 100)
|
||||||
|
metrics["Win Quarter %"] = (qs.stats.win_rate(df, compounded=compounded, aggregate="Q", prepare_returns=False) * 100)
|
||||||
|
metrics["Win Year %"] = (qs.stats.win_rate(df, compounded=compounded, aggregate="A", prepare_returns=False) * 100)
|
||||||
|
|
||||||
|
|
||||||
|
greeks = qs.stats.greeks(df[ticker], df[benchmark], win_year, prepare_returns=False)
|
||||||
|
|
||||||
|
metrics["Beta"] = [round(greeks["beta"], 2), "-"]
|
||||||
|
metrics["Alpha"] = [round(greeks["alpha"], 2), "-"]
|
||||||
|
metrics["Correlation"] = [round(df[benchmark].corr(df[ticker]) * 100, 2), "-",]
|
||||||
|
metrics["Treynor Ratio"] = [round(qs.stats.treynor_ratio(df[ticker], df[benchmark], win_year, rf) * 100, 2,), "-" ]
|
||||||
|
metrics["R^2"] = ([qs.stats.r_squared(df[ticker], df[benchmark], prepare_returns=False ).round(2), "-"])
|
||||||
|
|
||||||
|
metrics["Start Period"] = df.index[0].strftime("%Y-%m-%d")
|
||||||
|
metrics['End Period'] = df.index[-1].strftime("%Y-%m-%d")
|
||||||
|
|
||||||
|
|
||||||
|
metrics = metrics.T
|
||||||
|
|
||||||
|
return metrics
|
||||||
|
|
||||||
|
|
||||||
|
def create_quantstats_column(con):
|
||||||
|
"""
|
||||||
|
Create the 'quantStats' column if it doesn't exist in the db table.
|
||||||
|
"""
|
||||||
|
query_check = f"PRAGMA table_info({table_name})"
|
||||||
|
cursor = con.execute(query_check)
|
||||||
|
columns = [col[1] for col in cursor.fetchall()]
|
||||||
|
|
||||||
|
if 'quantStats' not in columns:
|
||||||
|
query = f"ALTER TABLE {table_name} ADD COLUMN quantStats TEXT"
|
||||||
|
con.execute(query)
|
||||||
|
con.commit()
|
||||||
|
|
||||||
|
|
||||||
|
def update_database_with_stats(stats_dict, symbol, con):
|
||||||
|
"""
|
||||||
|
Update the SQLite3 table with calculated statistics for a given symbol.
|
||||||
|
"""
|
||||||
|
|
||||||
|
query = f"UPDATE {table_name} SET quantStats = ? WHERE symbol = ?"
|
||||||
|
stats_json = json.dumps(stats_dict) # Convert the stats dictionary to JSON string
|
||||||
|
con.execute(query, (stats_json, symbol))
|
||||||
|
con.commit()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def process_symbol(ticker, sp500_ticker, sp500_df):
|
||||||
|
df = pd.DataFrame()
|
||||||
|
combined_df = pd.DataFrame()
|
||||||
|
try:
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
date, close
|
||||||
|
FROM
|
||||||
|
"{ticker}"
|
||||||
|
WHERE
|
||||||
|
date BETWEEN ? AND ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
query = query_template.format(ticker=ticker)
|
||||||
|
df = pd.read_sql_query(query, con, params=(start_date, end_date))
|
||||||
|
df['date'] = pd.to_datetime(df['date'])
|
||||||
|
df = df.rename(columns={'date': 'Date'})
|
||||||
|
df[ticker] = df['close'].pct_change()
|
||||||
|
df.set_index("Date", inplace=True)
|
||||||
|
df.drop(columns=['close'], inplace=True)
|
||||||
|
|
||||||
|
combined_df = pd.concat([sp500_df, df], axis=1)
|
||||||
|
|
||||||
|
df = combined_df.dropna()
|
||||||
|
df = df[[ticker, sp500_ticker]]
|
||||||
|
stats = Quant_Stats().get_data(df, ticker)
|
||||||
|
stats_dict = stats.to_dict()
|
||||||
|
|
||||||
|
create_quantstats_column(con)
|
||||||
|
update_database_with_stats(stats_dict, ticker, con)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(e)
|
||||||
|
print(f"Failed create quantStats for {ticker}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#Production Code
|
||||||
|
|
||||||
|
args = parse_args()
|
||||||
|
db_name = args.db
|
||||||
|
table_name = args.table
|
||||||
|
|
||||||
|
start_date = datetime(1970, 1, 1)
|
||||||
|
end_date = datetime.today()
|
||||||
|
|
||||||
|
con = sqlite3.connect(f'backup_db/{db_name}.db')
|
||||||
|
|
||||||
|
# Load the S&P 500 ticker from the database
|
||||||
|
sp500_ticker, sp500_df = get_ticker_data_from_database('backup_db/etf.db', "SPY", start_date, end_date)
|
||||||
|
|
||||||
|
symbol_query = f"SELECT DISTINCT symbol FROM {table_name}"
|
||||||
|
|
||||||
|
symbol_cursor = con.execute(symbol_query)
|
||||||
|
symbols = [symbol[0] for symbol in symbol_cursor.fetchall()]
|
||||||
|
|
||||||
|
# Number of concurrent workers
|
||||||
|
num_processes = 4 # You can adjust this based on your system's capabilities
|
||||||
|
futures = []
|
||||||
|
|
||||||
|
with concurrent.futures.ProcessPoolExecutor(max_workers=num_processes) as executor:
|
||||||
|
for symbol in symbols:
|
||||||
|
futures.append(executor.submit(process_symbol, symbol, sp500_ticker, sp500_df))
|
||||||
|
|
||||||
|
# Use tqdm to wrap around the futures for progress tracking
|
||||||
|
for future in tqdm(concurrent.futures.as_completed(futures), total=len(symbols), desc="Processing"):
|
||||||
|
pass
|
||||||
|
|
||||||
|
|
||||||
|
con.close()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
#Test Code
|
||||||
|
'''
|
||||||
|
con = sqlite3.connect('backup_db/etf.db')
|
||||||
|
start_date = datetime(1970, 1, 1)
|
||||||
|
end_date = datetime.today()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
date, close
|
||||||
|
FROM
|
||||||
|
"{ticker}"
|
||||||
|
WHERE
|
||||||
|
date BETWEEN ? AND ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
ticker_list = ['IVV','SPY']
|
||||||
|
|
||||||
|
combined_df = pd.DataFrame()
|
||||||
|
for ticker in ticker_list:
|
||||||
|
query = query_template.format(ticker=ticker)
|
||||||
|
df = pd.read_sql_query(query, con, params=(start_date, end_date))
|
||||||
|
print(df)
|
||||||
|
df['date'] = pd.to_datetime(df['date'])
|
||||||
|
df = df.rename(columns={'date': 'Date'})
|
||||||
|
df[ticker] = df['close'].pct_change()
|
||||||
|
df.set_index("Date", inplace=True)
|
||||||
|
df.drop(columns=['close'], inplace=True)
|
||||||
|
combined_df = pd.concat([combined_df, df], axis=1)
|
||||||
|
df = combined_df.dropna()
|
||||||
|
|
||||||
|
|
||||||
|
#monthly_returns = round(qs.stats.monthly_returns(df[ticker], eoy = False, compounded = True) * 100,2)
|
||||||
|
#yearly_returns = round(qs.stats.monthly_returns(df[ticker], eoy = True, compounded = True) * 100,2)
|
||||||
|
#print(yearly_returns)
|
||||||
|
#stats = Quant_Stats().get_data(df, ticker)
|
||||||
|
#print(stats)
|
||||||
|
|
||||||
|
|
||||||
|
con.close()
|
||||||
|
|
||||||
|
'''
|
||||||
175
app/ta_signal.py
Normal file
@ -0,0 +1,175 @@
|
|||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
from ta.utils import *
|
||||||
|
from ta.volatility import *
|
||||||
|
from ta.momentum import *
|
||||||
|
from ta.trend import *
|
||||||
|
from ta.volume import *
|
||||||
|
from backtesting import Backtest, Strategy
|
||||||
|
from datetime import datetime
|
||||||
|
import sqlite3
|
||||||
|
import concurrent.futures
|
||||||
|
import json
|
||||||
|
from tqdm import tqdm
|
||||||
|
|
||||||
|
import warnings
|
||||||
|
warnings.filterwarnings("ignore", category=RuntimeWarning, message="invalid value encountered in scalar divide")
|
||||||
|
|
||||||
|
#This is for the stock screener
|
||||||
|
|
||||||
|
class TASignals:
|
||||||
|
|
||||||
|
def __init__(self,data):
|
||||||
|
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
ta_df = pd.DataFrame()
|
||||||
|
|
||||||
|
ta_df['sma_50'] = sma_indicator(self.data["Close"], window=50)
|
||||||
|
ta_df['sma_200'] = sma_indicator(self.data["Close"], window=200)
|
||||||
|
ta_df['ema_50'] = ema_indicator(self.data['Close'], window=50)
|
||||||
|
ta_df['ema_200'] = ema_indicator(self.data['Close'], window=200)
|
||||||
|
ta_df['rsi'] = rsi(self.data['Close'], window=14)
|
||||||
|
ta_df['stoch_rsi'] = stochrsi_k(self.data['Close'], window=14, smooth1 = 3, smooth2 =3)*100
|
||||||
|
ta_df['atr'] = AverageTrueRange(self.data['High'], self.data['Low'], self.data['Close'], window=14).average_true_range()
|
||||||
|
ta_df['cci'] = CCIIndicator(high=self.data['High'], low=self.data['Low'], close=self.data['Close']).cci()
|
||||||
|
ta_df['mfi'] = MFIIndicator(high=self.data['High'], low=self.data['Low'], close=self.data['Close'], volume=self.data['Volume']).money_flow_index()
|
||||||
|
# Calculate the percentage price change over several look-back windows
# (approximate trading-day counts: 5 ≈ 1W, 21 ≈ 1M, 63 ≈ 3M, 126 ≈ 6M, 252 ≈ 1Y, 756 ≈ 3Y)
|
||||||
|
periods = {
|
||||||
|
'1W': 5,
|
||||||
|
'1M': 21,
|
||||||
|
'3M': 63,
|
||||||
|
'6M': 126,
|
||||||
|
'1Y': 252,
|
||||||
|
'3Y': 756
|
||||||
|
}
|
||||||
|
|
||||||
|
# Calculate percentage change for each period
|
||||||
|
for period_name, period_days in periods.items():
|
||||||
|
if len(self.data['Close']) >= period_days:
|
||||||
|
change = ((self.data['Close'].iloc[-1] - self.data['Close'].iloc[-period_days]) / self.data['Close'].iloc[-period_days]) * 100
|
||||||
|
ta_df[f'change_{period_name}'] = change
|
||||||
|
else:
|
||||||
|
ta_df[f'change_{period_name}'] = np.nan # Not enough data for the period
|
||||||
|
|
||||||
|
last_values = {col: [round(ta_df[col].iloc[-1],2)] for col in ta_df.columns} if not ta_df.empty else None
|
||||||
|
last_values_df = pd.DataFrame(last_values)
|
||||||
|
|
||||||
|
return last_values_df
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def create_columns(con, ta_df):
|
||||||
|
"""
|
||||||
|
Create columns in the table for each indicator if they don't exist.
|
||||||
|
"""
|
||||||
|
cursor = con.cursor()
|
||||||
|
existing_columns = cursor.execute(f"PRAGMA table_info(stocks)").fetchall()
|
||||||
|
existing_column_names = [col[1] for col in existing_columns]
|
||||||
|
|
||||||
|
for column in ta_df.columns:
|
||||||
|
if column not in existing_column_names:
|
||||||
|
cursor.execute(f"ALTER TABLE stocks ADD COLUMN {column} REAL")
|
||||||
|
con.commit()
|
||||||
|
|
||||||
|
def update_database(res, symbol, con):
|
||||||
|
"""
|
||||||
|
Update the database for the given symbol with the indicators' last values.
|
||||||
|
"""
|
||||||
|
if not res.empty:
|
||||||
|
# Create a single row update query with all columns
|
||||||
|
columns = ', '.join(res.columns)
|
||||||
|
placeholders = ', '.join(['?'] * len(res.columns))
|
||||||
|
values = res.iloc[0].tolist()
|
||||||
|
|
||||||
|
query = f"UPDATE stocks SET ({columns}) = ({placeholders}) WHERE symbol = '{symbol}'"
|
||||||
|
con.execute(query, values)
|
||||||
|
con.commit()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def process_symbol(ticker):
|
||||||
|
try:
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
date, open, high, low, close,volume
|
||||||
|
FROM
|
||||||
|
"{ticker}"
|
||||||
|
WHERE
|
||||||
|
date BETWEEN ? AND ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
query = query_template.format(ticker=ticker)
|
||||||
|
df = pd.read_sql_query(query, con, params=(start_date, end_date))
|
||||||
|
|
||||||
|
|
||||||
|
if not df.empty:
|
||||||
|
df = df.rename(columns={"open": "Open", "high": "High", "low": "Low", "close": "Close", "volume": "Volume"})
|
||||||
|
|
||||||
|
df['date'] = pd.to_datetime(df['date'])
|
||||||
|
df = df.set_index('date')
|
||||||
|
ta_df = TASignals(df).run()
|
||||||
|
else:
|
||||||
|
ta_df = []
|
||||||
|
|
||||||
|
create_columns(con, ta_df)
|
||||||
|
update_database(ta_df, ticker, con)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
print(f"Failed create ta signals for {ticker}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
con = sqlite3.connect(f'backup_db/stocks.db')
|
||||||
|
|
||||||
|
symbol_query = f"SELECT DISTINCT symbol FROM stocks"
|
||||||
|
symbol_cursor = con.execute(symbol_query)
|
||||||
|
symbols = [symbol[0] for symbol in symbol_cursor.fetchall()]
|
||||||
|
|
||||||
|
start_date = datetime(2022, 1, 1)
|
||||||
|
end_date = datetime.today()
|
||||||
|
|
||||||
|
# Number of concurrent workers
|
||||||
|
num_processes = 4 # You can adjust this based on your system's capabilities
|
||||||
|
futures = []
|
||||||
|
|
||||||
|
with concurrent.futures.ProcessPoolExecutor(max_workers=num_processes) as executor:
|
||||||
|
for symbol in symbols:
|
||||||
|
futures.append(executor.submit(process_symbol, symbol))
|
||||||
|
|
||||||
|
# Use tqdm to wrap around the futures for progress tracking
|
||||||
|
for future in tqdm(concurrent.futures.as_completed(futures), total=len(symbols), desc="Processing"):
|
||||||
|
pass
|
||||||
|
con.close()
|
||||||
|
|
||||||
|
|
||||||
|
#==============Test mode================
|
||||||
|
'''
|
||||||
|
ticker = 'AAPL'
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
date, open, high, low, close
|
||||||
|
FROM
|
||||||
|
{ticker}
|
||||||
|
WHERE
|
||||||
|
date BETWEEN ? AND ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
start_date = datetime(1970, 1, 1)
|
||||||
|
end_date = datetime.today()
|
||||||
|
con = sqlite3.connect('stocks.db')
|
||||||
|
query = query_template.format(ticker=ticker)
|
||||||
|
df = pd.read_sql_query(query, con, params=(start_date, end_date))
|
||||||
|
con.close()
|
||||||
|
|
||||||
|
df = df.rename(columns={"open": "Open", "high": "High", "low": "Low", "close": "Close"})
|
||||||
|
|
||||||
|
df['date'] = pd.to_datetime(df['date'])
|
||||||
|
df = df.set_index('date')
|
||||||
|
|
||||||
|
|
||||||
|
res = TASignals(df).run()
|
||||||
|
print(res)
|
||||||
|
'''
|
||||||
363
app/trade_signal.py
Normal file
@ -0,0 +1,363 @@
|
|||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
from ta.utils import *
|
||||||
|
from ta.volatility import *
|
||||||
|
from ta.momentum import *
|
||||||
|
from ta.trend import *
|
||||||
|
from backtesting import Backtest, Strategy
|
||||||
|
from datetime import datetime
|
||||||
|
import sqlite3
|
||||||
|
import concurrent.futures
|
||||||
|
import json
|
||||||
|
from tqdm import tqdm
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import warnings
|
||||||
|
warnings.filterwarnings("ignore", category=RuntimeWarning, message="invalid value encountered in scalar divide")
|
||||||
|
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
parser = argparse.ArgumentParser(description='Process stock or ETF data.')
|
||||||
|
parser.add_argument('--db', choices=['stocks', 'etf'], required=True, help='Database name (stocks or etf)')
|
||||||
|
parser.add_argument('--table', choices=['stocks', 'etfs'], required=True, help='Table name (stocks or etfs)')
|
||||||
|
return parser.parse_args()
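# Example invocation (hypothetical, using the flags defined above):
#   python3 trade_signal.py --db stocks --table stocks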
|
||||||
|
|
||||||
|
class MyStrategy(Strategy):
|
||||||
|
price_delta = 0.05
|
||||||
|
|
||||||
|
def init(self):
|
||||||
|
# Define indicator conditions as functions
|
||||||
|
self.buy_conditions = [
|
||||||
|
lambda: self.data['sm_5'] > self.data['sm_20'],
|
||||||
|
lambda: self.data['ema_10'] > self.data['ema_50'],
|
||||||
|
lambda: self.data['macd'] > self.data['signal_line'],
|
||||||
|
lambda: self.data['rsi'] <= 30,
|
||||||
|
lambda: self.data['stoch_rsi'] <= 30,
|
||||||
|
lambda: self.data['aroon_up'] > 50 and self.data['aroon_down'] < 50,
|
||||||
|
lambda: self.data['bb_middle'] < self.data['Close'],
|
||||||
|
lambda: self.data["adx_ind"] >= 25 and self.data["adx_pos_ind"] > self.data["adx_neg_ind"],
|
||||||
|
lambda: self.data['roc'] >= 5,
|
||||||
|
lambda: self.data['williams'] >= -20,
|
||||||
|
]
|
||||||
|
self.sell_conditions = [
|
||||||
|
lambda: self.data['sm_5'] <= self.data['sm_20'],
|
||||||
|
lambda: self.data['ema_10'] <= self.data['ema_50'],
|
||||||
|
lambda: self.data['macd'] <= self.data['signal_line'],
|
||||||
|
lambda: self.data['rsi'] >= 70,
|
||||||
|
lambda: self.data['stoch_rsi'] >= 70,
|
||||||
|
lambda: self.data['aroon_up'] <= 50 and self.data['aroon_down'] >= 50,
|
||||||
|
lambda: self.data['bb_middle'] > self.data['Close'],
|
||||||
|
lambda: self.data["adx_ind"] < 25 and self.data["adx_pos_ind"] < self.data["adx_neg_ind"],
|
||||||
|
lambda: self.data['roc'] <= -5,
|
||||||
|
lambda: self.data['williams'] <= -80,
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def next(self):
|
||||||
|
|
||||||
|
buy_signal_count = sum(condition() for condition in self.buy_conditions)
|
||||||
|
sell_signal_count = sum(condition() for condition in self.sell_conditions)
|
||||||
|
|
||||||
|
# Adjust the threshold according to your requirement (e.g., majority = 2 out of 3 conditions)
|
||||||
|
buy_threshold = 8
|
||||||
|
sell_threshold = 8
|
||||||
|
|
||||||
|
# Set target take-profit and stop-loss prices to be one price_delta
|
||||||
|
# away from the current closing price.
|
||||||
|
|
||||||
|
upper, lower = self.data['Close'][-1] * (1 + np.r_[1, -1]*self.price_delta)
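# np.r_[1, -1] * price_delta expands to (+0.05, -0.05), so `upper` is the take-profit level
# and `lower` the stop-loss level, each 5% away from the latest close.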
|
||||||
|
|
||||||
|
if not self.position:
|
||||||
|
# No existing position
|
||||||
|
if buy_signal_count >= buy_threshold:
|
||||||
|
self.buy(tp=upper, sl=lower)
|
||||||
|
else:
|
||||||
|
# There is an existing position
|
||||||
|
if sell_signal_count >= sell_threshold:
|
||||||
|
self.position.close()
|
||||||
|
|
||||||
|
|
||||||
|
class TradingSignals:
|
||||||
|
|
||||||
|
def __init__(self,data):
|
||||||
|
data['sm_5'] = sma_indicator(data["Close"], window=5)
|
||||||
|
data['sm_20'] = sma_indicator(data["Close"], window=20)
|
||||||
|
|
||||||
|
# Calculate MACD and its signal line
|
||||||
|
data['macd'] = macd(data["Close"], window_slow=26, window_fast=12)
|
||||||
|
data['signal_line'] = macd_signal(data["Close"], window_slow=26, window_fast=12)
|
||||||
|
|
||||||
|
# EMA(10) / EMA(50) inputs for the moving-average crossover conditions
data['ema_10'] = ema_indicator(data['Close'], window=10)
data['ema_50'] = ema_indicator(data['Close'], window=50)
|
||||||
|
|
||||||
|
data['rsi'] = rsi(data['Close'], window=14)
|
||||||
|
aroon = AroonIndicator(data['Close'], low=data['Low'], window=14)
|
||||||
|
data['aroon_up'] = aroon.aroon_up()
|
||||||
|
data['aroon_down'] = aroon.aroon_down()
|
||||||
|
data['bb_middle'] = BollingerBands(close=data["Close"], window=20, window_dev=2).bollinger_mavg()
|
||||||
|
|
||||||
|
data['roc'] = roc(data['Close'], window=14)
|
||||||
|
|
||||||
|
data['williams'] = WilliamsRIndicator(high=data['High'], low=data['Low'], close=data['Close']).williams_r()
|
||||||
|
data['stoch_rsi'] = StochRSIIndicator(close=data['Close']).stochrsi()
|
||||||
|
|
||||||
|
data['adx_ind'] = adx(data['High'],data['Low'],data['Close'])
|
||||||
|
data['adx_pos_ind'] = adx_pos(data['High'], data['Low'], data['Close'])
|
||||||
|
data['adx_neg_ind'] = adx_neg(data['High'], data['Low'], data['Close'])
|
||||||
|
|
||||||
|
self.data = data
|
||||||
|
|
||||||
|
|
||||||
|
def next_pred(self):
|
||||||
|
|
||||||
|
df = self.data.copy()
|
||||||
|
|
||||||
|
buy_conditions = [
|
||||||
|
lambda: self.data['sm_5'].iloc[-1] > self.data['sm_20'].iloc[-1],
|
||||||
|
lambda: self.data['ema_10'].iloc[-1] > self.data['ema_50'].iloc[-1],
|
||||||
|
lambda: self.data['macd'].iloc[-1] > self.data['signal_line'].iloc[-1],
|
||||||
|
lambda: self.data['rsi'].iloc[-1] <= 30,
|
||||||
|
lambda: self.data['stoch_rsi'].iloc[-1] <= 30,
|
||||||
|
lambda: self.data['aroon_up'].iloc[-1] > 50 and self.data['aroon_down'].iloc[-1] < 50,
|
||||||
|
lambda: self.data['bb_middle'].iloc[-1] < self.data['Close'].iloc[-1],
|
||||||
|
lambda: self.data['adx_ind'].iloc[-1] >= 25 and self.data['adx_pos_ind'].iloc[-1] > self.data['adx_neg_ind'].iloc[-1],
|
||||||
|
lambda: self.data['roc'].iloc[-1] >= 5,
|
||||||
|
lambda: self.data['williams'].iloc[-1] >= -20,
|
||||||
|
]
|
||||||
|
|
||||||
|
sell_conditions = [
|
||||||
|
lambda: self.data['sm_5'].iloc[-1] <= self.data['sm_20'].iloc[-1],
|
||||||
|
lambda: self.data['ema_10'].iloc[-1] <= self.data['ema_50'].iloc[-1],
|
||||||
|
lambda: self.data['macd'].iloc[-1] <= self.data['signal_line'].iloc[-1],
|
||||||
|
lambda: self.data['rsi'].iloc[-1] >= 70,
|
||||||
|
lambda: self.data['stoch_rsi'].iloc[-1] >= 70,
|
||||||
|
lambda: self.data['aroon_up'].iloc[-1] <= 50 and self.data['aroon_down'].iloc[-1] >= 50,
|
||||||
|
lambda: self.data['bb_middle'].iloc[-1] > self.data['Close'].iloc[-1],
|
||||||
|
lambda: self.data['adx_ind'].iloc[-1] < 25 and self.data['adx_pos_ind'].iloc[-1] < self.data['adx_neg_ind'].iloc[-1],
|
||||||
|
lambda: self.data['roc'].iloc[-1] <= -5,
|
||||||
|
lambda: self.data['williams'].iloc[-1] <= -80,
|
||||||
|
]
|
||||||
|
|
||||||
|
buy_signal_count = sum(condition() for condition in buy_conditions)
|
||||||
|
sell_signal_count = sum(condition() for condition in sell_conditions)
|
||||||
|
|
||||||
|
buy_threshold = 8
|
||||||
|
sell_threshold =8
|
||||||
|
|
||||||
|
signal = None
|
||||||
|
if buy_signal_count >= buy_threshold and not sell_signal_count >= sell_threshold:
|
||||||
|
signal = 'Buy'
|
||||||
|
elif sell_signal_count >= sell_threshold and not buy_signal_count >= buy_threshold:
|
||||||
|
signal = 'Sell'
|
||||||
|
else:
|
||||||
|
signal = 'Hold'
|
||||||
|
|
||||||
|
return signal
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def run(self):
|
||||||
|
|
||||||
|
df = self.data.copy()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
df = df.dropna()
|
||||||
|
|
||||||
|
bt = Backtest(df, MyStrategy, cash=1000000, commission=0, exclusive_orders = True, trade_on_close=True)
|
||||||
|
stats = bt.run()
|
||||||
|
#print(stats)
|
||||||
|
history_sheet = stats['_trades']
|
||||||
|
#print(history_sheet)
|
||||||
|
|
||||||
|
stats_output = stats[['Start','End','Return [%]', 'Buy & Hold Return [%]', 'Return (Ann.) [%]',\
|
||||||
|
'Duration','Volatility (Ann.) [%]','Sharpe Ratio', 'Sortino Ratio', 'Calmar Ratio',\
|
||||||
|
'Max. Drawdown [%]', 'Avg. Drawdown [%]', 'Max. Drawdown Duration','Avg. Drawdown Duration',\
|
||||||
|
'# Trades', 'Win Rate [%]','Best Trade [%]','Worst Trade [%]','Avg. Trade [%]',\
|
||||||
|
'Max. Trade Duration','Avg. Trade Duration','Profit Factor', 'Expectancy [%]','SQN']]
|
||||||
|
|
||||||
|
stats_output = stats_output.to_dict()
|
||||||
|
|
||||||
|
stats_output['Start'] = stats_output['Start'].strftime("%Y-%m-%d")
|
||||||
|
stats_output['End'] = stats_output['End'].strftime("%Y-%m-%d")
|
||||||
|
stats_output['Duration'] = str(stats_output['Duration']).replace(' days 00:00:00', '')
|
||||||
|
stats_output['Avg. Trade Duration'] = str(stats_output['Avg. Trade Duration']).replace(' days 00:00:00', '')
|
||||||
|
stats_output['Avg. Drawdown Duration'] = str(stats_output['Avg. Drawdown Duration']).replace(' days 00:00:00', '')
|
||||||
|
|
||||||
|
stats_output['Max. Drawdown Duration'] = str(stats_output['Max. Drawdown Duration']).replace(' days 00:00:00', '')
|
||||||
|
stats_output['Max. Trade Duration'] = str(stats_output['Max. Trade Duration']).replace(' days 00:00:00', '')
|
||||||
|
|
||||||
|
stats_output['nextSignal'] = self.next_pred()
|
||||||
|
#print(history_sheet)
|
||||||
|
|
||||||
|
output_history_sheet = []
|
||||||
|
|
||||||
|
for i in range(len(history_sheet)):
|
||||||
|
output_history_sheet.append(
|
||||||
|
{'time': history_sheet['EntryTime'][i].strftime("%Y-%m-%d"),
|
||||||
|
'position': 'belowBar',
|
||||||
|
'color': '#59B0F6',
|
||||||
|
'shape': 'arrowUp',
|
||||||
|
#'text': 'Buy',
|
||||||
|
'size': 2.0,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
output_history_sheet.append(
|
||||||
|
{'time': history_sheet['ExitTime'][i].strftime("%Y-%m-%d"),
|
||||||
|
'position': 'aboveBar',
|
||||||
|
'color': '#E91E63',
|
||||||
|
'shape': 'arrowDown',
|
||||||
|
#'text': 'Sell',
|
||||||
|
'size': 2.0,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
|
||||||
|
return [ stats_output , output_history_sheet]
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def create_column(con):
|
||||||
|
"""
|
||||||
|
Create the 'tradingSignals' column if it doesn't exist in the 'stocks' table.
|
||||||
|
"""
|
||||||
|
query_check = f"PRAGMA table_info({table_name})"
|
||||||
|
cursor = con.execute(query_check)
|
||||||
|
columns = [col[1] for col in cursor.fetchall()]
|
||||||
|
|
||||||
|
if 'tradingSignals' not in columns:
|
||||||
|
query = f"ALTER TABLE {table_name} ADD COLUMN tradingSignals TEXT"
|
||||||
|
con.execute(query)
|
||||||
|
con.commit()
|
||||||
|
'''
|
||||||
|
if 'ai_signal' not in columns:
|
||||||
|
query = f"ALTER TABLE {table_name} ADD COLUMN ai_signal TEXT"
|
||||||
|
con.execute(query)
|
||||||
|
con.commit()
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
def update_database(res, symbol, con):
|
||||||
|
query = f"UPDATE {table_name} SET tradingSignals = ? WHERE symbol = ?"
|
||||||
|
res_json = json.dumps(res) # Convert the pred dictionary to JSON string
|
||||||
|
con.execute(query, (res_json, symbol))
|
||||||
|
"""
|
||||||
|
query = f"UPDATE {table_name} SET ai_signal = ? WHERE symbol = ?"
|
||||||
|
if res[0]['nextSignal'] == 'Sell':
|
||||||
|
signal = 0
|
||||||
|
elif res[0]['nextSignal'] == 'Hold':
|
||||||
|
signal = 1
|
||||||
|
elif res[0]['nextSignal'] == 'Buy':
|
||||||
|
signal = 2
|
||||||
|
else:
|
||||||
|
signal = -1
|
||||||
|
con.execute(query, (signal, symbol))
|
||||||
|
"""
|
||||||
|
con.commit()
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def process_symbol(ticker):
|
||||||
|
try:
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
date, open, high, low, close
|
||||||
|
FROM
|
||||||
|
"{ticker}"
|
||||||
|
WHERE
|
||||||
|
date BETWEEN ? AND ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
query = query_template.format(ticker=ticker)
|
||||||
|
df = pd.read_sql_query(query, con, params=(start_date, end_date))
|
||||||
|
|
||||||
|
|
||||||
|
if not df.empty:
|
||||||
|
df = df.rename(columns={"open": "Open", "high": "High", "low": "Low", "close": "Close"})
|
||||||
|
|
||||||
|
df['date'] = pd.to_datetime(df['date'])
|
||||||
|
df = df.set_index('date')
|
||||||
|
res = TradingSignals(df).run()
|
||||||
|
else:
|
||||||
|
res = []
|
||||||
|
|
||||||
|
create_column(con)
|
||||||
|
update_database(res, ticker, con)
|
||||||
|
|
||||||
|
except Exception as e:
print(f"Failed to create trading signals for {ticker}: {e}")
|
||||||
|
|
||||||
|
|
||||||
|
args = parse_args()
|
||||||
|
db_name = args.db
|
||||||
|
table_name = args.table
|
||||||
|
con = sqlite3.connect(f'backup_db/{db_name}.db')
|
||||||
|
|
||||||
|
symbol_query = f"SELECT DISTINCT symbol FROM {table_name}"
|
||||||
|
symbol_cursor = con.execute(symbol_query)
|
||||||
|
symbols = [symbol[0] for symbol in symbol_cursor.fetchall()]
|
||||||
|
|
||||||
|
start_date = datetime(1970, 1, 1)
|
||||||
|
end_date = datetime.today()
|
||||||
|
|
||||||
|
# Number of concurrent workers
|
||||||
|
num_processes = 4 # You can adjust this based on your system's capabilities
|
||||||
|
futures = []
|
||||||
|
|
||||||
|
with concurrent.futures.ProcessPoolExecutor(max_workers=num_processes) as executor:
|
||||||
|
for symbol in symbols:
|
||||||
|
futures.append(executor.submit(process_symbol, symbol))
|
||||||
|
|
||||||
|
# Use tqdm to wrap around the futures for progress tracking
|
||||||
|
for future in tqdm(concurrent.futures.as_completed(futures), total=len(symbols), desc="Processing"):
|
||||||
|
pass
|
||||||
|
con.close()
|
||||||
|
|
||||||
|
|
||||||
|
#==============Test mode================
|
||||||
|
'''
|
||||||
|
ticker = 'AAPL'
|
||||||
|
query_template = """
|
||||||
|
SELECT
|
||||||
|
date, open, high, low, close
|
||||||
|
FROM
|
||||||
|
{ticker}
|
||||||
|
WHERE
|
||||||
|
date BETWEEN ? AND ?
|
||||||
|
"""
|
||||||
|
|
||||||
|
start_date = datetime(2019, 1, 1)
|
||||||
|
end_date = datetime.today()
|
||||||
|
con = sqlite3.connect('stocks.db')
|
||||||
|
query = query_template.format(ticker=ticker)
|
||||||
|
df = pd.read_sql_query(query, con, params=(start_date, end_date))
|
||||||
|
con.close()
|
||||||
|
|
||||||
|
df = df.rename(columns={"open": "Open", "high": "High", "low": "Low", "close": "Close"})
|
||||||
|
|
||||||
|
df['date'] = pd.to_datetime(df['date'])
|
||||||
|
df = df.set_index('date')
|
||||||
|
|
||||||
|
|
||||||
|
res = TradingSignals(df).run()
|
||||||
|
'''
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
'''
|
||||||
|
fig, ax = plt.subplots(figsize=(14,8))
|
||||||
|
ax.plot(df['Close'] , label = ticker ,linewidth=0.5, color='blue', alpha = 0.9)
|
||||||
|
#ax.plot(df['sm_5'], label = 'SMA10', alpha = 0.85)
|
||||||
|
#ax.plot(df['sm_20'], label = 'SMA50' , alpha = 0.85)
|
||||||
|
ax.scatter(history_sheet['EntryTime'] , history_sheet['EntryPrice'] , label = 'Buy' , marker = '^', color = 'green',alpha =1, s=100)
|
||||||
|
ax.scatter(history_sheet['ExitTime'] , history_sheet['ExitPrice'] , label = 'Sell' , marker = 'v', color = 'red',alpha =1, s=100)
|
||||||
|
ax.set_xlabel(f'{start_date} - {end_date}' ,fontsize=18)
|
||||||
|
ax.set_ylabel('Close Price INR (₨)' , fontsize=18)
|
||||||
|
legend = ax.legend()
|
||||||
|
ax.grid()
|
||||||
|
plt.tight_layout()
|
||||||
|
plt.show()
|
||||||
|
|
||||||
|
'''
|
||||||
232
app/twitter.py
Normal file
@ -0,0 +1,232 @@
|
|||||||
|
from dotenv import load_dotenv
|
||||||
|
import os
|
||||||
|
import tweepy
|
||||||
|
from requests_oauthlib import OAuth1Session
|
||||||
|
from benzinga import financial_data
|
||||||
|
from datetime import datetime, timedelta
|
||||||
|
from collections import defaultdict
|
||||||
|
import requests
|
||||||
|
import json
|
||||||
|
import ujson
|
||||||
|
import sqlite3
|
||||||
|
load_dotenv()
|
||||||
|
|
||||||
|
api_key = os.getenv('BENZINGA_API_KEY')
|
||||||
|
fin = financial_data.Benzinga(api_key)
|
||||||
|
|
||||||
|
consumer_key = os.getenv('TWITTER_API_KEY')
|
||||||
|
consumer_secret = os.getenv('TWITTER_API_SECRET')
|
||||||
|
access_token = os.getenv('TWITTER_ACCESS_TOKEN')
|
||||||
|
access_token_secret = os.getenv('TWITTER_ACCESS_TOKEN_SECRET')
|
||||||
|
|
||||||
|
con = sqlite3.connect('stocks.db')
|
||||||
|
cursor = con.cursor()
|
||||||
|
cursor.execute("PRAGMA journal_mode = wal")
|
||||||
|
cursor.execute("SELECT DISTINCT symbol FROM stocks")
|
||||||
|
stock_symbols = [row[0] for row in cursor.fetchall()]
|
||||||
|
|
||||||
|
|
||||||
|
def send_tweet(message):
|
||||||
|
# Replace the text below with the text you wish to Tweet. You can also add parameters to post polls, quote Tweets, adjust reply settings, and Tweet to Super Followers, among other features.
|
||||||
|
payload = {"text": message}
|
||||||
|
|
||||||
|
# Make the request
|
||||||
|
oauth = OAuth1Session(
|
||||||
|
consumer_key,
|
||||||
|
client_secret=consumer_secret,
|
||||||
|
resource_owner_key=access_token,
|
||||||
|
resource_owner_secret=access_token_secret,
|
||||||
|
)
|
||||||
|
|
||||||
|
# Making the request
|
||||||
|
response = oauth.post(
|
||||||
|
"https://api.twitter.com/2/tweets",
|
||||||
|
json=payload,
|
||||||
|
)
|
||||||
|
|
||||||
|
if response.status_code != 201:
|
||||||
|
raise Exception(
|
||||||
|
"Request returned an error: {} {}".format(response.status_code, response.text)
|
||||||
|
)
|
||||||
|
|
||||||
|
print("Response code: {}".format(response.status_code))
|
||||||
|
|
||||||
|
# Saving the response as JSON
|
||||||
|
json_response = response.json()
|
||||||
|
print(json.dumps(json_response, indent=4, sort_keys=True))
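# Example usage (hypothetical text): send_tweet("$AAPL Apple reports Q4 earnings after the bell")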
|
||||||
|
|
||||||
|
|
||||||
|
def get_news():
|
||||||
|
start_date = datetime.today().strftime('%Y-%m-%d')
|
||||||
|
end_date = start_date
|
||||||
|
url = "https://api.benzinga.com/api/v2/news"
|
||||||
|
querystring = {"token":api_key,"channels":"WIIM","dateFrom":start_date,"dateTo":end_date}
|
||||||
|
headers = {"accept": "application/json"}
|
||||||
|
response = requests.request("GET", url, headers=headers, params=querystring)
|
||||||
|
data = ujson.loads(response.text)
|
||||||
|
|
||||||
|
res_list = []
|
||||||
|
for item in data:
|
||||||
|
title = item['title']
|
||||||
|
stock_names = ' '.join(['$' + stock['name'] for stock in item['stocks']])
|
||||||
|
message = '{} {}'.format(stock_names, title)
|
||||||
|
send_tweet(message)
|
||||||
|
print(message)
|
||||||
|
|
||||||
|
def get_analyst_ratings():
|
||||||
|
url = "https://api.benzinga.com/api/v2.1/calendar/ratings"
|
||||||
|
querystring = {"token":api_key,"parameters[date_from]":"2024-04-16","parameters[date_to]":"2024-04-16"}
|
||||||
|
headers = {"accept": "application/json"}
|
||||||
|
response = requests.request("GET", url, headers=headers, params=querystring)
|
||||||
|
data = ujson.loads(response.text)['ratings']
|
||||||
|
|
||||||
|
for item in data:
|
||||||
|
symbol = item['ticker']
|
||||||
|
try:
|
||||||
|
item['adjusted_pt_current'] = round(float(item['adjusted_pt_current']))
|
||||||
|
item['adjusted_pt_prior'] = round(float(item['adjusted_pt_prior']))
|
||||||
|
except:
|
||||||
|
pass
|
||||||
|
if item['rating_current'] == 'Strong Sell' or item['rating_current'] == 'Strong Buy':
|
||||||
|
pass
|
||||||
|
elif item['rating_current'] == 'Neutral':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Equal-Weight' or item['rating_current'] == 'Sector Weight' or item['rating_current'] == 'Sector Perform':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'In-Line':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Outperform' and item['action_company'] == 'Downgrades':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Negative':
|
||||||
|
item['rating_current'] = 'Sell'
|
||||||
|
elif (item['rating_current'] == 'Outperform' or item['rating_current'] == 'Overweight') and (item['action_company'] == 'Reiterates' or item['action_company'] == 'Initiates Coverage On'):
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
item['action_company'] = 'Initiates'
|
||||||
|
elif item['rating_current'] == 'Market Outperform' and (item['action_company'] == 'Maintains' or item['action_company'] == 'Reiterates'):
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
elif item['rating_current'] == 'Outperform' and (item['action_company'] == 'Maintains' or item['action_pt'] == 'Announces' or item['action_company'] == 'Upgrades'):
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
elif item['rating_current'] == 'Buy' and (item['action_company'] == 'Raises' or item['action_pt'] == 'Raises'):
|
||||||
|
item['rating_current'] = 'Strong Buy'
|
||||||
|
elif item['rating_current'] == 'Overweight' and (item['action_company'] == 'Maintains' or item['action_company'] == 'Upgrades' or item['action_company'] == 'Reiterates' or item['action_pt'] == 'Raises'):
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
elif item['rating_current'] == 'Positive' or item['rating_current'] == 'Sector Outperform':
|
||||||
|
item['rating_current'] = 'Buy'
|
||||||
|
elif item['rating_current'] == 'Underperform' or item['rating_current'] == 'Underweight':
|
||||||
|
item['rating_current'] = 'Sell'
|
||||||
|
elif item['rating_current'] == 'Reduce' and (item['action_company'] == 'Downgrades' or item['action_pt'] == 'Lowers'):
|
||||||
|
item['rating_current'] = 'Sell'
|
||||||
|
elif item['rating_current'] == 'Sell' and item['action_pt'] == 'Announces':
|
||||||
|
item['rating_current'] = 'Strong Sell'
|
||||||
|
elif item['rating_current'] == 'Market Perform':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_prior'] == 'Outperform' and item['action_company'] == 'Downgrades':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Peer Perform' and item['rating_prior'] == 'Peer Perform':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
elif item['rating_current'] == 'Peer Perform' and item['action_pt'] == 'Announces':
|
||||||
|
item['rating_current'] = 'Hold'
|
||||||
|
item['action_company'] = 'Initiates'
|
||||||
|
if symbol in stock_symbols:
|
||||||
|
message = f"{item['action_company']} {item['rating_current']} rating on ${item['ticker']} from ${item['adjusted_pt_prior']} to ${item['adjusted_pt_current']} new Price Target from {item['analyst']}."
|
||||||
|
#print(message)
|
||||||
|
send_tweet(message)
|
||||||
|
|
||||||
|
|
||||||
|
def get_analyst_insight():
|
||||||
|
url = "https://api.benzinga.com/api/v1/analyst/insights"
|
||||||
|
querystring = {"token": api_key}
|
||||||
|
response = requests.request("GET", url, params=querystring)
|
||||||
|
data = ujson.loads(response.text)['analyst-insights']
|
||||||
|
#print(data)
|
||||||
|
for item in data:
|
||||||
|
try:
|
            symbol = item['security']['symbol']

            with open(f"json/trend-analysis/{symbol}.json", 'r') as file:
                ai_model = ujson.load(file)

            for i in ai_model:
                if i['label'] == 'threeMonth':
                    sentiment = i['sentiment']
                    accuracy = i['accuracy']

            if symbol in stock_symbols:
                #tweet = f"{item['action']} {item['rating']} rating on ${item['security']['symbol']} with ${item['pt']} Price Target from {item['firm']}. \
                #\nOur own AI Model predicts a {sentiment} Trend for the next 3 months with an accuracy of {accuracy}%."
                message = f"{item['action']} {item['rating']} rating on ${item['security']['symbol']} with ${item['pt']} Price Target from {item['firm']}."
                print(message)
                #send_tweet(message)

    except:
        pass


def get_biggest_options_activity():
    # Initialize dictionaries to store cumulative sums and counts
    call_volume_sum = defaultdict(int)
    put_volume_sum = defaultdict(int)
    volume_sum = defaultdict(int)
    open_interest_sum = defaultdict(int)
    price_sum = defaultdict(float)
    cost_basis_sum = defaultdict(float)
    call_count = defaultdict(int)
    put_count = defaultdict(int)

    try:
        end_date = datetime.today()
        start_date = (end_date - timedelta(10))
        start_date_str = start_date.strftime('%Y-%m-%d')
        end_date_str = end_date.strftime('%Y-%m-%d')
        res_list = []
        for page in range(0, 50):
            try:
                # Walk through the result pages until the API stops returning data
                data = fin.options_activity(date_from=start_date_str, date_to=end_date_str, page=page, pagesize=1000)
                data = ujson.loads(fin.output(data))['option_activity']
                res_list += data
            except:
                break

        #filtered_data = [{key: value for key, value in item.items() if key in ['ticker','cost_basis','date_expiration','open_interest','price', 'put_call','strike_price', 'volume']} for item in res_list]
        # Iterate through the data
        for item in res_list:
            ticker = item['ticker']
            if item['put_call'] == 'CALL':
                call_volume_sum[ticker] += int(item['volume'])
                call_count[ticker] += 1
            elif item['put_call'] == 'PUT':
                put_volume_sum[ticker] += int(item['volume'])
                put_count[ticker] += 1
            volume_sum[ticker] += int(item['volume'])
            #open_interest_sum[ticker] += int(item['open_interest'])
            #price_sum[ticker] += float(item['price'])
            #cost_basis_sum[ticker] += float(item['cost_basis'])

        sorted_volume = sorted(volume_sum.items(), key=lambda x: x[1], reverse=True)
        output = []
        for i, (ticker, volume) in enumerate(sorted_volume[:3], 1):
            flow_sentiment = 'Neutral'
            if put_volume_sum[ticker] > call_volume_sum[ticker]:
                flow_sentiment = 'Bearish'
            elif put_volume_sum[ticker] < call_volume_sum[ticker]:
                flow_sentiment = 'Bullish'

            output.append(f"{i}) ${ticker}\n \
- Call Flow: {call_volume_sum[ticker]:,}\n \
- Put Flow: {put_volume_sum[ticker]:,}\n \
- Put/Call Ratio: {round(put_volume_sum[ticker]/call_volume_sum[ticker],2)}\n \
- Flow Sentiment: {flow_sentiment}")

        message = f"Market Recap: Top 3 Highest Options Activity from this Week\n\
{output[0]}\n\
{output[1]}\n\
{output[2]}"
        print(message)
        #send_tweet(message)

    except Exception as e:
        print(e)


if __name__ == '__main__':
    get_news()
    #get_analyst_insight()
    #get_analyst_ratings()
    #get_biggest_options_activity()
24
fastify/all-strategies/server.js
Normal file
@ -0,0 +1,24 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/all-strategies', async (request, reply) => {
    const data = request.body;
    const userId = data?.userId;
    let output;

    try {
      output = await pb.collection("stockscreener").getFullList({
        filter: `user="${userId}"`,
      });
    }
    catch(e) {
      output = [];
    }

    reply.send({ items: output })
  });

  done();
};
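For orientation, here is a minimal client-side sketch of how these POST routes are typically called, using /all-strategies as the example. It is not part of this commit: it assumes the Fastify app from fastify/app.js (further below) is listening on http://localhost:2000, that the runtime provides a global fetch (Node 18+), and that 'SOME_USER_ID' stands in for a real PocketBase user record id.

// Hypothetical usage sketch, not included in the commit.
async function fetchAllStrategies(userId) {
  const res = await fetch('http://localhost:2000/all-strategies', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ userId }),
  });
  const { items } = await res.json(); // the route replies with { items: [...] }
  return items;
}

fetchAllStrategies('SOME_USER_ID').then((items) => console.log(items.length, 'saved strategies'));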
27
fastify/all-watchlists/server.js
Normal file
@ -0,0 +1,27 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/all-watchlists', async (request, reply) => {
    const data = request.body;
    const userId = data?.userId;

    let output;

    try {
      output = await pb.collection("watchlist").getFullList({
        filter: `user="${userId}"`
      })
    }
    catch(e) {
      //console.log(e)
      output = {};
    }

    reply.send({ items: output })

  });

  done();
};
483
fastify/app.js
Normal file
@ -0,0 +1,483 @@

let serverRunning = false;

const fastify = require('fastify')({})
const cors = require('@fastify/cors');
//Load API KEYS
require('dotenv').config({ path: '../app/.env' });
const fmpAPIKey = process.env.FMP_API_KEY;
const mixpanelAPIKey = process.env.MIXPANEL_API_KEY;

const Mixpanel = require('mixpanel');
const UAParser = require('ua-parser-js');

const got = require('got'); //Only version npm i got@11.8.3 works with ESM
const cheerio = require('cheerio');
const sharp = require('sharp');
const axios = require('axios');
const fs = require('fs');
const path = require('path');
const pino = require('pino');

const mixpanel = Mixpanel.init(mixpanelAPIKey, { debug: false });


const PocketBase = require('pocketbase/cjs')
const pb = new PocketBase('http://127.0.0.1:8090');

// globally disable auto cancellation
//See https://github.com/pocketbase/js-sdk#auto-cancellation
//Bug happens that get-post gives an error of auto-cancellation. Hence set it to false;
pb.autoCancellation(false);

const { serialize } = require('object-to-formdata');

// Register the CORS plugin
//Add Cors so that only localhost and my stocknear.com can send acceptable requests
fastify.register(cors);
const corsMiddleware = (request, reply, done) => {
  const allowedOrigins = ['http://localhost:4173','http://127.0.0.1:4173','http://localhost:5173', 'http://127.0.0.1:5173', 'https://stocknear.com','https://www.stocknear.com', 'http://stocknear.com', 'http://www.stocknear.com'];

  const origin = request?.headers?.origin;
  if (!origin || allowedOrigins?.includes(origin)) {
    reply.header('Access-Control-Allow-Origin', origin || '*');
    reply.header('Access-Control-Allow-Methods', 'GET,POST');
    reply.header('Access-Control-Allow-Headers', 'Content-Type');
    done();
  } else {
    reply.code(403).send({ error: 'Forbidden' });
  }
};
fastify.addHook('onRequest', corsMiddleware);
fastify.register(require('./mixpanel/server'), { mixpanel, UAParser });
fastify.register(require('./get-user-stats/server'), { pb });
fastify.register(require('./get-community-stats/server'), { pb });
fastify.register(require('./get-moderators/server'), { pb });
fastify.register(require('./get-user-data/server'), { pb });
fastify.register(require('./get-all-comments/server'), { pb });
fastify.register(require('./get-post/server'), { pb });
fastify.register(require('./get-one-post/server'), { pb });
fastify.register(require('./update-watchlist/server'), { pb, serialize });
fastify.register(require('./get-portfolio-data/server'), { pb });
fastify.register(require('./create-portfolio/server'), { pb, serialize });
fastify.register(require('./buy-stock/server'), { pb });
fastify.register(require('./sell-stock/server'), { pb });
fastify.register(require('./create-post-link/server'), { got,cheerio,sharp });
fastify.register(require('./create-post-image/server'), { sharp });
fastify.register(require('./delete-comment/server'), { pb });
fastify.register(require('./delete-post/server'), { pb });
fastify.register(require('./leaderboard/server'), { pb });
fastify.register(require('./feedback/server'), { pb });
fastify.register(require('./create-watchlist/server'), { pb });
fastify.register(require('./delete-watchlist/server'), { pb });
fastify.register(require('./edit-name-watchlist/server'), { pb });
fastify.register(require('./all-watchlists/server'), { pb });
fastify.register(require('./get-notifications/server'), { pb });
fastify.register(require('./update-notifications/server'), { pb });
fastify.register(require('./create-strategy/server'), { pb });
fastify.register(require('./delete-strategy/server'), { pb });
fastify.register(require('./all-strategies/server'), { pb });
fastify.register(require('./save-strategy/server'), { pb });
fastify.register(require('./get-strategy/server'), { pb });
fastify.register(require('./get-twitch-status/server'), { axios });
fastify.register(require('./get-portfolio/server'), { pb });
fastify.register(require('./create-price-alert/server'), { pb });
fastify.register(require('./get-price-alert/server'), { pb, fs, path });
fastify.register(require('./delete-price-alert/server'), { pb });
fastify.register(require('./upvote/server'), { pb });
fastify.register(require('./downvote/server'), { pb });
fastify.register(require('./upvote-comment/server'), { pb });
fastify.register(require('./downvote-comment/server'), { pb });


//fastify.register(require('./create-comment/server'), { pb });


function wait(ms){
  var start = new Date().getTime();
  var end = start;
  while(end < start + ms) {
    end = new Date().getTime();
  }
}

fastify.register(require('@fastify/websocket'))

const WebSocket = require('ws');

let isSend = false;
let sendInterval;
fastify.register(async function (fastify) {
  fastify.get('/realtime-data', { websocket: true }, (connection, req) => {
    // Send a welcome message to the client

    //connection.socket.send('hi from server');

    let symbol; // ticker requested by the client for this connection

    // Listen for incoming messages from the client
    connection.socket.on('message', (message) => {
      symbol = message.toString('utf-8');
      console.log('Received message from client:', symbol);

      // If you want to dynamically update the subscription based on client's message
      updateSubscription();
    });

    //======================
    const login = {
      'event': 'login',
      'data': {
        'apiKey': fmpAPIKey,
      }
    }

    const subscribe = {
      'event': 'subscribe',
      'data': {
        'ticker': '', // Initial value; will be updated dynamically
      }
    }

    function updateSubscription() {
      subscribe.data.ticker = symbol;
    }


    // Create a new WebSocket instance for your backend
    const ws = new WebSocket('wss://websockets.financialmodelingprep.com');

    // Handle WebSocket connection open

    ws.on('open', function open() {
      ws.send(JSON.stringify(login));
      wait(2000); //2 seconds in milliseconds
      ws.send(JSON.stringify(subscribe));
    });

    // Handle WebSocket errors
    ws.on('error', function (error) {
      console.error('WebSocket error:', error);
      // Handle the error gracefully, you might want to notify the client or log it.
      // For now, let's close the connection if an error occurs
      connection.socket.close();
    });



    // Handle WebSocket messages
    ws.on('message', function (data, flags) {
      const stringData = data.toString('utf-8');

      if (connection.socket.readyState === WebSocket.OPEN && !isSend) {
        connection.socket.send(stringData);
        //console.log(stringData)
        isSend = true
        setTimeout(() => {
          isSend = false}, 1000);

        //wait(2000);
      }
      //wait(2000);
    });




    //======================

    // Handle client disconnect
    connection.socket.on('close', () => {
      console.log('Client disconnected');
      connection?.socket?.close();
      // Check if the WebSocket is open before trying to close it
      if (ws.readyState === WebSocket.OPEN) {
        try {
          ws.close();
        } catch (e) {
          console.error('Error while closing WebSocket:', e);
        }
      }
    });


  });
});

fastify.register(async function (fastify) {
  fastify.get('/realtime-crypto-data', { websocket: true }, (connection, req) => {
    // Send a welcome message to the client

    //connection.socket.send('hi from server');

    let symbol; // ticker requested by the client for this connection

    // Listen for incoming messages from the client
    connection.socket.on('message', (message) => {
      symbol = message.toString('utf-8');
      console.log('Received message from client:', symbol);

      // If you want to dynamically update the subscription based on client's message
      updateSubscription();
    });

    //======================
    const login = {
      'event': 'login',
      'data': {
        'apiKey': fmpAPIKey,
      }
    }

    const subscribe = {
      'event': 'subscribe',
      'data': {
        'ticker': '', // Initial value; will be updated dynamically
      }
    }

    function updateSubscription() {
      subscribe.data.ticker = symbol;
    }


    // Create a new WebSocket instance for your backend
    const ws = new WebSocket('wss://crypto.financialmodelingprep.com');

    // Handle WebSocket connection open

    ws.on('open', function open() {
      ws.send(JSON.stringify(login));
      wait(2000); //2 seconds in milliseconds
      ws.send(JSON.stringify(subscribe));
    });

    // Handle WebSocket errors
    ws.on('error', function (error) {
      console.error('WebSocket error:', error);
      // Handle the error gracefully, you might want to notify the client or log it.
      // For now, let's close the connection if an error occurs
      connection.socket.close();
    });



    // Handle WebSocket messages
    ws.on('message', function (data, flags) {
      const stringData = data.toString('utf-8');

      if (connection.socket.readyState === WebSocket.OPEN && !isSend) {
        connection.socket.send(stringData);
        //console.log(stringData)
        isSend = true
        setTimeout(() => {
          isSend = false}, 1000);

        //wait(2000);
      }
      //wait(2000);
    });




    //======================

    // Handle client disconnect
    connection.socket.on('close', () => {
      console.log('Client disconnected');
      connection?.socket?.close();
      // Check if the WebSocket is open before trying to close it
      if (ws.readyState === WebSocket.OPEN) {
        try {
          ws.close();
        } catch (e) {
          console.error('Error while closing WebSocket:', e);
        }
      }
    });


  });
});

fastify.register(async function (fastify) {
  fastify.get('/options-flow-reader', { websocket: true }, (connection, req) => {
    let jsonData;
    let sendInterval;

    // Function to send data to the client
    const sendData = async () => {
      const filePath = path.join(__dirname, '../app/json/options-flow/feed/data.json');
      try {
        if (fs.existsSync(filePath)) {
          const fileData = fs.readFileSync(filePath, 'utf8');
          jsonData = JSON.parse(fileData);
          connection.socket.send(JSON.stringify(jsonData));
        } else {
          console.error('File not found:', filePath);
          clearInterval(sendInterval);
          connection?.socket?.close();
          console.error('Connection closed')
          throw new Error('This is an intentional uncaught exception!');
        }
      } catch (err) {
        console.error('Error sending data to client:', err);
      }
    };

    // Send data to the client initially
    sendData();

    // Start sending data periodically
    sendInterval = setInterval(sendData, 5000);

    // Handle client disconnect
    connection.socket.on('close', () => {
      console.log('Client disconnected');
      clearInterval(sendInterval);
    });

    // Handle server crash cleanup
    const closeHandler = () => {
      console.log('Server is closing. Cleaning up resources...');
      clearInterval(sendInterval);
      connection.socket.close();
    };

    // Add close handler to process event
    process.on('exit', closeHandler);
    process.on('SIGINT', closeHandler);
    process.on('SIGTERM', closeHandler);
    process.on('uncaughtException', closeHandler);
    process.on('unhandledRejection', closeHandler);
  });
});

fastify.register(async function (fastify) {
  fastify.get('/options-zero-dte-reader', { websocket: true }, (connection, req) => {
    let jsonData;
    let sendInterval;

    // Function to send data to the client
    const sendData = async () => {
      const filePath = path.join(__dirname, '../app/json/options-flow/zero-dte/data.json');
      try {
        if (fs.existsSync(filePath)) {
          const fileData = fs.readFileSync(filePath, 'utf8');
          jsonData = JSON.parse(fileData);
          connection.socket.send(JSON.stringify(jsonData));
        } else {
          console.error('File not found:', filePath);
          clearInterval(sendInterval);
          console.error('Connection closed')
          throw new Error('This is an intentional uncaught exception!');
        }
      } catch (err) {
        console.error('Error sending data to client:', err);
      }
    };

    // Send data to the client initially
    sendData();

    // Start sending data periodically
    sendInterval = setInterval(sendData, 5000);

    // Handle client disconnect
    connection.socket.on('close', () => {
      console.log('Client disconnected');
      connection?.socket?.close();
      clearInterval(sendInterval);
    });

    // Handle server crash cleanup
    const closeHandler = () => {
      console.log('Server is closing. Cleaning up resources...');
      clearInterval(sendInterval);
      connection?.socket?.close();
    };

    // Add close handler to process event
    process.on('exit', closeHandler);
    process.on('SIGINT', closeHandler);
    process.on('SIGTERM', closeHandler);
    process.on('uncaughtException', closeHandler);
    process.on('unhandledRejection', closeHandler);
  });
});

// Function to start the server
function startServer() {
  if (!serverRunning) {
    fastify.listen(2000, (err) => {
      if (err) {
        console.error('Error starting server:', err);
        process.exit(1); // Exit the process if server start fails
      }
      serverRunning = true;
      console.log('Server started successfully on port 2000!');
    });
  } else {
    console.log('Server is already running.');
  }
}

// Function to stop the server
function stopServer() {
  if (serverRunning) {
    return new Promise((resolve, reject) => {
      fastify.close((err) => {
        if (err) {
          console.error('Error closing server:', err);
          reject(err);
        } else {
          serverRunning = false;
          console.log('Server closed successfully!');
          resolve();
        }
      });
    });
  } else {
    console.log('Server is not running.');
    return Promise.resolve();
  }
}

// Function to gracefully close and restart the server
function restartServer() {
  if (serverRunning) {
    stopServer().then(() => {
      console.log('Restarting server...');
      startServer();
    }).catch((error) => {
      console.error('Failed to restart server:', error);
      process.exit(1); // Exit the process if server restart fails
    });
  } else {
    console.log('Server is not running. Starting server...');
    startServer();
  }
}

// Add a global error handler for uncaught exceptions
process.on('uncaughtException', (err) => {
  console.error('Uncaught Exception:', err);
  restartServer();
});

// Add a global error handler for unhandled promise rejections
process.on('unhandledRejection', (reason, promise) => {
  console.error('Unhandled Rejection at:', promise, 'reason:', reason);
  restartServer();
});

// Start the server
startServer();
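To show how the /realtime-data relay above is meant to be consumed, a small client sketch follows. It is not part of this commit: it assumes the server listens on localhost:2000 and reuses the ws package that app.js already depends on; the ticker is only an example.

// Hypothetical client sketch, not included in the commit.
const WebSocket = require('ws');

const client = new WebSocket('ws://localhost:2000/realtime-data');
client.on('open', () => client.send('AAPL'));                 // ask the relay to subscribe to a ticker
client.on('message', (msg) => console.log(msg.toString()));   // throttled FMP quote payloads (at most ~1 per second)
client.on('error', (err) => console.error(err));
client.on('close', () => console.log('relay closed'));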
140
fastify/buy-stock/server.js
Normal file
@ -0,0 +1,140 @@
// Declare a route


function processHoldingsList(holdings) {
  const stockGroups = {};

  for (const stock of holdings) {
    if (stock.symbol in stockGroups) {
      stockGroups[stock.symbol].totalCost += stock.boughtPrice * stock.numberOfShares;
      stockGroups[stock.symbol].totalShares += stock.numberOfShares;
    } else {
      stockGroups[stock.symbol] = {
        totalCost: stock.boughtPrice * stock.numberOfShares,
        totalShares: stock.numberOfShares,
        name: stock.name,
        assetType: stock.assetType,
        currentPrice: stock.currentPrice,
        dailyChange: stock.dailyChange,
        sinceBoughtChange: stock.sinceBoughtChange,
      };
    }

    // Update dailyChange automatically
    stockGroups[stock.symbol].dailyChange = stock.dailyChange;
    stockGroups[stock.symbol].sinceBoughtChange = stock.sinceBoughtChange;
    stockGroups[stock.symbol].currentPrice = stock.currentPrice;
  }

  const updatedHoldings = [];

  for (const symbol in stockGroups) {
    const { totalCost, totalShares, name, assetType, currentPrice, dailyChange, sinceBoughtChange } = stockGroups[symbol];
    const finalBoughtPrice = totalCost / totalShares;
    const updatedStock = {
      symbol,
      name,
      assetType,
      boughtPrice: finalBoughtPrice,
      currentPrice,
      dailyChange,
      sinceBoughtChange: Number(((currentPrice/finalBoughtPrice -1) * 100)?.toFixed(2)),
      numberOfShares: totalShares,
    };
    updatedHoldings.push(updatedStock);
  }

  return updatedHoldings;
}


module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/buy-stock', async (request, reply) => {

    const holidays = ['2024-01-01', '2024-01-15','2024-02-19','2024-03-29','2024-05-27','2024-06-19','2024-07-04','2024-09-02','2024-11-28','2024-12-25'];
    const currentDate = new Date().toISOString().split('T')[0];

    // Get the current time in the ET time zone
    const etTimeZone = 'America/New_York';
    const currentTime = new Date().toLocaleString('en-US', { timeZone: etTimeZone });

    // Determine if the NYSE is currently open or closed
    const currentHour = new Date(currentTime).getHours();
    const isWeekend = new Date(currentTime).getDay() === 6 || new Date(currentTime).getDay() === 0;
    const isBeforeMarketOpen = currentHour < 9 || (currentHour === 9 && new Date(currentTime).getMinutes() < 30);
    const isAfterMarketClose = currentHour >= 16;

    const isStockMarketOpen = !(isWeekend || isBeforeMarketOpen || isAfterMarketClose || holidays?.includes(currentDate));
    let output;
    if (isStockMarketOpen === true) {

      const data = request.body;
      const currentDate = new Date();
      const year = currentDate.getFullYear();
      const month = String(currentDate.getMonth() + 1).padStart(2, '0'); // Month is zero-based
      const day = '01';

      const formattedDate = `${year}-${month}-${day}`; // Output: "yyyy-mm-01"

      const userId = data?.userId;

      let newHolding = {'symbol': data['symbol'],
        'name': data['name'],
        'assetType': data['assetType'],
        'numberOfShares': data['numberOfShares'],
        'boughtPrice': data['boughtPrice'],
        'currentPrice': data['boughtPrice'],
        'dailyChange': 0,
        'sinceBoughtChange': 0 }

      let currentPortfolio = await pb.collection("portfolios").getList(1, 500, {
        filter: `user="${userId}" && created >="${formattedDate}" `,
      });

      currentPortfolio = currentPortfolio?.items[0];

      let holdings = currentPortfolio?.holdings || [];

      let tradingHistory = currentPortfolio?.tradingHistory || [];

      let availableCash = currentPortfolio?.availableCash - data['estimatedTotal'];

      holdings.push(newHolding)

      const updatedHoldings = processHoldingsList(holdings)


      tradingHistory.push({'symbol': data['symbol'],
        'name': data['name'],
        'assetType': data['assetType'],
        'numberOfShares': data['numberOfShares'],
        'price': Number(data['boughtPrice']),
        'type': 'buy',
        'date': new Date()});

      try {

        await pb.collection("portfolios").update(currentPortfolio?.id, {
          "holdings": updatedHoldings,
          "availableCash": availableCash,
          "tradingHistory": tradingHistory,
        })
        output = 'success';

      } catch (err) {
        output = 'failure'
      }
    }
    else {
      output = 'marketClosed'
    }

    reply.send({ items: output })
  })

  done();
};
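To make the lot-averaging in processHoldingsList easier to follow, here is a small standalone example with made-up numbers: two buys of the same symbol collapse into one position whose boughtPrice is the share-weighted average and whose sinceBoughtChange is recomputed from it.

// Hypothetical example data, not included in the commit.
const holdings = [
  { symbol: 'AAPL', name: 'Apple', assetType: 'stock', numberOfShares: 10, boughtPrice: 100, currentPrice: 120, dailyChange: 0.5, sinceBoughtChange: 0 },
  { symbol: 'AAPL', name: 'Apple', assetType: 'stock', numberOfShares: 10, boughtPrice: 140, currentPrice: 120, dailyChange: 0.5, sinceBoughtChange: 0 },
];

console.log(processHoldingsList(holdings));
// -> one entry: 20 shares, boughtPrice 120, sinceBoughtChange 0 (120 / 120 - 1 = 0%)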
35
fastify/create-portfolio/server.js
Normal file
@ -0,0 +1,35 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;
  const serialize = opts.serialize;

  fastify.post('/create-portfolio', async (request, reply) => {
    const data = request.body;


    const formData = {'user': data?.userId, 'accountValue': 100000,
      'availableCash': 100000, 'overallReturn': 0, 'rank': 0,
      'holdings': JSON.stringify([]), 'tradingHistory': '[]',
      'metrics': JSON.stringify({'alpha': 'n/a',
        'beta': 'n/a',
        'maxDrawdown': 0
      })
    }
    let output = 'failure';
    try {
      await pb.collection('portfolios').create(serialize(formData));
      output = 'success';
    }
    catch(e) {
      console.log(e)
    }



    reply.send({ message: output })

  });

  done();
};
39
fastify/create-post-image/server.js
Normal file
@ -0,0 +1,39 @@
// Declare a route
module.exports = function (fastify, opts, done) {
  const sharp = opts.sharp;

  fastify.post('/create-post-image', async (request, reply) => {
    try {
      const data = request.body;
      let imageBufferArray = data?.imageBufferArray;


      if (imageBufferArray) {
        // Resize and optimize the image
        const optimizedImageBuffer = await sharp(imageBufferArray)
          .resize({
            width: 800,
            height: 1000,
            fit: sharp.fit.inside,
            withoutEnlargement: true,
          })
          .jpeg({ quality: 80 })
          .toBuffer();



        // Send the optimized image in the response
        reply.send({
          image: optimizedImageBuffer,
        });
      } else {
        reply.status(400).send({ error: 'Image data is missing.' });
      }
    } catch (error) {
      console.error('Error processing image:', error);
      reply.status(500).send({ error: 'Internal Server Error' });
    }
  });

  done();
};
77
fastify/create-post-link/server.js
Normal file
@ -0,0 +1,77 @@
// Declare a route
module.exports = function (fastify, opts, done) {
  const got = opts.got;
  const cheerio = opts.cheerio;
  const sharp = opts.sharp;

  fastify.post('/create-post-link', async (request, reply) => {
    const data = request.body;
    const url = data?.link;
    let description;
    let imageBuffer;

    try {
      const response = await got(url, {
        headers: {
          'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        },
        responseType: 'buffer',
      });

      const $ = cheerio.load(response.body);

      description = $('head meta[property="og:description"]').attr('content');
      let image = $('head meta[property="og:image"]').attr('content');

      if (!image) {
        let largestSize = 0;
        let largestImage = '';

        $('img').each(async function () {
          if ($(this).attr('src') && $(this).attr('src').match(/\.(webp|jpg|jpeg|png|gif)$/)) {
            try {
              imageBuffer = await got($(this).attr('src'), {
                headers: {
                  'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
                },
                responseType: 'buffer',
              }).then((response) => response.body);

              const metadata = await sharp(imageBuffer).metadata();
              const imageSize = metadata.width * metadata.height;

              if (imageSize > largestSize) {
                largestSize = imageSize;
                largestImage = $(this).attr('src');
              }
            } catch (error) {
              console.error('Error getting image:', error);
            }
          }
        });

        image = largestImage;
      }

      imageBuffer = await got(image, {
        headers: {
          'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
        },
        responseType: 'buffer',
      }).then((response) => response.body);

    } catch (e) {
      console.error(e);
    }

    // Check if imageBlob is not null before sending it in the response
    reply.send({
      description: description,
      image: imageBuffer,
    })

  });

  done();
};

41
fastify/create-price-alert/server.js
Normal file
@ -0,0 +1,41 @@
// Declare a route


module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/create-price-alert', async (request, reply) => {

    const data = request.body;

    let output;

    let newAlert = {
      'user': data['userId'],
      'symbol': data['symbol']?.toUpperCase(),
      'name': data['name'],
      'assetType': data['assetType']?.toLowerCase(),
      'targetPrice': Number(data['targetPrice']),
      'condition': data['condition']?.toLowerCase(),
      'priceWhenCreated': Number(data['priceWhenCreated']),
      'triggered': false,
    }


    try {

      await pb.collection("priceAlert")?.create(newAlert)
      output = 'success';

    } catch (err) {
      output = 'failure'
    }


    reply.send({ items: output })
  })

  done();
};

21
fastify/create-strategy/server.js
Normal file
@ -0,0 +1,21 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/create-strategy', async (request, reply) => {
    const data = request.body;
    let output;

    try {
      output = await pb.collection("stockscreener").create(data)
    }
    catch(e) {
      output = {};
    }

    reply.send({ items: output })
  });

  done();
};
24
fastify/create-watchlist/server.js
Normal file
@ -0,0 +1,24 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/create-watchlist', async (request, reply) => {
    const data = request.body;
    let output;

    try {
      output = await pb.collection("watchlist").create(data)
    }
    catch(e) {
      //console.log(e)
      output = [];
    }


    reply.send({ items: output })

  });

  done();
};
48
fastify/delete-comment/server.js
Normal file
@ -0,0 +1,48 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/delete-comment', async (request, reply) => {
    const data = request.body;

    let output;
    const userId = data?.userId;
    const commentUserId = data?.commentUser;
    const commentId = data.commentId

    //Each delete gives the user -1 Karma points

    let checkModerator = await pb.collection('moderators').getList(1, 50)

    //OP and moderators have the right to delete comments
    if (commentUserId === userId || checkModerator.items.some((item) => item.user === userId))
    {

      try {

        await pb.collection('comments').delete(commentId);
        await pb.collection("users").update(commentUserId, {
          "karma-": 1,
        })

        output = 'success';


      } catch (err) {
        output = 'failure'
        console.log(err)
      }
    }
    else {
      output = 'failure';
    }



    reply.send({ message: output })

  });

  done();
};
40
fastify/delete-post/server.js
Normal file
@ -0,0 +1,40 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/delete-post', async (request, reply) => {
    const data = request.body;
    const postId = data?.postId;
    const userId = data?.userId;
    const moderator = 'db5s41oatgoeh0q' //moderators can always delete post
    let output;

    try {
      if(moderator === userId)
      {
        await pb.collection('posts').delete(postId);
        output = 'success';
      }
      else {
        const res = await pb.collection('posts').getOne(postId);
        if (res?.user === userId)
        {
          await pb.collection('posts').delete(postId);
          output = 'success';
        }
        else {
          output = 'failure';
        }
      }
    }
    catch(e) {
      console.log(e)
      output = 'failure';
    }

    reply.send({ items: output })
  });

  done();
};
26
fastify/delete-price-alert/server.js
Normal file
@ -0,0 +1,26 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/delete-price-alert', async (request, reply) => {
    const data = request.body;
    const priceAlertIdList = data?.priceAlertIdList;
    let output;
    try {
      for (const item of priceAlertIdList) {
        await pb.collection("priceAlert")?.delete(item)
      }
      output = 'success';
    }
    catch(e) {
      //console.log(e)
      output = 'failure';
    }

    reply.send({ items: output })

  });

  done();
};
22
fastify/delete-strategy/server.js
Normal file
@ -0,0 +1,22 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/delete-strategy', async (request, reply) => {
    const data = request.body;
    let output;

    try {
      await pb.collection("stockscreener").delete(data['strategyId'])
      output = 'success';
    }
    catch(e) {
      output = 'failure';
    }

    reply.send({ items: output })
  });

  done();
};
25
fastify/delete-watchlist/server.js
Normal file
@ -0,0 +1,25 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/delete-watchlist', async (request, reply) => {
    const data = request.body;
    const watchListId = data?.watchListId
    let output;

    try {
      await pb.collection("watchlist").delete(watchListId)
      output = 'success';
    }
    catch(e) {
      //console.log(e)
      output = 'failure';
    }

    reply.send({ items: output })

  });

  done();
};
187
fastify/downvote-comment/server.js
Normal file
@ -0,0 +1,187 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/downvote-comment', async (request, reply) => {
    const data = request.body;

    const commentId = data?.commentId;
    const userId = data?.userId;

    let output = 'failure';

    console.log(data)


    try {

      let doesUserExist = await pb.collection("alreadyVoted").getList(1, 50, {
        filter: `user="${userId}"` && `comment="${commentId}"`,
      })

      doesUserExist = doesUserExist?.items?.find(item => item?.user === userId && item?.comment === commentId);
      //console.log('Does it exist yet: ', doesUserExist)

      const votedId = doesUserExist?.id;
      let currentVote = doesUserExist?.type;

      const opPost = await pb.collection('comments').getOne(commentId)

      console.log('currentVote: ', currentVote)
      /*
      console.log('commentId: ', commentId)
      console.log('votedId: ', votedId)
      console.log('currentVote: ', currentVote)
      console.log('user: ', user);
      */

      //If user has no history with this post create it
      if( !currentVote || votedId === 'undefined')
      {
        console.log('created')
        let formDataAlreadyVoted = new FormData();
        formDataAlreadyVoted.append('post', commentId);
        formDataAlreadyVoted.append('user', userId);
        formDataAlreadyVoted.append('notifyType', 'downvote');
        await pb.collection('alreadyVoted').create(formDataAlreadyVoted);

      }


      if (currentVote === 'upvote')
      {


        await pb.collection("comments").update(commentId, {
          "downvote+": 1,
        });

        await pb.collection("comments").update(commentId, {
          "upvote-": 1,
        });

        await pb.collection("alreadyVoted").update(votedId, {
          "type": 'downvote',
        });


        //if user is the opPost then it should only subtract -1 once
        let opPost = await pb.collection('comments').getOne(commentId)

        if (opPost.user === userId)
        {
          //Punishment: Find user of post and subtract -1 karma points
          await pb.collection("users").update(opPost.user, {
            "karma-": 2,
          })
        }

        else
        {
          //Punishment: Find user of post and subtract -1 karma points
          await pb.collection("users").update(opPost.user, {
            "karma-": 2,
          })

          //Punishment: User who downvotes post also loose -1 karma points
          await pb.collection("users").update(userId, {
            "karma-": 2,
          })
        }

      }

      else if (currentVote === 'neutral' || !currentVote)
      {

        if (opPost.user === userId)
        {
          //Punishment: Find user of post and subtract -1 karma points
          await pb.collection("users").update(opPost.user, {
            "karma-": 1,
          })
        }

        else
        {
          //Punishment: Find user of post and subtract -1 karma points
          await pb.collection("users").update(opPost.user, {
            "karma-": 1,
          })

          //Punishment: User who downvotes post also loose -1 karma points
          await pb.collection("users").update(userId, {
            "karma-": 1,
          })
        }



        await pb.collection("comments").update(commentId, {
          "downvote+": 1,
        });

        await pb.collection("alreadyVoted").update(votedId, {
          "type": 'downvote',
        });




      }

      else
      {
        await pb.collection("comments").update(commentId, {
          "downvote-": 1,
        });

        await pb.collection("alreadyVoted").update(votedId, {
          "type": 'neutral',
        });


        let opPost = await pb.collection('comments').getOne(commentId)

        //if user is the opPost then it should only add +1 once
        if (opPost.user === userId)
        {
          //Reset Punishment: Find user of post and add +1 karma points back
          let opPost = await pb.collection('comments').getOne(commentId)
          await pb.collection("users").update(opPost.user, {
            "karma+": 1,
          })

        }

        else
        {
          //Reset Punishment: Find user of post and add +1 karma points back
          let opPost = await pb.collection('comments').getOne(commentId)
          await pb.collection("users").update(opPost.user, {
            "karma+": 1,
          })

          //Reset Punishment: User who removes downvote gets back +1 karma points
          await pb.collection("users").update(userId, {
            "karma+": 1,
          })
        }


      }


    }
    catch(e) {
      console.log(e)
    }


    reply.send({ items: output })

  });

  done();
};
185
fastify/downvote/server.js
Normal file
@ -0,0 +1,185 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/downvote', async (request, reply) => {
    const data = request.body;

    const postId = data?.postId;
    const userId = data?.userId;

    let output = 'failure';


    try {

      let doesUserExist = await pb.collection("alreadyVoted").getList(1, 50, {
        filter: `user="${userId}"` && `post="${postId}"`,
      })

      doesUserExist = doesUserExist.items.find(item => item.user === userId && item.post === postId);
      //console.log('Does it exist yet: ', doesUserExist)

      const votedId = doesUserExist?.id;
      let currentVote = doesUserExist?.type;

      const opPost = await pb.collection('posts').getOne(postId)

      console.log('currentVote: ', currentVote)
      /*
      console.log('postId: ', postId)
      console.log('votedId: ', votedId)
      console.log('currentVote: ', currentVote)
      console.log('user: ', user);
      */

      //If user has no history with this post create it
      if( !currentVote || votedId === 'undefined')
      {
        console.log('created')
        let formDataAlreadyVoted = new FormData();
        formDataAlreadyVoted.append('post', postId);
        formDataAlreadyVoted.append('user', userId);
        formDataAlreadyVoted.append('notifyType', 'downvote');
        await pb.collection('alreadyVoted').create(formDataAlreadyVoted);

      }


      if (currentVote === 'upvote')
      {


        await pb.collection("posts").update(postId, {
          "downvote+": 1,
        });

        await pb.collection("posts").update(postId, {
          "upvote-": 1,
        });

        await pb.collection("alreadyVoted").update(votedId, {
          "type": 'downvote',
        });


        //if user is the opPost then it should only subtract -1 once
        let opPost = await pb.collection('posts').getOne(postId)

        if (opPost.user === userId)
        {
          //Punishment: Find user of post and subtract -1 karma points
          await pb.collection("users").update(opPost.user, {
            "karma-": 2,
          })
        }

        else
        {
          //Punishment: Find user of post and subtract -1 karma points
          await pb.collection("users").update(opPost.user, {
            "karma-": 2,
          })

          //Punishment: User who downvotes post also loose -1 karma points
          await pb.collection("users").update(userId, {
            "karma-": 2,
          })
        }

      }

      else if (currentVote === 'neutral' || !currentVote)
      {

        if (opPost.user === userId)
        {
          //Punishment: Find user of post and subtract -1 karma points
          await pb.collection("users").update(opPost.user, {
            "karma-": 1,
          })
        }

        else
        {
          //Punishment: Find user of post and subtract -1 karma points
          await pb.collection("users").update(opPost.user, {
            "karma-": 1,
          })

          //Punishment: User who downvotes post also loose -1 karma points
          await pb.collection("users").update(userId, {
            "karma-": 1,
          })
        }



        await pb.collection("posts").update(postId, {
          "downvote+": 1,
        });

        await pb.collection("alreadyVoted").update(votedId, {
          "type": 'downvote',
        });




      }

      else
      {
        await pb.collection("posts").update(postId, {
          "downvote-": 1,
        });

        await pb.collection("alreadyVoted").update(votedId, {
          "type": 'neutral',
        });


        let opPost = await pb.collection('posts').getOne(postId)

        //if user is the opPost then it should only add +1 once
        if (opPost.user === userId)
        {
          //Reset Punishment: Find user of post and add +1 karma points back
          let opPost = await pb.collection('posts').getOne(postId)
          await pb.collection("users").update(opPost.user, {
            "karma+": 1,
          })

        }

        else
        {
          //Reset Punishment: Find user of post and add +1 karma points back
          let opPost = await pb.collection('posts').getOne(postId)
          await pb.collection("users").update(opPost.user, {
            "karma+": 1,
          })

          //Reset Punishment: User who removes downvote gets back +1 karma points
          await pb.collection("users").update(userId, {
            "karma+": 1,
          })
        }


      }


    }
    catch(e) {
      console.log(e)
    }


    reply.send({ items: output })

  });

  done();
};
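The branches above amount to a small vote state machine. A hypothetical pure-function summary follows, added only to make the transitions easier to read; it is not part of the commit, and the karma deltas apply to the post author, and also to the voter when the two differ.

// Hypothetical helper, not included in the commit; mirrors the /downvote route above.
function nextDownvoteState(currentVote) {
  if (currentVote === 'upvote') {
    // switching an existing upvote to a downvote
    return { type: 'downvote', postDelta: { downvote: +1, upvote: -1 }, karmaDelta: -2 };
  }
  if (currentVote === 'neutral' || !currentVote) {
    // first downvote on this post
    return { type: 'downvote', postDelta: { downvote: +1 }, karmaDelta: -1 };
  }
  // already downvoted: toggling it off restores the previous state
  return { type: 'neutral', postDelta: { downvote: -1 }, karmaDelta: +1 };
}

console.log(nextDownvoteState('upvote')); // { type: 'downvote', postDelta: { downvote: 1, upvote: -1 }, karmaDelta: -2 }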
29
fastify/edit-name-watchlist/server.js
Normal file
@ -0,0 +1,29 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/edit-name-watchlist', async (request, reply) => {
    const data = request.body;
    const watchListId = data?.watchListId;
    const newTitle = data?.title;

    let output;

    try {
      await pb.collection("watchlist").update(watchListId, {
        'title': newTitle
      })
      output = 'success';
    }
    catch(e) {
      //console.log(e)
      output = 'failure';
    }

    reply.send({ items: output })

  });

  done();
};
24
fastify/feedback/server.js
Normal file
@ -0,0 +1,24 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/feedback', async (request, reply) => {
    const data = request.body;
    let output;

    try {
      await pb.collection("feedback").create(data)
      output = 'success';
    }
    catch(e) {
      //console.log(e)
      output = 'failure';
    }


    reply.send({ items: output })

  });

  done();
};
55
fastify/get-all-comments/server.js
Normal file
@ -0,0 +1,55 @@
// Declare a route

function listToTree(comments, parentProp = "reply") {
  // Create id indexed comments dictionary
  const commentsDict = {};
  for (let comment of comments) {
    commentsDict[comment.id] = {
      ...comment,
      children: [],
    };
  }

  // Build the tree
  const tree = [];
  for (const comment of comments) {
    const parentId = comment[parentProp];
    if (parentId) {
      commentsDict[parentId].children.push(commentsDict[comment.id]);
    } else {
      tree.push(commentsDict[comment.id]);
    }
  }

  return tree;
}

module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/get-all-comments', async (request, reply) => {
    const data = request.body;
    const postId = data?.postId
    let output;

    try {
      const result = await pb.collection("comments").getFullList({
        filter: `post="${postId}"`,
        expand: 'user,alreadyVoted(comment)',
        fields: "*,expand.user,expand.alreadyVoted(comment).user,expand.alreadyVoted(comment).type",
        sort: '-created',
      })


      output = listToTree(result);
    }
    catch(e) {
      output = [];
    }

    reply.send({ items: output })
  });

  done();
};
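listToTree turns the flat comment list into a nested thread by following each comment's reply field; a small self-contained example with made-up ids, not part of the commit:

// Hypothetical example, not included in the commit.
const flat = [
  { id: 'c1', text: 'top-level comment' },
  { id: 'c2', text: 'first reply', reply: 'c1' },
  { id: 'c3', text: 'nested reply', reply: 'c2' },
];

console.log(JSON.stringify(listToTree(flat), null, 2));
// -> [ { id: 'c1', ..., children: [ { id: 'c2', ..., children: [ { id: 'c3', ..., children: [] } ] } ] } ]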
27
fastify/get-community-stats/server.js
Normal file
@ -0,0 +1,27 @@
module.exports = function (fastify, opts, done) {
  const pb = opts.pb;

  fastify.get('/get-community-stats', async (request, reply) => {
    let output;
    let totalUsers = 0;
    let totalPosts = 0;
    let totalComments = 0;

    try {
      totalUsers = (await pb.collection("users").getList(1, 1))?.totalItems;
      totalPosts = (await pb.collection("posts").getList(1, 1))?.totalItems;
      totalComments = (await pb.collection("comments").getList(1, 1))?.totalItems;


      output = { totalUsers, totalPosts, totalComments };

    } catch (e) {
      console.error(e);
      output = { totalUsers, totalPosts, totalComments };
    }

    reply.send({ items: output });
  });

  done();
};
22
fastify/get-moderators/server.js
Normal file
@ -0,0 +1,22 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.get('/get-moderators', async (request, reply) => {
    let output;

    try {
      output = await pb.collection("moderators").getFullList({
        expand: 'user'
      })
    }
    catch(e) {
      output = [];
    }

    reply.send({ items: output })
  });

  done();
};
30
fastify/get-notifications/server.js
Normal file
@ -0,0 +1,30 @@
// Declare a route
module.exports = function (fastify, opts, done) {

  const pb = opts.pb;

  fastify.post('/get-notifications', async (request, reply) => {
    const data = request.body;

    const userId = data?.userId;
    let output;

    try {
      output = await pb.collection("notifications").getFullList({
        filter: `opUser="${userId}"`,
        expand: 'user,post,comment',
        sort: '-created'
      });

    }
    catch(e) {
      //console.log(e)
      output = []
    }

    reply.send({ items: output })

  });

  done();
};
22
fastify/get-one-post/server.js
Normal file
@ -0,0 +1,22 @@
// Declare a route
module.exports = function (fastify, opts, done) {

    const pb = opts.pb;

    fastify.post('/get-one-post', async (request, reply) => {
        const data = request.body;

        const postId = data?.postId;

        const output = await pb.collection('posts').getOne(postId, {
            expand: 'user,alreadyVoted(post)',
            fields: "*,expand.user,expand.alreadyVoted(post).user,expand.alreadyVoted(post).type"
        });

        reply.send({ items: output });
    });

    done();
};
30
fastify/get-portfolio-data/server.js
Normal file
@ -0,0 +1,30 @@
// Declare a route
module.exports = function (fastify, opts, done) {

    const pb = opts.pb;

    fastify.post('/get-portfolio-data', async (request, reply) => {
        const data = request.body;

        const userId = data?.userId;

        const currentDate = new Date();

        const year = currentDate.getFullYear();
        const month = String(currentDate.getMonth() + 1).padStart(2, '0'); // Month is zero-based
        const day = '01';

        const formattedDate = `${year}-${month}-${day}`; // Output: "yyyy-mm-01"

        //Get Portfolio of user for current month
        const output = await pb.collection("portfolios").getList(1, 500, {
            filter: `user="${userId}" && created >="${formattedDate}" `,
        });

        reply.send({ items: output.items });
    });

    done();
};
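A small worked example of the month-anchor string computed above (illustrative date only):

// Illustrative only — the same date-anchoring logic as above, run on a fixed date.
const d = new Date('2024-03-17T12:00:00Z');
const anchor = `${d.getFullYear()}-${String(d.getMonth() + 1).padStart(2, '0')}-01`;
console.log(anchor); // "2024-03-01", so the filter matches records created in March 2024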
36
fastify/get-portfolio/server.js
Normal file
@ -0,0 +1,36 @@
// Declare a route
module.exports = function (fastify, opts, done) {

    const pb = opts.pb;

    fastify.post('/get-portfolio', async (request, reply) => {
        const data = request.body;

        const userId = data?.userId;
        // Get the current date
        const currentMonth = new Date();
        const nextMonth = new Date(currentMonth);
        nextMonth.setDate(currentMonth.getDate() + 31); // Add a number of days to ensure next month

        // Set the day to 1 to get the beginning of the current month
        const beginningOfMonth = new Date(currentMonth);
        beginningOfMonth.setDate(1);

        const beginningOfNextMonth = new Date(nextMonth);
        beginningOfNextMonth.setDate(1);

        // Format it as a string if needed
        const startDate = beginningOfMonth.toISOString().split('T')[0];
        const endDate = beginningOfNextMonth.toISOString().split('T')[0];

        //console.log('Start Date:', startDate);
        //console.log('End Date:', endDate);

        const output = await pb.collection("portfolios").getFullList({
            filter: `user="${userId}" && created >= "${startDate}" && created < "${endDate}"`
        });

        reply.send({ items: output });
    });

    done();
};
102
fastify/get-post/server.js
Normal file
@ -0,0 +1,102 @@
// Declare a route
module.exports = function (fastify, opts, done) {

    const pb = opts.pb;

    fastify.post('/get-post', async (request, reply) => {
        const data = request.body;

        let filter;
        const sort = data?.sortingPosts === 'hot' ? '-upvote' : '-created';
        let pinnedPost;
        let posts;

        try {

            if (data?.seenPostId.length !== 0) {
                filter = data?.seenPostId?.map((id) => `id!="${id}"`).join("&&");

                //applies only for profile and user directory
                if (data?.userId) {
                    filter += `&& user="${data?.userId}" && pinned=false`;
                }

                if (data?.filterTicker) {
                    filter += `&& tagline="${data?.filterTicker}" && pinned=false`;
                }

                posts = (await pb.collection('posts').getList(data?.startPage, 10, {
                    sort: sort,
                    filter: filter,
                    expand: 'user,comments(post),alreadyVoted(post)',
                    fields: "*,expand.user,expand.comments(post), expand.alreadyVoted(post).user,expand.alreadyVoted(post).type"
                }))?.items;
            }
            else {

                if (data?.userId) {

                    posts = (await pb.collection('posts').getList(data?.startPage, 10, {
                        sort: sort,
                        filter: `user="${data?.userId}" && pinned=false`,
                        expand: `user,comments(post),alreadyVoted(post)`,
                        fields: "*,expand.user,expand.comments(post), expand.alreadyVoted(post).user,expand.alreadyVoted(post).type"
                    }))?.items;

                }

                else if (data?.filterTicker) {

                    posts = await pb.collection('posts').getList(data?.startPage, 10, {
                        sort: sort,
                        filter: `tagline="${data?.filterTicker}" && pinned=false`,
                        expand: `user,comments(post),alreadyVoted(post)`,
                        fields: "*,expand.user,expand.comments(post), expand.alreadyVoted(post).user,expand.alreadyVoted(post).type"
                    });

                }

                else {
                    posts = await pb.collection('posts').getList(data?.startPage, 10, {
                        sort: sort,
                        filter: `pinned=false`,
                        expand: 'user, comments(post), alreadyVoted(post)',
                        fields: "*,expand.user,expand.comments(post), expand.alreadyVoted(post).user,expand.alreadyVoted(post).type"
                    });

                    posts = posts.items;

                    pinnedPost = await pb.collection('posts').getFullList({
                        filter: `pinned=true`,
                        sort: '-created',
                        expand: `user,comments(post),alreadyVoted(post)`,
                        fields: "*,expand.user,expand.comments(post), expand.alreadyVoted(post).user,expand.alreadyVoted(post).type"
                    });

                    for (let i = pinnedPost?.length - 1; i >= 0; i--) {
                        posts?.unshift(pinnedPost[i]);
                    }

                }

            }

        }
        catch (e) {
            //console.log(e)
            posts = [];
        }

        reply.send({ items: posts });
    });

    done();
};
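To make the filter construction above easier to follow, here is the same seenPostId exclusion logic run on hypothetical ids:

// Illustrative only: how the PocketBase filter string is assembled above.
const seenPostId = ['abc123', 'def456'];
let filter = seenPostId.map((id) => `id!="${id}"`).join("&&");
// filter === 'id!="abc123"&&id!="def456"'
filter += `&& user="u1" && pinned=false`; // when a userId is supplied
console.log(filter); // 'id!="abc123"&&id!="def456"&& user="u1" && pinned=false'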
49
fastify/get-price-alert/server.js
Normal file
@ -0,0 +1,49 @@
module.exports = function (fastify, opts, done) {
    const pb = opts.pb;
    const fs = opts.fs;
    const path = opts.path;

    fastify.post('/get-price-alert', async (request, reply) => {
        const data = request.body;
        const userId = data?.userId;

        let output;

        try {
            output = await pb.collection("priceAlert").getFullList({
                filter: `user="${userId}" && triggered=false`
            });

            // Read the JSON file for each symbol in the output list
            const itemsWithQuotes = await Promise.all(output.map(async (item) => {
                const symbol = item.symbol;
                try {
                    const filePath = path.join(__dirname, `../../app/json/quote/${symbol}.json`);
                    const fileData = fs.readFileSync(filePath, 'utf8');
                    const jsonData = JSON.parse(fileData);

                    // Extract only the desired fields from the JSON data
                    const { changesPercentage, price, volume } = jsonData;

                    return { ...item, changesPercentage, price, volume };

                } catch (error) {
                    // Handle errors if file reading or parsing fails
                    console.error(`Error reading or parsing JSON for symbol ${symbol}: ${error}`);
                    return item;
                }
            }));

            reply.send({ items: itemsWithQuotes });
        } catch (e) {
            console.error(e);
            reply.send({ items: [] });
            //reply.status(500).send({ error: "Internal Server Error" });
        }
    });

    done();
};
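The route only destructures changesPercentage, price and volume from each quote file, so a file under app/json/quote/ is expected to contain at least those keys; a hypothetical example (symbol and values made up):

// Hypothetical contents of app/json/quote/AAPL.json — only the three
// fields destructured above are required by this route; anything else
// in the file is ignored.
{
  "symbol": "AAPL",
  "price": 189.37,
  "changesPercentage": 1.24,
  "volume": 51234567
}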
21
fastify/get-strategy/server.js
Normal file
@ -0,0 +1,21 @@
// Declare a route
module.exports = function (fastify, opts, done) {

    const pb = opts.pb;

    fastify.post('/get-strategy', async (request, reply) => {
        const data = request.body;
        let output;

        try {
            output = await pb.collection("stockscreener").getOne(data['strategyId']);
        }
        catch (e) {
            output = {};
        }

        reply.send({ items: output });
    });

    done();
};
47
fastify/get-twitch-status/server.js
Normal file
@ -0,0 +1,47 @@
module.exports = function (fastify, opts, done) {
    const axios = opts.axios;

    fastify.get('/get-twitch-status', async (request, reply) => {
        let twitchStatus = false;

        const client_id = '5i041m3iztxuj0yx26scgzhri1etfi';
        const client_secret = '8p9gdmglz23lyc2nsrpbym5tpp15w0';
        const streamer_name = 'stocknear';

        try {
            // Obtain an access token from Twitch
            const tokenResponse = await axios.post('https://id.twitch.tv/oauth2/token', null, {
                params: {
                    client_id,
                    client_secret,
                    grant_type: 'client_credentials',
                },
            });

            const { access_token } = tokenResponse.data;

            // Check if the stream is online
            const streamResponse = await axios.get(
                `https://api.twitch.tv/helix/streams?user_login=${streamer_name}`,
                {
                    headers: {
                        'Client-ID': client_id,
                        Authorization: `Bearer ${access_token}`,
                    },
                }
            );

            const streamData = streamResponse.data;
            twitchStatus = streamData.data.length === 1;

        } catch (e) {
            console.error(e);
        }

        reply.send({ items: twitchStatus });
    });

    done();
};
22
fastify/get-user-data/server.js
Normal file
@ -0,0 +1,22 @@
// Declare a route
module.exports = function (fastify, opts, done) {

    const pb = opts.pb;

    fastify.post('/get-user-data', async (request, reply) => {
        const data = request.body;
        const userId = data?.userId;
        let output;

        try {
            output = await pb.collection("users").getOne(userId);
        }
        catch (e) {
            output = {};
        }

        reply.send({ items: output });
    });

    done();
};
36
fastify/get-user-stats/server.js
Normal file
@ -0,0 +1,36 @@
// Declare a route
module.exports = function (fastify, opts, done) {

    const pb = opts.pb;

    fastify.post('/get-user-stats', async (request, reply) => {
        const data = request.body;
        const userId = data?.userId;

        let output;

        try {
            const getNumberOfPosts = await pb.collection("posts").getList(1, 1, {
                filter: `user="${userId}"`,
            });
            const numberOfPosts = getNumberOfPosts?.totalItems;

            const getNumberOfComments = await pb.collection("comments").getList(1, 1, {
                filter: `user="${userId}"`,
            });
            const numberOfComments = getNumberOfComments?.totalItems;

            output = { numberOfPosts, numberOfComments };
            console.log(output);

        }
        catch (e) {
            output = {};
        }

        reply.send({ items: output });
    });

    done();
};
31
fastify/leaderboard/server.js
Normal file
@ -0,0 +1,31 @@
// Declare a route
module.exports = function (fastify, opts, done) {

    const pb = opts.pb;

    fastify.post('/leaderboard', async (request, reply) => {
        const data = request.body;

        const startDate = data?.startDate;
        const endDate = data?.endDate;
        let output;

        try {
            output = await pb.collection("portfolios").getFullList({
                filter: `created >= "${startDate}" && created < "${endDate}"`,
                expand: 'user'
            });
        }
        catch (e) {
            //console.log(e)
            output = [];
        }

        reply.send({ items: output });
    });

    done();
};
72
fastify/mixpanel/server.js
Normal file
@ -0,0 +1,72 @@
// Declare a route
module.exports = function (fastify, opts, done) {

    const mixpanel = opts.mixpanel;
    const UAParser = opts.UAParser;

    fastify.post('/mixpanel', async (request, reply) => {

        const data = request.body;

        const { browser, cpu, device, os } = UAParser(data.userAgent);

        let options = {
            path: data.path,
            browser: browser.name,
            browser_version: browser.version,
            device: device.vendor,
            cpu: cpu.architecture,
            os: os.name,
        };

        if (data.type === 'trackPageError') {
            options.status = data.status;
            options.message = data.message;
            mixpanel.track('Error status', options);
            console.log('Send error page data to mixpanel');
        }

        else if (data.type === 'trackPageVisit') {
            mixpanel.track('Page Visit', options);
        }

        else if (data.type === 'trackPageDuration') {
            options.time_spent = data.time;
            mixpanel.track('Page Duration', options);
        }

        else if (data.type === 'trackAsset') {
            const options = {
                symbol: data.symbol,
                assetType: data.assetType,
            };
            mixpanel.track('asset', options);
        }

        else if (data.type === 'trackButton') {
            const options = {
                name: data.name,
            };
            mixpanel.track('buttonClick', options);
        }

        reply.send({ message: 'success' });
    });

    done();
};
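For reference, a sketch of the kind of request a frontend could send to this route, using only the fields the handler above actually reads; the URL and values are placeholders, not the project's real configuration:

// Minimal sketch, not the actual frontend code. Field names (type, path,
// time, userAgent) come from what the handler above reads; the URL is a
// placeholder for wherever the Fastify server is reachable.
await fetch('http://localhost:2000/mixpanel', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
        type: 'trackPageDuration',
        path: '/stocks/AAPL',
        time: 42,                       // seconds spent on the page
        userAgent: navigator.userAgent, // parsed server-side by UAParser
    }),
});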
3958
fastify/package-lock.json
generated
Normal file
File diff suppressed because it is too large
28
fastify/package.json
Normal file
@ -0,0 +1,28 @@
{
  "scripts": {
    "start": "nodemon app.js"
  },
  "dependencies": {
    "@aws-sdk/client-s3": "^3.460.0",
    "@fastify/cors": "^8.5.0",
    "@fastify/websocket": "^8.2.0",
    "aws-sdk": "^2.1505.0",
    "axios": "^1.6.2",
    "blob-util": "^2.0.2",
    "cheerio": "^1.0.0-rc.12",
    "dotenv": "^16.4.5",
    "fastify": "^4.26.0",
    "got": "^11.8.3",
    "https": "^1.0.0",
    "mixpanel": "^0.18.0",
    "object-to-formdata": "^4.5.1",
    "pino": "^8.19.0",
    "pocketbase": "^0.19.0",
    "sharp": "^0.32.6",
    "ua-parser-js": "^1.0.37",
    "ws": "^8.14.2"
  },
  "devDependencies": {
    "nodemon": "^3.0.1"
  }
}
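With this manifest in place, running `npm install` followed by `npm start` inside the fastify/ directory starts the Node backend via `nodemon app.js`, as declared in the scripts block above.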
Some files were not shown because too many files have changed in this diff