Repository: gita/bhagavad-gita-api Branch: main Commit: be08e55c421f Files: 65 Total size: 149.6 KB Directory structure: gitextract_dgfg7br4/ ├── .all-contributorsrc ├── .dockerignore ├── .flake8 ├── .github/ │ ├── CONTRIBUTING.md │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE/ │ │ ├── bug.yml │ │ ├── feature.yml │ │ └── other.yml │ ├── PULL_REQUEST_TEMPLATE/ │ │ └── pull_request_template.md │ └── workflows/ │ ├── deploy.yml │ ├── openai-pr-reviewer.yml │ ├── publish.yml │ └── quality.yml ├── .gitignore ├── .markdownlint.rb ├── .pre-commit-config.yaml ├── Caddyfile ├── Dockerfile ├── Dockerfile.dev ├── Dockerfile.prod ├── LICENSE ├── Makefile ├── README.md ├── bhagavad_gita_api/ │ ├── MyIGBot.py │ ├── SocialBot.py │ ├── __init__.py │ ├── api/ │ │ ├── __init__.py │ │ ├── api_v2/ │ │ │ ├── __init__.py │ │ │ ├── api.py │ │ │ └── endpoints/ │ │ │ ├── __init__.py │ │ │ ├── gita.py │ │ │ └── social.py │ │ └── deps.py │ ├── cli.py │ ├── config.py │ ├── cronjobs/ │ │ ├── __init__.py │ │ └── celery.py │ ├── crud.py │ ├── data/ │ │ ├── __init__.py │ │ ├── helpers.py │ │ └── insert/ │ │ ├── __init__.py │ │ ├── authors.py │ │ ├── chapters.py │ │ ├── commentaries.py │ │ ├── languages.py │ │ ├── translations.py │ │ └── verses.py │ ├── db/ │ │ ├── __init__.py │ │ ├── base_class.py │ │ ├── init_db.py │ │ └── session.py │ ├── graphql.py │ ├── gunicorn.conf.py │ ├── main.py │ ├── models/ │ │ ├── __init__.py │ │ ├── gita.py │ │ ├── schemas.py │ │ └── user.py │ └── utils.py ├── cookie_iiradhakrishnaii.bot ├── docker-compose.dev.yml ├── docker-compose.prod.yml ├── mypy.ini ├── pyproject.toml └── wait_for_db.sh ================================================ FILE CONTENTS ================================================ ================================================ FILE: .all-contributorsrc ================================================ { "files": [ "README.md" ], "imageSize": 100, "commit": false, "contributors": [ { "login": "Gupta-Anubhav12", "name": "Anubhav Gupta", "avatar_url": 
"https://avatars.githubusercontent.com/u/64721638?v=4", "profile": "https://github.com/Gupta-Anubhav12", "contributions": [ "code" ] }, { "login": "sanujsood", "name": "Sanuj Sood", "avatar_url": "https://avatars.githubusercontent.com/u/67072668?v=4", "profile": "https://github.com/sanujsood", "contributions": [ "code" ] }, { "login": "aahnik", "name": "Aahnik Daw", "avatar_url": "https://avatars.githubusercontent.com/u/66209958?v=4", "profile": "http://aahnik.dev", "contributions": [ "code" ] }, { "login": "akshatj2209", "name": "Akshat Joshi", "avatar_url": "https://avatars.githubusercontent.com/u/57488922?v=4", "profile": "https://github.com/akshatj2209", "contributions": [ "code" ] }, { "login": "Amritpal2001", "name": "Amritpal Singh", "avatar_url": "https://avatars.githubusercontent.com/u/60562606?v=4", "profile": "https://www.realdevils.com/", "contributions": [ "code" ] }, { "login": "NIKU-SINGH", "name": "Niku Singh", "avatar_url": "https://avatars.githubusercontent.com/u/72123526?v=4", "profile": "https://github.com/NIKU-SINGH", "contributions": [ "code" ] }, { "login": "sreevardhanreddi", "name": "sreevardhanreddi", "avatar_url": "https://avatars.githubusercontent.com/u/31174432?v=4", "profile": "https://sreevardhanreddi.github.io/", "contributions": [ "code", "infra" ] } ], "contributorsPerLine": 7, "projectName": "bhagavad-gita-api", "projectOwner": "gita", "repoType": "github", "repoHost": "https://github.com", "skipCi": true } ================================================ FILE: .dockerignore ================================================ # Custom ignore .vscode t.* test.py foo.py run.py # Standard python .dockerignore used by community # Git .git .gitignore # CI .codeclimate.yml .travis.yml .taskcluster.yml # Docker docker-compose.yml .docker # Byte-compiled / optimized / DLL files __pycache__/ */__pycache__/ */*/__pycache__/ */*/*/__pycache__/ *.py[cod] */*.py[cod] */*/*.py[cod] */*/*/*.py[cod] # C extensions *.so # Distribution / packaging 
.Python env/ build/ develop-eggs/ dist/ downloads/ eggs/ lib/ lib64/ parts/ sdist/ var/ *.egg-info/ .installed.cfg *.egg # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. *.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .coverage .cache nosetests.xml coverage.xml # Translations *.mo *.pot # Django stuff: *.log # Sphinx documentation docs/_build/ # PyBuilder target/ # Virtual environment .env/ .venv/ venv/ # PyCharm .idea # Python mode for VIM .ropeproject */.ropeproject */*/.ropeproject */*/*/.ropeproject # Vim swap files *.swp */*.swp */*/*.swp */*/*/*.swp # volume folder for postgres database db_data ================================================ FILE: .flake8 ================================================ [flake8] max-line-length = 88 select = C,E,F,W,B,B9 ignore = E203, E501, W503, E712, E301, F403, F405 exclude = .tox,.git,venv,__init__.py ================================================ FILE: .github/CONTRIBUTING.md ================================================ # Contributing Guide This guide is for anyone who wishes to contribute code to Bhagavad Gita API. Thank you for your interest and welcome here! To work on this project you will need the following software installed in your machine. - git (version control) - python3.10 or above - poetry (package management) - make (command line utils) - docker (optional, if you want to build docker images) - docker-compose (optional, if you want to develop with docker-compose) 1. First of all fork and clone this repo. Checkout a new branch to start working. For more information read [GitHub's Docs](https://docs.github.com/en/get-started/quickstart/fork-a-repo) for beginners. 2. If you don't already have `poetry`, then [install it](https://python-poetry.org/docs/#installation). 
Move into the project directory and run the following commands. ```shell poetry config virtualenvs.in-project true poetry install ``` 3. The virtual environment will be created in a `.venv` folder inside your project directory. In your code editor set the python interpreter path to `./.venv/bin/python` 4. Activate poetry shell. ```shell poetry shell ``` 5. Install pre-commit hooks. ```shell pre-commit install ``` 6. Set up the .env file; refer to .env.example. ```shell cp .env.example .env ``` 7. Seed data to database. ```shell python bhagavad_gita_api/cli.py seed-data ``` 8. To start the server with hot reload, ```shell uvicorn bhagavad_gita_api.main:app --host 0.0.0.0 --port 8081 --reload ``` By default an in memory Sqlite database is used. To set the database DSN, tester API Key and other stuff, read about [configuration](../README.md/#Configuration) in the README. 9. Try to write test cases when you are adding a feature or fixing a bug. 10. Make sure that all existing tests, and code quality checks pass. ```shell pytest # run tests pre-commit run -a # run pre-commit for all files ``` 11. Make sure to write meaningful commit messages. 12. Open a PR. Please explain what your changes do in simple words. Attach logs, screenshots and other relevant material. Congrats and thanks for opening your first PR! Please wait for the maintainers to respond. --- ## Developing with Docker and docker-compose ```shell # setup .env file, refer .env.example file cp .env.example .env # run the project with docker-compose docker-compose -f docker-compose.dev.yml up --build ``` --- ## Contributors List To add yourself to the contributors list, comment on an Issue or Pull Request, asking @all-contributors to add a contributor: ```txt @all-contributors please add @ for ``` **\**: See the [Emoji Key (Contribution Types Reference)](../emoji-key) for a list of valid `contribution` types. The bot will then create a Pull Request to add the contributor, then reply with the pull request details. 
![Example usage screenshot](../.github/bot-usage.png "Example usage") ================================================ FILE: .github/FUNDING.yml ================================================ # These are supported funding model platforms github: gita patreon: # Replace with a single Patreon username open_collective: the-gita-initiative ko_fi: # Replace with a single Ko-fi username tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry liberapay: # Replace with a single Liberapay username issuehunt: gita otechie: # Replace with a single Otechie username custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] ================================================ FILE: .github/ISSUE_TEMPLATE/bug.yml ================================================ name: Bug report description: Create a report to help us improve. Report bugs found while using the project title: "[BUG] " labels: ["🛠 goal: fix"] body: - type: textarea id: actualbhv attributes: label: "🙁 Actual behavior" description: What happened, and why it was wrong validations: required: true - type: textarea id: expectedbhv attributes: label: "🙂 Expected behavior" description: What you expected to happen instead, and why validations: required: true - type: textarea id: steps attributes: label: "🔢 Steps to Reproduce the Problem" description: If possible, provide steps to reproduce the problem you're experiencing placeholder: | 1. First step 2. Second step 3. 
Third step validations: required: false - type: markdown attributes: value: | You can also join the Discord community [here](https://discord.gg/HPZzuJs3VY) Feel free to check out other cool repositories of The Gita Initiative Community [here](https://github.com/gita) ================================================ FILE: .github/ISSUE_TEMPLATE/feature.yml ================================================ name: Feature request description: Suggest features, propose improvements, discuss new ideas title: "[FEATURE] " labels: ["⭐ goal: addition"] body: - type: textarea id: suggestion attributes: label: ⭐ Suggestion description: A summary of what you'd like to see added or changed validations: required: true - type: textarea id: usecases attributes: label: 💻 Use Cases description: | What are possible use cases for your suggested feature? Are you using any workarounds in the meantime? validations: required: false - type: textarea id: relatedproblems attributes: label: ❌ Related Problems description: | Is your Request related to a problem? Think about linking existing Issues here! validations: required: false - type: markdown attributes: value: | You can also join the Discord community [here](https://discord.gg/HPZzuJs3VY) Feel free to check out other cool repositories of The Gita Initiative Community [here](https://github.com/gita) ================================================ FILE: .github/ISSUE_TEMPLATE/other.yml ================================================ name: Other description: Use this for any other issues. PLEASE do not create blank issues title: "[OTHER]" labels: ["🚦 status: awaiting triage"] body: - type: markdown attributes: value: "# Other issue" - type: textarea id: issuedescription attributes: label: What would you like to share? description: Provide a clear and concise explanation of your issue. 
validations: required: true - type: textarea id: extrainfo attributes: label: Additional information description: Is there anything else we should know about this issue? validations: required: false ================================================ FILE: .github/PULL_REQUEST_TEMPLATE/pull_request_template.md ================================================ # Description Please include a summary of the change and which issue is fixed. Please also include relevant motivation and context. List any dependencies that are required for this change. Fixes # (issue) ## Type of change Please delete options that are not relevant. - [ ] Bug fix (non-breaking change which fixes an issue) - [ ] New feature (non-breaking change which adds functionality) - [ ] Breaking change (fix or feature that would cause existing functionality to not work as expected) - [ ] This change requires a documentation update # Checklist: - [ ] My code follows the style guidelines of this project - [ ] I have performed a self-review of my own code - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have made corresponding changes to the documentation - [ ] My changes generate no new warnings - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] New and existing unit tests pass locally with my changes - [ ] Any dependent changes have been merged and published in downstream modules ================================================ FILE: .github/workflows/deploy.yml ================================================ name: Deploy to VM on: push: branches: [main] jobs: deploy: runs-on: [self-hosted] steps: - name: cleanup and reset permissions to user from docker user run: chown -R $USER:$USER $GITHUB_WORKSPACE - name: download source code uses: actions/checkout@v2 - name: set environment variables in .env file run: | echo POSTGRES_USER="${{ secrets.POSTGRES_USER }}" > .env echo POSTGRES_PASSWORD="${{ secrets.POSTGRES_PASSWORD }}" >> .env echo 
POSTGRES_DB="${{ secrets.POSTGRES_DB }}" >> .env echo DB_HOST="${{ secrets.DB_HOST }}" >> .env echo DB_PORT="${{ secrets.DB_PORT }}" >> .env echo TESTER_API_KEY="${{ secrets.TESTER_API_KEY }}" >> .env echo CELERY_BROKER="${{ secrets.CELERY_BROKER }}" >> .env echo CELERY_BACKEND="${{ secrets.CELERY_BACKEND }}" >> .env echo CRONJOB_BASE_URL="${{ secrets.CRONJOB_BASE_URL }}" >> .env echo CONSUMER_KEY="${{ secrets.CONSUMER_KEY }}" >> .env echo CONSUMER_SECRET="${{ secrets.CONSUMER_SECRET }}" >> .env echo CLIENT_ID="${{ secrets.CLIENT_ID }}" >> .env echo CLIENT_SECRET="${{ secrets.CLIENT_SECRET }}" >> .env echo ACCESS_TOKEN="${{ secrets.ACCESS_TOKEN }}" >> .env echo ACCESS_TOKEN_SECRET="${{ secrets.ACCESS_TOKEN_SECRET }}" >> .env echo INSTAGRAM_USERNAME="${{ secrets.INSTAGRAM_USERNAME }}" >> .env echo INSTAGRAM_PASSWORD="${{ secrets.INSTAGRAM_PASSWORD }}" >> .env - name: build docker images locally run: docker-compose -f docker-compose.prod.yml build --parallel - name: run docker compose run: docker-compose -f docker-compose.prod.yml up -d ================================================ FILE: .github/workflows/openai-pr-reviewer.yml ================================================ name: Code Review permissions: contents: read pull-requests: write on: pull_request: branches: - master - main pull_request_review_comment: types: [created] concurrency: group: ${{ github.repository }}-${{ github.event.number || github.head_ref || github.sha }}-${{ github.workflow }}-${{ github.event_name == 'pull_request_review_comment' && 'pr_comment' || 'pr' }} cancel-in-progress: ${{ github.event_name != 'pull_request_review_comment' }} jobs: review: runs-on: ubuntu-latest steps: - uses: fluxninja/openai-pr-reviewer@latest env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} with: debug: false review_simple_changes: false review_comment_lgtm: false openai_light_model: 'gpt-3.5-turbo' openai_heavy_model: 'gpt-4' 
================================================ FILE: .github/workflows/publish.yml ================================================ name: Publish Packages on: release: types: [published] jobs: release: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - run: pip install --upgrade poetry - name: Publish to PyPI run: poetry publish --build env: POETRY_HTTP_BASIC_PYPI_USERNAME: "__token__" POETRY_HTTP_BASIC_PYPI_PASSWORD: ${{ secrets.PYPI_TOKEN }} - name: Login to DockerHub uses: docker/login-action@v1 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Build docker images and publish run: make docker-release ================================================ FILE: .github/workflows/quality.yml ================================================ name: Code Quality on: push: branches: [ main ] pull_request: branches: [ main ] jobs: check: runs-on: ubuntu-latest steps: - uses: actions/checkout@v2 - uses: actions/setup-python@v2 - run: pip install poetry && poetry install - uses: pre-commit/action@v2.0.0 ================================================ FILE: .gitignore ================================================ # Custom ignore .vscode t.* test.py foo.py run.py # Standard python .gitignore given by GitHub # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] *$py.class # C extensions *.so # Distribution / packaging .Python build/ develop-eggs/ dist/ downloads/ eggs/ .eggs/ lib/ lib64/ parts/ sdist/ var/ wheels/ pip-wheel-metadata/ share/python-wheels/ *.egg-info/ .installed.cfg *.egg MANIFEST # PyInstaller # Usually these files are written by a python script from a template # before PyInstaller builds the exe, so as to inject date/other infos into it. 
*.manifest *.spec # Installer logs pip-log.txt pip-delete-this-directory.txt # Unit test / coverage reports htmlcov/ .tox/ .nox/ .coverage .coverage.* .cache nosetests.xml coverage.xml *.cover *.py,cover .hypothesis/ .pytest_cache/ # Translations *.mo *.pot # Django stuff: *.log local_settings.py db.sqlite3 db.sqlite3-journal # Flask stuff: instance/ .webassets-cache # Scrapy stuff: .scrapy # Sphinx documentation docs/_build/ # PyBuilder target/ # Jupyter Notebook .ipynb_checkpoints # IPython profile_default/ ipython_config.py # pyenv .python-version # pipenv # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. # However, in case of collaboration, if having platform-specific dependencies or dependencies # having no cross-platform support, pipenv may install dependencies that don't work, or not # install all needed dependencies. #Pipfile.lock # PEP 582; used by e.g. github.com/David-OConnor/pyflow __pypackages__/ # Celery stuff celerybeat-schedule celerybeat.pid # SageMath parsed files *.sage.py # Environments .env .venv env/ venv/ ENV/ env.bak/ venv.bak/ # Spyder project settings .spyderproject .spyproject # Rope project settings .ropeproject # mkdocs documentation /site # mypy .mypy_cache/ .dmypy.json dmypy.json # Pyre type checker .pyre/ # volume folder for postgres database db_data # sqlite3 file bhagavad_gita_api/gita.db ================================================ FILE: .markdownlint.rb ================================================ all exclude_rule 'MD013' ================================================ FILE: .pre-commit-config.yaml ================================================ repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v4.0.1 hooks: - id: trailing-whitespace - id: end-of-file-fixer - id: debug-statements - repo: https://github.com/myint/autoflake rev: v1.4 hooks: - id: autoflake args: ['--in-place', '--remove-unused-variable', '--ignore-init-module-imports', 
'--remove-all-unused-imports'] - repo: https://github.com/asottile/pyupgrade rev: v2.19.4 hooks: - id: pyupgrade args: ['--py3-plus'] - repo: https://github.com/asottile/seed-isort-config rev: v2.2.0 hooks: - id: seed-isort-config - repo: https://github.com/pre-commit/mirrors-isort rev: v5.9.1 hooks: - id: isort - repo: https://github.com/ambv/black rev: 22.3.0 hooks: - id: black language_version: python3 - repo: local hooks: - id: flake8 name: flake8 types: [python] language: system entry: poetry run flake8 --config .flake8 exclude: run.py - repo: https://github.com/igorshubovych/markdownlint-cli rev: v0.27.1 hooks: - id: markdownlint args: [-s, .markdownlint.rb] ================================================ FILE: Caddyfile ================================================ api.bhagavadgita.io { reverse_proxy gita-api:8081 { header_down Strict-Transport-Security max-age=31536000; } } ================================================ FILE: Dockerfile ================================================ FROM python:3.9 ENV VENV_PATH="/venv" ENV PATH="$VENV_PATH/bin:$PATH" WORKDIR /app RUN apt-get update && apt-get upgrade -y RUN pip install --upgrade poetry RUN python -m venv /venv COPY . . RUN poetry build && \ /venv/bin/pip install --upgrade pip wheel setuptools &&\ /venv/bin/pip install dist/*.whl CMD bhagavad-gita-api ================================================ FILE: Dockerfile.dev ================================================ FROM python:3.9 ENV VENV_PATH="/venv" ENV PATH="$VENV_PATH/bin:$PATH" WORKDIR /app RUN apt-get update && apt-get upgrade -y && apt-get install netcat -y RUN pip install --upgrade poetry RUN python -m venv /venv COPY . . 
RUN poetry build && \ /venv/bin/pip install --upgrade pip wheel setuptools && \ /venv/bin/pip install dist/*.whl ENTRYPOINT [ "./wait_for_db.sh" ] CMD bhagavad-gita-api ================================================ FILE: Dockerfile.prod ================================================ FROM python:3.9 ENV VENV_PATH="/venv" ENV PATH="$VENV_PATH/bin:$PATH" WORKDIR /app RUN apt-get update && apt-get upgrade -y && apt-get install netcat -y RUN pip install --upgrade poetry RUN python -m venv /venv COPY . . RUN poetry build && \ /venv/bin/pip install --upgrade pip wheel setuptools && \ /venv/bin/pip install dist/*.whl ENTRYPOINT [ "./wait_for_db.sh" ] CMD gunicorn -c bhagavad_gita_api/gunicorn.conf.py bhagavad_gita_api.main:app ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2021 The Gita Initiative Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
================================================ FILE: Makefile ================================================ # lists all available targets list: @sh -c "$(MAKE) -p no_targets__ | \ awk -F':' '/^[a-zA-Z0-9][^\$$#\/\\t=]*:([^=]|$$)/ {\ split(\$$1,A,/ /);for(i in A)print A[i]\ }' | grep -v '__\$$' | grep -v 'make\[1\]' | grep -v 'Makefile' | sort" # required for list no_targets__: VERSION=$$(poetry version -s) PROJECT="bhagavad-gita-api" DOCKER_ORG="bhagavadgita" DOCKER_REPO="$(DOCKER_ORG)/$(PROJECT)" clean: @rm -rf build dist .eggs *.egg-info @rm -rf .benchmarks .coverage coverage.xml htmlcov report.xml .tox @find . -type d -name '.mypy_cache' -exec rm -rf {} + @find . -type d -name '__pycache__' -exec rm -rf {} + @find . -type d -name '*pytest_cache*' -exec rm -rf {} + @find . -type f -name "*.py[co]" -exec rm -rf {} + fmt: clean @poetry run isort . @poetry run black . hard-clean: clean @rm -rf .venv pypi: @poetry publish --build docker: @docker build -t $(PROJECT) . @docker tag $(PROJECT) $(DOCKER_REPO):latest @docker tag $(PROJECT) $(DOCKER_REPO):$(VERSION) docker-release: docker @docker push -a $(DOCKER_REPO) release: pypi docker-release ================================================ FILE: README.md ================================================

Logo

Bhagavad Gita API

Code for the BhagavadGita.io API, which is an app built for Gita readers by Gita readers.

GitHub issues PyPI - Python Version LICENSE Stars Docs Stars

## Usage The Bhagavad Gita API allows any developer to use content from Gita in their apps. This API is built with FastAPI which is based on (and fully compatible with) the open standards for APIs: OpenAPI (previously known as Swagger) and JSON Schema. Documentation for this API is available in two interactive formats: - [Swagger UI](https://api.bhagavadgita.io/docs) - [Redoc](https://api.bhagavadgita.io/redoc) If you are interested in using this API for your application, please register an account at [RapidAPI](https://rapidapi.com/bhagavad-gita-bhagavad-gita-default/api/bhagavad-gita3) where you'll get both the credentials as well as sample code in your language of choice. The API is 100% FREE to use. ## Projects Here is a list of interesting projects using this API. - [BhagavadGita.io](https://bhagavadgita.io) - [Android App](https://play.google.com/store/apps/details?id=com.hanuman.bhagavadgita) Have you built something with this API? Open a "Show and tell" discussion. The maintainers will feature your project on the README if they find it interesting. ## Self Hosting The official API is free to use for all. But if you wish, you can self-host anywhere you want. If you want to deploy your own instance, you can deploy the API server on your system or VPS. - Using [`pipx`](https://pypa.github.io/pipx/installation/) > **Note** If you don't have `pipx`, just `pip install pipx` ```shell pipx run bhagavad-gita-api ``` - Or using [`docker`](https://www.docker.com/) ```shell docker run -it -p 8081:8081 --env-file=.env bhagavadgita/bhagavad-gita-api ``` Now open http://localhost:8081/docs to see docs. To stop the server press Ctrl + C on your keyboard. By default an in-memory SQLite database is used. But you can configure it to use any SQL database of your choice. The official version uses PostgreSQL. Looking to deploy on a cloud platform? 
We have detailed docs to deploy to the following platforms: - [Heroku](https://github.com/gita/bhagavad-gita-api/wiki/Heroku) - [Deta](https://github.com/gita/bhagavad-gita-api/wiki/Deta) - [Digital Ocean](https://github.com/gita/bhagavad-gita-api/wiki/Digial-Ocean) ## Configuration Here is the list of supported environment variables. | Name | Description | Default | | ------------------------- | ------------------------------------- | ----------- | | `TESTER_API_KEY` | The API key for testing. | `None` | | `SQLALCHEMY_DATABASE_URI` | The DSN for your database connection. | `sqlite://` (in memory SQLite db)| If you want to configure your deployment even more, then please take a look at module [`config.py`](bhagavad_gita_api/config.py). To set the environment variables, you may simply use a `.env` file where you specify the values in the format of `KEY=VALUE`. ## Development Feel free to use the [issue tracker](https://github.com/gita/bhagavad-gita-api/issues) for bugs and feature requests. Looking to contribute code ? PRs are most welcome! To get started with developing this API, please read the [contributing guide](.github/CONTRIBUTING.md). ## Community Join the [Discord chat server](https://discord.gg/gX8dstApZX) and hang out with others in the community. You can also use [GitHub Discussions](https://github.com/gita/bhagavad-gita-api/discussions) to ask questions or tell us about projects you have built using this API. ## Contributors ✨ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/docs/en/emoji-key)):

Anubhav Gupta

💻

Sanuj Sood

💻

Aahnik Daw

💻

Akshat Joshi

💻

Amritpal Singh

💻

Niku Singh

💻

sreevardhanreddi

💻 🚇
This project follows the [all-contributors](https://github.com/all-contributors/all-contributors) specification. Contributions of any kind welcome! ================================================ FILE: bhagavad_gita_api/MyIGBot.py ================================================ # flake8: noqa import json import os import random import string import time from datetime import datetime import requests from bs4 import BeautifulSoup as bs class bcolors: HEADER = "\033[95m" OKBLUE = "\033[94m" OKCYAN = "\033[96m" OKGREEN = "\033[92m" WARNING = "\033[93m" FAIL = "\033[91m" ENDC = "\033[0m" BOLD = "\033[1m" UNDERLINE = "\033[4m" class MyIGBot: def __init__(self, username, password, use_cookie=True, proxy=None): self.username = username self.password = password self.use_cookie = use_cookie self.proxy = proxy self.path = os.getcwd() if ( use_cookie == False or os.path.exists(self.path + f"//cookie_{self.username}.bot") == False ): link = "https://www.instagram.com/" login_url = "https://www.instagram.com/accounts/login/ajax/" time_now = int(datetime.now().timestamp()) response = requests.get(link, proxies=self.proxy) try: csrf = response.cookies["csrftoken"] except: letters = string.ascii_lowercase csrf = "".join(random.choice(letters) for i in range(8)) payload = { "username": self.username, "enc_password": f"#PWD_INSTAGRAM_BROWSER:0:{time_now}:{self.password}", "queryParams": {}, "optIntoOneTap": "false", } login_header = { "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36", "X-Requested-With": "XMLHttpRequest", "Referer": "https://www.instagram.com/accounts/login/", "x-csrftoken": csrf, } login_response = requests.post( login_url, data=payload, headers=login_header, proxies=self.proxy ) json_data = json.loads(login_response.text) cookies = login_response.cookies cookie_jar = cookies.get_dict() try: self.csrf_token = cookie_jar["csrftoken"] except: self.csrf_token = csrf try: if 
json_data["authenticated"]: pass else: print( bcolors.FAIL + "[✗] Login Failed!" + bcolors.ENDC, login_response.text, ) quit() except KeyError: try: if json_data["two_factor_required"]: self.ig_nrcb = cookie_jar["ig_nrcb"] self.ig_did = cookie_jar["ig_did"] self.mid = cookie_jar["mid"] otp = input( bcolors.OKBLUE + "[!] Two Factor Auth. Detected! Enter Code Here: " + bcolors.ENDC ) twofactor_url = ( "https://www.instagram.com/accounts/login/ajax/two_factor/" ) twofactor_payload = { "username": self.username, "verificationCode": otp, "identifier": json_data["two_factor_info"][ "two_factor_identifier" ], "queryParams": {}, } twofactor_header = { "accept": "*/*", "accept-encoding": "gzip, deflate, br", "accept-language": "en-US,en;q=0.9", "content-type": "application/x-www-form-urlencoded", "cookie": "ig_did=" + self.ig_did + "; ig_nrcb=" + self.ig_nrcb + "; csrftoken=" + self.csrf_token + "; mid=" + self.mid, "origin": "https://www.instagram.com", "referer": "https://www.instagram.com/accounts/login/two_factor?next=%2F", "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36", "x-csrftoken": self.csrf_token, "x-ig-app-id": "936619743392459", "x-ig-www-claim": "0", "x-instagram-ajax": "00c4537694a4", "x-requested-with": "XMLHttpRequest", } login_response = requests.post( twofactor_url, data=twofactor_payload, headers=twofactor_header, proxies=self.proxy, ) try: if login_response.headers["Set-Cookie"] != 0: pass except: try: if json_data["message"] == "checkpoint_required": self.ig_nrcb = cookie_jar["ig_nrcb"] self.ig_did = cookie_jar["ig_did"] self.mid = cookie_jar["mid"] url = ( "https://www.instagram.com" + json_data["checkpoint_url"] ) header = { "accept": "*/*", "accept-encoding": "gzip, deflate, br", "accept-language": "en-US,en;q=0.9", "content-type": "application/x-www-form-urlencoded", "cookie": 
"ig_did=" + self.ig_did + "; ig_nrcb=" + self.ig_nrcb + "; csrftoken=" + self.csrf_token + "; mid=" + self.mid, "origin": "https://www.instagram.com", "referer": "https://instagram.com" + json_data["checkpoint_url"], "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36", "x-csrftoken": self.csrf_token, "x-ig-app-id": "936619743392459", "x-ig-www-claim": "0", "x-instagram-ajax": "e8e20d8ba618", "x-requested-with": "XMLHttpRequest", } code = input( bcolors.OKBLUE + json.loads( requests.post( url, headers=header, data={"choice": "1"}, ).text, proxies=self.proxy, )["extraData"]["content"][1]["text"] + " > " + bcolors.ENDC ) if ( json.loads( requests.post( url, headers=header, data={"security_code": code}, ).text, proxies=self.proxy, )["type"] == "CHALLENGE_REDIRECTION" ): login_response = requests.post( login_url, data=payload, headers=login_header, proxies=self.proxy, ) else: print( bcolors.FAIL + "[✗] Login Failed!" + bcolors.ENDC ) quit() except: print(bcolors.FAIL + "[✗] Login Failed!" 
+ bcolors.ENDC) quit() except KeyError: try: if json_data["message"] == "checkpoint_required": self.ig_nrcb = cookie_jar["ig_nrcb"] self.ig_did = cookie_jar["ig_did"] self.mid = cookie_jar["mid"] url = ( "https://www.instagram.com" + json_data["checkpoint_url"] ) header = { "accept": "*/*", "accept-encoding": "gzip, deflate, br", "accept-language": "en-US,en;q=0.9", "content-type": "application/x-www-form-urlencoded", "cookie": "ig_did=" + self.ig_did + "; ig_nrcb=" + self.ig_nrcb + "; csrftoken=" + self.csrf_token + "; mid=" + self.mid, "origin": "https://www.instagram.com", "referer": "https://instagram.com" + json_data["checkpoint_url"], "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36", "x-csrftoken": self.csrf_token, "x-ig-app-id": "936619743392459", "x-ig-www-claim": "0", "x-instagram-ajax": "e8e20d8ba618", "x-requested-with": "XMLHttpRequest", } code = input( bcolors.OKBLUE + json.loads( requests.post( url, headers=header, data={"choice": "1"} ).text, proxies=self.proxy, )["extraData"]["content"][1]["text"] + " > " + bcolors.ENDC ) if ( json.loads( requests.post( url, headers=header, data={"security_code": code}, ).text, proxies=self.proxy, )["type"] == "CHALLENGE_REDIRECTION" ): login_response = requests.post( login_url, data=payload, headers=login_header, proxies=self.proxy, ) else: print(bcolors.FAIL + "[✗] Login Failed!" + bcolors.ENDC) quit() except: print(bcolors.FAIL + "[✗] Login Failed!" 
+ bcolors.ENDC) quit() self.sessionid = ( login_response.headers["Set-Cookie"] .split("sessionid=")[1] .split(";")[0] ) self.userId = ( login_response.headers["Set-Cookie"] .split("ds_user_id=")[1] .split(";")[0] ) self.cookie = ( "sessionid=" + self.sessionid + "; csrftoken=" + self.csrf_token + "; ds_user_id=" + self.userId + ";" ) create_cookie = open( self.path + f"//cookie_{self.username}.bot", "w+", encoding="utf-8" ) create_cookie.write(self.cookie) create_cookie.close() self.session = requests.session() cookie_obj = requests.cookies.create_cookie( name="sessionid", secure=True, value=self.sessionid ) self.session.cookies.set_cookie(cookie_obj) elif os.path.exists(self.path + f"//cookie_{self.username}.bot"): try: read_cookie = open( self.path + f"//cookie_{self.username}.bot", encoding="utf-8" ) self.cookie = read_cookie.read() read_cookie.close() homelink = "https://www.instagram.com/op/" self.session = requests.session() self.sessionid = self.cookie.split("=")[1].split(";")[0] self.csrf_token = self.cookie.split("=")[2].split(";")[0] cookie_obj = requests.cookies.create_cookie( name="sessionid", secure=True, value=self.sessionid ) self.session.cookies.set_cookie(cookie_obj) login_response = self.session.get(homelink, proxies=self.proxy) time.sleep(1) soup = bs(login_response.text, "html.parser") soup.find( "strong", { "class": "-cx-PRIVATE-NavBar__username -cx-PRIVATE-NavBar__username__" }, ).get_text() except AttributeError: print( bcolors.FAIL + "[✗] Login Failed! Cookie file is corupted!" + bcolors.ENDC ) os.remove(self.path + f"//cookie_{self.username}.bot") print( bcolors.WARNING + "[-] Deleted Corupted Cookie File! Try Again!" 
+ bcolors.ENDC
                )
                quit()

    # NOTE(review): the methods below were recovered from a whitespace-mangled
    # extraction; indentation is reconstructed. Several string literals were
    # destroyed in transit (the `replace(f"""", "")` / `replace("""", "")`
    # artifacts) and `data_object` is never assigned anywhere in the visible
    # text, so the scraping paths cannot run as written. The lost literals
    # originally stripped the JS wrapper around Instagram's embedded JSON —
    # TODO: restore them from upstream before trusting these methods.

    def already_liked(self, post_link):
        # Return True if the logged-in account already liked `post_link`.
        # /tv/ links are rewritten to /p/ and trailing path segments trimmed.
        if post_link.find("/tv/") != -1:
            post_link = post_link.replace("/tv/", "/p/")
        try:
            post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "")
        except:
            pass
        resp = self.session.get(post_link, proxies=self.proxy)
        time.sleep(1)
        soup = bs(resp.text, "html.parser")
        scripts = soup.find_all("script")
        # Brittle: assumes the embedded-data <script> is always at index 15.
        data_script = str(scripts[15])
        time.sleep(1)
        try:
            shortcode = post_link.split("/p/")[1].replace("/", "")
            # NOTE(review): literal truncated by extraction; the `data_object`
            # assignment and this try's except clause were lost with it.
            data_script = data_script.replace(f"""", "")
            data_json = json.loads(data_object)
            liked = data_json["graphql"]["shortcode_media"]["viewer_has_liked"]
            return bool(liked)

    def like(self, post_link):
        # Like a post. 200 = liked, 208 = already liked, 403 = scrape/post
        # failure, any other value = HTTP status code from Instagram.
        if post_link.find("/tv/") != -1:
            post_link = post_link.replace("/tv/", "/p/")
        try:
            post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "")
        except:
            pass
        try:
            if self.already_liked(post_link) == False:
                resp = self.session.get(post_link, proxies=self.proxy)
                time.sleep(1)
                soup = bs(resp.text, "html.parser")
                scripts = soup.find_all("script")
                data_script = str(scripts[15])
                time.sleep(1)
                try:
                    shortcode = post_link.split("/p/")[1].replace("/", "")
                    # NOTE(review): literal truncated by extraction (see note
                    # at top); matching except clause lost.
                    data_script = data_script.replace(f"""", "")
                    data_json = json.loads(data_object)
                    id_post = data_json["graphql"]["shortcode_media"]["id"]
                    url_post = f"https://www.instagram.com/web/likes/{id_post}/like/"
                    headers = {
                        "accept": "*/*",
                        "accept-encoding": "gzip, deflate, br",
                        "accept-language": "en-US,en;q=0.9",
                        "content-length": "0",
                        "content-type": "application/x-www-form-urlencoded",
                        "cookie": self.cookie,
                        "origin": "https://www.instagram.com",
                        "referer": post_link,
                        "sec-fetch-dest": "empty",
                        "sec-fetch-mode": "cors",
                        "sec-fetch-site": "same-origin",
                        "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                        "x-csrftoken": self.csrf_token,
                        "x-ig-app-id": "936619743392459",
                        "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFqSx",
                        "x-instagram-ajax": "d3d3aea32e75",
                        "x-requested-with": "XMLHttpRequest",
                    }
                    response = requests.request(
                        "POST", url_post, headers=headers, proxies=self.proxy
                    )
                    if response.status_code != 200:
                        return response.status_code
            else:
                return 208
        except:
            return 403
        return 200

    def unlike(self, post_link):
        # Remove a like. Mirror of like(): 200 = unliked, 208 = was not
        # liked, 403 = scrape/post failure, else the HTTP status code.
        if post_link.find("/tv/") != -1:
            post_link = post_link.replace("/tv/", "/p/")
        try:
            post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "")
        except:
            pass
        try:
            if self.already_liked(post_link) == True:
                resp = self.session.get(post_link, proxies=self.proxy)
                time.sleep(1)
                soup = bs(resp.text, "html.parser")
                scripts = soup.find_all("script")
                data_script = str(scripts[15])
                time.sleep(1)
                try:
                    shortcode = post_link.split("/p/")[1].replace("/", "")
                    # NOTE(review): literal truncated by extraction.
                    data_script = data_script.replace(f"""", "")
                    data_json = json.loads(data_object)
                    id_post = data_json["graphql"]["shortcode_media"]["id"]
                    url_post = f"https://www.instagram.com/web/likes/{id_post}/unlike/"
                    headers = {
                        "accept": "*/*",
                        "accept-encoding": "gzip, deflate, br",
                        "accept-language": "en-US,en;q=0.9",
                        "content-length": "0",
                        "content-type": "application/x-www-form-urlencoded",
                        "cookie": self.cookie,
                        "origin": "https://www.instagram.com",
                        "referer": post_link,
                        "sec-fetch-dest": "empty",
                        "sec-fetch-mode": "cors",
                        "sec-fetch-site": "same-origin",
                        "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                        "x-csrftoken": self.csrf_token,
                        "x-ig-app-id": "936619743392459",
                        "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFqSx",
                        "x-instagram-ajax": "d3d3aea32e75",
                        "x-requested-with": "XMLHttpRequest",
                    }
                    response = requests.request(
                        "POST", url_post, headers=headers, proxies=self.proxy
                    )
                    if response.status_code != 200:
                        return response.status_code
            else:
                return 208
        except:
            return 403
        return 200

    def like_recent(self, username):
        # Like the most recent post of `username`. (Continues on next line.)
        resp = self.session.get(
            "https://www.instagram.com/" + username + "/", proxies=self.proxy
        )
        time.sleep(1)
        soup = bs(resp.text, "html.parser")
scripts = soup.find_all("script")
        # Profile pages embed their JSON in script index 4 (or 3 as a
        # fallback). NOTE(review): the replace() literals below were destroyed
        # by extraction and `data_object` is never assigned — see class note.
        try:
            data_script = str(scripts[4])
            time.sleep(1)
            data_script = data_script.replace("""", "")
            data_json = json.loads(data_object)
        except:
            data_script = str(scripts[3])
            time.sleep(1)
            data_script = data_script.replace("""", "")
            data_json = json.loads(data_object)
        try:
            shortcode = data_json["entry_data"]["ProfilePage"][0]["graphql"]["user"][
                "edge_owner_to_timeline_media"
            ]["edges"][0]["node"]["shortcode"]
            return self.like("https://www.instagram.com/p/" + shortcode + "/")
        except IndexError:
            # No posts on the profile.
            return 404
        except KeyError:
            return 404

    def comment(self, post_link, comment_text):
        # Post `comment_text` on `post_link`. 200 = commented, 403 on
        # failure, any other value = HTTP status code.
        if post_link.find("/tv/") != -1:
            post_link = post_link.replace("/tv/", "/p/")
        try:
            post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "")
        except:
            pass
        try:
            resp = self.session.get(post_link, proxies=self.proxy)
            time.sleep(1)
            soup = bs(resp.text, "html.parser")
            scripts = soup.find_all("script")
            data_script = str(scripts[15])
            time.sleep(1)
            try:
                shortcode = post_link.split("/p/")[1].replace("/", "")
                # NOTE(review): literal truncated by extraction.
                data_script = data_script.replace(f"""", "")
                data_json = json.loads(data_object)
                id_post = data_json["graphql"]["shortcode_media"]["id"]
                url_post = f"https://www.instagram.com/web/comments/{id_post}/add/"
                headers = {
                    "accept": "*/*",
                    "accept-encoding": "gzip, deflate, br",
                    "accept-language": "en-US,en;q=0.9",
                    "content-length": "39",
                    "content-type": "application/x-www-form-urlencoded",
                    "cookie": self.cookie,
                    "origin": "https://www.instagram.com",
                    "referer": post_link,
                    "sec-fetch-dest": "empty",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-site": "same-origin",
                    "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                    "x-csrftoken": self.csrf_token,
                    "x-ig-app-id": "936619743392459",
                    "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
                    "x-instagram-ajax": "d3d3aea32e75",
                    "x-requested-with": "XMLHttpRequest",
                }
                response = requests.request(
                    "POST",
                    url_post,
                    headers=headers,
                    data=f"comment_text={comment_text}&replied_to_comment_id=".encode(
                        "utf-8"
                    ),
                    proxies=self.proxy,
                )
                if response.status_code != 200:
                    return response.status_code
        except:
            return 403
        return 200

    def comment_recent(self, username, comment_text):
        # Comment on the most recent post of `username`; 404 if none found.
        resp = self.session.get(
            "https://www.instagram.com/" + username + "/", proxies=self.proxy
        )
        time.sleep(1)
        soup = bs(resp.text, "html.parser")
        scripts = soup.find_all("script")
        try:
            data_script = str(scripts[4])
            time.sleep(1)
            # NOTE(review): literal truncated by extraction.
            data_script = data_script.replace("""", "")
            data_json = json.loads(data_object)
        except:
            data_script = str(scripts[3])
            time.sleep(1)
            data_script = data_script.replace("""", "")
            data_json = json.loads(data_object)
        try:
            shortcode = data_json["entry_data"]["ProfilePage"][0]["graphql"]["user"][
                "edge_owner_to_timeline_media"
            ]["edges"][0]["node"]["shortcode"]
            return self.comment(
                "https://www.instagram.com/p/" + shortcode + "/", comment_text
            )
        except IndexError:
            return 404
        except KeyError:
            return 404

    def already_followed(self, username):
        # Return True if the logged-in account follows `username`.
        resp = self.session.get(
            "https://www.instagram.com/" + username + "/", proxies=self.proxy
        )
        time.sleep(1)
        soup = bs(resp.text, "html.parser")
        scripts = soup.find_all("script")
        try:
            data_script = str(scripts[4])
            time.sleep(1)
            # NOTE(review): literal truncated by extraction.
            data_script = data_script.replace("""", "")
            data_json = json.loads(data_object)
        except:
            data_script = str(scripts[3])
            time.sleep(1)
            data_script = data_script.replace("""", "")
            data_json = json.loads(data_object)
        followed = data_json["entry_data"]["ProfilePage"][0]["graphql"]["user"][
            "followed_by_viewer"
        ]
        return bool(followed)

    def follow(self, username):
        # Follow `username`. 200 = followed, 208 = already following,
        # 404 on scrape failure, else the HTTP status code.
        try:
            if self.already_followed(username) == False:
                resp = self.session.get(
                    "https://www.instagram.com/" + username + "/", proxies=self.proxy
                )
                time.sleep(1)
                soup = bs(resp.text, "html.parser")
                scripts = soup.find_all("script")
                try:
                    data_script = str(scripts[4])
                    time.sleep(1)
                    # NOTE(review): literal truncated by extraction.
                    data_script = data_script.replace("""", "")
                    data_json = json.loads(data_object)
                except:
                    data_script =
str(scripts[3])
                    time.sleep(1)
                    # NOTE(review): literal truncated by extraction;
                    # `data_object` is never assigned — see class note.
                    data_script = data_script.replace("""", "")
                    data_json = json.loads(data_object)
                id_page = data_json["entry_data"]["ProfilePage"][0]["graphql"]["user"][
                    "id"
                ]
                url_page = (
                    f"https://www.instagram.com/web/friendships/{id_page}/follow/"
                )
                headers = {
                    "accept": "*/*",
                    "accept-encoding": "gzip, deflate, br",
                    "accept-language": "en-US,en;q=0.9",
                    "content-length": "0",
                    "content-type": "application/x-www-form-urlencoded",
                    "cookie": self.cookie,
                    "origin": "https://www.instagram.com",
                    "referer": f"https://www.instagram.com/{username}/",
                    "sec-fetch-dest": "empty",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-site": "same-origin",
                    "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                    "x-csrftoken": self.csrf_token,
                    "x-ig-app-id": "936619743392459",
                    "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
                    "x-instagram-ajax": "d3d3aea32e75",
                    "x-requested-with": "XMLHttpRequest",
                }
                response = requests.request(
                    "POST", url_page, headers=headers, proxies=self.proxy
                )
                if response.status_code == 200:
                    return 200
                else:
                    return response.status_code
            else:
                return 208
        except KeyError:
            return 404

    def unfollow(self, username):
        # Unfollow `username`. 200 = unfollowed, 208 = was not following,
        # 404 on scrape failure, else the HTTP status code.
        try:
            if self.already_followed(username) == True:
                resp = self.session.get(
                    "https://www.instagram.com/" + username + "/", proxies=self.proxy
                )
                time.sleep(1)
                soup = bs(resp.text, "html.parser")
                scripts = soup.find_all("script")
                try:
                    data_script = str(scripts[4])
                    time.sleep(1)
                    # NOTE(review): literal truncated by extraction.
                    data_script = data_script.replace("""", "")
                    data_json = json.loads(data_object)
                except:
                    data_script = str(scripts[3])
                    time.sleep(1)
                    data_script = data_script.replace("""", "")
                    data_json = json.loads(data_object)
                id_page = data_json["entry_data"]["ProfilePage"][0]["graphql"]["user"][
                    "id"
                ]
                url_page = (
                    f"https://www.instagram.com/web/friendships/{id_page}/unfollow/"
                )
                headers = {
                    "accept": "*/*",
                    "accept-encoding": "gzip, deflate, br",
                    "accept-language": "en-US,en;q=0.9",
                    "content-length": "0",
                    "content-type": "application/x-www-form-urlencoded",
                    "cookie": self.cookie,
                    "origin": "https://www.instagram.com",
                    "referer": f"https://www.instagram.com/{username}/",
                    "sec-fetch-dest": "empty",
                    "sec-fetch-mode": "cors",
                    "sec-fetch-site": "same-origin",
                    "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
                    "x-csrftoken": self.csrf_token,
                    "x-ig-app-id": "936619743392459",
                    "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV",
                    "x-instagram-ajax": "d3d3aea32e75",
                    "x-requested-with": "XMLHttpRequest",
                }
                response = requests.request(
                    "POST", url_page, headers=headers, proxies=self.proxy
                )
                if response.status_code == 200:
                    return 200
                else:
                    return response.status_code
            else:
                return 208
        except KeyError:
            return 404

    def story_view(self, username):
        # Mark every current story of `username` as seen. 200 on success,
        # 404 when the profile/stories cannot be scraped.
        try:
            resp = self.session.get(
                "https://www.instagram.com/" + username + "/", proxies=self.proxy
            )
            time.sleep(1)
            soup = bs(resp.text, "html.parser")
            scripts = soup.find_all("script")
            try:
                data_script = str(scripts[4])
                time.sleep(1)
                # NOTE(review): literal truncated by extraction.
                data_script = data_script.replace("""", "")
                data_json = json.loads(data_object)
            except:
                try:
                    data_script = str(scripts[3])
                    time.sleep(1)
                    data_script = data_script.replace("""", "")
                    data_json = json.loads(data_object)
                except:
                    return 404
            page_id = data_json["entry_data"]["ProfilePage"][0]["graphql"]["user"]["id"]
            # GraphQL reel query; variables are URL-encoded JSON.
            surl = f"https://www.instagram.com/graphql/query/?query_hash=c9c56db64beb4c9dea2d17740d0259d9&variables=%7B%22reel_ids%22%3A%5B%22{page_id}%22%5D%2C%22tag_names%22%3A%5B%5D%2C%22location_ids%22%3A%5B%5D%2C%22highlight_reel_ids%22%3A%5B%5D%2C%22precomposed_overlay%22%3Afalse%2C%22show_story_viewer_list%22%3Atrue%2C%22story_viewer_fetch_count%22%3A50%2C%22story_viewer_cursor%22%3A%22%22%2C%22stories_video_dash_manifest%22%3Afalse%7D"
            resp = self.session.get(surl, proxies=self.proxy)
            time.sleep(1)
            soup = bs(resp.text, "html.parser")
            data_json = json.loads(str(soup))
            story_count =
len(data_json["data"]["reels_media"][0]["items"]) for i in range(0, story_count): id_story = data_json["data"]["reels_media"][0]["items"][i]["id"] taken_at_timestamp = data_json["data"]["reels_media"][0]["items"][i][ "taken_at_timestamp" ] stories_page = f"https://www.instagram.com/stories/reel/seen" headers = { "accept": "*/*", "accept-encoding": "gzip, deflate, br", "accept-language": "en-US,en;q=0.9", "content-length": "127", "content-type": "application/x-www-form-urlencoded", "cookie": self.cookie, "origin": "https://www.instagram.com", "referer": f"https://www.instagram.com/stories/{username}/{id_story}/", "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36", "x-csrftoken": self.csrf_token, "x-ig-app-id": "936619743392459", "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFvZV", "x-instagram-ajax": "d3d3aea32e75", "x-requested-with": "XMLHttpRequest", } data = { "reelMediaId": id_story, "reelMediaOwnerId": page_id, "reelId": page_id, "reelMediaTakenAt": taken_at_timestamp, "viewSeenAt": taken_at_timestamp, } requests.request( "POST", stories_page, headers=headers, data=data, proxies=self.proxy ) except IndexError: return 404 except KeyError: return 404 return 200 def upload_post(self, image_path, caption=""): micro_time = int(datetime.now().timestamp()) headers = { "content-type": "image / jpg", "content-length": "1", "X-Entity-Name": f"fb_uploader_{micro_time}", "Offset": "0", "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36", "x-entity-length": "1", "X-Instagram-Rupload-Params": f'{{"media_type": 1, "upload_id": {micro_time}, "upload_media_height": 1080, "upload_media_width": 1080}}', "x-csrftoken": self.csrf_token, "x-ig-app-id": "1217981644879628", "cookie": self.cookie, } upload_response = requests.post( 
f"https://www.instagram.com/rupload_igphoto/fb_uploader_{micro_time}", data=open(image_path, "rb"), headers=headers, proxies=self.proxy, ) json_data = json.loads(upload_response.text) upload_id = json_data["upload_id"] if json_data["status"] == "ok": url = "https://www.instagram.com/create/configure/" payload = ( "upload_id=" + upload_id + "&caption=" + caption + "&usertags=&custom_accessibility_caption=&retry_timeout=" ) payload = payload.encode("utf-8") headers = { "authority": "www.instagram.com", "x-ig-www-claim": "hmac.AR2-43UfYbG2ZZLxh-BQ8N0rqGa-hESkcmxat2RqMAXejXE3", "x-instagram-ajax": "adb961e446b7-hot", "content-type": "application/x-www-form-urlencoded", "accept": "*/*", "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36", "x-requested-with": "XMLHttpRequest", "x-csrftoken": self.csrf_token, "x-ig-app-id": "1217981644879628", "origin": "https://www.instagram.com", "sec-fetch-site": "same-origin", "sec-fetch-mode": "cors", "sec-fetch-dest": "empty", "referer": "https://www.instagram.com/create/details/", "accept-language": "en-US,en;q=0.9,fa-IR;q=0.8,fa;q=0.7", "cookie": self.cookie, } response = requests.request( "POST", url, headers=headers, data=payload, proxies=self.proxy ) json_data = json.loads(response.text) if json_data["status"] == "ok": return 200 else: return 400 def upload_story(self, image_path): micro_time = int(datetime.now().timestamp()) headers = { "content-type": "image / jpg", "content-length": "1", "X-Entity-Name": f"fb_uploader_{micro_time}", "Offset": "0", "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36", "x-entity-length": "1", "X-Instagram-Rupload-Params": f'{{"media_type": 1, "upload_id": {micro_time}, "upload_media_height": 1080, "upload_media_width": 1080}}', "x-csrftoken": self.csrf_token, "x-ig-app-id": "1217981644879628", "cookie": self.cookie, } upload_response = requests.post( 
f"https://www.instagram.com/rupload_igphoto/fb_uploader_{micro_time}", data=open(image_path, "rb"), headers=headers, proxies=self.proxy, ) json_data = json.loads(upload_response.text) upload_id = json_data["upload_id"] if json_data["status"] == "ok": url = "https://www.instagram.com/create/configure_to_story/" payload = ( "upload_id=" + upload_id + "&caption=&usertags=&custom_accessibility_caption=&retry_timeout=" ) headers = { "authority": "www.instagram.com", "x-ig-www-claim": "hmac.AR2-43UfYbG2ZZLxh-BQ8N0rqGa-hESkcmxat2RqMAXejXE3", "x-instagram-ajax": "adb961e446b7-hot", "content-type": "application/x-www-form-urlencoded", "accept": "*/*", "user-agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36", "x-requested-with": "XMLHttpRequest", "x-csrftoken": self.csrf_token, "x-ig-app-id": "1217981644879628", "origin": "https://www.instagram.com", "sec-fetch-site": "same-origin", "sec-fetch-mode": "cors", "sec-fetch-dest": "empty", "referer": "https://www.instagram.com/create/details/", "accept-language": "en-US,en;q=0.9,fa-IR;q=0.8,fa;q=0.7", "cookie": self.cookie, } response = requests.request( "POST", url, headers=headers, data=payload, proxies=self.proxy ) json_data = json.loads(response.text) if json_data["status"] == "ok": return 200 else: return 400 def hashtag_posts(self, hashtag, limit=20): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/graphql/query/?query_hash=9b498c08113f1e09617a1703c22b2f32&variables=%7B%22tag_name%22%3A%22{hashtag}%22%2C%22first%22%3A{limit}%7D", headers=headers, proxies=self.proxy, ).text post_count = len( json.loads(response)["data"]["hashtag"]["edge_hashtag_to_media"]["edges"] ) if limit > post_count: limit = post_count links = [] for i in range(0, limit): links.append( "https://instagram.com/p/" + json.loads(response)["data"]["hashtag"]["edge_hashtag_to_media"][ "edges" ][i]["node"]["shortcode"] ) return links def location_posts(self, 
location_url, limit=20): id_location = location_url.split("/locations/")[1].split("/")[0] headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/graphql/query/?query_hash=36bd0f2bf5911908de389b8ceaa3be6d&variables=%7B%22id%22%3A%22{id_location}%22%2C%22first%22%3A{limit}%7D", headers=headers, proxies=self.proxy, ).text post_count = len( json.loads(response)["data"]["location"]["edge_location_to_media"]["edges"] ) if limit > post_count: limit = post_count links = [] for i in range(0, limit): links.append( "https://instagram.com/p/" + json.loads(response)["data"]["location"]["edge_location_to_media"][ "edges" ][i]["node"]["shortcode"] ) return links def user_posts_count(self, username): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text post_count = json.loads(response)["graphql"]["user"][ "edge_owner_to_timeline_media" ]["count"] return post_count def user_followers_count(self, username): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text followers_count = json.loads(response)["graphql"]["user"]["edge_followed_by"][ "count" ] return followers_count def user_follow_count(self, username): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text follow_count = json.loads(response)["graphql"]["user"]["edge_follow"]["count"] return follow_count def like_count(self, post_link): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass headers = self._get_headers() if post_link[-1] == "/": post_link = post_link[:-1] response = self.session.get( f"{post_link}/?__a=1", headers=headers, proxies=self.proxy ).text like_count = 
json.loads(response)["graphql"]["shortcode_media"][
            "edge_media_preview_like"
        ]["count"]
        return like_count

    def comment_count(self, post_link):
        # Number of comments on `post_link` (via the ?__a=1 JSON endpoint).
        if post_link.find("/tv/") != -1:
            post_link = post_link.replace("/tv/", "/p/")
        try:
            post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "")
        except:
            pass
        headers = self._get_headers()
        if post_link[-1] == "/":
            post_link = post_link[:-1]
        response = self.session.get(
            f"{post_link}/?__a=1", headers=headers, proxies=self.proxy
        ).text
        comment_count = json.loads(response)["graphql"]["shortcode_media"][
            "edge_media_preview_comment"
        ]["count"]
        return comment_count

    def user_posts(self, username, limit=50):
        # Return up to `limit` post links from `username`, paginating the
        # GraphQL feed in batches of 50 via the end_cursor.
        posts_have = self.user_posts_count(username)
        if posts_have < limit:
            limit = posts_have
        # limit_k tracks how many posts are still owed across pages.
        limit_k = limit
        headers = self._get_headers()
        response = self.session.get(
            f"https://www.instagram.com/{username}/?__a=1",
            headers=headers,
            proxies=self.proxy,
        ).text
        user_id = json.loads(response)["graphql"]["user"]["id"]
        links = []
        response = self.session.get(
            f"https://www.instagram.com/graphql/query/?query_hash=003056d32c2554def87228bc3fd9668a&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A{limit}%7D",
            headers=headers,
            proxies=self.proxy,
        ).text
        post_count = len(
            json.loads(response)["data"]["user"]["edge_owner_to_timeline_media"][
                "edges"
            ]
        )
        if limit > post_count:
            limit = post_count
        for i in range(0, limit):
            links.append(
                "https://instagram.com/p/"
                + json.loads(response)["data"]["user"]["edge_owner_to_timeline_media"][
                    "edges"
                ][i]["node"]["shortcode"]
            )
        if limit_k > 50:
            limit = limit_k - 50
            limit_k = limit
            # NOTE(review): nesting of this while under the if is
            # reconstructed from a collapsed source — confirm upstream.
            while limit_k > 0:
                try:
                    after = json.loads(response)["data"]["user"][
                        "edge_owner_to_timeline_media"
                    ]["page_info"]["end_cursor"]
                    response = self.session.get(
                        f'https://www.instagram.com/graphql/query/?query_hash=003056d32c2554def87228bc3fd9668a&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A50%2C%22after%22%3A%22{after.replace("==","")}%3D%3D%22%7D',
                        headers=headers,
                        proxies=self.proxy,
                    ).text
                    post_count = len(
                        json.loads(response)["data"]["user"][
                            "edge_owner_to_timeline_media"
                        ]["edges"]
                    )
                    if limit > post_count:
                        limit = post_count
                    limit_k -= limit
                    for i in range(0, limit):
                        links.append(
                            "https://instagram.com/p/"
                            + json.loads(response)["data"]["user"][
                                "edge_owner_to_timeline_media"
                            ]["edges"][i]["node"]["shortcode"]
                        )
                    limit = limit_k
                except:
                    # No further pages (or malformed payload): stop.
                    break
        return links

    def user_follows(self, username, limit=49):
        # Return up to `limit` usernames that `username` follows,
        # paginating in batches of 49/50 via the end_cursor.
        followed = self.user_follow_count(username)
        if followed < limit:
            limit = followed
        limit_k = limit
        headers = self._get_headers()
        response = self.session.get(
            f"https://www.instagram.com/{username}/?__a=1",
            headers=headers,
            proxies=self.proxy,
        ).text
        user_id = json.loads(response)["graphql"]["user"]["id"]
        usernames = []
        response = self.session.get(
            f"https://www.instagram.com/graphql/query/?query_hash=d04b0a864b4b54837c0d870b0e77e076&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A{limit}%7D",
            headers=headers,
            proxies=self.proxy,
        ).text
        follow_count = len(json.loads(response)["data"]["user"]["edge_follow"]["edges"])
        if limit > follow_count:
            limit = follow_count
        for i in range(0, limit):
            usernames.append(
                json.loads(response)["data"]["user"]["edge_follow"]["edges"][i]["node"][
                    "username"
                ]
            )
        if limit_k > 49:
            limit = limit_k - 49
            limit_k = limit
            # NOTE(review): nesting reconstructed — see user_posts.
            while limit_k > 0:
                try:
                    after = json.loads(response)["data"]["user"]["edge_follow"][
                        "page_info"
                    ]["end_cursor"]
                    response = self.session.get(
                        f'https://www.instagram.com/graphql/query/?query_hash=d04b0a864b4b54837c0d870b0e77e076&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A50%2C%22after%22%3A%22{after.replace("==","")}%3D%3D%22%7D',
                        headers=headers,
                        proxies=self.proxy,
                    ).text
                    follow_count = len(
                        json.loads(response)["data"]["user"]["edge_follow"]["edges"]
                    )
                    if limit > follow_count:
                        limit = follow_count
                    limit_k -= limit
                    for i in range(0, limit):
                        usernames.append(
                            json.loads(response)["data"]["user"]["edge_follow"][
                                "edges"
                            ][i]["node"]["username"]
                        )
                    limit = limit_k
                except:
                    break
        return usernames
def user_followers(self, username, limit=49): follower = self.user_followers_count(username) if follower < limit: limit = follower limit_k = limit headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text user_id = json.loads(response)["graphql"]["user"]["id"] usernames = [] response = self.session.get( f"https://www.instagram.com/graphql/query/?query_hash=c76146de99bb02f6415203be841dd25a&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A{limit}%7D", headers=headers, proxies=self.proxy, ).text follower_count = len( json.loads(response)["data"]["user"]["edge_followed_by"]["edges"] ) if limit > follower_count: limit = follower_count for i in range(0, limit): usernames.append( json.loads(response)["data"]["user"]["edge_followed_by"]["edges"][i][ "node" ]["username"] ) if limit_k > 49: limit = limit_k - 49 limit_k = limit while limit_k > 0: try: after = json.loads(response)["data"]["user"]["edge_followed_by"][ "page_info" ]["end_cursor"] response = self.session.get( f'https://www.instagram.com/graphql/query/?query_hash=c76146de99bb02f6415203be841dd25a&variables=%7B%22id%22%3A%22{user_id}%22%2C%22first%22%3A50%2C%22after%22%3A%22{after.replace("==","")}%3D%3D%22%7D', headers=headers, proxies=self.proxy, ).text follower_count = len( json.loads(response)["data"]["user"]["edge_followed_by"][ "edges" ] ) if limit > follower_count: limit = follower_count limit_k -= limit for i in range(0, limit): usernames.append( json.loads(response)["data"]["user"]["edge_followed_by"][ "edges" ][i]["node"]["username"] ) limit = limit_k except: break return usernames def post_likers(self, post_link, limit=50): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass likers = self.like_count(post_link) if likers < limit: limit = likers limit_k = limit headers = self._get_headers() 
shortcode = post_link.split("/p/")[1].replace("/", "") usernames = [] response = self.session.get( f"https://www.instagram.com/graphql/query/?query_hash=d5d763b1e2acf209d62d22d184488e57&variables=%7B%22shortcode%22%3A%22{shortcode}%22%2C%22first%22%3A{limit}%7D", headers=headers, proxies=self.proxy, ).text like_count = len( json.loads(response)["data"]["shortcode_media"]["edge_liked_by"]["edges"] ) if limit > like_count: limit = like_count for i in range(0, limit): usernames.append( json.loads(response)["data"]["shortcode_media"]["edge_liked_by"][ "edges" ][i]["node"]["username"] ) if limit_k > 50: limit = limit_k - 50 limit_k = limit while limit_k > 0: try: after = json.loads(response)["data"]["shortcode_media"][ "edge_liked_by" ]["page_info"]["end_cursor"] response = self.session.get( f'https://www.instagram.com/graphql/query/?query_hash=d5d763b1e2acf209d62d22d184488e57&variables=%7B%22shortcode%22%3A%22{shortcode}%22%2C%22first%22%3A50%2C%22after%22%3A%22{after.replace("==","")}%3D%3D%22%7D', headers=headers, proxies=self.proxy, ).text like_count = len( json.loads(response)["data"]["shortcode_media"][ "edge_liked_by" ]["edges"] ) if limit > like_count: limit = like_count limit_k -= limit for i in range(0, limit): usernames.append( json.loads(response)["data"]["shortcode_media"][ "edge_liked_by" ]["edges"][i]["node"]["username"] ) limit = limit_k except: break return usernames def post_commenters(self, post_link, limit=50): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass commenters = self.comment_count(post_link) if commenters < limit: limit = commenters limit_k = limit headers = self._get_headers() shortcode = post_link.split("/p/")[1].replace("/", "") usernames = [] response = self.session.get( 
f"https://www.instagram.com/graphql/query/?query_hash=bc3296d1ce80a24b1b6e40b1e72903f5&variables=%7B%22shortcode%22%3A%22{shortcode}%22%2C%22first%22%3A{limit}%7D", headers=headers, proxies=self.proxy, ).text comment_count = len( json.loads(response)["data"]["shortcode_media"][ "edge_media_to_parent_comment" ]["edges"] ) if limit > comment_count: limit = comment_count for i in range(0, limit): usernames.append( json.loads(response)["data"]["shortcode_media"][ "edge_media_to_parent_comment" ]["edges"][i]["node"]["owner"]["username"] ) if limit_k > 50: limit = limit_k - 50 limit_k = limit while limit_k > 0: try: response = self.session.get( "https://www.instagram.com/graphql/query/?query_hash=bc3296d1ce80a24b1b6e40b1e72903f5&variables={%22shortcode%22:%22" + shortcode + "%22,%22first%22:50,%22after%22:" + json.dumps( json.loads(response)["data"]["shortcode_media"][ "edge_media_to_parent_comment" ]["page_info"]["end_cursor"] ) + "}", headers=headers, proxies=self.proxy, ).text comment_count = len( json.loads(response)["data"]["shortcode_media"][ "edge_media_to_parent_comment" ]["edges"] ) if limit > comment_count: limit = comment_count limit_k -= limit for i in range(0, limit): usernames.append( json.loads(response)["data"]["shortcode_media"][ "edge_media_to_parent_comment" ]["edges"][i]["node"]["owner"]["username"] ) limit = limit_k except: break return usernames def feed_posts(self): headers = self._get_headers() response = self.session.get( "https://www.instagram.com/graphql/query/?query_hash=c699b185975935ae2a457f24075de8c7", headers=headers, proxies=self.proxy, ).text post_count = len( json.loads(response)["data"]["user"]["edge_web_feed_timeline"]["edges"] ) feed_posts = [] for i in range(0, post_count): feed_posts.append( "https://instagram.com/p/" + json.loads(response)["data"]["user"]["edge_web_feed_timeline"][ "edges" ][i]["node"]["shortcode"] ) return feed_posts def post_owner(self, post_link): if post_link.find("/tv/") != -1: post_link = 
post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass headers = self._get_headers() if post_link[-1] == "/": post_link = post_link[:-1] response = self.session.get( f"{post_link}/?__a=1", headers=headers, proxies=self.proxy ).text owner = json.loads(response)["graphql"]["shortcode_media"]["owner"]["username"] return owner def post_caption(self, post_link): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except Exception: pass headers = self._get_headers() if post_link[-1] == "/": post_link = post_link[:-1] response = self.session.get( f"{post_link}/?__a=1", headers=headers, proxies=self.proxy ).text caption = json.loads(response)["graphql"]["shortcode_media"][ "edge_media_to_caption" ]["edges"][0]["node"]["text"] return caption def post_location(self, post_link): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass headers = self._get_headers() if post_link[-1] == "/": post_link = post_link[:-1] response = self.session.get( f"{post_link}/?__a=1", headers=headers, proxies=self.proxy ).text location = { "id": json.loads(response)["graphql"]["shortcode_media"]["location"]["id"], "name": json.loads(response)["graphql"]["shortcode_media"]["location"][ "name" ], } return location def post_hashtags(self, post_link): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass hashtag_filter = self.post_caption(post_link).replace("\n", " ").split() hashtags = [] for hashtag in hashtag_filter: if hashtag.startswith("#"): hashtags.append(hashtag) return hashtags def post_tagged_user(self, post_link): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", 
"/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass headers = self._get_headers() if post_link[-1] == "/": post_link = post_link[:-1] tagged_users = [] try: response = self.session.get( f"{post_link}/?__a=1", headers=headers, proxies=self.proxy ).text tag_count = len( json.loads(response)["graphql"]["shortcode_media"][ "edge_sidecar_to_children" ]["edges"][0]["node"]["edge_media_to_tagged_user"]["edges"] ) for i in range(0, tag_count): tagged_users.append( json.loads(response)["graphql"]["shortcode_media"][ "edge_sidecar_to_children" ]["edges"][0]["node"]["edge_media_to_tagged_user"]["edges"][i][ "node" ][ "user" ][ "username" ] ) except: try: response = self.session.get( f"{post_link}/?__a=1", headers=headers, proxies=self.proxy ).text tag_count = len( json.loads(response)["graphql"]["shortcode_media"][ "edge_media_to_tagged_user" ]["edges"] ) for i in range(0, tag_count): tagged_users.append( json.loads(response)["graphql"]["shortcode_media"][ "edge_media_to_tagged_user" ]["edges"][i]["node"]["user"]["username"] ) except: pass return tagged_users def post_time(self, post_link): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass headers = self._get_headers() if post_link[-1] == "/": post_link = post_link[:-1] response = self.session.get( f"{post_link}/?__a=1", headers=headers, proxies=self.proxy ).text time = { "timestamp": json.loads(response)["graphql"]["shortcode_media"][ "taken_at_timestamp" ], "datetime": str( datetime.fromtimestamp( json.loads(response)["graphql"]["shortcode_media"][ "taken_at_timestamp" ] ) ), } return time def post_type(self, post_link): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass headers = self._get_headers() if post_link[-1] == "/": post_link = 
post_link[:-1] response = self.session.get( f"{post_link}/?__a=1", headers=headers, proxies=self.proxy ).text if bool(json.loads(response)["graphql"]["shortcode_media"]["is_video"]): post_type = "video" else: post_type = "picture" return post_type def video_views_count(self, post_link): if post_link.find("/tv/") != -1: post_link = post_link.replace("/tv/", "/p/") try: post_link = post_link.replace(post_link.split("/p/")[1].split("/")[1], "") except: pass if self.post_type(post_link) == "video": headers = self._get_headers() if post_link[-1] == "/": post_link = post_link[:-1] response = self.session.get( f"{post_link}/?__a=1", headers=headers, proxies=self.proxy ).text view_count = json.loads(response)["graphql"]["shortcode_media"][ "video_view_count" ] return view_count def followed_by_me(self, username): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text followed_by_viewer = bool( json.loads(response)["graphql"]["user"]["followed_by_viewer"] ) return followed_by_viewer def follows_me(self, username): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text follows_viewer = bool(json.loads(response)["graphql"]["user"]["follows_viewer"]) return follows_viewer def user_external_url(self, username): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text url = json.loads(response)["graphql"]["user"]["external_url"] return url def verified_user(self, username): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text is_verified = bool(json.loads(response)["graphql"]["user"]["is_verified"]) return is_verified def private_user(self, username): headers = self._get_headers() response = 
self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text is_private = bool(json.loads(response)["graphql"]["user"]["is_private"]) return is_private def user_bio(self, username): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text bio = json.loads(response)["graphql"]["user"]["biography"] return bio def user_dp(self, username): headers = self._get_headers() response = self.session.get( f"https://www.instagram.com/{username}/?__a=1", headers=headers, proxies=self.proxy, ).text dp_url = json.loads(response)["graphql"]["user"]["profile_pic_url_hd"] return dp_url def _get_headers(self, options=None): if options is None: options = dict() headers = { "accept": "*/*", "accept-encoding": "gzip, deflate, br", "accept-language": "en-US,en;q=0.9", "content-length": "0", "content-type": "application/x-www-form-urlencoded", "cookie": self.cookie, "sec-fetch-dest": "empty", "sec-fetch-mode": "cors", "sec-fetch-site": "same-origin", "user-agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36", "x-csrftoken": self.csrf_token, "x-ig-app-id": "936619743392459", "x-ig-www-claim": "hmac.AR3dC7naiVtTKkwrEY0hwTO9zj4kLxfvf4Srvp3wFyoZFqSx", "x-instagram-ajax": "d3d3aea32e75", "x-requested-with": "XMLHttpRequest", } for key, value in options.items(): headers[key] = value return headers ================================================ FILE: bhagavad_gita_api/SocialBot.py ================================================ import os import tweepy from PIL import Image, ImageDraw, ImageFont from textwrap3 import wrap from bhagavad_gita_api.config import settings from bhagavad_gita_api.models import gita as models from bhagavad_gita_api.MyIGBot import MyIGBot class SocialBot: sanskrit_text: str translation_hindi: str translation_english: str image_path: str = 
"bhagavad_gita_api/media/images/output.jpg" def __init__(self, verse, translations): self.translation_english = ( translations.filter(models.GitaTranslation.author_name == "Swami Sivananda") .first() .description ).replace("\n", " ") self.translation_hindi = ( translations.filter( models.GitaTranslation.author_name == "Swami Ramsukhdas" ) .first() .description ).replace("\n", " ") self.sanskrit_text = verse.text.replace("\n", " ") self.create_image_post(text=self.translation_english) def create_image_post(self, text): """ using pillow to add text on an image template, adjusting font size and line width to avoid overflows """ img = Image.open("bhagavad_gita_api/media/images/template.jpg") draw = ImageDraw.Draw(img) font_size = 40 font = ImageFont.truetype( "bhagavad_gita_api/media/helveticaneue.ttf", font_size ) lines = wrap(text=text, width=50) line_width, line_height = font.getsize(lines[0]) text_height = len(lines) * line_height while text_height > 250: print("in the loop") font_size -= 5 font = ImageFont.truetype( "bhagavad_gita_api/media/helveticaneue.ttf", font_size ) line_height -= 10 text_height = len(lines) * line_height y_text = 805 image_width, image_height = img.size for line in lines: line_width, line_height = font.getsize(line) draw.text( ((image_width - line_width) / 2, y_text), line, font=font, fill=(0, 0, 0), ) y_text += line_height rgb_im = img.convert("RGB") rgb_im.save("bhagavad_gita_api/media/images/output.jpg") print("image created") def post_on_twitter(self): auth = tweepy.OAuthHandler( settings.TWITTER["CONSUMER_KEY"], settings.TWITTER["CONSUMER_SECRET"] ) auth.set_access_token( settings.TWITTER["ACCESS_TOKEN"], settings.TWITTER["ACCESS_TOKEN_SECRET"] ) api = tweepy.API(auth) media = api.media_upload("bhagavad_gita_api/media/images/output.jpg") try: tweet_text = "Glories To Shri Hari" post_result = api.update_status( status=tweet_text, media_ids=[media.media_id_string] ) tweet_text = "Sanskrit Text : " + self.sanskrit_text sanskrit_text = 
api.update_status( status=tweet_text, in_reply_to_status_id=post_result.id, auto_populate_reply_metadata=True, ) tweet_text = "Hindi Translation : " + self.translation_hindi hindi_text = api.update_status( status=tweet_text, in_reply_to_status_id=sanskrit_text.id, auto_populate_reply_metadata=True, ) print(hindi_text) return 200 except Exception as e: return e def post_on_instagram(self): # remove cookie if exists, package throws error on expired cookie if os.path.exists("cookie_iiradhakrishnaii.bot"): os.remove("cookie_iiradhakrishnaii.bot") else: pass try: caption = f""" Glories To Shri Hari \n Sanskrit text : {self.sanskrit_text} \n Hindi translation: {self.translation_hindi}\n """ bot = MyIGBot( settings.INSTAGRAM["USERNAME"], settings.INSTAGRAM["PASSWORD"] ) response = bot.upload_post( "bhagavad_gita_api/media/images/output.jpg", caption=caption ) print(response) # if the response code is 200 that means ok return 200 except Exception: return 500 ================================================ FILE: bhagavad_gita_api/__init__.py ================================================ """Package bhagavad-gita-api. Bhagavad Gita API allows any developer to use content from Bhagavad Gita in their applications. 
MIT License Copyright (c) 2021 The Gita Initiative https://github.com/gita/bhagavad-gita-api """ from importlib.metadata import version __version__ = version(__package__) ================================================ FILE: bhagavad_gita_api/api/__init__.py ================================================ ================================================ FILE: bhagavad_gita_api/api/api_v2/__init__.py ================================================ ================================================ FILE: bhagavad_gita_api/api/api_v2/api.py ================================================ from fastapi import APIRouter from bhagavad_gita_api.api.api_v2.endpoints import gita, social api_router = APIRouter() api_router.include_router(gita.router) api_router.include_router(social.router, include_in_schema=True) ================================================ FILE: bhagavad_gita_api/api/api_v2/endpoints/__init__.py ================================================ ================================================ FILE: bhagavad_gita_api/api/api_v2/endpoints/gita.py ================================================ import logging import random from datetime import date from typing import List from fastapi import APIRouter, Depends, HTTPException, Response from sqlalchemy import func, or_ from sqlalchemy.orm import Session, joinedload from bhagavad_gita_api.api import deps from bhagavad_gita_api.models import gita as models from bhagavad_gita_api.models import schemas logger = logging.getLogger("api") logger.setLevel(logging.DEBUG) router = APIRouter() @router.get("/chapters/", response_model=List[schemas.GitaChapter], tags=["chapters"]) async def get_all_chapters( skip: int = 0, limit: int = 18, db: Session = Depends(deps.get_db), ): chapters = ( db.query(models.GitaChapter) .with_entities( models.GitaChapter.id, models.GitaChapter.slug, models.GitaChapter.name, models.GitaChapter.name_transliterated, models.GitaChapter.name_translated, models.GitaChapter.verses_count, 
models.GitaChapter.chapter_number, models.GitaChapter.name_meaning, models.GitaChapter.chapter_summary, models.GitaChapter.chapter_summary_hindi, ) .order_by(models.GitaChapter.id.asc()) .offset(skip) .limit(limit) .all() ) return chapters @router.get( "/chapters/{chapter_number}/", response_model=schemas.GitaChapter, tags=["chapters"] ) async def get_particular_chapter( chapter_number: int, db: Session = Depends(deps.get_db) ): chapter = ( db.query(models.GitaChapter) .filter(models.GitaChapter.chapter_number == chapter_number) .with_entities( models.GitaChapter.id, models.GitaChapter.slug, models.GitaChapter.name, models.GitaChapter.name_transliterated, models.GitaChapter.name_translated, models.GitaChapter.verses_count, models.GitaChapter.chapter_number, models.GitaChapter.name_meaning, models.GitaChapter.chapter_summary, models.GitaChapter.chapter_summary_hindi, ) .first() ) if chapter is None: raise HTTPException(status_code=404, detail="Chapter not found") return chapter # @router.get("/verses/", response_model=List[schemas.GitaVerse], tags=["verses"]) # def get_all_verses_from_all_chapters( # skip: int = 0, limit: int = 10, db: Session = Depends(deps.get_db) # ): # verses = ( # db.query(models.GitaVerse) # .options( # joinedload(models.GitaVerse.commentaries), # joinedload(models.GitaVerse.translations), # ) # .order_by(models.GitaVerse.id.asc()) # .offset(skip) # .limit(limit) # .all() # ) # return verses @router.get( "/chapters/{chapter_number}/verses/", response_model=List[schemas.GitaVerse], tags=["verses"], ) async def get_all_verses_from_particular_chapter( chapter_number: int, db: Session = Depends(deps.get_db) ): verses = ( db.query(models.GitaVerse) .options( joinedload(models.GitaVerse.commentaries), joinedload(models.GitaVerse.translations), ) .order_by(models.GitaVerse.id.asc()) .filter(models.GitaVerse.chapter_number == chapter_number) .all() ) if verses is None: raise HTTPException(status_code=404, detail="Verse not found") return verses 
@router.get( "/chapters/{chapter_number}/verses/{verse_number}/", response_model=schemas.GitaVerse, tags=["verses"], ) async def get_particular_verse_from_chapter( chapter_number: int, verse_number: int, db: Session = Depends(deps.get_db) ): verse = ( db.query(models.GitaVerse) .options( joinedload(models.GitaVerse.commentaries), joinedload(models.GitaVerse.translations), ) .filter( models.GitaVerse.chapter_number == chapter_number, models.GitaVerse.verse_number == verse_number, ) .first() ) if verse is None: raise HTTPException(status_code=404, detail="Verse not found") return verse @router.post("/set-daily-verse/", tags=["verses"]) async def set_daily_verse(db: Session = Depends(deps.get_db)): verse_order = random.randint(1, 700) verse = ( db.query(models.VerseOfDay) .filter( models.VerseOfDay.date == date.today(), ) .first() ) if verse is None: verse_of_day = models.VerseOfDay(verse_order=verse_order, date=date.today()) db.add(verse_of_day) db.commit() return Response(status_code=201, content="Verse of the day has been set.") else: return Response( status_code=200, content="Verse of the day has already been set." 
) @router.get( "/get-daily-verse/", response_model=schemas.GitaVerse, tags=["verses"], ) async def get_daily_verse(db: Session = Depends(deps.get_db)): verse_of_day = ( db.query(models.VerseOfDay) .filter( models.VerseOfDay.date == date.today(), ) .first() ) if verse_of_day: verse = ( db.query(models.GitaVerse) .options( joinedload(models.GitaVerse.commentaries), joinedload(models.GitaVerse.translations), ) .filter(models.GitaVerse.id == verse_of_day.verse_order) .first() ) if verse: print(verse) return verse raise HTTPException(status_code=404, detail="Verse of the day not found.") @router.get("/search", response_model=List[schemas.GitaVerseBase], tags=["search"]) def search_gita(query: str, db: Session = Depends(deps.get_db)): res = ( db.query(models.GitaVerse) .filter( or_( models.GitaVerse.transliteration.op("@@")(func.plainto_tsquery(query)), models.GitaVerse.word_meanings.op("@@")(func.plainto_tsquery(query)), ) ) .all() ) res += ( db.query(models.GitaVerse) .join(models.GitaTranslation) .filter( or_( models.GitaTranslation.author_name == "Swami Sivananda", models.GitaTranslation.author_name == "Dr. S. 
Sankaranarayan", models.GitaTranslation.author_name == "Shri Purohit Swami", ) ) .filter( models.GitaTranslation.description.op("@@")(func.plainto_tsquery(query)) ) .all() ) return set(res) ================================================ FILE: bhagavad_gita_api/api/api_v2/endpoints/social.py ================================================ import logging from datetime import date from fastapi import APIRouter, Depends, HTTPException, Response from sqlalchemy.orm import Session from bhagavad_gita_api.api import deps from bhagavad_gita_api.models import gita as models # from bhagavad_gita_api.utils import post_on_instagram,post_on_twitter from bhagavad_gita_api.SocialBot import SocialBot logger = logging.getLogger("api") logger.setLevel(logging.DEBUG) router = APIRouter() @router.post("/post_verse_of_the_day/", tags=["social"]) async def post_instagram(db: Session = Depends(deps.get_db)): verse_of_day = ( db.query(models.VerseOfDay) .filter( models.VerseOfDay.date == date.today(), ) .first() ) if verse_of_day: verse = ( db.query(models.GitaVerse) .join(models.GitaTranslation) .filter(models.GitaVerse.id == verse_of_day.verse_order) .first() ) if verse: # CALL INSTAGRAM POSTING FUNCTION HERE translations = db.query(models.GitaTranslation).filter( models.GitaTranslation.verse_id == verse.id ) bot = SocialBot(verse, translations) twitter_response = bot.post_on_twitter() # instagram_response = bot.post_on_instagram() if twitter_response == 200: return Response( status_code=201, content="Verse of the day has been posted on twitter", ) else: HTTPException( status_code=500, detail="internal server error in posting " ) raise HTTPException(status_code=404, detail="Verse of the day not found.") ================================================ FILE: bhagavad_gita_api/api/deps.py ================================================ from typing import Generator from fastapi import Depends, HTTPException, Security, status from fastapi.security.api_key import APIKeyHeader from 
sqlalchemy.orm import Session from bhagavad_gita_api import crud from bhagavad_gita_api.db.session import SessionLocal from bhagavad_gita_api.models.user import User API_KEY_NAME = "X-API-KEY" api_key_header_auth = APIKeyHeader(name=API_KEY_NAME, auto_error=True) def get_db() -> Generator: try: db = SessionLocal() yield db finally: db.close() def get_current_user( db: Session = Depends(get_db), api_key_header: str = Security(api_key_header_auth) ) -> User: if api_key_header not in crud.get_valid_api_keys(db): raise HTTPException( status_code=status.HTTP_401_UNAUTHORIZED, detail="Invalid API Key", ) user = crud.get_user_by_api_key(db, api_key=api_key_header) if not user: raise HTTPException(status_code=404, detail="Account not found.") return user def get_current_active_user( current_user: User = Depends(get_current_user), ) -> User: if not current_user.is_active: raise HTTPException(status_code=400, detail="Inactive account.") return current_user ================================================ FILE: bhagavad_gita_api/cli.py ================================================ import typer from bhagavad_gita_api.db.base_class import Base from bhagavad_gita_api.db.init_db import init_db from bhagavad_gita_api.db.session import SessionLocal, engine app = typer.Typer() @app.command() def delete_all_data(): """ deletes all data """ response = typer.prompt("Are you sure you want to delete all the data? 
[y/n]") if response == "y": typer.echo("Deleting...") Base.metadata.drop_all(bind=engine) typer.echo("Deleted.") @app.command() def seed_data(): """ seeds data to database """ typer.echo("Creating initial data") db = SessionLocal() init_db(db) typer.echo("Initial data created") if __name__ == "__main__": app() ================================================ FILE: bhagavad_gita_api/config.py ================================================ import os from typing import Optional from dotenv import load_dotenv from pydantic import AnyUrl, BaseSettings load_dotenv() def get_database_uri(): DB_USER = os.getenv("POSTGRES_USER") DB_PASS = os.getenv("POSTGRES_PASSWORD") DB_NAME = os.getenv("POSTGRES_DB") DB_HOST = os.getenv("DB_HOST") DB_PORT = os.getenv("DB_PORT") if None in [DB_USER, DB_PASS, DB_NAME, DB_HOST, DB_PORT]: return None return "postgresql://{user}:{password}@{host}:{port}/{database}".format( user=DB_USER, password=DB_PASS, host=DB_HOST, port=DB_PORT, database=DB_NAME ) class SqlDsn(AnyUrl): allowed_schemes = {"postgres", "postgresql", "sqlite", "mysql"} class Settings(BaseSettings): project_name: str = "Bhagavad Gita API" admin_email: str = "admin@bhagavadgita.io" debug: bool = False # Server server_name: Optional[str] server_host: Optional[str] sentry_dsn: Optional[str] secret_key: bytes = os.urandom(32) API_V2_STR: str = "/v2" SQLALCHEMY_DATABASE_URI: Optional[SqlDsn] = get_database_uri() TESTER_API_KEY: str # celery cronjobs CELERY_BROKER: str = os.getenv("CELERY_BROKER") CELERY_BACKEND: str = os.getenv("CELERY_BACKEND") CRONJOB_BASE_URL: str = os.getenv("CRONJOB_BASE_URL", "http://api:8081") TWITTER = { "CONSUMER_KEY": os.getenv("CONSUMER_KEY"), "CONSUMER_SECRET": os.getenv("CONSUMER_SECRET"), "CLIENT_ID": os.getenv("CLIENT_ID"), "CLIENT_SECRET": os.getenv("CLIENT_SECRET"), "ACCESS_TOKEN": os.getenv("ACCESS_TOKEN"), "ACCESS_TOKEN_SECRET": os.getenv("ACCESS_TOKEN_SECRET"), } INSTAGRAM = { "USERNAME": os.getenv("INSTAGRAM_USERNAME"), "PASSWORD": 
os.getenv("INSTAGRAM_PASSWORD"), } class Config: env_file = ".env" settings = Settings() if not settings.SQLALCHEMY_DATABASE_URI: print( "No SQLALCHEMY_DATABASE_URI found. \ \nUsing default set Sqlite database gita.db. This is not good for running in production!" ) settings.SQLALCHEMY_DATABASE_URI = "sqlite:///{}?{}".format( os.path.join(os.path.dirname(os.path.realpath(__file__)), "gita.db"), "check_same_thread=False", ) ================================================ FILE: bhagavad_gita_api/cronjobs/__init__.py ================================================ ================================================ FILE: bhagavad_gita_api/cronjobs/celery.py ================================================ import requests from celery import Celery from celery.schedules import crontab from bhagavad_gita_api.config import settings app = Celery( "cronjobs", broker=settings.CELERY_BROKER, backend=settings.CELERY_BACKEND, ) app.conf.timezone = "Asia/Calcutta" @app.task def set_verse(): url = "{}/v2/set-daily-verse/".format(settings.CRONJOB_BASE_URL) data = { "accept": "application/json", "X-API-KEY": settings.TESTER_API_KEY, } r = requests.post(url=url, data=data, headers=data) print(r) app.conf.beat_schedule = { "setup-verse-everyday": { "task": "bhagavad_gita_api.cronjobs.celery.set_verse", "schedule": crontab(hour=0, minute=0), }, } if __name__ == "__main__": app.start() ================================================ FILE: bhagavad_gita_api/crud.py ================================================ from sqlalchemy.orm import Session from bhagavad_gita_api.models.user import User def get_user(db: Session, user_id: int): return db.query(User).filter(User.id == user_id).first() def get_user_by_api_key(db: Session, api_key: str): return db.query(User).filter(User.api_key == api_key).first() def get_valid_api_keys(db: Session): valid_api_keys = [ u.api_key for u in db.query(User.api_key).filter(User.is_active == True).all() ] return valid_api_keys 
================================================ FILE: bhagavad_gita_api/data/__init__.py ================================================ """ Load initial data into database. isort:skip_file """ import os def insert_all(): """Insert data from github.com/gita/gita into database.""" from bhagavad_gita_api.data.insert import ( authors, languages, chapters, verses, translations, commentaries, ) # importing the modules executes the code in it ================================================ FILE: bhagavad_gita_api/data/helpers.py ================================================ from urllib.request import urlopen def remote_txt_file(url: str) -> str: string = "" file = urlopen(url) for line in file: decoded_line = line.decode("utf-8") string += decoded_line return string def gh_file_url(file, owner="gita", repo="gita", branch="main", folder="data"): base = "https://raw.githubusercontent.com" return f"{base}/{owner}/{repo}/{branch}/{folder+'/' if folder else ''}{file}" def get_file(file): return remote_txt_file(gh_file_url(file)) ================================================ FILE: bhagavad_gita_api/data/insert/__init__.py ================================================ ================================================ FILE: bhagavad_gita_api/data/insert/authors.py ================================================ import json from rich.progress import track from sqlalchemy.orm import sessionmaker from bhagavad_gita_api.data.helpers import get_file from bhagavad_gita_api.db.session import engine from bhagavad_gita_api.models.gita import GitaAuthor Session = sessionmaker(bind=engine) session = Session() content = get_file("authors.json") li = [] data = json.loads(content) for i in track(data, description="Loading authors"): li.append( GitaAuthor( name=i["name"], id=i["id"], ) ) session.add_all(li) session.commit() ================================================ FILE: bhagavad_gita_api/data/insert/chapters.py ================================================ import json 
"""Seed chapter records from github.com/gita/gita into the database."""
import json

from rich.progress import track
from sqlalchemy.orm import sessionmaker

from bhagavad_gita_api.data.helpers import get_file
from bhagavad_gita_api.db.session import engine
from bhagavad_gita_api.models.gita import GitaChapter

Session = sessionmaker(bind=engine)
session = Session()

chapters = []
for record in track(json.loads(get_file("chapters.json")), description="Loading chapters"):
    # slug example: "chapter-1-arjunas-dilemma"
    slug = "chapter-{}-{}".format(
        record["chapter_number"],
        record["name_translation"].replace(" ", "-").lower(),
    )
    chapters.append(
        GitaChapter(
            id=record["id"],
            name=record["name"],
            name_transliterated=record["name_transliterated"],
            name_translated=record["name_translation"],
            verses_count=record["verses_count"],
            chapter_number=record["chapter_number"],
            name_meaning=record["name_meaning"],
            chapter_summary=record["chapter_summary"],
            chapter_summary_hindi=record["chapter_summary_hindi"],
            slug=slug,
        )
    )

session.add_all(chapters)
session.commit()
"""Seed translation records from github.com/gita/gita into the database."""
import json

from rich.progress import track
from sqlalchemy.orm import sessionmaker

from bhagavad_gita_api.data.helpers import get_file
from bhagavad_gita_api.db.session import engine
from bhagavad_gita_api.models.gita import GitaTranslation

Session = sessionmaker(bind=engine)
session = Session()

# NOTE: source keys are camelCase (authorName/lang/verseNumber) while the
# ORM columns are snake_case.
translations = []
for record in track(json.loads(get_file("translation.json")), description="Loading translations"):
    translations.append(
        GitaTranslation(
            description=record["description"],
            author_name=record["authorName"],
            language=record["lang"],
            verse_id=record["verseNumber"],
            author_id=record["author_id"],
            language_id=record["language_id"],
        )
    )

session.add_all(translations)
session.commit()
from sqlalchemy.orm import Session

from bhagavad_gita_api import crud
from bhagavad_gita_api.config import settings
from bhagavad_gita_api.data import insert_all
from bhagavad_gita_api.db.base_class import Base
from bhagavad_gita_api.db.session import engine

# Import every model so Base.metadata knows about all tables before create_all.
from bhagavad_gita_api.models.gita import (  # NOQA
    GitaAuthor,
    GitaChapter,
    GitaCommentary,
    GitaLanguage,
    GitaTranslation,
    GitaVerse,
)
from bhagavad_gita_api.models.user import User  # NOQA


def init_db(db: Session) -> None:
    """Create all tables (idempotent) and seed the test user plus Gita data.

    :param db: an open SQLAlchemy session bound to ``engine``.
    """
    # FIX: create tables *before* the first query.  The original queried
    # the users table first and only worked because db/session.py happens
    # to call create_all() at import time.
    Base.metadata.create_all(engine)

    user_in = crud.get_user(db, user_id=1)
    new_db = False
    if not user_in:
        new_db = True
        user_in = User(
            id=1,
            full_name="Radha Krishna",
            email="admin@bhagavadgita.io",
            app_name="BhagavadGita.io",
            app_description="BhagavadGita.io is a modern Bhagavad Gita app with a simple, beautiful and easy to use interface, helping you focus on reading. It is an app built for Bhagavad Gita readers, by Bhagavad Gita readers.",  # noqa: E501
            app_link="https://bhagavadgita.io",
            api_key=settings.TESTER_API_KEY,
            is_active=True,
        )
        db.add(user_in)
        db.commit()

    # Only seed the Gita content on a brand-new database.
    if new_db:
        insert_all()
.filter( GitaVerse.verse_number == parent.verse_number, GitaVerse.chapter_number == parent.chapter_number, ) .with_entities( GitaVerse.id, ) .first() )[0] if "limit" in kwargs.keys(): query = ( GitaTranslationModel.get_query(info) .filter(GitaTranslation.verse_id == verse_id) .limit(kwargs.get("limit")) ) elif "authorName" in kwargs.keys(): query = ( GitaTranslationModel.get_query(info) .filter(GitaTranslation.author_name == kwargs.get("authorName")) .filter(GitaTranslation.verse_id == verse_id) ) elif "language" in kwargs.keys(): query = ( GitaTranslationModel.get_query(info) .filter(GitaTranslation.language == kwargs.get("language")) .filter(GitaTranslation.verse_id == verse_id) ) else: query = GitaTranslationModel.get_query(info).filter( GitaTranslation.verse_id == verse_id ) if "skip" in kwargs.keys(): query = query[kwargs.get("skip") :] if "first" in kwargs.keys(): query = query[: kwargs.get("first")] return query def resolve_commentaries(parent, info, **kwargs): verse_id = ( db.query(GitaVerse) .filter( GitaVerse.verse_number == parent.verse_number, GitaVerse.chapter_number == parent.chapter_number, ) .with_entities( GitaVerse.id, ) .first() )[0] if "limit" in kwargs.keys(): query = ( GitaCommentryModel.get_query(info) .filter(GitaCommentary.verse_id == verse_id) .limit(kwargs.get("limit")) ) elif "authorName" in kwargs.keys(): query = ( GitaCommentryModel.get_query(info) .filter(GitaCommentary.author_name == kwargs.get("authorName")) .filter(GitaCommentary.verse_id == verse_id) ) elif "language" in kwargs.keys(): query = ( GitaCommentryModel.get_query(info) .filter(GitaCommentary.language == kwargs.get("language")) .filter(GitaCommentary.verse_id == verse_id) ) else: query = GitaCommentryModel.get_query(info).filter( GitaCommentary.verse_id == verse_id ) if "skip" in kwargs.keys(): query = query[kwargs.get("skip") :] if "first" in kwargs.keys(): query = query[: kwargs.get("first")] return query class GitaChapterModel(SQLAlchemyObjectType): verses = List( 
class Query(ObjectType):
    """Root GraphQL query exposing chapters and verses."""

    chapters = List(
        GitaChapterModel,
        chapter_number=Int(),
        limit=Int(),
        first=Int(),
        skip=Int(),
    )
    verses = List(
        GitaVerseModel,
        verse_number=Int(),
        verse_order=Int(),
        limit=Int(),
        first=Int(),
        skip=Int(),
    )

    @staticmethod
    async def resolve_chapters(self, info, **kwargs):
        # Mutually exclusive filters, same precedence as the original:
        # chapter_number > limit > none.
        if "chapter_number" in kwargs:
            query = GitaChapterModel.get_query(info).filter(
                GitaChapter.chapter_number == kwargs.get("chapter_number")
            )
        elif "limit" in kwargs:
            query = GitaChapterModel.get_query(info).limit(kwargs.get("limit"))
        else:
            query = GitaChapterModel.get_query(info)

        # Relay-style slicing applied after filtering.
        if "skip" in kwargs:
            query = query[kwargs.get("skip") :]
        if "first" in kwargs:
            query = query[: kwargs.get("first")]
        return query

    @staticmethod
    async def resolve_verses(self, info, **kwargs):
        # Mutually exclusive filters, same precedence as the original:
        # verse_number > verse_order > limit > none.
        if "verse_number" in kwargs:
            query = GitaVerseModel.get_query(info).filter(
                GitaVerse.verse_number == kwargs.get("verse_number")
            )
        elif "verse_order" in kwargs:
            # verse_order maps onto the verse's primary-key id.
            query = GitaVerseModel.get_query(info).filter(
                GitaVerse.id == kwargs.get("verse_order")
            )
        elif "limit" in kwargs:
            query = GitaVerseModel.get_query(info).limit(kwargs.get("limit"))
        else:
            query = GitaVerseModel.get_query(info)

        if "skip" in kwargs:
            query = query[kwargs.get("skip") :]
        if "first" in kwargs:
            query = query[: kwargs.get("first")]
        return query
async def get_api_key(
    db: Session = Depends(deps.get_db),
    api_key_header: str = Security(api_key_header_auth),
) -> None:
    """FastAPI security dependency: validate the X-API-KEY header.

    Raises HTTP 401 when the supplied key is not among the active keys
    stored in the database.
    """
    valid_keys = get_valid_api_keys(db)
    if api_key_header not in valid_keys:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API Key.",
        )
app = FastAPI(
    title="Bhagavad Gita API",
    description="The Bhagavad Gita Application Programming Interface (API) "
    "allows a web or mobile developer to use the Bhagavad Gita text "
    "in their web or mobile application(s). It is a RESTful API that "
    "follows some of the Best Practices for designing a REST API which "
    "makes it easy for developers to use and implement.",
    version="2.0",
)

# FIX: use the documented list form for allowed origins.  A bare "*"
# string only behaves correctly by accident of Python string containment
# ('"*" in "*"' is True) in Starlette's CORSMiddleware.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["GET", "POST"],
    allow_headers=["*"],
)


@app.get("/", include_in_schema=False)
async def index():
    """Unauthenticated landing route; excluded from the OpenAPI schema."""
    return {"message": "Hare Krishna!"}


# All v2 routes require a valid API key (see get_api_key above).
app.include_router(
    api_router,
    prefix=settings.API_V2_STR,
    dependencies=[Security(get_api_key, scopes=["openid"])],
)

# app.add_route(
#     "/graphql",
#     GraphQLApp(executor_class=AsyncioExecutor, schema=graphene.Schema(query=Query)),
# )


def cli():
    """Entry point for the console scripts declared in pyproject.toml."""
    uvicorn.run(
        "bhagavad_gita_api.main:app",
        host="0.0.0.0",
        port=8081,
        # reload only when debug is truthy in settings
        reload=bool(settings.debug),
    )
class GitaLanguage(Base):
    # Lookup table of languages in which translations/commentaries exist.
    __tablename__ = "gita_languages"

    id = Column(Integer, primary_key=True, autoincrement=True)
    language = Column(String(200))

    # lazy="joined": eagerly load related rows with the language to avoid
    # N+1 queries when serializing.
    translations = relationship(GitaTranslation, lazy="joined")
    commentaries = relationship(GitaCommentary, lazy="joined")

    __table_args__ = (Index("ix_language", "language"),)


class GitaAuthor(Base):
    # Authors of translations and commentaries.
    __tablename__ = "gita_authors"

    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(200))

    # backref exposes .gitaAuthor on the translation/commentary side.
    translations = relationship(GitaTranslation, backref="gitaAuthor")
    commentaries = relationship(GitaCommentary, backref="gitaAuthor")

    __table_args__ = (Index("ix_author", "name"),)


class GitaVerse(Base):
    # A single verse: Sanskrit text plus transliteration and word meanings,
    # keyed by (chapter_number, verse_number) and slug for URL lookups.
    __tablename__ = "gita_verses"

    id = Column(Integer, primary_key=True, autoincrement=True)
    slug = Column(UnicodeText)
    verse_number = Column(Integer)
    chapter_number = Column(Integer)
    text = Column(UnicodeText)
    transliteration = Column(UnicodeText)
    word_meanings = Column(UnicodeText)
    chapter_id = Column(Integer, ForeignKey("gita_chapters.id"))

    # Eagerly joined so fetching a verse brings its translations and
    # commentaries in one query.
    translations = relationship(GitaTranslation, backref="gita_verses", lazy="joined")
    commentaries = relationship(GitaCommentary, backref="gita_verses", lazy="joined")

    __table_args__ = (Index("ix_verse", "chapter_number", "verse_number", "slug"),)
from typing import List

from pydantic import BaseModel


class BaseGitaModel(BaseModel):
    """Shared base: every schema carries an ``id`` and reads from ORM objects."""

    id: int

    class Config:
        # Allow constructing schemas directly from SQLAlchemy model instances.
        orm_mode = True


class GitaTranslation(BaseGitaModel):
    description: str
    author_name: str
    language: str


class GitaCommentary(BaseGitaModel):
    description: str
    author_name: str
    language: str


class GitaVerse(BaseGitaModel):
    """Verse with nested translations and commentaries."""

    verse_number: int
    chapter_number: int
    slug: str
    text: str
    transliteration: str
    word_meanings: str
    # pydantic copies mutable defaults per-instance, so [] is safe here.
    translations: List[GitaTranslation] = []
    commentaries: List[GitaCommentary] = []


class GitaVerseBase(BaseGitaModel):
    """Verse without the nested relations (lighter payload)."""

    verse_number: int
    chapter_number: int
    slug: str
    text: str
    transliteration: str
    word_meanings: str


class GitaChapter(BaseGitaModel):
    name: str
    slug: str
    name_transliterated: str
    name_translated: str
    verses_count: int
    chapter_number: int
    name_meaning: str
    chapter_summary: str
    chapter_summary_hindi: str


class VerseOfDay(BaseGitaModel):
    # FIX: removed the redundant ``id: int`` redeclaration — it is already
    # inherited from BaseGitaModel.
    verse_order: int
import datetime

from sqlalchemy import DateTime
from sqlalchemy.types import TypeDecorator


def tzware_datetime():
    """
    Return a timezone aware datetime.

    :return: Datetime
    """
    # Stdlib datetime.timezone.utc replaces the pytz.utc dependency; both
    # yield an aware datetime with a zero UTC offset.
    return datetime.datetime.now(datetime.timezone.utc)


class AwareDateTime(TypeDecorator):
    """
    A DateTime type which can only store tz-aware DateTimes.

    Source:
      https://gist.github.com/inklesspen/90b554c864b99340747e
    """

    cache_ok = True
    impl = DateTime(timezone=True)

    def process_bind_param(self, value, dialect):
        # Reject naive datetimes at bind time so the column never silently
        # stores an ambiguous local time.
        if isinstance(value, datetime.datetime) and value.tzinfo is None:
            raise ValueError("{!r} must be TZ-aware".format(value))
        return value

    def __repr__(self):
        return "AwareDateTime()"
dockerfile: Dockerfile.dev ports: - 8081:8081 volumes: - ./:/app stdin_open: true tty: true depends_on: - database env_file: - .env restart: on-failure:5 database: container_name: gita-db image: postgres:12-alpine volumes: - ./db_data:/var/lib/postgresql env_file: - .env restart: on-failure:5 redis-server: container_name: gita-redis image: redis:6.0-alpine restart: on-failure:5 celery-worker: container_name: gita-celery-worker image: bhagavadgita/bhagavad-gita-api command: celery -A bhagavad_gita_api.cronjobs worker -l INFO depends_on: - api - redis-server env_file: - .env restart: on-failure:5 celery-beat-worker: container_name: gita-celery-beat-worker image: bhagavadgita/bhagavad-gita-api command: celery -A bhagavad_gita_api.cronjobs beat -l INFO depends_on: - api - redis-server env_file: - .env restart: on-failure:5 ================================================ FILE: docker-compose.prod.yml ================================================ version: "3.7" services: api: container_name: gita-api image: bhagavadgita/bhagavad-gita-api build: context: . 
dockerfile: Dockerfile.prod ports: - 8081:8081 env_file: - .env restart: unless-stopped caddy: image: caddy/caddy:2.2.1-alpine container_name: caddy-service restart: unless-stopped ports: - "80:80" - "443:443" volumes: - $PWD/Caddyfile:/etc/caddy/Caddyfile - $PWD/site:/srv - caddy_data:/data - caddy_config:/config redis-server: container_name: gita-redis image: redis:6.0-alpine restart: unless-stopped celery-worker: container_name: gita-celery-worker image: bhagavadgita/bhagavad-gita-api command: celery -A bhagavad_gita_api.cronjobs worker -l INFO depends_on: - api - redis-server env_file: - .env restart: unless-stopped celery-beat-worker: container_name: gita-celery-beat-worker image: bhagavadgita/bhagavad-gita-api command: celery -A bhagavad_gita_api.cronjobs beat -l INFO depends_on: - api - redis-server env_file: - .env restart: unless-stopped volumes: caddy_data: caddy_config: ================================================ FILE: mypy.ini ================================================ [mypy] # --strict disallow_any_generics = True disallow_subclassing_any = True disallow_untyped_calls = True disallow_untyped_defs = True disallow_incomplete_defs = True check_untyped_defs = True disallow_untyped_decorators = True no_implicit_optional = True warn_redundant_casts = True warn_unused_ignores = True warn_return_any = True implicit_reexport = False strict_equality = True # --strict end [mypy-fastapi.concurrency] warn_unused_ignores = False ignore_missing_imports = True [mypy-fastapi.tests.*] ignore_missing_imports = True check_untyped_defs = True ================================================ FILE: pyproject.toml ================================================ [tool.poetry] name = "bhagavad-gita-api" version = "2.0.2" description = "Bhagavad Gita API allows any developer to use content from Bhagavad Gita in their applications." 
authors = ["The Gita Initiative "] license = "MIT" readme = "README.md" homepage = "https://bhagavadgita.io/" repository = "https://github.com/gita/bhagavad-gita-api" keywords = ["python","fastapi","api","gita","bhagavad-gita"] classifiers = [ "License :: OSI Approved :: MIT License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Intended Audience :: Education", "Intended Audience :: Information Technology", "Development Status :: 4 - Beta", ] [tool.poetry.dependencies] python = "^3.8" fastapi = "^0.65.2" graphene-elastic = "^0.7" psycopg2-pgevents = "^0.2.2" SQLAlchemy = "^1.4.19" uvicorn = "^0.14.0" graphene-sqlalchemy = "^2.3.0" graphene-sqlalchemy-filter = "^1.12.2" python-dotenv = "^0.18.0" pytz = "^2021.1" rich = "^10.4.0" graphene = "^2.1.8" typer = "^0.3.2" gunicorn = "^20.1.0" celery = "^5.1.2" redis = "^3.5.3" textwrap3 = "^0.9.2" Pillow = "^9.0.0" tweepy = "^4.4.0" bs4 = "^0.0.1" [tool.poetry.dev-dependencies] pre-commit = "^2.13.0" black = "^21.6b0" isort = "^5.9.1" flake8 = "^3.9.2" autoflake = "^1.4" ipykernel = "^5.5.5" [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api" [tool.poetry.scripts] bhagavad-gita-api = 'bhagavad_gita_api.main:cli' gita-api = 'bhagavad_gita_api.main:cli' [tool.isort] profile = "black" known_third_party = ["PIL", "bs4", "celery", "dotenv", "fastapi", "graphene", "graphene_sqlalchemy", "pydantic", "pytz", "requests", "rich", "sqlalchemy", "textwrap3", "tweepy", "typer", "uvicorn"] [tool.black] line-length = 88 include = '\.pyi?$' exclude = ''' ( /( \.eggs # exclude a few common directories in the | \.git # root of the project | \.hg | \.mypy_cache | \.tox | \.venv | _build | buck-out | build | dist )/ | foo.py # also separately exclude a file named foo.py in # the root of the project ) ''' ================================================ FILE: wait_for_db.sh ================================================ #!/bin/sh echo "Waiting 
for postgres..." while ! nc -z $DB_HOST $DB_PORT; do echo "couldn't connect to database @ $DB_HOST:$DB_PORT retrying in 1 second ..." sleep 1 done echo "PostgreSQL started" # sleep for 2 seconds for the database to be ready to accept connections sleep 2 # create tables and seed data to database # TODO change this after adding alembic migrations python bhagavad_gita_api/cli.py seed-data # below line is to tell docker to continue the rest of the build flow exec "$@"