[
  {
    "path": ".github/workflows/publish-to-pypi.yml",
    "content": "name: Publish to PyPI\n\non:\n  release:\n    types: [published]\n\njobs:\n  publish:\n    runs-on: ubuntu-latest\n    environment: release\n    permissions:\n      # IMPORTANT: this permission is mandatory for trusted publishing\n      id-token: write\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          fetch-depth: 1\n\n      - name: Set up Python 3.14\n        uses: actions/setup-python@v5\n        with:\n          python-version: \"3.14\"\n\n      - name: Install Poetry\n        uses: snok/install-poetry@v1\n        with:\n          virtualenvs-create: true\n          virtualenvs-in-project: true\n\n      - name: Install Dependencies\n        run: poetry install\n\n      - name: Build package\n        run: poetry build\n\n      - name: Publish package distributions to PyPI\n        uses: pypa/gh-action-pypi-publish@release/v1\n\n  sync-citation:\n    runs-on: ubuntu-latest\n    permissions:\n      contents: write\n    steps:\n      - uses: actions/checkout@v4\n        with:\n          ref: main\n          fetch-depth: 1\n      - name: Sync CITATION.cff to release tag\n        run: |\n          set -eu\n          version=\"${GITHUB_REF_NAME#v}\"\n          date=$(date -u +%Y-%m-%d)\n          sed -i \"s/^version: .*/version: $version/\" CITATION.cff\n          sed -i \"s/^date-released: .*/date-released: $date/\" CITATION.cff\n          if git diff --quiet CITATION.cff; then\n            echo \"CITATION.cff already in sync at $version / $date\"\n            exit 0\n          fi\n          git config user.name \"github-actions[bot]\"\n          git config user.email \"41898282+github-actions[bot]@users.noreply.github.com\"\n          git add CITATION.cff\n          git commit -m \"Sync CITATION.cff to $GITHUB_REF_NAME\"\n          git push origin HEAD:main"
  },
  {
    "path": ".gitignore",
    "content": "out/*\n\n# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\npip-wheel-metadata/\nshare/python-wheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.nox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n*.py,cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\ndb.sqlite3-journal\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# IPython\nprofile_default/\nipython_config.py\n\n# pyenv\n.python-version\n\n# pipenv\n#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.\n#   However, in case of collaboration, if having platform-specific dependencies or dependencies\n#   having no cross-platform support, pipenv may install dependencies that don't work, or not\n#   install all needed dependencies.\n#Pipfile.lock\n\n# PEP 582; used by e.g. github.com/David-OConnor/pyflow\n__pypackages__/\n\n# Celery stuff\ncelerybeat-schedule\ncelerybeat.pid\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n.dmypy.json\ndmypy.json\n\n# Pyre type checker\n.pyre/\n"
  },
  {
    "path": "CITATION.cff",
    "content": "cff-version: 1.2.0\nmessage: \"Feel free to cite this software in your research.\"\nauthors:\n  - family-names: McCain\n    given-names: Miles\n  - family-names: Thiel\n    given-names: David\n    orcid: https://orcid.org/0000-0002-0947-5921\ntitle: \"Truthbrush\"\nversion: 0.4.1\ndate-released: 2026-04-25\nurl: https://github.com/stanfordio/truthbrush\n"
  },
  {
    "path": "LICENSE",
    "content": "                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      
form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. 
Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. 
You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. 
You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. 
(Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright [yyyy] [name of copyright owner]\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "### Please see maintained fork at: https://github.com/w2rc/truthbrush\n\n# truthbrush\nTruthbrush is an API client for Truth Social.\n\nCurrently, this tool can:\n\n* Search for users, statuses, groups, or hashtags\n* Pull a user's statuses\n* Pull the list of \"People to Follow\" or suggested users\n* Pull \"trending\" hashtags\n* Pull \"trending\" Truth posts\n* Pull ads\n* Pull a user's metadata\n* Pull the list of users who liked a post\n* Pull the list of comments on a post\n* Pull \"trending\" groups\n* Pull list of suggested groups\n* Pull \"trending\" group hashtags\n* Pull posts from group timeline\n\nTruthbrush is designed for academic research, open source intelligence gathering, and data archival. It pulls all data from the publicly accessible API.\n\n## Installation\n\nFrom PyPI:\n\n```sh\npip install truthbrush\n```\n\nFrom git:\n\n* To install it, run `pip install git+https://github.com/stanfordio/truthbrush.git`\n\nFrom source:\n\n* Clone the repository and run `pip3 install .`. Provided your `pip` is setup correctly, this will make `truthbrush` available both as a command and as a Python package.\n\nAfter installation, you will need to set your Truth Social username and password as environment variables.\n\n`export TRUTHSOCIAL_USERNAME=foo`\n\n`export TRUTHSOCIAL_PASSWORD=bar`\n\nIf you encounter login issues, you can instead extract your login token from the truth:auth Local Storage store and export it in `TRUTHSOCIAL_TOKEN`.\n\nYou may also set these variables in a `.env` file in the directory from which you are running Truthbrush.\n\n### Public mode (no credentials)\n\nSome Truth Social endpoints are readable without authentication. 
To run Truthbrush against only those endpoints, pass `--no-auth` on the CLI or construct the client with `require_auth=False`:\n\n```sh\ntruthbrush --no-auth trends\ntruthbrush --no-auth user realDonaldTrump\n```\n\n```py\nfrom truthbrush import Api\n\napi = Api(require_auth=False)\nprint(api.trending())\n```\n\nEndpoints that require authentication will return an API error (typically HTTP 401) when called in public mode. Which endpoints are publicly accessible is determined by Truth Social and may change without notice.\n\n## CLI Usage\n\n```text\nUsage: truthbrush [OPTIONS] COMMAND [ARGS]...\n\nOptions:\n  --no-auth  Run without authentication. Only public endpoints will succeed.\n  --help     Show this message and exit.\n\n\nCommands:\n  search            Search for users, statuses or hashtags.\n  statuses          Pull a user's statuses.\n  suggestions       Pull the list of suggested users.\n  tags              Pull trendy tags.\n  trends            Pull trendy Truths.\n  ads               Pull ads.\n  user              Pull a user's metadata.\n  likes             Pull the list of users who liked a post\n  comments          Pull the list of oldest comments on a post\n  groupposts        Pull posts from a group's timeline\n  grouptags         Pull trending group tags.\n  grouptrends       Pull trending groups.\n  groupsuggestions  Pull list of suggested groups.\n\n```\n\n**Search for users, statuses, groups, or hashtags**\n\n```bash\ntruthbrush search --searchtype [accounts|statuses|hashtags|groups] QUERY\n```\n\nRestrict status results to a date window:\n\n```bash\ntruthbrush search --searchtype statuses --start-date 2024-11-01 --end-date 2024-11-07 QUERY\n```\n\n**Pull all statuses (posts) from a user**\n\n```bash\ntruthbrush statuses HANDLE\n```\n\nRestrict to a date window (UTC assumed when no timezone is given):\n\n```bash\ntruthbrush statuses --created-after 2024-11-01 --created-before 2024-11-07 HANDLE\n```\n\n**Pull \"People to Follow\" (suggested) 
users**\n\n```bash\ntruthbrush suggestions\n```\n\n**Pull trendy tags**\n\n```bash\ntruthbrush tags\n```\n\n**Pull ads**\n\n```bash\ntruthbrush ads\n```\n\n**Pull all of a user's metadata**\n\n```bash\ntruthbrush user HANDLE\n```\n\n**Pull the list of users who liked a post**\n\n```bash\ntruthbrush likes POST --includeall TOP_NUM\n```\n\n**Pull the list of oldest comments on a post**\n\n```bash\ntruthbrush comments POST --includeall --onlyfirst TOP_NUM\n```\n\n**Pull trending group tags**\n\n```bash\ntruthbrush grouptags\n```\n\n**Pull trending groups**\n\n```bash\ntruthbrush grouptrends\n```\n\n**Pull list of suggested groups**\n\n```bash\ntruthbrush groupsuggestions\n```\n\n**Pull posts from a group's timeline**\n\n```bash\ntruthbrush groupposts GROUP_ID\n```\n\n## Contributing\n\nContributions are encouraged! For small bug fixes and minor improvements, feel free to just open a PR. For larger changes, please open an issue first so that other contributors can discuss your plan, avoid duplicated work, and ensure it aligns with the goals of the project. Be sure to also follow the [code of conduct](CODE_OF_CONDUCT.md). Thanks!\n\nDevelopment setup (ensure you have [Poetry](https://python-poetry.org/) installed):\n\n```sh\npoetry install\npoetry shell\ntruthbrush --help # will use your local copy of truthbrush\n```\n\nTo run the tests:\n\n```sh\npytest\n\n# optionally run tests with verbose logging outputs:\npytest --log-cli-level=DEBUG -s\n```\n\nPlease format and lint your code with `ruff`, and run `ty` to check types:\n\n```sh\nruff format .\nruff check .\nty check truthbrush/\n```\n"
  },
  {
    "path": "SECURITY.md",
    "content": "# Security Policy\n\n## Reporting a Vulnerability\n\nIf you believe you have found a vulnerability, please send an email to\n[internetobservatory@stanford.edu](mailto:internetobservatory@stanford.edu) with information\non what the vulnerability is, steps to reproduce, and estimated severity. We will\nstrive to get back to you as soon as possible.\n\nPlease do not open GitHub issues for anything you suspect may be a security vulnerability.\n"
  },
  {
    "path": "pyproject.toml",
    "content": "[tool.poetry]\nname = \"truthbrush\"\nversion = \"0.4.1\"\ndescription = \"API client for Truth Social\"\nauthors = [\"R. Miles McCain <github@sendmiles.email>\", \"David Thiel\"]\nlicense = \"Apache-2.0\"\nreadme = \"README.md\"\n\n[tool.poetry.scripts]\ntruthbrush = \"truthbrush.cli:cli\"\n\n[tool.poetry.dependencies]\npython = \"^3.14\"\nclick = \"^8.3.0\"\nloguru = \"^0.7.3\"\npython-dotenv = \"^1.2.0\"\n\npython-dateutil = \"^2.9.0\"\ncurl_cffi = \"^0.15.0\"\n\n[tool.poetry.group.dev.dependencies]\npytest = \"^9.0.3\"\nruff = \"^0.15.0\"\nty = \">=0.0.31,<1.0\"\n\n[build-system]\nrequires = [\"poetry-core>=1.0.0\"]\nbuild-backend = \"poetry.core.masonry.api\"\n\n[tool.ruff]\ntarget-version = \"py314\"\nline-length = 100\n\n[tool.ruff.lint]\nselect = [\"E\", \"F\", \"I\", \"UP\", \"B\"]\nignore = [\"E501\"]\n\n[tool.ty.rules]\nunresolved-import = \"warn\"\n"
  },
  {
    "path": "test/test_api.py",
    "content": "from datetime import UTC\n\nimport pytest\nfrom dateutil import parser as date_parse\n\nfrom truthbrush.api import Api, LoginErrorException\n\n\n@pytest.fixture(scope=\"module\")\ndef api():\n    return Api()\n\n\ndef as_datetime(date_str):\n    \"\"\"Datetime formatter function. Ensures timezone is UTC. Consider moving to Api class.\"\"\"\n    return date_parse.parse(date_str).replace(tzinfo=UTC)\n\n\ndef test_lookup(api):\n    user = api.lookup(user_handle=\"realDonaldTrump\")\n    assert list(user.keys()) == [\n        \"id\",\n        \"username\",\n        \"acct\",\n        \"display_name\",\n        \"locked\",\n        \"bot\",\n        \"discoverable\",\n        \"group\",\n        \"created_at\",\n        \"note\",\n        \"url\",\n        \"avatar\",\n        \"avatar_static\",\n        \"header\",\n        \"header_static\",\n        \"followers_count\",\n        \"following_count\",\n        \"statuses_count\",\n        \"last_status_at\",\n        \"verified\",\n        \"location\",\n        \"website\",\n        \"accepting_messages\",\n        \"chats_onboarded\",\n        \"feeds_onboarded\",\n        \"show_nonmember_group_statuses\",\n        \"pleroma\",\n        \"emojis\",\n        \"fields\",\n    ]\n    assert isinstance(user[\"id\"], str)\n\n\ndef test_pull_statuses(api):\n    username = \"truthsocial\"\n\n    # COMPLETE PULLS\n\n    # it fetches a timeline of the user's posts:\n    full_timeline = list(api.pull_statuses(username=username, replies=False, verbose=True))\n    assert len(full_timeline) > 25  # more than one page\n\n    # the posts are in reverse chronological order:\n    latest, earliest = full_timeline[0], full_timeline[-1]\n    latest_at, earliest_at = as_datetime(latest[\"created_at\"]), as_datetime(earliest[\"created_at\"])\n    assert earliest_at < latest_at\n\n    # EMPTY PULLS\n\n    # can use created_after param for filtering out posts:\n    next_pull = list(\n        
api.pull_statuses(username=username, replies=False, created_after=latest_at, verbose=True)\n    )\n    assert not any(next_pull)\n\n    # can use since_id param for filtering out posts:\n    next_pull = list(\n        api.pull_statuses(username=username, replies=False, since_id=latest[\"id\"], verbose=True)\n    )\n    assert not any(next_pull)\n\n    # PARTIAL PULLS\n\n    n_posts = 50  # two and a half pages worth, to verify everything is ok\n    recent = full_timeline[n_posts]\n    recent_at = as_datetime(recent[\"created_at\"])\n\n    # can use created_after param for filtering out posts:\n    partial_pull = list(\n        api.pull_statuses(username=username, replies=False, created_after=recent_at, verbose=True)\n    )\n    assert len(partial_pull) == n_posts\n    assert recent[\"id\"] not in [post[\"id\"] for post in partial_pull]\n\n    # can use since_id param for filtering out posts:\n    partial_pull = list(\n        api.pull_statuses(username=username, replies=False, since_id=recent[\"id\"], verbose=True)\n    )\n    assert len(partial_pull) == n_posts\n    assert recent[\"id\"] not in [post[\"id\"] for post in partial_pull]\n\n    # POST INFO\n    # contains status info\n    assert list(latest.keys()) == [\n        \"id\",\n        \"created_at\",\n        \"in_reply_to_id\",\n        \"quote_id\",\n        \"in_reply_to_account_id\",\n        \"sensitive\",\n        \"spoiler_text\",\n        \"visibility\",\n        \"language\",\n        \"uri\",\n        \"url\",\n        \"content\",\n        \"account\",\n        \"media_attachments\",\n        \"mentions\",\n        \"tags\",\n        \"card\",\n        \"group\",\n        \"quote\",\n        \"in_reply_to\",\n        \"reblog\",\n        \"sponsored\",\n        \"replies_count\",\n        \"reblogs_count\",\n        \"favourites_count\",\n        \"favourited\",\n        \"reblogged\",\n        \"muted\",\n        \"pinned\",\n        \"bookmarked\",\n        \"poll\",\n        \"emojis\",\n      
  \"_pulled\",\n    ]\n    assert isinstance(latest[\"id\"], str)\n\n\ndef test_get_auth_id_raises_login_error_exception(api):\n    with pytest.raises(LoginErrorException):\n        api.get_auth_id(\"invalid_username\", \"invalid_password\")\n\n\ndef test_public_mode_does_not_require_credentials(monkeypatch):\n    monkeypatch.delenv(\"TRUTHSOCIAL_USERNAME\", raising=False)\n    monkeypatch.delenv(\"TRUTHSOCIAL_PASSWORD\", raising=False)\n    monkeypatch.delenv(\"TRUTHSOCIAL_TOKEN\", raising=False)\n    public_api = Api(username=None, password=None, token=None, require_auth=False)\n    assert public_api.auth_id is None\n    # user_likes calls __check_login then short-circuits on top_num < 1 before any HTTP.\n    assert list(public_api.user_likes(\"abc\", top_num=0)) == []\n\n\ndef test_strict_mode_still_raises_without_credentials(monkeypatch):\n    monkeypatch.delenv(\"TRUTHSOCIAL_USERNAME\", raising=False)\n    monkeypatch.delenv(\"TRUTHSOCIAL_PASSWORD\", raising=False)\n    monkeypatch.delenv(\"TRUTHSOCIAL_TOKEN\", raising=False)\n    strict_api = Api(username=None, password=None, token=None)\n    with pytest.raises(LoginErrorException):\n        strict_api.lookup(user_handle=\"realDonaldTrump\")\n"
  },
  {
    "path": "truthbrush/__init__.py",
    "content": "from truthbrush.api import Api, CFBlockException, GeoblockException, LoginErrorException\n\n__all__ = [\"Api\", \"CFBlockException\", \"GeoblockException\", \"LoginErrorException\"]\n"
  },
  {
    "path": "truthbrush/api.py",
    "content": "import json\nimport logging\nimport os\nfrom collections.abc import Iterator\nfrom datetime import UTC, datetime\nfrom time import sleep\nfrom typing import Any, Literal, cast\n\nimport curl_cffi\nfrom curl_cffi import requests\nfrom dateutil import parser as date_parse\nfrom dotenv import load_dotenv\nfrom loguru import logger\n\nload_dotenv()  # take environment variables from .env.\n\n_DEBUG_ENV = os.getenv(\"DEBUG\") or \"\"\nlogging.basicConfig(\n    level=logging.DEBUG if _DEBUG_ENV.lower() not in (\"\", \"false\") else logging.INFO\n)\n\nBASE_URL = \"https://truthsocial.com\"\nAPI_BASE_URL = \"https://truthsocial.com/api\"\nUSER_AGENT: str = \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/146.0.0.0 Safari/537.36\"\nIMPERSONATE_TARGET: str = \"chrome146\"\n\n# Oauth client credentials, from https://truthsocial.com/packs/js/application-d77ef3e9148ad1d0624c.js\nCLIENT_ID = \"9X1Fdd-pxNsAgEDNi_SfhJWi8T-vLuV2WVzKIbkTCw4\"\nCLIENT_SECRET = \"ozF8jzI4968oTKFkEnsBC-UbLPCdrSv0MkXGQu2o_-M\"\n\nproxies = {\"http\": os.getenv(\"http_proxy\"), \"https\": os.getenv(\"https_proxy\")}\n\nTRUTHSOCIAL_USERNAME = os.getenv(\"TRUTHSOCIAL_USERNAME\")\nTRUTHSOCIAL_PASSWORD = os.getenv(\"TRUTHSOCIAL_PASSWORD\")\n\nTRUTHSOCIAL_TOKEN = os.getenv(\"TRUTHSOCIAL_TOKEN\")\n\n\nclass LoginErrorException(Exception):\n    pass\n\n\nclass GeoblockException(LoginErrorException):\n    \"\"\"Raised when Truth Social blocks access due to geographic restrictions\"\"\"\n\n    pass\n\n\nclass CFBlockException(LoginErrorException):\n    \"\"\"Raised when Cloudflare blocks the request\"\"\"\n\n    pass\n\n\ndef date_to_bound(dt_input: str | datetime, bound: Literal[\"start\", \"end\"]) -> int:\n    if isinstance(dt_input, str):\n        dt_input = datetime.fromisoformat(dt_input)\n        if dt_input.hour or dt_input.minute or dt_input.second or dt_input.microsecond:\n            raise ValueError(\n                \"date string must not 
include a time component. Pass in datetime object for time-specific bounds.\"\n            )\n\n    if dt_input.tzinfo is None:\n        dt_input = dt_input.replace(tzinfo=UTC)\n\n    if bound == \"start\":\n        dt = dt_input.replace(hour=0, minute=0, second=0, microsecond=0)\n        ms = int(dt.timestamp() * 1000)\n        return (ms << 16) | 0x0000\n    else:\n        dt = dt_input.replace(hour=23, minute=59, second=59, microsecond=999999)\n        ms = int(dt.timestamp() * 1000)\n        return (ms << 16) | 0xFFFF\n\n\nclass Api:\n    def __init__(\n        self,\n        username: str | None = TRUTHSOCIAL_USERNAME,\n        password: str | None = TRUTHSOCIAL_PASSWORD,\n        token: str | None = TRUTHSOCIAL_TOKEN,\n        *,\n        require_auth: bool = True,\n    ):\n        self.ratelimit_max = 300\n        self.ratelimit_remaining: int | None = None\n        self.ratelimit_reset: datetime | None = None\n        self.__username = username\n        self.__password = password\n        self.auth_id = token\n        self.require_auth = require_auth\n\n    def __check_login(self):\n        \"\"\"Runs before any login-walled function to check for login credentials and generates an auth ID token\"\"\"\n        if self.auth_id is None:\n            if not self.require_auth:\n                return\n            if self.__username is None:\n                raise LoginErrorException(\"Username is missing.\")\n            if self.__password is None:\n                raise LoginErrorException(\"Password is missing.\")\n            self.auth_id = self.get_auth_id(self.__username, self.__password)\n            logger.warning(f\"Using token {self.auth_id}\")\n\n    def _make_session(self):\n        s = requests.Session()\n        return s\n\n    def _check_ratelimit(self, resp):\n        if resp.headers.get(\"x-ratelimit-limit\") is not None:\n            self.ratelimit_max = int(resp.headers.get(\"x-ratelimit-limit\"))\n        if 
resp.headers.get(\"x-ratelimit-remaining\") is not None:\n            self.ratelimit_remaining = int(resp.headers.get(\"x-ratelimit-remaining\"))\n        if resp.headers.get(\"x-ratelimit-reset\") is not None:\n            self.ratelimit_reset = date_parse.parse(resp.headers.get(\"x-ratelimit-reset\"))\n\n        if (\n            self.ratelimit_remaining is not None\n            and self.ratelimit_remaining <= 50\n            and self.ratelimit_reset is not None\n        ):  # We do 50 to be safe; their tracking is a bit stochastic... it can jump down quickly\n            now = datetime.now(UTC)\n            time_to_sleep = (self.ratelimit_reset.replace(tzinfo=UTC) - now).total_seconds()\n            logger.warning(f\"Approaching rate limit; sleeping for {time_to_sleep} seconds...\")\n            if time_to_sleep > 0:\n                sleep(time_to_sleep)\n            else:\n                sleep(10)\n\n    def _get(self, url: str, params: dict | None = None) -> Any:\n        headers = {\"User-Agent\": USER_AGENT}\n        if self.auth_id is not None:\n            headers[\"Authorization\"] = \"Bearer \" + self.auth_id\n        try:\n            resp = self._make_session().get(\n                API_BASE_URL + url,\n                params=params,\n                proxies=proxies,\n                impersonate=IMPERSONATE_TARGET,\n                headers=headers,\n            )\n        except curl_cffi.curl.CurlError as e:\n            logger.error(f\"Curl error: {e}\")\n            return None\n\n        # Will also sleep\n        self._check_ratelimit(resp)\n\n        try:\n            r = resp.json()\n        except json.JSONDecodeError:\n            body = resp.text\n            if \"Just a moment\" in body or \"cdn-cgi/challenge-platform\" in body:\n                raise CFBlockException(\n                    \"Cloudflare challenge page received instead of JSON. 
\"\n                    \"Source IP is likely flagged; try a different network.\"\n                ) from None\n            logger.error(f\"Failed to decode JSON: {body}\")\n            r = None\n\n        return r\n\n    def _get_paginated(\n        self, url: str, params: dict | None = None, resume: str | None = None\n    ) -> Any:\n        next_link: str | None = API_BASE_URL + url\n        headers = {\"User-Agent\": USER_AGENT}\n        if self.auth_id is not None:\n            headers[\"Authorization\"] = \"Bearer \" + self.auth_id\n\n        if resume is not None:\n            next_link += f\"?max_id={resume}\"\n\n        while next_link is not None:\n            resp = self._make_session().get(\n                next_link,\n                params=params,\n                proxies=proxies,\n                impersonate=IMPERSONATE_TARGET,\n                headers=headers,\n            )\n            link_header = resp.headers.get(\"Link\", \"\")\n            next_link = None\n            for link in link_header.split(\",\"):\n                parts = link.split(\";\")\n                if len(parts) == 2 and parts[1].strip() == 'rel=\"next\"':\n                    next_link = parts[0].strip(\"<>\")\n                    break\n            logger.info(f\"Next: {next_link}, resp: {resp}, headers: {resp.headers}\")\n            yield resp.json()\n\n            # Will also sleep\n            self._check_ratelimit(resp)\n\n    def user_likes(self, post: str, include_all: bool = False, top_num: int = 40) -> Iterator[dict]:\n        \"\"\"Return the top_num most recent (or all) users who liked the post.\"\"\"\n        self.__check_login()\n        top_num = int(top_num)\n        if top_num < 1:\n            return\n        post = post.split(\"/\")[-1]\n        n_output = 0\n        for followers_batch in self._get_paginated(\n            f\"/v1/statuses/{post}/favourited_by\", resume=None, params=dict(limit=80)\n        ):\n            for f in followers_batch:\n          
      yield f\n                n_output += 1\n                if not include_all and n_output >= top_num:\n                    return\n\n    def pull_comments(\n        self,\n        post: str,\n        include_all: bool = False,\n        only_first: bool = False,\n        top_num: int = 40,\n    ) -> Iterator[dict]:\n        \"\"\"Return the top_num oldest (or all) replies to a post.\"\"\"\n        self.__check_login()\n        top_num = int(top_num)\n        if top_num < 1:\n            return\n        post = post.split(\"/\")[-1]\n        n_output = 0\n        for followers_batch in self._get_paginated(\n            f\"/v1/statuses/{post}/context/descendants\",\n            resume=None,\n            params=dict(sort=\"oldest\"),\n        ):\n            # TO-DO: sort by sort=controversial, sort=newest, sort=oldest, sort=trending\n            for f in followers_batch:\n                if (only_first and f[\"in_reply_to_id\"] == post) or not only_first:\n                    yield f\n                    n_output += 1\n                    if not include_all and n_output >= top_num:\n                        return\n\n    def lookup(self, user_handle: str | None = None) -> dict | None:\n        \"\"\"Lookup a user's information.\"\"\"\n\n        self.__check_login()\n        assert user_handle is not None\n        return self._get(\"/v1/accounts/lookup\", params=dict(acct=user_handle))\n\n    def search(\n        self,\n        searchtype: str | None = None,\n        query: str | None = None,\n        limit: int = 40,\n        resolve: bool = True,\n        offset: int = 0,\n        min_id: str = \"0\",\n        max_id: str | None = None,\n        start_date: str | datetime | None = None,\n        end_date: str | datetime | None = None,\n    ) -> Iterator[dict]:\n        \"\"\"Search users, statuses or hashtags.\"\"\"\n\n        self.__check_login()\n        assert query is not None and searchtype is not None\n\n        # error handling for date and id bounds\n       
 if min_id != \"0\" and start_date is not None:\n            raise ValueError(\"Cannot specify both min_id and start_date\")\n        if max_id is not None and end_date is not None:\n            raise ValueError(\"Cannot specify both max_id and end_date\")\n\n        if start_date is not None:\n            min_id = str(date_to_bound(start_date, \"start\"))\n        if end_date is not None:\n            max_id = str(date_to_bound(end_date, \"end\"))\n        if max_id is not None:\n            assert min_id < max_id, \"min_id must be less than max_id\"\n\n        # Truth Social's /v2/search caps each page at ~20 regardless of `limit`,\n        # and pagination state is not shared across backend nodes — so the same\n        # offset can return data on one call and an empty page on the next.\n        PAGE_SIZE = 20\n        MAX_EMPTY_RETRIES = 1\n        total_yielded = 0\n        empty_streak = 0\n        while total_yielded < limit:\n            fetch_size = min(PAGE_SIZE, limit - total_yielded)\n            params = dict(\n                q=query,\n                resolve=resolve,\n                limit=fetch_size,\n                type=searchtype,\n                offset=offset,\n                min_id=min_id,\n            )\n            if max_id is not None:\n                params[\"max_id\"] = max_id\n\n            resp = self._get(\"/v2/search\", params=params)\n\n            if not resp:\n                break\n\n            page_count = len(resp.get(searchtype) or [])\n\n            if page_count == 0:\n                empty_streak += 1\n                if empty_streak > MAX_EMPTY_RETRIES:\n                    break\n                # Advance by PAGE_SIZE (not page_count, which is 0) so the\n                # retry probes a different offset rather than re-polling.\n                offset += PAGE_SIZE\n                sleep(1)\n                continue\n            empty_streak = 0\n\n            yield resp\n            total_yielded += page_count\n          
  offset += page_count\n\n    def hashtag(\n        self,\n        tag: str | None = None,\n        limit: int = 100,\n    ) -> Iterator[list[dict]]:\n        \"\"\"Collect posts with a specific hashtag.\"\"\"\n\n        self.__check_login()\n        assert tag is not None\n        if tag.startswith(\"#\"):\n            # Remove the hashtag symbol\n            tag = tag[1:]\n\n        num_results = 0\n        params: dict = dict()\n        while num_results < limit:\n            logger.info(f\"Collecting posts with hashtag: {tag}, max_id: {params.get('max_id')}\")\n            resp = self._get(\n                f\"/v1/timelines/tag/{tag}\",\n                params=params,\n            )\n\n            if not resp:\n                break\n\n            # Filter out empty results\n            results = [value for value in resp if value]\n            if not results:\n                break\n\n            num_results += len(results)\n            params[\"max_id\"] = results[-1][\"id\"]\n\n            yield results\n\n    def trending(self, limit=10):\n        \"\"\"Return trending truths.\n        Optional arg limit<20 specifies number to return.\"\"\"\n\n        self.__check_login()\n        return self._get(f\"/v1/truth/trending/truths?limit={limit}\")\n\n    def group_posts(self, group_id: str, limit: int = 20) -> list[dict]:\n        self.__check_login()\n        timeline: list[dict] = []\n        posts = self._get(f\"/v1/timelines/group/{group_id}?limit={limit}\")\n        while posts:\n            timeline += posts\n            limit = limit - len(posts)\n            if limit <= 0:\n                break\n            max_id = posts[-1][\"id\"]\n            posts = self._get(f\"/v1/timelines/group/{group_id}?max_id={max_id}&limit={limit}\")\n        return timeline\n\n    def tags(self):\n        \"\"\"Return trending tags.\"\"\"\n\n        self.__check_login()\n        return self._get(\"/v1/trends\")\n\n    def suggested(self, maximum: int = 50) -> Any:\n        
\"\"\"Return a list of suggested users to follow.\"\"\"\n        self.__check_login()\n        return self._get(f\"/v2/suggestions?limit={maximum}\")\n\n    def trending_groups(self, limit=10):\n        \"\"\"Return trending group truths.\n        Optional arg limit<20 specifies number to return.\"\"\"\n\n        self.__check_login()\n        return self._get(f\"/v1/truth/trends/groups?limit={limit}\")\n\n    def group_tags(self):\n        \"\"\"Return trending group tags.\"\"\"\n\n        self.__check_login()\n        return self._get(\"/v1/groups/tags\")\n\n    def suggested_groups(self, maximum: int = 50) -> Any:\n        \"\"\"Return a list of suggested groups to follow.\"\"\"\n        self.__check_login()\n        return self._get(f\"/v1/truth/suggestions/groups?limit={maximum}\")\n\n    def ads(self, device: str = \"desktop\") -> Any:\n        \"\"\"Return a list of ads from Rumble's Ad Platform via Truth Social API.\"\"\"\n\n        self.__check_login()\n        return self._get(f\"/v3/truth/ads?device={device}\")\n\n    def user_followers(\n        self,\n        user_handle: str | None = None,\n        user_id: str | None = None,\n        maximum: int = 1000,\n        resume: str | None = None,\n    ) -> Iterator[dict]:\n        assert user_handle is not None or user_id is not None\n        if user_id is None:\n            user = self.lookup(user_handle)\n            assert user is not None, \"lookup returned no user\"\n            user_id = user[\"id\"]\n\n        n_output = 0\n        for followers_batch in self._get_paginated(\n            f\"/v1/accounts/{user_id}/followers\", resume=resume\n        ):\n            for f in followers_batch:\n                yield f\n                n_output += 1\n                if maximum is not None and n_output >= maximum:\n                    return\n\n    def user_following(\n        self,\n        user_handle: str | None = None,\n        user_id: str | None = None,\n        maximum: int = 1000,\n        resume: 
str | None = None,\n    ) -> Iterator[dict]:\n        assert user_handle is not None or user_id is not None\n        if user_id is None:\n            user = self.lookup(user_handle)\n            assert user is not None, \"lookup returned no user\"\n            user_id = user[\"id\"]\n\n        n_output = 0\n        for followers_batch in self._get_paginated(\n            f\"/v1/accounts/{user_id}/following\", resume=resume\n        ):\n            for f in followers_batch:\n                yield f\n                n_output += 1\n                if maximum is not None and n_output >= maximum:\n                    return\n\n    def pull_statuses(\n        self,\n        username: str | None = None,\n        replies: bool = False,\n        verbose: bool = False,\n        created_after: datetime | None = None,\n        since_id: str | int | None = None,\n        pinned: bool = False,\n        created_before: datetime | None = None,\n        *,\n        user_id: str | None = None,\n    ) -> Iterator[dict]:\n        \"\"\"Pull the given user's statuses.\n\n        Pass either `username` or `user_id`. Supplying `user_id` directly skips\n        an extra `lookup` call, which matters when `lookup` is not available\n        (e.g. in public mode, if Truth Social gates that endpoint).\n\n        Params:\n            created_after  : timezone aware datetime object (lower bound, exclusive)\n            created_before : timezone aware datetime object (upper bound). 
The time\n                             component is rounded up to end-of-day UTC, so passing\n                             `2024-11-07T15:30:00Z` widens to `2024-11-07T23:59:59.999999Z`.\n            since_id       : number or string\n\n        Yields posts in reverse chronological order.\n        \"\"\"\n        self.__check_login()\n        if user_id is None:\n            if username is None:\n                raise ValueError(\"pull_statuses requires either `username` or `user_id`.\")\n            user = self.lookup(username)\n            if user is None:\n                return\n            user_id = user[\"id\"]\n\n        params: dict = {}\n        if created_before is not None:\n            # Mastodon snowflake ids encode the timestamp in the high bits, so a\n            # date upper bound translates directly to a `max_id` filter — the\n            # server walks back from that point instead of us fetching newer\n            # posts only to discard them in the loop below.\n            params[\"max_id\"] = str(date_to_bound(created_before, \"end\"))\n        page_counter = 0\n        keep_going = True\n        while keep_going:\n            try:\n                url = f\"/v1/accounts/{user_id}/statuses\"\n                if pinned:\n                    url += \"?pinned=true&with_muted=true\"\n                elif not replies:\n                    url += \"?exclude_replies=true\"\n                if verbose:\n                    logger.debug(\"--------------------------\")\n                    logger.debug(f\"{url} {params}\")\n                result = self._get(url, params=params)\n                page_counter += 1\n            except json.JSONDecodeError as e:\n                logger.error(f\"Unable to pull user #{user_id}'s statuses': {e}\")\n                break\n            except CFBlockException:\n                raise\n            except Exception as e:\n                logger.error(f\"Misc. 
error while pulling statuses for {user_id}: {e}\")\n                break\n\n            if result is None:\n                break\n            if isinstance(result, dict) and \"error\" in result:\n                logger.error(\n                    f\"API returned an error while pulling user #{user_id}'s statuses: {result}\"\n                )\n                break\n            if not isinstance(result, list):\n                logger.error(f\"Result is not a list (it's a {type(result)}): {result}\")\n                break\n            if len(result) == 0:\n                break\n\n            posts: list[dict] = sorted(\n                cast(list[dict], result), key=lambda k: k[\"id\"], reverse=True\n            )  # reverse chronological order (recent first, older last)\n            params[\"max_id\"] = posts[-1][\n                \"id\"\n            ]  # when pulling the next page, get posts before this (the oldest)\n\n            if verbose:\n                logger.debug(f\"PAGE: {page_counter}\")\n\n            if pinned:  # assume single page\n                keep_going = False\n\n            for post in posts:\n                post[\"_pulled\"] = datetime.now().isoformat()\n\n                # only keep posts created after the specified date\n                # exclude posts created before the specified date\n                # since the page is listed in reverse chronology, we don't need any remaining posts on this page either\n                post_at = date_parse.parse(post[\"created_at\"]).replace(tzinfo=UTC)\n                if (created_after and post_at <= created_after) or (\n                    since_id and int(post[\"id\"]) <= int(since_id)\n                ):\n                    keep_going = False  # stop the loop, request no more pages\n                    break  # do not yeild this post or remaining (older) posts on this page\n\n                if verbose:\n                    logger.debug(f\"{post['id']} {post['created_at']}\")\n\n                
yield post\n\n    def get_auth_id(self, username: str, password: str) -> str:\n        \"\"\"Logs in to Truth account and returns the session token\"\"\"\n        url = BASE_URL + \"/oauth/v2/token\"\n        try:\n            payload = {\n                \"client_id\": CLIENT_ID,\n                \"client_secret\": CLIENT_SECRET,\n                \"grant_type\": \"password\",\n                \"username\": username,\n                \"password\": password,\n                \"redirect_uri\": \"urn:ietf:wg:oauth:2.0:oob\",\n                \"scope\": \"read\",\n            }\n\n            sess_req = requests.request(\n                \"POST\",\n                url,\n                json=payload,\n                proxies=proxies,\n                impersonate=IMPERSONATE_TARGET,\n                headers={\n                    \"User-Agent\": USER_AGENT,\n                },\n            )\n\n            # Check for 403 errors and identify the specific type\n            if sess_req.status_code == 403:\n                response_text = sess_req.text.lower()\n\n                # Check for geographic restriction\n                if \"unavailable in your area\" in response_text:\n                    logger.error(\"Geographic restriction detected\")\n                    raise GeoblockException(\"Truth Social is unavailable in your area.\")\n\n                # Check for Cloudflare block\n                if \"you have been blocked\" in response_text:\n                    logger.error(\"Cloudflare block detected\")\n                    raise CFBlockException(\"Request blocked by Cloudflare.\")\n\n                # Generic 403 error\n                logger.error(f\"403 Forbidden: {response_text[:200]}\")\n                raise LoginErrorException(\n                    f\"Authentication forbidden (403). 
Response: {response_text[:200]}\"\n                )\n\n            sess_req.raise_for_status()\n        except requests.RequestsError as e:\n            logger.error(f\"Failed login request: {e!s}\")\n            raise LoginErrorException(\"Cannot authenticate to Truth Social.\") from e\n\n        if not sess_req.json()[\"access_token\"]:\n            raise ValueError(\"Invalid truthsocial.com credentials provided!\")\n\n        return sess_req.json()[\"access_token\"]\n"
  },
  {
    "path": "truthbrush/cli.py",
    "content": "\"\"\"Defines the CLI for Truthbrush.\"\"\"\n\nimport datetime\nimport json\n\nimport click\n\nfrom .api import Api\n\n\n@click.group()\n@click.option(\n    \"--no-auth\",\n    is_flag=True,\n    default=False,\n    help=\"Run without authentication. Only public endpoints will succeed.\",\n)\n@click.pass_context\ndef cli(ctx: click.Context, no_auth: bool):\n    \"\"\"This is an API client for Truth Social.\"\"\"\n    ctx.ensure_object(dict)\n    ctx.obj[\"api\"] = Api(require_auth=not no_auth)\n\n\n@cli.command()\n@click.argument(\"group_id\")\n@click.option(\"--limit\", default=20, help=\"Limit the number of items returned\", type=int)\n@click.pass_context\ndef groupposts(ctx: click.Context, group_id: str, limit: int):\n    \"\"\"Pull posts from group timeline\"\"\"\n    print(json.dumps(ctx.obj[\"api\"].group_posts(group_id, limit)))\n\n\n@cli.command()\n@click.pass_context\ndef trends(ctx: click.Context):\n    \"\"\"Pull trendy Truths.\"\"\"\n    print(json.dumps(ctx.obj[\"api\"].trending()))\n\n\n@cli.command()\n@click.pass_context\ndef tags(ctx: click.Context):\n    \"\"\"Pull trendy tags.\"\"\"\n    print(json.dumps(ctx.obj[\"api\"].tags()))\n\n\n@cli.command()\n@click.pass_context\ndef grouptags(ctx: click.Context):\n    \"\"\"Pull group tags.\"\"\"\n    print(json.dumps(ctx.obj[\"api\"].group_tags()))\n\n\n@cli.command()\n@click.pass_context\ndef grouptrends(ctx: click.Context):\n    \"\"\"Pull group trends.\"\"\"\n    print(json.dumps(ctx.obj[\"api\"].trending_groups()))\n\n\n@cli.command()\n@click.pass_context\ndef groupsuggest(ctx: click.Context):\n    \"\"\"Pull group suggestions.\"\"\"\n    print(json.dumps(ctx.obj[\"api\"].suggested_groups()))\n\n\n@cli.command()\n@click.argument(\"handle\")\n@click.pass_context\ndef user(ctx: click.Context, handle: str):\n    \"\"\"Pull a user's metadata.\"\"\"\n    print(json.dumps(ctx.obj[\"api\"].lookup(handle)))\n\n\n@cli.command()\n@click.argument(\"query\")\n@click.option(\n    
\"--searchtype\",\n    help=\"Type of search query (accounts, statuses, groups, or hashtags)\",\n    type=click.Choice([\"accounts\", \"statuses\", \"hashtags\", \"groups\"]),\n)\n@click.option(\"--limit\", default=40, help=\"Limit the number of items returned\", type=int)\n@click.option(\"--resolve\", help=\"Resolve\", type=bool)\n@click.option(\n    \"--start-date\", default=None, help=\"Start date for search results (e.g. 2026-01-01)\", type=str\n)\n@click.option(\n    \"--end-date\", default=None, help=\"End date for search results (e.g. 2026-03-01)\", type=str\n)\n@click.pass_context\ndef search(\n    ctx: click.Context,\n    searchtype: str,\n    query: str,\n    limit: int,\n    resolve: bool,\n    start_date: str,\n    end_date: str,\n):\n    \"\"\"Search for users, statuses, groups, or hashtags.\"\"\"\n    for page in ctx.obj[\"api\"].search(\n        searchtype, query, limit, resolve, start_date=start_date, end_date=end_date\n    ):\n        print(json.dumps(page[searchtype]))\n\n\n@cli.command()\n@click.pass_context\ndef suggestions(ctx: click.Context):\n    \"\"\"Pull the list of suggested users.\"\"\"\n    print(json.dumps(ctx.obj[\"api\"].suggested()))\n\n\n@cli.command()\n@click.pass_context\ndef ads(ctx: click.Context):\n    \"\"\"Pull ads.\"\"\"\n    print(json.dumps(ctx.obj[\"api\"].ads()))\n\n\n# @cli.command()\n# @click.argument(\"handle\")\n# @click.option(\"--maximum\", help=\"the maximum number of followers to pull\", type=int)\n# @click.option(\n#     \"--resume\",\n#     help=\"the `max_id` cursor to resume from, if necessary (pull this from logs to resume a failed/stalled export)\",\n#     type=str,\n# )\n# def followers(handle: str, maximum: int = None, resume: str = None):\n#     \"\"\"Pull a user's followers.\"\"\"\n\n#     for follower in api.user_followers(handle, maximum=maximum, resume=resume):\n#         print(json.dumps(follower))\n\n\n# @cli.command()\n# @click.argument(\"handle\")\n# @click.option(\n#     \"--maximum\", 
help=\"the maximum number of followed users to pull\", type=int\n# )\n# @click.option(\n#     \"--resume\",\n#     help=\"the `max_id` cursor to resume from, if necessary (pull this from logs to resume a failed/stalled export)\",\n#     type=str,\n# )\n# def following(handle: str, maximum: int = None, resume: str = None):\n#     \"\"\"Pull users a given user follows.\"\"\"\n\n#     for followed in api.user_following(handle, maximum=maximum, resume=resume):\n#         print(json.dumps(followed))\n\n\n@cli.command()\n@click.argument(\"username\")\n@click.option(\n    \"--replies/--no-replies\",\n    default=False,\n    help=\"Include replies when pulling posts (defaults to no replies)\",\n)\n@click.option(\n    \"--created-after\",\n    default=None,\n    help=\"Only pull posts created on or after the specified datetime, e.g. 2021-10-02 or 2011-11-04T00:05:23+04:00 (defaults to none). If a timezone is not specified, UTC is assumed.\",\n    type=datetime.datetime.fromisoformat,\n)\n@click.option(\n    \"--created-before\",\n    default=None,\n    help=\"Only pull posts created on or before the specified datetime, e.g. 2021-10-02 or 2011-11-04T00:05:23+04:00 (defaults to none). 
If a timezone is not specified, UTC is assumed.\",\n    type=datetime.datetime.fromisoformat,\n)\n@click.option(\"--pinned/--all\", default=False, help=\"Only pull pinned posts (defaults to all)\")\n@click.pass_context\ndef statuses(\n    ctx: click.Context,\n    username: str,\n    replies: bool = False,\n    created_after: datetime.datetime | None = None,\n    created_before: datetime.datetime | None = None,\n    pinned: bool = False,\n):\n    \"\"\"Pull a user's statuses\"\"\"\n    # Assume UTC if no timezone is specified\n    if created_after is not None and created_after.tzinfo is None:\n        created_after = created_after.replace(tzinfo=datetime.UTC)\n    if created_before is not None and created_before.tzinfo is None:\n        created_before = created_before.replace(tzinfo=datetime.UTC)\n\n    for page in ctx.obj[\"api\"].pull_statuses(\n        username,\n        created_after=created_after,\n        created_before=created_before,\n        replies=replies,\n        pinned=pinned,\n    ):\n        print(json.dumps(page))\n\n\n@cli.command()\n@click.argument(\"post\")\n@click.option(\"--includeall\", is_flag=True, help=\"return all comments on post.\")\n@click.argument(\"top_num\")\n@click.pass_context\ndef likes(ctx: click.Context, post: str, includeall: bool, top_num: int):\n    \"\"\"Pull the top_num most recent users who liked the post.\"\"\"\n    for page in ctx.obj[\"api\"].user_likes(post, includeall, top_num):\n        print(json.dumps(page))\n\n\n@cli.command()\n@click.argument(\"post\")\n@click.option(\"--includeall\", is_flag=True, help=\"return all comments on post. 
Overrides top_num.\")\n@click.option(\"--onlyfirst\", is_flag=True, help=\"return only direct replies to specified post\")\n@click.argument(\"top_num\")\n@click.pass_context\ndef comments(ctx: click.Context, post: str, includeall: bool, onlyfirst: bool, top_num: int = 40):\n    \"\"\"Pull the top_num comments on a post (defaults to all users, including replies).\"\"\"\n    for page in ctx.obj[\"api\"].pull_comments(post, includeall, onlyfirst, top_num):\n        print(page)\n"
  }
]