Repository: sherlock-project/sherlock Branch: master Commit: 574aeb4ac527 Files: 50 Total size: 282.4 KB Directory structure: gitextract_07lk6fy_/ ├── .actor/ │ ├── Dockerfile │ ├── README.md │ ├── actor.json │ ├── actor.sh │ ├── dataset_schema.json │ └── input_schema.json ├── .dockerignore ├── .editorconfig ├── .github/ │ ├── CODEOWNERS │ ├── FUNDING.yml │ ├── ISSUE_TEMPLATE/ │ │ ├── bug-report.yml │ │ ├── config.yml │ │ ├── false-negative.yml │ │ ├── false-positive.yml │ │ ├── feature-request.yml │ │ └── site-request.yml │ ├── SECURITY.md │ └── workflows/ │ ├── exclusions.yml │ ├── regression.yml │ ├── update-site-list.yml │ └── validate_modified_targets.yml ├── .gitignore ├── Dockerfile ├── LICENSE ├── devel/ │ ├── site-list.py │ └── summarize_site_validation.py ├── docs/ │ ├── CODE_OF_CONDUCT.md │ ├── README.md │ ├── pyproject/ │ │ └── README.md │ └── removed-sites.md ├── pyproject.toml ├── pytest.ini ├── sherlock_project/ │ ├── __init__.py │ ├── __main__.py │ ├── notify.py │ ├── py.typed │ ├── resources/ │ │ ├── data.json │ │ └── data.schema.json │ ├── result.py │ ├── sherlock.py │ └── sites.py ├── tests/ │ ├── conftest.py │ ├── few_test_basic.py │ ├── sherlock_interactives.py │ ├── test_manifest.py │ ├── test_probes.py │ ├── test_ux.py │ ├── test_validate_targets.py │ └── test_version.py └── tox.ini ================================================ FILE CONTENTS ================================================ ================================================ FILE: .actor/Dockerfile ================================================ FROM sherlock/sherlock as sherlock # Install Node.js RUN apt-get update; apt-get install curl gpg -y RUN mkdir -p /etc/apt/keyrings RUN curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg RUN echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_20.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list RUN apt-get update && apt-get install -y curl bash git jq jo xz-utils nodejs # Install Apify CLI (node.js) for the Actor Runtime RUN npm -g install apify-cli # Install Dependencies for the Actor Shell Script RUN apt-get update && apt-get install -y bash jq jo xz-utils nodejs # Copy Actor dir with the actorization shell script COPY .actor/ .actor ENTRYPOINT [".actor/actor.sh"] ================================================ FILE: .actor/README.md ================================================ # Sherlock Actor on Apify [![Sherlock Actor](https://apify.com/actor-badge?actor=netmilk/sherlock)](https://apify.com/netmilk/sherlock?fpr=sherlock) This Actor wraps the [Sherlock Project](https://sherlockproject.xyz/) to provide serverless username reconnaissance across social networks in the cloud. It helps you find usernames across multiple social media platforms without installing and running the tool locally. ## What are Actors? [Actors](https://docs.apify.com/platform/actors?fpr=sherlock) are serverless microservices running on the [Apify Platform](https://apify.com/?fpr=sherlock). They are based on the [Actor SDK](https://docs.apify.com/sdk/js?fpr=sherlock) and can be found in the [Apify Store](https://apify.com/store?fpr=sherlock). Learn more about Actors in the [Apify Whitepaper](https://whitepaper.actor?fpr=sherlock). ## Usage ### Apify Console 1. Go to the Apify Actor page 2. Click "Run" 3. In the input form, fill in **Username(s)** to search for 4. The Actor will run and produce its outputs in the default datastore ### Apify CLI ```bash apify call YOUR_USERNAME/sherlock --input='{ "usernames": ["johndoe", "janedoe"] }' ``` ### Using Apify API ```bash curl --request POST \ --url "https://api.apify.com/v2/acts/YOUR_USERNAME~sherlock/run" \ --header 'Content-Type: application/json' \ --header 'Authorization: Bearer YOUR_API_TOKEN' \ --data '{ "usernames": ["johndoe", "janedoe"], } }' ``` ## Input Parameters The Actor accepts a JSON schema with the following structure: | Field | Type | Required | Default | Description | |-------|------|----------|---------|-------------| | `usernames` | array | Yes | - | List of usernames to search for | | `usernames[]` | string | Yes | "json" | Username to search for | ### Example Input ```json { "usernames": ["techuser", "designuser"], } ``` ## Output The Actor provides three types of outputs: ### Dataset Record* | Field | Type | Required | Description | |-------|------|----------|-------------| | `username` | string | Yes | Username the search was conducted for | | `links` | array | Yes | Array with found links to the social media | | `links[]`| string | No | URL to the account ### Example Dataset Item (JSON) ```json { "username": "johndoe", "links": [ "https://github.com/johndoe" ] } ``` ## Performance & Resources - **Memory Requirements**: - Minimum: 512 MB RAM - Recommended: 1 GB RAM for multiple usernames - **Processing Time**: - Single username: ~1-2 minutes - Multiple usernames: 2-5 minutes - Varies based on number of sites checked and response times For more help, check the [Sherlock Project documentation](https://github.com/sherlock-project/sherlock) or raise an issue in the Actor's repository. ================================================ FILE: .actor/actor.json ================================================ { "actorSpecification": 1, "name": "sherlock", "version": "0.0", "buildTag": "latest", "environmentVariables": {}, "dockerFile": "./Dockerfile", "dockerContext": "../", "input": "./input_schema.json", "storages": { "dataset": "./dataset_schema.json" } } ================================================ FILE: .actor/actor.sh ================================================ #!/bin/bash INPUT=`apify actor:get-input | jq -r .usernames[] | xargs echo` echo "INPUT: $INPUT" sherlock $INPUT for username in $INPUT; do # escape the special meaning leading characters # https://github.com/jpmens/jo/blob/master/jo.md#description safe_username=$(echo $username | sed 's/^@/\\@/' | sed 's/^:/\\:/' | sed 's/%/\\%/') echo "pushing results for username: $username, content:" cat $username.txt sed '$d' $username.txt | jo -a | jo username=$safe_username links:=- | apify actor:push-data done ================================================ FILE: .actor/dataset_schema.json ================================================ { "actorSpecification": 1, "fields":{ "title": "Sherlock actor input", "description": "This is actor input schema", "type": "object", "schemaVersion": 1, "properties": { "links": { "title": "Links to accounts", "type": "array", "description": "A list of social media accounts found for the uername" }, "username": { "title": "Lookup username", "type": "string", "description": "Username the lookup was performed for" } }, "required": [ "username", "links" ] }, "views": { "overview": { "title": "Overview", "transformation": { "fields": [ "username", "links" ], }, "display": { "component": "table", "links": { "label": "Links" }, "username":{ "label": "Username" } } } } } ================================================ FILE: .actor/input_schema.json ================================================ { "title": "Sherlock actor input", "description": "This is actor input schema", "type": "object", "schemaVersion": 1, "properties": { "usernames": { "title": "Usernames to hunt down", "type": "array", "description": "A list of usernames to be checked for existence across social media", "editor": "stringList", "prefill": ["johndoe"] } }, "required": [ "usernames" ] } ================================================ FILE: .dockerignore ================================================ .git/ .vscode/ screenshot/ tests/ *.txt !/requirements.txt venv/ devel/ ================================================ FILE: .editorconfig ================================================ root = true [*] indent_style = space indent_size = 2 end_of_line = lf charset = utf-8 trim_trailing_whitespace = true insert_final_newline = true curly_bracket_next_line = false spaces_around_operators = true [*.{markdown,md}] trim_trailing_whitespace = false [*.py] indent_size = 4 quote_type = double ================================================ FILE: .github/CODEOWNERS ================================================ ### REPOSITORY /.github/CODEOWNERS @sdushantha @ppfeister /.github/FUNDING.yml @sdushantha /LICENSE @sdushantha ### PACKAGING # Changes made to these items without code owner approval may negatively # impact packaging pipelines. /pyproject.toml @ppfeister @sdushantha ### REGRESSION /.github/workflows/regression.yml @ppfeister /tox.ini @ppfeister /pytest.ini @ppfeister /tests/ @ppfeister ================================================ FILE: .github/FUNDING.yml ================================================ github: [ sdushantha, ppfeister, matheusfelipeog ] ================================================ FILE: .github/ISSUE_TEMPLATE/bug-report.yml ================================================ name: Bug report description: File a bug report labels: ["bug"] body: - type: dropdown id: package attributes: label: Installation method description: | Some packages are maintained by the community, rather than by the Sherlock Project. Knowing which packages are affected helps us diagnose package-specific bugs. options: - Select one - PyPI (via pip) - Homebrew - Docker - Kali repository (via apt) - Built from source - Other (indicate below) validations: required: true - type: input id: package-version attributes: label: Package version description: | Knowing the version of the package you are using can help us diagnose your issue more quickly. You can find the version by running `sherlock --version`. validations: required: true - type: textarea id: description attributes: label: Description description: | Detailed descriptions that help contributors understand and reproduce your bug are much more likely to lead to a fix. Please include the following information: - What you were trying to do - What you expected to happen - What actually happened placeholder: | When doing {action}, the expected result should be {expected result}. When doing {action}, however, the actual result was {actual result}. This is undesirable because {reason}. validations: required: true - type: textarea id: steps-to-reproduce attributes: label: Steps to reproduce description: Write a step by step list that will allow us to reproduce this bug. placeholder: | 1. Do something 2. Then do something else validations: required: true - type: textarea id: additional-info attributes: label: Additional information description: If you have some additional information, please write it here. validations: required: false - type: checkboxes id: terms attributes: label: Code of Conduct description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md). options: - label: I agree to follow this project's Code of Conduct required: true ================================================ FILE: .github/ISSUE_TEMPLATE/config.yml ================================================ blank_issues_enabled: false ================================================ FILE: .github/ISSUE_TEMPLATE/false-negative.yml ================================================ name: False negative description: Report a site that is returning false negative results title: "False negative for: " labels: ["false negative"] body: - type: markdown attributes: value: | Please include the site name in the title of your issue. Submit **one site per report** for faster resolution. If you have multiple sites in the same report, it often takes longer to fix. - type: textarea id: additional-info attributes: label: Additional info description: If you know why the site is returning false negatives, or noticed any patterns, please explain. placeholder: | Reddit is returning false negatives because... validations: required: false - type: checkboxes id: terms attributes: label: Code of Conduct description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md). options: - label: I agree to follow this project's Code of Conduct required: true ================================================ FILE: .github/ISSUE_TEMPLATE/false-positive.yml ================================================ name: False positive description: Report a site that is returning false positive results title: "False positive for: " labels: ["false positive"] body: - type: markdown attributes: value: | Please include the site name in the title of your issue. Submit **one site per report** for faster resolution. If you have multiple sites in the same report, it often takes longer to fix. - type: textarea id: additional-info attributes: label: Additional info description: If you know why the site is returning false positives, or noticed any patterns, please explain. placeholder: | Reddit is returning false positives because... False positives only occur after x searches... validations: required: false - type: checkboxes id: terms attributes: label: Code of Conduct description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md). options: - label: I agree to follow this project's Code of Conduct required: true ================================================ FILE: .github/ISSUE_TEMPLATE/feature-request.yml ================================================ name: Feature request description: Request a feature or enhancement labels: ["enhancement"] body: - type: markdown attributes: value: | Concise and thoughtful titles help other contributors find and add your requested feature. - type: textarea id: description attributes: label: Description description: Describe the feature you are requesting placeholder: I'd like Sherlock to be able to do xyz validations: required: true - type: checkboxes id: terms attributes: label: Code of Conduct description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md). options: - label: I agree to follow this project's Code of Conduct required: true ================================================ FILE: .github/ISSUE_TEMPLATE/site-request.yml ================================================ name: Reuest a new website description: Request that Sherlock add support for a new website title: "Requesting support for: " labels: ["site support request"] body: - type: markdown attributes: value: | Ensure that the site name is in the title of your request. Requests without this information will be **closed**. - type: input id: site-url attributes: label: Site URL description: | What is the URL of the website indicated in your title? Websites sometimes have similar names. This helps constributors find the correct site. placeholder: https://reddit.com validations: required: true - type: textarea id: additional-info attributes: label: Additional info description: If you have suggestions on how Sherlock should detect for usernames, please explain below placeholder: Sherlock can detect if a username exists on Reddit by checking for... validations: required: false - type: checkboxes id: terms attributes: label: Code of Conduct description: By submitting this issue, you agree to follow our [Code of Conduct](https://github.com/sherlock-project/sherlock/blob/master/docs/CODE_OF_CONDUCT.md). options: - label: I agree to follow this project's Code of Conduct required: true ================================================ FILE: .github/SECURITY.md ================================================ ## Security Policy ### Supported Versions Sherlock is a forward looking project. Only the latest and most current version is supported. ### Reporting a Vulnerability Security concerns can be submitted [__here__][report-url] without risk of exposing sensitive information. For issues that are low severity or unlikely to see exploitation, public issues are often acceptable. [report-url]: https://github.com/sherlock-project/sherlock/security/advisories/new ================================================ FILE: .github/workflows/exclusions.yml ================================================ name: Exclusions Updater on: schedule: #- cron: '0 5 * * 0' # Runs at 05:00 every Sunday - cron: '0 5 * * *' # Runs at 05:00 every day workflow_dispatch: jobs: update-exclusions: runs-on: ubuntu-latest steps: - name: Checkout repository uses: actions/checkout@v5 - name: Set up Python uses: actions/setup-python@v6 with: python-version: '3.13' - name: Install Poetry uses: abatilo/actions-poetry@v4 with: poetry-version: 'latest' - name: Install dependencies run: | poetry install --no-interaction --with dev - name: Run false positive tests run: | $(poetry env activate) pytest -q --tb no -m validate_targets_fp -n 20 | tee fp_test_results.txt deactivate - name: Parse false positive detections by desired categories run: | grep -oP '(?<=test_false_pos\[)[^\]]+(?=\].*result was Claimed)' fp_test_results.txt \ | sort -u > false_positive_exclusions.txt grep -oP '(?<=test_false_pos\[)[^\]]+(?=\].*result was WAF)' fp_test_results.txt \ | sort -u > waf_hits.txt - name: Detect if exclusions list changed id: detect_changes run: | git fetch origin exclusions || true if git show origin/exclusions:false_positive_exclusions.txt >/dev/null 2>&1; then # If the exclusions branch and file exist, compare if git diff --quiet origin/exclusions -- false_positive_exclusions.txt; then echo "exclusions_changed=false" >> "$GITHUB_OUTPUT" else echo "exclusions_changed=true" >> "$GITHUB_OUTPUT" fi else # If the exclusions branch or file do not exist, treat as changed echo "exclusions_changed=true" >> "$GITHUB_OUTPUT" fi - name: Quantify and display results run: | FP_COUNT=$(wc -l < false_positive_exclusions.txt | xargs) WAF_COUNT=$(wc -l < waf_hits.txt | xargs) echo ">>> Found $FP_COUNT false positives and $WAF_COUNT WAF hits." echo ">>> False positive exclusions:" && cat false_positive_exclusions.txt echo ">>> WAF hits:" && cat waf_hits.txt - name: Commit and push exclusions list if: steps.detect_changes.outputs.exclusions_changed == 'true' run: | git config user.name "Paul Pfeister (automation)" git config user.email "code@pfeister.dev" mv false_positive_exclusions.txt false_positive_exclusions.txt.tmp git add -f false_positive_exclusions.txt.tmp # -f required to override .gitignore git stash push -m "stash false positive exclusion list" -- false_positive_exclusions.txt.tmp git fetch origin exclusions || true # Allows creation of branch if deleted git checkout -B exclusions origin/exclusions || (git checkout --orphan exclusions && git rm -rf .) git stash pop || true mv false_positive_exclusions.txt.tmp false_positive_exclusions.txt git rm -f false_positive_exclusions.txt.tmp || true git add false_positive_exclusions.txt git commit -m "auto: update exclusions list" || echo "No changes to commit" git push origin exclusions ================================================ FILE: .github/workflows/regression.yml ================================================ name: Regression Testing on: pull_request: branches: - master - release/** paths: - '.github/workflows/regression.yml' - '**/*.json' - '**/*.py' - '**/*.ini' - '**/*.toml' - 'Dockerfile' push: branches: - master - release/** paths: - '.github/workflows/regression.yml' - '**/*.json' - '**/*.py' - '**/*.ini' - '**/*.toml' - 'Dockerfile' jobs: tox-lint: runs-on: ubuntu-latest # Linting is run through tox to ensure that the same linter # is used by local runners steps: - uses: actions/checkout@v6 - name: Set up linting environment uses: actions/setup-python@v6 with: python-version: '3.x' - name: Install tox and related dependencies run: | python -m pip install --upgrade pip pip install tox - name: Run tox linting environment run: tox -e lint tox-matrix: runs-on: ${{ matrix.os }} strategy: # We want to know what specific versions it fails on fail-fast: false matrix: os: [ ubuntu-latest, windows-latest, macos-latest, ] python-version: [ '3.10', '3.11', '3.12', '3.13', '3.14', '3.14t', ] steps: - uses: actions/checkout@v6 - name: Set up environment ${{ matrix.python-version }} uses: actions/setup-python@v6 with: python-version: ${{ matrix.python-version }} - name: Install tox and related dependencies run: | python -m pip install --upgrade pip pip install tox pip install tox-gh-actions - name: Run tox run: tox docker-build-test: runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v6 - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 - name: Get version from pyproject.toml id: get-version run: | VERSION=$(grep -m1 'version = ' pyproject.toml | cut -d'"' -f2) echo "version=$VERSION" >> $GITHUB_OUTPUT - name: Build Docker image run: | docker build \ --build-arg VERSION_TAG=${{ steps.get-version.outputs.version }} \ -t sherlock-test:latest . - name: Test Docker image runs run: docker run --rm sherlock-test:latest --version ================================================ FILE: .github/workflows/update-site-list.yml ================================================ name: Update Site List # Trigger the workflow when changes are pushed to the main branch # and the changes include the sherlock_project/resources/data.json file on: push: branches: - master paths: - sherlock_project/resources/data.json jobs: sync-json-data: # Use the latest version of Ubuntu as the runner environment runs-on: ubuntu-latest steps: # Check out the code at the specified pull request head commit - name: Checkout code uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 # Install Python 3 - name: Install Python uses: actions/setup-python@v5 with: python-version: '3.x' # Execute the site_list.py Python script - name: Execute site-list.py run: python devel/site-list.py - name: Pushes to another repository uses: sdushantha/github-action-push-to-another-repository@main env: SSH_DEPLOY_KEY: ${{ secrets.SSH_DEPLOY_KEY }} API_TOKEN_GITHUB: ${{ secrets.API_TOKEN_GITHUB }} with: source-directory: 'output' destination-github-username: 'sherlock-project' commit-message: 'Updated site list' destination-repository-name: 'sherlockproject.xyz' user-email: siddharth.dushantha@gmail.com target-branch: master ================================================ FILE: .github/workflows/validate_modified_targets.yml ================================================ name: Modified Target Validation on: pull_request_target: branches: - master paths: - "sherlock_project/resources/data.json" jobs: validate-modified-targets: runs-on: ubuntu-latest permissions: contents: read pull-requests: write steps: - name: Checkout repository uses: actions/checkout@v5 with: # Checkout the base branch but fetch all history to avoid a second fetch call ref: ${{ github.base_ref }} fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v6 with: python-version: "3.13" - name: Install Poetry uses: abatilo/actions-poetry@v4 with: poetry-version: "latest" - name: Install dependencies run: | poetry install --no-interaction --with dev - name: Prepare JSON versions for comparison run: | # Fetch only the PR's branch head (single network call in this step) git fetch origin pull/${{ github.event.pull_request.number }}/head:pr # Find the merge-base commit between the target branch and the PR branch MERGE_BASE=$(git merge-base origin/${{ github.base_ref }} pr) echo "Comparing PR head against merge-base commit: $MERGE_BASE" # Safely extract the file from the PR's head and the merge-base commit git show pr:sherlock_project/resources/data.json > data.json.head git show $MERGE_BASE:sherlock_project/resources/data.json > data.json.base # CRITICAL FIX: Overwrite the checked-out data.json with the one from the PR # This ensures that pytest runs against the new, updated file. cp data.json.head sherlock_project/resources/data.json - name: Discover modified targets id: discover-modified run: | CHANGED=$( python - <<'EOF' import json import sys try: with open("data.json.base") as f: base = json.load(f) with open("data.json.head") as f: head = json.load(f) except FileNotFoundError as e: print(f"Error: Could not find {e.filename}", file=sys.stderr) sys.exit(1) except json.JSONDecodeError as e: print(f"Error: Could not decode JSON from a file - {e}", file=sys.stderr) sys.exit(1) changed = [] for k, v in head.items(): if k not in base or base[k] != v: changed.append(k) print(",".join(sorted(changed))) EOF ) # Preserve changelist echo -e ">>> Changed targets: \n$(echo $CHANGED | tr ',' '\n')" echo "changed_targets=$CHANGED" >> "$GITHUB_OUTPUT" - name: Validate remote manifest against local schema if: steps.discover-modified.outputs.changed_targets != '' run: | poetry run pytest tests/test_manifest.py::test_validate_manifest_against_local_schema # --- The rest of the steps below are unchanged --- - name: Validate modified targets if: steps.discover-modified.outputs.changed_targets != '' continue-on-error: true run: | poetry run pytest -q --tb no -rA -m validate_targets -n 20 \ --chunked-sites "${{ steps.discover-modified.outputs.changed_targets }}" \ --junitxml=validation_results.xml - name: Prepare validation summary if: steps.discover-modified.outputs.changed_targets != '' id: prepare-summary run: | summary=$( poetry run python devel/summarize_site_validation.py validation_results.xml || echo "Failed to generate summary of test results" ) echo "$summary" > validation_summary.md - name: Announce validation results if: steps.discover-modified.outputs.changed_targets != '' uses: actions/github-script@v8 with: script: | const fs = require('fs'); const body = fs.readFileSync('validation_summary.md', 'utf8'); await github.rest.issues.createComment({ issue_number: context.payload.pull_request.number, owner: context.repo.owner, repo: context.repo.repo, body: body, }); - name: This step shows as ran when no modifications are found if: steps.discover-modified.outputs.changed_targets == '' run: | echo "No modified targets found" ================================================ FILE: .gitignore ================================================ # Virtual Environments venv/ bin/ lib/ pyvenv.cfg poetry.lock # Regression Testing .coverage .tox/ # Editor Configurations .vscode/ .idea/ # Python __pycache__/ # Pip src/ # Devel, Build, and Installation *.egg-info/ dist/** # Jupyter Notebook .ipynb_checkpoints *.ipynb # Output files, except requirements.txt *.txt !requirements.txt # Comma-Separated Values (CSV) Reports *.csv #XLSX Reports *.xlsx # Excluded sites list tests/.excluded_sites # MacOS Folder Metadata File .DS_Store # Vim swap files *.swp ================================================ FILE: Dockerfile ================================================ # Release instructions: # 1. Update the version tag in the Dockerfile to match the version in sherlock/__init__.py # 2. Update the VCS_REF tag to match the tagged version's FULL commit hash # 3. Build image with BOTH latest and version tags # i.e. `docker build -t sherlock/sherlock:0.16.0 -t sherlock/sherlock:latest .` FROM python:3.12-slim-bullseye AS build WORKDIR /sherlock RUN pip3 install --no-cache-dir --upgrade pip FROM python:3.12-slim-bullseye WORKDIR /sherlock ARG VCS_REF= # CHANGE ME ON UPDATE ARG VCS_URL="https://github.com/sherlock-project/sherlock" ARG VERSION_TAG= # CHANGE ME ON UPDATE ENV SHERLOCK_ENV=docker LABEL org.label-schema.vcs-ref=$VCS_REF \ org.label-schema.vcs-url=$VCS_URL \ org.label-schema.name="Sherlock" \ org.label-schema.version=$VERSION_TAG \ website="https://sherlockproject.xyz" RUN pip3 install --no-cache-dir sherlock-project==$VERSION_TAG WORKDIR /sherlock ENTRYPOINT ["sherlock"] ================================================ FILE: LICENSE ================================================ MIT License Copyright (c) 2019 Sherlock Project Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ================================================ FILE: devel/site-list.py ================================================ #!/usr/bin/env python # This module generates the listing of supported sites which can be found in # sites.mdx. It also organizes all the sites in alphanumeric order import json import os DATA_REL_URI: str = "sherlock_project/resources/data.json" DEFAULT_ENCODING = "utf-8" # Read the data.json file with open(DATA_REL_URI, "r", encoding=DEFAULT_ENCODING) as data_file: data: dict = json.load(data_file) # Removes schema-specific keywords for proper processing social_networks = data.copy() social_networks.pop('$schema', None) # Sort the social networks in alphanumeric order social_networks = sorted(social_networks.items()) # Make output dir where the site list will be written os.mkdir("output") # Write the list of supported sites to sites.mdx with open("output/sites.mdx", "w", encoding=DEFAULT_ENCODING) as site_file: site_file.write("---\n") site_file.write("title: 'List of supported sites'\n") site_file.write("sidebarTitle: 'Supported sites'\n") site_file.write("icon: 'globe'\n") site_file.write("description: 'Sherlock currently supports **400+** sites'\n") site_file.write("---\n\n") for social_network, info in social_networks: url_main = info["urlMain"] is_nsfw = "**(NSFW)**" if info.get("isNSFW") else "" site_file.write(f"1. [{social_network}]({url_main}) {is_nsfw}\n") # Overwrite the data.json file with sorted data with open(DATA_REL_URI, "w", encoding=DEFAULT_ENCODING) as data_file: sorted_data = json.dumps(data, indent=2, sort_keys=True) data_file.write(sorted_data) data_file.write("\n") # Keep the newline after writing data print("Finished updating supported site listing!") ================================================ FILE: devel/summarize_site_validation.py ================================================ #!/usr/bin/env python # This module summarizes the results of site validation tests queued by # workflow validate_modified_targets for presentation in Issue comments. from defusedxml import ElementTree as ET import sys from pathlib import Path def summarize_junit_xml(xml_path: Path) -> str: tree = ET.parse(xml_path) root = tree.getroot() suite = root.find('testsuite') pass_message: str = ":heavy_check_mark:   Pass" fail_message: str = ":x:   Fail" if suite is None: raise ValueError("Invalid JUnit XML: No testsuite found") summary_lines: list[str] = [] summary_lines.append("#### Automatic validation of changes\n") summary_lines.append("| Target | F+ Check | F- Check |") summary_lines.append("|---|---|---|") failures = int(suite.get('failures', 0)) errors_detected: bool = False results: dict[str, dict[str, str]] = {} for testcase in suite.findall('testcase'): test_name = testcase.get('name').split('[')[0] site_name = testcase.get('name').split('[')[1].rstrip(']') failure = testcase.find('failure') error = testcase.find('error') if site_name not in results: results[site_name] = {} if test_name == "test_false_neg": results[site_name]['F- Check'] = pass_message if failure is None and error is None else fail_message elif test_name == "test_false_pos": results[site_name]['F+ Check'] = pass_message if failure is None and error is None else fail_message if error is not None: errors_detected = True for result in results: summary_lines.append(f"| {result} | {results[result].get('F+ Check', 'Error!')} | {results[result].get('F- Check', 'Error!')} |") if failures > 0: summary_lines.append("\n___\n" + "\nFailures were detected on at least one updated target. Commits containing accuracy failures" + " will often not be merged (unless a rationale is provided, such as false negatives due to regional differences).") if errors_detected: summary_lines.append("\n___\n" + "\n**Errors were detected during validation. Please review the workflow logs.**") return "\n".join(summary_lines) if __name__ == "__main__": if len(sys.argv) != 2: print("Usage: summarize_site_validation.py ") sys.exit(1) xml_path: Path = Path(sys.argv[1]) if not xml_path.is_file(): print(f"Error: File '{xml_path}' does not exist.") sys.exit(1) summary: str = summarize_junit_xml(xml_path) print(summary) ================================================ FILE: docs/CODE_OF_CONDUCT.md ================================================ # Contributor Covenant Code of Conduct ## Our Pledge We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, caste, color, religion, or sexual identity and orientation. We pledge to act and interact in ways that contribute to an open, welcoming, diverse, inclusive, and healthy community. ## Our Standards Examples of behavior that contributes to a positive environment for our community include: * Demonstrating empathy and kindness toward other people * Being respectful of differing opinions, viewpoints, and experiences * Giving and gracefully accepting constructive feedback * Accepting responsibility and apologizing to those affected by our mistakes, and learning from the experience * Focusing on what is best not just for us as individuals, but for the overall community Examples of unacceptable behavior include: * The use of sexualized language or imagery, and sexual attention or advances of any kind * Trolling, insulting or derogatory comments, and personal or political attacks * Public or private harassment * Publishing others' private information, such as a physical or email address, without their explicit permission * Other conduct which could reasonably be considered inappropriate in a professional setting ## Enforcement Responsibilities Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, offensive, or harmful. Community leaders have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, and will communicate reasons for moderation decisions when appropriate. ## Scope This Code of Conduct applies within all community spaces, and also applies when an individual is officially representing the community in public spaces. Examples of representing our community include using an official e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. ## Enforcement Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the community leaders responsible for enforcement at yahya.arbabi@gmail.com. All complaints will be reviewed and investigated promptly and fairly. All community leaders are obligated to respect the privacy and security of the reporter of any incident. ## Enforcement Guidelines Community leaders will follow these Community Impact Guidelines in determining the consequences for any action they deem in violation of this Code of Conduct: ### 1. Correction **Community Impact**: Use of inappropriate language or other behavior deemed unprofessional or unwelcome in the community. **Consequence**: A private, written warning from community leaders, providing clarity around the nature of the violation and an explanation of why the behavior was inappropriate. A public apology may be requested. ### 2. Warning **Community Impact**: A violation through a single incident or series of actions. **Consequence**: A warning with consequences for continued behavior. No interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, for a specified period of time. This includes avoiding interactions in community spaces as well as external channels like social media. Violating these terms may lead to a temporary or permanent ban. ### 3. Temporary Ban **Community Impact**: A serious violation of community standards, including sustained inappropriate behavior. **Consequence**: A temporary ban from any sort of interaction or public communication with the community for a specified period of time. No public or private interaction with the people involved, including unsolicited interaction with those enforcing the Code of Conduct, is allowed during this period. Violating these terms may lead to a permanent ban. ### 4. Permanent Ban **Community Impact**: Demonstrating a pattern of violation of community standards, including sustained inappropriate behavior, harassment of an individual, or aggression toward or disparagement of classes of individuals. **Consequence**: A permanent ban from any sort of public interaction within the community. ## Attribution This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 2.1, available at [https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. Community Impact Guidelines were inspired by [Mozilla's code of conduct enforcement ladder][Mozilla CoC]. For answers to common questions about this code of conduct, see the FAQ at [https://www.contributor-covenant.org/faq][FAQ]. Translations are available at [https://www.contributor-covenant.org/translations][translations]. [homepage]: https://www.contributor-covenant.org [v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html [Mozilla CoC]: https://github.com/mozilla/diversity [FAQ]: https://www.contributor-covenant.org/faq [translations]: https://www.contributor-covenant.org/translations ================================================ FILE: docs/README.md ================================================


sherlock
Hunt down social media accounts by username across 400+ social networks

Installation    •    Usage    •    Contributing

demo

## Installation > [!WARNING] > Packages for ParrotOS and Ubuntu 24.04, maintained by a third party, appear to be __broken__. > Users of these systems should defer to pipx/pip or Docker. | Method | Notes | | - | - | | `pipx install sherlock-project` | `pip` may be used in place of `pipx` | | `docker run -it --rm sherlock/sherlock` | | `dnf install sherlock-project` | | Community-maintained packages are available for Debian (>= 13), Ubuntu (>= 22.10), Homebrew, Kali, and BlackArch. These packages are not directly supported or maintained by the Sherlock Project. See all alternative installation methods [here](https://sherlockproject.xyz/installation) ## General usage To search for only one user: ```bash sherlock user123 ``` To search for more than one user: ```bash sherlock user1 user2 user3 ``` Accounts found will be stored in an individual text file with the corresponding username (e.g ```user123.txt```). ```console $ sherlock --help usage: sherlock [-h] [--version] [--verbose] [--folderoutput FOLDEROUTPUT] [--output OUTPUT] [--tor] [--unique-tor] [--csv] [--xlsx] [--site SITE_NAME] [--proxy PROXY_URL] [--json JSON_FILE] [--timeout TIMEOUT] [--print-all] [--print-found] [--no-color] [--browse] [--local] [--nsfw] USERNAMES [USERNAMES ...] Sherlock: Find Usernames Across Social Networks (Version 0.14.3) positional arguments: USERNAMES One or more usernames to check with social networks. Check similar usernames using {?} (replace to '_', '-', '.'). optional arguments: -h, --help show this help message and exit --version Display version information and dependencies. --verbose, -v, -d, --debug Display extra debugging information and metrics. --folderoutput FOLDEROUTPUT, -fo FOLDEROUTPUT If using multiple usernames, the output of the results will be saved to this folder. --output OUTPUT, -o OUTPUT If using single username, the output of the result will be saved to this file. --tor, -t Make requests over Tor; increases runtime; requires Tor to be installed and in system path. --unique-tor, -u Make requests over Tor with new Tor circuit after each request; increases runtime; requires Tor to be installed and in system path. --csv Create Comma-Separated Values (CSV) File. --xlsx Create the standard file for the modern Microsoft Excel spreadsheet (xlsx). --site SITE_NAME Limit analysis to just the listed sites. Add multiple options to specify more than one site. --proxy PROXY_URL, -p PROXY_URL Make requests over a proxy. e.g. socks5://127.0.0.1:1080 --json JSON_FILE, -j JSON_FILE Load data from a JSON file or an online, valid, JSON file. --timeout TIMEOUT Time (in seconds) to wait for response to requests (Default: 60) --print-all Output sites where the username was not found. --print-found Output sites where the username was found. --no-color Don't color terminal output --browse, -b Browse to all results on default browser. --local, -l Force the use of the local data.json file. --nsfw Include checking of NSFW sites from default list. ``` ## Apify Actor Usage [![Sherlock Actor](https://apify.com/actor-badge?actor=netmilk/sherlock)](https://apify.com/netmilk/sherlock?fpr=sherlock) Run Sherlock Actor on Apify You can run Sherlock in the cloud without installation using the [Sherlock Actor](https://apify.com/netmilk/sherlock?fpr=sherlock) on [Apify](https://apify.com?fpr=sherlock) free of charge. ``` bash $ echo '{"usernames":["user123"]}' | apify call -so netmilk/sherlock [{ "username": "user123", "links": [ "https://www.1337x.to/user/user123/", ... ] }] ``` Read more about the [Sherlock Actor](../.actor/README.md), including how to use it programmatically via the Apify [API](https://apify.com/netmilk/sherlock/api?fpr=sherlock), [CLI](https://docs.apify.com/cli/?fpr=sherlock) and [JS/TS and Python SDKs](https://docs.apify.com/sdk?fpr=sherlock). ## Credits Thank you to everyone who has contributed to Sherlock! ❤️ contributors ## Star History Sherlock Project Star History Chart ## License MIT © Sherlock Project
Original Creator - [Siddharth Dushantha](https://github.com/sdushantha) [ext_pypi]: https://pypi.org/project/sherlock-project/ [ext_brew]: https://formulae.brew.sh/formula/sherlock ================================================ FILE: docs/pyproject/README.md ================================================



Hunt down social media accounts by username across 400+ social networks

Additional documentation can be found at our GitHub repository

## Usage ```console $ sherlock --help usage: sherlock [-h] [--version] [--verbose] [--folderoutput FOLDEROUTPUT] [--output OUTPUT] [--tor] [--unique-tor] [--csv] [--xlsx] [--site SITE_NAME] [--proxy PROXY_URL] [--json JSON_FILE] [--timeout TIMEOUT] [--print-all] [--print-found] [--no-color] [--browse] [--local] [--nsfw] USERNAMES [USERNAMES ...] ``` To search for only one user: ```bash $ sherlock user123 ``` To search for more than one user: ```bash $ sherlock user1 user2 user3 ```
___

================================================ FILE: docs/removed-sites.md ================================================ # List Of Sites Removed From Sherlock This is a list of sites implemented in such a way that the current design of Sherlock is not capable of determining if a given username exists or not. They are listed here in the hope that things may change in the future so they may be re-included. ## gpodder.net As of 2020-05-25, all usernames are reported as available. The server is returning a HTTP Status 500 (Internal server error) for all queries. ```json "gpodder.net": { "errorType": "status_code", "rank": 2013984, "url": "https://gpodder.net/user/{}", "urlMain": "https://gpodder.net/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Investing.com As of 2020-05-25, all usernames are reported as claimed. Any query against a user seems to be redirecting to a general information page at https://www.investing.com/brokers/. Probably required login before access. ```json "Investing.com": { "errorType": "status_code", "rank": 196, "url": "https://www.investing.com/traders/{}", "urlMain": "https://www.investing.com/", "username_claimed": "jenny", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## AdobeForums As of 2020-04-12, all usernames are reported as available. When I went to the site to see what was going on, usernames that I know existed were redirecting to the main page. I was able to see user profiles without logging in, but the URL was not related to their user name. For example, user "tomke" went to https://community.adobe.com/t5/user/viewprofilepage/user-id/10882613. This can be detected, but it requires a different detection method. ```json "AdobeForums": { "errorType": "status_code", "rank": 59, "url": "https://forums.adobe.com/people/{}", "urlMain": "https://forums.adobe.com/", "username_claimed": "jack", "username_unclaimed": "noonewouldeverusethis77777" }, ``` ## Basecamp As of 2020-02-23, all usernames are reported as not existing. ```json "Basecamp": { "errorMsg": "The account you were looking for doesn't exist", "errorType": "message", "rank": 4914, "url": "https://{}.basecamphq.com", "urlMain": "https://basecamp.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Canva As of 2020-02-23, all usernames are reported as not existing. ```json "Canva": { "errorType": "response_url", "errorUrl": "https://www.canva.com/{}", "rank": 128, "url": "https://www.canva.com/{}", "urlMain": "https://www.canva.com/", "username_claimed": "jenny", "username_unclaimed": "xgtrq" }, ``` ## Pixabay As of 2020-01-21, all usernames are reported as not existing. ```json "Pixabay": { "errorType": "status_code", "rank": 378, "url": "https://pixabay.com/en/users/{}", "urlMain": "https://pixabay.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## NPM-Packages NPM-Packages are not users. ```json "NPM-Package": { "errorType": "status_code", "url": "https://www.npmjs.com/package/{}", "urlMain": "https://www.npmjs.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Pexels As of 2020-01-21, all usernames are reported as not existing. ```json "Pexels": { "errorType": "status_code", "rank": 745, "url": "https://www.pexels.com/@{}", "urlMain": "https://www.pexels.com/", "username_claimed": "bruno", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## RamblerDating As of 2019-12-31, site always times out. ```json "RamblerDating": { "errorType": "response_url", "errorUrl": "https://dating.rambler.ru/page/{}", "rank": 322, "url": "https://dating.rambler.ru/page/{}", "urlMain": "https://dating.rambler.ru/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## YandexMarket As of 2019-12-31, all usernames are reported as existing. ```json "YandexMarket": { "errorMsg": "\u0422\u0443\u0442 \u043d\u0438\u0447\u0435\u0433\u043e \u043d\u0435\u0442", "errorType": "message", "rank": 47, "url": "https://market.yandex.ru/user/{}/achievements", "urlMain": "https://market.yandex.ru/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Codementor As of 2019-12-31, usernames that exist are not detected. ```json "Codementor": { "errorType": "status_code", "rank": 10252, "url": "https://www.codementor.io/@{}", "urlMain": "https://www.codementor.io/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## KiwiFarms As of 2019-12-31, the site gives a 403 for all usernames. You have to be logged into see a profile. ```json "KiwiFarms": { "errorMsg": "The specified member cannot be found", "errorType": "message", "rank": 38737, "url": "https://kiwifarms.net/members/?username={}", "urlMain": "https://kiwifarms.net/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis" }, ``` ## Teknik As of 2019-11-30, the site causes Sherlock to just hang. ```json "Teknik": { "errorMsg": "The user does not exist", "errorType": "message", "rank": 357163, "url": "https://user.teknik.io/{}", "urlMain": "https://teknik.io/", "username_claimed": "red", "username_unclaimed": "noonewouldeverusethis7" } ``` ## Shockwave As of 2019-11-28, usernames that exist give a 503 "Service Unavailable" HTTP Status. ```json "Shockwave": { "errorMsg": "Oh no! You just finished all of the games on the internet!", "errorType": "message", "rank": 35916, "url": "http://www.shockwave.com/member/profiles/{}.jsp", "urlMain": "http://www.shockwave.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis" }, ``` ## Foursquare When usage of automated tool is detected. Whole IP is banned from future requests. There is an error message: > Please verify you are a human > Access to this page has been denied because we believe you are using automation tools to browse the website. ```json "Foursquare": { "errorType": "status_code", "rank": 1843, "url": "https://foursquare.com/{}", "urlMain": "https://foursquare.com/", "username_claimed": "dens", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Khan Academy Usernames that don't exist are detected. First noticed 2019-10-25. ```json "Khan Academy": { "errorType": "status_code", "rank": 377, "url": "https://www.khanacademy.org/profile/{}", "urlMain": "https://www.khanacademy.org/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## EVE Online Usernames that exist are not detected. ```json "EVE Online": { "errorType": "response_url", "errorUrl": "https://eveonline.com", "rank": 15347, "url": "https://evewho.com/pilot/{}/", "urlMain": "https://eveonline.com", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## AngelList Usernames that exist are not detected. Forbidden Request 403 Error. ```json "AngelList": { "errorType": "status_code", "rank": 5767, "url": "https://angel.co/u/{}", "urlMain": "https://angel.co/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## PowerShell Gallery Accidentally merged even though the original pull request showed that all user names were available. ```json "PowerShell Gallery": { "errorType": "status_code", "rank": 163562, "url": "https://www.powershellgallery.com/profiles/{}", "urlMain": "https://www.powershellgallery.com", "username_claimed": "powershellteam", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## StreamMe On 2019-04-07, I get a Timed Out message from the website. It has not been working earlier either (for some weeks). It takes about 21s before the site finally times out, so it really makes getting the results from Sherlock a pain. If the site becomes available in the future, we can put it back in. ```json "StreamMe": { "errorType": "status_code", "rank": 31702, "url": "https://www.stream.me/{}", "urlMain": "https://www.stream.me/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## BlackPlanet This site has always returned a false positive. The site returns the exact same text for a claimed or an unclaimed username. The site must be rendering all of the different content using Javascript in the browser. So, there is no way distinguish between the results with the current design of Sherlock. ```json "BlackPlanet": { "errorMsg": "My Hits", "errorType": "message", "rank": 110021, "url": "http://blackplanet.com/{}", "urlMain": "http://blackplanet.com/" }, ``` ## Fotolog Around 2019-02-09, I get a 502 HTTP error (bad gateway) for any access. On 2019-03-10, the site is up, but it is in maintenance mode. It does not seem to be working, so there is no sense in including it in Sherlock. ```json "Fotolog": { "errorType": "status_code", "rank": 47777, "url": "https://fotolog.com/{}", "urlMain": "https://fotolog.com/" }, ``` ## Google Plus On 2019-04-02, Google shutdown Google Plus. While the content for some users is available after that point, it is going away. And, no one will be able to create a new account. So, there is no value is keeping it in Sherlock. Good-bye [Google Plus](https://en.wikipedia.org/wiki/Google%2B)... ```json "Google Plus": { "errorType": "status_code", "rank": 1, "url": "https://plus.google.com/+{}", "urlMain": "https://plus.google.com/", "username_claimed": "davidbrin1", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## InsaneJournal As of 2020-02-23, InsaneJournal returns false positive, when providing a username which contains a period. Since we were not able to find the criteria for a valid username, the best thing to do now is to remove it. ```json "InsaneJournal": { "errorMsg": "Unknown user", "errorType": "message", "rank": 29728, "url": "http://{}.insanejournal.com/profile", "urlMain": "insanejournal.com", "username_claimed": "blue", "username_unclaimed": "dlyr6cd" }, ``` ## Sports Tracker As of 2020-04-02, Sports Tracker returns false positives. Checking with `errorMsg` and `response_url` did not seem to work. ``` "SportsTracker": { "errorUrl": "https://www.sports-tracker.com/page-not-found", "errorType": "response_url", "rank": 93950, "url": "https://www.sports-tracker.com/view_profile/{}", "urlMain": "https://www.sports-tracker.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeveruse" }, ``` ## Trip As of 2020-04-02, Trip by Skyscanner seems to not work beceause it keeps on redirecting to skyscanner.com whether the username exists or not. ```json "Trip": { "errorType": "status_code", "rank": 2847, "url": "https://www.trip.skyscanner.com/user/{}", "urlMain": "https://www.trip.skyscanner.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## boingboing.net As of 2020-04-02, boingboing.net requires a login to check if a user exits or not. ``` "boingboing.net": { "errorType": "status_code", "rank": 5821, "url": "https://bbs.boingboing.net/u/{}", "urlMain": "https://boingboing.net/", "username_claimed": "admin", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## elwoRU As of 2020-04-04, elwoRu does not exist anymore. I confirmed using downforeveryoneorjustme.com that the website is down. ```json "elwoRU": { "errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d", "errorType": "message", "rank": 254810, "url": "https://elwo.ru/index/8-0-{}", "urlMain": "https://elwo.ru/", "username_claimed": "red", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## ingvarr.net.ru As of 2020-04-04, ingvarr.net.ru does not exist anymore. I confirmed using downforeveryoneorjustme.com that the website is down. ```json "ingvarr.net.ru": { "errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d", "errorType": "message", "rank": 107721, "url": "http://ingvarr.net.ru/index/8-0-{}", "urlMain": "http://ingvarr.net.ru/", "username_claimed": "red", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Redsun.tf As of 2020-06-20, Redsun.tf seems to be adding random digits to the end of the usernames which makes it pretty much impossible for Sherlock to check for usernames on this particular website. ```json "Redsun.tf": { "errorMsg": "The specified member cannot be found", "errorType": "message", "rank": 3796657, "url": "https://forum.redsun.tf/members/?username={}", "urlMain": "https://redsun.tf/", "username_claimed": "dan", "username_unclaimed": "noonewouldeverusethis" }, ``` ## Creative Market As of 2020-06-20, Creative Market has a captcha to prove that you are a human, and because of this Sherlock is unable to check for username on this site because we will always get a page which asks us to prove that we are not a robot. ```json "CreativeMarket": { "errorType": "status_code", "rank": 1896, "url": "https://creativemarket.com/users/{}", "urlMain": "https://creativemarket.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## pvpru As of 2020-06-20, pvpru uses CloudFlair, and because of this we get a "Access denied" error whenever we try to check for a username. ```json "pvpru": { "errorType": "status_code", "rank": 405547, "url": "https://pvpru.com/board/member.php?username={}&tab=aboutme#aboutme", "urlMain": "https://pvpru.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## easyen As of 2020-06-21, easyen returns false positives when using a username which contains a period. Since we could not find the criteria for the usernames for this site, it will be removed ```json "easyen": { "errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d", "errorType": "message", "rank": 11564, "url": "https://easyen.ru/index/8-0-{}", "urlMain": "https://easyen.ru/", "username_claimed": "wd", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## pedsovet As of 2020-06-21, pedsovet returns false positives when using a username which contains a period. Since we could not find the criteria for the usernames for this site, it will be removed ```json "pedsovet": { "errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d", "errorType": "message", "rank": 6776, "url": "http://pedsovet.su/index/8-0-{}", "urlMain": "http://pedsovet.su/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## radioskot As of 2020-06-21, radioskot returns false positives when using a username which contains a period. Since we could not find the criteria for the usernames for this site, it will be removed ```json "radioskot": { "errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d", "errorType": "message", "rank": 105878, "url": "https://radioskot.ru/index/8-0-{}", "urlMain": "https://radioskot.ru/", "username_claimed": "red", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Coderwall As of 2020-07-06, Coderwall returns false positives when checking for an username which contains a period. I have tried to find out what Coderwall's criteria is for a valid username, but unfortunately I have not been able to find it and because of this, the best thing we can do now is to remove it. ```json "Coderwall": { "errorMsg": "404! Our feels when that url is used", "errorType": "message", "rank": 11256, "url": "https://coderwall.com/{}", "urlMain": "https://coderwall.com/", "username_claimed": "jenny", "username_unclaimed": "noonewouldeverusethis7" } ``` ## TamTam As of 2020-07-06, TamTam returns false positives when given a username which contains a period ```json "TamTam": { "errorType": "response_url", "errorUrl": "https://tamtam.chat/", "rank": 87903, "url": "https://tamtam.chat/{}", "urlMain": "https://tamtam.chat/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Zomato As of 2020-07-24, Zomato seems to be unstable. Majority of the time, Zomato takes a very long time to respond. ```json "Zomato": { "errorType": "status_code", "headers": { "Accept-Language": "en-US,en;q=0.9" }, "rank": 1920, "url": "https://www.zomato.com/pl/{}/foodjourney", "urlMain": "https://www.zomato.com/", "username_claimed": "deepigoyal", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Mixer As of 2020-07-22, the Mixer service has closed down. ```json "mixer.com": { "errorType": "status_code", "rank": 1544, "url": "https://mixer.com/{}", "urlMain": "https://mixer.com/", "urlProbe": "https://mixer.com/api/v1/channels/{}", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## KanoWorld As of 2020-07-22, KanoWorld's api.kano.me subdomain no longer exists which makes it not possible for us check for usernames. If an alternative way to check for usernames is found then it will added. ```json "KanoWorld": { "errorType": "status_code", "rank": 181933, "url": "https://api.kano.me/progress/user/{}", "urlMain": "https://world.kano.me/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## YandexCollection As of 2020-08-11, YandexCollection presents us with a recaptcha which prevents us from checking for usernames ```json "YandexCollection": { "errorType": "status_code", "url": "https://yandex.ru/collections/user/{}/", "urlMain": "https://yandex.ru/collections/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## PayPal As of 2020-08-24, PayPal now returns false positives, which was found when running the tests, but will most likley be added again in the near future once we find a better error detecting method. ```json "PayPal": { "errorMsg": "", "errorType": "message", "url": "https://www.paypal.com/paypalme/{}", "headers": { "User-Agent": "" }, "urlMain": "https://www.paypal.me/", "username_claimed": "blue", "username_unclaimed": "noneownsthisusername7" }, ``` ## ImageShack As of 2020-08-24, ImageShack now returns false positives, which was found when running the tests, but will most likley be added again in the near future once we find a better error detecting method. ```json "ImageShack": { "errorType": "response_url", "errorUrl": "https://imageshack.us/", "url": "https://imageshack.us/user/{}", "urlMain": "https://imageshack.us/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Aptoide As of 2020-08-24, Aptoide now returns false positives, which was found when running the tests, but will most likley be added again in the near future once we find a better error detecting method. ```json "Aptoide": { "errorType": "status_code", "url": "https://{}.en.aptoide.com/", "urlMain": "https://en.aptoide.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Crunchyroll As of 2020-08-24, Crunchyroll now returns false positives, which was found when running the tests, but will most likley be added again in the near future once we find a better error detecting method. ```json "Crunchyroll": { "errorType": "status_code", "url": "https://www.crunchyroll.com/user/{}", "urlMain": "https://www.crunchyroll.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## T-MobileSupport As of 2020-08-24, T-MobileSupport now returns false positives, which was found when running the tests, but will most likley be added again in the near future once we find a better error detecting method. ```json "T-MobileSupport": { "errorType": "status_code", "url": "https://support.t-mobile.com/people/{}", "urlMain": "https://support.t-mobile.com", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## OpenCollective As of 2020-08-24, OpenCollective now returns false positives, which was found when running the tests, but will most likley be added again in the near future once we find a better error detecting method. ```json "OpenCollective": { "errorType": "status_code", "url": "https://opencollective.com/{}", "urlMain": "https://opencollective.com/", "username_claimed": "sindresorhus", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## SegmentFault As of 2020-08-24, SegmentFault now returns false positives, which was found when running the tests, but will most likley be added again in the near future once we find a better error detecting method. ```json "SegmentFault": { "errorType": "status_code", "url": "https://segmentfault.com/u/{}", "urlMain": "https://segmentfault.com/", "username_claimed": "bule", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Viadeo As of 2020-08-24, Viadeo now returns false positives, which was found when running the tests, but will most likley be added again in the near future once we find a fix for this ```json "Viadeo": { "errorType": "status_code", "url": "http://fr.viadeo.com/en/profile/{}", "urlMain": "http://fr.viadeo.com/en/", "username_claimed": "franck.patissier", "username_unclaimed": "noonewouldeverusethis" }, ``` ## MeetMe As of 2020-09-02, MeetMe returns false positives ```json "MeetMe": { "errorType": "response_url", "errorUrl": "https://www.meetme.com/", "url": "https://www.meetme.com/{}", "urlMain": "https://www.meetme.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Linkdedin As of 2020-09-23, Linkedin returns false positives because we are prompted with prompted to login when checking for a user ```json "Linkedin": { "errorMsg": "could not be found", "errorType": "message", "rank": 0, "url": "https://www.linkedin.com/in/{}", "urlMain": "https://www.linkedin.com/", "username_claimed": "alex", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## tracr.co As of 2020-09-23, tracr.co returns false positives because the site seems to be shut down. ```json "tracr.co": { "errorMsg": "No search results", "errorType": "message", "regexCheck": "^[A-Za-z0-9]{2,32}$", "url": "https://tracr.co/users/1/{}", "urlMain": "https://tracr.co/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" } ``` ## Taringa As of 2020-09-23, Taringa returns false positives. ```json "Taringa": { "errorType": "status_code", "regexCheck": "^[^.]*$", "url": "https://www.taringa.net/{}", "urlMain": "https://taringa.net/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Photobucket As of 2020-10-21, Photobucket return false positives. This was reported in #785. ```json "Photobucket": { "errorType": "status_code", "url": "https://photobucket.com/user/{}/library", "urlMain": "https://photobucket.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## 4PDA As of 2020-10-21, 4PDA returns false positives. This was reported in #784. ```json "4pda": { "errorMsg": "[1,false,0]", "errorType": "message", "url": "https://4pda.ru/forum/index.php?act=search&source=pst&noform=1&username={}", "urlMain": "https://4pda.ru/", "urlProbe": " https://4pda.ru/forum/index.php?act=auth&action=chkname&login={}", "username_claimed": "green", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## PokerStrategy As of 2020-10-21, PokerStrategy returns false positives. This was reported in #776. ```json "PokerStrategy": { "errorType": "status_code", "url": "http://www.pokerstrategy.net/user/{}/profile/", "urlMain": "http://www.pokerstrategy.net", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Filmogs Filmogs has closed down. > **Filmogs is closed** > **31-Aug 2020** - We are preparing the last data export and collection of images. It will be published here by 19-Oct 2020. If you have requested an export of your data it will also be emailed to you by 19-Oct 2020. ```json "Filmogs": { "errorType": "status_code", "url": "https://www.filmo.gs/users/{}", "urlMain": "https://www.filmo.gs/", "username_claimed": "cupparober", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## 500px As of 2021-01-13, 500px returns false positives. This will hopefully be fixed soon once we add the ability to add different request methods. ```json "500px": { "errorMsg": "No message available", "errorType": "message", "url": "https://500px.com/p/{}", "urlMain": "https://500px.com/", "urlProbe": "https://api.500px.com/graphql?operationName=ProfileRendererQuery&variables=%7B%22username%22%3A%22{}%22%7D&extensions=%7B%22persistedQuery%22%3A%7B%22version%22%3A1%2C%22sha256Hash%22%3A%224d02ff5c13927a3ac73b3eef306490508bc765956940c31051468cf30402a503%22%7D%7D", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Badoo As of 2021-01-13, Badoo returns false positives ```json "Badoo": { "errorType": "status_code", "url": "https://badoo.com/profile/{}", "urlMain": "https://badoo.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Pling As of 2021-01-13, Pling returns false positives. ```json "Pling": { "errorMsg": "Resource not found", "errorType": "message", "url": "https://www.pling.com/u/{}/", "urlMain": "https://www.pling.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis" }, ``` ## Realmeye As of 2021-01-13, Realmeye returns false positives. ```json "Realmeye": { "errorMsg": "Sorry, but we either:", "errorType": "message", "url": "https://www.realmeye.com/player/{}", "urlMain": "https://www.realmeye.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Travellerspoint As of 2021-01-13, Travellerspoint returns false positives ```json "Travellerspoint": { "errorMsg": "Wooops. Sorry!", "errorType": "message", "url": "https://www.travellerspoint.com/users/{}", "urlMain": "https://www.travellerspoint.com", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## GDProfiles As of 2021-06-27, GDProfiles takes way too long to respond. Must be an issue on their side. ```json "GDProfiles": { "errorType": "status_code", "url": "https://gdprofiles.com/{}", "urlMain": "https://gdprofiles.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis" }, ``` ## AllTrails As of 2021-06-27, AllTrails has a captcha which prevents us from checking for usernames on the site. ```json "AllTrails": { "errorMsg": "class=\"home index\"", "errorType": "message", "url": "https://www.alltrails.com/members/{}", "urlMain": "https://www.alltrails.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis" } ``` ## Cent As of 2021-06-27, there is not way of checking if a username exists on Cent ```json "Cent": { "errorMsg": "Cent", "errorType": "message", "url": "https://beta.cent.co/@{}", "urlMain": "https://cent.co/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Anobii As of 2021-06-27, Anobii returns false positives and there is no stable way of checking usernames. ``` "Anobii": { "errorType": "response_url", "url": "https://www.anobii.com/{}/profile", "urlMain": "https://www.anobii.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" } ``` ## Kali Community As of 2021-06-27, Kali Community requires us to be logged in order to check if a user exists on their forum. ```json "Kali community": { "errorMsg": "This user has not registered and therefore does not have a profile to view.", "errorType": "message", "url": "https://forums.kali.org/member.php?username={}", "urlMain": "https://forums.kali.org/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" } ``` ## NameMC As of 2021-06-27, NameMC uses captcha through CloudFlare which prevents us from checking if usernames exists on the site. ```json "NameMC (Minecraft.net skins)": { "errorMsg": "Profiles: 0 results", "errorType": "message", "url": "https://namemc.com/profile/{}", "urlMain": "https://namemc.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## SteamID As of 2021-06-27, Steam uses captcha through CloudFlare which prevents us from checking if usernames exists on the site. ```json "Steamid": { "errorMsg": "", "errorType": "message", "url": "https://steamid.uk/profile/{}", "urlMain": "https://steamid.uk/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" } ``` ## TripAdvisor As of 2021-06-27, Trip takes too long to return a response. As of now, the reason is not known. ```json "TripAdvisor": { "errorMsg": "This page is on vacation\u2026", "errorType": "message", "url": "https://tripadvisor.com/members/{}", "urlMain": "https://tripadvisor.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ### House Mixes As of 2021-09-04, House Mixes has issues connecting causing Sherlock to freeze. ```json "House-Mixes.com": { "errorMsg": "Profile Not Found", "errorType": "message", "regexCheck": "^[a-zA-Z0-9]+(-[a-zA-Z0-9]+)*$", "url": "https://www.house-mixes.com/profile/{}", "urlMain": "https://www.house-mixes.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" } ``` ### Quora As of 2021-09-04, Quora returns false positives. ```json "Quora": { "errorMsg": "Page Not Found", "errorType": "message", "url": "https://www.quora.com/profile/{}", "urlMain": "https://www.quora.com/", "username_claimed": "Matt-Riggsby", "username_unclaimed": "noonewouldeverusethis7" } ``` ### SparkPeople As of 2021-09-04, SparkPeople returns false positives. ```json "SparkPeople": { "errorMsg": "We couldn't find that user", "errorType": "message", "url": "https://www.sparkpeople.com/mypage.asp?id={}", "urlMain": "https://www.sparkpeople.com", "username_claimed": "adam", "username_unclaimed": "noonewouldeverusethis7" } ``` ### Cloob As of 2021-10-25, Cloob seems to be down and their site is not responding. ```json "Cloob": { "errorType": "status_code", "url": "https://www.cloob.com/name/{}", "urlMain": "https://www.cloob.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" } ``` ### TM-Ladder As of 2021-11-30, TM-Ladder is returning false positives due to rate limits. ```json "TM-Ladder": { "errorMsg": "player unknown or invalid", "errorType": "message", "url": "http://en.tm-ladder.com/{}_rech.php", "urlMain": "http://en.tm-ladder.com/index.php", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis" ``` ### plug.dj As of 2021-12-02, plug.dj is returning false positives because the service is down. ```json "plug.dj": { "errorType": "status_code", "url": "https://plug.dj/@/{}", "urlMain": "https://plug.dj/", "username_claimed": "plug-dj-rock", "username_unclaimed": "noonewouldeverusethis7" } ``` ## Facenama As of 2022-02-6, Facenama seems to be down their rebuilding their site ```json "Facenama": { "errorType": "response_url", "errorUrl": "https://facenama.com/404.html", "regexCheck": "^[-a-zA-Z0-9_]+$", "url": "https://facenama.com/{}", "urlMain": "https://facenama.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis77" }, ``` ## Designspiration As of 2022-04-17, Designspiration seems to be down or very laggy. Therefore, we're removing the site for now. ```json "Designspiration": { "errorType": "status_code", "url": "https://www.designspiration.net/{}/", "urlMain": "https://www.designspiration.net/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## CapFriendly As of 2022-05-01, CapFriendly always shows that a username exists even though it doesn't. This then of course causes false positives in Sherlock's results. ```json "CapFriendly": { "errorMsg": "
No results found
", "errorType": "message", "regexCheck": "^[a-zA-z][a-zA-Z0-9_]{2,79}$", "url": "https://www.capfriendly.com/users/{}", "urlMain": "https://www.capfriendly.com/", "username_claimed": "thisactuallyexists", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Gab As of 2022-05-01, Gab returns false positives because they now use CloudFlare ```json "Gab": { "errorMsg": "The page you are looking for isn't here.", "errorType": "message", "url": "https://gab.com/{}", "urlMain": "https://gab.com", "username_claimed": "a", "username_unclaimed": "noonewouldeverusethis" }, ``` ## FanCentro As of 2022-05-1, FanCentro returns false positives. Will later in new version of Sherlock. ```json "FanCentro": { "errorMsg": "var environment", "errorType": "message", "url": "https://fancentro.com/{}", "urlMain": "https://fancentro.com/", "username_claimed": "nielsrosanna", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Smashcast As og 2022-05-01, Smashcast is down ```json "Smashcast": { "errorType": "status_code", "url": "https://www.smashcast.tv/api/media/live/{}", "urlMain": "https://www.smashcast.tv/", "username_claimed": "hello", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Countable As og 2022-05-01, Countable returns false positives ```json "Countable": { "errorType": "status_code", "url": "https://www.countable.us/{}", "urlMain": "https://www.countable.us/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Raidforums Raidforums is [now run by the FBI](https://twitter.com/janomine/status/1499453777648234501?s=21) ```json "Raidforums": { "errorType": "status_code", "url": "https://raidforums.com/User-{}", "urlMain": "https://raidforums.com/", "username_claimed": "red", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Pinterest Removed due to false positive ```json "Pinterest": { "errorType": "status_code", "url": "https://www.pinterest.com/{}/", "urlMain": "https://www.pinterest.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis76543" } ``` ## PCPartPicker As of 17-07-2022, PCPartPicker requires us to login in order to check if a user exits ```json "PCPartPicker": { "errorType": "status_code", "url": "https://pcpartpicker.com/user/{}", "urlMain": "https://pcpartpicker.com", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Ebay As of 17-07-2022, Ebay is very slow to respond. It was also reported that it returned false positives. So this is something that has been investigated further later. ```json "eBay.com": { "errorMsg": "The User ID you entered was not found. Please check the User ID and try again.", "errorType": "message", "url": "https://www.ebay.com/usr/{}", "urlMain": "https://www.ebay.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, "eBay.de": { "errorMsg": "Der eingegebene Nutzername wurde nicht gefunden. Bitte pr\u00fcfen Sie den Nutzernamen und versuchen Sie es erneut.", "errorType": "message", "url": "https://www.ebay.de/usr/{}", "urlMain": "https://www.ebay.de/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Ghost As of 17-07-2022, Ghost returns false positives ```json "Ghost": { "errorMsg": "Domain Error", "errorType": "message", "url": "https://{}.ghost.io/", "urlMain": "https://ghost.org/", "username_claimed": "troyhunt", "username_unclaimed": "noonewouldeverusethis7" } ``` ## Atom Discussions As of 25-07-2022, Atom Discussions seems to not work beceause it keeps on redirecting to github discussion tab which does not exist and is not specific to a username ```json "Atom Discussions": { "errorMsg": "Oops! That page doesn\u2019t exist or is private.", "errorType": "message", "url": "https://discuss.atom.io/u/{}/summary", "urlMain": "https://discuss.atom.io", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis" } ``` ## Gam1ng As of 25-07-2022, Gam1ng has been permanently moved and is no longer functional ```json "Gam1ng": { "errorType": "status_code", "url": "https://gam1ng.com.br/user/{}", "urlMain": "https://gam1ng.com.br", "username_claimed": "PinKgirl", "username_unclaimed": "noonewouldeverusethis77777" } ``` ## OGUsers As of 25-07-2022, OGUsers is now no longer functional ```json "OGUsers": { "errorType": "status_code", "url": "https://ogusers.com/{}", "urlMain": "https://ogusers.com/", "username_claimed": "ogusers", "username_unclaimed": "noonewouldeverusethis7" } ``` ## Otzovik As of 25-07-2022, Otzovik is now no longer functional ```json "Otzovik": { "errorType": "status_code", "url": "https://otzovik.com/profile/{}", "urlMain": "https://otzovik.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" } ``` ## radio_echo_msk As of 25-07-2022, radio_echo_msk is now no longer functional ```json "radio_echo_msk": { "errorType": "status_code", "url": "https://echo.msk.ru/users/{}", "urlMain": "https://echo.msk.ru/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" } ``` ## Ello As of 06.09.2022, Ello is now behind CloudFlare ```json "Ello": { "errorMsg": "We couldn't find the page you're looking for", "errorType": "message", "url": "https://ello.co/{}", "urlMain": "https://ello.co/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" } ``` ## GitHub Support Community As of 06.09.2022, GitHub Support Community's endpoint just redirects to the main community page ```json "GitHub Support Community": { "errorMsg": "Oops! That page doesn\u2019t exist or is private.", "errorType": "message", "url": "https://github.community/u/{}/summary", "urlMain": "https://github.community", "username_claimed": "jperl", "username_unclaimed": "noonewouldusethis298" } ``` ## GuruShots As of 08.09.2022, GuruShots returns false positives because it just returns a blank page. Need to look further into it so that it can be added back. ```json "GuruShots": { "errorType": "status_code", "url": "https://gurushots.com/{}/photos", "urlMain": "https://gurushots.com/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Google Developer As of 09.10.2022, Google Developer returns false positives. The site is dynamic so we're not abl to get any proper results ```json "Google Developer": { "errorMsg": "Sorry, the profile was not found.", "errorType": "message", "url": "https://g.dev/{}", "urlMain": "https://g.dev/", "username_claimed": "blue", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## mastodon.technology As of 18.12.2022, mastodon.technology has no A/AAAA records and the [website was shut down by the owner](https://ashfurrow.com/blog/mastodon-technology-shutdown/). ```json "mastodon.technology": { "errorType": "status_code", "url": "https://mastodon.technology/@{}", "urlMain": "https://mastodon.xyz/", "username_claimed": "ashfurrow", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Aruino As of 04.02.2023, Arduino returns false positives. Finding a fix is doable but takes some time. Will be fixed later ```json "Arduino": { "errorMsg":"Arduino Cloud", "errorType": "message", "regexCheck": "^(?![_-])[A-Za-z0-9_-]{3,}$", "url": "https://projecthub.arduino.cc/{}", "urlMain": "https://www.arduino.cc/", "username_claimed": "blue", "username_unclaimed": "noonewould" }, ``` ## Zoomit As of 04.02.2023, Zoomit return false positves. An attempt at finding a fix was made but a lot of time was used without luck. Therefore, it wont be prioritized at the moment. ```json "zoomit": { "errorMsg": "\u0645\u062a\u0627\u0633\u0641\u0627\u0646\u0647 \u0635\u0641\u062d\u0647 \u06cc\u0627\u0641\u062a \u0646\u0634\u062f", "errorType": "message", "url": "https://www.zoomit.ir/user/{}", "urlMain": "https://www.zoomit.ir", "username_claimed": "kossher", "username_unclaimed": "noonewouldeverusethis7" }, ``` ## Facebook As of 04.02.2023, Facebook returns false positives because we get prompted with the login screen to view the data ```json "Facebook": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9\\.]{3,49}(?Tinder | Dating, Make Friends & Meet New People", "Tinder | Match. Chat. Date." ], "errorType": "message", "url": "https://www.tinder.com/@{}", "urlMain": "https://tinder.com/", "username_claimed": "blue" }, ``` ## Coil As of 2023.03.15, Coil has been discontinued. All accounts were deleted and any requests return a 404. ```json "Coil": { "errorMsg": "User not found", "errorType": "message", "request_method": "POST", "request_payload": { "operationName": "getCreator", "query": "query getCreator($userShortName:String!){getCreator(userShortName:$userShortName){id}}", "variables": { "userShortName": "{}" } }, "url": "https://coil.com/u/{}", "urlMain": "https://coil.com/", "urlProbe": "https://coil.com/gateway", "username_claimed": "adam" } ``` ## OnlyFans As of 2023.04.20, OnlyFans returns false negatives on checking usernames with the API endpoint and directly through their website. ```json "OnlyFans": { "errorType": "status_code", "isNSFW": true, "url": "https://onlyfans.com/{}", "urlMain": "https://onlyfans.com/", "urlProbe": "https://onlyfans.com/api2/v2/users/{}", "username_claimed": "theemilylynne" } ``` ## OK As of 2023.04.21, Ok.ru returns false positives ```json "OK": { "errorType": "status_code", "regexCheck": "^[a-zA-Z][a-zA-Z0-9_.-]*$", "url": "https://ok.ru/{}", "urlMain": "https://ok.ru/", "username_claimed": "ok" } ``` ## ForumhouseRU As of 2023.04.21, ForumhouseRU returns false positives ```json "forumhouseRU": { "errorMsg": "\u0423\u043a\u0430\u0437\u0430\u043d\u043d\u044b\u0439 \u043f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d. \u041f\u043e\u0436\u0430\u043b\u0443\u0439\u0441\u0442\u0430, \u0432\u0432\u0435\u0434\u0438\u0442\u0435 \u0434\u0440\u0443\u0433\u043e\u0435 \u0438\u043c\u044f.", "errorType": "message", "url": "https://www.forumhouse.ru/members/?username={}", "urlMain": "https://www.forumhouse.ru/", "username_claimed": "red" } ``` ## Enjin As of 2023.08.29, Enjin has closed down. ```json "Enjin": { "errorMsg": "Yikes, there seems to have been an error. We've taken note and will check out the problem right away!", "errorType": "message", "url": "https://www.enjin.com/profile/{}", "urlMain": "https://www.enjin.com/", "username_claimed": "blue" }, ``` ## IRL As of 2023.08.29, IRL has shut down ```json "IRL": { "errorType": "status_code", "url": "https://www.irl.com/{}", "urlMain": "https://www.irl.com/", "username_claimed": "hacker" } ``` ## Munzee As of 2023.08.29, Munzee requires us to be logged into the site in order to check if a user exists or not ```json "Munzee": { "errorType": "status_code", "url": "https://www.munzee.com/m/{}", "urlMain": "https://www.munzee.com/", "username_claimed": "blue" } ``` ## Quizlet As of 2023.08.29 Quizlet requires us to enable JavaScript to check if a user exsits on the website ```json "Quizlet": { "errorMsg": "Page Unavailable", "errorType": "message", "url": "https://quizlet.com/{}", "urlMain": "https://quizlet.com", "username_claimed": "blue" } ``` ## GunsAndAmmo As of 2023.08.29, GunsAndAmmo responds with 404 from time to time ```json "GunsAndAmmo": { "errorType": "status_code", "url": "https://forums.gunsandammo.com/profile/{}", "urlMain": "https://gunsandammo.com/", "username_claimed": "adam" } ``` ## TikTok As of 2023.12.21, TikTok returns false positives. This is because the webpage returns a somewhat blank page. This prevents us from being able to check for the existence of usernames. Proxitok does not work either. ```json "TikTok": { "errorType": "status_code", "url": "https://tiktok.com/@{}", "urlMain": "https://tiktok.com/", "username_claimed": "red" }, ``` ## Lolchess As of 2023.12.21, Lolchess returns false positives. ```json "Lolchess": { "errorMsg": "No search results", "errorType": "message", "url": "https://lolchess.gg/profile/na/{}", "urlMain": "https://lolchess.gg/", "username_claimed": "blue" }, ``` ## Virgool As of 2023.12.21, Virgool returns false positives. ```json "Virgool": { "errorMsg": "\u06f4\u06f0\u06f4", "errorType": "message", "url": "https://virgool.io/@{}", "urlMain": "https://virgool.io/", "username_claimed": "blue" }, ``` ## Whonix Forum As of 2023.12.21, Whonix Forum returns false positives. ```json "Whonix Forum": { "errorType": "status_code", "url": "https://forums.whonix.org/u/{}/summary", "urlMain": "https://forums.whonix.org/", "username_claimed": "red" }, ``` ## Ebio As of 2023.12.21, Ebio returns false positives. ```json "ebio.gg": { "errorType": "status_code", "url": "https://ebio.gg/{}", "urlMain": "https:/ebio.gg", "username_claimed": "dev" }, ``` ## HexRPG __2024-04-07 :__ HexRPG behind authentication wall. Unable to check usernames without logging in. ```json "HexRPG": { "errorMsg": "Error : User ", "errorType": "message", "regexCheck": "^[a-zA-Z0-9_ ]{3,20}$", "url": "https://www.hexrpg.com/userinfo/{}", "urlMain": "https://www.hexrpg.com/", "username_claimed": "blue" } ``` ## Oracle Communities __2024-04-07 :__ Oracle Communities behind authentication wall. Unable to check usernames without logging in. ```json "Oracle Communities": { "errorType": "status_code", "url": "https://community.oracle.com/people/{}", "urlMain": "https://community.oracle.com", "username_claimed": "dev" } ``` ## Metacritic __2024-04-07 :__ Non-existent users seemingly displayed as real users with no activity. Needs adjustment. ```json "metacritic": { "errorMsg": "User not found", "errorType": "message", "regexCheck": "^(?![-_].)[A-Za-z0-9-_]{3,15}$", "url": "https://www.metacritic.com/user/{}", "urlMain": "https://www.metacritic.com/", "username_claimed": "blue" } ``` ## G2G __2024-04-10 :__ Seems to be loading profiles with some wierd javascript setup that sherlock doesn't like, leading to difficult to control false positives ```json "G2G": { "errorType": "response_url", "errorUrl": "https://www.g2g.com/{}", "regexCheck": "^[A-Za-z][A-Za-z0-9_]{2,11}$", "url": "https://www.g2g.com/{}", "urlMain": "https://www.g2g.com/", "username_claimed": "user" } ``` ## Bitcoin Forum __2024-04-24 :__ BCF seems to have gone defunct. Uncertain. ```json "BitCoinForum": { "errorMsg": "The user whose profile you are trying to view does not exist.", "errorType": "message", "url": "https://bitcoinforum.com/profile/{}", "urlMain": "https://bitcoinforum.com", "username_claimed": "bitcoinforum.com" } ``` ## Zhihu As of 24.06.2024, Zhihu returns false positives as they obfuscate the code thats returned. Checking for patterns may allow us to find a way to detect the existans of a user, this will be need to be worked on later ```json "Zhihu": { "errorMsg": "用户不存在", "errorType": "message", "url": "https://www.zhihu.com/people/{}", "urlMain": "https://www.zhihu.com/", "username_claimed": "blue" } ``` ## Penetestit As of 24.06.2024, Pentestit returns a 403. This is most likely due to a new site structures ```json "labpentestit": { "errorType": "response_url", "errorUrl": "https://lab.pentestit.ru/{}", "url": "https://lab.pentestit.ru/profile/{}", "urlMain": "https://lab.pentestit.ru/", "username_claimed": "CSV" } ``` ## Euw __2024-06-09 :__ errorMsg detection doesn't work anymore, because the error message is included in HTTP request body, even in successful search ```json "Euw": { "errorMsg": "This summoner is not registered at OP.GG. Please check spelling.", "errorType": "message", "url": "https://euw.op.gg/summoner/userName={}", "urlMain": "https://euw.op.gg/", "username_claimed": "blue" } ``` ## Etsy __2024-06-10 :__ Http request returns 403 forbidden, and tries to verify the connection, so it doesn't work anymore ```json "Etsy": { "errorType": "status_code", "url": "https://www.etsy.com/shop/{}", "urlMain": "https://www.etsy.com/", "username_claimed": "JennyKrafts" } ``` ## Alik.cz __2024-07-21 :__ Target is now BLACKLISTED from the default manifest due to the site recieving unnecessarily high traffic from Sherlock (by request of the site owners). This target is not permitted to be reactivited. Inclusion in unrelated manifests is not impacted, but it is discouraged. ## 8tracks __2025-02-02 :__ Might be dead again. Nobody knows for sure. ```json "8tracks": { "errorType": "message", "errorMsg": "\"available\":true", "headers": { "Accept-Language": "en-US,en;q=0.5" }, "url": "https://8tracks.com/{}", "urlProbe": "https://8tracks.com/users/check_username?login={}&format=jsonh", "urlMain": "https://8tracks.com/", "username_claimed": "blue" } ``` ## Shpock __2025-02-02 :__ Can likely be added back with a new endpoint (source username availability endpoint from mobile app reg flow?) ```json "Shpock": { "errorType": "status_code", "url": "https://www.shpock.com/shop/{}/items", "urlMain": "https://www.shpock.com/", "username_claimed": "user" } ``` ## Twitch __2025-02-02 :__ ```json "Twitch": { "errorType": "message", "errorMsg": "components.availability-tracking.warn-unavailable.component", "url": "https://www.twitch.tv/{}", "urlMain": "https://www.twitch.tv/", "urlProbe": "https://m.twitch.tv/{}", "username_claimed": "jenny" } ``` ## Fiverr __2025-02-02 :__ Fiverr added CSRF protections that messed with this test ```json "Fiverr": { "errorMsg": "\"status\":\"success\"", "errorType": "message", "headers": { "Content-Type": "application/json", "Accept-Language": "en-US,en;q=0.9" }, "regexCheck": "^[A-Za-z][A-Za-z\\d_]{5,14}$", "request_method": "POST", "request_payload": { "username": "{}" }, "url": "https://www.fiverr.com/{}", "urlMain": "https://www.fiverr.com/", "urlProbe": "https://www.fiverr.com/validate_username", "username_claimed": "blueman" } ``` ## BabyRU __2025-02-02 :__ Just being problematic (possibly related to errorMsg encoding?) ```json "babyRU": { "errorMsg": [ "\u0421\u0442\u0440\u0430\u043d\u0438\u0446\u0430, \u043a\u043e\u0442\u043e\u0440\u0443\u044e \u0432\u044b \u0438\u0441\u043a\u0430\u043b\u0438, \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d\u0430", "Доступ с вашего IP-адреса временно ограничен" ], "errorType": "message", "url": "https://www.baby.ru/u/{}/", "urlMain": "https://www.baby.ru/", "username_claimed": "blue" } ``` ## v0.dev __2025-02-16 :__ Unsure if any way to view profiles exists now ```json "v0.dev": { "errorType": "message", "errorMsg": "v0 by Vercel", "url": "https://v0.dev/{}", "urlMain": "https://v0.dev", "username_claimed": "t3dotgg" } ``` ## TorrentGalaxy __2025-07-06 :__ Site appears to have gone offline in March and hasn't come back ```json "TorrentGalaxy": { "errorMsg": "TGx:Can't show details", "errorType": "message", "regexCheck": "^[A-Za-z0-9]{3,15}$", "url": "https://torrentgalaxy.to/profile/{}", "urlMain": "https://torrentgalaxy.to/", "username_claimed": "GalaxyRG" }, ``` ================================================ FILE: pyproject.toml ================================================ [build-system] requires = [ "poetry-core>=1.2.0" ] build-backend = "poetry.core.masonry.api" # poetry-core 1.8 not available in .fc39. Can upgrade to 1.8.0 at .fc39 EOL [tool.poetry-version-plugin] source = "init" [tool.poetry] name = "sherlock-project" version = "0.16.0" description = "Hunt down social media accounts by username across social networks" license = "MIT" authors = [ "Siddharth Dushantha " ] maintainers = [ "Paul Pfeister ", "Matheus Felipe ", "Sondre Karlsen Dyrnes " ] readme = "docs/pyproject/README.md" packages = [ { include = "sherlock_project"} ] keywords = [ "osint", "reconnaissance", "information gathering" ] classifiers = [ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", "Topic :: Security" ] homepage = "https://sherlockproject.xyz/" repository = "https://github.com/sherlock-project/sherlock" [tool.poetry.urls] "Bug Tracker" = "https://github.com/sherlock-project/sherlock/issues" [tool.poetry.dependencies] python = "^3.9" certifi = ">=2019.6.16" colorama = "^0.4.1" PySocks = "^1.7.0" requests = "^2.22.0" requests-futures = "^1.0.0" stem = "^1.8.0" pandas = "^2.2.1" openpyxl = "^3.0.10" tomli = "^2.2.1" [tool.poetry.group.dev.dependencies] jsonschema = "^4.0.0" rstr = "^3.2.2" pytest = "^8.4.2" pytest-xdist = "^3.8.0" [tool.poetry.group.ci.dependencies] defusedxml = "^0.7.1" [tool.poetry.scripts] sherlock = 'sherlock_project.sherlock:main' ================================================ FILE: pytest.ini ================================================ [pytest] addopts = --strict-markers -m "not validate_targets" markers = online: mark tests are requiring internet access. validate_targets: mark tests for sweeping manifest validation (sends many requests). validate_targets_fp: validate_targets, false positive tests only. validate_targets_fn: validate_targets, false negative tests only. ================================================ FILE: sherlock_project/__init__.py ================================================ """ Sherlock Module This module contains the main logic to search for usernames at social networks. """ from importlib.metadata import version as pkg_version, PackageNotFoundError import pathlib import tomli def get_version() -> str: """Fetch the version number of the installed package.""" try: return pkg_version("sherlock_project") except PackageNotFoundError: pyproject_path: pathlib.Path = pathlib.Path(__file__).resolve().parent.parent / "pyproject.toml" with pyproject_path.open("rb") as f: pyproject_data = tomli.load(f) return pyproject_data["tool"]["poetry"]["version"] # This variable is only used to check for ImportErrors induced by users running as script rather than as module or package import_error_test_var = None __shortname__ = "Sherlock" __longname__ = "Sherlock: Find Usernames Across Social Networks" __version__ = get_version() forge_api_latest_release = "https://api.github.com/repos/sherlock-project/sherlock/releases/latest" ================================================ FILE: sherlock_project/__main__.py ================================================ #! /usr/bin/env python3 """ Sherlock: Find Usernames Across Social Networks Module This module contains the main logic to search for usernames at social networks. """ import sys if __name__ == "__main__": # Check if the user is using the correct version of Python python_version = sys.version.split()[0] if sys.version_info < (3, 9): print(f"Sherlock requires Python 3.9+\nYou are using Python {python_version}, which is not supported by Sherlock.") sys.exit(1) from sherlock_project import sherlock sherlock.main() ================================================ FILE: sherlock_project/notify.py ================================================ """Sherlock Notify Module This module defines the objects for notifying the caller about the results of queries. """ from sherlock_project.result import QueryStatus from colorama import Fore, Style import webbrowser # Global variable to count the number of results. globvar = 0 class QueryNotify: """Query Notify Object. Base class that describes methods available to notify the results of a query. It is intended that other classes inherit from this base class and override the methods to implement specific functionality. """ def __init__(self, result=None): """Create Query Notify Object. Contains information about a specific method of notifying the results of a query. Keyword Arguments: self -- This object. result -- Object of type QueryResult() containing results for this query. Return Value: Nothing. """ self.result = result # return def start(self, message=None): """Notify Start. Notify method for start of query. This method will be called before any queries are performed. This method will typically be overridden by higher level classes that will inherit from it. Keyword Arguments: self -- This object. message -- Object that is used to give context to start of query. Default is None. Return Value: Nothing. """ # return def update(self, result): """Notify Update. Notify method for query result. This method will typically be overridden by higher level classes that will inherit from it. Keyword Arguments: self -- This object. result -- Object of type QueryResult() containing results for this query. Return Value: Nothing. """ self.result = result # return def finish(self, message=None): """Notify Finish. Notify method for finish of query. This method will be called after all queries have been performed. This method will typically be overridden by higher level classes that will inherit from it. Keyword Arguments: self -- This object. message -- Object that is used to give context to start of query. Default is None. Return Value: Nothing. """ # return def __str__(self): """Convert Object To String. Keyword Arguments: self -- This object. Return Value: Nicely formatted string to get information about this object. """ return str(self.result) class QueryNotifyPrint(QueryNotify): """Query Notify Print Object. Query notify class that prints results. """ def __init__(self, result=None, verbose=False, print_all=False, browse=False): """Create Query Notify Print Object. Contains information about a specific method of notifying the results of a query. Keyword Arguments: self -- This object. result -- Object of type QueryResult() containing results for this query. verbose -- Boolean indicating whether to give verbose output. print_all -- Boolean indicating whether to only print all sites, including not found. browse -- Boolean indicating whether to open found sites in a web browser. Return Value: Nothing. """ super().__init__(result) self.verbose = verbose self.print_all = print_all self.browse = browse return def start(self, message): """Notify Start. Will print the title to the standard output. Keyword Arguments: self -- This object. message -- String containing username that the series of queries are about. Return Value: Nothing. """ title = "Checking username" print(Style.BRIGHT + Fore.GREEN + "[" + Fore.YELLOW + "*" + Fore.GREEN + f"] {title}" + Fore.WHITE + f" {message}" + Fore.GREEN + " on:") # An empty line between first line and the result(more clear output) print('\r') return def countResults(self): """This function counts the number of results. Every time the function is called, the number of results is increasing. Keyword Arguments: self -- This object. Return Value: The number of results by the time we call the function. """ global globvar globvar += 1 return globvar def update(self, result): """Notify Update. Will print the query result to the standard output. Keyword Arguments: self -- This object. result -- Object of type QueryResult() containing results for this query. Return Value: Nothing. """ self.result = result response_time_text = "" if self.result.query_time is not None and self.verbose is True: response_time_text = f" [{round(self.result.query_time * 1000)}ms]" # Output to the terminal is desired. if result.status == QueryStatus.CLAIMED: self.countResults() print(Style.BRIGHT + Fore.WHITE + "[" + Fore.GREEN + "+" + Fore.WHITE + "]" + response_time_text + Fore.GREEN + f" {self.result.site_name}: " + Style.RESET_ALL + f"{self.result.site_url_user}") if self.browse: webbrowser.open(self.result.site_url_user, 2) elif result.status == QueryStatus.AVAILABLE: if self.print_all: print(Style.BRIGHT + Fore.WHITE + "[" + Fore.RED + "-" + Fore.WHITE + "]" + response_time_text + Fore.GREEN + f" {self.result.site_name}:" + Fore.YELLOW + " Not Found!") elif result.status == QueryStatus.UNKNOWN: if self.print_all: print(Style.BRIGHT + Fore.WHITE + "[" + Fore.RED + "-" + Fore.WHITE + "]" + Fore.GREEN + f" {self.result.site_name}:" + Fore.RED + f" {self.result.context}" + Fore.YELLOW + " ") elif result.status == QueryStatus.ILLEGAL: if self.print_all: msg = "Illegal Username Format For This Site!" print(Style.BRIGHT + Fore.WHITE + "[" + Fore.RED + "-" + Fore.WHITE + "]" + Fore.GREEN + f" {self.result.site_name}:" + Fore.YELLOW + f" {msg}") elif result.status == QueryStatus.WAF: if self.print_all: print(Style.BRIGHT + Fore.WHITE + "[" + Fore.RED + "-" + Fore.WHITE + "]" + Fore.GREEN + f" {self.result.site_name}:" + Fore.RED + " Blocked by bot detection" + Fore.YELLOW + " (proxy may help)") else: # It should be impossible to ever get here... raise ValueError( f"Unknown Query Status '{result.status}' for site '{self.result.site_name}'" ) return def finish(self, message="The processing has been finished."): """Notify Start. Will print the last line to the standard output. Keyword Arguments: self -- This object. message -- The 2 last phrases. Return Value: Nothing. """ NumberOfResults = self.countResults() - 1 print(Style.BRIGHT + Fore.GREEN + "[" + Fore.YELLOW + "*" + Fore.GREEN + "] Search completed with" + Fore.WHITE + f" {NumberOfResults} " + Fore.GREEN + "results" + Style.RESET_ALL ) def __str__(self): """Convert Object To String. Keyword Arguments: self -- This object. Return Value: Nicely formatted string to get information about this object. """ return str(self.result) ================================================ FILE: sherlock_project/py.typed ================================================ ================================================ FILE: sherlock_project/resources/data.json ================================================ { "$schema": "data.schema.json", "1337x": { "errorMsg": [ "Error something went wrong.", "404 Not Found" ], "errorType": "message", "regexCheck": "^[A-Za-z0-9]{4,12}$", "url": "https://www.1337x.to/user/{}/", "urlMain": "https://www.1337x.to/", "username_claimed": "FitGirl" }, "2Dimensions": { "errorType": "status_code", "url": "https://2Dimensions.com/a/{}", "urlMain": "https://2Dimensions.com/", "username_claimed": "blue" }, "7Cups": { "errorType": "status_code", "url": "https://www.7cups.com/@{}", "urlMain": "https://www.7cups.com/", "username_claimed": "blue" }, "9GAG": { "errorType": "status_code", "url": "https://www.9gag.com/u/{}", "urlMain": "https://www.9gag.com/", "username_claimed": "blue" }, "APClips": { "errorMsg": "Amateur Porn Content Creators", "errorType": "message", "isNSFW": true, "url": "https://apclips.com/{}", "urlMain": "https://apclips.com/", "username_claimed": "onlybbyraq" }, "About.me": { "errorType": "status_code", "url": "https://about.me/{}", "urlMain": "https://about.me/", "username_claimed": "blue" }, "Academia.edu": { "errorType": "status_code", "regexCheck": "^[^.]*$", "url": "https://independent.academia.edu/{}", "urlMain": "https://www.academia.edu/", "username_claimed": "blue" }, "AdmireMe.Vip": { "errorMsg": "Page Not Found", "errorType": "message", "isNSFW": true, "url": "https://admireme.vip/{}", "urlMain": "https://admireme.vip/", "username_claimed": "DemiDevil" }, "Airbit": { "errorType": "status_code", "url": "https://airbit.com/{}", "urlMain": "https://airbit.com/", "username_claimed": "airbit" }, "Airliners": { "errorType": "status_code", "url": "https://www.airliners.net/user/{}/profile/photos", "urlMain": "https://www.airliners.net/", "username_claimed": "yushinlin" }, "All Things Worn": { "errorMsg": "Sell Used Panties", "errorType": "message", "isNSFW": true, "url": "https://www.allthingsworn.com/profile/{}", "urlMain": "https://www.allthingsworn.com", "username_claimed": "pink" }, "AllMyLinks": { "errorMsg": "Page not found", "errorType": "message", "regexCheck": "^[a-z0-9][a-z0-9-]{2,32}$", "url": "https://allmylinks.com/{}", "urlMain": "https://allmylinks.com/", "username_claimed": "blue" }, "AniWorld": { "errorMsg": "Dieses Profil ist nicht verf\u00fcgbar", "errorType": "message", "url": "https://aniworld.to/user/profil/{}", "urlMain": "https://aniworld.to/", "username_claimed": "blue" }, "Anilist": { "errorType": "status_code", "regexCheck": "^[A-Za-z0-9]{2,20}$", "request_method": "POST", "request_payload": { "query": "query($name:String){User(name:$name){id}}", "variables": { "name": "{}" } }, "url": "https://anilist.co/user/{}/", "urlMain": "https://anilist.co/", "urlProbe": "https://graphql.anilist.co/", "username_claimed": "Josh" }, "Apple Developer": { "errorType": "status_code", "url": "https://developer.apple.com/forums/profile/{}", "urlMain": "https://developer.apple.com", "username_claimed": "lio24d" }, "Apple Discussions": { "errorMsg": "Looking for something in Apple Support Communities?", "errorType": "message", "url": "https://discussions.apple.com/profile/{}", "urlMain": "https://discussions.apple.com", "username_claimed": "jason" }, "Aparat": { "errorType": "status_code", "request_method": "GET", "url": "https://www.aparat.com/{}/", "urlMain": "https://www.aparat.com/", "urlProbe": "https://www.aparat.com/api/fa/v1/user/user/information/username/{}", "username_claimed": "jadi" }, "Archive of Our Own": { "errorType": "status_code", "regexCheck": "^[^.]*?$", "url": "https://archiveofourown.org/users/{}", "urlMain": "https://archiveofourown.org/", "username_claimed": "test" }, "Archive.org": { "__comment__": "'The resource could not be found' relates to archive downtime", "errorMsg": [ "could not fetch an account with user item identifier", "The resource could not be found", "Internet Archive services are temporarily offline" ], "errorType": "message", "url": "https://archive.org/details/@{}", "urlMain": "https://archive.org", "urlProbe": "https://archive.org/details/@{}?noscript=true", "username_claimed": "blue" }, "Arduino Forum": { "errorType": "status_code", "url": "https://forum.arduino.cc/u/{}/summary", "urlMain": "https://forum.arduino.cc/", "username_claimed": "system" }, "ArtStation": { "errorType": "status_code", "url": "https://www.artstation.com/{}", "urlMain": "https://www.artstation.com/", "username_claimed": "Blue" }, "Asciinema": { "errorType": "status_code", "url": "https://asciinema.org/~{}", "urlMain": "https://asciinema.org", "username_claimed": "red" }, "Ask Fedora": { "errorType": "status_code", "url": "https://ask.fedoraproject.org/u/{}", "urlMain": "https://ask.fedoraproject.org/", "username_claimed": "red" }, "Atcoder": { "errorType": "status_code", "url": "https://atcoder.jp/users/{}", "urlMain": "https://atcoder.jp/", "username_claimed": "ksun48" }, "Vjudge": { "errorType": "status_code", "url": "https://VJudge.net/user/{}", "urlMain": "https://VJudge.net/", "username_claimed": "tokitsukaze" }, "Audiojungle": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9_]+$", "url": "https://audiojungle.net/user/{}", "urlMain": "https://audiojungle.net/", "username_claimed": "blue" }, "Autofrage": { "errorType": "status_code", "url": "https://www.autofrage.net/nutzer/{}", "urlMain": "https://www.autofrage.net/", "username_claimed": "autofrage" }, "Avizo": { "errorType": "response_url", "errorUrl": "https://www.avizo.cz/", "url": "https://www.avizo.cz/{}/", "urlMain": "https://www.avizo.cz/", "username_claimed": "blue" }, "AWS Skills Profile": { "errorType": "message", "errorMsg": "shareProfileAccepted\":false", "url": "https://skillsprofile.skillbuilder.aws/user/{}/", "urlMain": "https://skillsprofile.skillbuilder.aws", "username_claimed": "mayank04pant" }, "BOOTH": { "errorType": "response_url", "errorUrl": "https://booth.pm/", "regexCheck": "^[\\w@-]+?$", "url": "https://{}.booth.pm/", "urlMain": "https://booth.pm/", "username_claimed": "blue" }, "Bandcamp": { "errorType": "status_code", "url": "https://www.bandcamp.com/{}", "urlMain": "https://www.bandcamp.com/", "username_claimed": "blue" }, "Bazar.cz": { "errorType": "response_url", "errorUrl": "https://www.bazar.cz/error404.aspx", "url": "https://www.bazar.cz/{}/", "urlMain": "https://www.bazar.cz/", "username_claimed": "pianina" }, "Behance": { "errorType": "status_code", "url": "https://www.behance.net/{}", "urlMain": "https://www.behance.net/", "username_claimed": "blue" }, "Bezuzyteczna": { "errorType": "status_code", "url": "https://bezuzyteczna.pl/uzytkownicy/{}", "urlMain": "https://bezuzyteczna.pl", "username_claimed": "Jackson" }, "BiggerPockets": { "errorType": "status_code", "url": "https://www.biggerpockets.com/users/{}", "urlMain": "https://www.biggerpockets.com/", "username_claimed": "blue" }, "BioHacking": { "errorType": "status_code", "url": "https://forum.dangerousthings.com/u/{}", "urlMain": "https://forum.dangerousthings.com/", "username_claimed": "blue" }, "BitBucket": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9-_]{1,30}$", "url": "https://bitbucket.org/{}/", "urlMain": "https://bitbucket.org/", "username_claimed": "white" }, "Bitwarden Forum": { "errorType": "status_code", "regexCheck": "^(?![.-])[a-zA-Z0-9_.-]{3,20}$", "url": "https://community.bitwarden.com/u/{}/summary", "urlMain": "https://bitwarden.com/", "username_claimed": "blue" }, "Blipfoto": { "errorType": "status_code", "url": "https://www.blipfoto.com/{}", "urlMain": "https://www.blipfoto.com/", "username_claimed": "blue" }, "Blitz Tactics": { "errorMsg": "That page doesn't exist", "errorType": "message", "url": "https://blitztactics.com/{}", "urlMain": "https://blitztactics.com/", "username_claimed": "Lance5500" }, "Blogger": { "errorType": "status_code", "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$", "url": "https://{}.blogspot.com", "urlMain": "https://www.blogger.com/", "username_claimed": "blue" }, "Bluesky": { "errorType": "status_code", "url": "https://bsky.app/profile/{}.bsky.social", "urlProbe": "https://public.api.bsky.app/xrpc/app.bsky.actor.getProfile?actor={}.bsky.social", "urlMain": "https://bsky.app/", "username_claimed": "mcuban" }, "BongaCams": { "errorType": "status_code", "isNSFW": true, "url": "https://pt.bongacams.com/profile/{}", "urlMain": "https://pt.bongacams.com", "username_claimed": "asuna-black" }, "Bookcrossing": { "errorType": "status_code", "url": "https://www.bookcrossing.com/mybookshelf/{}/", "urlMain": "https://www.bookcrossing.com/", "username_claimed": "blue" }, "BoardGameGeek": { "errorMsg": "\"isValid\":true", "errorType": "message", "url": "https://boardgamegeek.com/user/{}", "urlMain": "https://boardgamegeek.com/", "urlProbe": "https://api.geekdo.com/api/accounts/validate/username?username={}", "username_claimed": "blue" }, "BraveCommunity": { "errorType": "status_code", "url": "https://community.brave.com/u/{}/", "urlMain": "https://community.brave.com/", "username_claimed": "blue" }, "BreachSta.rs Forum": { "errorMsg": "Error - BreachStars", "errorType": "message", "url": "https://breachsta.rs/profile/{}", "urlMain": "https://breachsta.rs/", "username_claimed": "Sleepybubble" }, "BugCrowd": { "errorType": "status_code", "url": "https://bugcrowd.com/{}", "urlMain": "https://bugcrowd.com/", "username_claimed": "ppfeister" }, "BuyMeACoffee": { "errorType": "status_code", "regexCheck": "[a-zA-Z0-9]{3,15}", "url": "https://buymeacoff.ee/{}", "urlMain": "https://www.buymeacoffee.com/", "urlProbe": "https://www.buymeacoffee.com/{}", "username_claimed": "red" }, "BuzzFeed": { "errorType": "status_code", "url": "https://buzzfeed.com/{}", "urlMain": "https://buzzfeed.com/", "username_claimed": "blue" }, "Cfx.re Forum": { "errorType": "status_code", "url": "https://forum.cfx.re/u/{}/summary", "urlMain": "https://forum.cfx.re", "username_claimed": "hightowerlssd" }, "CGTrader": { "errorType": "status_code", "regexCheck": "^[^.]*?$", "url": "https://www.cgtrader.com/{}", "urlMain": "https://www.cgtrader.com", "username_claimed": "blue" }, "CNET": { "errorType": "status_code", "regexCheck": "^[a-z].*$", "url": "https://www.cnet.com/profiles/{}/", "urlMain": "https://www.cnet.com/", "username_claimed": "melliott" }, "CSSBattle": { "errorType": "status_code", "url": "https://cssbattle.dev/player/{}", "urlMain": "https://cssbattle.dev", "username_claimed": "beo" }, "CTAN": { "errorType": "status_code", "url": "https://ctan.org/author/{}", "urlMain": "https://ctan.org/", "username_claimed": "briggs" }, "Caddy Community": { "errorType": "status_code", "url": "https://caddy.community/u/{}/summary", "urlMain": "https://caddy.community/", "username_claimed": "taako_magnusen" }, "Car Talk Community": { "errorType": "status_code", "url": "https://community.cartalk.com/u/{}/summary", "urlMain": "https://community.cartalk.com/", "username_claimed": "always_fixing" }, "Carbonmade": { "errorType": "response_url", "errorUrl": "https://carbonmade.com/fourohfour?domain={}.carbonmade.com", "regexCheck": "^[\\w@-]+?$", "url": "https://{}.carbonmade.com", "urlMain": "https://carbonmade.com/", "username_claimed": "jenny" }, "Career.habr": { "errorMsg": "

\u041e\u0448\u0438\u0431\u043a\u0430 404

", "errorType": "message", "url": "https://career.habr.com/{}", "urlMain": "https://career.habr.com/", "username_claimed": "blue" }, "CashApp": { "errorType": "status_code", "url": "https://cash.app/${}", "urlMain": "https://cash.app", "username_claimed": "hotdiggitydog" }, "Championat": { "errorType": "status_code", "url": "https://www.championat.com/user/{}", "urlMain": "https://www.championat.com/", "username_claimed": "blue" }, "Chaos": { "errorType": "status_code", "url": "https://chaos.social/@{}", "urlMain": "https://chaos.social/", "username_claimed": "ordnung" }, "Chatujme.cz": { "errorMsg": "Neexistujic\u00ed profil", "errorType": "message", "regexCheck": "^[a-zA-Z][a-zA-Z1-9_-]*$", "url": "https://profil.chatujme.cz/{}", "urlMain": "https://chatujme.cz/", "username_claimed": "david" }, "ChaturBate": { "errorType": "status_code", "isNSFW": true, "url": "https://chaturbate.com/{}", "urlMain": "https://chaturbate.com", "username_claimed": "cute18cute" }, "Chess": { "errorMsg": "Username is valid", "errorType": "message", "regexCheck": "^[a-zA-Z0-9_]{3,25}$", "url": "https://www.chess.com/member/{}", "urlMain": "https://www.chess.com/", "urlProbe": "https://www.chess.com/callback/user/valid?username={}", "username_claimed": "blue" }, "Choice Community": { "errorType": "status_code", "url": "https://choice.community/u/{}/summary", "urlMain": "https://choice.community/", "username_claimed": "gordon" }, "Clapper": { "errorType": "status_code", "url": "https://clapperapp.com/{}", "urlMain": "https://clapperapp.com/", "username_claimed": "blue" }, "CloudflareCommunity": { "errorType": "status_code", "url": "https://community.cloudflare.com/u/{}", "urlMain": "https://community.cloudflare.com/", "username_claimed": "blue" }, "Clozemaster": { "errorMsg": "Oh no! Player not found.", "errorType": "message", "url": "https://www.clozemaster.com/players/{}", "urlMain": "https://www.clozemaster.com", "username_claimed": "green" }, "Clubhouse": { "errorType": "status_code", "url": "https://www.clubhouse.com/@{}", "urlMain": "https://www.clubhouse.com", "username_claimed": "waniathar" }, "Code Snippet Wiki": { "errorMsg": "This user has not filled out their profile page yet", "errorType": "message", "url": "https://codesnippets.fandom.com/wiki/User:{}", "urlMain": "https://codesnippets.fandom.com", "username_claimed": "bob" }, "Codeberg": { "errorType": "status_code", "url": "https://codeberg.org/{}", "urlMain": "https://codeberg.org/", "username_claimed": "blue" }, "Codecademy": { "errorMsg": "This profile could not be found", "errorType": "message", "url": "https://www.codecademy.com/profiles/{}", "urlMain": "https://www.codecademy.com/", "username_claimed": "blue" }, "Codechef": { "errorType": "response_url", "errorUrl": "https://www.codechef.com/", "url": "https://www.codechef.com/users/{}", "urlMain": "https://www.codechef.com/", "username_claimed": "blue" }, "Codeforces": { "errorType": "status_code", "url": "https://codeforces.com/profile/{}", "urlMain": "https://codeforces.com/", "urlProbe": "https://codeforces.com/api/user.info?handles={}", "username_claimed": "tourist" }, "Codepen": { "errorType": "status_code", "url": "https://codepen.io/{}", "urlMain": "https://codepen.io/", "username_claimed": "blue" }, "Coders Rank": { "errorMsg": "not a registered member", "errorType": "message", "regexCheck": "^[a-zA-Z0-9](?:[a-zA-Z0-9]|-(?=[a-zA-Z0-9])){0,38}$", "url": "https://profile.codersrank.io/user/{}/", "urlMain": "https://codersrank.io/", "username_claimed": "rootkit7628" }, "Coderwall": { "errorType": "status_code", "url": "https://coderwall.com/{}", "urlMain": "https://coderwall.com", "username_claimed": "hacker" }, "CodeSandbox": { "errorType": "message", "errorMsg": "Could not find user with username", "regexCheck": "^[a-zA-Z0-9_-]{3,30}$", "url": "https://codesandbox.io/u/{}", "urlProbe": "https://codesandbox.io/api/v1/users/{}", "urlMain": "https://codesandbox.io", "username_claimed": "icyjoseph" }, "Codewars": { "errorType": "status_code", "url": "https://www.codewars.com/users/{}", "urlMain": "https://www.codewars.com", "username_claimed": "example" }, "Codolio": { "errorType": "message", "errorMsg": "Page Not Found | Codolio", "url": "https://codolio.com/profile/{}", "urlMain": "https://codolio.com/", "username_claimed": "testuser", "regexCheck": "^[a-zA-Z0-9_-]{3,30}$" }, "Coinvote": { "errorType": "status_code", "url": "https://coinvote.cc/profile/{}", "urlMain": "https://coinvote.cc/", "username_claimed": "blue" }, "ColourLovers": { "errorType": "status_code", "url": "https://www.colourlovers.com/lover/{}", "urlMain": "https://www.colourlovers.com/", "username_claimed": "blue" }, "Contently": { "errorType": "response_url", "errorUrl": "https://contently.com", "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$", "url": "https://{}.contently.com/", "urlMain": "https://contently.com/", "username_claimed": "jordanteicher" }, "Coroflot": { "errorType": "status_code", "url": "https://www.coroflot.com/{}", "urlMain": "https://coroflot.com/", "username_claimed": "blue" }, "Cplusplus": { "errorType": "message", "errorMsg": "404 Page Not Found", "url": "https://cplusplus.com/user/{}", "urlMain": "https://cplusplus.com", "username_claimed": "mbozzi" }, "Cracked": { "errorType": "response_url", "errorUrl": "https://www.cracked.com/", "url": "https://www.cracked.com/members/{}/", "urlMain": "https://www.cracked.com/", "username_claimed": "blue" }, "Cracked Forum": { "errorMsg": "The member you specified is either invalid or doesn't exist", "errorType": "message", "url": "https://cracked.sh/{}", "urlMain": "https://cracked.sh/", "username_claimed": "Blue" }, "Credly": { "errorType": "status_code", "url": "https://www.credly.com/users/{}", "urlMain": "https://www.credly.com/", "username_claimed": "credly" }, "Crevado": { "errorType": "status_code", "regexCheck": "^[\\w@-]+?$", "url": "https://{}.crevado.com", "urlMain": "https://crevado.com/", "username_claimed": "blue" }, "Crowdin": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9._-]{2,255}$", "url": "https://crowdin.com/profile/{}", "urlMain": "https://crowdin.com/", "username_claimed": "blue" }, "CryptoHack": { "errorType": "response_url", "errorUrl": "https://cryptohack.org/", "url": "https://cryptohack.org/user/{}/", "urlMain": "https://cryptohack.org/", "username_claimed": "blue" }, "Cryptomator Forum": { "errorType": "status_code", "url": "https://community.cryptomator.org/u/{}", "urlMain": "https://community.cryptomator.org/", "username_claimed": "michael" }, "Cults3D": { "errorMsg": "Oh dear, this page is not working!", "errorType": "message", "url": "https://cults3d.com/en/users/{}/creations", "urlMain": "https://cults3d.com/en", "username_claimed": "brown" }, "CyberDefenders": { "errorType": "status_code", "regexCheck": "^[^\\/:*?\"<>|@]{3,50}$", "request_method": "GET", "url": "https://cyberdefenders.org/p/{}", "urlMain": "https://cyberdefenders.org/", "username_claimed": "mlohn" }, "DEV Community": { "errorType": "status_code", "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$", "url": "https://dev.to/{}", "urlMain": "https://dev.to/", "username_claimed": "blue" }, "DMOJ": { "errorMsg": "No such user", "errorType": "message", "url": "https://dmoj.ca/user/{}", "urlMain": "https://dmoj.ca/", "username_claimed": "junferno" }, "DailyMotion": { "errorType": "status_code", "url": "https://www.dailymotion.com/{}", "urlMain": "https://www.dailymotion.com/", "username_claimed": "blue" }, "dcinside": { "errorType": "status_code", "url": "https://gallog.dcinside.com/{}", "urlMain": "https://www.dcinside.com/", "username_claimed": "anrbrb" }, "Dealabs": { "errorMsg": "La page que vous essayez", "errorType": "message", "regexCheck": "[a-z0-9]{4,16}", "url": "https://www.dealabs.com/profile/{}", "urlMain": "https://www.dealabs.com/", "username_claimed": "blue" }, "DeviantArt": { "errorType": "message", "errorMsg": "Llama Not Found", "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$", "url": "https://www.deviantart.com/{}", "urlMain": "https://www.deviantart.com/", "username_claimed": "blue" }, "DigitalSpy": { "errorMsg": "The page you were looking for could not be found.", "errorType": "message", "url": "https://forums.digitalspy.com/profile/{}", "urlMain": "https://forums.digitalspy.com/", "username_claimed": "blue", "regexCheck": "^\\w{3,20}$" }, "Discogs": { "errorType": "status_code", "url": "https://www.discogs.com/user/{}", "urlMain": "https://www.discogs.com/", "username_claimed": "blue" }, "Discord": { "errorType": "message", "url": "https://discord.com", "urlMain": "https://discord.com/", "urlProbe": "https://discord.com/api/v9/unique-username/username-attempt-unauthed", "errorMsg": ["{\"taken\":false}", "The resource is being rate limited"], "request_method": "POST", "request_payload": { "username": "{}" }, "headers": { "Content-Type": "application/json" }, "username_claimed": "blue" }, "Discord.bio": { "errorType": "message", "errorMsg": "Server Error (500)", "url": "https://discords.com/api-v2/bio/details/{}", "urlMain": "https://discord.bio/", "username_claimed": "robert" }, "Discuss.Elastic.co": { "errorType": "status_code", "url": "https://discuss.elastic.co/u/{}", "urlMain": "https://discuss.elastic.co/", "username_claimed": "blue" }, "Diskusjon.no": { "errorMsg": "{\"result\":\"ok\"}", "errorType": "message", "regexCheck": "^[a-zA-Z0-9_.-]{3,40}$", "urlProbe": "https://www.diskusjon.no/?app=core&module=system&controller=ajax&do=usernameExists&input={}", "url": "https://www.diskusjon.no", "urlMain": "https://www.diskusjon.no", "username_claimed": "blue" }, "Disqus": { "errorType": "status_code", "url": "https://disqus.com/{}", "urlMain": "https://disqus.com/", "username_claimed": "blue" }, "Docker Hub": { "errorType": "status_code", "url": "https://hub.docker.com/u/{}/", "urlMain": "https://hub.docker.com/", "urlProbe": "https://hub.docker.com/v2/users/{}/", "username_claimed": "blue" }, "Dribbble": { "errorMsg": "Whoops, that page is gone.", "errorType": "message", "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$", "url": "https://dribbble.com/{}", "urlMain": "https://dribbble.com/", "username_claimed": "blue" }, "Duolingo": { "errorMsg": "{\"users\":[]}", "errorType": "message", "url": "https://www.duolingo.com/profile/{}", "urlMain": "https://duolingo.com/", "urlProbe": "https://www.duolingo.com/2017-06-30/users?username={}", "username_claimed": "blue" }, "Eintracht Frankfurt Forum": { "errorType": "status_code", "regexCheck": "^[^.]*?$", "url": "https://community.eintracht.de/fans/{}", "urlMain": "https://community.eintracht.de/", "username_claimed": "mmammu" }, "Empretienda AR": { "__comment__": "Note that Error Connecting responses may be indicative of unclaimed handles", "errorType": "status_code", "url": "https://{}.empretienda.com.ar", "urlMain": "https://empretienda.com", "username_claimed": "camalote" }, "Envato Forum": { "errorType": "status_code", "url": "https://forums.envato.com/u/{}", "urlMain": "https://forums.envato.com/", "username_claimed": "enabled" }, "Erome": { "errorType": "status_code", "isNSFW": true, "url": "https://www.erome.com/{}", "urlMain": "https://www.erome.com/", "username_claimed": "bob" }, "Exposure": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9-]{1,63}$", "url": "https://{}.exposure.co/", "urlMain": "https://exposure.co/", "username_claimed": "jonasjacobsson" }, "exophase": { "errorType": "status_code", "url": "https://www.exophase.com/user/{}/", "urlMain": "https://www.exophase.com/", "username_claimed": "blue" }, "EyeEm": { "errorType": "status_code", "url": "https://www.eyeem.com/u/{}", "urlMain": "https://www.eyeem.com/", "username_claimed": "blue" }, "F3.cool": { "errorType": "status_code", "url": "https://f3.cool/{}/", "urlMain": "https://f3.cool/", "username_claimed": "blue" }, "Fameswap": { "errorType": "status_code", "url": "https://fameswap.com/user/{}", "urlMain": "https://fameswap.com/", "username_claimed": "fameswap" }, "Fandom": { "errorType": "status_code", "url": "https://www.fandom.com/u/{}", "urlMain": "https://www.fandom.com/", "username_claimed": "Jungypoo" }, "Fanpop": { "errorType": "response_url", "errorUrl": "https://www.fanpop.com/", "url": "https://www.fanpop.com/fans/{}", "urlMain": "https://www.fanpop.com/", "username_claimed": "blue" }, "Finanzfrage": { "errorType": "status_code", "url": "https://www.finanzfrage.net/nutzer/{}", "urlMain": "https://www.finanzfrage.net/", "username_claimed": "finanzfrage" }, "Flickr": { "errorType": "status_code", "url": "https://www.flickr.com/people/{}", "urlMain": "https://www.flickr.com/", "username_claimed": "blue" }, "Flightradar24": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9_]{3,20}$", "url": "https://my.flightradar24.com/{}", "urlMain": "https://www.flightradar24.com/", "username_claimed": "jebbrooks" }, "Flipboard": { "errorType": "status_code", "regexCheck": "^([a-zA-Z0-9_]){1,15}$", "url": "https://flipboard.com/@{}", "urlMain": "https://flipboard.com/", "username_claimed": "blue" }, "Football": { "errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u0441 \u0442\u0430\u043a\u0438\u043c \u0438\u043c\u0435\u043d\u0435\u043c \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d", "errorType": "message", "url": "https://www.rusfootball.info/user/{}/", "urlMain": "https://www.rusfootball.info/", "username_claimed": "solo87" }, "FortniteTracker": { "errorType": "status_code", "url": "https://fortnitetracker.com/profile/all/{}", "urlMain": "https://fortnitetracker.com/challenges", "username_claimed": "blue" }, "Forum Ophilia": { "errorMsg": "that user does not exist", "errorType": "message", "isNSFW": true, "url": "https://www.forumophilia.com/profile.php?mode=viewprofile&u={}", "urlMain": "https://www.forumophilia.com/", "username_claimed": "bob" }, "Fosstodon": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9_]{1,30}$", "url": "https://fosstodon.org/@{}", "urlMain": "https://fosstodon.org/", "username_claimed": "blue" }, "Framapiaf": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9_]{1,30}$", "url": "https://framapiaf.org/@{}", "urlMain": "https://framapiaf.org", "username_claimed": "pylapp" }, "Freelancer": { "errorMsg": "\"users\":{}", "errorType": "message", "url": "https://www.freelancer.com/u/{}", "urlMain": "https://www.freelancer.com/", "urlProbe": "https://www.freelancer.com/api/users/0.1/users?usernames%5B%5D={}&compact=true", "username_claimed": "red0xff" }, "Freesound": { "errorType": "status_code", "url": "https://freesound.org/people/{}/", "urlMain": "https://freesound.org/", "username_claimed": "blue" }, "GNOME VCS": { "errorType": "response_url", "errorUrl": "https://gitlab.gnome.org/{}", "regexCheck": "^(?!-)[a-zA-Z0-9_.-]{2,255}(? GIFs - Find & Share on GIPHY", "url": "https://giphy.com/{}", "urlMain": "https://giphy.com/", "username_claimed": "red" }, "GitBook": { "errorType": "status_code", "regexCheck": "^[\\w@-]+?$", "url": "https://{}.gitbook.io/", "urlMain": "https://gitbook.com/", "username_claimed": "gitbook" }, "GitHub": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9](?:[a-zA-Z0-9]|-(?=[a-zA-Z0-9])){0,38}$", "url": "https://www.github.com/{}", "urlMain": "https://www.github.com/", "username_claimed": "blue" }, "Warframe Market": { "errorType": "status_code", "request_method": "GET", "url": "https://warframe.market/profile/{}", "urlMain": "https://warframe.market/", "urlProbe": "https://api.warframe.market/v2/user/{}", "username_claimed": "kaiallalone" }, "GitLab": { "errorMsg": "[]", "errorType": "message", "url": "https://gitlab.com/{}", "urlMain": "https://gitlab.com/", "urlProbe": "https://gitlab.com/api/v4/users?username={}", "username_claimed": "blue" }, "Gitea": { "errorType": "status_code", "url": "https://gitea.com/{}", "urlMain": "https://gitea.com/", "username_claimed": "xorm" }, "Gitee": { "errorType": "status_code", "url": "https://gitee.com/{}", "urlMain": "https://gitee.com/", "username_claimed": "wizzer" }, "GoodReads": { "errorType": "status_code", "url": "https://www.goodreads.com/{}", "urlMain": "https://www.goodreads.com/", "username_claimed": "blue" }, "Google Play": { "errorMsg": "the requested URL was not found on this server", "errorType": "message", "url": "https://play.google.com/store/apps/developer?id={}", "urlMain": "https://play.google.com", "username_claimed": "GitHub" }, "Gradle": { "errorType": "status_code", "regexCheck": "^(?!-)[a-zA-Z0-9-]{3,}(?User Not Found - Hive", "errorType": "message", "url": "https://hive.blog/@{}", "urlMain": "https://hive.blog/", "username_claimed": "mango-juice" }, "Holopin": { "errorMsg": "true", "errorType": "message", "request_method": "POST", "request_payload": { "username": "{}" }, "url": "https://holopin.io/@{}", "urlMain": "https://holopin.io", "urlProbe": "https://www.holopin.io/api/auth/username", "username_claimed": "red" }, "Houzz": { "errorType": "status_code", "url": "https://houzz.com/user/{}", "urlMain": "https://houzz.com/", "username_claimed": "blue" }, "HubPages": { "errorType": "status_code", "url": "https://hubpages.com/@{}", "urlMain": "https://hubpages.com/", "username_claimed": "blue" }, "Hubski": { "errorMsg": "No such user", "errorType": "message", "url": "https://hubski.com/user/{}", "urlMain": "https://hubski.com/", "username_claimed": "blue" }, "HudsonRock": { "errorMsg": "This username is not associated", "errorType": "message", "url": "https://cavalier.hudsonrock.com/api/json/v2/osint-tools/search-by-username?username={}", "urlMain": "https://hudsonrock.com", "username_claimed": "testadmin" }, "Hugging Face": { "errorType": "status_code", "url": "https://huggingface.co/{}", "urlMain": "https://huggingface.co/", "username_claimed": "Pasanlaksitha" }, "IFTTT": { "errorType": "status_code", "regexCheck": "^[A-Za-z0-9]{3,35}$", "url": "https://www.ifttt.com/p/{}", "urlMain": "https://www.ifttt.com/", "username_claimed": "blue" }, "Ifunny": { "errorType": "status_code", "url": "https://ifunny.co/user/{}", "urlMain": "https://ifunny.co/", "username_claimed": "agua" }, "IRC-Galleria": { "errorType": "response_url", "errorUrl": "https://irc-galleria.net/users/search?username={}", "url": "https://irc-galleria.net/user/{}", "urlMain": "https://irc-galleria.net/", "username_claimed": "appas" }, "Icons8 Community": { "errorType": "status_code", "url": "https://community.icons8.com/u/{}/summary", "urlMain": "https://community.icons8.com/", "username_claimed": "thefourCraft" }, "Image Fap": { "errorMsg": "Not found", "errorType": "message", "isNSFW": true, "url": "https://www.imagefap.com/profile/{}", "urlMain": "https://www.imagefap.com/", "username_claimed": "blue" }, "ImgUp.cz": { "errorType": "status_code", "url": "https://imgup.cz/{}", "urlMain": "https://imgup.cz/", "username_claimed": "adam" }, "Imgur": { "errorType": "status_code", "url": "https://imgur.com/user/{}", "urlMain": "https://imgur.com/", "urlProbe": "https://api.imgur.com/account/v1/accounts/{}?client_id=546c25a59c58ad7", "username_claimed": "blue" }, "imood": { "errorType": "status_code", "url": "https://www.imood.com/users/{}", "urlMain": "https://www.imood.com/", "username_claimed": "blue" }, "Instagram": { "errorType": "status_code", "url": "https://instagram.com/{}", "urlMain": "https://instagram.com/", "urlProbe": "https://imginn.com/{}", "username_claimed": "instagram" }, "Instapaper": { "errorType": "status_code", "request_method": "GET", "url": "https://www.instapaper.com/p/{}", "urlMain": "https://www.instapaper.com/", "username_claimed": "john" }, "Instructables": { "errorType": "status_code", "url": "https://www.instructables.com/member/{}", "urlMain": "https://www.instructables.com/", "urlProbe": "https://www.instructables.com/json-api/showAuthorExists?screenName={}", "username_claimed": "blue" }, "Intigriti": { "errorType": "status_code", "regexCheck": "[a-z0-9_]{1,25}", "request_method": "GET", "url": "https://app.intigriti.com/profile/{}", "urlMain": "https://app.intigriti.com", "urlProbe": "https://api.intigriti.com/user/public/profile/{}", "username_claimed": "blue" }, "Ionic Forum": { "errorType": "status_code", "url": "https://forum.ionicframework.com/u/{}", "urlMain": "https://forum.ionicframework.com/", "username_claimed": "theblue222" }, "Issuu": { "errorType": "status_code", "url": "https://issuu.com/{}", "urlMain": "https://issuu.com/", "username_claimed": "jenny" }, "Itch.io": { "errorType": "status_code", "regexCheck": "^[\\w@-]+?$", "url": "https://{}.itch.io/", "urlMain": "https://itch.io/", "username_claimed": "blue" }, "Itemfix": { "errorMsg": "ItemFix - Channel: ", "errorType": "message", "url": "https://www.itemfix.com/c/{}", "urlMain": "https://www.itemfix.com/", "username_claimed": "blue" }, "Jellyfin Weblate": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9@._-]{1,150}$", "url": "https://translate.jellyfin.org/user/{}/", "urlMain": "https://translate.jellyfin.org/", "username_claimed": "EraYaN" }, "Jimdo": { "errorType": "status_code", "regexCheck": "^[\\w@-]+?$", "url": "https://{}.jimdosite.com", "urlMain": "https://jimdosite.com/", "username_claimed": "jenny" }, "Joplin Forum": { "errorType": "status_code", "url": "https://discourse.joplinapp.org/u/{}", "urlMain": "https://discourse.joplinapp.org/", "username_claimed": "laurent" }, "Jupyter Community Forum": { "errorMsg": "Oops! That page doesn’t exist or is private.", "errorType": "message", "url": "https://discourse.jupyter.org/u/{}/summary", "urlMain": "https://discourse.jupyter.org", "username_claimed": "choldgraf" }, "Kaggle": { "errorType": "status_code", "url": "https://www.kaggle.com/{}", "urlMain": "https://www.kaggle.com/", "username_claimed": "dansbecker" }, "kaskus": { "errorType": "status_code", "url": "https://www.kaskus.co.id/@{}", "urlMain": "https://www.kaskus.co.id", "urlProbe": "https://www.kaskus.co.id/api/users?username={}", "request_method": "GET", "username_claimed": "l0mbart" }, "Keybase": { "errorType": "status_code", "url": "https://keybase.io/{}", "urlMain": "https://keybase.io/", "username_claimed": "blue" }, "Kick": { "__comment__": "Cloudflare. Only viable when proxied.", "errorType": "status_code", "url": "https://kick.com/{}", "urlMain": "https://kick.com/", "urlProbe": "https://kick.com/api/v2/channels/{}", "username_claimed": "blue" }, "Kik": { "errorMsg": "The page you requested was not found", "errorType": "message", "url": "https://kik.me/{}", "urlMain": "http://kik.me/", "urlProbe": "https://ws2.kik.com/user/{}", "username_claimed": "blue" }, "Kongregate": { "errorType": "status_code", "headers": { "Accept": "text/html" }, "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$", "url": "https://www.kongregate.com/accounts/{}", "urlMain": "https://www.kongregate.com/", "username_claimed": "blue" }, "Kvinneguiden": { "errorMsg": "{\"result\":\"ok\"}", "errorType": "message", "regexCheck": "^[a-zA-Z0-9_.-]{3,18}$", "urlProbe": "https://forum.kvinneguiden.no/?app=core&module=system&controller=ajax&do=usernameExists&input={}", "url": "https://forum.kvinneguiden.no", "urlMain": "https://forum.kvinneguiden.no", "username_claimed": "blue" }, "LOR": { "errorType": "status_code", "url": "https://www.linux.org.ru/people/{}/profile", "urlMain": "https://linux.org.ru/", "username_claimed": "red" }, "Laracast": { "errorType": "status_code", "url": "https://laracasts.com/@{}", "urlMain": "https://laracasts.com/", "regexCheck": "^[a-zA-Z0-9_-]{3,}$", "username_claimed": "user1" }, "Launchpad": { "errorType": "status_code", "url": "https://launchpad.net/~{}", "urlMain": "https://launchpad.net/", "username_claimed": "blue" }, "LeetCode": { "errorType": "status_code", "url": "https://leetcode.com/{}", "urlMain": "https://leetcode.com/", "username_claimed": "blue" }, "LemmyWorld": { "errorType": "message", "errorMsg": "

Error!

", "url": "https://lemmy.world/u/{}", "urlMain": "https://lemmy.world", "username_claimed": "blue" }, "LessWrong": { "url": "https://www.lesswrong.com/users/{}", "urlMain": "https://www.lesswrong.com/", "errorType": "response_url", "errorUrl": "https://www.lesswrong.com/", "username_claimed": "habryka" }, "Letterboxd": { "errorMsg": "Sorry, we can\u2019t find the page you\u2019ve requested.", "errorType": "message", "url": "https://letterboxd.com/{}", "urlMain": "https://letterboxd.com/", "username_claimed": "blue" }, "LibraryThing": { "errorMsg": "

Error: This user doesn't exist

", "errorType": "message", "headers": { "Cookie": "LTAnonSessionID=3159599315; LTUnifiedCookie=%7B%22areyouhuman%22%3A1%7D; " }, "url": "https://www.librarything.com/profile/{}", "urlMain": "https://www.librarything.com/", "username_claimed": "blue" }, "Lichess": { "errorType": "status_code", "url": "https://lichess.org/@/{}", "urlMain": "https://lichess.org", "username_claimed": "john" }, "LinkedIn": { "errorType": "status_code", "headers": { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36", "Accept-Language": "en-US,en;q=0.9", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8" }, "regexCheck": "^[a-zA-Z0-9]{3,100}$", "request_method": "GET", "url": "https://linkedin.com/in/{}", "urlMain": "https://linkedin.com", "username_claimed": "paulpfeister" }, "Linktree": { "errorMsg": "\"statusCode\":404", "errorType": "message", "regexCheck": "^[\\w\\.]{2,30}$", "url": "https://linktr.ee/{}", "urlMain": "https://linktr.ee/", "username_claimed": "anne" }, "LinuxFR.org": { "errorType": "status_code", "url": "https://linuxfr.org/users/{}", "urlMain": "https://linuxfr.org/", "username_claimed": "pylapp" }, "Listed": { "errorType": "response_url", "errorUrl": "https://listed.to/@{}", "url": "https://listed.to/@{}", "urlMain": "https://listed.to/", "username_claimed": "listed" }, "LiveJournal": { "errorType": "status_code", "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$", "url": "https://{}.livejournal.com", "urlMain": "https://www.livejournal.com/", "username_claimed": "blue" }, "Lobsters": { "errorType": "status_code", "regexCheck": "[A-Za-z0-9][A-Za-z0-9_-]{0,24}", "url": "https://lobste.rs/u/{}", "urlMain": "https://lobste.rs/", "username_claimed": "jcs" }, "LottieFiles": { "errorType": "status_code", "url": "https://lottiefiles.com/{}", "urlMain": "https://lottiefiles.com/", "username_claimed": "lottiefiles" }, "LushStories": { "errorType": "status_code", "isNSFW": true, "url": "https://www.lushstories.com/profile/{}", "urlMain": "https://www.lushstories.com/", "username_claimed": "chris_brown" }, "MMORPG Forum": { "errorType": "status_code", "url": "https://forums.mmorpg.com/profile/{}", "urlMain": "https://forums.mmorpg.com/", "username_claimed": "goku" }, "Mamot": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9_]{1,30}$", "url": "https://mamot.fr/@{}", "urlMain": "https://mamot.fr/", "username_claimed": "anciensEnssat" }, "Medium": { "errorMsg": "Nitro Type | Competitive Typing Game | Race Your Friends", "errorType": "message", "url": "https://www.nitrotype.com/racer/{}", "urlMain": "https://www.nitrotype.com/", "username_claimed": "jianclash" }, "NotABug.org": { "errorType": "status_code", "url": "https://notabug.org/{}", "urlMain": "https://notabug.org/", "urlProbe": "https://notabug.org/{}/followers", "username_claimed": "red" }, "Nothing Community": { "errorType": "status_code", "url": "https://nothing.community/u/{}", "urlMain": "https://nothing.community/", "username_claimed": "Carl" }, "Nyaa.si": { "errorType": "status_code", "url": "https://nyaa.si/user/{}", "urlMain": "https://nyaa.si/", "username_claimed": "blue" }, "ObservableHQ": { "errorType": "message", "errorMsg": "Page not found", "url": "https://observablehq.com/@{}", "urlMain": "https://observablehq.com/", "username_claimed": "mbostock" }, "Open Collective": { "errorType": "status_code", "url": "https://opencollective.com/{}", "urlMain": "https://opencollective.com/", "username_claimed": "sindresorhus" }, "OpenGameArt": { "errorType": "status_code", "url": "https://opengameart.org/users/{}", "urlMain": "https://opengameart.org", "username_claimed": "ski" }, "OpenStreetMap": { "errorType": "status_code", "regexCheck": "^[^.]*?$", "url": "https://www.openstreetmap.org/user/{}", "urlMain": "https://www.openstreetmap.org/", "username_claimed": "blue" }, "Odysee": { "errorMsg": "", "errorType": "message", "url": "https://odysee.com/@{}", "urlMain": "https://odysee.com/", "username_claimed": "Odysee" }, "Opensource": { "errorType": "status_code", "url": "https://opensource.com/users/{}", "urlMain": "https://opensource.com/", "username_claimed": "red" }, "OurDJTalk": { "errorMsg": "The specified member cannot be found", "errorType": "message", "url": "https://ourdjtalk.com/members?username={}", "urlMain": "https://ourdjtalk.com/", "username_claimed": "steve" }, "Outgress": { "errorMsg": "Outgress - Error", "errorType": "message", "url": "https://outgress.com/agents/{}", "urlMain": "https://outgress.com/", "username_claimed": "pylapp" }, "PCGamer": { "errorMsg": "The specified member cannot be found. Please enter a member's entire name.", "errorType": "message", "url": "https://forums.pcgamer.com/members/?username={}", "urlMain": "https://pcgamer.com", "username_claimed": "admin" }, "PSNProfiles.com": { "errorType": "response_url", "errorUrl": "https://psnprofiles.com/?psnId={}", "url": "https://psnprofiles.com/{}", "urlMain": "https://psnprofiles.com/", "username_claimed": "blue" }, "Packagist": { "errorType": "response_url", "errorUrl": "https://packagist.org/search/?q={}&reason=vendor_not_found", "url": "https://packagist.org/packages/{}/", "urlMain": "https://packagist.org/", "username_claimed": "psr" }, "Pastebin": { "errorMsg": "Not Found (#404)", "errorType": "message", "url": "https://pastebin.com/u/{}", "urlMain": "https://pastebin.com/", "username_claimed": "blue" }, "Patched": { "errorMsg": "The member you specified is either invalid or doesn't exist.", "errorType": "message", "url": "https://patched.sh/User/{}", "urlMain": "https://patched.sh/", "username_claimed": "blue" }, "Patreon": { "errorType": "status_code", "url": "https://www.patreon.com/{}", "urlMain": "https://www.patreon.com/", "username_claimed": "blue" }, "PentesterLab": { "errorType": "status_code", "regexCheck": "^[\\w]{4,30}$", "url": "https://pentesterlab.com/profile/{}", "urlMain": "https://pentesterlab.com/", "username_claimed": "0day" }, "HotUKdeals": { "errorType": "status_code", "url": "https://www.hotukdeals.com/profile/{}", "urlMain": "https://www.hotukdeals.com/", "username_claimed": "Blue", "request_method": "GET" }, "Mydealz": { "errorType": "status_code", "url": "https://www.mydealz.de/profile/{}", "urlMain": "https://www.mydealz.de/", "username_claimed": "blue", "request_method": "GET" }, "Chollometro": { "errorType": "status_code", "url": "https://www.chollometro.com/profile/{}", "urlMain": "https://www.chollometro.com/", "username_claimed": "blue", "request_method": "GET" }, "PepperNL": { "errorType": "status_code", "url": "https://nl.pepper.com/profile/{}", "urlMain": "https://nl.pepper.com/", "username_claimed": "Dynaw", "request_method": "GET" }, "PepperPL": { "errorType": "status_code", "url": "https://www.pepper.pl/profile/{}", "urlMain": "https://www.pepper.pl/", "username_claimed": "FireChicken", "request_method": "GET" }, "Preisjaeger": { "errorType": "status_code", "url": "https://www.preisjaeger.at/profile/{}", "urlMain": "https://www.preisjaeger.at/", "username_claimed": "Stefan", "request_method": "GET" }, "Pepperdeals": { "errorType": "status_code", "url": "https://www.pepperdeals.se/profile/{}", "urlMain": "https://www.pepperdeals.se/", "username_claimed": "Mark", "request_method": "GET" }, "PepperealsUS": { "errorType": "status_code", "url": "https://www.pepperdeals.com/profile/{}", "urlMain": "https://www.pepperdeals.com/", "username_claimed": "Stepan", "request_method": "GET" }, "Promodescuentos": { "errorType": "status_code", "url": "https://www.promodescuentos.com/profile/{}", "urlMain": "https://www.promodescuentos.com/", "username_claimed": "blue", "request_method": "GET" }, "Periscope": { "errorType": "status_code", "url": "https://www.periscope.tv/{}/", "urlMain": "https://www.periscope.tv/", "username_claimed": "blue" }, "Pinkbike": { "errorType": "status_code", "regexCheck": "^[^.]*?$", "url": "https://www.pinkbike.com/u/{}/", "urlMain": "https://www.pinkbike.com/", "username_claimed": "blue" }, "pixelfed.social": { "errorType": "status_code", "url": "https://pixelfed.social/{}/", "urlMain": "https://pixelfed.social", "username_claimed": "pylapp" }, "PlayStore": { "errorType": "status_code", "url": "https://play.google.com/store/apps/developer?id={}", "urlMain": "https://play.google.com/store", "username_claimed": "Facebook" }, "Playstrategy": { "errorType": "status_code", "url": "https://playstrategy.org/@/{}", "urlMain": "https://playstrategy.org", "username_claimed": "oruro" }, "Plurk": { "errorMsg": "User Not Found!", "errorType": "message", "url": "https://www.plurk.com/{}", "urlMain": "https://www.plurk.com/", "username_claimed": "plurkoffice" }, "PocketStars": { "errorMsg": "Join Your Favorite Adult Stars", "errorType": "message", "isNSFW": true, "url": "https://pocketstars.com/{}", "urlMain": "https://pocketstars.com/", "username_claimed": "hacker" }, "Pokemon Showdown": { "errorType": "status_code", "url": "https://pokemonshowdown.com/users/{}", "urlMain": "https://pokemonshowdown.com", "username_claimed": "blue" }, "Polarsteps": { "errorType": "status_code", "url": "https://polarsteps.com/{}", "urlMain": "https://polarsteps.com/", "urlProbe": "https://api.polarsteps.com/users/byusername/{}", "username_claimed": "james" }, "Polygon": { "errorType": "status_code", "url": "https://www.polygon.com/users/{}", "urlMain": "https://www.polygon.com/", "username_claimed": "swiftstickler" }, "Polymart": { "errorType": "response_url", "errorUrl": "https://polymart.org/user/-1", "url": "https://polymart.org/user/{}", "urlMain": "https://polymart.org/", "username_claimed": "craciu25yt" }, "Pornhub": { "errorType": "status_code", "isNSFW": true, "url": "https://pornhub.com/users/{}", "urlMain": "https://pornhub.com/", "username_claimed": "blue" }, "ProductHunt": { "errorType": "status_code", "url": "https://www.producthunt.com/@{}", "urlMain": "https://www.producthunt.com/", "username_claimed": "jenny" }, "programming.dev": { "errorMsg": "Error!", "errorType": "message", "url": "https://programming.dev/u/{}", "urlMain": "https://programming.dev", "username_claimed": "pylapp" }, "Pychess": { "errorType": "message", "errorMsg": "404", "url": "https://www.pychess.org/@/{}", "urlMain": "https://www.pychess.org", "username_claimed": "gbtami" }, "PromoDJ": { "errorType": "status_code", "url": "http://promodj.com/{}", "urlMain": "http://promodj.com/", "username_claimed": "blue" }, "Pronouns.page": { "errorType": "status_code", "url": "https://pronouns.page/@{}", "urlMain": "https://pronouns.page/", "username_claimed": "andrea" }, "PyPi": { "errorType": "status_code", "url": "https://pypi.org/user/{}", "urlProbe": "https://pypi.org/_includes/administer-user-include/{}", "urlMain": "https://pypi.org", "username_claimed": "Blue" }, "Python.org Discussions": { "errorMsg": "Oops! That page doesn’t exist or is private.", "errorType": "message", "url": "https://discuss.python.org/u/{}/summary", "urlMain": "https://discuss.python.org", "username_claimed": "pablogsal" }, "Rajce.net": { "errorType": "status_code", "regexCheck": "^[\\w@-]+?$", "url": "https://{}.rajce.idnes.cz/", "urlMain": "https://www.rajce.idnes.cz/", "username_claimed": "blue" }, "Rarible": { "errorType": "status_code", "url": "https://rarible.com/marketplace/api/v4/urls/{}", "urlMain": "https://rarible.com/", "username_claimed": "blue" }, "Rate Your Music": { "errorType": "status_code", "url": "https://rateyourmusic.com/~{}", "urlMain": "https://rateyourmusic.com/", "username_claimed": "blue" }, "Rclone Forum": { "errorType": "status_code", "url": "https://forum.rclone.org/u/{}", "urlMain": "https://forum.rclone.org/", "username_claimed": "ncw" }, "RedTube": { "errorType": "status_code", "isNSFW": true, "url": "https://www.redtube.com/users/{}", "urlMain": "https://www.redtube.com/", "username_claimed": "hacker" }, "Redbubble": { "errorType": "status_code", "url": "https://www.redbubble.com/people/{}", "urlMain": "https://www.redbubble.com/", "username_claimed": "blue" }, "Reddit": { "errorMsg": "Sorry, nobody on Reddit goes by that name.", "errorType": "message", "headers": { "accept-language": "en-US,en;q=0.9" }, "url": "https://www.reddit.com/user/{}", "urlMain": "https://www.reddit.com/", "username_claimed": "blue" }, "Realmeye": { "errorMsg": "Sorry, but we either:", "errorType": "message", "url": "https://www.realmeye.com/player/{}", "urlMain": "https://www.realmeye.com/", "username_claimed": "rotmg" }, "Reisefrage": { "errorType": "status_code", "url": "https://www.reisefrage.net/nutzer/{}", "urlMain": "https://www.reisefrage.net/", "username_claimed": "reisefrage" }, "Replit.com": { "errorType": "status_code", "url": "https://replit.com/@{}", "urlMain": "https://replit.com/", "username_claimed": "blue" }, "ResearchGate": { "errorType": "response_url", "errorUrl": "https://www.researchgate.net/directory/profiles", "regexCheck": "\\w+_\\w+", "url": "https://www.researchgate.net/profile/{}", "urlMain": "https://www.researchgate.net/", "username_claimed": "John_Smith" }, "ReverbNation": { "errorMsg": "Sorry, we couldn't find that page", "errorType": "message", "url": "https://www.reverbnation.com/{}", "urlMain": "https://www.reverbnation.com/", "username_claimed": "blue" }, "Roblox": { "errorType": "status_code", "url": "https://www.roblox.com/user.aspx?username={}", "urlMain": "https://www.roblox.com/", "username_claimed": "bluewolfekiller" }, "RocketTube": { "errorMsg": "OOPS! Houston, we have a problem", "errorType": "message", "isNSFW": true, "url": "https://www.rockettube.com/{}", "urlMain": "https://www.rockettube.com/", "username_claimed": "Tatteddick5600" }, "RoyalCams": { "errorType": "status_code", "url": "https://royalcams.com/profile/{}", "urlMain": "https://royalcams.com", "username_claimed": "asuna-black" }, "Ruby Forums": { "errorMsg": "Oops! That page doesn’t exist or is private.", "errorType": "message", "url": "https://ruby-forum.com/u/{}/summary", "urlMain": "https://ruby-forums.com", "username_claimed": "rishard" }, "RubyGems": { "errorType": "status_code", "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]{1,40}", "url": "https://rubygems.org/profiles/{}", "urlMain": "https://rubygems.org/", "username_claimed": "blue" }, "Rumble": { "errorType": "status_code", "url": "https://rumble.com/user/{}", "urlMain": "https://rumble.com/", "username_claimed": "John" }, "RuneScape": { "errorMsg": "{\"error\":\"NO_PROFILE\",\"loggedIn\":\"false\"}", "errorType": "message", "regexCheck": "^(?! )[\\w -]{1,12}(?Page no longer exists", "url": "https://slideshare.net/{}", "urlMain": "https://slideshare.net/", "username_claimed": "blue" }, "Slides": { "errorCode": 204, "errorType": "status_code", "url": "https://slides.com/{}", "urlMain": "https://slides.com/", "username_claimed": "blue" }, "SmugMug": { "errorType": "status_code", "regexCheck": "^[a-zA-Z]{1,35}$", "url": "https://{}.smugmug.com", "urlMain": "https://smugmug.com", "username_claimed": "winchester" }, "Smule": { "errorMsg": "Smule | Page Not Found (404)", "errorType": "message", "url": "https://www.smule.com/{}", "urlMain": "https://www.smule.com/", "username_claimed": "blue" }, "Snapchat": { "errorType": "status_code", "regexCheck": "^[a-z][a-z-_.]{3,15}", "request_method": "GET", "url": "https://www.snapchat.com/add/{}", "urlMain": "https://www.snapchat.com", "username_claimed": "teamsnapchat" }, "SOOP": { "errorType": "status_code", "url": "https://www.sooplive.co.kr/station/{}", "urlMain": "https://www.sooplive.co.kr/", "urlProbe": "https://api-channel.sooplive.co.kr/v1.1/channel/{}/station", "username_claimed": "udkn" }, "SoundCloud": { "errorType": "status_code", "url": "https://soundcloud.com/{}", "urlMain": "https://soundcloud.com/", "username_claimed": "blue" }, "SourceForge": { "errorType": "status_code", "url": "https://sourceforge.net/u/{}", "urlMain": "https://sourceforge.net/", "username_claimed": "blue" }, "SoylentNews": { "errorMsg": "The user you requested does not exist, no matter how much you wish this might be the case.", "errorType": "message", "url": "https://soylentnews.org/~{}", "urlMain": "https://soylentnews.org", "username_claimed": "adam" }, "SpeakerDeck": { "errorType": "status_code", "url": "https://speakerdeck.com/{}", "urlMain": "https://speakerdeck.com/", "username_claimed": "pylapp" }, "Speedrun.com": { "errorType": "status_code", "url": "https://speedrun.com/users/{}", "urlMain": "https://speedrun.com/", "username_claimed": "example" }, "Spells8": { "errorType": "status_code", "url": "https://forum.spells8.com/u/{}", "urlMain": "https://spells8.com", "username_claimed": "susurrus" }, "Splice": { "errorType": "status_code", "url": "https://splice.com/{}", "urlMain": "https://splice.com/", "username_claimed": "splice" }, "Splits.io": { "errorType": "status_code", "regexCheck": "^[^.]*?$", "url": "https://splits.io/users/{}", "urlMain": "https://splits.io", "username_claimed": "cambosteve" }, "Sporcle": { "errorType": "status_code", "url": "https://www.sporcle.com/user/{}/people", "urlMain": "https://www.sporcle.com/", "username_claimed": "blue" }, "Sportlerfrage": { "errorType": "status_code", "url": "https://www.sportlerfrage.net/nutzer/{}", "urlMain": "https://www.sportlerfrage.net/", "username_claimed": "sportlerfrage" }, "SportsRU": { "errorType": "status_code", "url": "https://www.sports.ru/profile/{}/", "urlMain": "https://www.sports.ru/", "username_claimed": "blue" }, "Spotify": { "errorType": "status_code", "url": "https://open.spotify.com/user/{}", "urlMain": "https://open.spotify.com/", "username_claimed": "blue" }, "Star Citizen": { "errorMsg": "404", "errorType": "message", "url": "https://robertsspaceindustries.com/citizens/{}", "urlMain": "https://robertsspaceindustries.com/", "username_claimed": "blue" }, "Status Cafe": { "errorMsg": "Page Not Found", "errorType": "message", "url": "https://status.cafe/users/{}", "urlMain": "https://status.cafe/", "username_claimed": "blue" }, "Steam Community (Group)": { "errorMsg": "No group could be retrieved for the given URL", "errorType": "message", "url": "https://steamcommunity.com/groups/{}", "urlMain": "https://steamcommunity.com/", "username_claimed": "blue" }, "Steam Community (User)": { "errorMsg": "The specified profile could not be found", "errorType": "message", "url": "https://steamcommunity.com/id/{}/", "urlMain": "https://steamcommunity.com/", "username_claimed": "blue" }, "Strava": { "errorType": "status_code", "regexCheck": "^[^.]*?$", "url": "https://www.strava.com/athletes/{}", "urlMain": "https://www.strava.com/", "username_claimed": "blue" }, "SublimeForum": { "errorType": "status_code", "url": "https://forum.sublimetext.com/u/{}", "urlMain": "https://forum.sublimetext.com/", "username_claimed": "blue" }, "TETR.IO": { "errorMsg": "No such user!", "errorType": "message", "url": "https://ch.tetr.io/u/{}", "urlMain": "https://tetr.io", "urlProbe": "https://ch.tetr.io/api/users/{}", "username_claimed": "osk" }, "TheMovieDB": { "errorType": "status_code", "url": "https://www.themoviedb.org/u/{}", "urlMain": "https://www.themoviedb.org/", "username_claimed": "blue" }, "TikTok": { "url": "https://www.tiktok.com/@{}", "urlMain": "https://www.tiktok.com", "errorType": "message", "errorMsg": [ "\"statusCode\":10221", "Govt. of India decided to block 59 apps" ], "username_claimed": "charlidamelio" }, "Tiendanube": { "url": "https://{}.mitiendanube.com/", "urlMain": "https://www.tiendanube.com/", "errorType": "status_code", "username_claimed": "blue" }, "Topcoder": { "errorType": "status_code", "url": "https://profiles.topcoder.com/{}/", "urlMain": "https://topcoder.com/", "username_claimed": "USER", "urlProbe": "https://api.topcoder.com/v5/members/{}", "regexCheck": "^[a-zA-Z0-9_.]+$" }, "Topmate": { "errorType": "status_code", "url": "https://topmate.io/{}", "urlMain": "https://topmate.io/", "username_claimed": "blue" }, "TRAKTRAIN": { "errorType": "status_code", "url": "https://traktrain.com/{}", "urlMain": "https://traktrain.com/", "username_claimed": "traktrain" }, "Telegram": { "errorMsg": [ "Telegram Messenger", "If you have Telegram, you can contact User ", "429 Too Many Requests" ], "errorType": "message", "regexCheck": "^[a-zA-Z0-9_]{1,15}$", "url": "https://x.com/{}", "urlMain": "https://x.com/", "urlProbe": "https://nitter.privacydev.net/{}", "username_claimed": "blue" }, "Typeracer": { "errorMsg": "Profile Not Found", "errorType": "message", "url": "https://data.typeracer.com/pit/profile?user={}", "urlMain": "https://typeracer.com", "username_claimed": "blue" }, "Ultimate-Guitar": { "errorType": "status_code", "url": "https://ultimate-guitar.com/u/{}", "urlMain": "https://ultimate-guitar.com/", "username_claimed": "blue" }, "Unsplash": { "errorType": "status_code", "regexCheck": "^[a-z0-9_]{1,60}$", "url": "https://unsplash.com/@{}", "urlMain": "https://unsplash.com/", "username_claimed": "jenny" }, "Untappd": { "errorType": "status_code", "url": "https://untappd.com/user/{}", "urlMain": "https://untappd.com/", "username_claimed": "untappd" }, "Valorant Forums": { "errorMsg": "The page you requested could not be found.", "errorType": "message", "url": "https://valorantforums.com/u/{}", "urlMain": "https://valorantforums.com", "username_claimed": "Wolves" }, "VK": { "errorType": "response_url", "errorUrl": "https://www.quora.com/profile/{}", "url": "https://vk.com/{}", "urlMain": "https://vk.com/", "username_claimed": "brown" }, "VSCO": { "errorType": "status_code", "url": "https://vsco.co/{}", "urlMain": "https://vsco.co/", "username_claimed": "blue" }, "Velog": { "errorType": "status_code", "url": "https://velog.io/@{}/posts", "urlMain": "https://velog.io/", "username_claimed": "qlgks1" }, "Velomania": { "errorMsg": "\u041f\u043e\u043b\u044c\u0437\u043e\u0432\u0430\u0442\u0435\u043b\u044c \u043d\u0435 \u0437\u0430\u0440\u0435\u0433\u0438\u0441\u0442\u0440\u0438\u0440\u043e\u0432\u0430\u043d \u0438 \u043d\u0435 \u0438\u043c\u0435\u0435\u0442 \u043f\u0440\u043e\u0444\u0438\u043b\u044f \u0434\u043b\u044f \u043f\u0440\u043e\u0441\u043c\u043e\u0442\u0440\u0430.", "errorType": "message", "url": "https://forum.velomania.ru/member.php?username={}", "urlMain": "https://forum.velomania.ru/", "username_claimed": "red" }, "Venmo": { "errorMsg": ["Venmo | Page Not Found"], "errorType": "message", "headers": { "Host": "account.venmo.com" }, "url": "https://account.venmo.com/u/{}", "urlMain": "https://venmo.com/", "urlProbe": "https://test1.venmo.com/u/{}", "username_claimed": "jenny" }, "Vero": { "errorMsg": "Not Found", "errorType": "message", "request_method": "GET", "url": "https://vero.co/{}", "urlMain": "https://vero.co/", "username_claimed": "blue" }, "Vimeo": { "errorType": "status_code", "url": "https://vimeo.com/{}", "urlMain": "https://vimeo.com/", "username_claimed": "blue" }, "VirusTotal": { "errorType": "status_code", "request_method": "GET", "url": "https://www.virustotal.com/gui/user/{}", "urlMain": "https://www.virustotal.com/", "urlProbe": "https://www.virustotal.com/ui/users/{}/avatar", "username_claimed": "blue" }, "VLR": { "errorType": "status_code", "url": "https://www.vlr.gg/user/{}", "urlMain": "https://www.vlr.gg", "username_claimed": "optms" }, "WICG Forum": { "errorType": "status_code", "regexCheck": "^(?![.-])[a-zA-Z0-9_.-]{3,20}$", "url": "https://discourse.wicg.io/u/{}/summary", "urlMain": "https://discourse.wicg.io/", "username_claimed": "stefano" }, "Wakatime": { "errorType": "status_code", "url": "https://wakatime.com/@{}", "urlMain": "https://wakatime.com/", "username_claimed": "blue" }, "Warrior Forum": { "errorType": "status_code", "url": "https://www.warriorforum.com/members/{}.html", "urlMain": "https://www.warriorforum.com/", "username_claimed": "blue" }, "Wattpad": { "errorType": "status_code", "url": "https://www.wattpad.com/user/{}", "urlMain": "https://www.wattpad.com/", "urlProbe": "https://www.wattpad.com/api/v3/users/{}/", "username_claimed": "Dogstho7951" }, "WebNode": { "errorType": "status_code", "regexCheck": "^[\\w@-]+?$", "url": "https://{}.webnode.cz/", "urlMain": "https://www.webnode.cz/", "username_claimed": "radkabalcarova" }, "Weblate": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9@._-]{1,150}$", "url": "https://hosted.weblate.org/user/{}/", "urlMain": "https://hosted.weblate.org/", "username_claimed": "adam" }, "Weebly": { "errorType": "status_code", "regexCheck": "^[a-zA-Z0-9-]{1,63}$", "url": "https://{}.weebly.com/", "urlMain": "https://weebly.com/", "username_claimed": "blue" }, "Wikidot": { "errorMsg": "User does not exist.", "errorType": "message", "url": "http://www.wikidot.com/user:info/{}", "urlMain": "http://www.wikidot.com/", "username_claimed": "blue" }, "Wikipedia": { "errorMsg": "centralauth-admin-nonexistent:", "errorType": "message", "url": "https://en.wikipedia.org/wiki/Special:CentralAuth/{}?uselang=qqx", "urlMain": "https://www.wikipedia.org/", "username_claimed": "Hoadlck" }, "Windy": { "errorType": "status_code", "url": "https://community.windy.com/user/{}", "urlMain": "https://windy.com/", "username_claimed": "blue" }, "Wix": { "errorType": "status_code", "regexCheck": "^[\\w@-]+?$", "url": "https://{}.wix.com", "urlMain": "https://wix.com/", "username_claimed": "support" }, "WolframalphaForum": { "errorType": "status_code", "url": "https://community.wolfram.com/web/{}/home", "urlMain": "https://community.wolfram.com/", "username_claimed": "unico" }, "WordPress": { "errorType": "response_url", "errorUrl": "wordpress.com/typo/?subdomain=", "regexCheck": "^[a-zA-Z][a-zA-Z0-9_-]*$", "url": "https://{}.wordpress.com/", "urlMain": "https://wordpress.com", "username_claimed": "blue" }, "WordPressOrg": { "errorType": "response_url", "errorUrl": "https://wordpress.org", "url": "https://profiles.wordpress.org/{}/", "urlMain": "https://wordpress.org/", "username_claimed": "blue" }, "Wordnik": { "errorMsg": "Page Not Found", "errorType": "message", "regexCheck": "^[a-zA-Z0-9_.+-]{1,40}$", "url": "https://www.wordnik.com/users/{}", "urlMain": "https://www.wordnik.com/", "username_claimed": "blue" }, "Wykop": { "errorType": "status_code", "url": "https://www.wykop.pl/ludzie/{}", "urlMain": "https://www.wykop.pl", "username_claimed": "blue" }, "Xbox Gamertag": { "errorType": "status_code", "url": "https://xboxgamertag.com/search/{}", "urlMain": "https://xboxgamertag.com/", "username_claimed": "red" }, "Xvideos": { "errorType": "status_code", "isNSFW": true, "url": "https://xvideos.com/profiles/{}", "urlMain": "https://xvideos.com/", "username_claimed": "blue" }, "YandexMusic": { "__comment__": "The first and third errorMsg relate to geo-restrictions and bot detection/captchas.", "errorMsg": [ "\u041e\u0448\u0438\u0431\u043a\u0430 404", "Threads • Log in", "errorType": "message", "headers": { "Sec-Fetch-Mode": "navigate" }, "url": "https://www.threads.net/@{}", "urlMain": "https://www.threads.net/", "username_claimed": "zuck" }, "toster": { "errorType": "status_code", "url": "https://www.toster.ru/user/{}/answers", "urlMain": "https://www.toster.ru/", "username_claimed": "adam" }, "tumblr": { "errorType": "status_code", "url": "https://{}.tumblr.com/", "urlMain": "https://www.tumblr.com/", "username_claimed": "goku" }, "uid": { "errorType": "status_code", "url": "http://uid.me/{}", "urlMain": "https://uid.me/", "username_claimed": "blue" }, "write.as": { "errorType": "status_code", "url": "https://write.as/{}", "urlMain": "https://write.as", "username_claimed": "pylapp" }, "xHamster": { "errorType": "status_code", "isNSFW": true, "url": "https://xhamster.com/users/{}", "urlMain": "https://xhamster.com", "urlProbe": "https://xhamster.com/users/{}?old_browser=true", "username_claimed": "blue" }, "znanylekarz.pl": { "errorType": "status_code", "url": "https://www.znanylekarz.pl/{}", "urlMain": "https://znanylekarz.pl", "username_claimed": "janusz-nowak" }, "Platzi": { "errorType": "status_code", "errorCode": 404, "url": "https://platzi.com/p/{}/", "urlMain": "https://platzi.com/", "username_claimed": "freddier", "request_method": "GET" }, "BabyRu": { "url": "https://www.baby.ru/u/{}", "urlMain": "https://www.baby.ru/", "errorType": "message", "errorMsg": [ "\u0421\u0442\u0440\u0430\u043d\u0438\u0446\u0430, \u043a\u043e\u0442\u043e\u0440\u0443\u044e \u0432\u044b \u0438\u0441\u043a\u0430\u043b\u0438, \u043d\u0435 \u043d\u0430\u0439\u0434\u0435\u043d\u0430", "\u0414\u043e\u0441\u0442\u0443\u043f \u0441 \u0432\u0430\u0448\u0435\u0433\u043e IP-\u0430\u0434\u0440\u0435\u0441\u0430 \u0432\u0440\u0435\u043c\u0435\u043d\u043d\u043e \u043e\u0433\u0440\u0430\u043d\u0438\u0447\u0435\u043d" ], "username_claimed": "example" }, "Wowhead": { "url": "https://wowhead.com/user={}", "urlMain": "https://wowhead.com/", "errorType": "status_code", "errorCode": 404, "username_claimed": "blue" }, "addons.wago.io": { "url": "https://addons.wago.io/user/{}", "urlMain": "https://addons.wago.io/", "errorType": "status_code", "errorCode": 404, "username_claimed": "blue" }, "CurseForge": { "url": "https://www.curseforge.com/members/{}/projects", "urlMain": "https://www.curseforge.com.", "errorType": "status_code", "errorCode": 404, "username_claimed": "blue" } } ================================================ FILE: sherlock_project/resources/data.schema.json ================================================ { "$schema": "https://json-schema.org/draft/2020-12/schema", "title": "Sherlock Target Manifest", "description": "Social media targets to probe for the existence of known usernames", "type": "object", "properties": { "$schema": { "type": "string" } }, "patternProperties": { "^(?!\\$).*?$": { "type": "object", "description": "Target name and associated information (key should be human readable name)", "required": ["url", "urlMain", "errorType", "username_claimed"], "properties": { "url": { "type": "string" }, "urlMain": { "type": "string" }, "urlProbe": { "type": "string" }, "username_claimed": { "type": "string" }, "regexCheck": { "type": "string" }, "isNSFW": { "type": "boolean" }, "headers": { "type": "object" }, "request_payload": { "type": "object" }, "__comment__": { "type": "string", "description": "Used to clarify important target information if (and only if) a commit message would not suffice.\nThis key should not be parsed anywhere within Sherlock." }, "tags": { "oneOf": [ { "$ref": "#/$defs/tag" }, { "type": "array", "items": { "$ref": "#/$defs/tag" } } ] }, "request_method": { "type": "string", "enum": ["GET", "POST", "HEAD", "PUT"] }, "errorType": { "oneOf": [ { "type": "string", "enum": ["message", "response_url", "status_code"] }, { "type": "array", "items": { "type": "string", "enum": ["message", "response_url", "status_code"] } } ] }, "errorMsg": { "oneOf": [ { "type": "string" }, { "type": "array", "items": { "type": "string" } } ] }, "errorCode": { "oneOf": [ { "type": "integer" }, { "type": "array", "items": { "type": "integer" } } ] }, "errorUrl": { "type": "string" }, "response_url": { "type": "string" } }, "dependencies": { "errorMsg": { "oneOf": [ { "properties": { "errorType": { "const": "message" } } }, { "properties": { "errorType": { "type": "array", "contains": { "const": "message" } } } } ] }, "errorUrl": { "oneOf": [ { "properties": { "errorType": { "const": "response_url" } } }, { "properties": { "errorType": { "type": "array", "contains": { "const": "response_url" } } } } ] }, "errorCode": { "oneOf": [ { "properties": { "errorType": { "const": "status_code" } } }, { "properties": { "errorType": { "type": "array", "contains": { "const": "status_code" } } } } ] } }, "allOf": [ { "if": { "anyOf": [ { "properties": { "errorType": { "const": "message" } } }, { "properties": { "errorType": { "type": "array", "contains": { "const": "message" } } } } ] }, "then": { "required": ["errorMsg"] } }, { "if": { "anyOf": [ { "properties": { "errorType": { "const": "response_url" } } }, { "properties": { "errorType": { "type": "array", "contains": { "const": "response_url" } } } } ] }, "then": { "required": ["errorUrl"] } } ], "additionalProperties": false } }, "additionalProperties": false, "$defs": { "tag": { "type": "string", "enum": ["adult", "gaming"] } } } ================================================ FILE: sherlock_project/result.py ================================================ """Sherlock Result Module This module defines various objects for recording the results of queries. """ from enum import Enum class QueryStatus(Enum): """Query Status Enumeration. Describes status of query about a given username. """ CLAIMED = "Claimed" # Username Detected AVAILABLE = "Available" # Username Not Detected UNKNOWN = "Unknown" # Error Occurred While Trying To Detect Username ILLEGAL = "Illegal" # Username Not Allowable For This Site WAF = "WAF" # Request blocked by WAF (i.e. Cloudflare) def __str__(self): """Convert Object To String. Keyword Arguments: self -- This object. Return Value: Nicely formatted string to get information about this object. """ return self.value class QueryResult(): """Query Result Object. Describes result of query about a given username. """ def __init__(self, username, site_name, site_url_user, status, query_time=None, context=None): """Create Query Result Object. Contains information about a specific method of detecting usernames on a given type of web sites. Keyword Arguments: self -- This object. username -- String indicating username that query result was about. site_name -- String which identifies site. site_url_user -- String containing URL for username on site. NOTE: The site may or may not exist: this just indicates what the name would be, if it existed. status -- Enumeration of type QueryStatus() indicating the status of the query. query_time -- Time (in seconds) required to perform query. Default of None. context -- String indicating any additional context about the query. For example, if there was an error, this might indicate the type of error that occurred. Default of None. Return Value: Nothing. """ self.username = username self.site_name = site_name self.site_url_user = site_url_user self.status = status self.query_time = query_time self.context = context return def __str__(self): """Convert Object To String. Keyword Arguments: self -- This object. Return Value: Nicely formatted string to get information about this object. """ status = str(self.status) if self.context is not None: # There is extra context information available about the results. # Append it to the normal response text. status += f" ({self.context})" return status ================================================ FILE: sherlock_project/sherlock.py ================================================ #! /usr/bin/env python3 """ Sherlock: Find Usernames Across Social Networks Module This module contains the main logic to search for usernames at social networks. """ import sys try: from sherlock_project.__init__ import import_error_test_var # noqa: F401 except ImportError: print("Did you run Sherlock with `python3 sherlock/sherlock.py ...`?") print("This is an outdated method. Please see https://sherlockproject.xyz/installation for up to date instructions.") sys.exit(1) import csv import signal import pandas as pd import os import re from argparse import ArgumentParser, RawDescriptionHelpFormatter from json import loads as json_loads from time import monotonic from typing import Optional import requests from requests_futures.sessions import FuturesSession from sherlock_project.__init__ import ( __longname__, __shortname__, __version__, forge_api_latest_release, ) from sherlock_project.result import QueryStatus from sherlock_project.result import QueryResult from sherlock_project.notify import QueryNotify from sherlock_project.notify import QueryNotifyPrint from sherlock_project.sites import SitesInformation from colorama import init from argparse import ArgumentTypeError class SherlockFuturesSession(FuturesSession): def request(self, method, url, hooks=None, *args, **kwargs): """Request URL. This extends the FuturesSession request method to calculate a response time metric to each request. It is taken (almost) directly from the following Stack Overflow answer: https://github.com/ross/requests-futures#working-in-the-background Keyword Arguments: self -- This object. method -- String containing method desired for request. url -- String containing URL for request. hooks -- Dictionary containing hooks to execute after request finishes. args -- Arguments. kwargs -- Keyword arguments. Return Value: Request object. """ # Record the start time for the request. if hooks is None: hooks = {} start = monotonic() def response_time(resp, *args, **kwargs): """Response Time Hook. Keyword Arguments: resp -- Response object. args -- Arguments. kwargs -- Keyword arguments. Return Value: Nothing. """ resp.elapsed = monotonic() - start return # Install hook to execute when response completes. # Make sure that the time measurement hook is first, so we will not # track any later hook's execution time. try: if isinstance(hooks["response"], list): hooks["response"].insert(0, response_time) elif isinstance(hooks["response"], tuple): # Convert tuple to list and insert time measurement hook first. hooks["response"] = list(hooks["response"]) hooks["response"].insert(0, response_time) else: # Must have previously contained a single hook function, # so convert to list. hooks["response"] = [response_time, hooks["response"]] except KeyError: # No response hook was already defined, so install it ourselves. hooks["response"] = [response_time] return super(SherlockFuturesSession, self).request( method, url, hooks=hooks, *args, **kwargs ) def get_response(request_future, error_type, social_network): # Default for Response object if some failure occurs. response = None error_context = "General Unknown Error" exception_text = None try: response = request_future.result() if response.status_code: # Status code exists in response object error_context = None except requests.exceptions.HTTPError as errh: error_context = "HTTP Error" exception_text = str(errh) except requests.exceptions.ProxyError as errp: error_context = "Proxy Error" exception_text = str(errp) except requests.exceptions.ConnectionError as errc: error_context = "Error Connecting" exception_text = str(errc) except requests.exceptions.Timeout as errt: error_context = "Timeout Error" exception_text = str(errt) except requests.exceptions.RequestException as err: error_context = "Unknown Error" exception_text = str(err) return response, error_context, exception_text def interpolate_string(input_object, username): if isinstance(input_object, str): return input_object.replace("{}", username) elif isinstance(input_object, dict): return {k: interpolate_string(v, username) for k, v in input_object.items()} elif isinstance(input_object, list): return [interpolate_string(i, username) for i in input_object] return input_object def check_for_parameter(username): """checks if {?} exists in the username if exist it means that sherlock is looking for more multiple username""" return "{?}" in username checksymbols = ["_", "-", "."] def multiple_usernames(username): """replace the parameter with with symbols and return a list of usernames""" allUsernames = [] for i in checksymbols: allUsernames.append(username.replace("{?}", i)) return allUsernames def sherlock( username: str, site_data: dict[str, dict[str, str]], query_notify: QueryNotify, dump_response: bool = False, proxy: Optional[str] = None, timeout: int = 60, ) -> dict[str, dict[str, str | QueryResult]]: """Run Sherlock Analysis. Checks for existence of username on various social media sites. Keyword Arguments: username -- String indicating username that report should be created against. site_data -- Dictionary containing all of the site data. query_notify -- Object with base type of QueryNotify(). This will be used to notify the caller about query results. proxy -- String indicating the proxy URL timeout -- Time in seconds to wait before timing out request. Default is 60 seconds. Return Value: Dictionary containing results from report. Key of dictionary is the name of the social network site, and the value is another dictionary with the following keys: url_main: URL of main site. url_user: URL of user on site (if account exists). status: QueryResult() object indicating results of test for account existence. http_status: HTTP status code of query which checked for existence on site. response_text: Text that came back from request. May be None if there was an HTTP error when checking for existence. """ # Notify caller that we are starting the query. query_notify.start(username) # Normal requests underlying_session = requests.session() # Limit number of workers to 20. # This is probably vastly overkill. if len(site_data) >= 20: max_workers = 20 else: max_workers = len(site_data) # Create multi-threaded session for all requests. session = SherlockFuturesSession( max_workers=max_workers, session=underlying_session ) # Results from analysis of all sites results_total = {} # First create futures for all requests. This allows for the requests to run in parallel for social_network, net_info in site_data.items(): # Results from analysis of this specific site results_site = {"url_main": net_info.get("urlMain")} # Record URL of main site # A user agent is needed because some sites don't return the correct # information since they think that we are bots (Which we actually are...) headers = { "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:129.0) Gecko/20100101 Firefox/129.0", } if "headers" in net_info: # Override/append any extra headers required by a given site. headers.update(net_info["headers"]) # URL of user on site (if it exists) url = interpolate_string(net_info["url"], username.replace(' ', '%20')) # Don't make request if username is invalid for the site regex_check = net_info.get("regexCheck") if regex_check and re.search(regex_check, username) is None: # No need to do the check at the site: this username is not allowed. results_site["status"] = QueryResult( username, social_network, url, QueryStatus.ILLEGAL ) results_site["url_user"] = "" results_site["http_status"] = "" results_site["response_text"] = "" query_notify.update(results_site["status"]) else: # URL of user on site (if it exists) results_site["url_user"] = url url_probe = net_info.get("urlProbe") request_method = net_info.get("request_method") request_payload = net_info.get("request_payload") request = None if request_method is not None: if request_method == "GET": request = session.get elif request_method == "HEAD": request = session.head elif request_method == "POST": request = session.post elif request_method == "PUT": request = session.put else: raise RuntimeError(f"Unsupported request_method for {url}") if request_payload is not None: request_payload = interpolate_string(request_payload, username) if url_probe is None: # Probe URL is normal one seen by people out on the web. url_probe = url else: # There is a special URL for probing existence separate # from where the user profile normally can be found. url_probe = interpolate_string(url_probe, username) if request is None: if net_info["errorType"] == "status_code": # In most cases when we are detecting by status code, # it is not necessary to get the entire body: we can # detect fine with just the HEAD response. request = session.head else: # Either this detect method needs the content associated # with the GET response, or this specific website will # not respond properly unless we request the whole page. request = session.get if net_info["errorType"] == "response_url": # Site forwards request to a different URL if username not # found. Disallow the redirect so we can capture the # http status from the original URL request. allow_redirects = False else: # Allow whatever redirect that the site wants to do. # The final result of the request will be what is available. allow_redirects = True # This future starts running the request in a new thread, doesn't block the main thread if proxy is not None: proxies = {"http": proxy, "https": proxy} future = request( url=url_probe, headers=headers, proxies=proxies, allow_redirects=allow_redirects, timeout=timeout, json=request_payload, ) else: future = request( url=url_probe, headers=headers, allow_redirects=allow_redirects, timeout=timeout, json=request_payload, ) # Store future in data for access later net_info["request_future"] = future # Add this site's results into final dictionary with all the other results. results_total[social_network] = results_site # Open the file containing account links for social_network, net_info in site_data.items(): # Retrieve results again results_site = results_total.get(social_network) # Retrieve other site information again url = results_site.get("url_user") status = results_site.get("status") if status is not None: # We have already determined the user doesn't exist here continue # Get the expected error type error_type = net_info["errorType"] if isinstance(error_type, str): error_type: list[str] = [error_type] # Retrieve future and ensure it has finished future = net_info["request_future"] r, error_text, exception_text = get_response( request_future=future, error_type=error_type, social_network=social_network ) # Get response time for response of our request. try: response_time = r.elapsed except AttributeError: response_time = None # Attempt to get request information try: http_status = r.status_code except Exception: http_status = "?" try: response_text = r.text.encode(r.encoding or "UTF-8") except Exception: response_text = "" query_status = QueryStatus.UNKNOWN error_context = None # As WAFs advance and evolve, they will occasionally block Sherlock and # lead to false positives and negatives. Fingerprints should be added # here to filter results that fail to bypass WAFs. Fingerprints should # be highly targetted. Comment at the end of each fingerprint to # indicate target and date fingerprinted. WAFHitMsgs = [ r'.loading-spinner{visibility:hidden}body.no-js .challenge-running{display:none}body.dark{background-color:#222;color:#d9d9d9}body.dark a{color:#fff}body.dark a:hover{color:#ee730a;text-decoration:underline}body.dark .lds-ring div{border-color:#999 transparent transparent}body.dark .font-red{color:#b20f03}body.dark', # 2024-05-13 Cloudflare r'', # 2024-11-11 Cloudflare error page r'AwsWafIntegration.forceRefreshToken', # 2024-11-11 Cloudfront (AWS) r'{return l.onPageView}}),Object.defineProperty(r,"perimeterxIdentifiers",{enumerable:' # 2024-04-09 PerimeterX / Human Security ] if error_text is not None: error_context = error_text elif any(hitMsg in r.text for hitMsg in WAFHitMsgs): query_status = QueryStatus.WAF else: if any(errtype not in ["message", "status_code", "response_url"] for errtype in error_type): error_context = f"Unknown error type '{error_type}' for {social_network}" query_status = QueryStatus.UNKNOWN else: if "message" in error_type: # error_flag True denotes no error found in the HTML # error_flag False denotes error found in the HTML error_flag = True errors = net_info.get("errorMsg") # errors will hold the error message # it can be string or list # by isinstance method we can detect that # and handle the case for strings as normal procedure # and if its list we can iterate the errors if isinstance(errors, str): # Checks if the error message is in the HTML # if error is present we will set flag to False if errors in r.text: error_flag = False else: # If it's list, it will iterate all the error message for error in errors: if error in r.text: error_flag = False break if error_flag: query_status = QueryStatus.CLAIMED else: query_status = QueryStatus.AVAILABLE if "status_code" in error_type and query_status is not QueryStatus.AVAILABLE: error_codes = net_info.get("errorCode") query_status = QueryStatus.CLAIMED # Type consistency, allowing for both singlets and lists in manifest if isinstance(error_codes, int): error_codes = [error_codes] if error_codes is not None and r.status_code in error_codes: query_status = QueryStatus.AVAILABLE elif r.status_code >= 300 or r.status_code < 200: query_status = QueryStatus.AVAILABLE if "response_url" in error_type and query_status is not QueryStatus.AVAILABLE: # For this detection method, we have turned off the redirect. # So, there is no need to check the response URL: it will always # match the request. Instead, we will ensure that the response # code indicates that the request was successful (i.e. no 404, or # forward to some odd redirect). if 200 <= r.status_code < 300: query_status = QueryStatus.CLAIMED else: query_status = QueryStatus.AVAILABLE if dump_response: print("+++++++++++++++++++++") print(f"TARGET NAME : {social_network}") print(f"USERNAME : {username}") print(f"TARGET URL : {url}") print(f"TEST METHOD : {error_type}") try: print(f"STATUS CODES : {net_info['errorCode']}") except KeyError: pass print("Results...") try: print(f"RESPONSE CODE : {r.status_code}") except Exception: pass try: print(f"ERROR TEXT : {net_info['errorMsg']}") except KeyError: pass print(">>>>> BEGIN RESPONSE TEXT") try: print(r.text) except Exception: pass print("<<<<< END RESPONSE TEXT") print("VERDICT : " + str(query_status)) print("+++++++++++++++++++++") # Notify caller about results of query. result: QueryResult = QueryResult( username=username, site_name=social_network, site_url_user=url, status=query_status, query_time=response_time, context=error_context, ) query_notify.update(result) # Save status of request results_site["status"] = result # Save results from request results_site["http_status"] = http_status results_site["response_text"] = response_text # Add this site's results into final dictionary with all of the other results. results_total[social_network] = results_site return results_total def timeout_check(value): """Check Timeout Argument. Checks timeout for validity. Keyword Arguments: value -- Time in seconds to wait before timing out request. Return Value: Floating point number representing the time (in seconds) that should be used for the timeout. NOTE: Will raise an exception if the timeout in invalid. """ float_value = float(value) if float_value <= 0: raise ArgumentTypeError( f"Invalid timeout value: {value}. Timeout must be a positive number." ) return float_value def handler(signal_received, frame): """Exit gracefully without throwing errors Source: https://www.devdungeon.com/content/python-catch-sigint-ctrl-c """ sys.exit(0) def main(): parser = ArgumentParser( formatter_class=RawDescriptionHelpFormatter, description=f"{__longname__} (Version {__version__})", ) parser.add_argument( "--version", action="version", version=f"{__shortname__} v{__version__}", help="Display version information and dependencies.", ) parser.add_argument( "--verbose", "-v", "-d", "--debug", action="store_true", dest="verbose", default=False, help="Display extra debugging information and metrics.", ) parser.add_argument( "--folderoutput", "-fo", dest="folderoutput", help="If using multiple usernames, the output of the results will be saved to this folder.", ) parser.add_argument( "--output", "-o", dest="output", help="If using single username, the output of the result will be saved to this file.", ) parser.add_argument( "--csv", action="store_true", dest="csv", default=False, help="Create Comma-Separated Values (CSV) File.", ) parser.add_argument( "--xlsx", action="store_true", dest="xlsx", default=False, help="Create the standard file for the modern Microsoft Excel spreadsheet (xlsx).", ) parser.add_argument( "--site", action="append", metavar="SITE_NAME", dest="site_list", default=[], help="Limit analysis to just the listed sites. Add multiple options to specify more than one site.", ) parser.add_argument( "--proxy", "-p", metavar="PROXY_URL", action="store", dest="proxy", default=None, help="Make requests over a proxy. e.g. socks5://127.0.0.1:1080", ) parser.add_argument( "--dump-response", action="store_true", dest="dump_response", default=False, help="Dump the HTTP response to stdout for targeted debugging.", ) parser.add_argument( "--json", "-j", metavar="JSON_FILE", dest="json_file", default=None, help="Load data from a JSON file or an online, valid, JSON file. Upstream PR numbers also accepted.", ) parser.add_argument( "--timeout", action="store", metavar="TIMEOUT", dest="timeout", type=timeout_check, default=60, help="Time (in seconds) to wait for response to requests (Default: 60)", ) parser.add_argument( "--print-all", action="store_true", dest="print_all", default=False, help="Output sites where the username was not found.", ) parser.add_argument( "--print-found", action="store_true", dest="print_found", default=True, help="Output sites where the username was found (also if exported as file).", ) parser.add_argument( "--no-color", action="store_true", dest="no_color", default=False, help="Don't color terminal output", ) parser.add_argument( "username", nargs="+", metavar="USERNAMES", action="store", help="One or more usernames to check with social networks. Check similar usernames using {?} (replace to '_', '-', '.').", ) parser.add_argument( "--browse", "-b", action="store_true", dest="browse", default=False, help="Browse to all results on default browser.", ) parser.add_argument( "--local", "-l", action="store_true", default=False, help="Force the use of the local data.json file.", ) parser.add_argument( "--nsfw", action="store_true", default=False, help="Include checking of NSFW sites from default list.", ) # TODO deprecated in favor of --txt, retained for workflow compatibility, to be removed # in future release parser.add_argument( "--no-txt", action="store_true", dest="no_txt", default=False, help="Disable creation of a txt file - WILL BE DEPRECATED", ) parser.add_argument( "--txt", action="store_true", dest="output_txt", default=False, help="Enable creation of a txt file", ) parser.add_argument( "--ignore-exclusions", action="store_true", dest="ignore_exclusions", default=False, help="Ignore upstream exclusions (may return more false positives)", ) args = parser.parse_args() # If the user presses CTRL-C, exit gracefully without throwing errors signal.signal(signal.SIGINT, handler) # Check for newer version of Sherlock. If it exists, let the user know about it try: latest_release_raw = requests.get(forge_api_latest_release, timeout=10).text latest_release_json = json_loads(latest_release_raw) latest_remote_tag = latest_release_json["tag_name"] if latest_remote_tag[1:] != __version__: print( f"Update available! {__version__} --> {latest_remote_tag[1:]}" f"\n{latest_release_json['html_url']}" ) except Exception as error: print(f"A problem occurred while checking for an update: {error}") # Make prompts if args.proxy is not None: print("Using the proxy: " + args.proxy) if args.no_color: # Disable color output. init(strip=True, convert=False) else: # Enable color output. init(autoreset=True) # Check if both output methods are entered as input. if args.output is not None and args.folderoutput is not None: print("You can only use one of the output methods.") sys.exit(1) # Check validity for single username output. if args.output is not None and len(args.username) != 1: print("You can only use --output with a single username") sys.exit(1) # Create object with all information about sites we are aware of. try: if args.local: sites = SitesInformation( os.path.join(os.path.dirname(__file__), "resources/data.json"), honor_exclusions=False, ) else: json_file_location = args.json_file if args.json_file: # If --json parameter is a number, interpret it as a pull request number if args.json_file.isnumeric(): pull_number = args.json_file pull_url = f"https://api.github.com/repos/sherlock-project/sherlock/pulls/{pull_number}" pull_request_raw = requests.get(pull_url, timeout=10).text pull_request_json = json_loads(pull_request_raw) # Check if it's a valid pull request if "message" in pull_request_json: print(f"ERROR: Pull request #{pull_number} not found.") sys.exit(1) head_commit_sha = pull_request_json["head"]["sha"] json_file_location = f"https://raw.githubusercontent.com/sherlock-project/sherlock/{head_commit_sha}/sherlock_project/resources/data.json" sites = SitesInformation( data_file_path=json_file_location, honor_exclusions=not args.ignore_exclusions, do_not_exclude=args.site_list, ) except Exception as error: print(f"ERROR: {error}") sys.exit(1) if not args.nsfw: sites.remove_nsfw_sites(do_not_remove=args.site_list) # Create original dictionary from SitesInformation() object. # Eventually, the rest of the code will be updated to use the new object # directly, but this will glue the two pieces together. site_data_all = {site.name: site.information for site in sites} if args.site_list == []: # Not desired to look at a sub-set of sites site_data = site_data_all else: # User desires to selectively run queries on a sub-set of the site list. # Make sure that the sites are supported & build up pruned site database. site_data = {} site_missing = [] for site in args.site_list: counter = 0 for existing_site in site_data_all: if site.lower() == existing_site.lower(): site_data[existing_site] = site_data_all[existing_site] counter += 1 if counter == 0: # Build up list of sites not supported for future error message. site_missing.append(f"'{site}'") if site_missing: print(f"Error: Desired sites not found: {', '.join(site_missing)}.") if not site_data: sys.exit(1) # Create notify object for query results. query_notify = QueryNotifyPrint( result=None, verbose=args.verbose, print_all=args.print_all, browse=args.browse ) # Run report on all specified users. all_usernames = [] for username in args.username: if check_for_parameter(username): for name in multiple_usernames(username): all_usernames.append(name) else: all_usernames.append(username) for username in all_usernames: results = sherlock( username, site_data, query_notify, dump_response=args.dump_response, proxy=args.proxy, timeout=args.timeout, ) if args.output: result_file = args.output elif args.folderoutput: # The usernames results should be stored in a targeted folder. # If the folder doesn't exist, create it first os.makedirs(args.folderoutput, exist_ok=True) result_file = os.path.join(args.folderoutput, f"{username}.txt") else: result_file = f"{username}.txt" if args.output_txt: with open(result_file, "w", encoding="utf-8") as file: exists_counter = 0 for website_name in results: dictionary = results[website_name] if dictionary.get("status").status == QueryStatus.CLAIMED: exists_counter += 1 file.write(dictionary["url_user"] + "\n") file.write(f"Total Websites Username Detected On : {exists_counter}\n") if args.csv: result_file = f"{username}.csv" if args.folderoutput: # The usernames results should be stored in a targeted folder. # If the folder doesn't exist, create it first os.makedirs(args.folderoutput, exist_ok=True) result_file = os.path.join(args.folderoutput, result_file) with open(result_file, "w", newline="", encoding="utf-8") as csv_report: writer = csv.writer(csv_report) writer.writerow( [ "username", "name", "url_main", "url_user", "exists", "http_status", "response_time_s", ] ) for site in results: if ( args.print_found and not args.print_all and results[site]["status"].status != QueryStatus.CLAIMED ): continue response_time_s = results[site]["status"].query_time if response_time_s is None: response_time_s = "" writer.writerow( [ username, site, results[site]["url_main"], results[site]["url_user"], str(results[site]["status"].status), results[site]["http_status"], response_time_s, ] ) if args.xlsx: usernames = [] names = [] url_main = [] url_user = [] exists = [] http_status = [] response_time_s = [] for site in results: if ( args.print_found and not args.print_all and results[site]["status"].status != QueryStatus.CLAIMED ): continue if response_time_s is None: response_time_s.append("") else: response_time_s.append(results[site]["status"].query_time) usernames.append(username) names.append(site) url_main.append(results[site]["url_main"]) url_user.append(results[site]["url_user"]) exists.append(str(results[site]["status"].status)) http_status.append(results[site]["http_status"]) DataFrame = pd.DataFrame( { "username": usernames, "name": names, "url_main": [f'=HYPERLINK(\"{u}\")' for u in url_main], "url_user": [f'=HYPERLINK(\"{u}\")' for u in url_user], "exists": exists, "http_status": http_status, "response_time_s": response_time_s, } ) DataFrame.to_excel(f"{username}.xlsx", sheet_name="sheet1", index=False) print() query_notify.finish() if __name__ == "__main__": main() ================================================ FILE: sherlock_project/sites.py ================================================ """Sherlock Sites Information Module This module supports storing information about websites. This is the raw data that will be used to search for usernames. """ import json import requests import secrets MANIFEST_URL = "https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock_project/resources/data.json" EXCLUSIONS_URL = "https://raw.githubusercontent.com/sherlock-project/sherlock/refs/heads/exclusions/false_positive_exclusions.txt" class SiteInformation: def __init__(self, name, url_home, url_username_format, username_claimed, information, is_nsfw, username_unclaimed=secrets.token_urlsafe(10)): """Create Site Information Object. Contains information about a specific website. Keyword Arguments: self -- This object. name -- String which identifies site. url_home -- String containing URL for home of site. url_username_format -- String containing URL for Username format on site. NOTE: The string should contain the token "{}" where the username should be substituted. For example, a string of "https://somesite.com/users/{}" indicates that the individual usernames would show up under the "https://somesite.com/users/" area of the website. username_claimed -- String containing username which is known to be claimed on website. username_unclaimed -- String containing username which is known to be unclaimed on website. information -- Dictionary containing all known information about website. NOTE: Custom information about how to actually detect the existence of the username will be included in this dictionary. This information will be needed by the detection method, but it is only recorded in this object for future use. is_nsfw -- Boolean indicating if site is Not Safe For Work. Return Value: Nothing. """ self.name = name self.url_home = url_home self.url_username_format = url_username_format self.username_claimed = username_claimed self.username_unclaimed = secrets.token_urlsafe(32) self.information = information self.is_nsfw = is_nsfw return def __str__(self): """Convert Object To String. Keyword Arguments: self -- This object. Return Value: Nicely formatted string to get information about this object. """ return f"{self.name} ({self.url_home})" class SitesInformation: def __init__( self, data_file_path: str|None = None, honor_exclusions: bool = True, do_not_exclude: list[str] = [], ): """Create Sites Information Object. Contains information about all supported websites. Keyword Arguments: self -- This object. data_file_path -- String which indicates path to data file. The file name must end in ".json". There are 3 possible formats: * Absolute File Format For example, "c:/stuff/data.json". * Relative File Format The current working directory is used as the context. For example, "data.json". * URL Format For example, "https://example.com/data.json", or "http://example.com/data.json". An exception will be thrown if the path to the data file is not in the expected format, or if there was any problem loading the file. If this option is not specified, then a default site list will be used. Return Value: Nothing. """ if not data_file_path: # The default data file is the live data.json which is in the GitHub repo. The reason why we are using # this instead of the local one is so that the user has the most up-to-date data. This prevents # users from creating issue about false positives which has already been fixed or having outdated data data_file_path = MANIFEST_URL # Ensure that specified data file has correct extension. if not data_file_path.lower().endswith(".json"): raise FileNotFoundError(f"Incorrect JSON file extension for data file '{data_file_path}'.") # if "http://" == data_file_path[:7].lower() or "https://" == data_file_path[:8].lower(): if data_file_path.lower().startswith("http"): # Reference is to a URL. try: response = requests.get(url=data_file_path, timeout=30) except Exception as error: raise FileNotFoundError( f"Problem while attempting to access data file URL '{data_file_path}': {error}" ) if response.status_code != 200: raise FileNotFoundError(f"Bad response while accessing " f"data file URL '{data_file_path}'." ) try: site_data = response.json() except Exception as error: raise ValueError( f"Problem parsing json contents at '{data_file_path}': {error}." ) else: # Reference is to a file. try: with open(data_file_path, "r", encoding="utf-8") as file: try: site_data = json.load(file) except Exception as error: raise ValueError( f"Problem parsing json contents at '{data_file_path}': {error}." ) except FileNotFoundError: raise FileNotFoundError(f"Problem while attempting to access " f"data file '{data_file_path}'." ) site_data.pop('$schema', None) if honor_exclusions: try: response = requests.get(url=EXCLUSIONS_URL, timeout=10) if response.status_code == 200: exclusions = response.text.splitlines() exclusions = [exclusion.strip() for exclusion in exclusions] for site in do_not_exclude: if site in exclusions: exclusions.remove(site) for exclusion in exclusions: try: site_data.pop(exclusion, None) except KeyError: pass except Exception: # If there was any problem loading the exclusions, just continue without them print("Warning: Could not load exclusions, continuing without them.") honor_exclusions = False self.sites = {} # Add all site information from the json file to internal site list. for site_name in site_data: try: self.sites[site_name] = \ SiteInformation(site_name, site_data[site_name]["urlMain"], site_data[site_name]["url"], site_data[site_name]["username_claimed"], site_data[site_name], site_data[site_name].get("isNSFW",False) ) except KeyError as error: raise ValueError( f"Problem parsing json contents at '{data_file_path}': Missing attribute {error}." ) except TypeError: print(f"Encountered TypeError parsing json contents for target '{site_name}' at {data_file_path}\nSkipping target.\n") return def remove_nsfw_sites(self, do_not_remove: list = []): """ Remove NSFW sites from the sites, if isNSFW flag is true for site Keyword Arguments: self -- This object. Return Value: None """ sites = {} do_not_remove = [site.casefold() for site in do_not_remove] for site in self.sites: if self.sites[site].is_nsfw and site.casefold() not in do_not_remove: continue sites[site] = self.sites[site] self.sites = sites def site_name_list(self): """Get Site Name List. Keyword Arguments: self -- This object. Return Value: List of strings containing names of sites. """ return sorted([site.name for site in self], key=str.lower) def __iter__(self): """Iterator For Object. Keyword Arguments: self -- This object. Return Value: Iterator for sites object. """ for site_name in self.sites: yield self.sites[site_name] def __len__(self): """Length For Object. Keyword Arguments: self -- This object. Return Value: Length of sites object. """ return len(self.sites) ================================================ FILE: tests/conftest.py ================================================ import os import json import urllib import pytest from sherlock_project.sites import SitesInformation def fetch_local_manifest(honor_exclusions: bool = True) -> dict[str, dict[str, str]]: sites_obj = SitesInformation(data_file_path=os.path.join(os.path.dirname(__file__), "../sherlock_project/resources/data.json"), honor_exclusions=honor_exclusions) sites_iterable: dict[str, dict[str, str]] = {site.name: site.information for site in sites_obj} return sites_iterable @pytest.fixture() def sites_obj(): sites_obj = SitesInformation(data_file_path=os.path.join(os.path.dirname(__file__), "../sherlock_project/resources/data.json")) yield sites_obj @pytest.fixture(scope="session") def sites_info(): yield fetch_local_manifest() @pytest.fixture(scope="session") def remote_schema(): schema_url: str = 'https://raw.githubusercontent.com/sherlock-project/sherlock/master/sherlock_project/resources/data.schema.json' with urllib.request.urlopen(schema_url) as remoteschema: schemadat = json.load(remoteschema) yield schemadat def pytest_addoption(parser): parser.addoption( "--chunked-sites", action="store", default=None, help="For tests utilizing chunked sites, include only the (comma-separated) site(s) specified.", ) def pytest_generate_tests(metafunc): if "chunked_sites" in metafunc.fixturenames: sites_info = fetch_local_manifest(honor_exclusions=False) # Ingest and apply site selections site_filter: str | None = metafunc.config.getoption("--chunked-sites") if site_filter: selected_sites: list[str] = [site.strip() for site in site_filter.split(",")] sites_info = { site: data for site, data in sites_info.items() if site in selected_sites } params = [{name: data} for name, data in sites_info.items()] ids = list(sites_info.keys()) metafunc.parametrize("chunked_sites", params, ids=ids) ================================================ FILE: tests/few_test_basic.py ================================================ import sherlock_project #from sherlock.sites import SitesInformation #local_manifest = data_file_path=os.path.join(os.path.dirname(__file__), "../sherlock/resources/data.json") def test_username_via_message(): sherlock_project.__main__("--version") ================================================ FILE: tests/sherlock_interactives.py ================================================ import os import platform import re import subprocess class Interactives: def run_cli(args:str = "") -> str: """Pass arguments to Sherlock as a normal user on the command line""" # Adapt for platform differences (Windows likes to be special) if platform.system() == "Windows": command:str = f"py -m sherlock_project {args}" else: command:str = f"sherlock {args}" proc_out:str = "" try: proc_out = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT) return proc_out.decode() except subprocess.CalledProcessError as e: raise InteractivesSubprocessError(e.output.decode()) def walk_sherlock_for_files_with(pattern: str) -> list[str]: """Check all files within the Sherlock package for matching patterns""" pattern:re.Pattern = re.compile(pattern) matching_files:list[str] = [] for root, dirs, files in os.walk("sherlock_project"): for file in files: file_path = os.path.join(root,file) if "__pycache__" in file_path: continue with open(file_path, 'r', errors='ignore') as f: if pattern.search(f.read()): matching_files.append(file_path) return matching_files class InteractivesSubprocessError(Exception): pass ================================================ FILE: tests/test_manifest.py ================================================ import os import json import pytest from jsonschema import validate def test_validate_manifest_against_local_schema(): """Ensures that the manifest matches the local schema, for situations where the schema is being changed.""" json_relative: str = '../sherlock_project/resources/data.json' schema_relative: str = '../sherlock_project/resources/data.schema.json' json_path: str = os.path.join(os.path.dirname(__file__), json_relative) schema_path: str = os.path.join(os.path.dirname(__file__), schema_relative) with open(json_path, 'r') as f: jsondat = json.load(f) with open(schema_path, 'r') as f: schemadat = json.load(f) validate(instance=jsondat, schema=schemadat) @pytest.mark.online def test_validate_manifest_against_remote_schema(remote_schema): """Ensures that the manifest matches the remote schema, so as to not unexpectedly break clients.""" json_relative: str = '../sherlock_project/resources/data.json' json_path: str = os.path.join(os.path.dirname(__file__), json_relative) with open(json_path, 'r') as f: jsondat = json.load(f) validate(instance=jsondat, schema=remote_schema) # Ensure that the expected values are beind returned by the site list @pytest.mark.parametrize("target_name,target_expected_err_type", [ ('GitHub', 'status_code'), ('GitLab', 'message'), ]) def test_site_list_iterability (sites_info, target_name, target_expected_err_type): assert sites_info[target_name]['errorType'] == target_expected_err_type ================================================ FILE: tests/test_probes.py ================================================ import pytest import random import string import re from sherlock_project.sherlock import sherlock from sherlock_project.notify import QueryNotify from sherlock_project.result import QueryStatus #from sherlock_interactives import Interactives def simple_query(sites_info: dict, site: str, username: str) -> QueryStatus: query_notify = QueryNotify() site_data: dict = {} site_data[site] = sites_info[site] return sherlock( username=username, site_data=site_data, query_notify=query_notify, )[site]['status'].status @pytest.mark.online class TestLiveTargets: """Actively test probes against live and trusted targets""" # Known positives should only use sites trusted to be reliable and unchanging @pytest.mark.parametrize('site,username',[ ('GitLab', 'ppfeister'), ('AllMyLinks', 'blue'), ]) def test_known_positives_via_message(self, sites_info, site, username): assert simple_query(sites_info=sites_info, site=site, username=username) is QueryStatus.CLAIMED # Known positives should only use sites trusted to be reliable and unchanging @pytest.mark.parametrize('site,username',[ ('GitHub', 'ppfeister'), ('GitHub', 'sherlock-project'), ('Docker Hub', 'ppfeister'), ('Docker Hub', 'sherlock'), ]) def test_known_positives_via_status_code(self, sites_info, site, username): assert simple_query(sites_info=sites_info, site=site, username=username) is QueryStatus.CLAIMED # Known positives should only use sites trusted to be reliable and unchanging @pytest.mark.parametrize('site,username',[ ('Keybase', 'blue'), ('devRant', 'blue'), ]) def test_known_positives_via_response_url(self, sites_info, site, username): assert simple_query(sites_info=sites_info, site=site, username=username) is QueryStatus.CLAIMED # Randomly generate usernames of high length and test for positive availability # Randomly generated usernames should be simple alnum for simplicity and high # compatibility. Several attempts may be made ~just in case~ a real username is # generated. @pytest.mark.parametrize('site,random_len',[ ('GitLab', 255), ('Codecademy', 30) ]) def test_likely_negatives_via_message(self, sites_info, site, random_len): num_attempts: int = 3 attempted_usernames: list[str] = [] status: QueryStatus = QueryStatus.CLAIMED for i in range(num_attempts): acceptable_types = string.ascii_letters + string.digits random_handle = ''.join(random.choice(acceptable_types) for _ in range (random_len)) attempted_usernames.append(random_handle) status = simple_query(sites_info=sites_info, site=site, username=random_handle) if status is QueryStatus.AVAILABLE: break assert status is QueryStatus.AVAILABLE, f"Could not validate available username after {num_attempts} attempts with randomly generated usernames {attempted_usernames}." # Randomly generate usernames of high length and test for positive availability # Randomly generated usernames should be simple alnum for simplicity and high # compatibility. Several attempts may be made ~just in case~ a real username is # generated. @pytest.mark.parametrize('site,random_len',[ ('GitHub', 39), ('Docker Hub', 30) ]) def test_likely_negatives_via_status_code(self, sites_info, site, random_len): num_attempts: int = 3 attempted_usernames: list[str] = [] status: QueryStatus = QueryStatus.CLAIMED for i in range(num_attempts): acceptable_types = string.ascii_letters + string.digits random_handle = ''.join(random.choice(acceptable_types) for _ in range (random_len)) attempted_usernames.append(random_handle) status = simple_query(sites_info=sites_info, site=site, username=random_handle) if status is QueryStatus.AVAILABLE: break assert status is QueryStatus.AVAILABLE, f"Could not validate available username after {num_attempts} attempts with randomly generated usernames {attempted_usernames}." def test_username_illegal_regex(sites_info): site: str = 'BitBucket' invalid_handle: str = '*#$Y&*JRE' pattern = re.compile(sites_info[site]['regexCheck']) # Ensure that the username actually fails regex before testing sherlock assert pattern.match(invalid_handle) is None assert simple_query(sites_info=sites_info, site=site, username=invalid_handle) is QueryStatus.ILLEGAL ================================================ FILE: tests/test_ux.py ================================================ import pytest from sherlock_project import sherlock from sherlock_interactives import Interactives from sherlock_interactives import InteractivesSubprocessError def test_remove_nsfw(sites_obj): nsfw_target: str = 'Pornhub' assert nsfw_target in {site.name: site.information for site in sites_obj} sites_obj.remove_nsfw_sites() assert nsfw_target not in {site.name: site.information for site in sites_obj} # Parametrized sites should *not* include Motherless, which is acting as the control @pytest.mark.parametrize('nsfwsites', [ ['Pornhub'], ['Pornhub', 'Xvideos'], ]) def test_nsfw_explicit_selection(sites_obj, nsfwsites): for site in nsfwsites: assert site in {site.name: site.information for site in sites_obj} sites_obj.remove_nsfw_sites(do_not_remove=nsfwsites) for site in nsfwsites: assert site in {site.name: site.information for site in sites_obj} assert 'Motherless' not in {site.name: site.information for site in sites_obj} def test_wildcard_username_expansion(): assert sherlock.check_for_parameter('test{?}test') is True assert sherlock.check_for_parameter('test{.}test') is False assert sherlock.check_for_parameter('test{}test') is False assert sherlock.check_for_parameter('testtest') is False assert sherlock.check_for_parameter('test{?test') is False assert sherlock.check_for_parameter('test?}test') is False assert sherlock.multiple_usernames('test{?}test') == ["test_test" , "test-test" , "test.test"] @pytest.mark.parametrize('cliargs', [ '', '--site urghrtuight --egiotr', '--', ]) def test_no_usernames_provided(cliargs): with pytest.raises(InteractivesSubprocessError, match=r"error: the following arguments are required: USERNAMES"): Interactives.run_cli(cliargs) ================================================ FILE: tests/test_validate_targets.py ================================================ import pytest import re import rstr from sherlock_project.sherlock import sherlock from sherlock_project.notify import QueryNotify from sherlock_project.result import QueryResult, QueryStatus FALSE_POSITIVE_ATTEMPTS: int = 2 # Since the usernames are randomly generated, it's POSSIBLE that a real username can be hit FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND: int = 15 # If a pattern uses quantifiers such as `+` `*` or `{n,}`, limit the upper bound (0 to disable) FALSE_POSITIVE_DEFAULT_PATTERN: str = r'^[a-zA-Z0-9]{7,20}$' # Used in absence of a regexCheck entry def set_pattern_upper_bound(pattern: str, upper_bound: int = FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND) -> str: """Set upper bound for regex patterns that use quantifiers such as `+` `*` or `{n,}`.""" def replace_upper_bound(match: re.Match) -> str: # type: ignore lower_bound: int = int(match.group(1)) if match.group(1) else 0 # type: ignore nonlocal upper_bound upper_bound = upper_bound if lower_bound < upper_bound else lower_bound # type: ignore # noqa: F823 return f'{{{lower_bound},{upper_bound}}}' pattern = re.sub(r'(? QueryStatus: """Check if a site is likely to produce false positives.""" status: QueryStatus = QueryStatus.UNKNOWN for _ in range(FALSE_POSITIVE_ATTEMPTS): query_notify: QueryNotify = QueryNotify() username: str = rstr.xeger(pattern) result: QueryResult | str = sherlock( username=username, site_data=sites_info, query_notify=query_notify, )[site]['status'] if not hasattr(result, 'status'): raise TypeError(f"Result for site {site} does not have 'status' attribute. Actual result: {result}") if type(result.status) is not QueryStatus: # type: ignore raise TypeError(f"Result status for site {site} is not of type QueryStatus. Actual type: {type(result.status)}") # type: ignore status = result.status # type: ignore if status in (QueryStatus.AVAILABLE, QueryStatus.WAF): return status return status def false_negative_check(sites_info: dict[str, dict[str, str]], site: str) -> QueryStatus: """Check if a site is likely to produce false negatives.""" status: QueryStatus = QueryStatus.UNKNOWN query_notify: QueryNotify = QueryNotify() result: QueryResult | str = sherlock( username=sites_info[site]['username_claimed'], site_data=sites_info, query_notify=query_notify, )[site]['status'] if not hasattr(result, 'status'): raise TypeError(f"Result for site {site} does not have 'status' attribute. Actual result: {result}") if type(result.status) is not QueryStatus: # type: ignore raise TypeError(f"Result status for site {site} is not of type QueryStatus. Actual type: {type(result.status)}") # type: ignore status = result.status # type: ignore return status @pytest.mark.validate_targets @pytest.mark.online class Test_All_Targets: @pytest.mark.validate_targets_fp def test_false_pos(self, chunked_sites: dict[str, dict[str, str]]): """Iterate through all sites in the manifest to discover possible false-positive inducting targets.""" pattern: str for site in chunked_sites: try: pattern = chunked_sites[site]['regexCheck'] except KeyError: pattern = FALSE_POSITIVE_DEFAULT_PATTERN if FALSE_POSITIVE_QUANTIFIER_UPPER_BOUND > 0: pattern = set_pattern_upper_bound(pattern) result: QueryStatus = false_positive_check(chunked_sites, site, pattern) assert result is QueryStatus.AVAILABLE, f"{site} produced false positive with pattern {pattern}, result was {result}" @pytest.mark.validate_targets_fn def test_false_neg(self, chunked_sites: dict[str, dict[str, str]]): """Iterate through all sites in the manifest to discover possible false-negative inducting targets.""" for site in chunked_sites: result: QueryStatus = false_negative_check(chunked_sites, site) assert result is QueryStatus.CLAIMED, f"{site} produced false negative, result was {result}" ================================================ FILE: tests/test_version.py ================================================ import os from sherlock_interactives import Interactives import sherlock_project def test_versioning() -> None: # Ensure __version__ matches version presented to the user assert sherlock_project.__version__ in Interactives.run_cli("--version") # Ensure __init__ is single source of truth for __version__ in package # Temporarily allows sherlock.py so as to not trigger early upgrades found:list = Interactives.walk_sherlock_for_files_with(r'__version__ *= *') expected:list = [ # Normalization is REQUIRED for Windows ( / vs \ ) os.path.normpath("sherlock_project/__init__.py"), ] # Sorting is REQUIRED for Mac assert sorted(found) == sorted(expected) ================================================ FILE: tox.ini ================================================ [tox] requires = tox >= 3 envlist = lint py313 py312 py311 py310 [testenv] description = Attempt to build and install the package deps = coverage jsonschema pytest rstr allowlist_externals = coverage commands = coverage run --source=sherlock_project --module pytest -v coverage report --show-missing [testenv:offline] deps = jsonschema pytest commands = pytest -v -m "not online" [testenv:lint] description = Lint with Ruff deps = ruff commands = ruff check [gh-actions] python = 3.13: py313 3.12: py312 3.11: py311 3.10: py310