master 471169a992a8 cached
57 files
867.8 KB
209.6k tokens
1059 symbols
1 requests
Download .txt
Showing preview only (898K chars total). Download the full file or copy to clipboard to get everything.
Repository: georgewfraser/vscode-tree-sitter
Branch: master
Commit: 471169a992a8
Files: 57
Total size: 867.8 KB

Directory structure:
gitextract_dzd0zkcs/

├── .gitignore
├── .vscode/
│   ├── launch.json
│   ├── settings.json
│   └── tasks.json
├── .vscodeignore
├── LICENSE.md
├── README.md
├── TODO.md
├── azure-pipelines.yml
├── examples/
│   ├── cpp/
│   │   ├── marker-index.h
│   │   └── rule.cc
│   ├── go/
│   │   ├── letter_test.go
│   │   ├── no_newline_at_eof.go
│   │   ├── proc.go
│   │   ├── small.go
│   │   ├── type_switch.go
│   │   └── value.go
│   ├── javascript/
│   │   ├── destructuring.js
│   │   ├── expressions.js
│   │   ├── literals.js
│   │   ├── semicolon_insertion.js
│   │   └── statements.js
│   ├── ruby/
│   │   ├── classes.rb
│   │   ├── comments.rb
│   │   ├── control-flow.rb
│   │   ├── declarations.rb
│   │   ├── expressions.rb
│   │   ├── literals.rb
│   │   └── statements.rb
│   ├── rust/
│   │   ├── ast.rs
│   │   ├── keywords.txt
│   │   └── scratch.rs
│   └── typescript/
│       ├── keywords.txt
│       ├── parser.ts
│       └── small.ts
├── package.json
├── parsers/
│   ├── tree-sitter-cpp.wasm
│   ├── tree-sitter-go.wasm
│   ├── tree-sitter-javascript.wasm
│   ├── tree-sitter-ruby.wasm
│   ├── tree-sitter-rust.wasm
│   └── tree-sitter-typescript.wasm
├── scripts/
│   ├── build.sh
│   └── gen-parsers.sh
├── src/
│   ├── benchmark.ts
│   ├── colors.ts
│   ├── extension.ts
│   ├── print.ts
│   ├── scopes.ts
│   └── test.ts
├── textmate/
│   ├── cpp.tmLanguage.json
│   ├── go.tmLanguage.json
│   ├── ruby.tmLanguage.json
│   ├── rust.tmLanguage.json
│   └── typescript.tmLanguage.json
├── tsconfig.json
└── tslint.json

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
out
node_modules
.vscode-test/
*.vsix
*.bin

================================================
FILE: .vscode/launch.json
================================================
// A launch configuration that compiles the extension and then opens it inside a new window
// Use IntelliSense to learn about possible attributes.
// Hover to view descriptions of existing attributes.
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
{
	"version": "0.2.0",
	"configurations": [{
			"name": "Run Extension",
			"type": "extensionHost",
			"request": "launch",
			"runtimeExecutable": "${execPath}",
			"args": [
				"--extensionDevelopmentPath=${workspaceFolder}"
			],
			"outFiles": [
				"${workspaceFolder}/out/**/*.js"
			],
			"preLaunchTask": "npm: compile"
		},
		{
			"name": "Extension Tests",
			"type": "extensionHost",
			"request": "launch",
			"runtimeExecutable": "${execPath}",
			"args": [
				"--extensionDevelopmentPath=${workspaceFolder}",
				"--extensionTestsPath=${workspaceFolder}/out/test"
			],
			"outFiles": [
				"${workspaceFolder}/out/test/**/*.js"
			],
			"preLaunchTask": "npm: compile"
		},
		{
			"name": "Debug tests",
			"type": "node",
			"request": "launch",
			"cwd": "${workspaceFolder}",
			"runtimeExecutable": "npm",
			"runtimeArgs": [
				"run-script", "debug"
			],
			"port": 9229
		}
	]
}


================================================
FILE: .vscode/settings.json
================================================
// Place your settings in this file to overwrite default and user settings.
{
    "files.exclude": {
        "out": false // set this to true to hide the "out" folder with the compiled JS files
    },
    "search.exclude": {
        "out": true // set this to false to include "out" folder in search results
    },
    // Turn off tsc task auto detection since we have the necessary tasks as npm scripts
    "typescript.tsc.autoDetect": "off"
}

================================================
FILE: .vscode/tasks.json
================================================
// See https://go.microsoft.com/fwlink/?LinkId=733558
// for the documentation about the tasks.json format
{
	"version": "2.0.0",
	"tasks": [
		{
			"type": "npm",
			"script": "watch",
			"problemMatcher": "$tsc-watch",
			"isBackground": true,
			"presentation": {
				"reveal": "never"
			},
			"group": {
				"kind": "build",
				"isDefault": true
			}
		}
	]
}


================================================
FILE: .vscodeignore
================================================
.vscode/**
.vscode-test/**
out/test/**
src/**
.gitignore
vsc-extension-quickstart.md
**/tsconfig.json
**/tslint.json
**/*.map
**/*.ts
examples/**

================================================
FILE: LICENSE.md
================================================
The MIT License (MIT)

Copyright (c) 2016 George Fraser

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.


================================================
FILE: README.md
================================================
# Tree Sitter for VSCode [Deprecated]

**With the improved support for custom syntax coloring through the language server protocol, this extension is no longer needed.**

This extension gives VSCode support for [tree-sitter](http://tree-sitter.github.io/tree-sitter/) syntax coloring. Examples with tree-sitter coloring on the right:

## Go

![Go](./screenshots/go.png)

## Rust

![Rust](./screenshots/rust.png)

## C++

![C++](./screenshots/cpp.png)

## Ruby

![Ruby](./screenshots/ruby.png)

## Javascript / Typescript

![Typescript](./screenshots/typescript.png)

## Contributing

### Fixing colorization of an existing language

If you see something getting colored wrong, or something that should be colored but isn't, you can help! The simplest way to help is to create an issue with a simple example, a screenshot, and an explanation of what is wrong. 

You are also welcome to fix the problem yourself and submit a PR. Colorization is performed by the various `colorLanguage(x, editor)` functions in `src/colors.ts`. When working on the colorization rules, please keep in mind two core principles:

1. Good colorization is *consistent*. It's better to not color at all than to color inconsistently.
2. Good colorization is *selective*. The fewer things that we color, the more emphasis the color gives.

### Adding a new language

It's straightforward to add any [language with a tree-sitter grammar](https://tree-sitter.github.io/tree-sitter/).

1. Add a dependency on the npm package for that language: `npm install tree-sitter-yourlang`.
2. Add a color function to `./src/colors.ts`
3. Add a language to the dictionary at the top of `./src/extension.ts`
4. Add a **simplified** TextMate grammar to `./textmate/yourlang.tmLanguage.json`. The job of this textmate grammar is just to color keywords and simple literals; anything tricky should be left white and colored by your color function.
5. Add a reference to the grammar to the [contributes.grammars section of package.json](https://github.com/georgewfraser/vscode-tree-sitter/blob/fb4400b78481845c6a8497d079508d28aea25c19/package.json#L26). `yourlang` must be a [VSCode language identifier](https://code.visualstudio.com/docs/languages/identifiers).
6. Add a reference to `onLanguage:yourlang` to the [activationEvents section of package.json](https://github.com/georgewfraser/vscode-tree-sitter/blob/fb4400b78481845c6a8497d079508d28aea25c19/package.json#L18). `yourlang` must be a [VSCode language identifier](https://code.visualstudio.com/docs/languages/identifiers).
7. Add an example to `examples/yourlang`.
8. Hit `F5` in VSCode, with this project open, to test your changes.
9. Take a screenshot comparing before-and-after and add it to the above list.
10. Submit a PR!


================================================
FILE: TODO.md
================================================
## Bugs
- Tree-sitter scope colors are wrong while the user is previewing other themes
- Put back react support for .js and .tsx

## Features
- Folding-range provider https://code.visualstudio.com/api/references/vscode-api#FoldingRangeProvider
- Extend-selection provider https://code.visualstudio.com/api/references/vscode-api#SelectionRangeProvider
- Document highlight provider https://code.visualstudio.com/api/references/vscode-api#DocumentHighlightProvider

================================================
FILE: azure-pipelines.yml
================================================
# Node.js
# Build a general Node.js project with npm.
# Add steps that analyze code, save build artifacts, deploy, and more:
# https://docs.microsoft.com/azure/devops/pipelines/languages/javascript

trigger:
- master

pool:
  vmImage: ubuntu-16.04

steps:
- task: NodeTool@0
  inputs:
    versionSpec: '10.x'
  displayName: 'Install Node.js'
- script: 'npm install'
  displayName: 'Install NPM deps'
- script: 'npm run compile'
  displayName: 'Compile Typescript'
- script: 'node out/test.js'
  displayName: 'Run tests'
  failOnStderr: true

================================================
FILE: examples/cpp/marker-index.h
================================================
#ifndef MARKER_INDEX_H_
#define MARKER_INDEX_H_

#include <random>
#include <unordered_map>
#include "flat_set.h"
#include "point.h"
#include "range.h"

// Spatial index mapping marker ids to ranges in a text document, kept up to
// date as the document is edited ("spliced"). Nodes carry a random `priority`
// and are rebalanced with rotations — appears to be a treap-style randomized
// balanced tree; positions look ancestor-relative (left_extent /
// *_ancestor_position), TODO confirm against the implementation.
class MarkerIndex {
public:
  using MarkerId = unsigned;
  using MarkerIdSet = flat_set<MarkerId>;

  // Markers affected by a splice, grouped by how they relate to the edited
  // range (touching, strictly inside, overlapping an edge, surrounding it).
  struct SpliceResult {
    flat_set<MarkerId> touch;
    flat_set<MarkerId> inside;
    flat_set<MarkerId> overlap;
    flat_set<MarkerId> surround;
  };

  // One position at which at least one marker starts or ends.
  struct Boundary {
    Point position;
    flat_set<MarkerId> starting;
    flat_set<MarkerId> ending;
  };

  // Result of find_boundaries_after: ids whose start precedes the query point
  // plus the boundaries found after it.
  struct BoundaryQueryResult {
    std::vector<MarkerId> containing_start;
    std::vector<Boundary> boundaries;
  };

  // `seed` initializes the random engine used for node priorities.
  MarkerIndex(unsigned seed = 0u);
  ~MarkerIndex();
  int generate_random_number();
  // Inserts marker `id` spanning [start, end].
  void insert(MarkerId id, Point start, Point end);
  // NOTE(review): "exclusive" presumably controls whether the marker expands
  // when text is inserted exactly at its endpoints — confirm in the .cc file.
  void set_exclusive(MarkerId id, bool exclusive);
  void remove(MarkerId id);
  bool has(MarkerId id);
  // Applies an edit replacing `old_extent` with `new_extent` at `start`;
  // returns the markers invalidated by the edit.
  SpliceResult splice(Point start, Point old_extent, Point new_extent);
  Point get_start(MarkerId id) const;
  Point get_end(MarkerId id) const;
  Range get_range(MarkerId id) const;

  int compare(MarkerId id1, MarkerId id2) const;
  // Range queries over marker start/end positions.
  flat_set<MarkerId> find_intersecting(Point start, Point end);
  flat_set<MarkerId> find_containing(Point start, Point end);
  flat_set<MarkerId> find_contained_in(Point start, Point end);
  flat_set<MarkerId> find_starting_in(Point start, Point end);
  flat_set<MarkerId> find_starting_at(Point position);
  flat_set<MarkerId> find_ending_in(Point start, Point end);
  flat_set<MarkerId> find_ending_at(Point position);
  BoundaryQueryResult find_boundaries_after(Point start, size_t max_count);

  // Snapshot of every marker's current range (for tests/debugging).
  std::unordered_map<MarkerId, Range> dump();

private:
  friend class Iterator;

  // Tree node. Marker ids are distributed across the *_marker_ids sets of the
  // nodes on the path to the marker's endpoints.
  struct Node {
    Node *parent;
    Node *left;
    Node *right;
    Point left_extent;
    flat_set<MarkerId> left_marker_ids;
    flat_set<MarkerId> right_marker_ids;
    flat_set<MarkerId> start_marker_ids;
    flat_set<MarkerId> end_marker_ids;
    int priority;

    Node(Node *parent, Point left_extent);
    bool is_marker_endpoint();
  };

  // Stateful cursor used for all traversals; reused across operations via the
  // single `iterator` member below.
  class Iterator {
  public:
    Iterator(MarkerIndex *marker_index);
    void reset();
    Node* insert_marker_start(const MarkerId &id, const Point &start_position, const Point &end_position);
    Node* insert_marker_end(const MarkerId &id, const Point &start_position, const Point &end_position);
    Node* insert_splice_boundary(const Point &position, bool is_insertion_end);
    void find_intersecting(const Point &start, const Point &end, flat_set<MarkerId> *result);
    void find_contained_in(const Point &start, const Point &end, flat_set<MarkerId> *result);
    void find_starting_in(const Point &start, const Point &end, flat_set<MarkerId> *result);
    void find_ending_in(const Point &start, const Point &end, flat_set<MarkerId> *result);
    void find_boundaries_after(Point start, size_t max_count, BoundaryQueryResult *result);
    std::unordered_map<MarkerId, Range> dump();

  private:
    void ascend();
    void descend_left();
    void descend_right();
    void move_to_successor();
    void seek_to_first_node_greater_than_or_equal_to(const Point &position);
    void mark_right(const MarkerId &id, const Point &start_position, const Point &end_position);
    void mark_left(const MarkerId &id, const Point &start_position, const Point &end_position);
    Node* insert_left_child(const Point &position);
    Node* insert_right_child(const Point &position);
    void check_intersection(const Point &start, const Point &end, flat_set<MarkerId> *results);
    void cache_node_position() const;

    MarkerIndex *marker_index;
    Node *current_node;
    Point current_node_position;
    Point left_ancestor_position;
    Point right_ancestor_position;
    std::vector<Point> left_ancestor_position_stack;
    std::vector<Point> right_ancestor_position_stack;
  };

  Point get_node_position(const Node *node) const;
  void delete_node(Node *node);
  void delete_subtree(Node *node);
  // Rebalancing primitives keyed off Node::priority.
  void bubble_node_up(Node *node);
  void bubble_node_down(Node *node);
  void rotate_node_left(Node *pivot);
  void rotate_node_right(Node *pivot);
  void get_starting_and_ending_markers_within_subtree(const Node *node, flat_set<MarkerId> *starting, flat_set<MarkerId> *ending);
  void populate_splice_invalidation_sets(SpliceResult *invalidated, const Node *start_node, const Node *end_node, const flat_set<MarkerId> &starting_inside_splice, const flat_set<MarkerId> &ending_inside_splice);

  std::default_random_engine random_engine;
  std::uniform_int_distribution<int> random_distribution;
  Node *root;
  std::unordered_map<MarkerId, Node*> start_nodes_by_id;
  std::unordered_map<MarkerId, Node*> end_nodes_by_id;
  Iterator iterator;
  flat_set<MarkerId> exclusive_marker_ids;
  // mutable: lazily filled by const traversals (cache_node_position).
  mutable std::unordered_map<const Node*, Point> node_position_cache;
};

#endif // MARKER_INDEX_H_


================================================
FILE: examples/cpp/rule.cc
================================================
#include "compiler/rule.h"
#include "compiler/util/hash_combine.h"

namespace tree_sitter {
namespace rules {

using std::move;
using std::vector;
using util::hash_combine;

// Copy constructor. The union must hold *some* valid alternative before
// operator= runs (its destroy_value call destroys the active member), so
// initialize as Blank, then delegate to copy-assignment.
Rule::Rule(const Rule &other) : blank_(Blank{}), type(BlankType) {
  *this = other;
}

// Move constructor; same Blank-first trick as the copy constructor so that
// move-assignment can safely destroy the current member before moving.
Rule::Rule(Rule &&other) noexcept : blank_(Blank{}), type(BlankType) {
  *this = move(other);
}

// Invokes the destructor of whichever union member `rule->type` marks as
// active. Must run before a different member is activated via placement new.
static void destroy_value(Rule *rule) {
  switch (rule->type) {
    case Rule::BlankType:        rule->blank_.~Blank(); return;
    case Rule::CharacterSetType: rule->character_set_.~CharacterSet(); return;
    case Rule::StringType:       rule->string_.~String(); return;
    case Rule::PatternType:      rule->pattern_.~Pattern(); return;
    case Rule::NamedSymbolType:  rule->named_symbol_.~NamedSymbol(); return;
    case Rule::SymbolType:       rule->symbol_.~Symbol(); return;
    case Rule::ChoiceType:       rule->choice_.~Choice(); return;
    case Rule::MetadataType:     rule->metadata_.~Metadata(); return;
    case Rule::RepeatType:       rule->repeat_.~Repeat(); return;
    case Rule::SeqType:          rule->seq_.~Seq(); return;
  }
}

// Copy assignment for the tagged union: destroy the currently-active member,
// then placement-new-copy the member selected by `other.type`.
//
// Fix: guard against self-assignment. Without the check, destroy_value(this)
// destroys the active member and the copy below would read from the
// just-destroyed object (undefined behavior).
Rule &Rule::operator=(const Rule &other) {
  if (this == &other) return *this;
  destroy_value(this);
  type = other.type;
  switch (type) {
    case BlankType:
      new (&blank_) Blank(other.blank_);
      break;
    case CharacterSetType:
      new (&character_set_) CharacterSet(other.character_set_);
      break;
    case StringType:
      new (&string_) String(other.string_);
      break;
    case PatternType:
      new (&pattern_) Pattern(other.pattern_);
      break;
    case NamedSymbolType:
      new (&named_symbol_) NamedSymbol(other.named_symbol_);
      break;
    case SymbolType:
      new (&symbol_) Symbol(other.symbol_);
      break;
    case ChoiceType:
      new (&choice_) Choice(other.choice_);
      break;
    case MetadataType:
      new (&metadata_) Metadata(other.metadata_);
      break;
    case RepeatType:
      new (&repeat_) Repeat(other.repeat_);
      break;
    case SeqType:
      new (&seq_) Seq(other.seq_);
      break;
  }
  return *this;
}

Rule &Rule::operator=(Rule &&other) noexcept {
  destroy_value(this);
  type = other.type;
  switch (type) {
    case BlankType:
      new (&blank_) Blank(move(other.blank_));
      break;
    case CharacterSetType:
      new (&character_set_) CharacterSet(move(other.character_set_));
      break;
    case StringType:
      new (&string_) String(move(other.string_));
      break;
    case PatternType:
      new (&pattern_) Pattern(move(other.pattern_));
      break;
    case NamedSymbolType:
      new (&named_symbol_) NamedSymbol(move(other.named_symbol_));
      break;
    case SymbolType:
      new (&symbol_) Symbol(move(other.symbol_));
      break;
    case ChoiceType:
      new (&choice_) Choice(move(other.choice_));
      break;
    case MetadataType:
      new (&metadata_) Metadata(move(other.metadata_));
      break;
    case RepeatType:
      new (&repeat_) Repeat(move(other.repeat_));
      break;
    case SeqType:
      new (&seq_) Seq(move(other.seq_));
      break;
  }
  other.type = BlankType;
  other.blank_ = Blank{};
  return *this;
}

// Destroys whichever union member is currently active.
Rule::~Rule() noexcept {
  destroy_value(this);
}

// Structural equality: rules are equal only when they hold the same
// alternative and the held values compare equal. BlankType reaches the
// default branch, where blank_ == blank_ decides.
bool Rule::operator==(const Rule &other) const {
  if (type != other.type) return false;
  switch (type) {
    case Rule::CharacterSetType: return character_set_ == other.character_set_;
    case Rule::StringType: return string_ == other.string_;
    case Rule::PatternType: return pattern_ == other.pattern_;
    case Rule::NamedSymbolType: return named_symbol_ == other.named_symbol_;
    case Rule::SymbolType: return symbol_ == other.symbol_;
    case Rule::ChoiceType: return choice_ == other.choice_;
    case Rule::MetadataType: return metadata_ == other.metadata_;
    case Rule::RepeatType: return repeat_ == other.repeat_;
    case Rule::SeqType: return seq_ == other.seq_;
    default: return blank_ == other.blank_;
  }
}

// Out-of-line specializations of the type-query template for the
// alternatives used elsewhere in this file (Blank, Symbol, Repeat).
template <>
bool Rule::is<Blank>() const { return type == BlankType; }

template <>
bool Rule::is<Symbol>() const { return type == SymbolType; }

template <>
bool Rule::is<Repeat>() const { return type == RepeatType; }

// Unchecked accessor: callers must verify is<Symbol>() first — reading
// symbol_ while another member is active is undefined behavior.
template <>
const Symbol & Rule::get_unchecked<Symbol>() const { return symbol_; }

// Appends `new_rule` to `elements`, recursively flattening nested Choice
// rules and skipping any element already present (linear scan, operator==).
static inline void add_choice_element(std::vector<Rule> *elements, const Rule &new_rule) {
  new_rule.match(
    [elements](Choice choice) {
      for (auto &element : choice.elements) {
        add_choice_element(elements, element);
      }
    },

    [elements](auto rule) {
      // Deduplicate: identical alternatives are added only once.
      for (auto &element : *elements) {
        if (element == rule) return;
      }
      elements->push_back(rule);
    }
  );
}

// Builds a Choice from `rules`, flattened and deduplicated. A choice that
// collapses to a single distinct alternative becomes that alternative itself.
Rule Rule::choice(const vector<Rule> &rules) {
  vector<Rule> elements;
  for (auto &element : rules) {
    add_choice_element(&elements, element);
  }
  return (elements.size() == 1) ? elements.front() : Choice{elements};
}

// Wraps `rule` in a Repeat; already-repeated rules pass through unchanged
// (repetition is idempotent).
Rule Rule::repeat(const Rule &rule) {
  if (rule.is<Repeat>()) {
    return rule;
  }
  return Repeat{rule};
}

// Folds `rules` left-to-right into a nested Seq, dropping Blank rules.
// NOTE(review): a non-blank Metadata rule is always appended as
// Seq{result, rule}, even when `result` is still Blank (unlike the generic
// branch, which replaces a Blank result) — looks intentional, but confirm.
Rule Rule::seq(const vector<Rule> &rules) {
  Rule result;
  for (const auto &rule : rules) {
    rule.match(
      [](Blank) {},
      [&](Metadata metadata) {
        if (!metadata.rule->is<Blank>()) {
          result = Seq{result, rule};
        }
      },
      [&](auto) {
        if (result.is<Blank>()) {
          result = rule;
        } else {
          result = Seq{result, rule};
        }
      }
    );
  }
  return result;
}

}  // namespace rules
}  // namespace tree_sitter

namespace std {

// Combines index and type so symbols of different kinds with the same index
// hash differently.
size_t hash<Symbol>::operator()(const Symbol &symbol) const {
  auto result = hash<int>()(symbol.index);
  hash_combine(&result, hash<int>()(symbol.type));
  return result;
}

// The three string-valued rule kinds hash by their text value alone.
size_t hash<NamedSymbol>::operator()(const NamedSymbol &symbol) const {
  return hash<string>()(symbol.value);
}

size_t hash<Pattern>::operator()(const Pattern &symbol) const {
  return hash<string>()(symbol.value);
}

size_t hash<String>::operator()(const String &symbol) const {
  return hash<string>()(symbol.value);
}

// Mixes the inclusion flag, both character lists, and their sizes.
size_t hash<CharacterSet>::operator()(const CharacterSet &character_set) const {
  size_t result = 0;
  hash_combine(&result, character_set.includes_all);
  hash_combine(&result, character_set.included_chars.size());
  for (uint32_t c : character_set.included_chars) {
    hash_combine(&result, c);
  }
  hash_combine(&result, character_set.excluded_chars.size());
  for (uint32_t c : character_set.excluded_chars) {
    hash_combine(&result, c);
  }
  return result;
}

// All Blank values are equal, so a constant hash is consistent with ==.
size_t hash<Blank>::operator()(const Blank &blank) const {
  return 0;
}

// Uses symmetric_hash_combine — presumably order-insensitive so that choices
// listing the same alternatives in a different order hash equally; confirm
// against symmetric_hash_combine's definition.
size_t hash<Choice>::operator()(const Choice &choice) const {
  size_t result = 0;
  for (const auto &element : choice.elements) {
    symmetric_hash_combine(&result, element);
  }
  return result;
}

size_t hash<Repeat>::operator()(const Repeat &repeat) const {
  size_t result = 0;
  hash_combine(&result, *repeat.rule);
  return result;
}

// Seq hashes left then right, so ordering matters (unlike Choice).
size_t hash<Seq>::operator()(const Seq &seq) const {
  size_t result = 0;
  hash_combine(&result, *seq.left);
  hash_combine(&result, *seq.right);
  return result;
}

// Hashes the wrapped rule plus every parameter flag.
size_t hash<Metadata>::operator()(const Metadata &metadata) const {
  size_t result = 0;
  hash_combine(&result, *metadata.rule);
  hash_combine(&result, metadata.params.precedence);
  // <int> forces the associativity enum through the int overload.
  hash_combine<int>(&result, metadata.params.associativity);
  hash_combine(&result, metadata.params.has_precedence);
  hash_combine(&result, metadata.params.has_associativity);
  hash_combine(&result, metadata.params.is_token);
  hash_combine(&result, metadata.params.is_string);
  hash_combine(&result, metadata.params.is_active);
  hash_combine(&result, metadata.params.is_main_token);
  return result;
}

// Dispatches to the active alternative's hash, mixed with the type tag so
// that, e.g., an empty Choice and an empty Seq do not collide by default.
size_t hash<Rule>::operator()(const Rule &rule) const {
  size_t result = hash<int>()(rule.type);
  switch (rule.type) {
    case Rule::CharacterSetType: return result ^ hash<CharacterSet>()(rule.character_set_);
    case Rule::StringType: return result ^ hash<String>()(rule.string_);
    case Rule::PatternType: return result ^ hash<Pattern>()(rule.pattern_);
    case Rule::NamedSymbolType: return result ^ hash<NamedSymbol>()(rule.named_symbol_);
    case Rule::SymbolType: return result ^ hash<Symbol>()(rule.symbol_);
    case Rule::ChoiceType: return result ^ hash<Choice>()(rule.choice_);
    case Rule::MetadataType: return result ^ hash<Metadata>()(rule.metadata_);
    case Rule::RepeatType: return result ^ hash<Repeat>()(rule.repeat_);
    case Rule::SeqType: return result ^ hash<Seq>()(rule.seq_);
    default: return result ^ hash<Blank>()(rule.blank_);
  }
}

}  // namespace std

================================================
FILE: examples/go/letter_test.go
================================================
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package unicode_test

import (
	"flag"
	"fmt"
	"runtime"
	"sort"
	"testing"
	. "unicode"
)

var upperTest = []rune{
	0x41,
	0xc0,
	0xd8,
	0x100,
	0x139,
	0x14a,
	0x178,
	0x181,
	0x376,
	0x3cf,
	0x13bd,
	0x1f2a,
	0x2102,
	0x2c00,
	0x2c10,
	0x2c20,
	0xa650,
	0xa722,
	0xff3a,
	0x10400,
	0x1d400,
	0x1d7ca,
}

var notupperTest = []rune{
	0x40,
	0x5b,
	0x61,
	0x185,
	0x1b0,
	0x377,
	0x387,
	0x2150,
	0xab7d,
	0xffff,
	0x10000,
}

var letterTest = []rune{
	0x41,
	0x61,
	0xaa,
	0xba,
	0xc8,
	0xdb,
	0xf9,
	0x2ec,
	0x535,
	0x620,
	0x6e6,
	0x93d,
	0xa15,
	0xb99,
	0xdc0,
	0xedd,
	0x1000,
	0x1200,
	0x1312,
	0x1401,
	0x1885,
	0x2c00,
	0xa800,
	0xf900,
	0xfa30,
	0xffda,
	0xffdc,
	0x10000,
	0x10300,
	0x10400,
	0x20000,
	0x2f800,
	0x2fa1d,
}

var notletterTest = []rune{
	0x20,
	0x35,
	0x375,
	0x619,
	0x700,
	0xfffe,
	0x1ffff,
	0x10ffff,
}

// Contains all the special cased Latin-1 chars.
var spaceTest = []rune{
	0x09,
	0x0a,
	0x0b,
	0x0c,
	0x0d,
	0x20,
	0x85,
	0xA0,
	0x2000,
	0x3000,
}

// caseT is one case-mapping test vector: cas selects the conversion
// (UpperCase, LowerCase, TitleCase, or an invalid value for the error path),
// in is the input rune, out the expected result.
type caseT struct {
	cas     int
	in, out rune
}

var caseTest = []caseT{
	// errors
	{-1, '\n', 0xFFFD},
	{UpperCase, -1, -1},
	{UpperCase, 1 << 30, 1 << 30},

	// ASCII (special-cased so test carefully)
	{UpperCase, '\n', '\n'},
	{UpperCase, 'a', 'A'},
	{UpperCase, 'A', 'A'},
	{UpperCase, '7', '7'},
	{LowerCase, '\n', '\n'},
	{LowerCase, 'a', 'a'},
	{LowerCase, 'A', 'a'},
	{LowerCase, '7', '7'},
	{TitleCase, '\n', '\n'},
	{TitleCase, 'a', 'A'},
	{TitleCase, 'A', 'A'},
	{TitleCase, '7', '7'},

	// Latin-1: easy to read the tests!
	{UpperCase, 0x80, 0x80},
	{UpperCase, 'Å', 'Å'},
	{UpperCase, 'å', 'Å'},
	{LowerCase, 0x80, 0x80},
	{LowerCase, 'Å', 'å'},
	{LowerCase, 'å', 'å'},
	{TitleCase, 0x80, 0x80},
	{TitleCase, 'Å', 'Å'},
	{TitleCase, 'å', 'Å'},

	// 0131;LATIN SMALL LETTER DOTLESS I;Ll;0;L;;;;;N;;;0049;;0049
	{UpperCase, 0x0131, 'I'},
	{LowerCase, 0x0131, 0x0131},
	{TitleCase, 0x0131, 'I'},

	// 0133;LATIN SMALL LIGATURE IJ;Ll;0;L;<compat> 0069 006A;;;;N;LATIN SMALL LETTER I J;;0132;;0132
	{UpperCase, 0x0133, 0x0132},
	{LowerCase, 0x0133, 0x0133},
	{TitleCase, 0x0133, 0x0132},

	// 212A;KELVIN SIGN;Lu;0;L;004B;;;;N;DEGREES KELVIN;;;006B;
	{UpperCase, 0x212A, 0x212A},
	{LowerCase, 0x212A, 'k'},
	{TitleCase, 0x212A, 0x212A},

	// From an UpperLower sequence
	// A640;CYRILLIC CAPITAL LETTER ZEMLYA;Lu;0;L;;;;;N;;;;A641;
	{UpperCase, 0xA640, 0xA640},
	{LowerCase, 0xA640, 0xA641},
	{TitleCase, 0xA640, 0xA640},
	// A641;CYRILLIC SMALL LETTER ZEMLYA;Ll;0;L;;;;;N;;;A640;;A640
	{UpperCase, 0xA641, 0xA640},
	{LowerCase, 0xA641, 0xA641},
	{TitleCase, 0xA641, 0xA640},
	// A64E;CYRILLIC CAPITAL LETTER NEUTRAL YER;Lu;0;L;;;;;N;;;;A64F;
	{UpperCase, 0xA64E, 0xA64E},
	{LowerCase, 0xA64E, 0xA64F},
	{TitleCase, 0xA64E, 0xA64E},
	// A65F;CYRILLIC SMALL LETTER YN;Ll;0;L;;;;;N;;;A65E;;A65E
	{UpperCase, 0xA65F, 0xA65E},
	{LowerCase, 0xA65F, 0xA65F},
	{TitleCase, 0xA65F, 0xA65E},

	// From another UpperLower sequence
	// 0139;LATIN CAPITAL LETTER L WITH ACUTE;Lu;0;L;004C 0301;;;;N;LATIN CAPITAL LETTER L ACUTE;;;013A;
	{UpperCase, 0x0139, 0x0139},
	{LowerCase, 0x0139, 0x013A},
	{TitleCase, 0x0139, 0x0139},
	// 013F;LATIN CAPITAL LETTER L WITH MIDDLE DOT;Lu;0;L;<compat> 004C 00B7;;;;N;;;;0140;
	{UpperCase, 0x013f, 0x013f},
	{LowerCase, 0x013f, 0x0140},
	{TitleCase, 0x013f, 0x013f},
	// 0148;LATIN SMALL LETTER N WITH CARON;Ll;0;L;006E 030C;;;;N;LATIN SMALL LETTER N HACEK;;0147;;0147
	{UpperCase, 0x0148, 0x0147},
	{LowerCase, 0x0148, 0x0148},
	{TitleCase, 0x0148, 0x0147},

	// Lowercase lower than uppercase.
	// AB78;CHEROKEE SMALL LETTER GE;Ll;0;L;;;;;N;;;13A8;;13A8
	{UpperCase, 0xab78, 0x13a8},
	{LowerCase, 0xab78, 0xab78},
	{TitleCase, 0xab78, 0x13a8},
	{UpperCase, 0x13a8, 0x13a8},
	{LowerCase, 0x13a8, 0xab78},
	{TitleCase, 0x13a8, 0x13a8},

	// Last block in the 5.1.0 table
	// 10400;DESERET CAPITAL LETTER LONG I;Lu;0;L;;;;;N;;;;10428;
	{UpperCase, 0x10400, 0x10400},
	{LowerCase, 0x10400, 0x10428},
	{TitleCase, 0x10400, 0x10400},
	// 10427;DESERET CAPITAL LETTER EW;Lu;0;L;;;;;N;;;;1044F;
	{UpperCase, 0x10427, 0x10427},
	{LowerCase, 0x10427, 0x1044F},
	{TitleCase, 0x10427, 0x10427},
	// 10428;DESERET SMALL LETTER LONG I;Ll;0;L;;;;;N;;;10400;;10400
	{UpperCase, 0x10428, 0x10400},
	{LowerCase, 0x10428, 0x10428},
	{TitleCase, 0x10428, 0x10400},
	// 1044F;DESERET SMALL LETTER EW;Ll;0;L;;;;;N;;;10427;;10427
	{UpperCase, 0x1044F, 0x10427},
	{LowerCase, 0x1044F, 0x1044F},
	{TitleCase, 0x1044F, 0x10427},

	// First one not in the 5.1.0 table
	// 10450;SHAVIAN LETTER PEEP;Lo;0;L;;;;;N;;;;;
	{UpperCase, 0x10450, 0x10450},
	{LowerCase, 0x10450, 0x10450},
	{TitleCase, 0x10450, 0x10450},

	// Non-letters with case.
	{LowerCase, 0x2161, 0x2171},
	{UpperCase, 0x0345, 0x0399},
}

// TestIsLetter checks IsLetter against the positive tables (uppercase and
// general letters) and the negative table.
func TestIsLetter(t *testing.T) {
	for _, table := range [][]rune{upperTest, letterTest} {
		for _, r := range table {
			if !IsLetter(r) {
				t.Errorf("IsLetter(U+%04X) = false, want true", r)
			}
		}
	}
	for _, r := range notletterTest {
		if IsLetter(r) {
			t.Errorf("IsLetter(U+%04X) = true, want false", r)
		}
	}
}

// TestIsUpper checks IsUpper: true for every uppercase table entry, false
// for non-uppercase letters and for non-letters.
func TestIsUpper(t *testing.T) {
	for _, r := range upperTest {
		if !IsUpper(r) {
			t.Errorf("IsUpper(U+%04X) = false, want true", r)
		}
	}
	for _, table := range [][]rune{notupperTest, notletterTest} {
		for _, r := range table {
			if IsUpper(r) {
				t.Errorf("IsUpper(U+%04X) = true, want false", r)
			}
		}
	}
}

func caseString(c int) string {
	switch c {
	case UpperCase:
		return "UpperCase"
	case LowerCase:
		return "LowerCase"
	case TitleCase:
		return "TitleCase"
	}
	return "ErrorCase"
}

// TestTo exercises the generic To conversion against every table entry.
func TestTo(t *testing.T) {
	for _, c := range caseTest {
		if got := To(c.cas, c.in); got != c.out {
			t.Errorf("To(U+%04X, %s) = U+%04X want U+%04X", c.in, caseString(c.cas), got, c.out)
		}
	}
}

// TestToUpperCase checks ToUpper against the UpperCase entries of caseTest.
func TestToUpperCase(t *testing.T) {
	for _, c := range caseTest {
		if c.cas == UpperCase {
			if got := ToUpper(c.in); got != c.out {
				t.Errorf("ToUpper(U+%04X) = U+%04X want U+%04X", c.in, got, c.out)
			}
		}
	}
}

// TestToLowerCase checks ToLower against the LowerCase entries of caseTest.
func TestToLowerCase(t *testing.T) {
	for _, c := range caseTest {
		if c.cas == LowerCase {
			if got := ToLower(c.in); got != c.out {
				t.Errorf("ToLower(U+%04X) = U+%04X want U+%04X", c.in, got, c.out)
			}
		}
	}
}

// TestToTitleCase checks ToTitle against the TitleCase entries of caseTest.
func TestToTitleCase(t *testing.T) {
	for _, c := range caseTest {
		if c.cas == TitleCase {
			if got := ToTitle(c.in); got != c.out {
				t.Errorf("ToTitle(U+%04X) = U+%04X want U+%04X", c.in, got, c.out)
			}
		}
	}
}

// TestIsSpace checks that whitespace runes report true and letters never do.
func TestIsSpace(t *testing.T) {
	for _, r := range spaceTest {
		if !IsSpace(r) {
			t.Errorf("IsSpace(U+%04X) = false; want true", r)
		}
	}
	for _, r := range letterTest {
		if IsSpace(r) {
			t.Errorf("IsSpace(U+%04X) = true; want false", r)
		}
	}
}

// Check that the optimizations for IsLetter etc. agree with the tables.
// We only need to check the Latin-1 range.
func TestLetterOptimizations(t *testing.T) {
	// Each fast-path predicate/converter must agree with the table-driven
	// Is/To forms over the whole Latin-1 range (the range the fast paths
	// special-case).
	for i := rune(0); i <= MaxLatin1; i++ {
		if Is(Letter, i) != IsLetter(i) {
			t.Errorf("IsLetter(U+%04X) disagrees with Is(Letter)", i)
		}
		if Is(Upper, i) != IsUpper(i) {
			t.Errorf("IsUpper(U+%04X) disagrees with Is(Upper)", i)
		}
		if Is(Lower, i) != IsLower(i) {
			t.Errorf("IsLower(U+%04X) disagrees with Is(Lower)", i)
		}
		if Is(Title, i) != IsTitle(i) {
			t.Errorf("IsTitle(U+%04X) disagrees with Is(Title)", i)
		}
		if Is(White_Space, i) != IsSpace(i) {
			t.Errorf("IsSpace(U+%04X) disagrees with Is(White_Space)", i)
		}
		if To(UpperCase, i) != ToUpper(i) {
			t.Errorf("ToUpper(U+%04X) disagrees with To(Upper)", i)
		}
		if To(LowerCase, i) != ToLower(i) {
			t.Errorf("ToLower(U+%04X) disagrees with To(Lower)", i)
		}
		if To(TitleCase, i) != ToTitle(i) {
			t.Errorf("ToTitle(U+%04X) disagrees with To(Title)", i)
		}
	}
}

// TestTurkishCase checks the Turkish-specific case mappings (dotted and
// dotless i) for each lower/upper rune pair: case conversion must be
// idempotent on already-converted runes and must map between the pairs.
func TestTurkishCase(t *testing.T) {
	lower := []rune("abcçdefgğhıijklmnoöprsştuüvyz")
	upper := []rune("ABCÇDEFGĞHIİJKLMNOÖPRSŞTUÜVYZ")
	for i, l := range lower {
		u := upper[i]
		if TurkishCase.ToLower(l) != l {
			t.Errorf("lower(U+%04X) is U+%04X not U+%04X", l, TurkishCase.ToLower(l), l)
		}
		if TurkishCase.ToUpper(u) != u {
			t.Errorf("upper(U+%04X) is U+%04X not U+%04X", u, TurkishCase.ToUpper(u), u)
		}
		if TurkishCase.ToUpper(l) != u {
			t.Errorf("upper(U+%04X) is U+%04X not U+%04X", l, TurkishCase.ToUpper(l), u)
		}
		if TurkishCase.ToLower(u) != l {
			// BUG FIX: report ToLower(u) — the value actually tested — not ToLower(l).
			t.Errorf("lower(U+%04X) is U+%04X not U+%04X", u, TurkishCase.ToLower(u), l)
		}
		if TurkishCase.ToTitle(u) != u {
			t.Errorf("title(U+%04X) is U+%04X not U+%04X", u, TurkishCase.ToTitle(u), u)
		}
		if TurkishCase.ToTitle(l) != u {
			t.Errorf("title(U+%04X) is U+%04X not U+%04X", l, TurkishCase.ToTitle(l), u)
		}
	}
}

var simpleFoldTests = []string{
	// SimpleFold(x) returns the next equivalent rune > x or wraps
	// around to smaller values.
	// Each string below is one complete fold cycle: folding any rune in
	// the string yields the next rune, and folding the last yields the first.

	// Easy cases.
	"Aa",
	"δΔ",

	// ASCII special cases.
	"KkK",
	"Ssſ",

	// Non-ASCII special cases.
	"ρϱΡ",
	"ͅΙιι",

	// Extra special cases: has lower/upper but no case fold.
	"İ",
	"ı",

	// Upper comes before lower (Cherokee).
	"\u13b0\uab80",
}

// TestSimpleFold walks each fold cycle in simpleFoldTests, checking that
// SimpleFold of every rune yields the next rune in the cycle (starting from
// the last rune, which must wrap around to the first).
func TestSimpleFold(t *testing.T) {
	for _, tt := range simpleFoldTests {
		cycle := []rune(tt)
		r := cycle[len(cycle)-1]
		for _, out := range cycle {
			// BUG FIX: the original shadowed r (`if r := SimpleFold(r)`), so the
			// error message printed the result in the input position. Use a
			// distinct name so both input and result are reported correctly.
			if got := SimpleFold(r); got != out {
				t.Errorf("SimpleFold(%#U) = %#U, want %#U", r, got, out)
			}
			r = out
		}
	}
}

// Running 'go test -calibrate' runs the calibration to find a plausible
// cutoff point for linear search of a range list vs. binary search.
// We create a fake table and then time how long it takes to do a
// sequence of searches within that table, for all possible inputs
// relative to the ranges (something before all, in each, between each, after all).
// This assumes that all possible runes are equally likely.
// In practice most runes are ASCII so this is a conservative estimate
// of an effective cutoff value. In practice we could probably set it higher
// than what this function recommends.

// calibrate gates TestCalibrate below; it is off by default so ordinary
// 'go test' runs skip the (slow) benchmark-driven search.
var calibrate = flag.Bool("calibrate", false, "compute crossover for linear vs. binary search")

// TestCalibrate benchmarks linear vs. binary search over fake tables of
// increasing size and reports the table length at which binary search wins
// by more than 10%. It only runs when the -calibrate flag is set.
func TestCalibrate(t *testing.T) {
	if !*calibrate {
		return
	}

	// BUG FIX: the warning is for architectures the cutoff was not tuned on,
	// so it must fire when GOARCH is NOT amd64 (the original used ==).
	if runtime.GOARCH != "amd64" {
		fmt.Printf("warning: running calibration on %s\n", runtime.GOARCH)
	}

	// Find the point where binary search wins by more than 10%.
	// The 10% bias gives linear search an edge when they're close,
	// because on predominantly ASCII inputs linear search is even
	// better than our benchmarks measure.
	n := sort.Search(64, func(n int) bool {
		tab := fakeTable(n)
		blinear := func(b *testing.B) {
			tab := tab
			max := n*5 + 20
			for i := 0; i < b.N; i++ {
				for j := 0; j <= max; j++ {
					linear(tab, uint16(j))
				}
			}
		}
		bbinary := func(b *testing.B) {
			tab := tab
			max := n*5 + 20
			for i := 0; i < b.N; i++ {
				for j := 0; j <= max; j++ {
					binary(tab, uint16(j))
				}
			}
		}
		bmlinear := testing.Benchmark(blinear)
		bmbinary := testing.Benchmark(bbinary)
		fmt.Printf("n=%d: linear=%d binary=%d\n", n, bmlinear.NsPerOp(), bmbinary.NsPerOp())
		return bmlinear.NsPerOp()*100 > bmbinary.NsPerOp()*110
	})
	fmt.Printf("calibration: linear cutoff = %d\n", n)
}

func fakeTable(n int) []Range16 {
	var r16 []Range16
	for i := 0; i < n; i++ {
		r16 = append(r16, Range16{uint16(i*5 + 10), uint16(i*5 + 12), 1})
	}
	return r16
}

func linear(ranges []Range16, r uint16) bool {
	for i := range ranges {
		range_ := &ranges[i]
		if r < range_.Lo {
			return false
		}
		if r <= range_.Hi {
			return (r-range_.Lo)%range_.Stride == 0
		}
	}
	return false
}

func binary(ranges []Range16, r uint16) bool {
	// binary search over ranges
	lo := 0
	hi := len(ranges)
	for lo < hi {
		m := lo + (hi-lo)/2
		range_ := &ranges[m]
		if range_.Lo <= r && r <= range_.Hi {
			return (r-range_.Lo)%range_.Stride == 0
		}
		if r < range_.Lo {
			hi = m
		} else {
			lo = m + 1
		}
	}
	return false
}

// TestLatinOffset checks that every RangeTable's LatinOffset equals the
// number of leading R16 entries that lie entirely within Latin-1.
func TestLatinOffset(t *testing.T) {
	maps := []map[string]*RangeTable{
		Categories,
		FoldCategory,
		FoldScript,
		Properties,
		Scripts,
	}
	for _, m := range maps {
		for name, tab := range m {
			// Count leading ranges whose Hi stays within Latin-1.
			want := 0
			for want < len(tab.R16) && tab.R16[want].Hi <= MaxLatin1 {
				want++
			}
			if got := tab.LatinOffset; got != want {
				t.Errorf("%s: LatinOffset=%d, want %d", name, got, want)
			}
		}
	}
}


================================================
FILE: examples/go/no_newline_at_eof.go
================================================
// run

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

// main exercises closures that capture and mutate a shared local variable:
// the first closure sets x, the second panics if the write was not observed.
func main() {
	x := 0
	set := func() {
		x = 1
	}
	check := func() {
		if x != 1 {
			panic("x != 1")
		}
	}
	set()
	check()
}

================================================
FILE: examples/go/proc.go
================================================
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// buildVersion mirrors sys.TheVersion; schedinit resets it if empty so the
// linker keeps runtime·buildVersion in the resulting binary.
var buildVersion = sys.TheVersion

// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
//     M must have an associated P to execute Go code, however it can be
//     blocked or in a syscall w/o an associated P.
//
// Design doc at https://golang.org/s/go11sched.

// Worker thread parking/unparking.
// We need to balance between keeping enough running worker threads to utilize
// available hardware parallelism and parking excessive running worker threads
// to conserve CPU resources and power. This is not simple for two reasons:
// (1) scheduler state is intentionally distributed (in particular, per-P work
// queues), so it is not possible to compute global predicates on fast paths;
// (2) for optimal thread management we would need to know the future (don't park
// a worker thread when a new goroutine will be readied in near future).
//
// Three rejected approaches that would work badly:
// 1. Centralize all scheduler state (would inhibit scalability).
// 2. Direct goroutine handoff. That is, when we ready a new goroutine and there
//    is a spare P, unpark a thread and handoff it the thread and the goroutine.
//    This would lead to thread state thrashing, as the thread that readied the
//    goroutine can be out of work the very next moment, we will need to park it.
//    Also, it would destroy locality of computation as we want to preserve
//    dependent goroutines on the same thread; and introduce additional latency.
// 3. Unpark an additional thread whenever we ready a goroutine and there is an
//    idle P, but don't do handoff. This would lead to excessive thread parking/
//    unparking as the additional threads will instantly park without discovering
//    any work to do.
//
// The current approach:
// We unpark an additional thread when we ready a goroutine if (1) there is an
// idle P and (2) there are no "spinning" worker threads. A worker thread is considered
// spinning if it is out of local work and did not find work in global run queue/
// netpoller; the spinning state is denoted in m.spinning and in sched.nmspinning.
// Threads unparked this way are also considered spinning; we don't do goroutine
// handoff so such threads are out of work initially. Spinning threads do some
// spinning looking for work in per-P run queues before parking. If a spinning
// thread finds work it takes itself out of the spinning state and proceeds to
// execution. If it does not find work it takes itself out of the spinning state
// and then parks.
// If there is at least one spinning thread (sched.nmspinning>0), we don't unpark
// new threads when readying goroutines. To compensate for that, if the last spinning
// thread finds work and stops spinning, it must unpark a new spinning thread.
// This approach smooths out unjustified spikes of thread unparking,
// but at the same time guarantees eventual maximal CPU parallelism utilization.
//
// The main implementation complication is that we need to be very careful during
// spinning->non-spinning thread transition. This transition can race with submission
// of a new goroutine, and either one part or another needs to unpark another worker
// thread. If they both fail to do that, we can end up with semi-persistent CPU
// underutilization. The general pattern for goroutine readying is: submit a goroutine
// to local work queue, #StoreLoad-style memory barrier, check sched.nmspinning.
// The general pattern for spinning->non-spinning transition is: decrement nmspinning,
// #StoreLoad-style memory barrier, check all per-P work queues for new work.
// Note that all this complexity does not apply to global run queue as we are not
// sloppy about thread unparking when submitting to global queue. Also see comments
// for nmspinning manipulation.

// m0 and g0 are the bootstrap M and its scheduling goroutine; main() below
// checks that it runs on m0.
var (
	m0 m
	g0 g
)

// runtime_init is the runtime package's init, invoked via linkname
// from assembly/bootstrap code.
//go:linkname runtime_init runtime.init
func runtime_init()

// main_init is the user program's package-init entry point.
//go:linkname main_init main.init
func main_init()

// main_init_done is a signal used by cgocallbackg that initialization
// has been completed. It is made before _cgo_notify_runtime_init_done,
// so all cgo calls can rely on it existing. When main_init is complete,
// it is closed, meaning cgocallbackg can reliably receive from it.
var main_init_done chan bool

// main_main is the user program's main function.
//go:linkname main_main main.main
func main_main()

// runtimeInitTime is the nanotime() at which the runtime started.
var runtimeInitTime int64

// Value to use for signal mask for newly created M's.
var initSigmask sigset

// The main goroutine. Runs runtime and user initialization in a fixed
// order (sysmon start, runtime_init, gcenable, cgo handshake, main_init,
// main_main), then exits the process.
func main() {
	g := getg()

	// Racectx of m0->g0 is used only as the parent of the main goroutine.
	// It must not be used for anything else.
	g.m.g0.racectx = 0

	// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
	// Using decimal instead of binary GB and MB because
	// they look nicer in the stack overflow failure message.
	if sys.PtrSize == 8 {
		maxstacksize = 1000000000
	} else {
		maxstacksize = 250000000
	}

	// Record when the world started.
	runtimeInitTime = nanotime()

	// Start the system monitor on its own M before anything else runs.
	systemstack(func() {
		newm(sysmon, nil)
	})

	// Lock the main goroutine onto this, the main OS thread,
	// during initialization.  Most programs won't care, but a few
	// do require certain calls to be made by the main thread.
	// Those can arrange for main.main to run in the main thread
	// by calling runtime.LockOSThread during initialization
	// to preserve the lock.
	lockOSThread()

	if g.m != &m0 {
		throw("runtime.main not on m0")
	}

	runtime_init() // must be before defer

	// Defer unlock so that runtime.Goexit during init does the unlock too.
	needUnlock := true
	defer func() {
		if needUnlock {
			unlockOSThread()
		}
	}()

	gcenable()

	// Verify all required cgo entry points before notifying cgo that the
	// runtime is initialized.
	main_init_done = make(chan bool)
	if iscgo {
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		if _cgo_malloc == nil {
			throw("_cgo_malloc missing")
		}
		if _cgo_free == nil {
			throw("_cgo_free missing")
		}
		if GOOS != "windows" {
			if _cgo_setenv == nil {
				throw("_cgo_setenv missing")
			}
			if _cgo_unsetenv == nil {
				throw("_cgo_unsetenv missing")
			}
		}
		if _cgo_notify_runtime_init_done == nil {
			throw("_cgo_notify_runtime_init_done missing")
		}
		cgocall(_cgo_notify_runtime_init_done, nil)
	}

	main_init()
	close(main_init_done)

	needUnlock = false
	unlockOSThread()

	if isarchive || islibrary {
		// A program compiled with -buildmode=c-archive or c-shared
		// has a main, but it is not executed.
		return
	}
	main_main()
	if raceenabled {
		racefini()
	}

	// Make racy client program work: if panicking on
	// another goroutine at the same time as main returns,
	// let the other goroutine finish printing the panic trace.
	// Once it does, it will exit. See issue 3934.
	if panicking != 0 {
		gopark(nil, nil, "panicwait", traceEvGoStop, 1)
	}

	exit(0)
	// exit must not return; crash with a nil dereference if it somehow does.
	for {
		var x *int32
		*x = 0
	}
}

// os_beforeExit is called from os.Exit(0).
// It flushes the race detector (racefini) before the process exits; nothing
// else needs to run on this path.
//go:linkname os_beforeExit os.runtime_beforeExit
func os_beforeExit() {
	if raceenabled {
		racefini()
	}
}

// start forcegc helper goroutine
// (spawned at package init so periodic forced GC is available from startup).
func init() {
	go forcegchelper()
}

// forcegchelper loops forever: it parks itself as "idle" under forcegc.lock
// and, each time sysmon resumes it, starts a background-mode GC cycle.
func forcegchelper() {
	forcegc.g = getg()
	for {
		lock(&forcegc.lock)
		// We must be the only one to flip idle from 0 to 1.
		if forcegc.idle != 0 {
			throw("forcegc: phase error")
		}
		atomic.Store(&forcegc.idle, 1)
		goparkunlock(&forcegc.lock, "force gc (idle)", traceEvGoBlock, 1)
		// this goroutine is explicitly resumed by sysmon
		if debug.gctrace > 0 {
			println("GC forced")
		}
		gcStart(gcBackgroundMode, true)
	}
}

// NOTE(review): the blank line below detaches this //go:nosplit directive
// from Gosched, so it likely applies to nothing — confirm intent.
//go:nosplit

// Gosched yields the processor, allowing other goroutines to run.  It does not
// suspend the current goroutine, so execution resumes automatically.
func Gosched() {
	// Switch to the g0 stack to run the scheduler's yield path.
	mcall(gosched_m)
}

// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
//
// unlockf    - called after the park to release lock; may veto the park.
// lock       - opaque pointer handed to unlockf.
// reason     - human-readable wait reason stored on the g.
// traceEv    - trace event recorded for the park.
// traceskip  - stack frames to skip when tracing.
func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason string, traceEv byte, traceskip int) {
	mp := acquirem()
	gp := mp.curg
	status := readgstatus(gp)
	if status != _Grunning && status != _Gscanrunning {
		throw("gopark: bad g status")
	}
	mp.waitlock = lock
	// Store unlockf as a raw pointer; it is recovered and invoked in park_m.
	mp.waitunlockf = *(*unsafe.Pointer)(unsafe.Pointer(&unlockf))
	gp.waitreason = reason
	mp.waittraceev = traceEv
	mp.waittraceskip = traceskip
	releasem(mp)
	// can't do anything that might move the G between Ms here.
	mcall(park_m)
}

// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling goready(gp).
// Thin wrapper over gopark with parkunlock_c as the unlock function.
func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip int) {
	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceEv, traceskip)
}

// goready marks gp runnable, running ready on the system stack.
func goready(gp *g, traceskip int) {
	systemstack(func() {
		ready(gp, traceskip)
	})
}

//go:nosplit
func acquireSudog() *sudog {
	// Delicate dance: the semaphore implementation calls
	// acquireSudog, acquireSudog calls new(sudog),
	// new calls malloc, malloc can call the garbage collector,
	// and the garbage collector calls the semaphore implementation
	// in stopTheWorld.
	// Break the cycle by doing acquirem/releasem around new(sudog).
	// The acquirem/releasem increments m.locks during new(sudog),
	// which keeps the garbage collector from being invoked.
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		lock(&sched.sudoglock)
		// First, try to grab a batch from central cache.
		// Refill the per-P cache to at most half capacity from the
		// central linked list.
		for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
			s := sched.sudogcache
			sched.sudogcache = s.next
			s.next = nil
			pp.sudogcache = append(pp.sudogcache, s)
		}
		unlock(&sched.sudoglock)
		// If the central cache is empty, allocate a new one.
		if len(pp.sudogcache) == 0 {
			pp.sudogcache = append(pp.sudogcache, new(sudog))
		}
	}
	// Pop the last entry from the per-P cache, clearing the slot so the
	// cache does not retain a reference.
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

// releaseSudog returns s to the per-P sudog cache, first verifying that all
// of its links have been cleared. If the per-P cache is full, half of it is
// spilled to the central cache under sched.sudoglock.
//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.selectdone != nil {
		throw("runtime: sudog with non-nil selectdone")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	if len(pp.sudogcache) == cap(pp.sudogcache) {
		// Transfer half of local cache to the central cache.
		// Build a local linked list first, then splice it in with one
		// lock acquisition.
		var first, last *sudog
		for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
			n := len(pp.sudogcache)
			p := pp.sudogcache[n-1]
			pp.sudogcache[n-1] = nil
			pp.sudogcache = pp.sudogcache[:n-1]
			if first == nil {
				first = p
			} else {
				last.next = p
			}
			last = p
		}
		lock(&sched.sudoglock)
		last.next = sched.sudogcache
		sched.sudogcache = first
		unlock(&sched.sudoglock)
	}
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
// It reads the word one pointer past &f and double-dereferences it —
// presumably the func value's first word is the entry PC; verify against
// the interface/func layout before changing.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
}

// called from assembly
// badmcall reports an mcall issued while already on the g0 stack.
func badmcall(fn func(*g)) {
	throw("runtime: mcall called on m->g0 stack")
}

// badmcall2 reports an mcall'd function that returned (it must not).
func badmcall2(fn func(*g)) {
	throw("runtime: mcall function returned")
}

// badreflectcall reports a reflect.call with an oversized argument frame.
func badreflectcall() {
	panic("runtime: arg size to reflect.call more than 1GB")
}

// lockedOSThread reports whether the calling goroutine is locked to its OS
// thread (both the g->m and m->g lock links must be set).
func lockedOSThread() bool {
	gp := getg()
	if gp.lockedm == nil {
		return false
	}
	return gp.m.lockedg != nil
}

// allgs holds every g ever created; it is appended to under allglock
// (see allgadd below).
var (
	allgs    []*g
	allglock mutex
)

// allgadd appends gp to allgs under allglock and updates allglen.
// gp must already have a real status (not _Gidle).
func allgadd(gp *g) {
	if readgstatus(gp) == _Gidle {
		throw("allgadd: bad status Gidle")
	}

	lock(&allglock)
	allgs = append(allgs, gp)
	allglen = uintptr(len(allgs))
	unlock(&allglock)
}

const (
	// Number of goroutine ids to grab from sched.goidgen to local per-P cache at once.
	// 16 seems to provide enough amortization, but other than that it's mostly arbitrary number.
	_GoidCacheBatch = 16
)

// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.
func schedinit() {
	// raceinit must be the first call to race detector.
	// In particular, it must be done before mallocinit below calls racemapshadow.
	_g_ := getg()
	if raceenabled {
		_g_.racectx = raceinit()
	}

	sched.maxmcount = 10000

	// Cache the framepointer experiment.  This affects stack unwinding.
	framepointer_enabled = haveexperiment("framepointer")

	// Initialize core subsystems in dependency order.
	tracebackinit()
	moduledataverify()
	stackinit()
	mallocinit()
	mcommoninit(_g_.m)

	// Capture the initial signal mask for newly created Ms.
	msigsave(_g_.m)
	initSigmask = _g_.m.sigmask

	goargs()
	goenvs()
	parsedebugvars()
	gcinit()

	sched.lastpoll = uint64(nanotime())
	// Determine P count: GOMAXPROCS env var overrides ncpu, clamped to the max.
	procs := int(ncpu)
	if n := atoi(gogetenv("GOMAXPROCS")); n > 0 {
		if n > _MaxGomaxprocs {
			n = _MaxGomaxprocs
		}
		procs = n
	}
	if procresize(int32(procs)) != nil {
		throw("unknown runnable goroutine during bootstrap")
	}

	if buildVersion == "" {
		// Condition should never trigger.  This code just serves
		// to ensure runtime·buildVersion is kept in the resulting binary.
		buildVersion = "unknown"
	}
}

// dumpgstatus prints the id and atomic status of both gp and the current g,
// for diagnostics before a throw.
func dumpgstatus(gp *g) {
	_g_ := getg()
	print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
	print("runtime:  g:  g=", _g_, ", goid=", _g_.goid, ",  g->atomicstatus=", readgstatus(_g_), "\n")
}

// checkmcount throws if the number of Ms exceeds sched.maxmcount.
func checkmcount() {
	// sched lock is held
	if sched.mcount > sched.maxmcount {
		print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
		throw("thread exhaustion")
	}
}

// mcommoninit performs the OS-independent part of M initialization:
// assigns an id, seeds fastrand, runs mpreinit, and publishes mp on allm.
func mcommoninit(mp *m) {
	_g_ := getg()

	// g0 stack won't make sense for user (and is not necessary unwindable).
	if _g_ != _g_.m.g0 {
		callers(1, mp.createstack[:])
	}

	// Seed the per-M random state; it must never be zero.
	mp.fastrand = 0x49f6428a + uint32(mp.id) + uint32(cputicks())
	if mp.fastrand == 0 {
		mp.fastrand = 0x49f6428a
	}

	lock(&sched.lock)
	mp.id = sched.mcount
	sched.mcount++
	checkmcount()
	mpreinit(mp)
	if mp.gsignal != nil {
		mp.gsignal.stackguard1 = mp.gsignal.stack.lo + _StackGuard
	}

	// Add to allm so garbage collector doesn't free g->m
	// when it is just in a register or thread-local storage.
	mp.alllink = allm

	// NumCgoCall() iterates over allm w/o schedlock,
	// so we need to publish it safely.
	atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
	unlock(&sched.lock)
}

// Mark gp ready to run.
// gp must be in _Gwaiting (possibly with the scan bit); it is moved to
// _Grunnable, placed on the local run queue, and a P is woken if idle.
func ready(gp *g, traceskip int) {
	if trace.enabled {
		traceGoUnpark(gp, traceskip)
	}

	status := readgstatus(gp)

	// Mark runnable.
	_g_ := getg()
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	if status&^_Gscan != _Gwaiting {
		dumpgstatus(gp)
		throw("bad g->status in ready")
	}

	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
	casgstatus(gp, _Gwaiting, _Grunnable)
	runqput(_g_.m.p.ptr(), gp, true)
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 { // TODO: fast atomic
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

// gcprocs returns how many CPUs to use during GC: the minimum of
// gomaxprocs, the actual CPU count, _MaxGcproc, and the number of Ms
// that can run (idle Ms plus the one currently running).
func gcprocs() int32 {
	lock(&sched.lock)
	n := gomaxprocs
	if ncpu < n {
		n = ncpu
	}
	if _MaxGcproc < n {
		n = _MaxGcproc
	}
	if limit := sched.nmidle + 1; limit < n { // one M is currently running
		n = limit
	}
	unlock(&sched.lock)
	return n
}

// needaddgcproc reports whether the desired GC CPU count (gomaxprocs
// clamped by ncpu and _MaxGcproc) exceeds the Ms currently available
// (idle Ms plus the one running), i.e. whether more Ms are needed.
func needaddgcproc() bool {
	lock(&sched.lock)
	n := gomaxprocs
	if ncpu < n {
		n = ncpu
	}
	if _MaxGcproc < n {
		n = _MaxGcproc
	}
	// One M is currently running, so only n-(nmidle+1) more would be needed.
	needed := n - (sched.nmidle + 1)
	unlock(&sched.lock)
	return needed > 0
}

// helpgc wakes nproc-1 idle Ms (the caller's M is the nproc'th), assigning
// each a P and mcache distinct from the caller's, to assist with GC.
func helpgc(nproc int32) {
	_g_ := getg()
	lock(&sched.lock)
	pos := 0
	for n := int32(1); n < nproc; n++ { // one M is currently running
		// Skip the P whose mcache the current M is using.
		if allp[pos].mcache == _g_.m.mcache {
			pos++
		}
		mp := mget()
		if mp == nil {
			throw("gcprocs inconsistency")
		}
		mp.helpgc = n
		mp.p.set(allp[pos])
		mp.mcache = allp[pos].mcache
		pos++
		notewakeup(&mp.park)
	}
	unlock(&sched.lock)
}

// freezeStopWait is a large value that freezetheworld sets
// sched.stopwait to in order to request that all Gs permanently stop.
// (0x7fffffff is the maximum int32, so the count can never reach zero.)
const freezeStopWait = 0x7fffffff

// Similar to stopTheWorld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
func freezetheworld() {
	// stopwait and preemption requests can be lost
	// due to races with concurrently executing threads,
	// so try several times
	for i := 0; i < 5; i++ {
		// this should tell the scheduler to not start any new goroutines
		sched.stopwait = freezeStopWait
		atomic.Store(&sched.gcwaiting, 1)
		// this should stop running goroutines
		if !preemptall() {
			break // no running goroutines
		}
		usleep(1000)
	}
	// to be sure
	usleep(1000)
	preemptall()
	usleep(1000)
}

// isscanstatus reports whether status has the _Gscan bit set.
// A bare _Gscan (scan bit with no underlying status) is never legal.
func isscanstatus(status uint32) bool {
	if status == _Gscan {
		throw("isscanstatus: Bad status Gscan")
	}
	return status&_Gscan == _Gscan
}

// All reads and writes of g's status go through readgstatus, casgstatus
// castogscanstatus, casfrom_Gscanstatus.
// readgstatus returns gp's status with an atomic load.
//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}

// Ownership of gscanvalid:
//
// If gp is running (meaning status == _Grunning or _Grunning|_Gscan),
// then gp owns gp.gscanvalid, and other goroutines must not modify it.
//
// Otherwise, a second goroutine can lock the scan state by setting _Gscan
// in the status bit and then modify gscanvalid, and then unlock the scan state.
//
// Note that the first condition implies an exception to the second:
// if a second goroutine changes gp's status to _Grunning|_Gscan,
// that second goroutine still does not have the right to modify gscanvalid.

// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
	success := false

	// Check that transition is valid.
	switch oldval {
	default:
		print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus:top gp->status is not in scan state")
	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscanrunning,
		_Gscansyscall:
		// Normal case: drop just the scan bit.
		if newval == oldval&^_Gscan {
			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	case _Gscanenqueue:
		// Special case: _Gscanenqueue releases to _Gwaiting.
		if newval == _Gwaiting {
			success = atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	if !success {
		print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
		dumpgstatus(gp)
		throw("casfrom_Gscanstatus: gp->status is not in scan state")
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}

// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while the casfromgstatus acts like a lock release.
func castogscanstatus(gp *g, oldval, newval uint32) bool {
	switch oldval {
	case _Grunnable,
		_Gwaiting,
		_Gsyscall:
		// Normal case: set just the scan bit on the current status.
		if newval == oldval|_Gscan {
			return atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	case _Grunning:
		// A running g may also transition to _Gscanenqueue.
		if newval == _Gscanrunning || newval == _Gscanenqueue {
			return atomic.Cas(&gp.atomicstatus, oldval, newval)
		}
	}
	// Any other transition request is a bug in the caller.
	print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
	throw("castogscanstatus")
	panic("not reached")
}

// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfrom_Gscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
//go:nosplit
func casgstatus(gp *g, oldval, newval uint32) {
	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
		systemstack(func() {
			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
			throw("casgstatus: bad incoming values")
		})
	}

	if oldval == _Grunning && gp.gcscanvalid {
		// If oldval == _Grunning, then the actual status must be
		// _Grunning or _Grunning|_Gscan; either way,
		// we own gp.gcscanvalid, so it's safe to read.
		// gp.gcscanvalid must not be true when we are running.
		print("runtime: casgstatus ", hex(oldval), "->", hex(newval), " gp.status=", hex(gp.atomicstatus), " gp.gcscanvalid=true\n")
		throw("casgstatus")
	}

	// loop if gp->atomicstatus is in a scan state giving
	// GC time to finish and change the state to oldval.
	for !atomic.Cas(&gp.atomicstatus, oldval, newval) {
		if oldval == _Gwaiting && gp.atomicstatus == _Grunnable {
			systemstack(func() {
				throw("casgstatus: waiting for Gwaiting but is Grunnable")
			})
		}
		// Help GC if needed.
		// if gp.preemptscan && !gp.gcworkdone && (oldval == _Grunning || oldval == _Gsyscall) {
		// 	gp.preemptscan = false
		// 	systemstack(func() {
		// 		gcphasework(gp)
		// 	})
		// }
	}
	if newval == _Grunning {
		gp.gcscanvalid = false
	}
}

// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
// Returns old status. Cannot call casgstatus directly, because we are racing with an
// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
// it would loop waiting for the status to go back to Gwaiting, which it never will.
//go:nosplit
func casgcopystack(gp *g) uint32 {
	// Re-read the status on every attempt so a Gwaiting->Grunnable race
	// is picked up rather than spun on.
	for {
		oldstatus := readgstatus(gp) &^ _Gscan
		if oldstatus != _Gwaiting && oldstatus != _Grunnable {
			throw("copystack: bad status, not Gwaiting or Grunnable")
		}
		if atomic.Cas(&gp.atomicstatus, oldstatus, _Gcopystack) {
			return oldstatus
		}
	}
}

// scang blocks until gp's stack has been scanned.
// It might be scanned by scang or it might be scanned by the goroutine itself.
// Either way, the stack scan has completed when scang returns.
func scang(gp *g) {
	// Invariant; we (the caller, markroot for a specific goroutine) own gp.gcscandone.
	// Nothing is racing with us now, but gcscandone might be set to true left over
	// from an earlier round of stack scanning (we scan twice per GC).
	// We use gcscandone to record whether the scan has been done during this round.
	// It is important that the scan happens exactly once: if called twice,
	// the installation of stack barriers will detect the double scan and die.

	gp.gcscandone = false

	// Endeavor to get gcscandone set to true,
	// either by doing the stack scan ourselves or by coercing gp to scan itself.
	// gp.gcscandone can transition from false to true when we're not looking
	// (if we asked for preemption), so any time we lock the status using
	// castogscanstatus we have to double-check that the scan is still not done.
	for !gp.gcscandone {
		switch s := readgstatus(gp); s {
		default:
			dumpgstatus(gp)
			throw("stopg: invalid status")

		case _Gdead:
			// No stack.
			gp.gcscandone = true

		case _Gcopystack:
		// Stack being switched. Go around again.

		case _Grunnable, _Gsyscall, _Gwaiting:
			// Claim goroutine by setting scan bit.
			// Racing with execution or readying of gp.
			// The scan bit keeps them from running
			// the goroutine until we're done.
			if castogscanstatus(gp, s, s|_Gscan) {
				if !gp.gcscandone {
					scanstack(gp)
					gp.gcscandone = true
				}
				// Release the scan bit and let gp run again.
				restartg(gp)
			}

		case _Gscanwaiting:
		// newstack is doing a scan for us right now. Wait.

		case _Grunning:
			// Goroutine running. Try to preempt execution so it can scan itself.
			// The preemption handler (in newstack) does the actual scan.

			// Optimization: if there is already a pending preemption request
			// (from the previous loop iteration), don't bother with the atomics.
			if gp.preemptscan && gp.preempt && gp.stackguard0 == stackPreempt {
				break
			}

			// Ask for preemption and self scan.
			if castogscanstatus(gp, _Grunning, _Gscanrunning) {
				if !gp.gcscandone {
					gp.preemptscan = true
					gp.preempt = true
					gp.stackguard0 = stackPreempt
				}
				casfrom_Gscanstatus(gp, _Gscanrunning, _Grunning)
			}
		}
	}

	gp.preemptscan = false // cancel scan request if no longer needed
}

// The GC requests that this routine be moved from a scanmumble state to a mumble state.
// restartg clears gp's scan bit, re-enabling the goroutine after a stack scan.
func restartg(gp *g) {
	s := readgstatus(gp)
	switch s {
	default:
		dumpgstatus(gp)
		throw("restartg: unexpected status")

	case _Gdead:
	// ok

	case _Gscanrunnable,
		_Gscanwaiting,
		_Gscansyscall:
		casfrom_Gscanstatus(gp, s, s&^_Gscan)

	// Scan is now completed.
	// Goroutine now needs to be made runnable.
	// We put it on the global run queue; ready blocks on the global scheduler lock.
	case _Gscanenqueue:
		casfrom_Gscanstatus(gp, _Gscanenqueue, _Gwaiting)
		if gp != getg().m.curg {
			throw("processing Gscanenqueue on wrong m")
		}
		dropg()
		ready(gp, 0)
	}
}

// stopTheWorld stops all P's from executing goroutines, interrupting
// all goroutines at GC safe points and records reason as the reason
// for the stop. On return, only the current goroutine's P is running.
// stopTheWorld must not be called from a system stack and the caller
// must not hold worldsema. The caller must call startTheWorld when
// other P's should resume execution.
//
// stopTheWorld is safe for multiple goroutines to call at the
// same time. Each will execute its own stop, and the stops will
// be serialized.
//
// This is also used by routines that do stack dumps. If the system is
// in panic or being exited, this may not reliably stop all
// goroutines.
func stopTheWorld(reason string) {
	// Serialize stops via worldsema, disable preemption, then do the
	// actual stop on the system stack.
	semacquire(&worldsema, false)
	getg().m.preemptoff = reason
	systemstack(stopTheWorldWithSema)
}

// startTheWorld undoes the effects of stopTheWorld.
func startTheWorld() {
	systemstack(startTheWorldWithSema)
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	// Re-enable preemption of this goroutine.
	getg().m.preemptoff = ""
}

// Holding worldsema grants an M the right to try to stop the world
// and prevents gomaxprocs from changing concurrently.
// Initialized to 1 so the first semacquire succeeds.
var worldsema uint32 = 1

// stopTheWorldWithSema is the core implementation of stopTheWorld.
// The caller is responsible for acquiring worldsema and disabling
// preemption first and then should stopTheWorldWithSema on the system
// stack:
//
//	semacquire(&worldsema, false)
//	m.preemptoff = "reason"
//	systemstack(stopTheWorldWithSema)
//
// When finished, the caller must either call startTheWorld or undo
// these three operations separately:
//
//	m.preemptoff = ""
//	systemstack(startTheWorldWithSema)
//	semrelease(&worldsema)
//
// It is allowed to acquire worldsema once and then execute multiple
// startTheWorldWithSema/stopTheWorldWithSema pairs.
// Other P's are able to execute between successive calls to
// startTheWorldWithSema and stopTheWorldWithSema.
// Holding worldsema causes any other goroutines invoking
// stopTheWorld to block.
func stopTheWorldWithSema() {
	_g_ := getg()

	// If we hold a lock, then we won't be able to stop another M
	// that is blocked trying to acquire the lock.
	if _g_.m.locks > 0 {
		throw("stopTheWorld: holding locks")
	}

	lock(&sched.lock)
	// stopwait counts Ps that still need to stop; it starts at gomaxprocs
	// and is decremented as each P is accounted for below (or by Ps that
	// stop themselves, e.g. in gcstopm/handoffp).
	sched.stopwait = gomaxprocs
	// Publish the stop request so running Ps park at safe points.
	atomic.Store(&sched.gcwaiting, 1)
	preemptall()
	// stop current P
	_g_.m.p.ptr().status = _Pgcstop // Pgcstop is only diagnostic.
	sched.stopwait--
	// try to retake all P's in Psyscall status
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && atomic.Cas(&p.status, s, _Pgcstop) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			// Bump syscalltick so the syscall-exit path can tell the P
			// was retaken while it was away — TODO confirm against
			// exitsyscall.
			p.syscalltick++
			sched.stopwait--
		}
	}
	// stop idle P's
	for {
		p := pidleget()
		if p == nil {
			break
		}
		p.status = _Pgcstop
		sched.stopwait--
	}
	wait := sched.stopwait > 0
	unlock(&sched.lock)

	// wait for remaining P's to stop voluntarily
	if wait {
		for {
			// wait for 100us, then try to re-preempt in case of any races
			if notetsleep(&sched.stopnote, 100*1000) {
				noteclear(&sched.stopnote)
				break
			}
			preemptall()
		}
	}
	// Sanity checks: every P must now be stopped and in _Pgcstop.
	if sched.stopwait != 0 {
		throw("stopTheWorld: not stopped")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		if p.status != _Pgcstop {
			throw("stopTheWorld: not stopped")
		}
	}
}

// mhelpgc flags the current M as a GC helper by setting m.helpgc to -1;
// mstart1 and stopm inspect m.helpgc to route the M into GC assistance.
func mhelpgc() {
	getg().m.helpgc = -1
}

// startTheWorldWithSema is the core implementation of startTheWorld;
// see the stopTheWorldWithSema doc comment for the required calling
// protocol (worldsema held, runs on the system stack).
func startTheWorldWithSema() {
	_g_ := getg()

	_g_.m.locks++        // disable preemption because it can be holding p in a local var
	gp := netpoll(false) // non-blocking
	injectglist(gp)
	add := needaddgcproc()
	lock(&sched.lock)

	// Pick up a pending GOMAXPROCS change, if any, while the world is
	// stopped (newprocs is consumed here).
	procs := gomaxprocs
	if newprocs != 0 {
		procs = newprocs
		newprocs = 0
	}
	// procresize returns the list of Ps with local work, linked by p.link.
	p1 := procresize(procs)
	sched.gcwaiting = 0
	if sched.sysmonwait != 0 {
		sched.sysmonwait = 0
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)

	// Restart an M for every P that has work: either wake the M already
	// bound to the P, or create a new one.
	for p1 != nil {
		p := p1
		p1 = p1.link.ptr()
		if p.m != 0 {
			mp := p.m.ptr()
			p.m = 0
			if mp.nextp != 0 {
				throw("startTheWorld: inconsistent mp->nextp")
			}
			mp.nextp.set(p)
			notewakeup(&mp.park)
		} else {
			// Start M to run P.  Do not start another M below.
			newm(nil, p)
			add = false
		}
	}

	// Wakeup an additional proc in case we have excessive runnable goroutines
	// in local queues or in the global queue. If we don't, the proc will park itself.
	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary.
	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 {
		wakep()
	}

	if add {
		// If GC could have used another helper proc, start one now,
		// in the hope that it will be available next time.
		// It would have been even better to start it before the collection,
		// but doing so requires allocating memory, so it's tricky to
		// coordinate.  This lazy approach works out in practice:
		// we don't mind if the first couple gc rounds don't have quite
		// the maximum number of procs.
		newm(mhelpgc, nil)
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
}

// Called to start an M. This is the entry point for new OS threads
// (see newm: the thread is created with mstart as its start function).
// It must not split the stack because the stack bounds may not be set
// up yet.
//go:nosplit
func mstart() {
	_g_ := getg()

	if _g_.stack.lo == 0 {
		// Initialize stack bounds from system stack.
		// Cgo may have left stack size in stack.hi.
		size := _g_.stack.hi
		if size == 0 {
			size = 8192 * sys.StackGuardMultiplier
		}
		// Use the address of a local as an approximation of the current
		// stack top; the +1024/-size arithmetic leaves headroom above
		// the estimate.
		_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
		_g_.stack.lo = _g_.stack.hi - size + 1024
	}
	// Initialize stack guards so that we can start calling
	// both Go and C functions with stack growth prologues.
	_g_.stackguard0 = _g_.stack.lo + _StackGuard
	_g_.stackguard1 = _g_.stackguard0
	mstart1()
}

// mstart1 completes M startup on g0: records the scheduling stack,
// runs per-M init hooks, installs signal handlers on m0, runs the M's
// mstartfn if any, then enters the scheduler. It never returns.
func mstart1() {
	_g_ := getg()

	// Must be running on this M's g0.
	if _g_ != _g_.m.g0 {
		throw("bad runtime·mstart")
	}

	// Record top of stack for use by mcall.
	// Once we call schedule we're never coming back,
	// so other calls can reuse this stack space.
	gosave(&_g_.m.g0.sched)
	_g_.m.g0.sched.pc = ^uintptr(0) // make sure it is never used
	asminit()
	minit()

	// Install signal handlers; after minit so that minit can
	// prepare the thread to be able to handle the signals.
	if _g_.m == &m0 {
		// Create an extra M for callbacks on threads not created by Go.
		if iscgo && !cgoHasExtraM {
			cgoHasExtraM = true
			newextram()
		}
		initsig(false)
	}

	// Run the start function recorded by allocm/newm, if any
	// (e.g. mspinning, mhelpgc).
	if fn := _g_.m.mstartfn; fn != nil {
		fn()
	}

	if _g_.m.helpgc != 0 {
		// GC helper M: clear the flag and park until needed.
		_g_.m.helpgc = 0
		stopm()
	} else if _g_.m != &m0 {
		// Ordinary new M: take the P handed to us by whoever started us.
		acquirep(_g_.m.nextp.ptr())
		_g_.m.nextp = 0
	}
	schedule()
}

// forEachP calls fn(p) for every P p when p reaches a GC safe point.
// If a P is currently executing code, this will bring the P to a GC
// safe point and execute fn on that P. If the P is not executing code
// (it is idle or in a syscall), this will call fn(p) directly while
// preventing the P from exiting its state. This does not ensure that
// fn will run on every CPU executing Go code, but it acts as a global
// memory barrier. GC uses this as a "ragged barrier."
//
// The caller must hold worldsema.
//
//go:systemstack
func forEachP(fn func(*p)) {
	mp := acquirem()
	_p_ := getg().m.p.ptr()

	lock(&sched.lock)
	// Only one forEachP may be in flight at a time (caller holds
	// worldsema); a nonzero safePointWait here means the protocol broke.
	if sched.safePointWait != 0 {
		throw("forEachP: sched.safePointWait != 0")
	}
	// -1: we run fn on our own P directly, below.
	sched.safePointWait = gomaxprocs - 1
	sched.safePointFn = fn

	// Ask all Ps to run the safe point function.
	for _, p := range allp[:gomaxprocs] {
		if p != _p_ {
			atomic.Store(&p.runSafePointFn, 1)
		}
	}
	preemptall()

	// Any P entering _Pidle or _Psyscall from now on will observe
	// p.runSafePointFn == 1 and will call runSafePointFn when
	// changing its status to _Pidle/_Psyscall.

	// Run safe point function for all idle Ps. sched.pidle will
	// not change because we hold sched.lock.
	for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
		// The Cas resolves the race with the P running fn itself
		// (see runSafePointFn).
		if atomic.Cas(&p.runSafePointFn, 1, 0) {
			fn(p)
			sched.safePointWait--
		}
	}

	wait := sched.safePointWait > 0
	unlock(&sched.lock)

	// Run fn for the current P.
	fn(_p_)

	// Force Ps currently in _Psyscall into _Pidle and hand them
	// off to induce safe point function execution.
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		s := p.status
		if s == _Psyscall && p.runSafePointFn == 1 && atomic.Cas(&p.status, s, _Pidle) {
			if trace.enabled {
				traceGoSysBlock(p)
				traceProcStop(p)
			}
			p.syscalltick++
			// handoffp's callee path runs the safe-point function for
			// Ps it picks up (see handoffp).
			handoffp(p)
		}
	}

	// Wait for remaining Ps to run fn.
	if wait {
		for {
			// Wait for 100us, then try to re-preempt in
			// case of any races.
			//
			// Requires system stack.
			if notetsleep(&sched.safePointNote, 100*1000) {
				noteclear(&sched.safePointNote)
				break
			}
			preemptall()
		}
	}
	// Sanity checks: all Ps must have run fn by now.
	if sched.safePointWait != 0 {
		throw("forEachP: not done")
	}
	for i := 0; i < int(gomaxprocs); i++ {
		p := allp[i]
		if p.runSafePointFn != 0 {
			throw("forEachP: P did not run fn")
		}
	}

	lock(&sched.lock)
	sched.safePointFn = nil
	unlock(&sched.lock)
	releasem(mp)
}

// runSafePointFn runs the safe point function, if any, for this P.
// This should be called like
//
//     if getg().m.p.runSafePointFn != 0 {
//         runSafePointFn()
//     }
//
// runSafePointFn must be checked on any transition in to _Pidle or
// _Psyscall to avoid a race where forEachP sees that the P is running
// just before the P goes into _Pidle/_Psyscall and neither forEachP
// nor the P run the safe-point function.
func runSafePointFn() {
	p := getg().m.p.ptr()
	// Resolve the race between forEachP running the safe-point
	// function on this P's behalf and this P running the
	// safe-point function directly.
	if !atomic.Cas(&p.runSafePointFn, 1, 0) {
		// forEachP (or handoffp) already ran it for us.
		return
	}
	sched.safePointFn(p)
	lock(&sched.lock)
	sched.safePointWait--
	if sched.safePointWait == 0 {
		// We were the last P; wake the forEachP waiter.
		notewakeup(&sched.safePointNote)
	}
	unlock(&sched.lock)
}

// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
var cgoThreadStart unsafe.Pointer

// cgothreadstart is the argument block handed to _cgo_thread_start
// when cgo creates an OS thread for a new M (see newm).
type cgothreadstart struct {
	g   guintptr       // the new M's g0 (set via ts.g.set(mp.g0) in newm)
	tls *uint64        // pointer to the new M's TLS slot (&mp.tls[0])
	fn  unsafe.Pointer // thread entry point: PC of mstart
}

// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
// fn is recorded as the new m's m.mstartfn.
//
// This function is known to the compiler to inhibit the
// go:nowritebarrierrec annotation because it uses P for allocation.
func allocm(_p_ *p, fn func()) *m {
	_g_ := getg()
	_g_.m.locks++ // disable GC because it can be called from sysmon
	if _g_.m.p == 0 {
		acquirep(_p_) // temporarily borrow p for mallocs in this function
	}
	mp := new(m)
	mp.mstartfn = fn
	mcommoninit(mp)

	// In case of cgo or Solaris, pthread_create will make us a stack.
	// Windows and Plan 9 will layout sched stack on OS stack.
	if iscgo || GOOS == "solaris" || GOOS == "windows" || GOOS == "plan9" {
		// -1 size: malg allocates a g without a Go stack — TODO confirm
		// against malg.
		mp.g0 = malg(-1)
	} else {
		mp.g0 = malg(8192 * sys.StackGuardMultiplier)
	}
	mp.g0.m = mp

	// Release the P only if we borrowed it above.
	if _p_ == _g_.m.p.ptr() {
		releasep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}

	return mp
}

// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via casp) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
//go:nosplit
func needm(x byte) {
	if iscgo && !cgoHasExtraM {
		// Can happen if C/C++ code calls Go from a global ctor.
		// Can not throw, because scheduler is not initialized yet.
		write(2, unsafe.Pointer(&earlycgocallback[0]), int32(len(earlycgocallback)))
		exit(1)
	}

	// Lock extra list, take head, unlock popped list.
	// nilokay=false is safe here because of the invariant above,
	// that the extra list always contains or will soon contain
	// at least one m.
	mp := lockextra(false)

	// Set needextram when we've just emptied the list,
	// so that the eventual call into cgocallbackg will
	// allocate a new m for the extra list. We delay the
	// allocation until then so that it can be done
	// after exitsyscall makes sure it is okay to be
	// running at all (that is, there's no garbage collection
	// running right now).
	mp.needextram = mp.schedlink == 0
	unlockextra(mp.schedlink.ptr())

	// Save and block signals before installing g.
	// Once g is installed, any incoming signals will try to execute,
	// but we won't have the sigaltstack settings and other data
	// set up appropriately until the end of minit, which will
	// unblock the signals. This is the same dance as when
	// starting a new m to run Go code via newosproc.
	msigsave(mp)
	sigblock()

	// Install g (= m->g0) and set the stack bounds
	// to match the current stack. We don't actually know
	// how big the stack is, like we don't know how big any
	// scheduling stack is, but we assume there's at least 32 kB,
	// which is more than enough for us.
	// x is a byte on the caller's (C) stack, so its address
	// approximates the current stack pointer.
	setg(mp.g0)
	_g_ := getg()
	_g_.stack.hi = uintptr(noescape(unsafe.Pointer(&x))) + 1024
	_g_.stack.lo = uintptr(noescape(unsafe.Pointer(&x))) - 32*1024
	_g_.stackguard0 = _g_.stack.lo + _StackGuard

	// Initialize this thread to use the m.
	asminit()
	minit()
}

var earlycgocallback = []byte("fatal error: cgo callback before cgo call\n")

// newextram allocates an m and puts it on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
func newextram() {
	// Create extra goroutine locked to extra m.
	// The goroutine is the context in which the cgo callback will run.
	// The sched.pc will never be returned to, but setting it to
	// goexit makes clear to the traceback routines where
	// the goroutine stack ends.
	mp := allocm(nil, nil)
	gp := malg(4096)
	gp.sched.pc = funcPC(goexit) + sys.PCQuantum
	gp.sched.sp = gp.stack.hi
	gp.sched.sp -= 4 * sys.RegSize // extra space in case of reads slightly beyond frame
	gp.sched.lr = 0
	gp.sched.g = guintptr(unsafe.Pointer(gp))
	// Make the goroutine look like it is sitting in a syscall so that
	// traceback starts from sched.pc/sp.
	gp.syscallpc = gp.sched.pc
	gp.syscallsp = gp.sched.sp
	gp.stktopsp = gp.sched.sp
	// malg returns status as Gidle, change to Gsyscall before adding to allg
	// where GC will see it.
	casgstatus(gp, _Gidle, _Gsyscall)
	// Lock the g and m to each other for the lifetime of the extra M.
	gp.m = mp
	mp.curg = gp
	mp.locked = _LockInternal
	mp.lockedg = gp
	gp.lockedm = mp
	gp.goid = int64(atomic.Xadd64(&sched.goidgen, 1))
	if raceenabled {
		gp.racectx = racegostart(funcPC(newextram))
	}
	// put on allg for garbage collector
	allgadd(gp)

	// Add m to the extra list.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)
	unlockextra(mp)
}

// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register at thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
func dropm() {
	// Clear m and g, and return m to the extra list.
	// After the call to setg we can only call nosplit functions
	// with no pointer manipulation.
	mp := getg().m

	// Block signals before unminit.
	// Unminit unregisters the signal handling stack (but needs g on some systems).
	// Setg(nil) clears g, which is the signal handler's cue not to run Go handlers.
	// It's important not to try to handle a signal between those two steps.
	sigmask := mp.sigmask
	sigblock()
	unminit()

	// Push mp onto the extra list; the list stays locked until
	// unlockextra publishes the new head.
	mnext := lockextra(true)
	mp.schedlink.set(mnext)

	setg(nil)

	// Commit the release of mp.
	unlockextra(mp)

	// Restore the signal mask saved above, now that g is cleared.
	msigrestore(sigmask)
}

// getm reports the current M's address as a uintptr.
// It exists as a helper for EnsureDropM.
func getm() uintptr {
	mp := getg().m
	return uintptr(unsafe.Pointer(mp))
}

var extram uintptr

// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
//go:nosplit
func lockextra(nilokay bool) *m {
	// extram == locked (1) acts as the "list is locked" sentinel;
	// any other value is the (possibly nil) list head pointer.
	const locked = 1

	for {
		old := atomic.Loaduintptr(&extram)
		if old == locked {
			// Someone else holds the spin lock; back off and retry.
			// NOTE(review): the indirect call through a local appears
			// deliberate (same pattern below) — presumably to keep
			// this nosplit function's call shape simple; confirm
			// before "simplifying" to a direct osyield().
			yield := osyield
			yield()
			continue
		}
		if old == 0 && !nilokay {
			// Empty list and caller needs a real m: wait for one
			// to appear (see needm's invariant discussion above).
			usleep(1)
			continue
		}
		// Try to swap the head for the lock sentinel; on success we
		// own the list until unlockextra stores a new head.
		if atomic.Casuintptr(&extram, old, locked) {
			return (*m)(unsafe.Pointer(old))
		}
		yield := osyield
		yield()
		continue
	}
}

// unlockextra publishes head as the new head of the extra-M list,
// releasing the spin lock that lockextra took by storing the
// sentinel value into extram.
//go:nosplit
func unlockextra(head *m) {
	newHead := uintptr(unsafe.Pointer(head))
	atomic.Storeuintptr(&extram, newHead)
}

// Create a new m.  It will start off with a call to fn, or else the scheduler.
// fn needs to be static and not a heap allocated closure.
// May run with m.p==nil, so write barriers are not allowed.
//go:nowritebarrier
func newm(fn func(), _p_ *p) {
	mp := allocm(_p_, fn)
	// Hand the P to the new M; mstart1 will acquirep it.
	mp.nextp.set(_p_)
	mp.sigmask = initSigmask
	if iscgo {
		// Under cgo, threads must be created by C code so that foreign
		// runtimes see them; marshal the start parameters and call
		// _cgo_thread_start instead of newosproc.
		var ts cgothreadstart
		if _cgo_thread_start == nil {
			throw("_cgo_thread_start missing")
		}
		ts.g.set(mp.g0)
		ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
		ts.fn = unsafe.Pointer(funcPC(mstart))
		if msanenabled {
			msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
		}
		asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
		return
	}
	newosproc(mp, unsafe.Pointer(mp.g0.stack.hi))
}

// Stops execution of the current m until new work is available.
// Returns with acquired P.
func stopm() {
	_g_ := getg()

	// An M must park with nothing held: no locks, no P, not spinning.
	if _g_.m.locks != 0 {
		throw("stopm holding locks")
	}
	if _g_.m.p != 0 {
		throw("stopm holding p")
	}
	if _g_.m.spinning {
		throw("stopm spinning")
	}

retry:
	// Put ourselves on the idle M list and sleep until woken
	// (notewakeup(&mp.park) in startm/startlockedm/startTheWorldWithSema).
	lock(&sched.lock)
	mput(_g_.m)
	unlock(&sched.lock)
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	if _g_.m.helpgc != 0 {
		// Woken to assist GC: help, then go back to sleep.
		gchelper()
		_g_.m.helpgc = 0
		_g_.m.mcache = nil
		_g_.m.p = 0
		goto retry
	}
	// Woken with a P handed to us in nextp.
	acquirep(_g_.m.nextp.ptr())
	_g_.m.nextp = 0
}

// mspinning runs as the mstartfn of an M created by startm with
// spinning=true: the caller already incremented sched.nmspinning on
// this M's behalf, so here we only mark the M itself as spinning.
func mspinning() {
	getg().m.spinning = true
}

// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P, if no idle P's does nothing.
// May run with m.p==nil, so write barriers are not allowed.
// If spinning is set, the caller has incremented nmspinning and startm will
// either decrement nmspinning or set m.spinning in the newly started M.
//go:nowritebarrier
func startm(_p_ *p, spinning bool) {
	lock(&sched.lock)
	if _p_ == nil {
		// No P supplied: try to take an idle one.
		_p_ = pidleget()
		if _p_ == nil {
			unlock(&sched.lock)
			if spinning {
				// The caller incremented nmspinning, but there are no idle Ps,
				// so it's okay to just undo the increment and give up.
				if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
					throw("startm: negative nmspinning")
				}
			}
			return
		}
	}
	// Prefer reusing an idle M; fall back to creating one.
	mp := mget()
	unlock(&sched.lock)
	if mp == nil {
		var fn func()
		if spinning {
			// The caller incremented nmspinning, so set m.spinning in the new M.
			fn = mspinning
		}
		newm(fn, _p_)
		return
	}
	// Consistency checks on the M we dequeued: it must be fully parked.
	if mp.spinning {
		throw("startm: m is spinning")
	}
	if mp.nextp != 0 {
		throw("startm: m has p")
	}
	if spinning && !runqempty(_p_) {
		throw("startm: p has runnable gs")
	}
	// The caller incremented nmspinning, so set m.spinning in the new M.
	mp.spinning = spinning
	mp.nextp.set(_p_)
	notewakeup(&mp.park)
}

// Hands off P from syscall or locked M.
// Always runs without a P, so write barriers are not allowed.
//go:nowritebarrier
func handoffp(_p_ *p) {
	// handoffp must start an M in any situation where
	// findrunnable would return a G to run on _p_.

	// if it has local work, start it straight away
	if !runqempty(_p_) || sched.runqsize != 0 {
		startm(_p_, false)
		return
	}
	// if it has GC work, start it straight away
	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(_p_) {
		startm(_p_, false)
		return
	}
	// no local work, check that there are no spinning/idle M's,
	// otherwise our help is not required
	if atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) == 0 && atomic.Cas(&sched.nmspinning, 0, 1) { // TODO: fast atomic
		startm(_p_, true)
		return
	}
	lock(&sched.lock)
	if sched.gcwaiting != 0 {
		// Stop-the-world in progress: stop this P here instead of
		// handing it off, and wake the stopper if we're the last.
		_p_.status = _Pgcstop
		sched.stopwait--
		if sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
		unlock(&sched.lock)
		return
	}
	// Run a pending forEachP safe-point function on this P's behalf
	// (see runSafePointFn for the matching Cas).
	if _p_.runSafePointFn != 0 && atomic.Cas(&_p_.runSafePointFn, 1, 0) {
		sched.safePointFn(_p_)
		sched.safePointWait--
		if sched.safePointWait == 0 {
			notewakeup(&sched.safePointNote)
		}
	}
	// Re-check the global queue under the lock before idling the P.
	if sched.runqsize != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	// If this is the last running P and nobody is polling network,
	// need to wakeup another M to poll network.
	if sched.npidle == uint32(gomaxprocs-1) && atomic.Load64(&sched.lastpoll) != 0 {
		unlock(&sched.lock)
		startm(_p_, false)
		return
	}
	pidleput(_p_)
	unlock(&sched.lock)
}

// wakep tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
func wakep() {
	// Be conservative about spinning threads: only start an M if we
	// can transition nmspinning 0 -> 1; otherwise some thread is
	// already spinning and will find the work.
	if atomic.Cas(&sched.nmspinning, 0, 1) {
		startm(nil, true)
	}
}

// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
func stoplockedm() {
	_g_ := getg()

	// The M and its locked g must point at each other.
	if _g_.m.lockedg == nil || _g_.m.lockedg.lockedm != _g_.m {
		throw("stoplockedm: inconsistent locking")
	}
	if _g_.m.p != 0 {
		// Schedule another M to run this p.
		_p_ := releasep()
		handoffp(_p_)
	}
	incidlelocked(1)
	// Wait until another thread schedules lockedg again.
	// (startlockedm wakes us via notewakeup(&mp.park).)
	notesleep(&_g_.m.park)
	noteclear(&_g_.m.park)
	status := readgstatus(_g_.m.lockedg)
	if status&^_Gscan != _Grunnable {
		print("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n")
		dumpgstatus(_g_)
		throw("stoplockedm: not runnable")
	}
	// Take the P handed to us by startlockedm.
	acquirep(_g_.m.nextp.ptr())
	_g_.m.nextp = 0
}

// Schedules the locked m to run the locked gp.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func startlockedm(gp *g) {
	_g_ := getg()

	mp := gp.lockedm
	if mp == _g_.m {
		throw("startlockedm: locked to me")
	}
	// The target M must be parked in stoplockedm with no P pending.
	if mp.nextp != 0 {
		throw("startlockedm: m has p")
	}
	// directly handoff current P to the locked m
	incidlelocked(-1)
	_p_ := releasep()
	mp.nextp.set(_p_)
	notewakeup(&mp.park)
	// We gave away our P, so park this M until it is given work.
	stopm()
}

// Stops the current m for stopTheWorld.
// Returns when the world is restarted.
func gcstopm() {
	_g_ := getg()

	// Only valid while stopTheWorldWithSema has gcwaiting set.
	if sched.gcwaiting == 0 {
		throw("gcstopm: not waiting for gc")
	}
	if _g_.m.spinning {
		_g_.m.spinning = false
		// OK to just drop nmspinning here,
		// startTheWorld will unpark threads as necessary.
		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
			throw("gcstopm: negative nmspinning")
		}
	}
	// Give up our P, mark it stopped, and wake the stopper if we
	// were the last P it was waiting on.
	_p_ := releasep()
	lock(&sched.lock)
	_p_.status = _Pgcstop
	sched.stopwait--
	if sched.stopwait == 0 {
		notewakeup(&sched.stopnote)
	}
	unlock(&sched.lock)
	stopm()
}

// Schedules gp to run on the current M.
// If inheritTime is true, gp inherits the remaining time in the
// current time slice. Otherwise, it starts a new time slice.
// Never returns.
func execute(gp *g, inheritTime bool) {
	_g_ := getg()

	casgstatus(gp, _Grunnable, _Grunning)
	gp.waitsince = 0
	gp.preempt = false
	gp.stackguard0 = gp.stack.lo + _StackGuard
	if !inheritTime {
		// Starting a fresh time slice: count it on this P.
		_g_.m.p.ptr().schedtick++
	}
	// Bind gp to this M for the duration of its run.
	_g_.m.curg = gp
	gp.m = _g_.m

	// Check whether the profiler needs to be turned on or off.
	hz := sched.profilehz
	if _g_.m.profilehz != hz {
		resetcpuprofiler(hz)
	}

	if trace.enabled {
		// GoSysExit has to happen when we have a P, but before GoStart.
		// So we emit it here.
		if gp.syscallsp != 0 && gp.sysblocktraced {
			// Since gp.sysblocktraced is true, we must emit an event.
			// There is a race between the code that initializes sysexitseq
			// and sysexitticks (in exitsyscall, which runs without a P,
			// and therefore is not stopped with the rest of the world)
			// and the code that initializes a new trace.
			// The recorded sysexitseq and sysexitticks must therefore
			// be treated as "best effort". If they are valid for this trace,
			// then great, use them for greater accuracy.
			// But if they're not valid for this trace, assume that the
			// trace was started after the actual syscall exit (but before
			// we actually managed to start the goroutine, aka right now),
			// and assign a fresh time stamp to keep the log consistent.
			seq, ts := gp.sysexitseq, gp.sysexitticks
			if seq == 0 || int64(seq)-int64(trace.seqStart) < 0 {
				seq, ts = tracestamp()
			}
			traceGoSysExit(seq, ts)
		}
		traceGoStart()
	}

	// Jump into gp's saved context; this call does not return.
	gogo(&gp.sched)
}

// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from global queue, poll network.
func findrunnable() (gp *g, inheritTime bool) {
	_g_ := getg()

	// The conditions here and in handoffp must agree: if
	// findrunnable would return a G to run, handoffp must start
	// an M.

top:
	// Cooperate with stop-the-world and forEachP before looking for work.
	if sched.gcwaiting != 0 {
		gcstopm()
		goto top
	}
	if _g_.m.p.ptr().runSafePointFn != 0 {
		runSafePointFn()
	}
	if fingwait && fingwake {
		// Wake the finalizer goroutine if it is parked and has work.
		if gp := wakefing(); gp != nil {
			ready(gp, 0)
		}
	}

	// local runq
	if gp, inheritTime := runqget(_g_.m.p.ptr()); gp != nil {
		return gp, inheritTime
	}

	// global runq
	if sched.runqsize != 0 {
		lock(&sched.lock)
		gp := globrunqget(_g_.m.p.ptr(), 0)
		unlock(&sched.lock)
		if gp != nil {
			return gp, false
		}
	}

	// Poll network.
	// This netpoll is only an optimization before we resort to stealing.
	// We can safely skip it if there a thread blocked in netpoll already.
	// If there is any kind of logical race with that blocked thread
	// (e.g. it has already returned from netpoll, but does not set lastpoll yet),
	// this thread will do blocking netpoll below anyway.
	if netpollinited() && sched.lastpoll != 0 {
		if gp := netpoll(false); gp != nil { // non-blocking
			// netpoll returns list of goroutines linked by schedlink.
			injectglist(gp.schedlink.ptr())
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.enabled {
				traceGoUnpark(gp, 0)
			}
			return gp, false
		}
	}

	// If number of spinning M's >= number of busy P's, block.
	// This is necessary to prevent excessive CPU consumption
	// when GOMAXPROCS>>1 but the program parallelism is low.
	if !_g_.m.spinning && 2*atomic.Load(&sched.nmspinning) >= uint32(gomaxprocs)-atomic.Load(&sched.npidle) { // TODO: fast atomic
		goto stop
	}
	if !_g_.m.spinning {
		// Become a spinning M for the steal loop below.
		_g_.m.spinning = true
		atomic.Xadd(&sched.nmspinning, 1)
	}
	// random steal from other P's
	for i := 0; i < int(4*gomaxprocs); i++ {
		if sched.gcwaiting != 0 {
			goto top
		}
		_p_ := allp[fastrand1()%uint32(gomaxprocs)]
		var gp *g
		if _p_ == _g_.m.p.ptr() {
			// Picked ourselves: just re-check our own queue.
			gp, _ = runqget(_p_)
		} else {
			stealRunNextG := i > 2*int(gomaxprocs) // first look for ready queues with more than 1 g
			gp = runqsteal(_g_.m.p.ptr(), _p_, stealRunNextG)
		}
		if gp != nil {
			return gp, false
		}
	}

stop:

	// We have nothing to do. If we're in the GC mark phase, can
	// safely scan and blacken objects, and have work to do, run
	// idle-time marking rather than give up the P.
	if _p_ := _g_.m.p.ptr(); gcBlackenEnabled != 0 && _p_.gcBgMarkWorker != 0 && gcMarkWorkAvailable(_p_) {
		_p_.gcMarkWorkerMode = gcMarkWorkerIdleMode
		gp := _p_.gcBgMarkWorker.ptr()
		casgstatus(gp, _Gwaiting, _Grunnable)
		if trace.enabled {
			traceGoUnpark(gp, 0)
		}
		return gp, false
	}

	// return P and block
	lock(&sched.lock)
	if sched.gcwaiting != 0 || _g_.m.p.ptr().runSafePointFn != 0 {
		unlock(&sched.lock)
		goto top
	}
	if sched.runqsize != 0 {
		gp := globrunqget(_g_.m.p.ptr(), 0)
		unlock(&sched.lock)
		return gp, false
	}
	_p_ := releasep()
	pidleput(_p_)
	unlock(&sched.lock)

	// Delicate dance: thread transitions from spinning to non-spinning state,
	// potentially concurrently with submission of new goroutines. We must
	// drop nmspinning first and then check all per-P queues again (with
	// #StoreLoad memory barrier in between). If we do it the other way around,
	// another thread can submit a goroutine after we've checked all run queues
	// but before we drop nmspinning; as the result nobody will unpark a thread
	// to run the goroutine.
	// If we discover new work below, we need to restore m.spinning as a signal
	// for resetspinning to unpark a new worker thread (because there can be more
	// than one starving goroutine). However, if after discovering new work
	// we also observe no idle Ps, it is OK to just park the current thread:
	// the system is fully loaded so no spinning threads are required.
	// Also see "Worker thread parking/unparking" comment at the top of the file.
	wasSpinning := _g_.m.spinning
	if _g_.m.spinning {
		_g_.m.spinning = false
		if int32(atomic.Xadd(&sched.nmspinning, -1)) < 0 {
			throw("findrunnable: negative nmspinning")
		}
	}

	// check all runqueues once again
	for i := 0; i < int(gomaxprocs); i++ {
		_p_ := allp[i]
		if _p_ != nil && !runqempty(_p_) {
			lock(&sched.lock)
			_p_ = pidleget()
			unlock(&sched.lock)
			if _p_ != nil {
				acquirep(_p_)
				if wasSpinning {
					_g_.m.spinning = true
					atomic.Xadd(&sched.nmspinning, 1)
				}
				goto top
			}
			// No idle P to take, so we cannot run the work we saw.
			break
		}
	}

	// poll network
	// Xchg of lastpoll to 0 claims the role of blocking poller.
	if netpollinited() && atomic.Xchg64(&sched.lastpoll, 0) != 0 {
		if _g_.m.p != 0 {
			throw("findrunnable: netpoll with p")
		}
		if _g_.m.spinning {
			throw("findrunnable: netpoll with spinning")
		}
		gp := netpoll(true) // block until new work is available
		atomic.Store64(&sched.lastpoll, uint64(nanotime()))
		if gp != nil {
			lock(&sched.lock)
			_p_ = pidleget()
			unlock(&sched.lock)
			if _p_ != nil {
				// Got a P: run the first woken goroutine here and
				// inject the rest into the global queue.
				acquirep(_p_)
				injectglist(gp.schedlink.ptr())
				casgstatus(gp, _Gwaiting, _Grunnable)
				if trace.enabled {
					traceGoUnpark(gp, 0)
				}
				return gp, false
			}
			// No P available: inject everything and go back to sleep.
			injectglist(gp)
		}
	}
	stopm()
	goto top
}

// resetspinning clears the spinning state of the current M and
// decrements sched.nmspinning. If this M was the last spinner and
// idle Ps remain, it wakes another worker so queued goroutines are
// not stranded. See the "Worker thread parking/unparking" comment at
// the top of the file for the policy.
func resetspinning() {
	_g_ := getg()
	if !_g_.m.spinning {
		throw("resetspinning: not a spinning m")
	}
	_g_.m.spinning = false
	nmspinning := atomic.Xadd(&sched.nmspinning, -1)
	if int32(nmspinning) < 0 {
		// Bug fix: this throw previously said "findrunnable: negative
		// nmspinning", misattributing the crash site in stack dumps.
		throw("resetspinning: negative nmspinning")
	}
	// M wakeup policy is deliberately somewhat conservative, so check if we
	// need to wakeup another P here. See "Worker thread parking/unparking"
	// comment at the top of the file for details.
	if nmspinning == 0 && atomic.Load(&sched.npidle) > 0 {
		wakep()
	}
}

// Injects the list of runnable G's into the scheduler.
// Can run concurrently with GC.
func injectglist(glist *g) {
	// glist is a list of goroutines linked by schedlink (see the
	// netpoll callers in findrunnable/startTheWorldWithSema).
	if glist == nil {
		return
	}
	if trace.enabled {
		for gp := glist; gp != nil; gp = gp.schedlink.ptr() {
			traceGoUnpark(gp, 0)
		}
	}
	lock(&sched.lock)
	// Move every g to the global run queue, counting them so we know
	// how many Ms might be worth waking afterwards.
	var n int
	for n = 0; glist != nil; n++ {
		gp := glist
		glist = gp.schedlink.ptr()
		casgstatus(gp, _Gwaiting, _Grunnable)
		globrunqput(gp)
	}
	unlock(&sched.lock)
	// Wake at most one M per injected g, bounded by the idle-P count.
	for ; n != 0 && sched.npidle != 0; n-- {
		startm(nil, false)
	}
}

// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.
//
// Work is looked for in priority order: trace reader, GC worker, the global
// run queue (occasionally, for fairness), the local run queue, and finally
// findrunnable, which blocks until something is available.
func schedule() {
	_g_ := getg()

	if _g_.m.locks != 0 {
		throw("schedule: holding locks")
	}

	// If this M is wired to a specific goroutine, it may only run that one;
	// park until that goroutine is handed to us.
	if _g_.m.lockedg != nil {
		stoplockedm()
		execute(_g_.m.lockedg, false) // Never returns.
	}

top:
	if sched.gcwaiting != 0 {
		// GC needs to stop the world; park and retry once it is done.
		gcstopm()
		goto top
	}
	if _g_.m.p.ptr().runSafePointFn != 0 {
		runSafePointFn()
	}

	var gp *g
	var inheritTime bool
	if trace.enabled || trace.shutdown {
		gp = traceReader()
		if gp != nil {
			casgstatus(gp, _Gwaiting, _Grunnable)
			traceGoUnpark(gp, 0)
		}
	}
	if gp == nil && gcBlackenEnabled != 0 {
		gp = gcController.findRunnableGCWorker(_g_.m.p.ptr())
	}
	if gp == nil {
		// Check the global runnable queue once in a while to ensure fairness.
		// Otherwise two goroutines can completely occupy the local runqueue
		// by constantly respawning each other.
		if _g_.m.p.ptr().schedtick%61 == 0 && sched.runqsize > 0 {
			lock(&sched.lock)
			gp = globrunqget(_g_.m.p.ptr(), 1)
			unlock(&sched.lock)
		}
	}
	if gp == nil {
		gp, inheritTime = runqget(_g_.m.p.ptr())
		if gp != nil && _g_.m.spinning {
			throw("schedule: spinning with local work")
		}
	}
	if gp == nil {
		gp, inheritTime = findrunnable() // blocks until work is available
	}

	// This thread is going to run a goroutine and is not spinning anymore,
	// so if it was marked as spinning we need to reset it now and potentially
	// start a new spinning M.
	if _g_.m.spinning {
		resetspinning()
	}

	if gp.lockedm != nil {
		// Hands off own p to the locked m,
		// then blocks waiting for a new p.
		startlockedm(gp)
		goto top
	}

	execute(gp, inheritTime)
}

// dropg removes the association between m and the current goroutine m->curg (gp for short).
// Typically a caller sets gp's status away from Grunning and then
// immediately calls dropg to finish the job. The caller is also responsible
// for arranging that gp will be restarted using ready at an
// appropriate time. After calling dropg and arranging for gp to be
// readied later, the caller can do other work but eventually should
// call schedule to restart the scheduling of goroutines on this m.
func dropg() {
	_g_ := getg()

	// When the M is locked to a goroutine, the association is kept;
	// otherwise break both directions of the m<->curg link.
	if _g_.m.lockedg == nil {
		_g_.m.curg.m = nil
		_g_.m.curg = nil
	}
}

// parkunlock_c is a park callback (see park_m's waitunlockf) that releases
// the mutex handed in through the untyped lock pointer. It always returns
// true, meaning the goroutine should remain parked.
func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
	unlock((*mutex)(lock))
	return true
}

// park continuation on g0.
//
// Transitions gp to _Gwaiting, dissociates it from this M, and runs the
// optional waitunlockf callback stored on the M. If the callback returns
// false, the park is aborted and gp is rescheduled immediately; otherwise
// this M moves on to schedule another goroutine.
func park_m(gp *g) {
	_g_ := getg()

	if trace.enabled {
		traceGoPark(_g_.m.waittraceev, _g_.m.waittraceskip, gp)
	}

	casgstatus(gp, _Grunning, _Gwaiting)
	dropg()

	if _g_.m.waitunlockf != nil {
		// waitunlockf is stored as an untyped pointer; reconstruct the
		// func(*g, unsafe.Pointer) bool value before calling it.
		fn := *(*func(*g, unsafe.Pointer) bool)(unsafe.Pointer(&_g_.m.waitunlockf))
		ok := fn(gp, _g_.m.waitlock)
		_g_.m.waitunlockf = nil
		_g_.m.waitlock = nil
		if !ok {
			// Callback vetoed the park: make gp runnable again and run it.
			if trace.enabled {
				traceGoUnpark(gp, 2)
			}
			casgstatus(gp, _Gwaiting, _Grunnable)
			execute(gp, true) // Schedule it back, never returns.
		}
	}
	schedule()
}

// goschedImpl is the shared g0 continuation for Gosched and preemption:
// it moves the running goroutine gp to the global run queue and enters
// the scheduler. Never returns.
func goschedImpl(gp *g) {
	status := readgstatus(gp)
	// Ignore the _Gscan bit: gp must be _Grunning (possibly being scanned).
	if status&^_Gscan != _Grunning {
		dumpgstatus(gp)
		throw("bad g status")
	}
	casgstatus(gp, _Grunning, _Grunnable)
	dropg()
	lock(&sched.lock)
	globrunqput(gp)
	unlock(&sched.lock)

	schedule()
}

// Gosched continuation on g0.
// Emits a trace event for a voluntary yield, then requeues gp and reschedules.
func gosched_m(gp *g) {
	if trace.enabled {
		traceGoSched()
	}
	goschedImpl(gp)
}

// gopreempt_m is the g0 continuation for an involuntary preemption.
// Identical to gosched_m except for the trace event emitted.
func gopreempt_m(gp *g) {
	if trace.enabled {
		traceGoPreempt()
	}
	goschedImpl(gp)
}

// Finishes execution of the current goroutine.
// Notifies the race detector and tracer, then switches to g0 to run
// goexit0, which does the actual teardown.
func goexit1() {
	if raceenabled {
		racegoend()
	}
	if trace.enabled {
		traceGoEnd()
	}
	mcall(goexit0)
}

// goexit continuation on g0.
//
// Marks gp dead, clears its per-run state so the struct can be reused,
// returns it to the P's free-G cache, and enters the scheduler.
// Never returns.
func goexit0(gp *g) {
	_g_ := getg()

	casgstatus(gp, _Grunning, _Gdead)
	if isSystemGoroutine(gp) {
		atomic.Xadd(&sched.ngsys, -1)
	}
	// Clear fields that must not leak into the next use of this g.
	gp.m = nil
	gp.lockedm = nil
	_g_.m.lockedg = nil
	gp.paniconfault = false
	gp._defer = nil // should be true already but just in case.
	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data.
	gp.writebuf = nil
	gp.waitreason = ""
	gp.param = nil

	dropg()

	// Any remaining lock count other than the external bit indicates a
	// lockOSThread/unlockOSThread imbalance inside the runtime.
	if _g_.m.locked&^_LockExternal != 0 {
		print("invalid m->locked = ", _g_.m.locked, "\n")
		throw("internal lockOSThread error")
	}
	_g_.m.locked = 0
	gfput(_g_.m.p.ptr(), gp)
	schedule()
}

// save records the given pc/sp into the current goroutine's scheduling
// context (g.sched) so it can be resumed or inspected later (e.g. by the
// GC or traceback while the goroutine is in a syscall). The remaining
// sched fields are cleared to known values.
//go:nosplit
//go:nowritebarrier
func save(pc, sp uintptr) {
	_g_ := getg()

	_g_.sched.pc = pc
	_g_.sched.sp = sp
	_g_.sched.lr = 0
	_g_.sched.ret = 0
	_g_.sched.ctxt = nil
	_g_.sched.g = guintptr(unsafe.Pointer(_g_))
}

// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
//
// Nothing entersyscall calls can split the stack either.
// We cannot safely move the stack during an active call to syscall,
// because we do not know which of the uintptr arguments are
// really pointers (back into the stack).
// In practice, this means that we make the fast path run through
// entersyscall doing no-split things, and the slow path has to use systemstack
// to run bigger things on the system stack.
//
// reentersyscall is the entry point used by cgo callbacks, where explicitly
// saved SP and PC are restored. This is needed when exitsyscall will be called
// from a function further up in the call stack than the parent, as g->syscallsp
// must always point to a valid stack frame. entersyscall below is the normal
// entry point for syscalls, which obtains the SP and PC from the caller.
//
// Syscall tracing:
// At the start of a syscall we emit traceGoSysCall to capture the stack trace.
// If the syscall does not block, that is it, we do not emit any other events.
// If the syscall blocks (that is, P is retaken), retaker emits traceGoSysBlock;
// when syscall returns we emit traceGoSysExit and when the goroutine starts running
// (potentially instantly, if exitsyscallfast returns true) we emit traceGoStart.
// To ensure that traceGoSysExit is emitted strictly after traceGoSysBlock,
// we remember current value of syscalltick in m (_g_.m.syscalltick = _g_.m.p.ptr().syscalltick),
// whoever emits traceGoSysBlock increments p.syscalltick afterwards;
// and we wait for the increment before emitting traceGoSysExit.
// Note that the increment is done even if tracing is not enabled,
// because tracing can be enabled in the middle of syscall. We don't want the wait to hang.
//
//go:nosplit
func reentersyscall(pc, sp uintptr) {
	_g_ := getg()

	// Disable preemption because during this function g is in Gsyscall status,
	// but can have inconsistent g->sched, do not let GC observe it.
	_g_.m.locks++

	// Entersyscall must not call any function that might split/grow the stack.
	// (See details in comment above.)
	// Catch calls that might, by replacing the stack guard with something that
	// will trip any stack check and leaving a flag to tell newstack to die.
	_g_.stackguard0 = stackPreempt
	_g_.throwsplit = true

	// Leave SP around for GC and traceback.
	save(pc, sp)
	_g_.syscallsp = sp
	_g_.syscallpc = pc
	casgstatus(_g_, _Grunning, _Gsyscall)
	// Sanity check: the recorded syscall SP must lie within g's stack.
	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
		systemstack(func() {
			print("entersyscall inconsistent ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
			throw("entersyscall")
		})
	}

	if trace.enabled {
		systemstack(traceGoSysCall)
		// systemstack itself clobbers g.sched.{pc,sp} and we might
		// need them later when the G is genuinely blocked in a
		// syscall
		save(pc, sp)
	}

	if atomic.Load(&sched.sysmonwait) != 0 { // TODO: fast atomic
		systemstack(entersyscall_sysmon)
		save(pc, sp)
	}

	if _g_.m.p.ptr().runSafePointFn != 0 {
		// runSafePointFn may stack split if run on this stack
		systemstack(runSafePointFn)
		save(pc, sp)
	}

	// Record the syscall tick (see "Syscall tracing" above), then detach
	// this M from its P and mark the P as being in a syscall so sysmon
	// can retake it if the syscall blocks.
	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
	_g_.sysblocktraced = true
	_g_.m.mcache = nil
	_g_.m.p.ptr().m = 0
	atomic.Store(&_g_.m.p.ptr().status, _Psyscall)
	if sched.gcwaiting != 0 {
		systemstack(entersyscall_gcwait)
		save(pc, sp)
	}

	// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
	// We set _StackGuard to StackPreempt so that first split stack check calls morestack.
	// Morestack detects this case and throws.
	_g_.stackguard0 = stackPreempt
	_g_.m.locks--
}

// Standard syscall entry used by the go syscall library and normal cgo calls.
// Obtains the caller's PC/SP from the dummy argument's address and defers
// to reentersyscall.
//go:nosplit
func entersyscall(dummy int32) {
	reentersyscall(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))
}

// entersyscall_sysmon wakes the sysmon thread if it is sleeping, so that it
// can monitor the P we are about to leave in _Psyscall. Runs on the system
// stack (called via systemstack from reentersyscall).
func entersyscall_sysmon() {
	lock(&sched.lock)
	if atomic.Load(&sched.sysmonwait) != 0 {
		atomic.Store(&sched.sysmonwait, 0)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
}

// entersyscall_gcwait hands the current P directly to a pending
// stop-the-world: if a stop is in progress, it flips the P from _Psyscall
// to _Pgcstop and, when this was the last P the stop was waiting for,
// wakes the stopper. Runs on the system stack.
func entersyscall_gcwait() {
	_g_ := getg()
	_p_ := _g_.m.p.ptr()

	lock(&sched.lock)
	if sched.stopwait > 0 && atomic.Cas(&_p_.status, _Psyscall, _Pgcstop) {
		if trace.enabled {
			traceGoSysBlock(_p_)
			traceProcStop(_p_)
		}
		// Bump syscalltick so traceGoSysExit ordering holds (see the
		// "Syscall tracing" comment above reentersyscall).
		_p_.syscalltick++
		if sched.stopwait--; sched.stopwait == 0 {
			notewakeup(&sched.stopnote)
		}
	}
	unlock(&sched.lock)
}

// The same as entersyscall(), but with a hint that the syscall is blocking.
// Because blocking is expected, the P is handed off to another M immediately
// rather than waiting for sysmon to retake it.
//go:nosplit
func entersyscallblock(dummy int32) {
	_g_ := getg()

	_g_.m.locks++ // see comment in entersyscall
	_g_.throwsplit = true
	_g_.stackguard0 = stackPreempt // see comment in entersyscall
	_g_.m.syscalltick = _g_.m.p.ptr().syscalltick
	_g_.sysblocktraced = true
	// We emit traceGoSysBlock ourselves below, so bump syscalltick now.
	_g_.m.p.ptr().syscalltick++

	// Leave SP around for GC and traceback.
	pc := getcallerpc(unsafe.Pointer(&dummy))
	sp := getcallersp(unsafe.Pointer(&dummy))
	save(pc, sp)
	_g_.syscallsp = _g_.sched.sp
	_g_.syscallpc = _g_.sched.pc
	// Sanity check before the status change...
	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
		sp1 := sp
		sp2 := _g_.sched.sp
		sp3 := _g_.syscallsp
		systemstack(func() {
			print("entersyscallblock inconsistent ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}
	casgstatus(_g_, _Grunning, _Gsyscall)
	// ...and again after, in case the stack bounds were clobbered.
	if _g_.syscallsp < _g_.stack.lo || _g_.stack.hi < _g_.syscallsp {
		systemstack(func() {
			print("entersyscallblock inconsistent ", hex(sp), " ", hex(_g_.sched.sp), " ", hex(_g_.syscallsp), " [", hex(_g_.stack.lo), ",", hex(_g_.stack.hi), "]\n")
			throw("entersyscallblock")
		})
	}

	systemstack(entersyscallblock_handoff)

	// Resave for traceback during blocked call.
	save(getcallerpc(unsafe.Pointer(&dummy)), getcallersp(unsafe.Pointer(&dummy)))

	_g_.m.locks--
}

// entersyscallblock_handoff releases this M's P and hands it to another M,
// emitting the syscall trace events first. Runs on the system stack.
func entersyscallblock_handoff() {
	if trace.enabled {
		traceGoSysCall()
		traceGoSysBlock(getg().m.p.ptr())
	}
	handoffp(releasep())
}

// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
//go:nosplit
func exitsyscall(dummy int32) {
	_g_ := getg()

	_g_.m.locks++ // see comment in entersyscall
	if getcallersp(unsafe.Pointer(&dummy)) > _g_.syscallsp {
		throw("exitsyscall: syscall frame is no longer valid")
	}

	_g_.waitsince = 0
	oldp := _g_.m.p.ptr()
	// Fast path: try to reacquire the old P (or grab an idle one) without
	// going through the full scheduler.
	if exitsyscallfast() {
		if _g_.m.mcache == nil {
			throw("lost mcache")
		}
		if trace.enabled {
			if oldp != _g_.m.p.ptr() || _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
				systemstack(traceGoStart)
			}
		}
		// There's a cpu for us, so we can run.
		_g_.m.p.ptr().syscalltick++
		// We need to cas the status and scan before resuming...
		casgstatus(_g_, _Gsyscall, _Grunning)

		// Garbage collector isn't running (since we are),
		// so okay to clear syscallsp.
		_g_.syscallsp = 0
		_g_.m.locks--
		if _g_.preempt {
			// restore the preemption request in case we've cleared it in newstack
			_g_.stackguard0 = stackPreempt
		} else {
			// otherwise restore the real _StackGuard, we've spoiled it in entersyscall/entersyscallblock
			_g_.stackguard0 = _g_.stack.lo + _StackGuard
		}
		_g_.throwsplit = false
		return
	}

	// Slow path: no P available right now.
	_g_.sysexitticks = 0
	_g_.sysexitseq = 0
	if trace.enabled {
		// Wait till traceGoSysBlock event is emitted.
		// This ensures consistency of the trace (the goroutine is started after it is blocked).
		for oldp != nil && oldp.syscalltick == _g_.m.syscalltick {
			osyield()
		}
		// We can't trace syscall exit right now because we don't have a P.
		// Tracing code can invoke write barriers that cannot run without a P.
		// So instead we remember the syscall exit time and emit the event
		// in execute when we have a P.
		_g_.sysexitseq, _g_.sysexitticks = tracestamp()
	}

	_g_.m.locks--

	// Call the scheduler.
	mcall(exitsyscall0)

	if _g_.m.mcache == nil {
		throw("lost mcache")
	}

	// Scheduler returned, so we're allowed to run now.
	// Delete the syscallsp information that we left for
	// the garbage collector during the system call.
	// Must wait until now because until gosched returns
	// we don't know for sure that the garbage collector
	// is not running.
	_g_.syscallsp = 0
	_g_.m.p.ptr().syscalltick++
	_g_.throwsplit = false
}

// exitsyscallfast is the fast path of exitsyscall: it tries to reacquire
// the P this M used before the syscall, and failing that, any idle P.
// Reports whether a P was acquired. On failure, m.p and m.mcache are
// cleared and the caller must take the slow path (exitsyscall0).
//go:nosplit
func exitsyscallfast() bool {
	_g_ := getg()

	// Freezetheworld sets stopwait but does not retake P's.
	if sched.stopwait == freezeStopWait {
		_g_.m.mcache = nil
		_g_.m.p = 0
		return false
	}

	// Try to re-acquire the last P.
	if _g_.m.p != 0 && _g_.m.p.ptr().status == _Psyscall && atomic.Cas(&_g_.m.p.ptr().status, _Psyscall, _Prunning) {
		// There's a cpu for us, so we can run.
		_g_.m.mcache = _g_.m.p.ptr().mcache
		_g_.m.p.ptr().m.set(_g_.m)
		if _g_.m.syscalltick != _g_.m.p.ptr().syscalltick {
			if trace.enabled {
				// The p was retaken and then enter into syscall again (since _g_.m.syscalltick has changed).
				// traceGoSysBlock for this syscall was already emitted,
				// but here we effectively retake the p from the new syscall running on the same p.
				systemstack(func() {
					// Denote blocking of the new syscall.
					traceGoSysBlock(_g_.m.p.ptr())
					// Denote completion of the current syscall.
					traceGoSysExit(tracestamp())
				})
			}
			_g_.m.p.ptr().syscalltick++
		}
		return true
	}

	// Try to get any other idle P.
	oldp := _g_.m.p.ptr()
	_g_.m.mcache = nil
	_g_.m.p = 0
	if sched.pidle != 0 {
		var ok bool
		systemstack(func() {
			ok = exitsyscallfast_pidle()
			if ok && trace.enabled {
				if oldp != nil {
					// Wait till traceGoSysBlock event is emitted.
					// This ensures consistency of the trace (the goroutine is started after it is blocked).
					for oldp.syscalltick == _g_.m.syscalltick {
						osyield()
					}
				}
				traceGoSysExit(tracestamp())
			}
		})
		if ok {
			return true
		}
	}
	return false
}

// exitsyscallfast_pidle tries to take a P from the idle list and acquire it
// for the current M, waking sysmon if it was sleeping. Reports whether a P
// was acquired. Runs on the system stack.
func exitsyscallfast_pidle() bool {
	lock(&sched.lock)
	_p_ := pidleget()
	if _p_ != nil && atomic.Load(&sched.sysmonwait) != 0 {
		atomic.Store(&sched.sysmonwait, 0)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if _p_ != nil {
		acquirep(_p_)
		return true
	}
	return false
}

// exitsyscall slow path on g0.
// Failed to acquire P, enqueue gp as runnable.
//
// If an idle P turns up after all, gp is executed directly; otherwise gp
// goes on the global run queue and this M parks. Never returns.
func exitsyscall0(gp *g) {
	_g_ := getg()

	casgstatus(gp, _Gsyscall, _Grunnable)
	dropg()
	lock(&sched.lock)
	_p_ := pidleget()
	if _p_ == nil {
		globrunqput(gp)
	} else if atomic.Load(&sched.sysmonwait) != 0 {
		atomic.Store(&sched.sysmonwait, 0)
		notewakeup(&sched.sysmonnote)
	}
	unlock(&sched.lock)
	if _p_ != nil {
		acquirep(_p_)
		execute(gp, false) // Never returns.
	}
	if _g_.m.lockedg != nil {
		// Wait until another thread schedules gp and so m again.
		stoplockedm()
		execute(gp, false) // Never returns.
	}
	stopm()
	schedule() // Never returns.
}

// beforefork prepares the current thread for a fork(2): it pins the
// goroutine to this M, turns off CPU profiling signals, and poisons the
// stack guard so any stack growth between fork and exec is caught.
// Runs on the system stack (see syscall_runtime_BeforeFork).
func beforefork() {
	gp := getg().m.curg

	// Fork can hang if preempted with signals frequently enough (see issue 5517).
	// Ensure that we stay on the same M where we disable profiling.
	gp.m.locks++
	if gp.m.profilehz != 0 {
		resetcpuprofiler(0)
	}

	// This function is called before fork in syscall package.
	// Code between fork and exec must not allocate memory nor even try to grow stack.
	// Here we spoil g->_StackGuard to reliably detect any attempts to grow stack.
	// runtime_AfterFork will undo this in parent process, but not in child.
	gp.stackguard0 = stackFork
}

// Called from syscall package before fork.
// Thin linkname shim that runs beforefork on the system stack.
//go:linkname syscall_runtime_BeforeFork syscall.runtime_BeforeFork
//go:nosplit
func syscall_runtime_BeforeFork() {
	systemstack(beforefork)
}

// afterfork undoes beforefork in the parent process: restores the real
// stack guard, re-enables CPU profiling at the scheduler's configured rate,
// and releases the M pin taken in beforefork.
func afterfork() {
	gp := getg().m.curg

	// See the comment in beforefork.
	gp.stackguard0 = gp.stack.lo + _StackGuard

	hz := sched.profilehz
	if hz != 0 {
		resetcpuprofiler(hz)
	}
	gp.m.locks--
}

// Called from syscall package after fork in parent.
// Thin linkname shim that runs afterfork on the system stack.
//go:linkname syscall_runtime_AfterFork syscall.runtime_AfterFork
//go:nosplit
func syscall_runtime_AfterFork() {
	systemstack(afterfork)
}

// Allocate a new g, with a stack big enough for stacksize bytes.
// A negative stacksize yields a g with no stack (used for g0/gsignal,
// whose stacks are set up elsewhere).
func malg(stacksize int32) *g {
	newg := new(g)
	if stacksize >= 0 {
		// Round up to a power of two, including system-reserved space.
		stacksize = round2(_StackSystem + stacksize)
		systemstack(func() {
			newg.stack, newg.stkbar = stackalloc(uint32(stacksize))
		})
		newg.stackguard0 = newg.stack.lo + _StackGuard
		// stackguard1 is deliberately poisoned; it is only meaningful for g0.
		newg.stackguard1 = ^uintptr(0)
		newg.stackAlloc = uintptr(stacksize)
	}
	return newg
}

// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred.
//go:nosplit
func newproc(siz int32, fn *funcval) {
	// The goroutine's arguments start immediately after the fn pointer
	// in the caller's frame.
	argp := add(unsafe.Pointer(&fn), sys.PtrSize)
	pc := getcallerpc(unsafe.Pointer(&siz))
	systemstack(func() {
		newproc1(fn, (*uint8)(argp), siz, 0, pc)
	})
}

// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results.  callerpc is the
// address of the go statement that created this.  The new g is put
// on the queue of g's waiting to run. Returns the new g.
func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr) *g {
	_g_ := getg()

	if fn == nil {
		_g_.m.throwing = -1 // do not dump full stacks
		throw("go of nil func value")
	}
	_g_.m.locks++ // disable preemption because it can be holding p in a local var
	siz := narg + nret
	siz = (siz + 7) &^ 7 // round the frame size up to 8 bytes

	// We could allocate a larger initial stack if necessary.
	// Not worth it: this is almost always an error.
	// 4*sizeof(uintreg): extra space added below
	// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
	if siz >= _StackMin-4*sys.RegSize-sys.RegSize {
		throw("newproc: function arguments too large for new goroutine")
	}

	// Reuse a g from the free list if possible; otherwise allocate one.
	_p_ := _g_.m.p.ptr()
	newg := gfget(_p_)
	if newg == nil {
		newg = malg(_StackMin)
		casgstatus(newg, _Gidle, _Gdead)
		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
	}
	if newg.stack.hi == 0 {
		throw("newproc1: newg missing stack")
	}

	if readgstatus(newg) != _Gdead {
		throw("newproc1: new g is not Gdead")
	}

	// Lay out the initial frame at the top of the new stack and copy the
	// goroutine's arguments into it.
	totalSize := 4*sys.RegSize + uintptr(siz) + sys.MinFrameSize // extra space in case of reads slightly beyond frame
	totalSize += -totalSize & (sys.SpAlign - 1)                  // align to spAlign
	sp := newg.stack.hi - totalSize
	spArg := sp
	if usesLR {
		// caller's LR
		*(*unsafe.Pointer)(unsafe.Pointer(sp)) = nil
		prepGoExitFrame(sp)
		spArg += sys.MinFrameSize
	}
	memmove(unsafe.Pointer(spArg), unsafe.Pointer(argp), uintptr(narg))

	// Initialize the scheduling context so the goroutine "returns" into
	// goexit when fn finishes.
	memclr(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
	newg.sched.sp = sp
	newg.stktopsp = sp
	newg.sched.pc = funcPC(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function
	newg.sched.g = guintptr(unsafe.Pointer(newg))
	gostartcallfn(&newg.sched, fn)
	newg.gopc = callerpc
	newg.startpc = fn.fn
	if isSystemGoroutine(newg) {
		atomic.Xadd(&sched.ngsys, +1)
	}
	casgstatus(newg, _Gdead, _Grunnable)

	// Assign a goroutine id from the P's cached batch, refilling the
	// batch from the global generator when it runs out.
	if _p_.goidcache == _p_.goidcacheend {
		// Sched.goidgen is the last allocated id,
		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
		// At startup sched.goidgen=0, so main goroutine receives goid=1.
		_p_.goidcache = atomic.Xadd64(&sched.goidgen, _GoidCacheBatch)
		_p_.goidcache -= _GoidCacheBatch - 1
		_p_.goidcacheend = _p_.goidcache + _GoidCacheBatch
	}
	newg.goid = int64(_p_.goidcache)
	_p_.goidcache++
	if raceenabled {
		newg.racectx = racegostart(callerpc)
	}
	if trace.enabled {
		traceGoCreate(newg, newg.startpc)
	}
	runqput(_p_, newg, true)

	if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && unsafe.Pointer(fn.fn) != unsafe.Pointer(funcPC(main)) { // TODO: fast atomic
		wakep()
	}
	_g_.m.locks--
	if _g_.m.locks == 0 && _g_.preempt { // restore the preemption request in case we've cleared it in newstack
		_g_.stackguard0 = stackPreempt
	}
	return newg
}

// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
//
// gp must already be _Gdead. Gs with non-standard stacks have the stack
// freed here so the cached g carries no stack at all.
func gfput(_p_ *p, gp *g) {
	if readgstatus(gp) != _Gdead {
		throw("gfput: bad status (not Gdead)")
	}

	stksize := gp.stackAlloc

	if stksize != _FixedStack {
		// non-standard stack size - free it.
		stackfree(gp.stack, gp.stackAlloc)
		gp.stack.lo = 0
		gp.stack.hi = 0
		gp.stackguard0 = 0
		gp.stkbar = nil
		gp.stkbarPos = 0
	} else {
		// Reset stack barriers.
		gp.stkbar = gp.stkbar[:0]
		gp.stkbarPos = 0
	}

	// Push onto the P's local free list.
	gp.schedlink.set(_p_.gfree)
	_p_.gfree = gp
	_p_.gfreecnt++
	// When the local list grows past 64, drain it down to 32 by moving
	// the excess to the global free list.
	if _p_.gfreecnt >= 64 {
		lock(&sched.gflock)
		for _p_.gfreecnt >= 32 {
			_p_.gfreecnt--
			gp = _p_.gfree
			_p_.gfree = gp.schedlink.ptr()
			gp.schedlink.set(sched.gfree)
			sched.gfree = gp
			sched.ngfree++
		}
		unlock(&sched.gflock)
	}
}

// Get from gfree list.
// If local list is empty, grab a batch from global list.
//
// Returns nil when both lists are empty. A returned g whose stack was
// freed in gfput gets a fresh fixed-size stack allocated here.
func gfget(_p_ *p) *g {
retry:
	gp := _p_.gfree
	if gp == nil && sched.gfree != nil {
		// Refill the local list with up to 32 Gs from the global list,
		// then retry the local pop.
		lock(&sched.gflock)
		for _p_.gfreecnt < 32 && sched.gfree != nil {
			_p_.gfreecnt++
			gp = sched.gfree
			sched.gfree = gp.schedlink.ptr()
			sched.ngfree--
			gp.schedlink.set(_p_.gfree)
			_p_.gfree = gp
		}
		unlock(&sched.gflock)
		goto retry
	}
	if gp != nil {
		_p_.gfree = gp.schedlink.ptr()
		_p_.gfreecnt--
		if gp.stack.lo == 0 {
			// Stack was deallocated in gfput.  Allocate a new one.
			systemstack(func() {
				gp.stack, gp.stkbar = stackalloc(_FixedStack)
			})
			gp.stackguard0 = gp.stack.lo + _StackGuard
			gp.stackAlloc = _FixedStack
		} else {
			// Reused stack: tell the race and msan detectors about it.
			if raceenabled {
				racemalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
			}
			if msanenabled {
				msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stackAlloc)
			}
		}
	}
	return gp
}

// Purge all cached G's from gfree list to the global list.
// Used when a P is being released (e.g. shrinking GOMAXPROCS).
func gfpurge(_p_ *p) {
	lock(&sched.gflock)
	for _p_.gfreecnt != 0 {
		_p_.gfreecnt--
		gp := _p_.gfree
		_p_.gfree = gp.schedlink.ptr()
		gp.schedlink.set(sched.gfree)
		sched.gfree = gp
		sched.ngfree++
	}
	unlock(&sched.gflock)
}

// Breakpoint executes a breakpoint trap.
// Exported wrapper around the architecture-specific breakpoint routine.
func Breakpoint() {
	breakpoint()
}

// dolockOSThread is called by LockOSThread and lockOSThread below
// after they modify m.locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
// It wires the current goroutine and its M to each other.
//go:nosplit
func dolockOSThread() {
	_g_ := getg()
	_g_.m.lockedg = _g_
	_g_.lockedm = _g_.m
}

//go:nosplit

// LockOSThread wires the calling goroutine to its current operating system thread.
// Until the calling goroutine exits or calls UnlockOSThread, it will always
// execute in that thread, and no other goroutine can.
// Sets the externally-requested lock bit, as opposed to the counted
// internal locks taken via lockOSThread.
func LockOSThread() {
	getg().m.locked |= _LockExternal
	dolockOSThread()
}

// lockOSThread is the runtime-internal variant of LockOSThread.
// Internal locks are counted (not a single bit) so they nest.
//go:nosplit
func lockOSThread() {
	getg().m.locked += _LockInternal
	dolockOSThread()
}

// dounlockOSThread is called by UnlockOSThread and unlockOSThread below
// after they update m->locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
// The g/m association is only severed once every lock (external bit and
// all internal counts) has been released.
//go:nosplit
func dounlockOSThread() {
	_g_ := getg()
	if _g_.m.locked != 0 {
		return
	}
	_g_.m.lockedg = nil
	_g_.lockedm = nil
}

//go:nosplit

// UnlockOSThread unwires the calling goroutine from its fixed operating system thread.
// If the calling goroutine has not called LockOSThread, UnlockOSThread is a no-op.
// Clears only the external lock bit; internal locks are untouched.
func UnlockOSThread() {
	getg().m.locked &^= _LockExternal
	dounlockOSThread()
}

// unlockOSThread is the runtime-internal variant of UnlockOSThread.
// It decrements the internal lock count and throws on underflow
// (an unlock without a matching lockOSThread).
//go:nosplit
func unlockOSThread() {
	_g_ := getg()
	if _g_.m.locked < _LockInternal {
		systemstack(badunlockosthread)
	}
	_g_.m.locked -= _LockInternal
	dounlockOSThread()
}

// badunlockosthread reports an unbalanced unlockOSThread call.
// Run on the system stack so the throw can happen even from nosplit context.
func badunlockosthread() {
	throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
}

// gcount returns an estimate of the number of live user goroutines:
// the length of allg minus the goroutines cached on the global and per-P
// free lists and minus system goroutines.
func gcount() int32 {
	count := int32(allglen) - sched.ngfree - int32(atomic.Load(&sched.ngsys))
	// allp is terminated by a nil entry; discount each P's cached free Gs.
	i := 0
	for {
		pp := allp[i]
		if pp == nil {
			break
		}
		count -= pp.gfreecnt
		i++
	}

	// All these variables can be changed concurrently, so the result can be inconsistent.
	// But at least the current goroutine is running.
	if count < 1 {
		count = 1
	}
	return count
}

// mcount returns the number of Ms (OS threads) the scheduler has created.
func mcount() int32 {
	return sched.mcount
}

// prof holds CPU-profiling state shared by sigprof and setcpuprofilerate_m.
var prof struct {
	lock uint32 // simple CAS lock coordinating sigprof with setcpuprofilerate_m
	hz   int32  // profiling rate; 0 means profiling is disabled
}

// Placeholder functions whose addresses (via funcPC) are recorded as
// pseudo frames in CPU profiles by sigprof when a real traceback is not
// possible. They are not meant to be called; the self-recursive bodies
// presumably just give each a distinct, non-empty code address — confirm.
func _System()       { _System() }
func _ExternalCode() { _ExternalCode() }
func _GC()           { _GC() }

// Called if we receive a SIGPROF signal.
// Records a stack sample for the CPU profiler. Runs in signal-handler
// context: it must not allocate, and it coordinates with
// setcpuprofilerate_m via the prof.lock CAS lock.
func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
	if prof.hz == 0 {
		return
	}

	// Profiling runs concurrently with GC, so it must not allocate.
	mp.mallocing++

	// Define that a "user g" is a user-created goroutine, and a "system g"
	// is one that is m->g0 or m->gsignal.
	//
	// We might be interrupted for profiling halfway through a
	// goroutine switch. The switch involves updating three (or four) values:
	// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
	// because once it gets updated the new g is running.
	//
	// When switching from a user g to a system g, LR is not considered live,
	// so the update only affects g, SP, and PC. Since PC must be last, there
	// the possible partial transitions in ordinary execution are (1) g alone is updated,
	// (2) both g and SP are updated, and (3) SP alone is updated.
	// If SP or g alone is updated, we can detect the partial transition by checking
	// whether the SP is within g's stack bounds. (We could also require that SP
	// be changed only after g, but the stack bounds check is needed by other
	// cases, so there is no need to impose an additional requirement.)
	//
	// There is one exceptional transition to a system g, not in ordinary execution.
	// When a signal arrives, the operating system starts the signal handler running
	// with an updated PC and SP. The g is updated last, at the beginning of the
	// handler. There are two reasons this is okay. First, until g is updated the
	// g and SP do not match, so the stack bounds check detects the partial transition.
	// Second, signal handlers currently run with signals disabled, so a profiling
	// signal cannot arrive during the handler.
	//
	// When switching from a system g to a user g, there are three possibilities.
	//
	// First, it may be that the g switch has no PC update, because the SP
	// either corresponds to a user g throughout (as in asmcgocall)
	// or because it has been arranged to look like a user g frame
	// (as in cgocallback_gofunc). In this case, since the entire
	// transition is a g+SP update, a partial transition updating just one of
	// those will be detected by the stack bounds check.
	//
	// Second, when returning from a signal handler, the PC and SP updates
	// are performed by the operating system in an atomic update, so the g
	// update must be done before them. The stack bounds check detects
	// the partial transition here, and (again) signal handlers run with signals
	// disabled, so a profiling signal cannot arrive then anyway.
	//
	// Third, the common case: it may be that the switch updates g, SP, and PC
	// separately. If the PC is within any of the functions that does this,
	// we don't ask for a traceback. C.F. the function setsSP for more about this.
	//
	// There is another apparently viable approach, recorded here in case
	// the "PC within setsSP function" check turns out not to be usable.
	// It would be possible to delay the update of either g or SP until immediately
	// before the PC update instruction. Then, because of the stack bounds check,
	// the only problematic interrupt point is just before that PC update instruction,
	// and the sigprof handler can detect that instruction and simulate stepping past
	// it in order to reach a consistent state. On ARM, the update of g must be made
	// in two places (in R10 and also in a TLS slot), so the delayed update would
	// need to be the SP update. The sigprof handler must read the instruction at
	// the current PC and if it was the known instruction (for example, JMP BX or
	// MOV R2, PC), use that other register in place of the PC value.
	// The biggest drawback to this solution is that it requires that we can tell
	// whether it's safe to read from the memory pointed at by PC.
	// In a correct program, we can test PC == nil and otherwise read,
	// but if a profiling signal happens at the instant that a program executes
	// a bad jump (before the program manages to handle the resulting fault)
	// the profiling handler could fault trying to read nonexistent memory.
	//
	// To recap, there are no constraints on the assembly being used for the
	// transition. We simply require that g and SP match and that the PC is not
	// in gogo.
	traceback := true
	if gp == nil || sp < gp.stack.lo || gp.stack.hi < sp || setsSP(pc) {
		traceback = false
	}
	var stk [maxCPUProfStack]uintptr
	var haveStackLock *g
	n := 0
	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
		// Cgo, we can't unwind and symbolize arbitrary C code,
		// so instead collect Go stack that leads to the cgo call.
		// This is especially important on windows, since all syscalls are cgo calls.
		if gcTryLockStackBarriers(mp.curg) {
			haveStackLock = mp.curg
			n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[0], len(stk), nil, nil, 0)
		}
	} else if traceback {
		var flags uint = _TraceTrap
		if gp.m.curg != nil && gcTryLockStackBarriers(gp.m.curg) {
			// It's safe to traceback the user stack.
			haveStackLock = gp.m.curg
			flags |= _TraceJumpStack
		}
		// Traceback is safe if we're on the system stack (if
		// necessary, flags will stop it before switching to
		// the user stack), or if we locked the user stack.
		if gp != gp.m.curg || haveStackLock != nil {
			n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, flags)
		}
	}
	if haveStackLock != nil {
		gcUnlockStackBarriers(haveStackLock)
	}

	if n <= 0 {
		// Normal traceback is impossible or has failed.
		// See if it falls into several common cases.
		n = 0
		if GOOS == "windows" && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
			// Libcall, i.e. runtime syscall on windows.
			// Collect Go stack that leads to the call.
			if gcTryLockStackBarriers(mp.libcallg.ptr()) {
				n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
				gcUnlockStackBarriers(mp.libcallg.ptr())
			}
		}
		if n == 0 {
			// If all of the above has failed, account it against abstract "System" or "GC".
			n = 2
			// "ExternalCode" is better than "etext".
			if pc > firstmoduledata.etext {
				pc = funcPC(_ExternalCode) + sys.PCQuantum
			}
			stk[0] = pc
			if mp.preemptoff != "" || mp.helpgc != 0 {
				stk[1] = funcPC(_GC) + sys.PCQuantum
			} else {
				stk[1] = funcPC(_System) + sys.PCQuantum
			}
		}
	}

	if prof.hz != 0 {
		// Simple cas-lock to coordinate with setcpuprofilerate.
		for !atomic.Cas(&prof.lock, 0, 1) {
			osyield()
		}
		if prof.hz != 0 {
			cpuprof.add(stk[:n])
		}
		atomic.Store(&prof.lock, 0)
	}
	mp.mallocing--
}

// Reports whether the function containing pc will set the SP to an
// absolute value. It is important that we don't traceback when such a
// function is at the bottom of the stack, since we can't be sure that we
// will find the caller.
//
// If the function is not on the bottom of the stack
// we assume that it will have set it up so that traceback will be consistent,
// either by being a traceback terminating function
// or putting one on the stack at the right offset.
func setsSP(pc uintptr) bool {
	f := findfunc(pc)
	if f == nil {
		// Unknown PC: assume the worst and stop the traceback here.
		return true
	}
	entry := f.entry
	return entry == gogoPC || entry == systemstackPC || entry == mcallPC || entry == morestackPC
}

// Arrange to call fn with a traceback hz times a second.
func setcpuprofilerate_m(hz int32) {
	// Force sane arguments.
	if hz < 0 {
		hz = 0
	}

	// Disable preemption, otherwise we can be rescheduled to another thread
	// that has profiling enabled.
	_g_ := getg()
	_g_.m.locks++

	// Stop profiler on this thread so that it is safe to lock prof.
	// if a profiling signal came in while we had prof locked,
	// it would deadlock.
	resetcpuprofiler(0)

	// Publish the new rate under the same cas-lock the profiling
	// signal path takes (see the prof.lock users earlier in this file).
	for !atomic.Cas(&prof.lock, 0, 1) {
		osyield()
	}
	prof.hz = hz
	atomic.Store(&prof.lock, 0)

	lock(&sched.lock)
	sched.profilehz = hz
	unlock(&sched.lock)

	// Re-arm the profiler on this thread only if profiling is enabled.
	if hz != 0 {
		resetcpuprofiler(hz)
	}

	_g_.m.locks--
}

// Change number of processors.  The world is stopped, sched is locked.
// gcworkbufs are not being modified by either the GC or
// the write barrier code.
// Returns list of Ps with local work, they need to be scheduled by the caller.
func procresize(nprocs int32) *p {
	old := gomaxprocs
	if old < 0 || old > _MaxGomaxprocs || nprocs <= 0 || nprocs > _MaxGomaxprocs {
		throw("procresize: invalid arg")
	}
	if trace.enabled {
		traceGomaxprocs(nprocs)
	}

	// update statistics
	now := nanotime()
	if sched.procresizetime != 0 {
		sched.totaltime += int64(old) * (now - sched.procresizetime)
	}
	sched.procresizetime = now

	// initialize new P's
	for i := int32(0); i < nprocs; i++ {
		pp := allp[i]
		if pp == nil {
			pp = new(p)
			pp.id = i
			pp.status = _Pgcstop
			pp.sudogcache = pp.sudogbuf[:0]
			for i := range pp.deferpool {
				pp.deferpool[i] = pp.deferpoolbuf[i][:0]
			}
			// Publish the P atomically so any concurrent reader of
			// allp sees a fully-initialized P.
			atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
		}
		if pp.mcache == nil {
			if old == 0 && i == 0 {
				if getg().m.mcache == nil {
					throw("missing mcache?")
				}
				pp.mcache = getg().m.mcache // bootstrap
			} else {
				pp.mcache = allocmcache()
			}
		}
	}

	// free unused P's
	for i := nprocs; i < old; i++ {
		p := allp[i]
		if trace.enabled {
			if p == getg().m.p.ptr() {
				// moving to p[0], pretend that we were descheduled
				// and then scheduled again to keep the trace sane.
				traceGoSched()
				traceProcStop(p)
			}
		}
		// move all runnable goroutines to the global queue
		for p.runqhead != p.runqtail {
			// pop from tail of local queue
			p.runqtail--
			gp := p.runq[p.runqtail%uint32(len(p.runq))].ptr()
			// push onto head of global queue
			globrunqputhead(gp)
		}
		if p.runnext != 0 {
			globrunqputhead(p.runnext.ptr())
			p.runnext = 0
		}
		// if there's a background worker, make it runnable and put
		// it on the global queue so it can clean itself up
		if gp := p.gcBgMarkWorker.ptr(); gp != nil {
			casgstatus(gp, _Gwaiting, _Grunnable)
			if trace.enabled {
				traceGoUnpark(gp, 0)
			}
			globrunqput(gp)
			// This assignment doesn't race because the
			// world is stopped.
			p.gcBgMarkWorker.set(nil)
		}
		// Drop the cached sudogs and defers so they can be collected.
		for i := range p.sudogbuf {
			p.sudogbuf[i] = nil
		}
		p.sudogcache = p.sudogbuf[:0]
		for i := range p.deferpool {
			for j := range p.deferpoolbuf[i] {
				p.deferpoolbuf[i][j] = nil
			}
			p.deferpool[i] = p.deferpoolbuf[i][:0]
		}
		freemcache(p.mcache)
		p.mcache = nil
		gfpurge(p)
		traceProcFree(p)
		p.status = _Pdead
		// can't free P itself because it can be referenced by an M in syscall
	}

	// Decide which P the current M keeps running on: keep the current
	// one if it survives the resize, otherwise switch to allp[0].
	_g_ := getg()
	if _g_.m.p != 0 && _g_.m.p.ptr().id < nprocs {
		// continue to use the current P
		_g_.m.p.ptr().status = _Prunning
	} else {
		// release the current P and acquire allp[0]
		if _g_.m.p != 0 {
			_g_.m.p.ptr().m = 0
		}
		_g_.m.p = 0
		_g_.m.mcache = nil
		p := allp[0]
		p.m = 0
		p.status = _Pidle
		acquirep(p)
		if trace.enabled {
			traceGoStart()
		}
	}
	// Every other P goes either on the idle list (empty run queue) or
	// onto the returned linked list of runnable Ps.
	var runnablePs *p
	for i := nprocs - 1; i >= 0; i-- {
		p := allp[i]
		if _g_.m.p.ptr() == p {
			continue
		}
		p.status = _Pidle
		if runqempty(p) {
			pidleput(p)
		} else {
			p.m.set(mget())
			p.link.set(runnablePs)
			runnablePs = p
		}
	}
	var int32p *int32 = &gomaxprocs // make compiler check that gomaxprocs is an int32
	atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
	return runnablePs
}

// Associate p and the current m.
func acquirep(_p_ *p) {
	// acquirep1 does the write-barrier-free part first; only after it
	// returns do we hold a P and may the remaining writes use barriers.
	acquirep1(_p_)

	// have p; write barriers now allowed
	_g_ := getg()
	_g_.m.mcache = _p_.mcache

	if trace.enabled {
		traceProcStart()
	}
}

// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func acquirep1(_p_ *p) {
	_g_ := getg()

	// The M must not already have a P or an mcache.
	if _g_.m.p != 0 || _g_.m.mcache != nil {
		throw("acquirep: already in go")
	}
	// The P must be idle and unowned.
	if _p_.m != 0 || _p_.status != _Pidle {
		id := int32(0)
		if _p_.m != 0 {
			id = _p_.m.ptr().id
		}
		print("acquirep: p->m=", _p_.m, "(", id, ") p->status=", _p_.status, "\n")
		throw("acquirep: invalid p state")
	}
	// Link M and P both ways, then mark the P running.
	_g_.m.p.set(_p_)
	_p_.m.set(_g_.m)
	_p_.status = _Prunning
}

// Disassociate p and the current m.
// Returns the released P so the caller can hand it off or idle it.
func releasep() *p {
	_g_ := getg()

	if _g_.m.p == 0 || _g_.m.mcache == nil {
		throw("releasep: invalid arg")
	}
	_p_ := _g_.m.p.ptr()
	// Sanity-check that the M<->P links and mcache are consistent.
	if _p_.m.ptr() != _g_.m || _p_.mcache != _g_.m.mcache || _p_.status != _Prunning {
		print("releasep: m=", _g_.m, " m->p=", _g_.m.p.ptr(), " p->m=", _p_.m, " m->mcache=", _g_.m.mcache, " p->mcache=", _p_.mcache, " p->status=", _p_.status, "\n")
		throw("releasep: invalid p state")
	}
	if trace.enabled {
		traceProcStop(_g_.m.p.ptr())
	}
	// Unlink both directions and mark the P idle.
	_g_.m.p = 0
	_g_.m.mcache = nil
	_p_.m = 0
	_p_.status = _Pidle
	return _p_
}

// incidlelocked adjusts sched.nmidlelocked by v (+1 when an M with a
// locked G goes idle, -1 when it wakes). When the count increases we
// may have entered a deadlock, so re-run the check.
func incidlelocked(v int32) {
	lock(&sched.lock)
	sched.nmidlelocked += v
	if v > 0 {
		checkdead()
	}
	unlock(&sched.lock)
}

// Check for deadlock situation.
// The check is based on number of running M's, if 0 -> deadlock.
// Sched must be locked (callers hold sched.lock).
func checkdead() {
	// For -buildmode=c-shared or -buildmode=c-archive it's OK if
	// there are no running goroutines.  The calling program is
	// assumed to be running.
	if islibrary || isarchive {
		return
	}

	// If we are dying because of a signal caught on an already idle thread,
	// freezetheworld will cause all running threads to block.
	// And runtime will essentially enter into deadlock state,
	// except that there is a thread that will call exit soon.
	if panicking > 0 {
		return
	}

	// -1 for sysmon
	run := sched.mcount - sched.nmidle - sched.nmidlelocked - 1
	if run > 0 {
		// Some M is still running user code; no deadlock.
		return
	}
	if run < 0 {
		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", sched.mcount, "\n")
		throw("checkdead: inconsistent counts")
	}

	// No M is running. Count blocked (waiting) goroutines; finding a
	// runnable/running/syscall goroutine here is inconsistent with
	// run == 0 and is reported as a throw.
	grunning := 0
	lock(&allglock)
	for i := 0; i < len(allgs); i++ {
		gp := allgs[i]
		if isSystemGoroutine(gp) {
			continue
		}
		s := readgstatus(gp)
		switch s &^ _Gscan {
		case _Gwaiting:
			grunning++
		case _Grunnable,
			_Grunning,
			_Gsyscall:
			unlock(&allglock)
			print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
			throw("checkdead: runnable g")
		}
	}
	unlock(&allglock)
	if grunning == 0 { // possible if main goroutine calls runtime·Goexit()
		throw("no goroutines (main called runtime.Goexit) - deadlock!")
	}

	// Maybe jump time forward for playground.
	gp := timejump()
	if gp != nil {
		// A timer-driven goroutine can make progress: wake an idle M
		// with an idle P to run it instead of declaring deadlock.
		casgstatus(gp, _Gwaiting, _Grunnable)
		globrunqput(gp)
		_p_ := pidleget()
		if _p_ == nil {
			throw("checkdead: no p for timer")
		}
		mp := mget()
		if mp == nil {
			// There should always be a free M since
			// nothing is running.
			throw("checkdead: no m for timer")
		}
		mp.nextp.set(_p_)
		notewakeup(&mp.park)
		return
	}

	getg().m.throwing = -1 // do not dump full stacks
	throw("all goroutines are asleep - deadlock!")
}

// forcegcperiod is the maximum time in nanoseconds between garbage
// collections. If we go this long without a garbage collection, one
// is forced to run.
//
// This is a variable for testing purposes. It normally doesn't change.
var forcegcperiod int64 = 2 * 60 * 1e9 // 2 minutes

// sysmon is the scheduler's background monitor loop: it polls the
// network, retakes Ps blocked in syscalls, preempts long-running Gs,
// forces periodic GCs, and scavenges unused heap spans.
//
// Always runs without a P, so write barriers are not allowed.
//
//go:nowritebarrierrec
func sysmon() {
	// If a heap span goes unused for 5 minutes after a garbage collection,
	// we hand it back to the operating system.
	scavengelimit := int64(5 * 60 * 1e9)

	if debug.scavenge > 0 {
		// Scavenge-a-lot for testing.
		forcegcperiod = 10 * 1e6
		scavengelimit = 20 * 1e6
	}

	lastscavenge := nanotime()
	nscavenge := 0

	lasttrace := int64(0)
	idle := 0 // how many cycles in succession we had not wokeup somebody
	delay := uint32(0)
	for {
		// Adaptive sleep: short while there is work, backing off
		// exponentially while idle.
		if idle == 0 { // start with 20us sleep...
			delay = 20
		} else if idle > 50 { // start doubling the sleep after 1ms...
			delay *= 2
		}
		if delay > 10*1000 { // up to 10ms
			delay = 10 * 1000
		}
		usleep(delay)
		// If GC is waiting or all Ps are idle, park on sysmonnote
		// instead of spinning (unless schedtrace output is wanted).
		if debug.schedtrace <= 0 && (sched.gcwaiting != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs)) { // TODO: fast atomic
			lock(&sched.lock)
			if atomic.Load(&sched.gcwaiting) != 0 || atomic.Load(&sched.npidle) == uint32(gomaxprocs) {
				atomic.Store(&sched.sysmonwait, 1)
				unlock(&sched.lock)
				// Make wake-up period small enough
				// for the sampling to be correct.
				maxsleep := forcegcperiod / 2
				if scavengelimit < forcegcperiod {
					maxsleep = scavengelimit / 2
				}
				notetsleep(&sched.sysmonnote, maxsleep)
				lock(&sched.lock)
				atomic.Store(&sched.sysmonwait, 0)
				noteclear(&sched.sysmonnote)
				idle = 0
				delay = 20
			}
			unlock(&sched.lock)
		}
		// poll network if not polled for more than 10ms
		lastpoll := int64(atomic.Load64(&sched.lastpoll))
		now := nanotime()
		unixnow := unixnanotime()
		if lastpoll != 0 && lastpoll+10*1000*1000 < now {
			atomic.Cas64(&sched.lastpoll, uint64(lastpoll), uint64(now))
			gp := netpoll(false) // non-blocking - returns list of goroutines
			if gp != nil {
				// Need to decrement number of idle locked M's
				// (pretending that one more is running) before injectglist.
				// Otherwise it can lead to the following situation:
				// injectglist grabs all P's but before it starts M's to run the P's,
				// another M returns from syscall, finishes running its G,
				// observes that there is no work to do and no other running M's
				// and reports deadlock.
				incidlelocked(-1)
				injectglist(gp)
				incidlelocked(1)
			}
		}
		// retake P's blocked in syscalls
		// and preempt long running G's
		if retake(now) != 0 {
			idle = 0
		} else {
			idle++
		}
		// check if we need to force a GC
		lastgc := int64(atomic.Load64(&memstats.last_gc))
		if gcphase == _GCoff && lastgc != 0 && unixnow-lastgc > forcegcperiod && atomic.Load(&forcegc.idle) != 0 {
			lock(&forcegc.lock)
			forcegc.idle = 0
			forcegc.g.schedlink = 0
			injectglist(forcegc.g)
			unlock(&forcegc.lock)
		}
		// scavenge heap once in a while
		if lastscavenge+scavengelimit/2 < now {
			mheap_.scavenge(int32(nscavenge), uint64(now), uint64(scavengelimit))
			lastscavenge = now
			nscavenge++
		}
		if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
			lasttrace = now
			schedtrace(debug.scheddetail > 0)
		}
	}
}

// pdesc holds sysmon's per-P bookkeeping: the last observed
// schedtick/syscalltick for each P and when that observation was made.
// retake reads and updates it to decide when to preempt or retake a P.
var pdesc [_MaxGomaxprocs]struct {
	schedtick   uint32
	schedwhen   int64
	syscalltick uint32
	syscallwhen int64
}

// forcePreemptNS is the time slice given to a G before it is
// preempted. Used by retake below.
const forcePreemptNS = 10 * 1000 * 1000 // 10ms

// retake retakes Ps that have been blocked in a syscall for at least
// one sysmon tick (handing them off so other work can run) and
// preempts Gs that have been running for longer than forcePreemptNS.
// It returns the number of Ps retaken from syscalls (preemptions are
// not counted).
func retake(now int64) uint32 {
	n := 0
	for i := int32(0); i < gomaxprocs; i++ {
		_p_ := allp[i]
		if _p_ == nil {
			continue
		}
		pd := &pdesc[i]
		s := _p_.status
		if s == _Psyscall {
			// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
			t := int64(_p_.syscalltick)
			if int64(pd.syscalltick) != t {
				// Tick changed since last observation: restart the clock.
				pd.syscalltick = uint32(t)
				pd.syscallwhen = now
				continue
			}
			// On the one hand we don't want to retake Ps if there is no other work to do,
			// but on the other hand we want to retake them eventually
			// because they can prevent the sysmon thread from deep sleep.
			if runqempty(_p_) && atomic.Load(&sched.nmspinning)+atomic.Load(&sched.npidle) > 0 && pd.syscallwhen+10*1000*1000 > now {
				continue
			}
			// Need to decrement number of idle locked M's
			// (pretending that one more is running) before the CAS.
			// Otherwise the M from which we retake can exit the syscall,
			// increment nmidle and report deadlock.
			incidlelocked(-1)
			if atomic.Cas(&_p_.status, s, _Pidle) {
				if trace.enabled {
					traceGoSysBlock(_p_)
					traceProcStop(_p_)
				}
				n++
				_p_.syscalltick++
				handoffp(_p_)
			}
			incidlelocked(1)
		} else if s == _Prunning {
			// Preempt G if it's running for too long.
			t := int64(_p_.schedtick)
			if int64(pd.schedtick) != t {
				// A new G was scheduled since we last looked: restart the clock.
				pd.schedtick = uint32(t)
				pd.schedwhen = now
				continue
			}
			if pd.schedwhen+forcePreemptNS > now {
				continue
			}
			preemptone(_p_)
		}
	}
	return uint32(n)
}

// preemptall asks every goroutine currently running on some P to stop.
// This is purely best-effort: it can miss a goroutine that a processor
// has only just started running. No locks need to be held.
// It reports whether a preemption request was issued to at least one
// goroutine.
func preemptall() bool {
	preempted := false
	for i := int32(0); i < gomaxprocs; i++ {
		if _p_ := allp[i]; _p_ != nil && _p_.status == _Prunning && preemptone(_p_) {
			preempted = true
		}
	}
	return preempted
}

// preemptone asks the goroutine running on processor _p_ to stop.
// This is purely best-effort: it can fail to inform the goroutine, or
// inform the wrong one, and even the right goroutine may ignore the
// request if it is simultaneously executing newstack.
// No lock needs to be held. It reports whether a preemption request
// was issued; the actual preemption happens later, indicated by
// gp.status no longer being _Grunning.
func preemptone(_p_ *p) bool {
	if mp := _p_.m.ptr(); mp != nil && mp != getg().m {
		if gp := mp.curg; gp != nil && gp != mp.g0 {
			gp.preempt = true
			// Every call in a goroutine checks for stack overflow by
			// comparing the current stack pointer to gp.stackguard0.
			// Setting stackguard0 to stackPreempt folds preemption
			// into the normal stack overflow check.
			gp.stackguard0 = stackPreempt
			return true
		}
	}
	return false
}

// starttime records the time of the first schedtrace call; schedtrace
// prints timestamps relative to it.
var starttime int64

// schedtrace prints a snapshot of scheduler state: global counters,
// per-P run-queue lengths, and — when detailed is true — per-P, per-M
// and per-G details. Data read from Ps, Ms and Gs is racy; see below.
func schedtrace(detailed bool) {
	now := nanotime()
	if starttime == 0 {
		starttime = now
	}

	lock(&sched.lock)
	print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle, " threads=", sched.mcount, " spinningthreads=", sched.nmspinning, " idlethreads=", sched.nmidle, " runqueue=", sched.runqsize)
	if detailed {
		print(" gcwaiting=", sched.gcwaiting, " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait, "\n")
	}
	// We must be careful while reading data from P's, M's and G's.
	// Even if we hold schedlock, most data can be changed concurrently.
	// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
	for i := int32(0); i < gomaxprocs; i++ {
		_p_ := allp[i]
		if _p_ == nil {
			continue
		}
		mp := _p_.m.ptr()
		h := atomic.Load(&_p_.runqhead)
		t := atomic.Load(&_p_.runqtail)
		if detailed {
			id := int32(-1)
			if mp != nil {
				id = mp.id
			}
			print("  P", i, ": status=", _p_.status, " schedtick=", _p_.schedtick, " syscalltick=", _p_.syscalltick, " m=", id, " runqsize=", t-h, " gfreecnt=", _p_.gfreecnt, "\n")
		} else {
			// In non-detailed mode format lengths of per-P run queues as:
			// [len1 len2 len3 len4]
			print(" ")
			if i == 0 {
				print("[")
			}
			print(t - h)
			if i == gomaxprocs-1 {
				print("]\n")
			}
		}
	}

	if !detailed {
		unlock(&sched.lock)
		return
	}

	// Per-M details. Missing associations print as -1.
	for mp := allm; mp != nil; mp = mp.alllink {
		_p_ := mp.p.ptr()
		gp := mp.curg
		lockedg := mp.lockedg
		id1 := int32(-1)
		if _p_ != nil {
			id1 = _p_.id
		}
		id2 := int64(-1)
		if gp != nil {
			id2 = gp.goid
		}
		id3 := int64(-1)
		if lockedg != nil {
			id3 = lockedg.goid
		}
		print("  M", mp.id, ": p=", id1, " curg=", id2, " mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, ""+" locks=", mp.locks, " dying=", mp.dying, " helpgc=", mp.helpgc, " spinning=", mp.spinning, " blocked=", getg().m.blocked, " lockedg=", id3, "\n")
	}

	// Per-G details, under allglock to keep allgs stable.
	lock(&allglock)
	for gi := 0; gi < len(allgs); gi++ {
		gp := allgs[gi]
		mp := gp.m
		lockedm := gp.lockedm
		id1 := int32(-1)
		if mp != nil {
			id1 = mp.id
		}
		id2 := int32(-1)
		if lockedm != nil {
			id2 = lockedm.id
		}
		print("  G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason, ") m=", id1, " lockedm=", id2, "\n")
	}
	unlock(&allglock)
	unlock(&sched.lock)
}

// Put mp on midle list.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func mput(mp *m) {
	// Push onto the head of the singly-linked idle list.
	mp.schedlink = sched.midle
	sched.midle.set(mp)
	sched.nmidle++
	// One more M just went idle; this may complete a deadlock.
	checkdead()
}

// mget pops an M off the midle list, or returns nil if none is idle.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func mget() *m {
	mp := sched.midle.ptr()
	if mp == nil {
		return nil
	}
	sched.midle = mp.schedlink
	sched.nmidle--
	return mp
}

// Put gp on the global runnable queue.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func globrunqput(gp *g) {
	gp.schedlink = 0
	// Append to the tail of the intrusive linked list.
	if sched.runqtail != 0 {
		sched.runqtail.ptr().schedlink.set(gp)
	} else {
		// Queue was empty: gp also becomes the head.
		sched.runqhead.set(gp)
	}
	sched.runqtail.set(gp)
	sched.runqsize++
}

// Put gp at the head of the global runnable queue.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func globrunqputhead(gp *g) {
	// Prepend to the intrusive linked list.
	gp.schedlink = sched.runqhead
	sched.runqhead.set(gp)
	if sched.runqtail == 0 {
		// Queue was empty: gp is also the tail.
		sched.runqtail.set(gp)
	}
	sched.runqsize++
}

// Put a batch of runnable goroutines on the global runnable queue.
// ghead..gtail must already be linked via schedlink and contain n Gs.
// Sched must be locked.
func globrunqputbatch(ghead *g, gtail *g, n int32) {
	gtail.schedlink = 0
	// Splice the whole chain onto the tail in O(1).
	if sched.runqtail != 0 {
		sched.runqtail.ptr().schedlink.set(ghead)
	} else {
		sched.runqhead.set(ghead)
	}
	sched.runqtail.set(gtail)
	sched.runqsize += n
}

// Try get a batch of G's from the global runnable queue.
// Returns one G to run now; the rest of the batch is moved onto _p_'s
// local run queue. max limits the batch size (ignored when <= 0).
// Sched must be locked.
func globrunqget(_p_ *p, max int32) *g {
	if sched.runqsize == 0 {
		return nil
	}

	// Take roughly our 1/gomaxprocs share, capped by max and by half
	// the local run queue capacity.
	n := sched.runqsize/gomaxprocs + 1
	if n > sched.runqsize {
		n = sched.runqsize
	}
	if max > 0 && n > max {
		n = max
	}
	if n > int32(len(_p_.runq))/2 {
		n = int32(len(_p_.runq)) / 2
	}

	sched.runqsize -= n
	if sched.runqsize == 0 {
		sched.runqtail = 0
	}

	// First G is returned directly; the remaining n-1 go to the local queue.
	gp := sched.runqhead.ptr()
	sched.runqhead = gp.schedlink
	n--
	for ; n > 0; n-- {
		gp1 := sched.runqhead.ptr()
		sched.runqhead = gp1.schedlink
		runqput(_p_, gp1, false)
	}
	return gp
}

// Put p on the _Pidle list.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func pidleput(_p_ *p) {
	// An idle P must have nothing left to run.
	if !runqempty(_p_) {
		throw("pidleput: P has non-empty run queue")
	}
	_p_.link = sched.pidle
	sched.pidle.set(_p_)
	atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
}

// pidleget pops a P off the _Pidle list, or returns nil if none is idle.
// Sched must be locked.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func pidleget() *p {
	_p_ := sched.pidle.ptr()
	if _p_ == nil {
		return nil
	}
	sched.pidle = _p_.link
	atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
	return _p_
}

// runqempty reports whether _p_ has no Gs on its local run queue,
// counting the runnext slot. Note that this test is generally racy.
func runqempty(_p_ *p) bool {
	if _p_.runqhead != _p_.runqtail {
		return false
	}
	return _p_.runnext == 0
}

// To shake out latent assumptions about scheduling order,
// we introduce some randomness into scheduling decisions
// when running with the race detector.
// The need for this was made obvious by changing the
// (deterministic) scheduling order in Go 1.5 and breaking
// many poorly-written tests.
// With the randomness here, as long as the tests pass
// consistently with -race, they shouldn't have latent scheduling
// assumptions.
const randomizeScheduler = raceenabled // true only in -race builds

// runqput tries to put g on the local runnable queue.
// If next is false, runqput adds g to the tail of the runnable queue.
// If next is true, runqput puts g in the _p_.runnext slot.
// If the run queue is full, runqput puts g on the global queue.
// Executed only by the owner P.
func runqput(_p_ *p, gp *g, next bool) {
	if randomizeScheduler && next && fastrand1()%2 == 0 {
		next = false
	}

	if next {
	retryNext:
		oldnext := _p_.runnext
		if !_p_.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
			goto retryNext
		}
		if oldnext == 0 {
			return
		}
		// Kick the old runnext out to the regular run queue.
		gp = oldnext.ptr()
	}

retry:
	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
	t := _p_.runqtail
	if t-h < uint32(len(_p_.runq)) {
		_p_.runq[t%uint32(len(_p_.runq))].set(gp)
		atomic.Store(&_p_.runqtail, t+1) // store-release, makes the item available for consumption
		return
	}
	// Local queue looked full: spill half of it plus gp to the global queue.
	if runqputslow(_p_, gp, h, t) {
		return
	}
	// the queue is not full, now the put above must succeed
	goto retry
}

// Put g and a batch of work from local runnable queue on global queue.
// h and t are the runqhead/runqtail values the caller observed; returns
// false if a consumer moved the head in the meantime (caller retries).
// Executed only by the owner P.
func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
	var batch [len(_p_.runq)/2 + 1]*g

	// First, grab a batch from local queue.
	n := t - h
	n = n / 2
	if n != uint32(len(_p_.runq)/2) {
		throw("runqputslow: queue is not full")
	}
	for i := uint32(0); i < n; i++ {
		batch[i] = _p_.runq[(h+i)%uint32(len(_p_.runq))].ptr()
	}
	if !atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
		return false
	}
	batch[n] = gp

	if randomizeScheduler {
		// Fisher-Yates shuffle of the batch (race builds only).
		for i := uint32(1); i <= n; i++ {
			j := fastrand1() % (i + 1)
			batch[i], batch[j] = batch[j], batch[i]
		}
	}

	// Link the goroutines.
	for i := uint32(0); i < n; i++ {
		batch[i].schedlink.set(batch[i+1])
	}

	// Now put the batch on global queue.
	lock(&sched.lock)
	globrunqputbatch(batch[0], batch[n], int32(n+1))
	unlock(&sched.lock)
	return true
}

// Get g from local runnable queue.
// If inheritTime is true, gp should inherit the remaining time in the
// current time slice. Otherwise, it should start a new time slice.
// Executed only by the owner P.
func runqget(_p_ *p) (gp *g, inheritTime bool) {
	// If there's a runnext, it's the next G to run.
	// CAS loop: stealers may also be racing to clear runnext.
	for {
		next := _p_.runnext
		if next == 0 {
			break
		}
		if _p_.runnext.cas(next, 0) {
			return next.ptr(), true
		}
	}

	for {
		h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
		t := _p_.runqtail
		if t == h {
			// Local queue is empty.
			return nil, false
		}
		gp := _p_.runq[h%uint32(len(_p_.runq))].ptr()
		if atomic.Cas(&_p_.runqhead, h, h+1) { // cas-release, commits consume
			return gp, false
		}
	}
}

// Grabs a batch of goroutines from _p_'s runnable queue into batch.
// Batch is a ring buffer starting at batchHead.
// Returns number of grabbed goroutines.
// Can be executed by any P.
func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
	for {
		h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with other consumers
		t := atomic.Load(&_p_.runqtail) // load-acquire, synchronize with the producer
		// Take half of the queue (rounding up).
		n := t - h
		n = n - n/2
		if n == 0 {
			if stealRunNextG {
				// Try to steal from _p_.runnext.
				if next := _p_.runnext; next != 0 {
					// Sleep to ensure that _p_ isn't about to run the g we
					// are about to steal.
					// The important use case here is when the g running on _p_
					// ready()s another g and then almost immediately blocks.
					// Instead of stealing runnext in this window, back off
					// to give _p_ a chance to schedule runnext. This will avoid
					// thrashing gs between different Ps.
					usleep(100)
					if !_p_.runnext.cas(next, 0) {
						continue
					}
					batch[batchHead%uint32(len(batch))] = next
					return 1
				}
			}
			return 0
		}
		if n > uint32(len(_p_.runq)/2) { // read inconsistent h and t
			continue
		}
		for i := uint32(0); i < n; i++ {
			g := _p_.runq[(h+i)%uint32(len(_p_.runq))]
			batch[(batchHead+i)%uint32(len(batch))] = g
		}
		if atomic.Cas(&_p_.runqhead, h, h+n) { // cas-release, commits consume
			return n
		}
	}
}

// Steal half of elements from local runnable queue of p2
// and put onto local runnable queue of p.
// Returns one of the stolen elements (or nil if failed).
func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
	t := _p_.runqtail
	n := runqgrab(p2, &_p_.runq, t, stealRunNextG)
	if n == 0 {
		return nil
	}
	// The last stolen G is returned to the caller; the rest stay in
	// _p_'s run queue (published below by advancing runqtail).
	n--
	gp := _p_.runq[(t+n)%uint32(len(_p_.runq))].ptr()
	if n == 0 {
		return gp
	}
	h := atomic.Load(&_p_.runqhead) // load-acquire, synchronize with consumers
	if t-h+n >= uint32(len(_p_.runq)) {
		throw("runqsteal: runq overflow")
	}
	atomic.Store(&_p_.runqtail, t+n) // store-release, makes the item available for consumption
	return gp
}

// testSchedLocalQueue exercises runqput/runqget on a fresh P: for each
// i it pushes the same element i times, pops it back i times, and
// checks the queue is empty before and after.
func testSchedLocalQueue() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		// Note: intentionally pushes &gs[i] (not &gs[j]) i times.
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

// testSchedLocalQueueSteal exercises runqsteal: for each i it fills
// p1 with i distinct Gs, steals into p2, then drains both queues and
// checks every G was seen exactly once and that the steal took half
// (rounded either way) of the elements.
func testSchedLocalQueueSteal() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		// s counts how many Gs ended up on p2's side (steal count).
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		// Every pushed G must have been popped exactly once.
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

// setMaxThreads implements runtime/debug.SetMaxThreads: it installs a
// new M-count limit and returns the previous one. checkmcount throws
// if the current thread count already exceeds the new limit.
//
//go:linkname setMaxThreads runtime/debug.setMaxThreads
func setMaxThreads(in int) (out int) {
	lock(&sched.lock)
	out = int(sched.maxmcount)
	sched.maxmcount = int32(in)
	checkmcount()
	unlock(&sched.lock)
	return
}

// haveexperiment reports whether name appears in the comma-separated
// GOEXPERIMENT list baked into sys.Goexperiment.
func haveexperiment(name string) bool {
	for x := sys.Goexperiment; x != ""; {
		// Split off the leading experiment name at the next comma.
		var xname string
		if i := index(x, ","); i < 0 {
			xname, x = x, ""
		} else {
			xname, x = x[:i], x[i+1:]
		}
		if xname == name {
			return true
		}
	}
	return false
}

// procPin pins the current goroutine to its M/P by taking an m.locks
// reference, and returns the pinned P's id. Must be paired with procUnpin.
//
//go:nosplit
func procPin() int {
	_g_ := getg()
	mp := _g_.m

	mp.locks++
	return int(mp.p.ptr().id)
}

// procUnpin releases the m.locks reference taken by procPin.
//
//go:nosplit
func procUnpin() {
	_g_ := getg()
	_g_.m.locks--
}

// sync_runtime_procPin is the runtime side of sync.runtime_procPin;
// it simply forwards to procPin.
//
//go:linkname sync_runtime_procPin sync.runtime_procPin
//go:nosplit
func sync_runtime_procPin() int {
	return procPin()
}

// sync_runtime_procUnpin is the runtime side of sync.runtime_procUnpin;
// it simply forwards to procUnpin.
//
//go:linkname sync_runtime_procUnpin sync.runtime_procUnpin
//go:nosplit
func sync_runtime_procUnpin() {
	procUnpin()
}

// sync_atomic_runtime_procPin is the runtime side of
// sync/atomic.runtime_procPin; it simply forwards to procPin.
//
//go:linkname sync_atomic_runtime_procPin sync/atomic.runtime_procPin
//go:nosplit
func sync_atomic_runtime_procPin() int {
	return procPin()
}

// sync_atomic_runtime_procUnpin is the runtime side of
// sync/atomic.runtime_procUnpin; it simply forwards to procUnpin.
//
//go:linkname sync_atomic_runtime_procUnpin sync/atomic.runtime_procUnpin
//go:nosplit
func sync_atomic_runtime_procUnpin() {
	procUnpin()
}

// Active spinning for sync.Mutex.
// i is the caller's current spin iteration count.
//go:linkname sync_runtime_canSpin sync.runtime_canSpin
//go:nosplit
func sync_runtime_canSpin(i int) bool {
	// sync.Mutex is cooperative, so we are conservative with spinning.
	// Spin only a few times and only if running on a multicore machine and
	// GOMAXPROCS>1 and there is at least one other running P and local runq is empty.
	// As opposed to runtime mutex we don't do passive spinning here,
	// because there can be work on global runq or on other Ps.
	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
		return false
	}
	if p := getg().m.p.ptr(); !runqempty(p) {
		// Our own P has runnable work; yield to it rather than spin.
		return false
	}
	return true
}

// sync_runtime_doSpin performs one bout of active spinning for
// sync.Mutex by yielding the processor active_spin_cnt times.
//
//go:linkname sync_runtime_doSpin sync.runtime_doSpin
//go:nosplit
func sync_runtime_doSpin() {
	procyield(active_spin_cnt)
}


================================================
FILE: examples/go/small.go
================================================
package example

// Person is a minimal example type: a name plus a pointer to the
// person's mother (nil when not set).
type Person struct {
	name string
	mom  *Person
}

// NewPerson constructs a Person with the given name and mother.
func NewPerson(name string, mom *Person) Person {
	p := Person{
		name: name,
		mom:  mom,
	}
	return p
}

// GetName returns the person's name.
func (self *Person) GetName() string {
	return self.name
}

// GetMom returns the person's mother (may be nil).
func (self *Person) GetMom() *Person {
	return self.mom
}

// Sample data. The mom field is *Person, so the mothers must be
// declared as values we can take the address of — the original
// initializer passed string literals ("Wilma", "Pearl") to a *Person
// field, which does not compile.
var pearl = Person{name: "Pearl"}
var wilma = Person{name: "Wilma", mom: &pearl}

var people = []Person{
	Person{name: "Pebbles", mom: &wilma},
	wilma,
}

// main prints the name of every person in people.
func main() {
	// range over a slice yields (index, value); the original
	// single-variable form bound p to the int index, so println(p)
	// printed 0, 1, ... instead of anything about the people.
	for _, p := range people {
		println(p.GetName())
	}
}


================================================
FILE: examples/go/type_switch.go
================================================
package p

// f demonstrates a type switch: aa is bound to a with the concrete
// type of each case (here *int); unmatched types fall through silently.
func f(a interface{}) {
	switch aa := a.(type) {
	case *int:
		print(aa)
	}
}


================================================
FILE: examples/go/value.go
================================================
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package reflect

import (
	"math"
	"runtime"
	"unsafe"
)

// ptrSize is the size of a pointer in bytes: 4 on 32-bit, 8 on 64-bit.
const ptrSize = 4 << (^uintptr(0) >> 63) // unsafe.Sizeof(uintptr(0)) but an ideal const
// cannotSet is a shared message string; its uses are outside this
// chunk — presumably a panic message for Set on unexported fields.
const cannotSet = "cannot set value obtained from unexported struct field"

// Value is the reflection interface to a Go value.
//
// Not all methods apply to all kinds of values.  Restrictions,
// if any, are noted in the documentation for each method.
// Use the Kind method to find out the kind of value before
// calling kind-specific methods.  Calling a method
// inappropriate to the kind of type causes a run time panic.
//
// The zero Value represents no value.
// Its IsValid method returns false, its Kind method returns Invalid,
// its String method returns "<invalid Value>", and all other methods panic.
// Most functions and methods never return an invalid value.
// If one does, its documentation states the conditions explicitly.
//
// A Value can be used concurrently by multiple goroutines provided that
// the underlying Go value can be used concurrently for the equivalent
// direct operations.
//
// Using == on two Values does not compare the underlying values
// they represent, but rather the contents of the Value structs.
// To compare two Values, compare the results of the Interface method.
type Value struct {
	// typ holds the type of the value represented by a Value.
	typ *rtype

	// Pointer-valued data or, if flagIndir is set, pointer to data.
	// Valid when either flagIndir is set or typ.pointers() is true.
	ptr unsafe.Pointer

	// flag holds metadata about the value.
	// The lowest bits are flag bits:
	//	- flagStickyRO: obtained via unexported not embedded field, so read-only
	//	- flagEmbedRO: obtained via unexported embedded field, so read-only
	//	- flagIndir: val holds a pointer to the data
	//	- flagAddr: v.CanAddr is true (implies flagIndir)
	//	- flagMethod: v is a method value.
	// The next five bits give the Kind of the value.
	// This repeats typ.Kind() except for method values.
	// The remaining 23+ bits give a method number for method values.
	// If flag.kind() != Func, code can assume that flagMethod is unset.
	// If ifaceIndir(typ), code can assume that flagIndir is set.
	flag // embedded, so flag's methods (e.g. kind) promote to Value

	// A method value represents a curried method invocation
	// like r.Read for some receiver r.  The typ+val+flag bits describe
	// the receiver r, but the flag's Kind bits say Func (methods are
	// functions), and the top bits of the flag give the method number
	// in r's type's method table.
}

// flag holds Value metadata bits; see the constants below and the
// flag-field documentation on Value.
type flag uintptr

const (
	flagKindWidth        = 5 // there are 27 kinds
	flagKindMask    flag = 1<<flagKindWidth - 1
	flagStickyRO    flag = 1 << 5
	flagEmbedRO     flag = 1 << 6
	flagIndir       flag = 1 << 7
	flagAddr        flag = 1 << 8
	flagMethod      flag = 1 << 9
	flagMethodShift      = 10
	flagRO          flag = flagStickyRO | flagEmbedRO
)

// kind extracts the Kind cached in the low flagKindWidth bits of f.
func (f flag) kind() Kind {
	k := f & flagKindMask
	return Kind(k)
}

// pointer returns the underlying pointer represented by v.
// v.Kind() must be Ptr, Map, Chan, Func, or UnsafePointer
func (v Value) pointer() unsafe.Pointer {
	// Only a value that is exactly one word wide and contains pointers
	// can be read out as a single pointer word.
	if v.typ.size != ptrSize || !v.typ.pointers() {
		panic("can't call pointer on a non-pointer Value")
	}
	if v.flag&flagIndir != 0 {
		// v.ptr is the address of the pointer; load it.
		return *(*unsafe.Pointer)(v.ptr)
	}
	// v.ptr is the pointer itself.
	return v.ptr
}

// packEface converts v to the empty interface.
func packEface(v Value) interface{} {
	t := v.typ
	var i interface{}
	e := (*emptyInterface)(unsafe.Pointer(&i))
	// First, fill in the data portion of the interface.
	switch {
	case ifaceIndir(t):
		if v.flag&flagIndir == 0 {
			panic("bad indir")
		}
		// Value is indirect, and so is the interface we're making.
		ptr := v.ptr
		if v.flag&flagAddr != 0 {
			// The value is addressable, so the interface must get its
			// own copy: otherwise writes through Set would be visible
			// inside the returned interface.
			// TODO: pass safe boolean from valueInterface so
			// we don't need to copy if safe==true?
			c := unsafe_New(t)
			typedmemmove(t, c, ptr)
			ptr = c
		}
		e.word = ptr
	case v.flag&flagIndir != 0:
		// Value is indirect, but interface is direct.  We need
		// to load the data at v.ptr into the interface data word.
		e.word = *(*unsafe.Pointer)(v.ptr)
	default:
		// Value is direct, and so is the interface.
		e.word = v.ptr
	}
	// Now, fill in the type portion.  We're very careful here not
	// to have any operation between the e.word and e.typ assignments
	// that would let the garbage collector observe the partially-built
	// interface value.
	e.typ = t
	return i
}

// unpackEface converts the empty interface i to a Value.
func unpackEface(i interface{}) Value {
	e := (*emptyInterface)(unsafe.Pointer(&i))
	// NOTE: don't read e.word until we know whether it is really a pointer or not.
	t := e.typ
	if t == nil {
		// A nil interface unpacks to the zero Value.
		return Value{}
	}
	// The flag starts as just the kind; add flagIndir when the
	// interface stores the data indirectly.
	f := flag(t.Kind())
	if ifaceIndir(t) {
		f |= flagIndir
	}
	return Value{t, e.word, f}
}

// A ValueError occurs when a Value method is invoked on
// a Value that does not support it.  Such cases are documented
// in the description of each method.
type ValueError struct {
	Method string // name of the method that was invoked
	Kind   Kind   // kind of the Value it was invoked on; Invalid (0) for the zero Value
}

// Error implements the error interface, describing which method was
// called on what kind of unsupported Value.
func (e *ValueError) Error() string {
	// A zero Kind records a call on the zero Value.
	if e.Kind != 0 {
		return "reflect: call of " + e.Method + " on " + e.Kind.String() + " Value"
	}
	return "reflect: call of " + e.Method + " on zero Value"
}

// methodName returns the name of the calling method,
// assumed to be two stack frames above.
func methodName() string {
	pc, _, _, _ := runtime.Caller(2)
	if fn := runtime.FuncForPC(pc); fn != nil {
		return fn.Name()
	}
	return "unknown method"
}

// emptyInterface is the header for an interface{} value.
type emptyInterface struct {
	typ  *rtype         // dynamic type of the stored value
	word unsafe.Pointer // data word: the value itself or, if ifaceIndir(typ), a pointer to it
}

// nonEmptyInterface is the header for a interface value with methods.
type nonEmptyInterface struct {
	// see ../runtime/iface.go:/Itab
	itab *struct {
		ityp   *rtype // static interface type
		typ    *rtype // dynamic concrete type
		link   unsafe.Pointer
		bad    int32
		unused int32
		fun    [100000]unsafe.Pointer // method table; sized arbitrarily large, only NumMethod entries exist
	}
	word unsafe.Pointer // data word, as in emptyInterface
}

// mustBe panics if f's kind is not expected.
// It is defined on flag rather than on Value (with flag embedded in
// Value) so that v.mustBe(Bool) compiles into v.flag.mustBe(Bool),
// copying only the one important word for the receiver.
func (f flag) mustBe(expected Kind) {
	if k := f.kind(); k != expected {
		panic(&ValueError{methodName(), k})
	}
}

// mustBeExported panics if f records that the value was obtained using
// an unexported field.
func (f flag) mustBeExported() {
	if f == 0 {
		// Zero flag means the zero Value; report Invalid explicitly,
		// consistent with mustBeAssignable (Invalid == Kind(0), so
		// behavior is unchanged).
		panic(&ValueError{methodName(), Invalid})
	}
	if f&flagRO != 0 {
		panic("reflect: " + methodName() + " using value obtained using unexported field")
	}
}

// mustBeAssignable panics if f records that the value is not assignable,
// which is to say that either it was obtained using an unexported field
// or it is not addressable.
func (f flag) mustBeAssignable() {
	if f == 0 {
		panic(&ValueError{methodName(), Invalid})
	}
	// A value is assignable when it is addressable and not read-only.
	switch {
	case f&flagRO != 0:
		panic("reflect: " + methodName() + " using value obtained using unexported field")
	case f&flagAddr == 0:
		panic("reflect: " + methodName() + " using unaddressable value")
	}
}

// Addr returns a pointer value representing the address of v.
// It panics if CanAddr() returns false.
// Addr is typically used to obtain a pointer to a struct field
// or slice element in order to call a method that requires a
// pointer receiver.
func (v Value) Addr() Value {
	if v.flag&flagAddr == 0 {
		panic("reflect.Value.Addr of unaddressable value")
	}
	// The result keeps v's read-only bits and carries kind Ptr.
	fl := (v.flag & flagRO) | flag(Ptr)
	return Value{v.typ.ptrTo(), v.ptr, fl}
}

// Bool returns v's underlying value.
// It panics if v's kind is not Bool.
func (v Value) Bool() bool {
	v.mustBe(Bool)
	p := (*bool)(v.ptr)
	return *p
}

// Bytes returns v's underlying value.
// It panics if v's underlying value is not a slice of bytes.
func (v Value) Bytes() []byte {
	v.mustBe(Slice)
	if v.typ.Elem().Kind() != Uint8 {
		panic("reflect.Value.Bytes of non-byte slice")
	}
	// A slice header is always bigger than a word, so flagIndir is set
	// and v.ptr points at the header.
	hdr := (*[]byte)(v.ptr)
	return *hdr
}

// runes returns v's underlying value.
// It panics if v's underlying value is not a slice of runes (int32s).
func (v Value) runes() []rune {
	v.mustBe(Slice)
	if v.typ.Elem().Kind() != Int32 {
		// Fix: the panic previously named the wrong method ("Bytes").
		panic("reflect.Value.runes of non-rune slice")
	}
	// Slice is always bigger than a word; assume flagIndir.
	return *(*[]rune)(v.ptr)
}

// CanAddr reports whether the value's address can be obtained with Addr.
// Such values are called addressable.  A value is addressable if it is
// an element of a slice, an element of an addressable array,
// a field of an addressable struct, or the result of dereferencing a pointer.
// If CanAddr returns false, calling Addr will panic.
func (v Value) CanAddr() bool {
	// flagAddr is a single bit, so testing for equality with it is the
	// same as testing it is nonzero after masking.
	return v.flag&flagAddr == flagAddr
}

// CanSet reports whether the value of v can be changed.
// A Value can be changed only if it is addressable and was not
// obtained by the use of unexported struct fields.
// If CanSet returns false, calling Set or any type-specific
// setter (e.g., SetBool, SetInt) will panic.
func (v Value) CanSet() bool {
	// Addressable and neither read-only bit set.
	const mask = flagAddr | flagRO
	return v.flag&mask == flagAddr
}

// Call calls the function v with the input arguments in.
// For example, if len(in) == 3, v.Call(in) represents the Go call v(in[0], in[1], in[2]).
// Call panics if v's Kind is not Func.
// It returns the output results as Values.
// As in Go, each input argument must be assignable to the
// type of the function's corresponding input parameter.
// If v is a variadic function, Call creates the variadic slice parameter
// itself, copying in the corresponding values.
func (v Value) Call(in []Value) []Value {
	v.mustBe(Func)
	v.mustBeExported()
	// All frame layout and invocation logic is shared with CallSlice
	// in the unexported call method; "Call" is used in panic messages.
	return v.call("Call", in)
}

// CallSlice calls the variadic function v with the input arguments in,
// assigning the slice in[len(in)-1] to v's final variadic argument.
// For example, if len(in) == 3, v.CallSlice(in) represents the Go call v(in[0], in[1], in[2]...).
// CallSlice panics if v's Kind is not Func or if v is not variadic.
// It returns the output results as Values.
// As in Go, each input argument must be assignable to the
// type of the function's corresponding input parameter.
func (v Value) CallSlice(in []Value) []Value {
	v.mustBe(Func)
	v.mustBeExported()
	// Shares its implementation with Call; the "CallSlice" op string
	// selects the slice-passing argument checks in call.
	return v.call("CallSlice", in)
}

// callGC, when set, makes Value.call run the garbage collector
// immediately after the underlying function returns.
var callGC bool // for testing; see TestCallMethodJump

// call is the shared implementation of Value.Call and Value.CallSlice.
// op is "Call" or "CallSlice" and is used only in panic messages.
// It validates the arguments, lays out a concrete argument frame,
// invokes the function, and unpacks the results back into Values.
func (v Value) call(op string, in []Value) []Value {
	// Get function pointer, type.
	t := v.typ
	var (
		fn       unsafe.Pointer
		rcvr     Value
		rcvrtype *rtype
	)
	if v.flag&flagMethod != 0 {
		// Method value: v describes the receiver and the method index
		// is stored in the high bits of the flag.
		rcvr = v
		rcvrtype, t, fn = methodReceiver(op, v, int(v.flag)>>flagMethodShift)
	} else if v.flag&flagIndir != 0 {
		fn = *(*unsafe.Pointer)(v.ptr)
	} else {
		fn = v.ptr
	}

	if fn == nil {
		panic("reflect.Value.Call: call of nil function")
	}

	isSlice := op == "CallSlice"
	n := t.NumIn()
	// Check the argument count against the signature; for a variadic
	// Call, n counts only the fixed parameters.
	if isSlice {
		if !t.IsVariadic() {
			panic("reflect: CallSlice of non-variadic function")
		}
		if len(in) < n {
			panic("reflect: CallSlice with too few input arguments")
		}
		if len(in) > n {
			panic("reflect: CallSlice with too many input arguments")
		}
	} else {
		if t.IsVariadic() {
			n--
		}
		if len(in) < n {
			panic("reflect: Call with too few input arguments")
		}
		if !t.IsVariadic() && len(in) > n {
			panic("reflect: Call with too many input arguments")
		}
	}
	for _, x := range in {
		if x.Kind() == Invalid {
			panic("reflect: " + op + " using zero Value argument")
		}
	}
	for i := 0; i < n; i++ {
		if xt, targ := in[i].Type(), t.In(i); !xt.AssignableTo(targ) {
			panic("reflect: " + op + " using " + xt.String() + " as type " + targ.String())
		}
	}
	if !isSlice && t.IsVariadic() {
		// prepare slice for remaining values
		m := len(in) - n
		slice := MakeSlice(t.In(n), m, m)
		elem := t.In(n).Elem()
		for i := 0; i < m; i++ {
			x := in[n+i]
			if xt := x.Type(); !xt.AssignableTo(elem) {
				panic("reflect: cannot use " + xt.String() + " as type " + elem.String() + " in " + op)
			}
			slice.Index(i).Set(x)
		}
		origIn := in
		in = make([]Value, n+1)
		copy(in[:n], origIn)
		in[n] = slice
	}

	nin := len(in)
	if nin != t.NumIn() {
		panic("reflect.Value.Call: wrong argument count")
	}
	nout := t.NumOut()

	// Compute frame type.
	frametype, _, retOffset, _, framePool := funcLayout(t, rcvrtype)

	// Allocate a chunk of memory for frame.
	var args unsafe.Pointer
	if nout == 0 {
		args = framePool.Get().(unsafe.Pointer)
	} else {
		// Can't use pool if the function has return values.
		// We will leak pointer to args in ret, so its lifetime is not scoped.
		args = unsafe_New(frametype)
	}
	off := uintptr(0)

	// Copy inputs into args.
	if rcvrtype != nil {
		storeRcvr(rcvr, args)
		off = ptrSize
	}
	for i, v := range in {
		v.mustBeExported()
		targ := t.In(i).(*rtype)
		a := uintptr(targ.align)
		// Round off up to this argument's alignment (a is a power of two).
		off = (off + a - 1) &^ (a - 1)
		n := targ.size
		addr := unsafe.Pointer(uintptr(args) + off)
		v = v.assignTo("reflect.Value.Call", targ, addr)
		if v.flag&flagIndir != 0 {
			typedmemmove(targ, addr, v.ptr)
		} else {
			*(*unsafe.Pointer)(addr) = v.ptr
		}
		off += n
	}

	// Call.
	call(frametype, fn, args, uint32(frametype.size), uint32(retOffset))

	// For testing; see TestCallMethodJump.
	if callGC {
		runtime.GC()
	}

	var ret []Value
	if nout == 0 {
		memclr(args, frametype.size)
		framePool.Put(args)
	} else {
		// Zero the now unused input area of args,
		// because the Values returned by this function contain pointers to the args object,
		// and will thus keep the args object alive indefinitely.
		memclr(args, retOffset)
		// Copy return values out of args.
		ret = make([]Value, nout)
		off = retOffset
		for i := 0; i < nout; i++ {
			tv := t.Out(i)
			a := uintptr(tv.Align())
			off = (off + a - 1) &^ (a - 1)
			// The returned Values point directly into the frame;
			// flagIndir records that ptr is the address of the data.
			fl := flagIndir | flag(tv.Kind())
			ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl}
			off += tv.Size()
		}
	}

	return ret
}

// callReflect is the call implementation used by a function
// returned by MakeFunc. In many ways it is the opposite of the
// method Value.call above. The method above converts a call using Values
// into a call of a function with a concrete argument frame, while
// callReflect converts a call of a function with a concrete argument
// frame into a call using Values.
// It is in this file so that it can be next to the call method above.
// The remainder of the MakeFunc implementation is in makefunc.go.
//
// NOTE: This function must be marked as a "wrapper" in the generated code,
// so that the linker can make it work correctly for panic and recover.
// The gc compilers know to do that for the name "reflect.callReflect".
func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) {
	ftyp := ctxt.typ
	f := ctxt.fn

	// Copy argument frame into Values.
	ptr := frame
	off := uintptr(0)
	in := make([]Value, 0, len(ftyp.in))
	for _, arg := range ftyp.in {
		typ := arg
		// Advance off to this argument's alignment boundary.
		off += -off & uintptr(typ.align-1)
		addr := unsafe.Pointer(uintptr(ptr) + off)
		v := Value{typ, nil, flag(typ.Kind())}
		if ifaceIndir(typ) {
			// value cannot be inlined in interface data.
			// Must make a copy, because f might keep a reference to it,
			// and we cannot let f keep a reference to the stack frame
			// after this function returns, not even a read-only reference.
			v.ptr = unsafe_New(typ)
			typedmemmove(typ, v.ptr, addr)
			v.flag |= flagIndir
		} else {
			v.ptr = *(*unsafe.Pointer)(addr)
		}
		in = append(in, v)
		off += typ.size
	}

	// Call underlying function.
	out := f(in)
	if len(out) != len(ftyp.out) {
		panic("reflect: wrong return count from function created by MakeFunc")
	}

	// Copy results back into argument frame.
	if len(ftyp.out) > 0 {
		off += -off & (ptrSize - 1)
		if runtime.GOARCH == "amd64p32" {
			off = align(off, 8)
		}
		for i, arg := range ftyp.out {
			typ := arg
			v := out[i]
			// The returned Value must carry exactly the declared result type.
			if v.typ != typ {
				panic("reflect: function created by MakeFunc using " + funcName(f) +
					" returned wrong type: have " +
					out[i].typ.String() + " for " + typ.String())
			}
			// Refuse to launder read-only values out through the frame.
			if v.flag&flagRO != 0 {
				panic("reflect: function created by MakeFunc using " + funcName(f) +
					" returned value obtained from unexported field")
			}
			off += -off & uintptr(typ.align-1)
			addr := unsafe.Pointer(uintptr(ptr) + off)
			if v.flag&flagIndir != 0 {
				typedmemmove(typ, addr, v.ptr)
			} else {
				*(*unsafe.Pointer)(addr) = v.ptr
			}
			off += typ.size
		}
	}
}

// methodReceiver returns information about the receiver
// described by v. The Value v may or may not have the
// flagMethod bit set, so the kind cached in v.flag should
// not be used.
// The return value rcvrtype gives the method's actual receiver type.
// The return value t gives the method type signature (without the receiver).
// The return value fn is a pointer to the method code.
func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *rtype, fn unsafe.Pointer) {
	i := methodIndex
	if v.typ.Kind() == Interface {
		// Interface receiver: resolve the method through the itab.
		tt := (*interfaceType)(unsafe.Pointer(v.typ))
		if uint(i) >= uint(len(tt.methods)) {
			panic("reflect: internal error: invalid method index")
		}
		m := &tt.methods[i]
		// A non-nil pkgPath marks an unexported method.
		if m.pkgPath != nil {
			panic("reflect: " + op + " of unexported method")
		}
		iface := (*nonEmptyInterface)(v.ptr)
		if iface.itab == nil {
			panic("reflect: " + op + " of method on nil interface value")
		}
		rcvrtype = iface.itab.typ
		fn = unsafe.Pointer(&iface.itab.fun[i])
		t = m.typ
	} else {
		// Concrete receiver: resolve through the type's method table.
		rcvrtype = v.typ
		ut := v.typ.uncommon()
		if ut == nil || uint(i) >= uint(len(ut.methods)) {
			panic("reflect: internal error: invalid method index")
		}
		m := &ut.methods[i]
		if m.pkgPath != nil {
			panic("reflect: " + op + " of unexported method")
		}
		fn = unsafe.Pointer(&m.ifn)
		t = m.mtyp
	}
	return
}

// storeRcvr stores at p the word which is used to encode the method
// receiver v at the start of the argument list.
// Reflect uses the "interface" calling convention for
// methods, which always uses one word to record the receiver.
func storeRcvr(v Value, p unsafe.Pointer) {
	t := v.typ
	if t.Kind() == Interface {
		// the interface data word becomes the receiver word
		iface := (*nonEmptyInterface)(v.ptr)
		*(*unsafe.Pointer)(p) = iface.word
	} else if v.flag&flagIndir != 0 && !ifaceIndir(t) {
		// Pointer-shaped value stored indirectly: load the word.
		*(*unsafe.Pointer)(p) = *(*unsafe.Pointer)(v.ptr)
	} else {
		// Otherwise v.ptr is itself the receiver word.
		*(*unsafe.Pointer)(p) = v.ptr
	}
}

// align returns the result of rounding x up to a multiple of n.
// n must be a power of two.
func align(x, n uintptr) uintptr {
	mask := n - 1
	return (x + mask) &^ mask
}

// callMethod is the call implementation used by a function returned
// by makeMethodValue (used by v.Method(i).Interface()).
// It is a streamlined version of the usual reflect call: the caller has
// already laid out the argument frame for us, so we don't have
// to deal with individual Values for each argument.
// It is in this file so that it can be next to the two similar functions above.
// The remainder of the makeMethodValue implementation is in makefunc.go.
//
// NOTE: This function must be marked as a "wrapper" in the generated code,
// so that the linker can make it work correctly for panic and recover.
// The gc compilers know to do that for the name "reflect.callMethod".
func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
	rcvr := ctxt.rcvr
	rcvrtype, t, fn := methodReceiver("call", rcvr, ctxt.method)
	frametype, argSize, retOffset, _, framePool := funcLayout(t, rcvrtype)

	// Make a new frame that is one word bigger so we can store the receiver.
	args := framePool.Get().(unsafe.Pointer)

	// Copy in receiver and rest of args.
	// The caller's frame lacks the receiver word, so its arguments start
	// at offset 0 while ours start at ptrSize.
	storeRcvr(rcvr, args)
	typedmemmovepartial(frametype, unsafe.Pointer(uintptr(args)+ptrSize), frame, ptrSize, argSize-ptrSize)

	// Call.
	call(frametype, fn, args, uint32(frametype.size), uint32(retOffset))

	// Copy return values. On amd64p32, the beginning of return values
	// is 64-bit aligned, so the caller's frame layout (which doesn't have
	// a receiver) is different from the layout of the fn call, which has
	// a receiver.
	// Ignore any changes to args and just copy return values.
	callerRetOffset := retOffset - ptrSize
	if runtime.GOARCH == "amd64p32" {
		callerRetOffset = align(argSize-ptrSize, 8)
	}
	typedmemmovepartial(frametype,
		unsafe.Pointer(uintptr(frame)+callerRetOffset),
		unsafe.Pointer(uintptr(args)+retOffset),
		retOffset,
		frametype.size-retOffset)

	// Scrub the frame before returning it to the pool.
	memclr(args, frametype.size)
	framePool.Put(args)
}

// funcName returns the name of f, for use in error messages.
func funcName(f func([]Value) []Value) string {
	pc := *(*uintptr)(unsafe.Pointer(&f))
	if rf := runtime.FuncForPC(pc); rf != nil {
		return rf.Name()
	}
	return "closure"
}

// Cap returns v's capacity.
// It panics if v's Kind is not Array, Chan, or Slice.
func (v Value) Cap() int {
	switch v.kind() {
	case Array:
		// An array's capacity equals its length.
		return v.typ.Len()
	case Chan:
		return int(chancap(v.pointer()))
	case Slice:
		// A slice header is larger than a word, so flagIndir is set
		// and v.ptr points at the header.
		return (*sliceHeader)(v.ptr).Cap
	}
	panic(&ValueError{"reflect.Value.Cap", v.kind()})
}

// Close closes the channel v.
// It panics if v's Kind is not Chan.
func (v Value) Close() {
	v.mustBe(Chan)
	// Closing mutates the channel, so refuse values obtained through
	// unexported fields.
	v.mustBeExported()
	chanclose(v.pointer())
}

// Complex returns v's underlying value, as a complex128.
// It panics if v's Kind is not Complex64 or Complex128
func (v Value) Complex() complex128 {
	switch v.kind() {
	case Complex64:
		// Widen both float32 components to float64.
		return complex128(*(*complex64)(v.ptr))
	case Complex128:
		return *(*complex128)(v.ptr)
	}
	panic(&ValueError{"reflect.Value.Complex", v.kind()})
}

// Elem returns the value that the interface v contains
// or that the pointer v points to.
// It panics if v's Kind is not Interface or Ptr.
// It returns the zero Value if v is nil.
func (v Value) Elem() Value {
	k := v.kind()
	switch k {
	case Interface:
		var eface interface{}
		// Empty and non-empty interfaces have different layouts;
		// convert a non-empty one through a one-method interface type.
		if v.typ.NumMethod() == 0 {
			eface = *(*interface{})(v.ptr)
		} else {
			eface = (interface{})(*(*interface {
				M()
			})(v.ptr))
		}
		x := unpackEface(eface)
		if x.flag != 0 {
			// Propagate v's read-only bits to the unpacked value.
			x.flag |= v.flag & flagRO
		}
		return x
	case Ptr:
		ptr := v.ptr
		if v.flag&flagIndir != 0 {
			ptr = *(*unsafe.Pointer)(ptr)
		}
		// The returned value's address is v's value.
		if ptr == nil {
			return Value{}
		}
		tt := (*ptrType)(unsafe.Pointer(v.typ))
		typ := tt.elem
		// The pointee is addressable and stored indirectly.
		fl := v.flag&flagRO | flagIndir | flagAddr
		fl |= flag(typ.Kind())
		return Value{typ, ptr, fl}
	}
	panic(&ValueError{"reflect.Value.Elem", v.kind()})
}

// Field returns the i'th field of the struct v.
// It panics if v's Kind is not Struct or i is out of range.
func (v Value) Field(i int) Value {
	if v.kind() != Struct {
		panic(&ValueError{"reflect.Value.Field", v.kind()})
	}
	tt := (*structType)(unsafe.Pointer(v.typ))
	if uint(i) >= uint(len(tt.fields)) {
		panic("reflect: Field index out of range")
	}
	field := &tt.fields[i]
	typ := field.typ

	// Inherit permission bits from v, but clear flagEmbedRO.
	fl := v.flag&(flagStickyRO|flagIndir|flagAddr) | flag(typ.Kind())
	// Using an unexported field forces flagRO.
	if field.pkgPath != nil {
		// A nil name marks an embedded (anonymous) field.
		if field.name == nil {
			fl |= flagEmbedRO
		} else {
			fl |= flagStickyRO
		}
	}
	// Either flagIndir is set and v.ptr points at struct,
	// or flagIndir is not set and v.ptr is the actual struct data.
	// In the former case, we want v.ptr + offset.
	// In the latter case, we must have field.offset = 0,
	// so v.ptr + field.offset is still okay.
	ptr := unsafe.Pointer(uintptr(v.ptr) + field.offset)
	return Value{typ, ptr, fl}
}

// FieldByIndex returns the nested field corresponding to index.
// It panics if v's Kind is not struct.
func (v Value) FieldByIndex(index []int) Value {
	// A single index degenerates to a plain Field access.
	if len(index) == 1 {
		return v.Field(index[0])
	}
	v.mustBe(Struct)
	for depth, fieldIndex := range index {
		// After the first step, follow embedded struct pointers.
		if depth > 0 && v.Kind() == Ptr && v.typ.Elem().Kind() == Struct {
			if v.IsNil() {
				panic("reflect: indirection through nil pointer to embedded struct")
			}
			v = v.Elem()
		}
		v = v.Field(fieldIndex)
	}
	return v
}

// FieldByName returns the struct field with the given name.
// It returns the zero Value if no field was found.
// It panics if v's Kind is not struct.
func (v Value) FieldByName(name string) Value {
	v.mustBe(Struct)
	f, ok := v.typ.FieldByName(name)
	if !ok {
		return Value{}
	}
	return v.FieldByIndex(f.Index)
}

// FieldByNameFunc returns the struct field with a name
// that satisfies the match function.
// It panics if v's Kind is not struct.
// It returns the zero Value if no field was found.
func (v Value) FieldByNameFunc(match func(string) bool) Value {
	f, ok := v.typ.FieldByNameFunc(match)
	if !ok {
		return Value{}
	}
	return v.FieldByIndex(f.Index)
}

// Float returns v's underlying value, as a float64.
// It panics if v's Kind is not Float32 or Float64
func (v Value) Float() float64 {
	switch v.kind() {
	case Float32:
		// Widen the float32 to float64.
		return float64(*(*float32)(v.ptr))
	case Float64:
		return *(*float64)(v.ptr)
	}
	panic(&ValueError{"reflect.Value.Float", v.kind()})
}

// uint8Type is the type descriptor for uint8, used by Value.Index
// to describe the bytes of a string.
var uint8Type = TypeOf(uint8(0)).(*rtype)

// Index returns v's i'th element.
// It panics if v's Kind is not Array, Slice, or String or i is out of range.
func (v Value) Index(i int) Value {
	switch v.kind() {
	case Array:
		tt := (*arrayType)(unsafe.Pointer(v.typ))
		if uint(i) >= uint(tt.len) {
			panic("reflect: array index out of range")
		}
		typ := tt.elem
		offset := uintptr(i) * typ.size

		// Either flagIndir is set and v.ptr points at array,
		// or flagIndir is not set and v.ptr is the actual array data.
		// In the former case, we want v.ptr + offset.
		// In the latter case, we must be doing Index(0), so offset = 0,
		// so v.ptr + offset is still okay.
		val := unsafe.Pointer(uintptr(v.ptr) + offset)
		fl := v.flag&(flagRO|flagIndir|flagAddr) | flag(typ.Kind()) // bits same as overall array
		return Value{typ, val, fl}

	case Slice:
		// Element flag same as Elem of Ptr.
		// Addressable, indirect, possibly read-only.
		s := (*sliceHeader)(v.ptr)
		if uint(i) >= uint(s.Len) {
			panic("reflect: slice index out of range")
		}
		tt := (*sliceType)(unsafe.Pointer(v.typ))
		typ := tt.elem
		val := arrayAt(s.Data, i, typ.size)
		fl := flagAddr | flagIndir | v.flag&flagRO | flag(typ.Kind())
		return Value{typ, val, fl}

	case String:
		s := (*stringHeader)(v.ptr)
		if uint(i) >= uint(s.Len) {
			panic("reflect: string index out of range")
		}
		// String bytes are not addressable: fl omits flagAddr.
		p := arrayAt(s.Data, i, 1)
		fl := v.flag&flagRO | flag(Uint8) | flagIndir
		return Value{uint8Type, p, fl}
	}
	panic(&ValueError{"reflect.Value.Index", v.kind()})
}

// Int returns v's underlying value, as an int64.
// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
func (v Value) Int() int64 {
	p := v.ptr
	switch v.kind() {
	case Int:
		return int64(*(*int)(p))
	case Int8:
		return int64(*(*int8)(p))
	case Int16:
		return int64(*(*int16)(p))
	case Int32:
		return int64(*(*int32)(p))
	case Int64:
		return *(*int64)(p)
	}
	panic(&ValueError{"reflect.Value.Int", v.kind()})
}

// CanInterface reports whether Interface can be used without panicking.
func (v Value) CanInterface() bool {
	if !v.IsValid() {
		panic(&ValueError{"reflect.Value.CanInterface", Invalid})
	}
	// Interface refuses read-only values; see valueInterface.
	return v.flag&flagRO == 0
}

// Interface returns v's current value as an interface{}.
// It is equivalent to:
//	var i interface{} = (v's underlying value)
// It panics if the Value was obtained by accessing
// unexported struct fields.
func (v Value) Interface() (i interface{}) {
	i = valueInterface(v, true)
	return
}

// valueInterface implements Value.Interface. When safe is true it
// refuses to expose values reached through unexported fields.
func valueInterface(v Value, safe bool) interface{} {
	if v.flag == 0 {
		panic(&ValueError{"reflect.Value.Interface", 0})
	}
	if safe && v.flag&flagRO != 0 {
		// Do not allow access to unexported values via Interface,
		// because they might be pointers that should not be
		// writable or methods or function that should not be callable.
		panic("reflect.Value.Interface: cannot return value obtained from unexported field or method")
	}
	if v.flag&flagMethod != 0 {
		// Materialize a method value into a real func value.
		v = makeMethodValue("Interface", v)
	}

	if v.kind() == Interface {
		// Special case: return the element inside the interface.
		// Empty interface has one layout, all interfaces with
		// methods have a second layout.
		if v.NumMethod() == 0 {
			return *(*interface{})(v.ptr)
		}
		return *(*interface {
			M()
		})(v.ptr)
	}

	// TODO: pass safe to packEface so we don't need to copy if safe==true?
	return packEface(v)
}

// InterfaceData returns the interface v's value as a uintptr pair.
// It panics if v's Kind is not Interface.
func (v Value) InterfaceData() [2]uintptr {
	// TODO: deprecate this
	v.mustBe(Interface)
	// We treat this as a read operation, so we allow
	// it even for unexported data, because the caller
	// has to import "unsafe" to turn it into something
	// that can be abused.
	// Interface value is always bigger than a word; assume flagIndir.
	return *(*[2]uintptr)(v.ptr)
}

// IsNil reports whether its argument v is nil. The argument must be
// a chan, func, interface, map, pointer, or slice value; if it is
// not, IsNil panics. Note that IsNil is not always equivalent to a
// regular comparison with nil in Go. For example, if v was created
// by calling ValueOf with an uninitialized interface variable i,
// i==nil will be true but v.IsNil will panic as v will be the zero
// Value.
func (v Value) IsNil() bool {
	switch v.kind() {
	case Chan, Func, Map, Ptr:
		if v.flag&flagMethod != 0 {
			// A method value is a non-nil func.
			return false
		}
		p := v.ptr
		if v.flag&flagIndir != 0 {
			p = *(*unsafe.Pointer)(p)
		}
		return p == nil
	case Interface, Slice:
		// Both interface and slice are nil if first word is 0.
		// Both are always bigger than a word; assume flagIndir.
		return *(*unsafe.Pointer)(v.ptr) == nil
	}
	panic(&ValueError{"reflect.Value.IsNil", v.kind()})
}

// IsValid reports whether v represents a value.
// It returns false if v is the zero Value.
// If IsValid returns false, all other methods except String panic.
// Most functions and methods never return an invalid value.
// If one does, its documentation states the conditions explicitly.
func (v Value) IsValid() bool {
	// The zero Value has a zero flag word; every valid Value has at
	// least its kind bits set.
	return v.flag != 0
}

// Kind returns v's Kind.
// If v is the zero Value (IsValid returns false), Kind returns Invalid.
func (v Value) Kind() Kind {
	// The kind is cached in the low bits of v.flag; see flag.kind.
	return v.kind()
}

// Len returns v's length.
// It panics if v's Kind is not Array, Chan, Map, Slice, or String.
func (v Value) Len() int {
	switch v.kind() {
	case Array:
		return int((*arrayType)(unsafe.Pointer(v.typ)).len)
	case Chan:
		return chanlen(v.pointer())
	case Map:
		return maplen(v.pointer())
	case Slice:
		// A slice header is bigger than a word, so flagIndir is set
		// and v.ptr points at the header.
		return (*sliceHeader)(v.ptr).Len
	case String:
		// Likewise for a string header.
		return (*stringHeader)(v.ptr).Len
	}
	panic(&ValueError{"reflect.Value.Len", v.kind()})
}

// MapIndex returns the value associated with key in the map v.
// It panics if v's Kind is not Map.
// It returns the zero Value if key is not found in the map or if v represents a nil map.
// As in Go, the key's value must be assignable to the map's key type.
func (v Value) MapIndex(key Value) Value {
	v.mustBe(Map)
	tt := (*mapType)(unsafe.Pointer(v.typ))

	// Do not require key to be exported, so that DeepEqual
	// and other programs can use all the keys returned by
	// MapKeys as arguments to MapIndex.  If either the map
	// or the key is unexported, though, the result will be
	// considered unexported.  This is consistent with the
	// behavior for structs, which allow read but not write
	// of unexported fields.
	key = key.assignTo("reflect.Value.MapIndex", tt.key, nil)

	// The runtime map accessor wants a pointer to the key data.
	var k unsafe.Pointer
	if key.flag&flagIndir != 0 {
		k = key.ptr
	} else {
		k = unsafe.Pointer(&key.ptr)
	}
	e := mapaccess(v.typ, v.pointer(), k)
	if e == nil {
		return Value{}
	}
	typ := tt.elem
	// Combine the read-only bits of the map and of the key.
	fl := (v.flag | key.flag) & flagRO
	fl |= flag(typ.Kind())
	if ifaceIndir(typ) {
		// Copy result so future changes to the map
		// won't change the underlying value.
		c := unsafe_New(typ)
		typedmemmove(typ, c, e)
		return Value{typ, c, fl | flagIndir}
	} else {
		return Value{typ, *(*unsafe.Pointer)(e), fl}
	}
}

// MapKeys returns a slice containing all the keys present in the map,
// in unspecified order.
// It panics if v's Kind is not Map.
// It returns an empty slice if v represents a nil map.
func (v Value) MapKeys() []Value {
	v.mustBe(Map)
	tt := (*mapType)(unsafe.Pointer(v.typ))
	keyType := tt.key

	// Every returned key carries the map's read-only bits.
	fl := v.flag&flagRO | flag(keyType.Kind())

	m := v.pointer()
	mlen := int(0)
	if m != nil {
		mlen = maplen(m)
	}
	it := mapiterinit(v.typ, m)
	a := make([]Value, mlen)
	var i int
	for i = 0; i < len(a); i++ {
		key := mapiterkey(it)
		if key == nil {
			// Someone deleted an entry from the map since we
			// called maplen above.  It's a data race, but nothing
			// we can do about it.
			break
		}
		if ifaceIndir(keyType) {
			// Copy result so future changes to the map
			// won't change the underlying value.
			c := unsafe_New(keyType)
			typedmemmove(keyType, c, key)
			a[i] = Value{keyType, c, fl | flagIndir}
		} else {
			a[i] = Value{keyType, *(*unsafe.Pointer)(key), fl}
		}
		mapiternext(it)
	}
	// Truncate in case the iteration stopped early (see the race note).
	return a[:i]
}

// Method returns a function value corresponding to v's i'th method.
// The arguments to a Call on the returned function should not include
// a receiver; the returned function will always use v as the receiver.
// Method panics if i is out of range or if v is a nil interface value.
func (v Value) Method(i int) Value {
	if v.typ == nil {
		panic(&ValueError{"reflect.Value.Method", Invalid})
	}
	// A method value has no methods of its own, so its flagMethod bit
	// being set also means i is out of range.
	if v.flag&flagMethod != 0 || uint(i) >= uint(v.typ.NumMethod()) {
		panic("reflect: Method index out of range")
	}
	if v.typ.Kind() == Interface && v.IsNil() {
		panic("reflect: Method on nil interface value")
	}
	// Build the method-value flag: the receiver's sticky read-only and
	// indirection bits, kind Func, and the method index above flagMethodShift.
	fl := v.flag & (flagStickyRO | flagIndir) // Clear flagEmbedRO
	fl |= flag(Func)
	fl |= flag(i)<<flagMethodShift | flagMethod
	return Value{v.typ, v.ptr, fl}
}

// NumMethod returns the number of methods in the value's method set.
func (v Value) NumMethod() int {
	if v.typ == nil {
		panic(&ValueError{"reflect.Value.NumMethod", Invalid})
	}
	// A method value is a plain func and has no methods of its own.
	if v.flag&flagMethod == 0 {
		return v.typ.NumMethod()
	}
	return 0
}

// MethodByName returns a function value corresponding to the method
// of v with the given name.
// The arguments to a Call on the returned function should not include
// a receiver; the returned function will always use v as the receiver.
// It returns the zero Value if no method was found.
func (v Value) MethodByName(name string) Value {
	if v.typ == nil {
		panic(&ValueError{"reflect.Value.MethodByName", Invalid})
	}
	// A method value has no methods of its own.
	if v.flag&flagMethod != 0 {
		return Value{}
	}
	if m, ok := v.typ.MethodByName(name); ok {
		return v.Method(m.Index)
	}
	return Value{}
}

// NumField returns the number of fields in the struct v.
// It panics if v's Kind is not Struct.
func (v Value) NumField() int {
	v.mustBe(Struct)
	// A struct's field count is the length of its type's field table.
	st := (*structType)(unsafe.Pointer(v.typ))
	return len(st.fields)
}

// OverflowComplex reports whether the complex128 x cannot be represented by v's type.
// It panics if v's Kind is not Complex64 or Complex128.
func (v Value) OverflowComplex(x complex128) bool {
	switch v.kind() {
	case Complex128:
		// complex128 holds any complex128.
		return false
	case Complex64:
		// Overflows iff either component overflows float32.
		return overflowFloat32(real(x)) || overflowFloat32(imag(x))
	}
	panic(&ValueError{"reflect.Value.OverflowComplex", v.kind()})
}

// OverflowFloat reports whether the float64 x cannot be represented by v's type.
// It panics if v's Kind is not Float32 or Float64.
func (v Value) OverflowFloat(x float64) bool {
	switch v.kind() {
	case Float64:
		// float64 holds any float64.
		return false
	case Float32:
		return overflowFloat32(x)
	}
	panic(&ValueError{"reflect.Value.OverflowFloat", v.kind()})
}

func overflowFloat32(x float64) bool {
	if x < 0 {
		x = -x
	}
	return math.MaxFloat32 < x && x <= math.MaxFloat64
}

// OverflowInt reports whether the int64 x cannot be represented by v's type.
// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64.
func (v Value) OverflowInt(x int64) bool {
	switch v.kind() {
	case Int, Int8, Int16, Int32, Int64:
		// Truncate x to the type's bit width and sign-extend it back;
		// x overflows iff the round trip changes the value.
		bits := v.typ.size * 8
		return x != x<<(64-bits)>>(64-bits)
	}
	panic(&ValueError{"reflect.Value.OverflowInt", v.kind()})
}

// OverflowUint reports whether the uint64 x cannot be represented by v's type.
// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64.
func (v Value) OverflowUint(x uint64) bool {
	switch v.kind() {
	case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64:
		// Truncate x to the type's bit width and zero-extend it back;
		// x overflows iff the round trip changes the value.
		bits := v.typ.size * 8
		return x != x<<(64-bits)>>(64-bits)
	}
	panic(&ValueError{"reflect.Value.OverflowUint", v.kind()})
}

// Pointer returns v's value as a uintptr.
// It returns uintptr instead of unsafe.Pointer so that
// code using reflect cannot obtain unsafe.Pointers
// without importing the unsafe package explicitly.
// It panics if v's Kind is not Chan, Func, Map, Ptr, Slice, or UnsafePointer.
//
// If v's Kind is Func, the returned pointer is an underlying
// code pointer, but not necessarily enough to identify a
// single function uniquely. The only guarantee is that the
// result is zero if and only if v is a nil func Value.
//
// If v's Kind is Slice, the returned pointer is to the first
// element of the slice.  If the slice is nil the returned value
// is 0.  If the slice is empty but non-nil the return value is non-zero.
func (v Value) Pointer() uintptr {
	// TODO: deprecate
	k := v.kind()
	switch k {
	case Chan, Map, Ptr, UnsafePointer:
		// These kinds store a single pointer word; v.pointer() yields it.
		return uintptr(v.pointer())
	case Func:
		if v.flag&flagMethod != 0 {
			// As the doc comment says, the returned pointer is an
			// underlying code pointer but not necessarily enough to
			// identify a single function uniquely. All method expressions
			// created via reflect have the same underlying code pointer,
			// so their Pointers are equal. The function used here must
			// match the one used in makeMethodValue.
			f := methodValueCall
			// &f is a pointer to the func value, which in turn points at
			// the code pointer — hence the double dereference.
			return **(**uintptr)(unsafe.Pointer(&f))
		}
		p := v.pointer()
		// Non-nil func value points at data block.
		// First word of data block is actual code.
		if p != nil {
			p = *(*unsafe.Pointer)(p)
		}
		return uintptr(p)

	case Slice:
		// The slice header's Data field is the address of element 0
		// (0 for a nil slice).
		return (*SliceHeader)(v.ptr).Data
	}
	panic(&ValueError{"reflect.Value.Pointer", v.kind()})
}

// Recv receives and returns a value from the channel v.
// It panics if v's Kind is not Chan.
// The receive blocks until a value is ready.
// The boolean value ok is true if the value x corresponds to a send
// on the channel, false if it is a zero value received because the channel is closed.
func (v Value) Recv() (x Value, ok bool) {
	v.mustBe(Chan)
	v.mustBeExported()
	// nb=false selects the blocking form of the internal receive.
	return v.recv(false)
}

// internal recv, possibly non-blocking (nb).
// v is known to be a channel.
// When nb is true and no value is ready, ok is false and val is the
// zero Value.
func (v Value) recv(nb bool) (val Value, ok bool) {
	tt := (*chanType)(unsafe.Pointer(v.typ))
	if ChanDir(tt.dir)&RecvDir == 0 {
		panic("reflect: recv on send-only channel")
	}
	t := tt.elem
	val = Value{t, nil, flag(t.Kind())}
	// p is where chanrecv writes the received element.
	var p unsafe.Pointer
	if ifaceIndir(t) {
		// Element is stored indirectly: allocate fresh storage for it
		// and record the indirection in the flag.
		p = unsafe_New(t)
		val.ptr = p
		val.flag |= flagIndir
	} else {
		// Element fits in the pointer word: receive directly into val.ptr.
		p = unsafe.Pointer(&val.ptr)
	}
	selected, ok := chanrecv(v.typ, v.pointer(), nb, p)
	if !selected {
		// Non-blocking receive and nothing was ready: report zero Value.
		val = Value{}
	}
	return
}

// Send sends x on the channel v.
// It panics if v's kind is not Chan or if x's type is not the same type as v's element type.
// As in Go, x's value must be assignable to the channel's element type.
func (v Value) Send(x Value) {
	v.mustBe(Chan)
	v.mustBeExported()
	// nb=false selects the blocking form of the internal send.
	v.send(x, false)
}

// internal send, possibly non-blocking.
// v is known to be a channel.
// Returns whether the send happened (always true when nb is false).
func (v Value) send(x Value, nb bool) (selected bool) {
	tt := (*chanType)(unsafe.Pointer(v.typ))
	if ChanDir(tt.dir)&SendDir == 0 {
		panic("reflect: send on recv-only channel")
	}
	x.mustBeExported()
	// Convert x to the channel's element type; panics if not assignable.
	x = x.assignTo("reflect.Value.Send", tt.elem, nil)
	// p must point at the element data: the indirect storage when x is
	// stored indirectly, otherwise the word held in x.ptr itself.
	var p unsafe.Pointer
	if x.flag&flagIndir != 0 {
		p = x.ptr
	} else {
		p = unsafe.Pointer(&x.ptr)
	}
	return chansend(v.typ, v.pointer(), p, nb)
}

// Set assigns x to the value v.
// It panics if CanSet returns false.
// As in Go, x's value must be assignable to v's type.
func (v Value) Set(x Value) {
	v.mustBeAssignable()
	x.mustBeExported() // do not let unexported x leak
	var target unsafe.Pointer
	if v.kind() == Interface {
		// NOTE(review): target seems to give assignTo in-place storage for
		// building the interface word — confirm against assignTo's contract.
		target = v.ptr
	}
	x = x.assignTo("reflect.Set", v.typ, target)
	if x.flag&flagIndir != 0 {
		// x's data lives behind x.ptr; copy it into v's storage.
		typedmemmove(v.typ, v.ptr, x.ptr)
	} else {
		// x's data is the single word in x.ptr; store it directly.
		*(*unsafe.Pointer)(v.ptr) = x.ptr
	}
}

// SetBool sets v's underlying value.
// It panics if v's Kind is not Bool or if CanSet() is false.
func (v Value) SetBool(x bool) {
	v.mustBeAssignable()
	v.mustBe(Bool)
	// After the checks above, v.ptr addresses the bool storage directly.
	*(*bool)(v.ptr) = x
}

// SetBytes sets v's underlying value.
// It panics if v's underlying value is not a slice of bytes.
func (v Value) SetBytes(x []byte) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	// The element check is by Kind, so named types whose element kind is
	// Uint8 are accepted as well.
	if v.typ.Elem().Kind() != Uint8 {
		panic("reflect.Value.SetBytes of non-byte slice")
	}
	*(*[]byte)(v.ptr) = x
}

// setRunes sets v's underlying value.
// It panics if v's underlying value is not a slice of runes (int32s).
func (v Value) setRunes(x []rune) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	// The element check is by Kind, so any slice whose element kind is
	// Int32 is accepted.
	if v.typ.Elem().Kind() != Int32 {
		panic("reflect.Value.setRunes of non-rune slice")
	}
	*(*[]rune)(v.ptr) = x
}

// SetComplex sets v's underlying value to x.
// It panics if v's Kind is not Complex64 or Complex128, or if CanSet() is false.
func (v Value) SetComplex(x complex128) {
	v.mustBeAssignable()
	k := v.kind()
	switch k {
	case Complex64:
		// Narrow to complex64 storage.
		*(*complex64)(v.ptr) = complex64(x)
	case Complex128:
		*(*complex128)(v.ptr) = x
	default:
		panic(&ValueError{"reflect.Value.SetComplex", k})
	}
}

// SetFloat sets v's underlying value to x.
// It panics if v's Kind is not Float32 or Float64, or if CanSet() is false.
func (v Value) SetFloat(x float64) {
	v.mustBeAssignable()
	k := v.kind()
	switch k {
	case Float32:
		// Narrow to float32 storage.
		*(*float32)(v.ptr) = float32(x)
	case Float64:
		*(*float64)(v.ptr) = x
	default:
		panic(&ValueError{"reflect.Value.SetFloat", k})
	}
}

// SetInt sets v's underlying value to x.
// It panics if v's Kind is not Int, Int8, Int16, Int32, or Int64, or if CanSet() is false.
func (v Value) SetInt(x int64) {
	v.mustBeAssignable()
	k := v.kind()
	// Store through a pointer of the exact width, truncating as needed.
	switch k {
	case Int:
		*(*int)(v.ptr) = int(x)
	case Int8:
		*(*int8)(v.ptr) = int8(x)
	case Int16:
		*(*int16)(v.ptr) = int16(x)
	case Int32:
		*(*int32)(v.ptr) = int32(x)
	case Int64:
		*(*int64)(v.ptr) = x
	default:
		panic(&ValueError{"reflect.Value.SetInt", k})
	}
}

// SetLen sets v's length to n.
// It panics if v's Kind is not Slice or if n is negative or
// greater than the capacity of the slice.
func (v Value) SetLen(n int) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	s := (*sliceHeader)(v.ptr)
	// The new length must lie in [0, cap].
	if n < 0 || n > int(s.Cap) {
		panic("reflect: slice length out of range in SetLen")
	}
	s.Len = n
}

// SetCap sets v's capacity to n.
// It panics if v's Kind is not Slice or if n is smaller than the length or
// greater than the capacity of the slice.
func (v Value) SetCap(n int) {
	v.mustBeAssignable()
	v.mustBe(Slice)
	s := (*sliceHeader)(v.ptr)
	// The new capacity must lie in [len, cap]; it can only shrink.
	if n < int(s.Len) || int(s.Cap) < n {
		panic("reflect: slice capacity out of range in SetCap")
	}
	s.Cap = n
}

// SetMapIndex sets the value associated with key in the map v to val.
// It panics if v's Kind is not Map.
// If val is the zero Value, SetMapIndex deletes the key from the map.
// Otherwise if v holds a nil map, SetMapIndex will panic.
// As in Go, key's value must be assignable to the map's key type,
// and val's value must be assignable to the map's value type.
func (v Value) SetMapIndex(key, val Value) {
	v.mustBe(Map)
	v.mustBeExported()
	key.mustBeExported()
	tt := (*mapType)(unsafe.Pointer(v.typ))
	// Convert key to the map's key type; panics if not assignable.
	key = key.assignTo("reflect.Value.SetMapIndex", tt.key, nil)
	// k points at the key data: the indirect storage when the key is
	// stored indirectly, otherwise the word held in key.ptr itself.
	var k unsafe.Pointer
	if key.flag&flagIndir != 0 {
		k = key.ptr
	} else {
		k = unsafe.Pointer(&key.ptr)
	}
	if val.typ == nil {
		// A zero Value for val means: delete the entry.
		mapdelete(v.typ, v.pointer(), k)
		return
	}
	val.mustBeExported()
	val = val.assignTo("reflect.Value.SetMapIndex", tt.elem, nil)
	// e points at the element data, mirroring k above.
	var e unsafe.Pointer
	if val.flag&flagIndir != 0 {
		e = val.ptr
	} else {
		e = unsafe.Pointer(&val.ptr)
	}
	mapassign(v.typ, v.pointer(), k, e)
}

// SetUint sets v's underlying value to x.
// It panics if v's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64, or if CanSet() is false.
func (v Value) SetUint(x uint64) {
	v.mustBeAssignable()
	k := v.kind()
	// Store through a pointer of the exact width, truncating as needed.
	switch k {
	case Uint:
		*(*uint)(v.ptr) = uint(x)
	case Uint8:
		*(*uint8)(v.ptr) = uint8(x)
	case Uint16:
		*(*uint16)(v.ptr) = uint16(x)
	case Uint32:
		*(*uint32)(v.ptr) = uint32(x)
	case Uint64:
		*(*uint64)(v.ptr) = x
	case Uintptr:
		*(*uintptr)(v.ptr) = uintptr(x)
	default:
		panic(&ValueError{"reflect.Value.SetUint", k})
	}
}

// SetPointer sets the unsafe.Pointer value v to x.
// It panics if v's Kind is not UnsafePointer.
func (v Value) SetPointer(x unsafe.Pointer) {
	v.mustBeAssignable()
	v.mustBe(UnsafePointer)
	// After the checks above, v.ptr addresses the pointer word directly.
	*(*unsafe.Pointer)(v.ptr) = x
}

// SetString sets v's underlying value to x.
// It panics if v's Kind is not String or if CanSet() is false.
func (v Value) SetString(x string) {
	v.mustBeAssignable()
	v.mustBe(String)
	// After the checks above, v.ptr addresses the string header directly.
	*(*string)(v.ptr) = x
}

// Slice returns v[i:j].
// It panics if v's Kind is not Array, Slice or String, or if v is an unaddressable array,
// or if the indexes are out of bounds.
func (v Value) Slice(i, j int) Value {
	var (
		cap  int
		typ 
Download .txt
gitextract_dzd0zkcs/

├── .gitignore
├── .vscode/
│   ├── launch.json
│   ├── settings.json
│   └── tasks.json
├── .vscodeignore
├── LICENSE.md
├── README.md
├── TODO.md
├── azure-pipelines.yml
├── examples/
│   ├── cpp/
│   │   ├── marker-index.h
│   │   └── rule.cc
│   ├── go/
│   │   ├── letter_test.go
│   │   ├── no_newline_at_eof.go
│   │   ├── proc.go
│   │   ├── small.go
│   │   ├── type_switch.go
│   │   └── value.go
│   ├── javascript/
│   │   ├── destructuring.js
│   │   ├── expressions.js
│   │   ├── literals.js
│   │   ├── semicolon_insertion.js
│   │   └── statements.js
│   ├── ruby/
│   │   ├── classes.rb
│   │   ├── comments.rb
│   │   ├── control-flow.rb
│   │   ├── declarations.rb
│   │   ├── expressions.rb
│   │   ├── literals.rb
│   │   └── statements.rb
│   ├── rust/
│   │   ├── ast.rs
│   │   ├── keywords.txt
│   │   └── scratch.rs
│   └── typescript/
│       ├── keywords.txt
│       ├── parser.ts
│       └── small.ts
├── package.json
├── parsers/
│   ├── tree-sitter-cpp.wasm
│   ├── tree-sitter-go.wasm
│   ├── tree-sitter-javascript.wasm
│   ├── tree-sitter-ruby.wasm
│   ├── tree-sitter-rust.wasm
│   └── tree-sitter-typescript.wasm
├── scripts/
│   ├── build.sh
│   └── gen-parsers.sh
├── src/
│   ├── benchmark.ts
│   ├── colors.ts
│   ├── extension.ts
│   ├── print.ts
│   ├── scopes.ts
│   └── test.ts
├── textmate/
│   ├── cpp.tmLanguage.json
│   ├── go.tmLanguage.json
│   ├── ruby.tmLanguage.json
│   ├── rust.tmLanguage.json
│   └── typescript.tmLanguage.json
├── tsconfig.json
└── tslint.json
Download .txt
SYMBOL INDEX (1059 symbols across 25 files)

FILE: examples/cpp/marker-index.h
  type SpliceResult (line 15) | struct SpliceResult {
  type Boundary (line 22) | struct Boundary {
  type BoundaryQueryResult (line 28) | struct BoundaryQueryResult {
  type Node (line 60) | struct Node {

FILE: examples/cpp/rule.cc
  type tree_sitter (line 4) | namespace tree_sitter {
    type rules (line 5) | namespace rules {
      function destroy_value (line 19) | static void destroy_value(Rule *rule) {
      function Rule (line 34) | Rule &Rule::operator=(const Rule &other) {
      function Rule (line 72) | Rule &Rule::operator=(Rule &&other) noexcept {
      function Symbol (line 142) | const Symbol & Rule::get_unchecked<Symbol>() const { return symbol_; }
      function add_choice_element (line 144) | static inline void add_choice_element(std::vector<Rule> *elements, c...
      function Rule (line 161) | Rule Rule::choice(const vector<Rule> &rules) {
      function Rule (line 169) | Rule Rule::repeat(const Rule &rule) {
      function Rule (line 173) | Rule Rule::seq(const vector<Rule> &rules) {
  type std (line 198) | namespace std {

FILE: examples/go/letter_test.go
  type caseT (line 116) | type caseT struct
  function TestIsLetter (line 237) | func TestIsLetter(t *testing.T) {
  function TestIsUpper (line 255) | func TestIsUpper(t *testing.T) {
  function caseString (line 273) | func caseString(c int) string {
  function TestTo (line 285) | func TestTo(t *testing.T) {
  function TestToUpperCase (line 294) | func TestToUpperCase(t *testing.T) {
  function TestToLowerCase (line 306) | func TestToLowerCase(t *testing.T) {
  function TestToTitleCase (line 318) | func TestToTitleCase(t *testing.T) {
  function TestIsSpace (line 330) | func TestIsSpace(t *testing.T) {
  function TestLetterOptimizations (line 345) | func TestLetterOptimizations(t *testing.T) {
  function TestTurkishCase (line 374) | func TestTurkishCase(t *testing.T) {
  function TestSimpleFold (line 424) | func TestSimpleFold(t *testing.T) {
  function TestCalibrate (line 449) | func TestCalibrate(t *testing.T) {
  function fakeTable (line 490) | func fakeTable(n int) []Range16 {
  function linear (line 498) | func linear(ranges []Range16, r uint16) bool {
  function binary (line 511) | func binary(ranges []Range16, r uint16) bool {
  function TestLatinOffset (line 530) | func TestLatinOffset(t *testing.T) {

FILE: examples/go/no_newline_at_eof.go
  function main (line 9) | func main() {

FILE: examples/go/proc.go
  function runtime_init (line 84) | func runtime_init()
  function main_init (line 87) | func main_init()
  function main_main (line 96) | func main_main()
  function main (line 105) | func main() {
  function os_beforeExit (line 210) | func os_beforeExit() {
  function init (line 217) | func init() {
  function forcegchelper (line 221) | func forcegchelper() {
  function Gosched (line 242) | func Gosched() {
  function gopark (line 248) | func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, ...
  function goparkunlock (line 267) | func goparkunlock(lock *mutex, reason string, traceEv byte, traceskip in...
  function goready (line 271) | func goready(gp *g, traceskip int) {
  function acquireSudog (line 278) | func acquireSudog() *sudog {
  function releaseSudog (line 316) | func releaseSudog(s *sudog) {
  function funcPC (line 365) | func funcPC(f interface{}) uintptr {
  function badmcall (line 370) | func badmcall(fn func(*g)) {
  function badmcall2 (line 374) | func badmcall2(fn func(*g)) {
  function badreflectcall (line 378) | func badreflectcall() {
  function lockedOSThread (line 382) | func lockedOSThread() bool {
  function allgadd (line 392) | func allgadd(gp *g) {
  constant _GoidCacheBatch (line 406) | _GoidCacheBatch = 16
  function schedinit (line 417) | func schedinit() {
  function dumpgstatus (line 463) | func dumpgstatus(gp *g) {
  function checkmcount (line 469) | func checkmcount() {
  function mcommoninit (line 477) | func mcommoninit(mp *m) {
  function ready (line 510) | func ready(gp *g, traceskip int) {
  function gcprocs (line 537) | func gcprocs() int32 {
  function needaddgcproc (line 555) | func needaddgcproc() bool {
  function helpgc (line 569) | func helpgc(nproc int32) {
  constant freezeStopWait (line 592) | freezeStopWait = 0x7fffffff
  function freezetheworld (line 597) | func freezetheworld() {
  function isscanstatus (line 617) | func isscanstatus(status uint32) bool {
  function readgstatus (line 627) | func readgstatus(gp *g) uint32 {
  function casfrom_Gscanstatus (line 647) | func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
  function castogscanstatus (line 680) | func castogscanstatus(gp *g, oldval, newval uint32) bool {
  function casgstatus (line 703) | func casgstatus(gp *g, oldval, newval uint32) {
  function casgcopystack (line 747) | func casgcopystack(gp *g) uint32 {
  function scang (line 762) | func scang(gp *g) {
  function restartg (line 832) | func restartg(gp *g) {
  function stopTheWorld (line 874) | func stopTheWorld(reason string) {
  function startTheWorld (line 881) | func startTheWorld() {
  function stopTheWorldWithSema (line 915) | func stopTheWorldWithSema() {
  function mhelpgc (line 978) | func mhelpgc() {
  function startTheWorldWithSema (line 983) | func startTheWorldWithSema() {
  function mstart (line 1048) | func mstart() {
  function mstart1 (line 1068) | func mstart1() {
  function forEachP (line 1119) | func forEachP(fn func(*p)) {
  function runSafePointFn (line 1213) | func runSafePointFn() {
  type cgothreadstart (line 1235) | type cgothreadstart struct
  function allocm (line 1247) | func allocm(_p_ *p, fn func()) *m {
  function needm (line 1311) | func needm(x byte) {
  function newextram (line 1365) | func newextram() {
  function dropm (line 1425) | func dropm() {
  function getm (line 1451) | func getm() uintptr {
  function lockextra (line 1463) | func lockextra(nilokay bool) *m {
  function unlockextra (line 1487) | func unlockextra(mp *m) {
  function newm (line 1495) | func newm(fn func(), _p_ *p) {
  function stopm (line 1518) | func stopm() {
  function mspinning (line 1548) | func mspinning() {
  function startm (line 1559) | func startm(_p_ *p, spinning bool) {
  function handoffp (line 1604) | func handoffp(_p_ *p) {
  function wakep (line 1659) | func wakep() {
  function stoplockedm (line 1669) | func stoplockedm() {
  function startlockedm (line 1697) | func startlockedm(gp *g) {
  function gcstopm (line 1717) | func gcstopm() {
  function execute (line 1746) | func execute(gp *g, inheritTime bool) {
  function findrunnable (line 1795) | func findrunnable() (gp *g, inheritTime bool) {
  function resetspinning (line 1977) | func resetspinning() {
  function injectglist (line 1997) | func injectglist(glist *g) {
  function schedule (line 2022) | func schedule() {
  function dropg (line 2099) | func dropg() {
  function parkunlock_c (line 2108) | func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
  function park_m (line 2114) | func park_m(gp *g) {
  function goschedImpl (line 2140) | func goschedImpl(gp *g) {
  function gosched_m (line 2156) | func gosched_m(gp *g) {
  function gopreempt_m (line 2163) | func gopreempt_m(gp *g) {
  function goexit1 (line 2171) | func goexit1() {
  function goexit0 (line 2182) | func goexit0(gp *g) {
  function save (line 2212) | func save(pc, sp uintptr) {
  function reentersyscall (line 2260) | func reentersyscall(pc, sp uintptr) {
  function entersyscall (line 2324) | func entersyscall(dummy int32) {
  function entersyscall_sysmon (line 2328) | func entersyscall_sysmon() {
  function entersyscall_gcwait (line 2337) | func entersyscall_gcwait() {
  function entersyscallblock (line 2357) | func entersyscallblock(dummy int32) {
  function entersyscallblock_handoff (line 2398) | func entersyscallblock_handoff() {
  function exitsyscall (line 2411) | func exitsyscall(dummy int32) {
  function exitsyscallfast (line 2486) | func exitsyscallfast() bool {
  function exitsyscallfast_pidle (line 2544) | func exitsyscallfast_pidle() bool {
  function exitsyscall0 (line 2561) | func exitsyscall0(gp *g) {
  function beforefork (line 2588) | func beforefork() {
  function syscall_runtime_BeforeFork (line 2608) | func syscall_runtime_BeforeFork() {
  function afterfork (line 2612) | func afterfork() {
  function syscall_runtime_AfterFork (line 2628) | func syscall_runtime_AfterFork() {
  function malg (line 2633) | func malg(stacksize int32) *g {
  function newproc (line 2654) | func newproc(siz int32, fn *funcval) {
  function newproc1 (line 2666) | func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc...
  function gfput (line 2755) | func gfput(_p_ *p, gp *g) {
  function gfget (line 2795) | func gfget(_p_ *p) *g {
  function gfpurge (line 2834) | func gfpurge(_p_ *p) {
  function Breakpoint (line 2848) | func Breakpoint() {
  function dolockOSThread (line 2856) | func dolockOSThread() {
  function LockOSThread (line 2867) | func LockOSThread() {
  function lockOSThread (line 2873) | func lockOSThread() {
  function dounlockOSThread (line 2882) | func dounlockOSThread() {
  function UnlockOSThread (line 2895) | func UnlockOSThread() {
  function unlockOSThread (line 2901) | func unlockOSThread() {
  function badunlockosthread (line 2910) | func badunlockosthread() {
  function gcount (line 2914) | func gcount() int32 {
  function mcount (line 2932) | func mcount() int32 {
  function _System (line 2941) | func _System()       { _System() }
  function _ExternalCode (line 2942) | func _ExternalCode() { _ExternalCode() }
  function _GC (line 2943) | func _GC()           { _GC() }
  function sigprof (line 2946) | func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
  function setsSP (line 3103) | func setsSP(pc uintptr) bool {
  function setcpuprofilerate_m (line 3118) | func setcpuprofilerate_m(hz int32) {
  function procresize (line 3155) | func procresize(nprocs int32) *p {
  function acquirep (line 3289) | func acquirep(_p_ *p) {
  function acquirep1 (line 3303) | func acquirep1(_p_ *p) {
  function releasep (line 3323) | func releasep() *p {
  function incidlelocked (line 3344) | func incidlelocked(v int32) {
  function checkdead (line 3355) | func checkdead() {
  function sysmon (line 3439) | func sysmon() {
  constant forcePreemptNS (line 3544) | forcePreemptNS = 10 * 1000 * 1000
  function retake (line 3546) | func retake(now int64) uint32 {
  function preemptall (line 3606) | func preemptall() bool {
  function preemptone (line 3630) | func preemptone(_p_ *p) bool {
  function schedtrace (line 3652) | func schedtrace(detailed bool) {
  function mput (line 3741) | func mput(mp *m) {
  function mget (line 3752) | func mget() *m {
  function globrunqput (line 3765) | func globrunqput(gp *g) {
  function globrunqputhead (line 3780) | func globrunqputhead(gp *g) {
  function globrunqputbatch (line 3791) | func globrunqputbatch(ghead *g, gtail *g, n int32) {
  function globrunqget (line 3804) | func globrunqget(_p_ *p, max int32) *g {
  function pidleput (line 3840) | func pidleput(_p_ *p) {
  function pidleget (line 3853) | func pidleget() *p {
  function runqempty (line 3864) | func runqempty(_p_ *p) bool {
  constant randomizeScheduler (line 3877) | randomizeScheduler = raceenabled
  function runqput (line 3884) | func runqput(_p_ *p, gp *g, next bool) {
  function runqputslow (line 3919) | func runqputslow(_p_ *p, gp *g, h, t uint32) bool {
  function runqget (line 3959) | func runqget(_p_ *p) (gp *g, inheritTime bool) {
  function runqgrab (line 3988) | func runqgrab(_p_ *p, batch *[256]guintptr, batchHead uint32, stealRunNe...
  function runqsteal (line 4031) | func runqsteal(_p_, p2 *p, stealRunNextG bool) *g {
  function testSchedLocalQueue (line 4050) | func testSchedLocalQueue() {
  function testSchedLocalQueueSteal (line 4072) | func testSchedLocalQueueSteal() {
  function setMaxThreads (line 4116) | func setMaxThreads(in int) (out int) {
  function haveexperiment (line 4125) | func haveexperiment(name string) bool {
  function procPin (line 4143) | func procPin() int {
  function procUnpin (line 4152) | func procUnpin() {
  function sync_runtime_procPin (line 4159) | func sync_runtime_procPin() int {
  function sync_runtime_procUnpin (line 4165) | func sync_runtime_procUnpin() {
  function sync_atomic_runtime_procPin (line 4171) | func sync_atomic_runtime_procPin() int {
  function sync_atomic_runtime_procUnpin (line 4177) | func sync_atomic_runtime_procUnpin() {
  function sync_runtime_canSpin (line 4184) | func sync_runtime_canSpin(i int) bool {
  function sync_runtime_doSpin (line 4201) | func sync_runtime_doSpin() {

FILE: examples/go/small.go
  type Person (line 3) | type Person struct
    method GetName (line 12) | func (self *Person) GetName() string {
    method GetMom (line 16) | func (self *Person) GetMom() *Person {
  function NewPerson (line 8) | func NewPerson(name string, mom *Person) Person {
  function main (line 25) | func main() {

FILE: examples/go/type_switch.go
  function f (line 3) | func f(a interface{}) {

FILE: examples/go/value.go
  constant ptrSize (line 13) | ptrSize = 4 << (^uintptr(0) >> 63)
  constant cannotSet (line 14) | cannotSet = "cannot set value obtained from unexported struct field"
  type Value (line 37) | type Value struct
    method pointer (line 86) | func (v Value) pointer() unsafe.Pointer {
    method Addr (line 238) | func (v Value) Addr() Value {
    method Bool (line 247) | func (v Value) Bool() bool {
    method Bytes (line 254) | func (v Value) Bytes() []byte {
    method runes (line 265) | func (v Value) runes() []rune {
    method CanAddr (line 279) | func (v Value) CanAddr() bool {
    method CanSet (line 288) | func (v Value) CanSet() bool {
    method Call (line 300) | func (v Value) Call(in []Value) []Value {
    method CallSlice (line 313) | func (v Value) CallSlice(in []Value) []Value {
    method call (line 321) | func (v Value) call(op string, in []Value) []Value {
    method Cap (line 664) | func (v Value) Cap() int {
    method Close (line 680) | func (v Value) Close() {
    method Complex (line 688) | func (v Value) Complex() complex128 {
    method Elem (line 703) | func (v Value) Elem() Value {
    method Field (line 740) | func (v Value) Field(i int) Value {
    method FieldByIndex (line 772) | func (v Value) FieldByIndex(index []int) Value {
    method FieldByName (line 794) | func (v Value) FieldByName(name string) Value {
    method FieldByNameFunc (line 806) | func (v Value) FieldByNameFunc(match func(string) bool) Value {
    method Float (line 815) | func (v Value) Float() float64 {
    method Index (line 830) | func (v Value) Index(i int) Value {
    method Int (line 876) | func (v Value) Int() int64 {
    method CanInterface (line 895) | func (v Value) CanInterface() bool {
    method Interface (line 907) | func (v Value) Interface() (i interface{}) {
    method InterfaceData (line 943) | func (v Value) InterfaceData() [2]uintptr {
    method IsNil (line 961) | func (v Value) IsNil() bool {
    method IsValid (line 986) | func (v Value) IsValid() bool {
    method Kind (line 992) | func (v Value) Kind() Kind {
    method Len (line 998) | func (v Value) Len() int {
    method MapIndex (line 1022) | func (v Value) MapIndex(key Value) Value {
    method MapKeys (line 1063) | func (v Value) MapKeys() []Value {
    method Method (line 1104) | func (v Value) Method(i int) Value {
    method NumMethod (line 1121) | func (v Value) NumMethod() int {
    method MethodByName (line 1136) | func (v Value) MethodByName(name string) Value {
    method NumField (line 1152) | func (v Value) NumField() int {
    method OverflowComplex (line 1160) | func (v Value) OverflowComplex(x complex128) bool {
    method OverflowFloat (line 1173) | func (v Value) OverflowFloat(x float64) bool {
    method OverflowInt (line 1193) | func (v Value) OverflowInt(x int64) bool {
    method OverflowUint (line 1206) | func (v Value) OverflowUint(x uint64) bool {
    method Pointer (line 1231) | func (v Value) Pointer() uintptr {
    method Recv (line 1267) | func (v Value) Recv() (x Value, ok bool) {
    method recv (line 1275) | func (v Value) recv(nb bool) (val Value, ok bool) {
    method Send (line 1300) | func (v Value) Send(x Value) {
    method send (line 1308) | func (v Value) send(x Value, nb bool) (selected bool) {
    method Set (line 1327) | func (v Value) Set(x Value) {
    method SetBool (line 1344) | func (v Value) SetBool(x bool) {
    method SetBytes (line 1352) | func (v Value) SetBytes(x []byte) {
    method setRunes (line 1363) | func (v Value) setRunes(x []rune) {
    method SetComplex (line 1374) | func (v Value) SetComplex(x complex128) {
    method SetFloat (line 1388) | func (v Value) SetFloat(x float64) {
    method SetInt (line 1402) | func (v Value) SetInt(x int64) {
    method SetLen (line 1423) | func (v Value) SetLen(n int) {
    method SetCap (line 1436) | func (v Value) SetCap(n int) {
    method SetMapIndex (line 1452) | func (v Value) SetMapIndex(key, val Value) {
    method SetUint (line 1481) | func (v Value) SetUint(x uint64) {
    method SetPointer (line 1503) | func (v Value) SetPointer(x unsafe.Pointer) {
    method SetString (line 1511) | func (v Value) SetString(x string) {
    method Slice (line 1520) | func (v Value) Slice(i, j int) Value {
    method Slice3 (line 1579) | func (v Value) Slice3(i, j, k int) Value {
    method String (line 1634) | func (v Value) String() string {
    method TryRecv (line 1651) | func (v Value) TryRecv() (x Value, ok bool) {
    method TrySend (line 1661) | func (v Value) TrySend(x Value) bool {
    method Type (line 1668) | func (v Value) Type() Type {
    method Uint (line 1701) | func (v Value) Uint() uint64 {
    method UnsafeAddr (line 1724) | func (v Value) UnsafeAddr() uintptr {
    method assignTo (line 2136) | func (v Value) assignTo(context string, dst *rtype, target unsafe.Poin...
    method Convert (line 2170) | func (v Value) Convert(t Type) Value {
  type flag (line 66) | type flag
    method kind (line 80) | func (f flag) kind() Kind {
    method mustBe (line 200) | func (f flag) mustBe(expected Kind) {
    method mustBeExported (line 208) | func (f flag) mustBeExported() {
    method mustBeAssignable (line 220) | func (f flag) mustBeAssignable() {
  constant flagKindWidth (line 69) | flagKindWidth        = 5
  constant flagKindMask (line 70) | flagKindMask    flag = 1<<flagKindWidth - 1
  constant flagStickyRO (line 71) | flagStickyRO    flag = 1 << 5
  constant flagEmbedRO (line 72) | flagEmbedRO     flag = 1 << 6
  constant flagIndir (line 73) | flagIndir       flag = 1 << 7
  constant flagAddr (line 74) | flagAddr        flag = 1 << 8
  constant flagMethod (line 75) | flagMethod      flag = 1 << 9
  constant flagMethodShift (line 76) | flagMethodShift      = 10
  constant flagRO (line 77) | flagRO          flag = flagStickyRO | flagEmbedRO
  function packEface (line 97) | func packEface(v Value) interface{} {
  function unpackEface (line 134) | func unpackEface(i interface{}) Value {
  type ValueError (line 151) | type ValueError struct
    method Error (line 156) | func (e *ValueError) Error() string {
  function methodName (line 165) | func methodName() string {
  type emptyInterface (line 175) | type emptyInterface struct
  type nonEmptyInterface (line 181) | type nonEmptyInterface struct
  function callReflect (line 479) | func callReflect(ctxt *makeFuncImpl, frame unsafe.Pointer) {
  function methodReceiver (line 550) | func methodReceiver(op string, v Value, methodIndex int) (rcvrtype, t *r...
  function storeRcvr (line 588) | func storeRcvr(v Value, p unsafe.Pointer) {
  function align (line 603) | func align(x, n uintptr) uintptr {
  function callMethod (line 618) | func callMethod(ctxt *methodValue, frame unsafe.Pointer) {
  function funcName (line 653) | func funcName(f func([]Value) []Value) string {
  function valueInterface (line 911) | func valueInterface(v Value, safe bool) interface{} {
  function overflowFloat32 (line 1184) | func overflowFloat32(x float64) bool {
  type StringHeader (line 1741) | type StringHeader struct
  type stringHeader (line 1747) | type stringHeader struct
  type SliceHeader (line 1758) | type SliceHeader struct
  type sliceHeader (line 1765) | type sliceHeader struct
  function typesMustMatch (line 1771) | func typesMustMatch(what string, t1, t2 Type) {
  function arrayAt (line 1779) | func arrayAt(p unsafe.Pointer, i int, eltSize uintptr) unsafe.Pointer {
  function grow (line 1785) | func grow(s Value, extra int) (Value, int, int) {
  function Append (line 1813) | func Append(s Value, x ...Value) Value {
  function AppendSlice (line 1824) | func AppendSlice(s, t Value) Value {
  function Copy (line 1838) | func Copy(dst, src Value) int {
  type runtimeSelect (line 1879) | type runtimeSelect struct
  function rselect (line 1891) | func rselect([]runtimeSelect) (chosen int, recvOK bool)
  type SelectDir (line 1894) | type SelectDir
  constant _ (line 1899) | _             SelectDir = iota
  constant SelectSend (line 1900) | SelectSend
  constant SelectRecv (line 1901) | SelectRecv
  constant SelectDefault (line 1902) | SelectDefault
  type SelectCase (line 1922) | type SelectCase struct
  function Select (line 1935) | func Select(cases []SelectCase) (chosen int, recv Value, recvOK bool) {
  function unsafe_New (line 2025) | func unsafe_New(*rtype) unsafe.Pointer
  function unsafe_NewArray (line 2026) | func unsafe_NewArray(*rtype, int) unsafe.Pointer
  function MakeSlice (line 2030) | func MakeSlice(typ Type, len, cap int) Value {
  function MakeChan (line 2049) | func MakeChan(typ Type, buffer int) Value {
  function MakeMap (line 2064) | func MakeMap(typ Type) Value {
  function Indirect (line 2075) | func Indirect(v Value) Value {
  function ValueOf (line 2084) | func ValueOf(i interface{}) Value {
  function Zero (line 2103) | func Zero(typ Type) Value {
  function New (line 2117) | func New(typ Type) Value {
  function NewAt (line 2128) | func NewAt(typ Type, p unsafe.Pointer) Value {
  function convertOp (line 2183) | func convertOp(dst, src *rtype) func(Value, Type) Value {
  function makeInt (line 2266) | func makeInt(f flag, bits uint64, t Type) Value {
  function makeFloat (line 2284) | func makeFloat(f flag, v float64, t Type) Value {
  function makeComplex (line 2298) | func makeComplex(f flag, v complex128, t Type) Value {
  function makeString (line 2310) | func makeString(f flag, v string, t Type) Value {
  function makeBytes (line 2317) | func makeBytes(f flag, v []byte, t Type) Value {
  function makeRunes (line 2324) | func makeRunes(f flag, v []rune, t Type) Value {
  function cvtInt (line 2337) | func cvtInt(v Value, t Type) Value {
  function cvtUint (line 2342) | func cvtUint(v Value, t Type) Value {
  function cvtFloatInt (line 2347) | func cvtFloatInt(v Value, t Type) Value {
  function cvtFloatUint (line 2352) | func cvtFloatUint(v Value, t Type) Value {
  function cvtIntFloat (line 2357) | func cvtIntFloat(v Value, t Type) Value {
  function cvtUintFloat (line 2362) | func cvtUintFloat(v Value, t Type) Value {
  function cvtFloat (line 2367) | func cvtFloat(v Value, t Type) Value {
  function cvtComplex (line 2372) | func cvtComplex(v Value, t Type) Value {
  function cvtIntString (line 2377) | func cvtIntString(v Value, t Type) Value {
  function cvtUintString (line 2382) | func cvtUintString(v Value, t Type) Value {
  function cvtBytesString (line 2387) | func cvtBytesString(v Value, t Type) Value {
  function cvtStringBytes (line 2392) | func cvtStringBytes(v Value, t Type) Value {
  function cvtRunesString (line 2397) | func cvtRunesString(v Value, t Type) Value {
  function cvtStringRunes (line 2402) | func cvtStringRunes(v Value, t Type) Value {
  function cvtDirect (line 2407) | func cvtDirect(v Value, typ Type) Value {
  function cvtT2I (line 2422) | func cvtT2I(v Value, typ Type) Value {
  function cvtI2I (line 2434) | func cvtI2I(v Value, typ Type) Value {
  function chancap (line 2444) | func chancap(ch unsafe.Pointer) int
  function chanclose (line 2445) | func chanclose(ch unsafe.Pointer)
  function chanlen (line 2446) | func chanlen(ch unsafe.Pointer) int
  function chanrecv (line 2457) | func chanrecv(t *rtype, ch unsafe.Pointer, nb bool, val unsafe.Pointer) ...
  function chansend (line 2460) | func chansend(t *rtype, ch unsafe.Pointer, val unsafe.Pointer, nb bool) ...
  function makechan (line 2462) | func makechan(typ *rtype, size uint64) (ch unsafe.Pointer)
  function makemap (line 2463) | func makemap(t *rtype) (m unsafe.Pointer)
  function mapaccess (line 2466) | func mapaccess(t *rtype, m unsafe.Pointer, key unsafe.Pointer) (val unsa...
  function mapassign (line 2469) | func mapassign(t *rtype, m unsafe.Pointer, key, val unsafe.Pointer)
  function mapdelete (line 2472) | func mapdelete(t *rtype, m unsafe.Pointer, key unsafe.Pointer)
  function mapiterinit (line 2477) | func mapiterinit(t *rtype, m unsafe.Pointer) unsafe.Pointer
  function mapiterkey (line 2480) | func mapiterkey(it unsafe.Pointer) (key unsafe.Pointer)
  function mapiternext (line 2483) | func mapiternext(it unsafe.Pointer)
  function maplen (line 2486) | func maplen(m unsafe.Pointer) int
  function call (line 2493) | func call(argtype *rtype, fn, arg unsafe.Pointer, n uint32, retoffset ui...
  function ifaceE2I (line 2495) | func ifaceE2I(t *rtype, src interface{}, dst unsafe.Pointer)
  function typedmemmove (line 2499) | func typedmemmove(t *rtype, dst, src unsafe.Pointer)
  function typedmemmovepartial (line 2504) | func typedmemmovepartial(t *rtype, dst, src unsafe.Pointer, off, size ui...
  function typedslicecopy (line 2509) | func typedslicecopy(elemType *rtype, dst, src sliceHeader) int
  function memclr (line 2512) | func memclr(ptr unsafe.Pointer, n uintptr)
  function escapes (line 2517) | func escapes(x interface{}) {

FILE: examples/javascript/destructuring.js
  function a (line 8) | function a ({b, c}, {d}) {}
  function a (line 20) | function a({b = true}, [c, d = false]) {}
  function b (line 21) | function b({c} = {}) {}

FILE: examples/javascript/expressions.js
  method [methodName] (line 89) | [methodName]() {
  method add (line 101) | add(a, b) {
  method bar (line 105) | get bar() { return c; }
  method bar (line 107) | set bar(a) { c = a; }
  method barGenerator (line 109) | *barGenerator() { yield c; }
  method get (line 111) | get() { return 1; }
  method finally (line 116) | finally() {}
  method catch (line 117) | catch() {}
  class Foo (line 125) | class Foo {
    method one (line 126) | static one(a) { return a; }
    method two (line 127) | two(b) { return b; }
    method three (line 128) | three(c) { return c; }
    method constructor (line 132) | constructor() {
    method bar (line 136) | bar() {
    method catch (line 143) | catch() {}
    method finally (line 144) | finally() {}
    method foo (line 155) | @foo.bar(baz) static foo() {
    method bar (line 248) | async bar() {}
  class Foo (line 131) | class Foo extends require('another-class') {
    method one (line 126) | static one(a) { return a; }
    method two (line 127) | two(b) { return b; }
    method three (line 128) | three(c) { return c; }
    method constructor (line 132) | constructor() {
    method bar (line 136) | bar() {
    method catch (line 143) | catch() {}
    method finally (line 144) | finally() {}
    method foo (line 155) | @foo.bar(baz) static foo() {
    method bar (line 248) | async bar() {}
  class Foo (line 142) | class Foo {
    method one (line 126) | static one(a) { return a; }
    method two (line 127) | two(b) { return b; }
    method three (line 128) | three(c) { return c; }
    method constructor (line 132) | constructor() {
    method bar (line 136) | bar() {
    method catch (line 143) | catch() {}
    method finally (line 144) | finally() {}
    method foo (line 155) | @foo.bar(baz) static foo() {
    method bar (line 248) | async bar() {}
  class Foo (line 148) | class Foo {
    method one (line 126) | static one(a) { return a; }
    method two (line 127) | two(b) { return b; }
    method three (line 128) | three(c) { return c; }
    method constructor (line 132) | constructor() {
    method bar (line 136) | bar() {
    method catch (line 143) | catch() {}
    method finally (line 144) | finally() {}
    method foo (line 155) | @foo.bar(baz) static foo() {
    method bar (line 248) | async bar() {}
  class Foo (line 153) | @eval
    method one (line 126) | static one(a) { return a; }
    method two (line 127) | two(b) { return b; }
    method three (line 128) | three(c) { return c; }
    method constructor (line 132) | constructor() {
    method bar (line 136) | bar() {
    method catch (line 143) | catch() {}
    method finally (line 144) | finally() {}
    method foo (line 155) | @foo.bar(baz) static foo() {
    method bar (line 248) | async bar() {}
  function a (line 200) | function a({b}, c = d, e = f) {
  function foo (line 240) | async function foo() {}
  method bar (line 243) | async bar() {
  class Foo (line 247) | class Foo {
    method one (line 126) | static one(a) { return a; }
    method two (line 127) | two(b) { return b; }
    method three (line 128) | three(c) { return c; }
    method constructor (line 132) | constructor() {
    method bar (line 136) | bar() {
    method catch (line 143) | catch() {}
    method finally (line 144) | finally() {}
    method foo (line 155) | @foo.bar(baz) static foo() {
    method bar (line 248) | async bar() {}

FILE: examples/javascript/semicolon_insertion.js
  function x (line 28) | function x() {}
  function a (line 79) | function a() {b}
  function c (line 80) | function c() {return d}
  function a (line 103) | function a () { function b () {} function *c () {} class D {} return }

FILE: examples/javascript/statements.js
  function name1 (line 23) | function name1() { }
  class Foo (line 34) | class Foo {

FILE: examples/ruby/classes.rb
  class Fred (line 2) | class Fred
    method initialize (line 6) | def initialize(v)
    method set (line 11) | def set(v)
    method get (line 15) | def get
    method inc (line 31) | def inc
  class Fred (line 30) | class Fred
    method initialize (line 6) | def initialize(v)
    method set (line 11) | def set(v)
    method get (line 15) | def get
    method inc (line 31) | def inc
  function dec (line 41) | def b.dec
  function x_to_string (line 86) | def x_to_string
  class Human (line 95) | class Human

FILE: examples/ruby/declarations.rb
  function foo (line 1) | def foo
  function foo? (line 4) | def foo?
  function foo! (line 7) | def foo!
  function foo (line 12) | def foo
  function foo= (line 18) | def foo=
  function ` (line 23) | def `(a)
  function -@ (line 27) | def -@(a)
  function % (line 30) | def %(a)
  function .. (line 33) | def ..(a)
  function !~ (line 36) | def !~(a)
  function / (line 43) | def /(name)
  function / (line 46) | def / name
  function foo (line 52) | def foo
  function foo (line 56) | def foo
  function foo (line 60) | def foo
  function foo (line 66) | def foo(bar)
  function foo (line 69) | def foo(bar); end
  function foo (line 70) | def foo(bar) end
  function foo (line 74) | def foo bar
  function foo (line 79) | def foo(bar, quux)
  function foo (line 84) | def foo bar, quux
  function foo (line 89) | def foo(bar: nil, baz:)
  function foo (line 94) | def foo(bar = nil)
  function foo (line 97) | def foo(bar=nil)
  function foo (line 102) | def foo(*options)
  function foo (line 105) | def foo(x, *options)
  function foo (line 108) | def foo(x, *options, y)
  function foo (line 111) | def foo(**options)
  function foo (line 114) | def foo(name:, **)
  function foo (line 117) | def foo(&block)
  function foo (line 122) | def self.foo
  function foo (line 127) | def self.foo
  function foo (line 134) | def self.foo(bar)
  function foo (line 139) | def self.foo bar
  function foo (line 144) | def self.foo(bar, baz)
  function foo (line 150) | def self.foo bar, baz
  class Foo (line 155) | class Foo
    method bar (line 188) | def bar
    function bar (line 223) | def bar
  class Foo (line 158) | class Foo; end
    method bar (line 188) | def bar
    function bar (line 223) | def bar
  class Foo::Bar (line 160) | class Foo::Bar
  class ::Foo::Bar (line 163) | class ::Foo::Bar
  class Cß (line 166) | class Cß
  class Foo (line 171) | class Foo < Bar
    method bar (line 188) | def bar
    function bar (line 223) | def bar
  class Foo (line 176) | class Foo < Bar::Quux
    method bar (line 188) | def bar
    function bar (line 223) | def bar
  class Foo (line 179) | class Foo < ::Bar
    method bar (line 188) | def bar
    function bar (line 223) | def bar
  class Foo (line 182) | class Foo < Bar::Baz.new(foo)
    method bar (line 188) | def bar
    function bar (line 223) | def bar
  class Foo (line 187) | class Foo
    method bar (line 188) | def bar
    function bar (line 223) | def bar
  class foo()::Bar (line 194) | class foo()::Bar
  type Foo (line 214) | module Foo
    method bar (line 188) | def bar
    function bar (line 223) | def bar
  type Foo::Bar (line 217) | module Foo::Bar
  type Foo (line 222) | module Foo
    method bar (line 188) | def bar
    function bar (line 223) | def bar
  type Foo (line 229) | module Foo end
    method bar (line 188) | def bar
    function bar (line 223) | def bar

FILE: examples/ruby/expressions.rb
  function foo (line 402) | def foo(d, *f, (x, y))
  function foo (line 405) | def foo d, *f, (x, y)

FILE: examples/rust/ast.rs
  type Lifetime (line 37) | pub struct Lifetime {
    method fmt (line 44) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  type LifetimeDef (line 51) | pub struct LifetimeDef {
  type Path (line 64) | pub struct Path {
    method eq (line 72) | fn eq(&self, string: &&'a str) -> bool {
    method fmt (line 78) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    method fmt (line 84) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    method from_ident (line 92) | pub fn from_ident(s: Span, identifier: Ident) -> Path {
    method default_to_global (line 101) | pub fn default_to_global(mut self) -> Path {
    method is_global (line 112) | pub fn is_global(&self) -> bool {
  type PathSegment (line 121) | pub struct PathSegment {
    method from_ident (line 137) | pub fn from_ident(ident: Ident, span: Span) -> Self {
    method crate_root (line 140) | pub fn crate_root(span: Span) -> Self {
  type PathParameters (line 153) | pub enum PathParameters {
    method span (line 161) | pub fn span(&self) -> Span {
  type AngleBracketedParameterData (line 171) | pub struct AngleBracketedParameterData {
    method into (line 185) | fn into(self) -> Option<P<PathParameters>> {
  type ParenthesizedParameterData (line 198) | pub struct ParenthesizedParameterData {
    method into (line 191) | fn into(self) -> Option<P<PathParameters>> {
  type NodeId (line 210) | pub struct NodeId(u32);
    method new (line 213) | pub fn new(x: usize) -> NodeId {
    method from_u32 (line 218) | pub fn from_u32(x: u32) -> NodeId {
    method as_usize (line 222) | pub fn as_usize(&self) -> usize {
    method as_u32 (line 226) | pub fn as_u32(&self) -> u32 {
    method placeholder_from_mark (line 230) | pub fn placeholder_from_mark(mark: Mark) -> Self {
    method placeholder_to_mark (line 234) | pub fn placeholder_to_mark(self) -> Mark {
    method fmt (line 240) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    method default_encode (line 246) | fn default_encode<S: Encoder>(&self, s: &mut S) -> Result<(), S::Error> {
    method default_decode (line 252) | fn default_decode<D: Decoder>(d: &mut D) -> Result<NodeId, D::Error> {
    method new (line 258) | fn new(idx: usize) -> Self {
    method index (line 262) | fn index(self) -> usize {
  constant CRATE_NODE_ID (line 268) | pub const CRATE_NODE_ID: NodeId = NodeId(0);
  constant DUMMY_NODE_ID (line 273) | pub const DUMMY_NODE_ID: NodeId = NodeId(!0);
  type TyParamBound (line 280) | pub enum TyParamBound {
  type TraitBoundModifier (line 288) | pub enum TraitBoundModifier {
  type TyParamBounds (line 293) | pub type TyParamBounds = Vec<TyParamBound>;
  type TyParam (line 296) | pub struct TyParam {
  type GenericParam (line 306) | pub enum GenericParam {
    method is_lifetime_param (line 312) | pub fn is_lifetime_param(&self) -> bool {
    method is_type_param (line 319) | pub fn is_type_param(&self) -> bool {
  type Generics (line 330) | pub struct Generics {
    method is_lt_parameterized (line 337) | pub fn is_lt_parameterized(&self) -> bool {
    method is_type_parameterized (line 341) | pub fn is_type_parameterized(&self) -> bool {
    method is_parameterized (line 345) | pub fn is_parameterized(&self) -> bool {
    method span_for_name (line 349) | pub fn span_for_name(&self, name: &str) -> Option<Span> {
  method default (line 363) | fn default() ->  Generics {
  type WhereClause (line 378) | pub struct WhereClause {
  type WherePredicate (line 386) | pub enum WherePredicate {
  type WhereBoundPredicate (line 399) | pub struct WhereBoundPredicate {
  type WhereRegionPredicate (line 413) | pub struct WhereRegionPredicate {
  type WhereEqPredicate (line 423) | pub struct WhereEqPredicate {
  type CrateConfig (line 432) | pub type CrateConfig = HashSet<(Name, Option<Symbol>)>;
  type Crate (line 435) | pub struct Crate {
  type NestedMetaItem (line 442) | pub type NestedMetaItem = Spanned<NestedMetaItemKind>;
  type NestedMetaItemKind (line 448) | pub enum NestedMetaItemKind {
  type MetaItem (line 461) | pub struct MetaItem {
  type MetaItemKind (line 471) | pub enum MetaItemKind {
  type Block (line 490) | pub struct Block {
  type Pat (line 501) | pub struct Pat {
    method fmt (line 508) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    method to_ty (line 514) | pub(super) fn to_ty(&self) -> Option<P<Ty>> {
    method walk (line 538) | pub fn walk<F>(&self, it: &mut F) -> bool
  type FieldPat (line 579) | pub struct FieldPat {
  type BindingMode (line 589) | pub enum BindingMode {
  type RangeEnd (line 595) | pub enum RangeEnd {
  type RangeSyntax (line 601) | pub enum RangeSyntax {
  type PatKind (line 607) | pub enum PatKind {
  type Mutability (line 652) | pub enum Mutability {
  type BinOpKind (line 658) | pub enum BinOpKind {
    method to_string (line 698) | pub fn to_string(&self) -> &'static str {
    method lazy (line 721) | pub fn lazy(&self) -> bool {
    method is_shift (line 728) | pub fn is_shift(&self) -> bool {
    method is_comparison (line 735) | pub fn is_comparison(&self) -> bool {
    method is_by_value (line 747) | pub fn is_by_value(&self) -> bool {
  type BinOp (line 752) | pub type BinOp = Spanned<BinOpKind>;
  type UnOp (line 755) | pub enum UnOp {
    method is_by_value (line 766) | pub fn is_by_value(u: UnOp) -> bool {
    method to_string (line 773) | pub fn to_string(op: UnOp) -> &'static str {
  type Stmt (line 784) | pub struct Stmt {
    method add_trailing_semicolon (line 791) | pub fn add_trailing_semicolon(mut self) -> Self {
    method is_item (line 802) | pub fn is_item(&self) -> bool {
    method fmt (line 811) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  type StmtKind (line 818) | pub enum StmtKind {
  type MacStmtStyle (line 834) | pub enum MacStmtStyle {
  type Local (line 848) | pub struct Local {
  type Arm (line 869) | pub struct Arm {
  type Field (line 878) | pub struct Field {
  type SpannedIdent (line 886) | pub type SpannedIdent = Spanned<Ident>;
  type BlockCheckMode (line 889) | pub enum BlockCheckMode {
  type UnsafeSource (line 895) | pub enum UnsafeSource {
  type Expr (line 902) | pub struct Expr {
    method returns (line 912) | pub fn returns(&self) -> bool {
    method to_bound (line 934) | fn to_bound(&self) -> Option<TyParamBound> {
    method to_ty (line 943) | pub(super) fn to_ty(&self) -> Option<P<Ty>> {
    method precedence (line 973) | pub fn precedence(&self) -> ExprPrecedence {
    method fmt (line 1018) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  type RangeLimits (line 1025) | pub enum RangeLimits {
  type ExprKind (line 1033) | pub enum ExprKind {
  type QSelf (line 1185) | pub struct QSelf {
  type CaptureBy (line 1192) | pub enum CaptureBy {
  type Mac (line 1197) | pub type Mac = Spanned<Mac_>;
  type Mac_ (line 1206) | pub struct Mac_ {
    method stream (line 1212) | pub fn stream(&self) -> TokenStream {
  type MacroDef (line 1218) | pub struct MacroDef {
    method stream (line 1224) | pub fn stream(&self) -> TokenStream {
  type StrStyle (line 1230) | pub enum StrStyle {
  type Lit (line 1240) | pub type Lit = Spanned<LitKind>;
  type LitIntType (line 1243) | pub enum LitIntType {
  type LitKind (line 1253) | pub enum LitKind {
    method is_str (line 1274) | pub fn is_str(&self) -> bool {
    method is_unsuffixed (line 1283) | pub fn is_unsuffixed(&self) -> bool {
    method is_suffixed (line 1301) | pub fn is_suffixed(&self) -> bool {
  type MutTy (line 1309) | pub struct MutTy {
  type MethodSig (line 1317) | pub struct MethodSig {
  type TraitItem (line 1329) | pub struct TraitItem {
  type TraitItemKind (line 1341) | pub enum TraitItemKind {
  type ImplItem (line 1349) | pub struct ImplItem {
  type ImplItemKind (line 1363) | pub enum ImplItemKind {
  type IntTy (line 1372) | pub enum IntTy {
    method fmt (line 1382) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    method fmt (line 1388) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    method ty_to_string (line 1394) | pub fn ty_to_string(&self) -> &'static str {
    method val_to_string (line 1405) | pub fn val_to_string(&self, val: i128) -> String {
    method bit_width (line 1412) | pub fn bit_width(&self) -> Option<usize> {
  type UintTy (line 1426) | pub enum UintTy {
    method ty_to_string (line 1436) | pub fn ty_to_string(&self) -> &'static str {
    method val_to_string (line 1447) | pub fn val_to_string(&self, val: u128) -> String {
    method bit_width (line 1451) | pub fn bit_width(&self) -> Option<usize> {
    method fmt (line 1464) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    method fmt (line 1470) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  type FloatTy (line 1477) | pub enum FloatTy {
    method fmt (line 1483) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    method fmt (line 1489) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    method ty_to_string (line 1495) | pub fn ty_to_string(&self) -> &'static str {
    method bit_width (line 1502) | pub fn bit_width(&self) -> usize {
  type TypeBinding (line 1512) | pub struct TypeBinding {
  type Ty (line 1520) | pub struct Ty {
    method fmt (line 1527) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  type BareFnTy (line 1533) | pub struct BareFnTy {
  type TyKind (line 1542) | pub enum TyKind {
  type TraitObjectSyntax (line 1585) | pub enum TraitObjectSyntax {
  type AsmDialect (line 1594) | pub enum AsmDialect {
  type InlineAsmOutput (line 1603) | pub struct InlineAsmOutput {
  type InlineAsm (line 1614) | pub struct InlineAsm {
  type Arg (line 1630) | pub struct Arg {
    method to_self (line 1652) | pub fn to_self(&self) -> Option<ExplicitSelf> {
    method is_self (line 1668) | pub fn is_self(&self) -> bool {
    method from_self (line 1676) | pub fn from_self(eself: ExplicitSelf, eself_ident: SpannedIdent) -> Arg {
  type SelfKind (line 1640) | pub enum SelfKind {
  type ExplicitSelf (line 1649) | pub type ExplicitSelf = Spanned<SelfKind>;
  type FnDecl (line 1708) | pub struct FnDecl {
    method get_self (line 1715) | pub fn get_self(&self) -> Option<ExplicitSelf> {
    method has_self (line 1718) | pub fn has_self(&self) -> bool {
  type IsAuto (line 1725) | pub enum IsAuto {
  type Unsafety (line 1731) | pub enum Unsafety {
    method fmt (line 1749) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  type Constness (line 1737) | pub enum Constness {
  type Defaultness (line 1743) | pub enum Defaultness {
  type ImplPolarity (line 1758) | pub enum ImplPolarity {
    method fmt (line 1766) | fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
  type FunctionRetTy (line 1776) | pub enum FunctionRetTy {
    method span (line 1788) | pub fn span(&self) -> Span {
  type Mod (line 1800) | pub struct Mod {
  type ForeignMod (line 1812) | pub struct ForeignMod {
  type GlobalAsm (line 1821) | pub struct GlobalAsm {
  type EnumDef (line 1827) | pub struct EnumDef {
  type Variant_ (line 1832) | pub struct Variant_ {
  type Variant (line 1840) | pub type Variant = Spanned<Variant_>;
  type UseTreeKind (line 1843) | pub enum UseTreeKind {
  type UseTree (line 1850) | pub struct UseTree {
  type AttrStyle (line 1860) | pub enum AttrStyle {
  type AttrId (line 1866) | pub struct AttrId(pub usize);
  type Attribute (line 1871) | pub struct Attribute {
  type TraitRef (line 1887) | pub struct TraitRef {
  type PolyTraitRef (line 1893) | pub struct PolyTraitRef {
    method new (line 1904) | pub fn new(generic_params: Vec<GenericParam>, path: Path, span: Span) ...
  type CrateSugar (line 1914) | pub enum CrateSugar {
  type Visibility (line 1923) | pub enum Visibility {
  type StructField (line 1934) | pub struct StructField {
  type VariantData (line 1955) | pub enum VariantData {
    method fields (line 1971) | pub fn fields(&self) -> &[StructField] {
    method id (line 1977) | pub fn id(&self) -> NodeId {
    method is_struct (line 1982) | pub fn is_struct(&self) -> bool {
    method is_tuple (line 1985) | pub fn is_tuple(&self) -> bool {
    method is_unit (line 1988) | pub fn is_unit(&self) -> bool {
  type Item (line 1997) | pub struct Item {
  type ItemKind (line 2016) | pub enum ItemKind {
    method descriptive_variant (line 2091) | pub fn descriptive_variant(&self) -> &str {
  type ForeignItem (line 2115) | pub struct ForeignItem {
  type ForeignItemKind (line 2126) | pub enum ForeignItemKind {
    method descriptive_variant (line 2137) | pub fn descriptive_variant(&self) -> &str {
  function check_asts_encodable (line 2153) | fn check_asts_encodable() {

FILE: examples/rust/scratch.rs
  function f (line 1) | fn f() {

FILE: examples/typescript/parser.ts
  type SignatureFlags (line 5) | const enum SignatureFlags {
  function createNode (line 22) | function createNode(kind: SyntaxKind, pos?: number, end?: number): Node {
  function visitNode (line 37) | function visitNode<T>(cbNode: (node: Node) => T, node: Node): T | undefi...
  function visitNodes (line 41) | function visitNodes<T>(cbNode: (node: Node) => T, cbNodes: (node: NodeAr...
  function forEachChild (line 68) | function forEachChild<T>(node: Node, cbNode: (node: Node) => T | undefin...
  function createSourceFile (line 494) | function createSourceFile(fileName: string, sourceText: string, language...
  function parseIsolatedEntityName (line 502) | function parseIsolatedEntityName(text: string, languageVersion: ScriptTa...
  function parseJsonText (line 511) | function parseJsonText(fileName: string, sourceText: string): JsonSource...
  function isExternalModule (line 516) | function isExternalModule(file: SourceFile): boolean {
  function updateSourceFile (line 529) | function updateSourceFile(sourceFile: SourceFile, newText: string, textC...
  function parseIsolatedJSDocComment (line 538) | function parseIsolatedJSDocComment(content: string, start?: number, leng...
  function parseJSDocTypeExpressionForTests (line 551) | function parseJSDocTypeExpressionForTests(content: string, start?: numbe...
  function parseSourceFile (line 661) | function parseSourceFile(fileName: string, sourceText: string, languageV...
  function parseIsolatedEntityName (line 673) | function parseIsolatedEntityName(content: string, languageVersion: Scrip...
  function parseJsonText (line 684) | function parseJsonText(fileName: string, sourceText: string): JsonSource...
  function getLanguageVariant (line 709) | function getLanguageVariant(scriptKind: ScriptKind) {
  function initializeState (line 714) | function initializeState(_sourceText: string, languageVersion: ScriptTar...
  function clearState (line 748) | function clearState() {
  function parseSourceFileWorker (line 761) | function parseSourceFileWorker(fileName: string, languageVersion: Script...
  function addJSDocComment (line 792) | function addJSDocComment<T extends HasJSDoc>(node: T): T {
  function fixupParentReferences (line 803) | function fixupParentReferences(rootNode: Node) {
  function createSourceFile (line 835) | function createSourceFile(fileName: string, languageVersion: ScriptTarge...
  function setContextFlag (line 852) | function setContextFlag(val: boolean, flag: NodeFlags) {
  function setDisallowInContext (line 861) | function setDisallowInContext(val: boolean) {
  function setYieldContext (line 865) | function setYieldContext(val: boolean) {
  function setDecoratorContext (line 869) | function setDecoratorContext(val: boolean) {
  function setAwaitContext (line 873) | function setAwaitContext(val: boolean) {
  function doOutsideOfContext (line 877) | function doOutsideOfContext<T>(context: NodeFlags, func: () => T): T {
  function doInsideOfContext (line 898) | function doInsideOfContext<T>(context: NodeFlags, func: () => T): T {
  function allowInAnd (line 919) | function allowInAnd<T>(func: () => T): T {
  function disallowInAnd (line 923) | function disallowInAnd<T>(func: () => T): T {
  function doInYieldContext (line 927) | function doInYieldContext<T>(func: () => T): T {
  function doInDecoratorContext (line 931) | function doInDecoratorContext<T>(func: () => T): T {
  function doInAwaitContext (line 935) | function doInAwaitContext<T>(func: () => T): T {
  function doOutsideOfAwaitContext (line 939) | function doOutsideOfAwaitContext<T>(func: () => T): T {
  function doInYieldAndAwaitContext (line 943) | function doInYieldAndAwaitContext<T>(func: () => T): T {
  function inContext (line 947) | function inContext(flags: NodeFlags) {
  function inYieldContext (line 951) | function inYieldContext() {
  function inDisallowInContext (line 955) | function inDisallowInContext() {
  function inDecoratorContext (line 959) | function inDecoratorContext() {
  function inAwaitContext (line 963) | function inAwaitContext() {
  function parseErrorAtCurrentToken (line 967) | function parseErrorAtCurrentToken(message: DiagnosticMessage, arg0?: any...
  function parseErrorAtPosition (line 974) | function parseErrorAtPosition(start: number, length: number, message: Di...
  function scanError (line 986) | function scanError(message: DiagnosticMessage, length?: number) {
  function getNodePos (line 991) | function getNodePos(): number {
  function token (line 1001) | function token(): SyntaxKind {
  function nextToken (line 1005) | function nextToken(): SyntaxKind {
  function reScanGreaterToken (line 1009) | function reScanGreaterToken(): SyntaxKind {
  function reScanSlashToken (line 1013) | function reScanSlashToken(): SyntaxKind {
  function reScanTemplateToken (line 1017) | function reScanTemplateToken(): SyntaxKind {
  function scanJsxIdentifier (line 1021) | function scanJsxIdentifier(): SyntaxKind {
  function scanJsxText (line 1025) | function scanJsxText(): SyntaxKind {
  function scanJsxAttributeValue (line 1029) | function scanJsxAttributeValue(): SyntaxKind {
  function speculationHelper (line 1033) | function speculationHelper<T>(callback: () => T, isLookAhead: boolean): T {
  function lookAhead (line 1070) | function lookAhead<T>(callback: () => T): T {
  function tryParse (line 1079) | function tryParse<T>(callback: () => T): T {
  function isIdentifier (line 1084) | function isIdentifier(): boolean {
  function parseExpected (line 1104) | function parseExpected(kind: SyntaxKind, diagnosticMessage?: DiagnosticM...
  function parseOptional (line 1122) | function parseOptional(t: SyntaxKind): boolean {
  function parseOptionalToken (line 1131) | function parseOptionalToken(t: SyntaxKind): Node {
  function parseExpectedToken (line 1139) | function parseExpectedToken(t: SyntaxKind, diagnosticMessage?: Diagnosti...
  function parseTokenNode (line 1144) | function parseTokenNode<T extends Node>(): T {
  function canParseSemicolon (line 1150) | function canParseSemicolon() {
  function parseSemicolon (line 1160) | function parseSemicolon(): boolean {
  function createNode (line 1174) | function createNode(kind: SyntaxKind, pos?: number): Node {
  function createNodeWithJSDoc (line 1182) | function createNodeWithJSDoc(kind: SyntaxKind): Node {
  function createNodeArray (line 1190) | function createNodeArray<T extends Node>(elements: T[], pos: number, end...
  function finishNode (line 1201) | function finishNode<T extends Node>(node: T, end?: number): T {
  function createMissingNode (line 1219) | function createMissingNode<T extends Node>(kind: T["kind"], reportAtCurr...
  function internIdentifier (line 1239) | function internIdentifier(text: string): string {
  function createIdentifier (line 1250) | function createIdentifier(isIdentifier: boolean, diagnosticMessage?: Dia...
  function parseIdentifier (line 1270) | function parseIdentifier(diagnosticMessage?: DiagnosticMessage): Identif...
  function parseIdentifierName (line 1274) | function parseIdentifierName(diagnosticMessage?: DiagnosticMessage): Ide...
  function isLiteralPropertyName (line 1278) | function isLiteralPropertyName(): boolean {
  function parsePropertyNameWorker (line 1284) | function parsePropertyNameWorker(allowComputedPropertyNames: boolean): P...
  function parsePropertyName (line 1296) | function parsePropertyName(): PropertyName {
  function parseComputedPropertyName (line 1300) | function parseComputedPropertyName(): ComputedPropertyName {
  function parseContextualModifier (line 1316) | function parseContextualModifier(t: SyntaxKind): boolean {
  function nextTokenIsOnSameLineAndCanFollowModifier (line 1320) | function nextTokenIsOnSameLineAndCanFollowModifier() {
  function nextTokenCanFollowModifier (line 1328) | function nextTokenCanFollowModifier() {
  function parseAnyContextualModifier (line 1351) | function parseAnyContextualModifier(): boolean {
  function canFollowModifier (line 1355) | function canFollowModifier(): boolean {
  function nextTokenCanFollowDefaultKeyword (line 1363) | function nextTokenCanFollowDefaultKeyword(): boolean {
  function isListElement (line 1372) | function isListElement(parsingContext: ParsingContext, inErrorRecovery: ...
  function isValidHeritageClauseObjectLiteral (line 1456) | function isValidHeritageClauseObjectLiteral() {
  function nextTokenIsIdentifier (line 1474) | function nextTokenIsIdentifier() {
  function nextTokenIsIdentifierOrKeyword (line 1479) | function nextTokenIsIdentifierOrKeyword() {
  function nextTokenIsIdentifierOrKeywordOrGreaterThan (line 1484) | function nextTokenIsIdentifierOrKeywordOrGreaterThan() {
  function isHeritageClauseExtendsOrImplementsKeyword (line 1489) | function isHeritageClauseExtendsOrImplementsKeyword(): boolean {
  function nextTokenIsStartOfExpression (line 1499) | function nextTokenIsStartOfExpression() {
  function nextTokenIsStartOfType (line 1504) | function nextTokenIsStartOfType() {
  function isListTerminator (line 1510) | function isListTerminator(kind: ParsingContext): boolean {
  function isVariableDeclaratorListTerminator (line 1558) | function isVariableDeclaratorListTerminator(): boolean {
  function isInSomeParsingContext (line 1584) | function isInSomeParsingContext(): boolean {
  function parseList (line 1597) | function parseList<T extends Node>(kind: ParsingContext, parseElement: (...
  function parseListElement (line 1620) | function parseListElement<T extends Node>(parsingContext: ParsingContext...
  function currentNode (line 1629) | function currentNode(parsingContext: ParsingContext): Node {
  function consumeNode (line 1694) | function consumeNode(node: Node) {
  function canReuseNode (line 1701) | function canReuseNode(node: Node, parsingContext: ParsingContext): boole...
  function isReusableClassMember (line 1781) | function isReusableClassMember(node: Node) {
  function isReusableSwitchClause (line 1806) | function isReusableSwitchClause(node: Node) {
  function isReusableStatement (line 1818) | function isReusableStatement(node: Node) {
  function isReusableEnumMember (line 1857) | function isReusableEnumMember(node: Node) {
  function isReusableTypeMember (line 1861) | function isReusableTypeMember(node: Node) {
  function isReusableVariableDeclaration (line 1876) | function isReusableVariableDeclaration(node: Node) {
  function isReusableParameter (line 1899) | function isReusableParameter(node: Node) {
  function abortParsingListOrMoveToNextToken (line 1910) | function abortParsingListOrMoveToNextToken(kind: ParsingContext) {
  function parsingContextErrors (line 1920) | function parsingContextErrors(context: ParsingContext): DiagnosticMessage {
  function parseDelimitedList (line 1949) | function parseDelimitedList<T extends Node>(kind: ParsingContext, parseE...
  function createMissingList (line 2017) | function createMissingList<T extends Node>(): NodeArray<T> {
  function parseBracketedList (line 2021) | function parseBracketedList<T extends Node>(kind: ParsingContext, parseE...
  function parseEntityName (line 2031) | function parseEntityName(allowReservedWords: boolean, diagnosticMessage?...
  function createQualifiedName (line 2046) | function createQualifiedName(entity: EntityName, name: Identifier): Qual...
  function parseRightSideOfDot (line 2053) | function parseRightSideOfDot(allowIdentifierNames: boolean): Identifier {
  function parseTemplateExpression (line 2087) | function parseTemplateExpression(): TemplateExpression {
  function parseTemplateSpan (line 2106) | function parseTemplateSpan(): TemplateSpan {
  function parseLiteralNode (line 2123) | function parseLiteralNode(): LiteralExpression {
  function parseTemplateHead (line 2127) | function parseTemplateHead(): TemplateHead {
  function parseTemplateMiddleOrTemplateTail (line 2133) | function parseTemplateMiddleOrTemplateTail(): TemplateMiddle | TemplateT...
  function parseLiteralLikeNode (line 2139) | function parseLiteralLikeNode(kind: SyntaxKind): LiteralExpression | Lit...
  function parseTypeReference (line 2170) | function parseTypeReference(): TypeReferenceNode {
  function parseThisTypePredicate (line 2179) | function parseThisTypePredicate(lhs: ThisTypeNode): TypePredicateNode {
  function parseThisTypeNode (line 2187) | function parseThisTypeNode(): ThisTypeNode {
  function parseJSDocAllType (line 2193) | function parseJSDocAllType(): JSDocAllType {
  function parseJSDocUnknownOrNullableType (line 2199) | function parseJSDocUnknownOrNullableType(): JSDocUnknownType | JSDocNull...
  function parseJSDocFunctionType (line 2231) | function parseJSDocFunctionType(): JSDocFunctionType | TypeReferenceNode {
  function parseJSDocParameter (line 2243) | function parseJSDocParameter(): ParameterDeclaration {
  function parseJSDocNodeWithType (line 2253) | function parseJSDocNodeWithType(kind: SyntaxKind.JSDocVariadicType | Syn...
  function parseTypeQuery (line 2260) | function parseTypeQuery(): TypeQueryNode {
  function parseTypeParameter (line 2267) | function parseTypeParameter(): TypeParameterDeclaration {
  function parseTypeParameters (line 2297) | function parseTypeParameters(): NodeArray<TypeParameterDeclaration> | un...
  function parseParameterType (line 2303) | function parseParameterType(): TypeNode {
  function isStartOfParameter (line 2311) | function isStartOfParameter(): boolean {
  function parseParameter (line 2319) | function parseParameter(): ParameterDeclaration {
  function fillSignature (line 2353) | function fillSignature(
  function parseReturnType (line 2364) | function parseReturnType(returnToken: SyntaxKind.ColonToken | SyntaxKind...
  function shouldParseReturnType (line 2367) | function shouldParseReturnType(returnToken: SyntaxKind.ColonToken | Synt...
  function parseParameterList (line 2384) | function parseParameterList(flags: SignatureFlags) {
  function parseTypeMemberSemicolon (line 2425) | function parseTypeMemberSemicolon() {
  function parseSignatureMember (line 2436) | function parseSignatureMember(kind: SyntaxKind.CallSignature | SyntaxKin...
  function isIndexSignature (line 2446) | function isIndexSignature(): boolean {
  function isUnambiguouslyIndexSignature (line 2450) | function isUnambiguouslyIndexSignature() {
  function parseIndexSignatureDeclaration (line 2505) | function parseIndexSignatureDeclaration(node: IndexSignatureDeclaration)...
  function parsePropertyOrMethodSignature (line 2513) | function parsePropertyOrMethodSignature(node: PropertySignature | Method...
  function isTypeMemberStart (line 2536) | function isTypeMemberStart(): boolean {
  function parseTypeMember (line 2569) | function parseTypeMember(): TypeElement {
  function nextTokenIsOpenParenOrLessThan (line 2584) | function nextTokenIsOpenParenOrLessThan() {
  function parseTypeLiteral (line 2589) | function parseTypeLiteral(): TypeLiteralNode {
  function parseObjectTypeMembers (line 2595) | function parseObjectTypeMembers(): NodeArray<TypeElement> {
  function isStartOfMappedType (line 2608) | function isStartOfMappedType() {
  function parseMappedTypeParameter (line 2619) | function parseMappedTypeParameter() {
  function parseMappedType (line 2627) | function parseMappedType() {
  function parseTupleType (line 2651) | function parseTupleType(): TupleTypeNode {
  function parseParenthesizedType (line 2657) | function parseParenthesizedType(): ParenthesizedTypeNode {
  function parseFunctionOrConstructorType (line 2665) | function parseFunctionOrConstructorType(kind: SyntaxKind): FunctionOrCon...
  function parseKeywordAndNoDot (line 2674) | function parseKeywordAndNoDot(): TypeNode | undefined {
  function parseLiteralTypeNode (line 2679) | function parseLiteralTypeNode(negative?: boolean): LiteralTypeNode {
  function nextTokenIsNumericLiteral (line 2699) | function nextTokenIsNumericLiteral() {
  function parseNonArrayType (line 2703) | function parseNonArrayType(): TypeNode {
  function isStartOfType (line 2756) | function isStartOfType(inStartOfParameter?: boolean): boolean {
  function isStartOfParenthesizedOrFunctionType (line 2798) | function isStartOfParenthesizedOrFunctionType() {
  function parsePostfixTypeOrHigher (line 2803) | function parsePostfixTypeOrHigher(): TypeNode {
  function createJSDocPostfixType (line 2847) | function createJSDocPostfixType(kind: SyntaxKind, type: TypeNode) {
  function parseTypeOperator (line 2854) | function parseTypeOperator(operator: SyntaxKind.KeyOfKeyword | SyntaxKin...
  function parseInferType (line 2862) | function parseInferType(): InferTypeNode {
  function parseTypeOperatorOrHigher (line 2871) | function parseTypeOperatorOrHigher(): TypeNode {
  function parseUnionOrIntersectionType (line 2889) | function parseUnionOrIntersectionType(kind: SyntaxKind.UnionType | Synta...
  function parseIntersectionTypeOrHigher (line 2904) | function parseIntersectionTypeOrHigher(): TypeNode {
  function parseUnionTypeOrHigher (line 2908) | function parseUnionTypeOrHigher(): TypeNode {
  function isStartOfFunctionType (line 2912) | function isStartOfFunctionType(): boolean {
  function skipParameterStart (line 2919) | function skipParameterStart(): boolean {
  function isUnambiguouslyStartOfFunctionType (line 2937) | function isUnambiguouslyStartOfFunctionType() {
  function parseTypeOrTypePredicate (line 2966) | function parseTypeOrTypePredicate(): TypeNode {
  function parseTypePredicatePrefix (line 2980) | function parseTypePredicatePrefix() {
  function parseType (line 2988) | function parseType(): TypeNode {
  function parseTypeWorker (line 2994) | function parseTypeWorker(noConditionalTypes?: boolean): TypeNode {
  function parseTypeAnnotation (line 3016) | function parseTypeAnnotation(): TypeNode {
  function isStartOfLeftHandSideExpression (line 3021) | function isStartOfLeftHandSideExpression(): boolean {
  function isStartOfExpression (line 3049) | function isStartOfExpression(): boolean {
  function isStartOfExpressionStatement (line 3084) | function isStartOfExpressionStatement(): boolean {
  function parseExpression (line 3093) | function parseExpression(): Expression {
  function parseInitializer (line 3116) | function parseInitializer(): Expression | undefined {
  function parseAssignmentExpressionOrHigher (line 3120) | function parseAssignmentExpressionOrHigher(): Expression {
  function isYieldExpression (line 3185) | function isYieldExpression(): boolean {
  function nextTokenIsIdentifierOnSameLine (line 3213) | function nextTokenIsIdentifierOnSameLine() {
  function parseYieldExpression (line 3218) | function parseYieldExpression(): YieldExpression {
  function parseSimpleArrowFunctionExpression (line 3240) | function parseSimpleArrowFunctionExpression(identifier: Identifier, asyn...
  function tryParseParenthesizedArrowFunctionExpression (line 3264) | function tryParseParenthesizedArrowFunctionExpression(): Expression | un...
  function isParenthesizedArrowFunctionExpression (line 3301) | function isParenthesizedArrowFunctionExpression(): Tristate {
  function isParenthesizedArrowFunctionExpressionWorker (line 3316) | function isParenthesizedArrowFunctionExpressionWorker() {
  function parsePossibleParenthesizedArrowFunctionExpressionHead (line 3439) | function parsePossibleParenthesizedArrowFunctionExpressionHead(): ArrowF...
  function tryParseAsyncSimpleArrowFunctionExpression (line 3443) | function tryParseAsyncSimpleArrowFunctionExpression(): ArrowFunction | u...
  function isUnParenthesizedAsyncArrowFunctionWorker (line 3455) | function isUnParenthesizedAsyncArrowFunctionWorker(): Tristate {
  function parseParenthesizedArrowFunctionExpressionHead (line 3476) | function parseParenthesizedArrowFunctionExpressionHead(allowAmbiguity: b...
  function parseArrowFunctionExpressionBody (line 3510) | function parseArrowFunctionExpressionBody(isAsync: boolean): Block | Exp...
  function parseConditionalExpressionRest (line 3542) | function parseConditionalExpressionRest(leftOperand: Expression): Expres...
  function parseBinaryExpressionOrHigher (line 3562) | function parseBinaryExpressionOrHigher(precedence: number): Expression {
  function isInOrOfKeyword (line 3567) | function isInOrOfKeyword(t: SyntaxKind) {
  function parseBinaryExpressionRest (line 3571) | function parseBinaryExpressionRest(precedence: number, leftOperand: Expr...
  function isBinaryOperator (line 3634) | function isBinaryOperator() {
  function getBinaryOperatorPrecedence (line 3642) | function getBinaryOperatorPrecedence(): number {
  function makeBinaryExpression (line 3687) | function makeBinaryExpression(left: Expression, operatorToken: BinaryOpe...
  function makeAsExpression (line 3695) | function makeAsExpression(left: Expression, right: TypeNode): AsExpressi...
  function parsePrefixUnaryExpression (line 3702) | function parsePrefixUnaryExpression() {
  function parseDeleteExpression (line 3711) | function parseDeleteExpression() {
  function parseTypeOfExpression (line 3718) | function parseTypeOfExpression() {
  function parseVoidExpression (line 3725) | function parseVoidExpression() {
  function isAwaitExpression (line 3732) | function isAwaitExpression(): boolean {
  function parseAwaitExpression (line 3745) | function parseAwaitExpression() {
  function parseUnaryExpressionOrHigher (line 3760) | function parseUnaryExpressionOrHigher(): UnaryExpression | BinaryExpress...
  function parseSimpleUnaryExpression (line 3815) | function parseSimpleUnaryExpression(): UnaryExpression {
  function isUpdateExpression (line 3853) | function isUpdateExpression(): boolean {
  function parseUpdateExpression (line 3889) | function parseUpdateExpression(): UpdateExpression {
  function parseLeftHandSideExpressionOrHigher (line 3916) | function parseLeftHandSideExpressionOrHigher(): LeftHandSideExpression {
  function parseMemberExpressionOrHigher (line 3967) | function parseMemberExpressionOrHigher(): MemberExpression {
  function parseSuperExpression (line 4019) | function parseSuperExpression(): MemberExpression {
  function tagNamesAreEquivalent (line 4034) | function tagNamesAreEquivalent(lhs: JsxTagNameExpression, rhs: JsxTagNam...
  function parseJsxElementOrSelfClosingElementOrFragment (line 4055) | function parseJsxElementOrSelfClosingElementOrFragment(inExpressionConte...
  function parseJsxText (line 4109) | function parseJsxText(): JsxText {
  function parseJsxChild (line 4116) | function parseJsxChild(): JsxChild {
  function parseJsxChildren (line 4129) | function parseJsxChildren(openingTag: JsxOpeningElement | JsxOpeningFrag...
  function parseJsxAttributes (line 4167) | function parseJsxAttributes(): JsxAttributes {
  function parseJsxOpeningOrSelfClosingElementOrOpeningFragment (line 4173) | function parseJsxOpeningOrSelfClosingElementOrOpeningFragment(inExpressi...
  function parseJsxElementName (line 4214) | function parseJsxElementName(): JsxTagNameExpression {
  function parseJsxExpression (line 4232) | function parseJsxExpression(inExpressionContext: boolean): JsxExpression {
  function parseJsxAttribute (line 4251) | function parseJsxAttribute(): JsxAttribute | JsxSpreadAttribute {
  function parseJsxSpreadAttribute (line 4272) | function parseJsxSpreadAttribute(): JsxSpreadAttribute {
  function parseJsxClosingElement (line 4281) | function parseJsxClosingElement(inExpressionContext: boolean): JsxClosin...
  function parseJsxClosingFragment (line 4295) | function parseJsxClosingFragment(inExpressionContext: boolean): JsxClosi...
  function parseTypeAssertion (line 4312) | function parseTypeAssertion(): TypeAssertion {
  function parseMemberExpressionRest (line 4321) | function parseMemberExpressionRest(expression: LeftHandSideExpression): ...
  function parseCallExpressionRest (line 4374) | function parseCallExpressionRest(expression: LeftHandSideExpression): Le...
  function parseArgumentList (line 4406) | function parseArgumentList() {
  function parseTypeArgumentsInExpression (line 4413) | function parseTypeArgumentsInExpression() {
  function canFollowTypeArgumentsInExpression (line 4431) | function canFollowTypeArgumentsInExpression(): boolean {
  function parsePrimaryExpression (line 4471) | function parsePrimaryExpression(): PrimaryExpression {
  function parseParenthesizedExpression (line 4517) | function parseParenthesizedExpression(): ParenthesizedExpression {
  function parseSpreadElement (line 4525) | function parseSpreadElement(): Expression {
  function parseArgumentOrArrayLiteralElement (line 4532) | function parseArgumentOrArrayLiteralElement(): Expression {
  function parseArgumentExpression (line 4538) | function parseArgumentExpression(): Expression {
  function parseArrayLiteralExpression (line 4542) | function parseArrayLiteralExpression(): ArrayLiteralExpression {
  function parseObjectLiteralElement (line 4553) | function parseObjectLiteralElement(): ObjectLiteralElementLike {
  function parseObjectLiteralExpression (line 4604) | function parseObjectLiteralExpression(): ObjectLiteralExpression {
  function parseFunctionExpression (line 4616) | function parseFunctionExpression(): FunctionExpression {
  function parseOptionalIdentifier (line 4650) | function parseOptionalIdentifier(): Identifier | undefined {
  function parseNewExpression (line 4654) | function parseNewExpression(): NewExpression | MetaProperty {
  function parseBlock (line 4674) | function parseBlock(ignoreMissingOpenBrace: boolean, diagnosticMessage?:...
  function parseFunctionBlock (line 4690) | function parseFunctionBlock(flags: SignatureFlags, diagnosticMessage?: D...
  function parseEmptyStatement (line 4716) | function parseEmptyStatement(): Statement {
  function parseIfStatement (line 4722) | function parseIfStatement(): IfStatement {
  function parseDoStatement (line 4733) | function parseDoStatement(): DoStatement {
  function parseWhileStatement (line 4750) | function parseWhileStatement(): WhileStatement {
  function parseForOrForInOrForOfStatement (line 4760) | function parseForOrForInOrForOfStatement(): Statement {
  function parseBreakOrContinueStatement (line 4811) | function parseBreakOrContinueStatement(kind: SyntaxKind): BreakOrContinu...
  function parseReturnStatement (line 4823) | function parseReturnStatement(): ReturnStatement {
  function parseWithStatement (line 4835) | function parseWithStatement(): WithStatement {
  function parseCaseClause (line 4845) | function parseCaseClause(): CaseClause {
  function parseDefaultClause (line 4854) | function parseDefaultClause(): DefaultClause {
  function parseCaseOrDefaultClause (line 4862) | function parseCaseOrDefaultClause(): CaseOrDefaultClause {
  function parseSwitchStatement (line 4866) | function parseSwitchStatement(): SwitchStatement {
  function parseThrowStatement (line 4880) | function parseThrowStatement(): ThrowStatement {
  function parseTryStatement (line 4897) | function parseTryStatement(): TryStatement {
  function parseCatchClause (line 4914) | function parseCatchClause(): CatchClause {
  function parseDebuggerStatement (line 4931) | function parseDebuggerStatement(): Statement {
  function parseExpressionOrLabeledStatement (line 4938) | function parseExpressionOrLabeledStatement(): ExpressionStatement | Labe...
  function nextTokenIsIdentifierOrKeywordOnSameLine (line 4957) | function nextTokenIsIdentifierOrKeywordOnSameLine() {
  function nextTokenIsClassKeywordOnSameLine (line 4962) | function nextTokenIsClassKeywordOnSameLine() {
  function nextTokenIsFunctionKeywordOnSameLine (line 4967) | function nextTokenIsFunctionKeywordOnSameLine() {
  function nextTokenIsIdentifierOrKeywordOrLiteralOnSameLine (line 4972) | function nextTokenIsIdentifierOrKeywordOrLiteralOnSameLine() {
  function isDeclaration (line 4977) | function isDeclaration(): boolean {
  function isStartOfDeclaration (line 5055) | function isStartOfDeclaration(): boolean {
  function isStartOfStatement (line 5059) | function isStartOfStatement(): boolean {
  function nextTokenIsIdentifierOrStartOfDestructuring (line 5118) | function nextTokenIsIdentifierOrStartOfDestructuring() {
  function isLetDeclaration (line 5123) | function isLetDeclaration() {
  function parseStatement (line 5129) | function parseStatement(): Statement {
  function isDeclareModifier (line 5200) | function isDeclareModifier(modifier: Modifier) {
  function parseDeclaration (line 5204) | function parseDeclaration(): Statement {
  function parseDeclarationWorker (line 5219) | function parseDeclarationWorker(node: Statement): Statement {
  function nextTokenIsIdentifierOrStringLiteralOnSameLine (line 5265) | function nextTokenIsIdentifierOrStringLiteralOnSameLine() {
  function parseFunctionBlockOrSemicolon (line 5270) | function parseFunctionBlockOrSemicolon(flags: SignatureFlags, diagnostic...
  function parseArrayBindingElement (line 5281) | function parseArrayBindingElement(): ArrayBindingElement {
  function parseObjectBindingElement (line 5292) | function parseObjectBindingElement(): BindingElement {
  function parseObjectBindingPattern (line 5309) | function parseObjectBindingPattern(): ObjectBindingPattern {
  function parseArrayBindingPattern (line 5317) | function parseArrayBindingPattern(): ArrayBindingPattern {
  function isIdentifierOrPattern (line 5325) | function isIdentifierOrPattern() {
  function parseIdentifierOrPattern (line 5329) | function parseIdentifierOrPattern(): Identifier | BindingPattern {
  function parseVariableDeclarationAllowExclamation (line 5339) | function parseVariableDeclarationAllowExclamation() {
  function parseVariableDeclaration (line 5343) | function parseVariableDeclaration(allowExclamation?: boolean): VariableD...
  function parseVariableDeclarationList (line 5357) | function parseVariableDeclarationList(inForStatementInitializer: boolean...
  function canFollowContextualOfKeyword (line 5400) | function canFollowContextualOfKeyword(): boolean {
  function parseVariableStatement (line 5404) | function parseVariableStatement(node: VariableStatement): VariableStatem...
  function parseFunctionDeclaration (line 5411) | function parseFunctionDeclaration(node: FunctionDeclaration): FunctionDe...
  function parseConstructorDeclaration (line 5423) | function parseConstructorDeclaration(node: ConstructorDeclaration): Cons...
  function parseMethodDeclaration (line 5431) | function parseMethodDeclaration(node: MethodDeclaration, asteriskToken: ...
  function parsePropertyDeclaration (line 5441) | function parsePropertyDeclaration(node: PropertyDeclaration): PropertyDe...
  function parsePropertyOrMethodDeclaration (line 5465) | function parsePropertyOrMethodDeclaration(node: PropertyDeclaration | Me...
  function parseAccessorDeclaration (line 5477) | function parseAccessorDeclaration(node: AccessorDeclaration, kind: Acces...
  function isClassMemberModifier (line 5485) | function isClassMemberModifier(idToken: SyntaxKind) {
  function isClassMemberStart (line 5498) | function isClassMemberStart(): boolean {
  function parseDecorators (line 5567) | function parseDecorators(): NodeArray<Decorator> | undefined {
  function parseModifiers (line 5590) | function parseModifiers(permitInvalidConstAsModifier?: boolean): NodeArr...
  function parseModifiersForArrowFunction (line 5616) | function parseModifiersForArrowFunction(): NodeArray<Modifier> {
  function parseClassElement (line 5628) | function parseClassElement(): ClassElement {
  function parseClassExpression (line 5676) | function parseClassExpression(): ClassExpression {
  function parseClassDeclaration (line 5680) | function parseClassDeclaration(node: ClassLikeDeclaration): ClassDeclara...
  function parseClassDeclarationOrExpression (line 5684) | function parseClassDeclarationOrExpression(node: ClassLikeDeclaration, k...
  function parseNameOfClassDeclarationOrExpression (line 5704) | function parseNameOfClassDeclarationOrExpression(): Identifier | undefin...
  function isImplementsClause (line 5715) | function isImplementsClause() {
  function parseHeritageClauses (line 5719) | function parseHeritageClauses(): NodeArray<HeritageClause> | undefined {
  function parseHeritageClause (line 5730) | function parseHeritageClause(): HeritageClause | undefined {
  function parseExpressionWithTypeArguments (line 5743) | function parseExpressionWithTypeArguments(): ExpressionWithTypeArguments {
  function tryParseTypeArguments (line 5750) | function tryParseTypeArguments(): NodeArray<TypeNode> | undefined {
  function isHeritageClause (line 5756) | function isHeritageClause(): boolean {
  function parseClassMembers (line 5760) | function parseClassMembers(): NodeArray<ClassElement> {
  function parseInterfaceDeclaration (line 5764) | function parseInterfaceDeclaration(node: InterfaceDeclaration): Interfac...
  function parseTypeAliasDeclaration (line 5774) | function parseTypeAliasDeclaration(node: TypeAliasDeclaration): TypeAlia...
  function parseEnumMember (line 5789) | function parseEnumMember(): EnumMember {
  function parseEnumDeclaration (line 5796) | function parseEnumDeclaration(node: EnumDeclaration): EnumDeclaration {
  function parseModuleBlock (line 5810) | function parseModuleBlock(): ModuleBlock {
  function parseModuleOrNamespaceDeclaration (line 5822) | function parseModuleOrNamespaceDeclaration(node: ModuleDeclaration, flag...
  function parseAmbientExternalModuleDeclaration (line 5835) | function parseAmbientExternalModuleDeclaration(node: ModuleDeclaration):...
  function parseModuleDeclaration (line 5855) | function parseModuleDeclaration(node: ModuleDeclaration): ModuleDeclarat...
  function isExternalModuleReference (line 5873) | function isExternalModuleReference() {
  function nextTokenIsOpenParen (line 5878) | function nextTokenIsOpenParen() {
  function nextTokenIsSlash (line 5882) | function nextTokenIsSlash() {
  function parseNamespaceExportDeclaration (line 5886) | function parseNamespaceExportDeclaration(node: NamespaceExportDeclaratio...
  function parseImportDeclarationOrImportEqualsDeclaration (line 5895) | function parseImportDeclarationOrImportEqualsDeclaration(node: ImportEqu...
  function parseImportEqualsDeclaration (line 5924) | function parseImportEqualsDeclaration(node: ImportEqualsDeclaration, ide...
  function parseImportClause (line 5933) | function parseImportClause(identifier: Identifier, fullStart: number) {
  function parseModuleReference (line 5958) | function parseModuleReference() {
  function parseExternalModuleReference (line 5964) | function parseExternalModuleReference() {
  function parseModuleSpecifier (line 5973) | function parseModuleSpecifier(): Expression {
  function parseNamespaceImport (line 5987) | function parseNamespaceImport(): NamespaceImport {
  function parseNamedImportsOrExports (line 5999) | function parseNamedImportsOrExports(kind: SyntaxKind): NamedImportsOrExp...
  function parseExportSpecifier (line 6016) | function parseExportSpecifier() {
  function parseImportSpecifier (line 6020) | function parseImportSpecifier() {
  function parseImportOrExportSpecifier (line 6024) | function parseImportOrExportSpecifier(kind: SyntaxKind): ImportOrExportS...
  function parseExportDeclaration (line 6054) | function parseExportDeclaration(node: ExportDeclaration): ExportDeclarat...
  function parseExportAssignment (line 6074) | function parseExportAssignment(node: ExportAssignment): ExportAssignment {
  function processReferenceComments (line 6087) | function processReferenceComments(sourceFile: SourceFile): void {
  function setExternalModuleIndicator (line 6175) | function setExternalModuleIndicator(sourceFile: SourceFile) {
  type ParsingContext (line 6186) | const enum ParsingContext {
  type Tristate (line 6213) | const enum Tristate {
  function parseJSDocTypeExpressionForTests (line 6220) | function parseJSDocTypeExpressionForTests(content: string, start: number...
  function parseJSDocTypeExpression (line 6233) | function parseJSDocTypeExpression(mayOmitBraces?: boolean): JSDocTypeExp...
  function parseIsolatedJSDocComment (line 6246) | function parseIsolatedJSDocComment(content: string, start: number, lengt...
  function parseJSDocComment (line 6256) | function parseJSDocComment(parent: HasJSDoc, start: number, length: numb...
  type JSDocState (line 6279) | const enum JSDocState {
  type PropertyLikeParse (line 6285) | const enum PropertyLikeParse {
  function parseJSDocCommentWorker (line 6290) | function parseJSDocCommentWorker(start: number, length: number): JSDoc {
  function updateSourceFile (line 6982) | function updateSourceFile(sourceFile: SourceFile, newText: string, textC...
  function moveElementEntirelyPastChangeRange (line 7063) | function moveElementEntirelyPastChangeRange(element: IncrementalElement,...
  function shouldCheckNode (line 7111) | function shouldCheckNode(node: Node) {
  function adjustIntersectingElement (line 7122) | function adjustIntersectingElement(element: IncrementalElement, changeSt...
  function checkNodePositions (line 7197) | function checkNodePositions(node: Node, aggressiveChecks: boolean) {
  function updateTokenPositionsAndMarkElements (line 7208) | function updateTokenPositionsAndMarkElements(
  function extendToAffectedRange (line 7280) | function extendToAffectedRange(sourceFile: SourceFile, changeRange: Text...
  function findNearestNodeStartingBeforeOrAtPosition (line 7312) | function findNearestNodeStartingBeforeOrAtPosition(sourceFile: SourceFil...
  function checkChangeRange (line 7407) | function checkChangeRange(sourceFile: SourceFile, newText: string, textC...
  type IncrementalElement (line 7424) | interface IncrementalElement extends TextRange {
  type IncrementalNode (line 7431) | interface IncrementalNode extends Node, IncrementalElement {
  type IncrementalNodeArray (line 7435) | interface IncrementalNodeArray extends NodeArray<IncrementalNode>, Incre...
  type SyntaxCursor (line 7442) | interface SyntaxCursor {
  function createSyntaxCursor (line 7446) | function createSyntaxCursor(sourceFile: SourceFile): SyntaxCursor {
  type InvalidPosition (line 7547) | const enum InvalidPosition {
  function isDeclarationFileName (line 7552) | function isDeclarationFileName(fileName: string): boolean {

FILE: examples/typescript/small.ts
  class Foo (line 1) | class Foo {
    method constructor (line 2) | constructor() {}
  function foo (line 5) | function foo() {

FILE: src/benchmark.ts
  function benchmarkGo (line 8) | async function benchmarkGo() {

FILE: src/colors.ts
  type Range (line 3) | type Range = {start: Parser.Point, end: Parser.Point}
  type ColorFunction (line 4) | type ColorFunction = (x: Parser.Tree, visibleRanges: {start: number, end...
  function colorGo (line 6) | function colorGo(root: Parser.Tree, visibleRanges: {start: number, end: ...
  function colorTypescript (line 272) | function colorTypescript(root: Parser.Tree, visibleRanges: {start: numbe...
  function colorRuby (line 345) | function colorRuby(root: Parser.Tree, visibleRanges: {start: number, end...
  function colorRust (line 471) | function colorRust(root: Parser.Tree, visibleRanges: {start: number, end...
  function colorCpp (line 539) | function colorCpp(root: Parser.Tree, visibleRanges: {start: number, end:...
  function isVisible (line 598) | function isVisible(x: Parser.SyntaxNode, visibleRanges: {start: number, ...
  function visible (line 605) | function visible(x: Parser.TreeCursor, visibleRanges: { start: number, e...

FILE: src/extension.ts
  function decoration (line 20) | function decoration(scope: string): vscode.TextEditorDecorationType|unde...
  function createDecorationFromTextmate (line 35) | function createDecorationFromTextmate(themeStyle: scopes.TextMateRuleSet...
  function loadStyles (line 66) | async function loadStyles() {
  function activate (line 79) | async function activate(context: vscode.ExtensionContext) {
  function visibleLines (line 189) | function visibleLines(editor: vscode.TextEditor) {
  function range (line 197) | function range(x: colors.Range): vscode.Range {
  function deactivate (line 202) | function deactivate() {}

FILE: src/print.ts
  function testRust (line 7) | async function testRust() {
  function maxWidth (line 31) | function maxWidth(lines: string[]): number {
  function collectTypes (line 39) | function collectTypes(node: Parser.SyntaxNode, line: number, types: stri...

FILE: src/scopes.ts
  type TextMateRule (line 6) | interface TextMateRule {
  type TextMateRuleSettings (line 11) | interface TextMateRuleSettings {
  function find (line 20) | function find(scope: string): TextMateRuleSettings|undefined {
  function load (line 25) | async function load() {
  function loadThemeNamed (line 43) | async function loadThemeNamed(themeName: string) {
  function loadThemeFile (line 65) | async function loadThemeFile(themePath: string) {
  function loadColors (line 80) | function loadColors(textMateRules: TextMateRule[]): void {
  function checkFileExists (line 96) | function checkFileExists(filePath: string): Promise<boolean> {
  function readFileText (line 109) | function readFileText(filePath: string, encoding: string = "utf8"): Prom...

FILE: src/test.ts
  type Assert (line 4) | type Assert = [string, string|{not:string}]
  type TestCase (line 5) | type TestCase = [string, ...Assert[]]
  function test (line 246) | async function test(testCases: TestCase[], wasm: string, color: colors.C...
  function index (line 299) | function index(code: string, point: Parser.Point): number {
  function join (line 315) | function join(strings: IterableIterator<string>) {
Condensed preview — 57 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (955K chars).
[
  {
    "path": ".gitignore",
    "chars": 43,
    "preview": "out\nnode_modules\n.vscode-test/\n*.vsix\n*.bin"
  },
  {
    "path": ".vscode/launch.json",
    "chars": 1186,
    "preview": "// A launch configuration that compiles the extension and then opens it inside a new window\n// Use IntelliSense to learn"
  },
  {
    "path": ".vscode/settings.json",
    "chars": 444,
    "preview": "// Place your settings in this file to overwrite default and user settings.\n{\n    \"files.exclude\": {\n        \"out\": fals"
  },
  {
    "path": ".vscode/tasks.json",
    "chars": 366,
    "preview": "// See https://go.microsoft.com/fwlink/?LinkId=733558\n// for the documentation about the tasks.json format\n{\n\t\"version\":"
  },
  {
    "path": ".vscodeignore",
    "chars": 145,
    "preview": ".vscode/**\n.vscode-test/**\nout/test/**\nsrc/**\n.gitignore\nvsc-extension-quickstart.md\n**/tsconfig.json\n**/tslint.json\n**/"
  },
  {
    "path": "LICENSE.md",
    "chars": 1080,
    "preview": "The MIT License (MIT)\n\nCopyright (c) 2016 George Fraser\n\nPermission is hereby granted, free of charge, to any person obt"
  },
  {
    "path": "README.md",
    "chars": 2728,
    "preview": "# Tree Sitter for VSCode [Deprecated]\n\n**With the improving support for custom syntax coloring through language server, "
  },
  {
    "path": "TODO.md",
    "chars": 462,
    "preview": "## Bugs\n- Tree-sitter scope colors are wrong while the user is previewing other themes\n- Put back react support for .js "
  },
  {
    "path": "azure-pipelines.yml",
    "chars": 540,
    "preview": "# Node.js\n# Build a general Node.js project with npm.\n# Add steps that analyze code, save build artifacts, deploy, and m"
  },
  {
    "path": "examples/cpp/marker-index.h",
    "chars": 4832,
    "preview": "#ifndef MARKER_INDEX_H_\n#define MARKER_INDEX_H_\n\n#include <random>\n#include <unordered_map>\n#include \"flat_set.h\"\n#inclu"
  },
  {
    "path": "examples/cpp/rule.cc",
    "chars": 8448,
    "preview": "#include \"compiler/rule.h\"\n#include \"compiler/util/hash_combine.h\"\n\nnamespace tree_sitter {\nnamespace rules {\n\nusing std"
  },
  {
    "path": "examples/go/letter_test.go",
    "chars": 12399,
    "preview": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license "
  },
  {
    "path": "examples/go/no_newline_at_eof.go",
    "chars": 280,
    "preview": "// run\n\n// Copyright 2015 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// "
  },
  {
    "path": "examples/go/proc.go",
    "chars": 118827,
    "preview": "// Copyright 2014 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license "
  },
  {
    "path": "examples/go/small.go",
    "chars": 438,
    "preview": "package example\n\ntype Person struct {\n\tname string\n\tmom  *Person\n}\n\nfunc NewPerson(name string, mom *Person) Person {\n\tr"
  },
  {
    "path": "examples/go/type_switch.go",
    "chars": 89,
    "preview": "package p\n\nfunc f(a interface{}) {\n\tswitch aa := a.(type) {\n\tcase *int:\n\t\tprint(aa)\n\t}\n}\n"
  },
  {
    "path": "examples/go/value.go",
    "chars": 73937,
    "preview": "// Copyright 2009 The Go Authors. All rights reserved.\n// Use of this source code is governed by a BSD-style\n// license "
  },
  {
    "path": "examples/javascript/destructuring.js",
    "chars": 244,
    "preview": "let {a, b} = object\nlet {a, b, ...c} = object\nconst {a, b: {c, d}} = object\n\n\n\n\nfunction a ({b, c}, {d}) {}\n\n\n\n\n[a, b] ="
  },
  {
    "path": "examples/javascript/expressions.js",
    "chars": 4173,
    "preview": "\"A string with \\\"double\\\" and 'single' quotes\";\n'A string with \"double\" and \\'single\\' quotes';\n'\\\\'\n\"\\\\\"\n\n'A string wit"
  },
  {
    "path": "examples/javascript/literals.js",
    "chars": 81,
    "preview": "04000\n400\n100n\n\nconst últimaVez = 1\nvar x = { 県: '大阪府', '': '' }\n\n\"//ok\\n//what\"\n"
  },
  {
    "path": "examples/javascript/semicolon_insertion.js",
    "chars": 923,
    "preview": "if (a) {\n  var b = c\n  d()\n  e()\n  return f\n}\n\n\n\n\nif (a)\n  d()\n++b\n\nif (a)\n  d()\n--b\n\n \n\nobject\n  .someProperty\n  .other"
  },
  {
    "path": "examples/javascript/statements.js",
    "chars": 2593,
    "preview": "#!/usr/bin/env node\n\nimport defaultMember from \"module-name\";\nimport * as name from \"module-name\";\nimport { member } fro"
  },
  {
    "path": "examples/ruby/classes.rb",
    "chars": 1641,
    "preview": "# Class names must be capitalized.  Technically, it's a constant.\nclass Fred\n  \n  # The initialize method is the constru"
  },
  {
    "path": "examples/ruby/comments.rb",
    "chars": 229,
    "preview": "# anything else here should be ignored\n\n=begin\n=end\n\n=begin\nwhatever\n=end\n\n=begin rdoc\n=end\n\n\n=begin\nwhatever\nmultiple l"
  },
  {
    "path": "examples/ruby/control-flow.rb",
    "chars": 1258,
    "preview": "while foo do\nend\n\nwhile foo\nend\n\nwhile foo do\n  bar\nend\n\nuntil foo do\nend\n\nuntil foo do\n  bar\nend\n\nif foo\nend\n\nif foo th"
  },
  {
    "path": "examples/ruby/declarations.rb",
    "chars": 1465,
    "preview": "def foo\nend\n\ndef foo?\nend\n\ndef foo!\nend\n\n\n\ndef foo\n  bar\nend\n\n\n\ndef foo=\nend\n\n\n\ndef `(a)\n  \"`\"\nend\n\ndef -@(a)\nend\n\ndef %"
  },
  {
    "path": "examples/ruby/expressions.rb",
    "chars": 3168,
    "preview": "Foo::bar\n::Bar\n\nputs ::Foo::Bar\n\n\n\nfoo[bar]\nfoo[*bar]\nfoo[* bar]\nfoo[]\n\n\n\nfoo[\"bar\"]\n\n\n\nfoo[:bar]\n\n\n\nfoo[bar] = 1\n\n\n\n()\n"
  },
  {
    "path": "examples/ruby/literals.rb",
    "chars": 4039,
    "preview": ":foo\n:foo!\n:foo?\n:foo=\n:@foo\n:@foo_0123_bar\n:@@foo\n:$foo\n:$0\n:_bar\n:åäö\n\n\n\n\n:+\n:-\n:+@\n:-@\n:[]\n:[]=\n:&\n:!\n:`\n:^\n:|\n:~\n:/\n"
  },
  {
    "path": "examples/ruby/statements.rb",
    "chars": 226,
    "preview": "foo if bar\nreturn if false\nreturn true if foo\nreturn nil if foo\n\n\n\nfoo while bar\n\n\n\nfoo unless bar\n\n\n\nfoo until bar\n\n\n\na"
  },
  {
    "path": "examples/rust/ast.rs",
    "chars": 65499,
    "preview": "// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT\n// file at the top-level directory of this distrib"
  },
  {
    "path": "examples/rust/keywords.txt",
    "chars": 296,
    "preview": "false\ntrue\n\nas\nasync\nawait\nbecome\nbreak\ncontinue\ndo\nelse\nfor\nif\nin\nloop\nmatch\nmove\nreturn\ntry\ntypeof\nunsafe\nuse\nwhile\nyi"
  },
  {
    "path": "examples/rust/scratch.rs",
    "chars": 74,
    "preview": "fn f() {\n    match self.node {\n        Foo::PatKind::Ident(_) => 1\n    }\n}"
  },
  {
    "path": "examples/typescript/keywords.txt",
    "chars": 367,
    "preview": "abstract\narguments\nclass\nconst\ndeclare\nenum\nexport\nextends\nfrom\nfunction\nimplements\nimport\ninterface\nlet\nmodule\nnamespac"
  },
  {
    "path": "examples/typescript/parser.ts",
    "chars": 376385,
    "preview": "/// <reference path=\"utilities.ts\"/>\n/// <reference path=\"scanner.ts\"/>\n\nnamespace ts {\n    const enum SignatureFlags {\n"
  },
  {
    "path": "examples/typescript/small.ts",
    "chars": 81,
    "preview": "class Foo {\n    constructor() {}\n}\n\nfunction foo() {\n    \n}\n\nconst s = `${foo()}`"
  },
  {
    "path": "package.json",
    "chars": 2377,
    "preview": "{\n\t\"name\": \"vscode-tree-sitter\",\n\t\"displayName\": \"Tree Sitter [Deprecated]\",\n\t\"description\": \"Accurate syntax coloring w"
  },
  {
    "path": "scripts/build.sh",
    "chars": 142,
    "preview": "#!/bin/bash\n\nset -e\n\n# Build vsix\nnpm run-script build\n\ncode --install-extension build.vsix --force\n\necho 'Reload VSCode"
  },
  {
    "path": "scripts/gen-parsers.sh",
    "chars": 629,
    "preview": "#!/usr/bin/env bash\n\n# TODO this still doesn't work on my mac laptop :(\n# fix it and delete parsers/*.wasm from git\n\nset"
  },
  {
    "path": "src/benchmark.ts",
    "chars": 680,
    "preview": "// import extension = require('./extension')\nimport Parser = require('web-tree-sitter')\nimport fs = require('fs')\nimport"
  },
  {
    "path": "src/colors.ts",
    "chars": 17166,
    "preview": "import * as Parser from 'web-tree-sitter'\n\nexport type Range = {start: Parser.Point, end: Parser.Point}\nexport type Colo"
  },
  {
    "path": "src/extension.ts",
    "chars": 7603,
    "preview": "import * as vscode from 'vscode'\nimport * as Parser from 'web-tree-sitter'\nimport * as path from 'path'\nimport * as scop"
  },
  {
    "path": "src/print.ts",
    "chars": 1565,
    "preview": "// import extension = require('./extension')\nimport Parser = require('web-tree-sitter')\nimport fs = require('fs')\n\ntestR"
  },
  {
    "path": "src/scopes.ts",
    "chars": 3937,
    "preview": "import * as vscode from 'vscode'\nimport * as path from 'path'\nimport * as fs from 'fs'\nimport * as jsonc from \"jsonc-par"
  },
  {
    "path": "src/test.ts",
    "chars": 8591,
    "preview": "import Parser = require('web-tree-sitter')\nimport colors = require('./colors')\n\ntype Assert = [string, string|{not:strin"
  },
  {
    "path": "textmate/cpp.tmLanguage.json",
    "chars": 33014,
    "preview": "{\n\t\"$schema\": \"https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json\",\n\t\"version\": \"https://git"
  },
  {
    "path": "textmate/go.tmLanguage.json",
    "chars": 5474,
    "preview": "{\n\t\"$schema\": \"https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json\",\n\t\"version\": \"https://git"
  },
  {
    "path": "textmate/ruby.tmLanguage.json",
    "chars": 94731,
    "preview": "{\n    \"$schema\": \"https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json\",\n    \"name\": \"Ruby\",\n "
  },
  {
    "path": "textmate/rust.tmLanguage.json",
    "chars": 6867,
    "preview": "{\n\t\"$schema\": \"https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json\",\n\t\"version\": \"https://git"
  },
  {
    "path": "textmate/typescript.tmLanguage.json",
    "chars": 15943,
    "preview": "{\n\t\"$schema\": \"https://raw.githubusercontent.com/martinring/tmlanguage/master/tmlanguage.json\",\n\t\"name\": \"TypeScript\",\n\t"
  },
  {
    "path": "tsconfig.json",
    "chars": 698,
    "preview": "{\n\t\"compilerOptions\": {\n\t\t\"module\": \"commonjs\",\n\t\t\"target\": \"es6\",\n\t\t\"outDir\": \"out\",\n\t\t\"lib\": [\n\t\t\t\"es6\"\n\t\t],\n\t\t\"source"
  },
  {
    "path": "tslint.json",
    "chars": 196,
    "preview": "{\n\t\"rules\": {\n\t\t\"no-string-throw\": true,\n\t\t\"no-unused-expression\": true,\n\t\t\"no-duplicate-variable\": true,\n\t\t\"class-name\""
  }
]

// ... and 6 more files (download for full content)

About this extraction

This page contains the full source code of the georgewfraser/vscode-tree-sitter GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 57 files (867.8 KB), approximately 209.6k tokens, and a symbol index with 1059 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!