[
  {
    "path": ".gitignore",
    "content": "*.class\n*.lst\n.idea/*\n*.iml\n*.xml\n*.pyc\n\n"
  },
  {
    "path": "CITATION.cff",
    "content": "@inproceedings{\n    alon2018codeseq,\n    title={code2seq: Generating Sequences from Structured Representations of Code},\n    author={Uri Alon and Shaked Brody and Omer Levy and Eran Yahav},\n    booktitle={International Conference on Learning Representations},\n    year={2019},\n    url={https://openreview.net/forum?id=H1gKYo09tX},\n}\n"
  },
  {
    "path": "CSharpExtractor/.gitattributes",
    "content": "###############################################################################\n# Set default behavior to automatically normalize line endings.\n###############################################################################\n* text=auto\n\n###############################################################################\n# Set default behavior for command prompt diff.\n#\n# This is need for earlier builds of msysgit that does not have it on by\n# default for csharp files.\n# Note: This is only used by command line\n###############################################################################\n#*.cs     diff=csharp\n\n###############################################################################\n# Set the merge driver for project and solution files\n#\n# Merging from the command prompt will add diff markers to the files if there\n# are conflicts (Merging from VS is not affected by the settings below, in VS\n# the diff markers are never inserted). Diff markers may cause the following \n# file extensions to fail to load in VS. An alternative would be to treat\n# these files as binary and thus will always conflict and require user\n# intervention with every merge. To do so, just uncomment the entries below\n###############################################################################\n#*.sln       merge=binary\n#*.csproj    merge=binary\n#*.vbproj    merge=binary\n#*.vcxproj   merge=binary\n#*.vcproj    merge=binary\n#*.dbproj    merge=binary\n#*.fsproj    merge=binary\n#*.lsproj    merge=binary\n#*.wixproj   merge=binary\n#*.modelproj merge=binary\n#*.sqlproj   merge=binary\n#*.wwaproj   merge=binary\n\n###############################################################################\n# behavior for image files\n#\n# image files are treated as binary by default.\n###############################################################################\n#*.jpg   binary\n#*.png   binary\n#*.gif   binary\n\n###############################################################################\n# diff behavior for common document formats\n# \n# Convert binary document formats to text before diffing them. This feature\n# is only available from the command line. Turn it on by uncommenting the \n# entries below.\n###############################################################################\n#*.doc   diff=astextplain\n#*.DOC   diff=astextplain\n#*.docx  diff=astextplain\n#*.DOCX  diff=astextplain\n#*.dot   diff=astextplain\n#*.DOT   diff=astextplain\n#*.pdf   diff=astextplain\n#*.PDF   diff=astextplain\n#*.rtf   diff=astextplain\n#*.RTF   diff=astextplain\n"
  },
  {
    "path": "CSharpExtractor/.gitignore",
    "content": "## Ignore Visual Studio temporary files, build results, and\n## files generated by popular Visual Studio add-ons.\n\n# User-specific files\n*.suo\n*.user\n*.userosscache\n*.sln.docstates\n\n# User-specific files (MonoDevelop/Xamarin Studio)\n*.userprefs\n\n# Build results\n[Dd]ebug/\n[Dd]ebugPublic/\n[Rr]elease/\n[Rr]eleases/\nx64/\nx86/\nbld/\n[Bb]in/\n[Oo]bj/\n[Ll]og/\n\n# Visual Studio 2015 cache/options directory\n.vs/\n# Uncomment if you have tasks that create the project's static files in wwwroot\n#wwwroot/\n\n# MSTest test Results\n[Tt]est[Rr]esult*/\n[Bb]uild[Ll]og.*\n\n# NUNIT\n*.VisualState.xml\nTestResult.xml\n\n# Build Results of an ATL Project\n[Dd]ebugPS/\n[Rr]eleasePS/\ndlldata.c\n\n# DNX\nproject.lock.json\nartifacts/\n\n*_i.c\n*_p.c\n*_i.h\n*.ilk\n*.meta\n*.obj\n*.pch\n*.pdb\n*.pgc\n*.pgd\n*.rsp\n*.sbr\n*.tlb\n*.tli\n*.tlh\n*.tmp\n*.tmp_proj\n*.log\n*.vspscc\n*.vssscc\n.builds\n*.pidb\n*.svclog\n*.scc\n\n# Chutzpah Test files\n_Chutzpah*\n\n# Visual C++ cache files\nipch/\n*.aps\n*.ncb\n*.opendb\n*.opensdf\n*.sdf\n*.cachefile\n*.VC.db\n*.VC.VC.opendb\n\n# Visual Studio profiler\n*.psess\n*.vsp\n*.vspx\n*.sap\n\n# TFS 2012 Local Workspace\n$tf/\n\n# Guidance Automation Toolkit\n*.gpState\n\n# ReSharper is a .NET coding add-in\n_ReSharper*/\n*.[Rr]e[Ss]harper\n*.DotSettings.user\n\n# JustCode is a .NET coding add-in\n.JustCode\n\n# TeamCity is a build add-in\n_TeamCity*\n\n# DotCover is a Code Coverage Tool\n*.dotCover\n\n# NCrunch\n_NCrunch_*\n.*crunch*.local.xml\nnCrunchTemp_*\n\n# MightyMoose\n*.mm.*\nAutoTest.Net/\n\n# Web workbench (sass)\n.sass-cache/\n\n# Installshield output folder\n[Ee]xpress/\n\n# DocProject is a documentation generator add-in\nDocProject/buildhelp/\nDocProject/Help/*.HxT\nDocProject/Help/*.HxC\nDocProject/Help/*.hhc\nDocProject/Help/*.hhk\nDocProject/Help/*.hhp\nDocProject/Help/Html2\nDocProject/Help/html\n\n# Click-Once directory\npublish/\n\n# Publish Web Output\n*.[Pp]ublish.xml\n*.azurePubxml\n# TODO: Comment the next line if you want to checkin your web deploy settings\n# but database connection strings (with potential passwords) will be unencrypted\n*.pubxml\n*.publishproj\n\n# Microsoft Azure Web App publish settings. 
Comment the next line if you want to\n# checkin your Azure Web App publish settings, but sensitive information contained\n# in these scripts will be unencrypted\nPublishScripts/\n\n# NuGet Packages\n*.nupkg\n# The packages folder can be ignored because of Package Restore\n**/packages/*\n# except build/, which is used as an MSBuild target.\n!**/packages/build/\n# Uncomment if necessary however generally it will be regenerated when needed\n#!**/packages/repositories.config\n# NuGet v3's project.json files produces more ignoreable files\n*.nuget.props\n*.nuget.targets\n\n# Microsoft Azure Build Output\ncsx/\n*.build.csdef\n\n# Microsoft Azure Emulator\necf/\nrcf/\n\n# Windows Store app package directories and files\nAppPackages/\nBundleArtifacts/\nPackage.StoreAssociation.xml\n_pkginfo.txt\n\n# Visual Studio cache files\n# files ending in .cache can be ignored\n*.[Cc]ache\n# but keep track of directories ending in .cache\n!*.[Cc]ache/\n\n# Others\nClientBin/\n~$*\n*~\n*.dbmdl\n*.dbproj.schemaview\n*.pfx\n*.publishsettings\nnode_modules/\norleans.codegen.cs\n\n# Since there are multiple workflows, uncomment next line to ignore bower_components\n# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)\n#bower_components/\n\n# RIA/Silverlight projects\nGenerated_Code/\n\n# Backup & report files from converting an old project file\n# to a newer Visual Studio version. Backup files are not needed,\n# because we have git ;-)\n_UpgradeReport_Files/\nBackup*/\nUpgradeLog*.XML\nUpgradeLog*.htm\n\n# SQL Server files\n*.mdf\n*.ldf\n\n# Business Intelligence projects\n*.rdl.data\n*.bim.layout\n*.bim_*.settings\n\n# Microsoft Fakes\nFakesAssemblies/\n\n# GhostDoc plugin setting file\n*.GhostDoc.xml\n\n# Node.js Tools for Visual Studio\n.ntvs_analysis.dat\n\n# Visual Studio 6 build log\n*.plg\n\n# Visual Studio 6 workspace options file\n*.opt\n\n# Visual Studio LightSwitch build output\n**/*.HTMLClient/GeneratedArtifacts\n**/*.DesktopClient/GeneratedArtifacts\n**/*.DesktopClient/ModelManifest.xml\n**/*.Server/GeneratedArtifacts\n**/*.Server/ModelManifest.xml\n_Pvt_Extensions\n\n# Paket dependency manager\n.paket/paket.exe\npaket-files/\n\n# FAKE - F# Make\n.fake/\n\n# JetBrains Rider\n.idea/\n*.sln.iml\n\n# no data\ndata/*\nbackupdata/*\n"
  },
  {
    "path": "CSharpExtractor/CSharpExtractor/.nuget/packages.config",
    "content": "﻿<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<packages>\n  <package id=\"NUnit.ConsoleRunner\" version=\"3.6.0\" />\n  <package id=\"NUnit.Extension.NUnitProjectLoader\" version=\"3.5.0\" />\n  <package id=\"NUnit.Extension.NUnitV2Driver\" version=\"3.6.0\" />\n  <package id=\"NUnit.Extension.NUnitV2ResultWriter\" version=\"3.5.0\" />\n  <package id=\"NUnit.Extension.TeamCityEventListener\" version=\"1.0.2\" />\n  <package id=\"NUnit.Extension.VSProjectLoader\" version=\"3.5.0\" />\n  <package id=\"NUnit3TestAdapter\" version=\"3.7.0\" />\n</packages>"
  },
  {
    "path": "CSharpExtractor/CSharpExtractor/CSharpExtractor.sln",
    "content": "﻿\nMicrosoft Visual Studio Solution File, Format Version 12.00\n# Visual Studio 15\nVisualStudioVersion = 15.0.28307.136\nMinimumVisualStudioVersion = 10.0.40219.1\nProject(\"{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}\") = \"Extractor\", \"Extractor\\Extractor.csproj\", \"{481EDE3F-0ED1-4CB9-814A-63A821022552}\"\nEndProject\nGlobal\n\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\n\t\tDebug|Any CPU = Debug|Any CPU\n\t\tDebug|x64 = Debug|x64\n\t\tDebug|x86 = Debug|x86\n\t\tRelease|Any CPU = Release|Any CPU\n\t\tRelease|x64 = Release|x64\n\t\tRelease|x86 = Release|x86\n\t\tRelease20|Any CPU = Release20|Any CPU\n\t\tRelease20|x64 = Release20|x64\n\t\tRelease20|x86 = Release20|x86\n\tEndGlobalSection\n\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Debug|Any CPU.ActiveCfg = Debug|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Debug|Any CPU.Build.0 = Debug|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Debug|x64.ActiveCfg = Debug|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Debug|x64.Build.0 = Debug|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Debug|x86.ActiveCfg = Debug|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Debug|x86.Build.0 = Debug|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release|Any CPU.ActiveCfg = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release|Any CPU.Build.0 = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release|x64.ActiveCfg = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release|x64.Build.0 = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release|x86.ActiveCfg = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release|x86.Build.0 = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release20|Any CPU.ActiveCfg = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release20|Any CPU.Build.0 = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release20|x64.ActiveCfg = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release20|x64.Build.0 = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release20|x86.ActiveCfg = Release|Any CPU\n\t\t{481EDE3F-0ED1-4CB9-814A-63A821022552}.Release20|x86.Build.0 = Release|Any CPU\n\tEndGlobalSection\n\tGlobalSection(SolutionProperties) = preSolution\n\t\tHideSolutionNode = FALSE\n\tEndGlobalSection\n\tGlobalSection(ExtensibilityGlobals) = postSolution\n\t\tSolutionGuid = {13A0DA89-D5D9-4E75-850E-70B9FBE88FF8}\n\tEndGlobalSection\nEndGlobal\n"
  },
  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Extractor.cs",
    "content": "﻿using Extractor.Semantics;\nusing Microsoft.CodeAnalysis;\nusing Microsoft.CodeAnalysis.CSharp;\nusing Microsoft.CodeAnalysis.CSharp.Syntax;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Diagnostics;\n\n\nnamespace Extractor\n{\n    public class Extractor\n    {\n        public const string InternalDelimiter = \"|\";\n        public const string UpTreeChar = InternalDelimiter;\n        public const string DownTreeChar = InternalDelimiter;\n        public const string MethodNameConst = \"METHOD_NAME\";\n        public static SyntaxKind[] ParentTypeToAddChildId = new SyntaxKind[] { SyntaxKind.SimpleAssignmentExpression,\n            SyntaxKind.ElementAccessExpression, SyntaxKind.SimpleMemberAccessExpression, SyntaxKind.InvocationExpression, SyntaxKind.BracketedArgumentList, SyntaxKind.ArgumentList};\n\n        private ICollection<Variable> variables;\n\n        public int LengthLimit { get; set; }\n        public int WidthLimit { get; set; }\n        public string Code { get; set; }\n        public bool ShouldHash { get; set; }\n        public int MaxContexts { get; set; }\n\n        public Extractor(string code, Options opts)\n\t\t{\n            LengthLimit = opts.MaxLength;\n            WidthLimit = opts.MaxWidth;\n            ShouldHash = !opts.NoHash;\n            MaxContexts = opts.MaxContexts;\n            Code = code;\n\t\t}\n\n\n\t\tStringBuilder builder = new StringBuilder();\n\n\t\tprivate string PathNodesToString(PathFinder.Path path)\n\t\t{\n\t\t\tbuilder.Clear();\n            var nodeTypes = path.LeftSide;\n\t\t\tif (nodeTypes.Count() > 0)\n\t\t\t{\n\t\t\t\tbuilder.Append(nodeTypes.First().Kind());\n                if (ParentTypeToAddChildId.Contains(nodeTypes.First().Parent.Kind()))\n                {\n                    builder.Append(GetTruncatedChildId(nodeTypes.First()));\n                }\n                foreach (var n in nodeTypes.Skip(1))\n                {\n                    builder.Append(UpTreeChar).Append(n.Kind());\n                    if (ParentTypeToAddChildId.Contains(n.Parent.Kind()))\n                    {\n                        builder.Append(GetTruncatedChildId(n));\n                    }\n                }\n\t\t\t\tbuilder.Append(UpTreeChar);\n\t\t\t}\n\t\t\tbuilder.Append(path.Ancesstor.Kind());\n            nodeTypes = path.RightSide;\n\t\t\tif (nodeTypes.Count() > 0)\n\t\t\t{\n\t\t\t\tbuilder.Append(DownTreeChar);\n\t\t\t\tbuilder.Append(nodeTypes.First().Kind());\n                if (ParentTypeToAddChildId.Contains(nodeTypes.First().Parent.Kind()))\n                {\n                    builder.Append(GetTruncatedChildId(nodeTypes.First()));\n                }\n                foreach (var n in nodeTypes.Skip(1))\n                {\n                    builder.Append(DownTreeChar).Append(n.Kind());\n                    if (ParentTypeToAddChildId.Contains(n.Parent.Kind()))\n                    {\n                        builder.Append(GetTruncatedChildId(n));\n                    }\n                }\n\t\t\t\t\n\t\t\t}\n\t\t\treturn builder.ToString();\n\t\t}\n\n        private int GetTruncatedChildId(SyntaxNode n)\n        {\n            var parent = n.Parent;\n            int index = parent.ChildNodes().ToList().IndexOf(n);\n            if (index > 3)\n            {\n                index = 3;\n            }\n            return index;\n        }\n\n        private string PathToString(PathFinder.Path path)\n\t\t{\n\t\t\tSyntaxNode ancesstor = 
path.Ancesstor;\n\t\t\tStringBuilder builder = new StringBuilder();\n\t\t\tbuilder.Append(path.Left.Text).Append(UpTreeChar);\n\t\t\tbuilder.Append(this.PathNodesToString(path));\n\t\t\tbuilder.Append(DownTreeChar).Append(path.Right.Text);\n\t\t\treturn builder.ToString();\n\t\t}\n\n        internal IEnumerable<PathFinder.Path> GetInternalPaths(Tree tree)\n        {\n            var finder = new PathFinder(tree, LengthLimit, WidthLimit);\n\n            var allPairs = Utilities.ReservoirSample(Utilities.WeakConcat(Utilities.Choose2(variables),\n                         variables.Select((arg) => new Tuple<Variable, Variable>(arg, arg))), MaxContexts);\n\n            //iterate over variable-variable pairs\n            foreach (Tuple<Variable, Variable> varPair in allPairs)\n            {\n                bool pathToSelf = varPair.Item1 == varPair.Item2;\n\n                foreach (var rhs in varPair.Item2.Leaves)\n                    foreach (var lhs in varPair.Item1.Leaves)\n    \t\t\t\t{\n                        \n                        if (lhs == rhs)\n    \t\t\t\t\t\tcontinue;\n\n                        PathFinder.Path path = finder.FindPath(lhs, rhs, limited: true);\n\n    \t\t\t\t\tif (path == null)\n    \t\t\t\t\t\tcontinue;\n                            \n                        yield return path;\n    \t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t    private string SplitNameUnlessEmpty(string original)\n\t    {\n\t        var subtokens = Utilities.SplitToSubtokens(original).Where(s => s.Length > 0);\n            String name = String.Join(InternalDelimiter, subtokens);\n\t        if (name.Length == 0)\n\t        {\n\t            name = Utilities.NormalizeName(original);\n\t        }\n\n\t        if (String.IsNullOrWhiteSpace(name))\n\t        {\n\t            name = \"SPACE\";\n\t        }\n\n\t        if (String.IsNullOrEmpty(name))\n\t        {\n\t            name = \"BLANK\";\n\t        }\n            if (original == Extractor.MethodNameConst)\n            {\n                name = original;\n            }\n\t        return name;\n        }\n\n\n\t    static readonly char[] removeFromComments = new char[] {' ', '/', '*', '{', '}'};\n\n        public List<String> Extract()\n\t\t{\n            var tree = new Tree(CSharpSyntaxTree.ParseText(Code).GetRoot());\n\n            IEnumerable<MethodDeclarationSyntax> methods = tree.GetRoot().DescendantNodesAndSelf().OfType<MethodDeclarationSyntax>().ToList();\n\n            List<String> results = new List<string>();\n\n            foreach(var method in methods) {\n\n                String methodName = method.Identifier.ValueText;\n                Tree methodTree = new Tree(method);\n                var subtokensMethodName = Utilities.SplitToSubtokens(methodName);\n                var tokenToVar = new Dictionary<SyntaxToken, Variable>();\n                this.variables = Variable.CreateFromMethod(methodTree).ToArray();\n\n                foreach (var variable in variables)\n                {\n                    foreach (SyntaxToken token in variable.Leaves)\n                    {\n                        tokenToVar[token] = variable;\n                    }\n                }\n\n                List<String> contexts = new List<String>();\n\n                foreach (PathFinder.Path path in GetInternalPaths(methodTree))\n                {\n                    String pathString = SplitNameUnlessEmpty(tokenToVar[path.Left].Name)\n                        + \",\" + MaybeHash(this.PathNodesToString(path))\n                        + \",\" + 
SplitNameUnlessEmpty(tokenToVar[path.Right].Name);\n\n                    Debug.WriteLine(path.Left.FullSpan+\" \"+tokenToVar[path.Left].Name+ \",\" +this.PathNodesToString(path)+ \",\" + tokenToVar[path.Right].Name+\" \"+path.Right.FullSpan);    \n                    contexts.Add(pathString);\n                }\n\n                var commentNodes = tree.GetRoot().DescendantTrivia().Where(\n                    node => node.IsKind(SyntaxKind.MultiLineCommentTrivia) || node.IsKind(SyntaxKind.SingleLineCommentTrivia) || node.IsKind(SyntaxKind.MultiLineDocumentationCommentTrivia));\n                foreach (SyntaxTrivia trivia in commentNodes)\n                {\n\n                    string commentText = trivia.ToString().Trim(removeFromComments);\n\n                    string normalizedTrivia = SplitNameUnlessEmpty(commentText);\n                    var parts = normalizedTrivia.Split('|');\n                    for (int i = 0; i < Math.Ceiling((double)parts.Length / (double)5); i++)\n                    {\n                        var batch = String.Join(\"|\", parts.Skip(i * 5).Take(5));\n                        contexts.Add(batch + \",\" + \"COMMENT\" + \",\" + batch);\n                    }\n                }\n                results.Add(String.Join(\"|\", subtokensMethodName) + \" \" + String.Join(\" \", contexts));  \n            }\n            return results;\n        }\n\n        private string MaybeHash(string v)\n        {\n            if (this.ShouldHash)\n            {\n                return v.GetHashCode().ToString();\n            } else\n            {\n                return v;\n            }\n        }\n    }\n}\n"
  },
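  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Examples/UsageExample.cs",
    "content": "// Illustrative sketch, not part of the original repository: shows how the\n// Extractor and Options types above fit together when driven directly from\n// code instead of through Program.Main. The file name, class name and the\n// inline code snippet are hypothetical.\nusing System;\n\nnamespace Extractor\n{\n    static class UsageExample\n    {\n        public static void Run()\n        {\n            string code = \"class C { int Add(int a, int b) { return a + b; } }\";\n            // The Options properties mirror the command-line flags declared in Utilities.cs.\n            var opts = new Options { MaxLength = 9, MaxWidth = 2, NoHash = true, MaxContexts = 200 };\n            var extractor = new Extractor(code, opts);\n            // Extract() returns one line per method:\n            // \"<method|name> <ctx1> <ctx2> ...\", where each context is\n            // \"leftToken,path,rightToken\".\n            foreach (string line in extractor.Extract())\n            {\n                Console.WriteLine(line);\n            }\n        }\n    }\n}\n"
  },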
  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Extractor.csproj",
    "content": "<Project Sdk=\"Microsoft.NET.Sdk\">\n\n  <PropertyGroup>\n    <OutputType>Exe</OutputType>\n    <TargetFramework>netcoreapp2.2</TargetFramework>\n    <StartupObject>Extractor.Program</StartupObject>\n  </PropertyGroup>\n\n  <ItemGroup>\n    <Compile Remove=\"Temp.cs\" />\n  </ItemGroup>\n\n  <ItemGroup>\n    <None Include=\"Temp.cs\" />\n  </ItemGroup>\n\n  <ItemGroup>\n    <PackageReference Include=\"CommandLineParser\" Version=\"2.3.0\" />\n    <PackageReference Include=\"Microsoft.CodeAnalysis\" Version=\"2.10.0\" />\n  </ItemGroup>\n\n</Project>\n"
  },
  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/PathFinder.cs",
    "content": "using Microsoft.CodeAnalysis;\nusing Microsoft.CodeAnalysis.CSharp.Syntax;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\n\nnamespace Extractor\n{\n\n    internal class PathFinder\n\t{\n\t\tinternal class Path\n\t\t{\n\t\t\tpublic SyntaxToken Left { get; }\n\t\t\tpublic List<SyntaxNode> LeftSide { get; }\n\t\t\tpublic SyntaxNode Ancesstor { get; }\n\t\t\tpublic List<SyntaxNode> RightSide { get; }\n\t\t\tpublic SyntaxToken Right { get; }\n\n\t\t\tpublic Path(SyntaxToken left, IEnumerable<SyntaxNode> leftSide, SyntaxNode ancesstor, \n\t\t\t            IEnumerable<SyntaxNode> rightSide, SyntaxToken right)\n\t\t\t{\n\t\t\t\tthis.Left = left;\n\t\t\t\tthis.LeftSide = leftSide.ToList();\n\t\t\t\tthis.Ancesstor = ancesstor;\n\t\t\t\tthis.RightSide = rightSide.ToList();\n\t\t\t\tthis.Right = right;\n\t\t\t}\n\t\t}\n\n\t\tpublic int Length { get; }\n\t\tpublic int Width { get; }\n\n\t\tTree tree;\n\n\t\tpublic PathFinder(Tree tree, int length = 7, int width = 4)\n\t\t{\n\t\t\tif (length < 1 || width < 1)\n\t\t\t\tthrow new ArgumentException(\"Width and Length params must be positive.\");\n\n\t\t\tLength = length;\n\t\t\tWidth = width;\n\t\t\tthis.tree = tree;\n\t\t}\n\n\t\tprivate int GetDepth(SyntaxNode n)\n\t\t{\n            int depth = 0;\n\t\t\twhile(n.Parent != null)\n            {\n                n = n.Parent;\n                depth++;\n            }\n            return depth;\n\t\t}\n\n\t\tpublic SyntaxNode FirstAncestor(SyntaxNode l, SyntaxNode r)\n\t\t{\n\t\t\tif (l.Equals(r))\n\t\t\t\treturn l;\n\n\t\t\tif (GetDepth(l) >= GetDepth(r))\n\t\t\t{\n\t\t\t\tl = l.Parent;\n\t\t\t}\n\t\t\telse\n\t\t\t{\n\t\t\t\tr = r.Parent;\n\t\t\t}\n\t\t\treturn FirstAncestor(l, r);\n\t\t}\n\n\t\tprivate IEnumerable<SyntaxNode> CollectPathToParent(SyntaxNode start, SyntaxNode parent)\n\t\t{\n\t\t\twhile (!start.Equals(parent))\n\t\t\t{\n\t\t\t\tyield return start;\n\t\t\t\tstart = start.Parent;\n\t\t\t}\n\t\t}\n\n\t\tinternal Path FindPath(SyntaxToken l, SyntaxToken r, bool limited = true)\n\t\t{\n\t\t\tSyntaxNode p = FirstAncestor(l.Parent, r.Parent);\n\n\t\t\t// + 2 for the distance of the leafs themselves\n\t\t\tif (GetDepth(r.Parent) + GetDepth(l.Parent) - 2 * GetDepth(p) + 2 > Length)\n\t\t\t{\n\t\t\t\treturn null;\n\t\t\t}\n\n\t\t\tvar leftSide = CollectPathToParent(l.Parent, p);\n\t\t\tvar rightSide = CollectPathToParent(r.Parent, p);\n\t\t\trightSide = rightSide.Reverse();\n\n\t\t\tList<SyntaxNode> widthCheck = p.ChildNodes().ToList();\n\t\t\tif (limited && leftSide.Count() != 0\n\t\t\t    && rightSide.Count() != 0)\n\t\t\t{\n\t\t\t\tint indexOfLeft = widthCheck.IndexOf(leftSide.Last());\n\t\t\t\tint indexOfRight = widthCheck.IndexOf(rightSide.First());\n\t\t\t\tif (Math.Abs(indexOfLeft - indexOfRight) >= Width)\n\t\t\t\t{\n\t\t\t\t\treturn null;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn new Path(l, leftSide, p, rightSide, r);\n\t\t}\n\t}\n}\n"
  },
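  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Examples/PathSketch.cs",
    "content": "// Illustrative sketch, not part of the original repository: reproduces the\n// up-then-down walk that PathFinder.FindPath performs, using only public\n// Roslyn APIs. File, class and snippet are hypothetical; child ids and the\n// length/width limits of the real PathFinder are omitted for brevity.\nusing System;\nusing System.Linq;\nusing Microsoft.CodeAnalysis;\nusing Microsoft.CodeAnalysis.CSharp;\n\nnamespace Extractor\n{\n    static class PathSketch\n    {\n        public static void Run()\n        {\n            SyntaxNode root = CSharpSyntaxTree.ParseText(\"class C { int F(int x) { return x + 1; } }\").GetRoot();\n            // The two occurrences of \"x\": the parameter and its use in \"x + 1\".\n            SyntaxToken[] xs = root.DescendantTokens()\n                .Where(t => t.IsKind(SyntaxKind.IdentifierToken) && t.Text == \"x\").ToArray();\n            SyntaxNode left = xs[0].Parent, right = xs[1].Parent;\n            // Lowest common ancestor: the first shared node of the two ancestor\n            // chains (reference equality, like PathFinder.FirstAncestor).\n            SyntaxNode lca = left.AncestorsAndSelf().Intersect(right.AncestorsAndSelf()).First();\n            var up = left.AncestorsAndSelf().TakeWhile(n => n != lca).Select(n => n.Kind().ToString());\n            var down = right.AncestorsAndSelf().TakeWhile(n => n != lca).Select(n => n.Kind().ToString()).Reverse();\n            // Prints: Parameter|ParameterList|MethodDeclaration|Block|ReturnStatement|AddExpression|IdentifierName\n            Console.WriteLine(string.Join(\"|\", up) + \"|\" + lca.Kind() + \"|\" + string.Join(\"|\", down));\n        }\n    }\n}\n"
  },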
  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Program.cs",
    "content": "﻿using CommandLine;\nusing CommandLine.Text;\nusing System;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.Linq;\n\nnamespace Extractor\n{\n    class Program\n    {\n        static List<String> ExtractSingleFile(string filename, Options opts)\n        {\n            string data = File.ReadAllText(filename);\n            var extractor = new Extractor(data, opts);\n            List<String> result = extractor.Extract();\n\n            return result;\n        }\n\n        static void Main(string[] args)\n        {\n            Options options = new Options();\n            Parser.Default.ParseArguments<Options>(args)\n                .WithParsed(opt => options = opt)\n                .WithNotParsed(errors =>\n                {\n                    Console.WriteLine(errors);\n                    return;\n                });\n\n            string path = options.Path;\n            string[] files;\n            if (Directory.Exists(path))\n            {\n                files = Directory.GetFiles(path, \"*.cs\", SearchOption.AllDirectories);\n            }\n            else\n            {\n                files = new string[] { path };\n            }\n\n            IEnumerable<string> results = null;\n\n            results = files.AsParallel().WithDegreeOfParallelism(options.Threads).SelectMany(filename => ExtractSingleFile(filename, options));\n\n            using (StreamWriter sw = new StreamWriter(options.OFileName, append: true))\n            {\n                foreach (var res in results)\n                {\n                    sw.WriteLine(res);\n                }\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Properties/launchSettings.json",
    "content": "{\n  \"profiles\": {\n    \"Extractor\": {\n      \"commandName\": \"Project\",\n      \"commandLineArgs\": \"--path C:\\\\Users\\\\urial\\\\Source\\\\Repos\\\\CSharpExtractor\\\\CSharpExtractor\\\\Extractor\\\\bin\\\\ --no_hash\"\n    }\n  }\n}"
  },
  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Temp.cs",
    "content": "﻿namespace Extractor\n{\n    class Temp\n    {\n        class NestedClass\n        {\n            void fooBar()\n            {\n                a.b = c;\n            }\n        }\n    }\n}\n"
  },
  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Tree/Tree.cs",
    "content": "﻿using System;\nusing System.Collections.Generic;\nusing System.IO;\nusing System.Linq;\nusing System.Text;\nusing Microsoft.CodeAnalysis;\nusing Microsoft.CodeAnalysis.CSharp;\nusing Microsoft.CodeAnalysis.CSharp.Syntax;\n\nnamespace Extractor\n{\n    public class Tree\n    {\n        public const string DummyClass = \"IgnoreDummyClass\";\n        public const string DummyMethodName = \"IgnoreDummyMethod\";\n        public const string DummyType = \"IgnoreDummyType\";\n        internal static readonly SyntaxKind[] literals = { SyntaxKind.NumericLiteralToken, SyntaxKind.StringLiteralToken, SyntaxKind.CharacterLiteralToken };\n\n        internal static readonly HashSet<SyntaxKind> identifiers = new HashSet<SyntaxKind>(new SyntaxKind[] { SyntaxKind.IdentifierToken }); //, SyntaxKind.VoidKeyword, SyntaxKind.StringKeyword });\n        internal static readonly HashSet<SyntaxKind> keywords = new HashSet<SyntaxKind>(new SyntaxKind[] { SyntaxKind.RefKeyword, SyntaxKind.OutKeyword, SyntaxKind.ConstKeyword });\n        internal static readonly HashSet<SyntaxKind> declarations = new HashSet<SyntaxKind>(new SyntaxKind[] { SyntaxKind.VariableDeclarator, SyntaxKind.Parameter, SyntaxKind.CatchDeclaration, SyntaxKind.ForEachStatement });\n        internal static readonly HashSet<SyntaxKind> memberAccesses = new HashSet<SyntaxKind>(new SyntaxKind[] { SyntaxKind.SimpleMemberAccessExpression, SyntaxKind.PointerMemberAccessExpression });\n        internal static readonly HashSet<SyntaxKind> scopeEnders = new HashSet<SyntaxKind>(\n            new SyntaxKind[]{ SyntaxKind.Block, SyntaxKind.ForStatement, SyntaxKind.MethodDeclaration,\n            SyntaxKind.ForEachStatement, SyntaxKind.CatchClause, SyntaxKind.SwitchSection, SyntaxKind.UsingStatement });\n\n        internal static readonly HashSet<SyntaxKind> lambdaScopeStarters = new HashSet<SyntaxKind>(\n            new SyntaxKind[]{ SyntaxKind.AnonymousMethodExpression,\n            SyntaxKind.SimpleLambdaExpression, SyntaxKind.ParenthesizedLambdaExpression });\n\n        public static bool IsScopeEnder(SyntaxNode node)\n        {\n            return Tree.scopeEnders.Contains(node.Kind());\n        }\n\n        class TreeBuilderWalker : CSharpSyntaxWalker\n        {\n            Dictionary<SyntaxNode, Node> nodes;\n            HashSet<SyntaxNode> visitedNodes;\n            List<SyntaxNode> Desc;\n            List<SyntaxToken> Tokens;\n            Dictionary<SyntaxToken, Leaf> tokens;\n\n            internal TreeBuilderWalker(Dictionary<SyntaxNode, Node> nodes, Dictionary<SyntaxToken, Leaf> tokens)\n            {\n                visitedNodes = new HashSet<SyntaxNode>();\n                this.nodes = nodes;\n                this.tokens = tokens;\n            }\n\n            public override\n            void Visit(SyntaxNode node)\n            {\n                visitedNodes.Add(node);\n\n                base.Visit(node);\n\n                visitedNodes.Remove(node);\n\n                Desc = new List<SyntaxNode>();\n                Tokens = new List<SyntaxToken>();\n                foreach (var c in node.ChildNodes())\n                {\n                    if (!nodes.ContainsKey(c))\n                    {\n                        continue;\n                    }\n                    Desc.AddRange(nodes[c].Descendents);\n                    Desc.Add(c);\n                    Tokens.AddRange(nodes[c].Leaves);\n                }\n                foreach (var token in node.ChildTokens())\n                {\n                    if 
(Leaf.IsLeafToken(token))\n                    {\n                        tokens[token] = new Leaf(nodes, token);\n                        Tokens.Add(token);\n                    }\n                }\n\n                Node res = new Node(This: node,\n                                       Ancestors: new HashSet<SyntaxNode>(visitedNodes),\n                                       Descendents: Desc.ToArray(),\n                                       Leaves: Tokens.ToArray(),\n                                       Kind: node.Kind());\n                nodes[node] = res;\n\n            }\n        }\n\n        internal SyntaxNode GetRoot()\n        {\n            return tree;\n        }\n\n        SyntaxNode tree;\n        internal Dictionary<SyntaxNode, Node> nodes = new Dictionary<SyntaxNode, Node>();\n        internal Dictionary<SyntaxToken, Leaf> leaves = new Dictionary<SyntaxToken, Leaf>();\n\n        public Tree(SyntaxNode syntaxTree)\n        {\n            this.tree = syntaxTree;\n\n            /*if (this.tree.ChildNodes().ToList().Count() == 0)\n            {\n                this.tree = CSharpSyntaxTree.ParseText($\"private {DummyType} {DummyMethodName}() {{ {code} }}\");\n            }*/\n            new TreeBuilderWalker(nodes, leaves).Visit(this.tree);\n\n            List<SyntaxTrivia> commentNodes = tree.DescendantTrivia().Where(\n                node => node.IsKind(SyntaxKind.MultiLineCommentTrivia) || node.IsKind(SyntaxKind.SingleLineCommentTrivia)).ToList();\n\n        }\n    }\n\n    public class Node\n    {\n        public Node(SyntaxNode This, HashSet<SyntaxNode> Ancestors, SyntaxNode[] Descendents,\n                    SyntaxToken[] Leaves, SyntaxKind Kind)\n        {\n            this.This = This;\n            this.Ancestors = Ancestors;\n            this.Descendents = Descendents;\n            this.AncestorsAndSelf = new HashSet<SyntaxNode>(Ancestors);\n            this.AncestorsAndSelf.Add(This);\n            this.Leaves = Leaves;\n            this.Depth = Depth;\n            this.Kind = Kind;\n            this.KindName = Kind.ToString();\n        }\n\n        public SyntaxNode This { get; }\n\n        public HashSet<SyntaxNode> Ancestors { get; }\n\n        public HashSet<SyntaxNode> AncestorsAndSelf { get; }\n\n        public SyntaxNode[] Descendents { get; }\n\n        public SyntaxToken[] Leaves { get; }\n\n        public SyntaxKind Kind { get; }\n\n        public string KindName { get; }\n\n        public int Depth { get; }\n\n        public override bool Equals(object obj)\n        {\n            var item = obj as Node;\n\n            if (item == null)\n            {\n                return false;\n            }\n\n            return this.This.Equals(item.This);\n        }\n\n        public override int GetHashCode()\n        {\n            return this.This.GetHashCode();\n        }\n    }\n\n    public class Leaf\n    {\n        internal static bool IsLeafToken(SyntaxToken token)\n        {\n            if (token.Text.Equals(\"var\") && token.IsKind(SyntaxKind.IdentifierToken)\n                && token.Parent.IsKind(SyntaxKind.IdentifierName) && token.Parent.Parent.IsKind(SyntaxKind.VariableDeclaration)\n                && token.Parent.Parent.Parent.IsKind(SyntaxKind.LocalDeclarationStatement))\n            {\n                return false;\n            }\n\n            if (token.ValueText == Tree.DummyMethodName || token.ValueText == Tree.DummyType)\n            {\n                return false;\n            }\n\n            return 
Tree.identifiers.Contains(token.Kind()) || Tree.literals.Contains(token.Kind()) || token.Parent.Kind() == SyntaxKind.PredefinedType;\n        }\n\n        public SyntaxToken token { get; }\n        public SyntaxKind Kind { get; }\n        public string KindName { get; }\n        public string Text { get; set; }\n        public bool IsConst { get; }\n        public string VariableName { get; }\n\n        public Leaf(Dictionary<SyntaxNode, Node> nodes, SyntaxToken token)\n        {\n            this.token = token;\n            Kind = token.Kind();\n            KindName = Kind.ToString();\n            IsConst = !(Tree.identifiers.Contains(Kind) && Tree.declarations.Contains(token.Parent.Kind()));\n\n            Text = token.ValueText;\n            SyntaxNode node = token.Parent.Parent;\n            SyntaxNode current = token.Parent;\n            VariableName = Text;\n        }\n    }\n\n    public class SyntaxViewer\n    {\n        private string ToDot(SyntaxTree tree)\n        {\n            List<SyntaxNode> nodes = tree.GetRoot().DescendantNodesAndSelf().ToList();\n            SyntaxToken[] tokens = tree.GetRoot().DescendantTokens().ToArray();\n\n            string[] tokenStrings = tokens.Select((arg) => arg.Kind().ToString() + \"-\" + arg.ToString()).ToArray();\n            string[] nodeStrings = nodes.Select((arg) => arg.Kind().ToString()).ToArray();\n\n            Dictionary<string, int> counts = new Dictionary<string, int>();\n            Dictionary<int, string> nodeNames = new Dictionary<int, string>();\n            IEnumerable<string> allItems = nodeStrings.Concat(tokenStrings);\n            int i = 0;\n\n            foreach (string name in allItems)\n            {\n                if (!counts.ContainsKey(name))\n                    counts[name] = 0;\n                counts[name] += 1;\n\n                nodeNames[i] = name + counts[name].ToString();\n                i++;\n            }\n\n            StringBuilder builder = new StringBuilder();\n            builder.AppendLine(\"digraph G {\");\n\n            // vertexes\n            for (i = 0; i < allItems.Count(); i++)\n            {\n                builder.AppendFormat(\"\\\"{0}\\\" ;\\n\", nodeNames[i]);\n            }\n\n            builder.AppendLine();\n\n            // edges\n            for (i = 1; i < nodes.Count(); i++)\n            {\n                builder.AppendFormat(\"\\\"{0}\\\"->\\\"{1}\\\" [];\\n\", nodeNames[nodes.IndexOf(nodes[i].Parent)], nodeNames[i]);\n            }\n\n            for (i = 0; i < tokens.Count(); i++)\n            {\n                builder.AppendFormat(\"\\\"{0}\\\"->\\\"{1}\\\" [];\\n\", nodeNames[nodes.IndexOf(tokens[i].Parent)], nodeNames[i + nodes.Count()]);\n            }\n\n            builder.AppendLine(\"}\");\n            return builder.ToString();\n        }\n\n        public SyntaxViewer(SyntaxTree tree, string path = \"out.ong\")\n        {\n\n            string dotData = ToDot(tree);\n            \n            File.WriteAllText(\"out.dot\", dotData);\n        }\n    }\n}\n"
  },
  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Utilities.cs",
    "content": "﻿using CommandLine;\nusing System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing System.Text;\nusing System.Diagnostics;\nusing System.Text.RegularExpressions;\n\nnamespace Extractor\n{\n    public class Options\n    {\n        [Option('t', \"threads\", Default = 1, HelpText = \"How many threads to use <1>\")]\n        public int Threads { get; set; }\n\n        [Option('p', \"path\", Default = \"./data/\", HelpText = \"Where to find code files. <.>\")]\n        public string Path { get; set; }\n\n        [Option('l', \"max_length\", Default = 9, HelpText = \"Max path length\")]\n        public int MaxLength { get; set; }\n\n        [Option('l', \"max_width\", Default = 2, HelpText = \"Max path length\")]\n        public int MaxWidth { get; set; }\n\n        [Option('o', \"ofile_name\", Default = \"test.txt\", HelpText = \"Output file name\")]\n        public String OFileName { get; set; }\n\n        [Option('h', \"no_hash\", Default = true, HelpText = \"When enabled, prints the whole path strings (not hashed)\")]\n        public Boolean NoHash { get; set; }\n\n        [Option('l', \"max_contexts\", Default = 30000, HelpText = \"Max number of path contexts to sample. Affects only very large snippets\")]\n        public int MaxContexts { get; set; }\n    }\n\n    public static class Utilities\n\t{\n\t    public static String[] NumbericLiteralsToKeep = new String[] { \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"10\" };\n        public static IEnumerable<Tuple<T, T>> Choose2<T>(IEnumerable<T> enumerable)\n\t\t{\n\t\t\tint index = 0;\n\n\t\t\tforeach (var e in enumerable)\n\t\t\t{\n\t\t\t\t++index;\n\t\t\t\tforeach (var t in enumerable.Skip(index))\n\t\t\t\t\tyield return Tuple.Create(e, t);\n\t\t\t}\n\t\t}\n\n        /// <summary>\n        /// Sample uniform randomly numSamples from an enumerable, using reservoir sampling.\n        /// See https://en.wikipedia.org/wiki/Reservoir_sampling\n        /// </summary>\n        /// <typeparam name=\"T\"></typeparam>\n        /// <param name=\"input\"></param>\n        /// <param name=\"numSamples\"></param>\n        /// <returns></returns>\n        public static IEnumerable<TSource> ReservoirSample<TSource>(this IEnumerable<TSource> input, int numSamples)\n        {\n            var rng = new Random();\n            var sampledElements = new List<TSource>(numSamples);\n            int seenElementCount = 0;\n            foreach (var element in input)\n            {\n                seenElementCount++;\n                if (sampledElements.Count < numSamples)\n                {\n                    sampledElements.Add(element);\n                }\n                else\n                {\n                    int position = rng.Next(seenElementCount);\n                    if (position < numSamples)\n                    {\n                        sampledElements[position] = element;\n                    }\n                }\n            }\n            Debug.Assert(sampledElements.Count <= numSamples);\n            return sampledElements;\n        }\n\n\n        public static IEnumerable<T> WeakConcat<T>(IEnumerable<T> enumerable1, IEnumerable<T> enumerable2)\n\t\t{\n\t\t\tforeach (T t in enumerable1)\n\t\t\t\tyield return t;\n\t\t\tforeach (T t in enumerable2)\n\t\t\t\tyield return t;\n\t\t}\n\n        public static IEnumerable<String> SplitToSubtokens(String name)\n        {\n            return Regex.Split(name.Trim(), \"(?<=[a-z])(?=[A-Z])|_|[0-9]|(?<=[A-Z])(?=[A-Z][a-z])|\\\\s+\")\n                .Where(s => s.Length > 
0)\n                .Select(s => NormalizeName(s))\n                .Where(s => s.Length > 0);\n        }\n\n        private static Regex Whitespaces = new Regex(@\"\\s\");\n        private static Regex NonAlphabetic = new Regex(\"[^A-Za-z]\");\n\n        public static String NormalizeName(string s)\n        {\n            String partiallyNormalized = s.ToLowerInvariant()\n                .Replace(\"\\\\\\\\n\", String.Empty)\n                .Replace(\"[\\\"',]\", String.Empty);\n\n            partiallyNormalized = Whitespaces.Replace(partiallyNormalized, \"\");\n            partiallyNormalized = Encoding.ASCII.GetString(\n                Encoding.Convert(\n                    Encoding.UTF8,\n                    Encoding.GetEncoding(\n                        Encoding.ASCII.EncodingName,\n                        new EncoderReplacementFallback(string.Empty),\n                        new DecoderExceptionFallback()\n                    ),\n                    Encoding.UTF8.GetBytes(partiallyNormalized)\n                )\n            );\n\n            if (partiallyNormalized.Contains('\\n'))\n            {\n                partiallyNormalized = partiallyNormalized.Replace('\\n', 'N');\n            }\n            if (partiallyNormalized.Contains('\\r'))\n            {\n                partiallyNormalized = partiallyNormalized.Replace('\\r', 'R');\n            }\n            if (partiallyNormalized.Contains(','))\n            {\n                partiallyNormalized = partiallyNormalized.Replace(',', 'C');\n            }\n\n            String completelyNormalized = NonAlphabetic.Replace(partiallyNormalized, String.Empty);\n            if (completelyNormalized.Length == 0)\n            {\n                if (Regex.IsMatch(partiallyNormalized, @\"^\\d+$\"))\n                {\n                    if (NumbericLiteralsToKeep.Contains(partiallyNormalized))\n                    {\n                        return partiallyNormalized;\n                    }\n                    else\n                    {\n                        return \"NUM\";\n                    }\n                }\n\n                return String.Empty;\n            }\n            return completelyNormalized;\n            \n        }\n    }\n}\n"
  },
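  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Examples/UtilitiesExample.cs",
    "content": "// Illustrative sketch, not part of the original repository: demonstrates the\n// helpers defined in Utilities.cs. File and class names are hypothetical.\nusing System;\nusing System.Linq;\n\nnamespace Extractor\n{\n    static class UtilitiesExample\n    {\n        public static void Run()\n        {\n            // Subtokenization: camelCase boundaries, underscores and digits all split.\n            Console.WriteLine(string.Join(\"|\", Utilities.SplitToSubtokens(\"getItemsById\")));\n            // -> get|items|by|id\n\n            // Normalization keeps only a small whitelist of numeric literals.\n            Console.WriteLine(Utilities.NormalizeName(\"3\"));  // -> 3\n            Console.WriteLine(Utilities.NormalizeName(\"42\")); // -> NUM\n\n            // Reservoir sampling: a uniform sample of at most k items in one pass.\n            var sample = Enumerable.Range(0, 1000).ReservoirSample(5);\n            Console.WriteLine(string.Join(\",\", sample));\n        }\n    }\n}\n"
  },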
  {
    "path": "CSharpExtractor/CSharpExtractor/Extractor/Variable.cs",
    "content": "﻿using System;\nusing System.Collections.Generic;\nusing System.Linq;\nusing Microsoft.CodeAnalysis;\nusing Microsoft.CodeAnalysis.CSharp.Syntax;\n\nnamespace Extractor\n{\n\tnamespace Semantics\n\t{\n\t\tpublic class Variable\n\t\t{\n\t\t\tTree tree;\n\n\t\t\tpublic string Name { get; }\n\t\t\tprivate HashSet<SyntaxToken> leaves;\n\t\t\tpublic HashSet<SyntaxToken> Leaves\n\t\t\t{\n\t\t\t\tget\n\t\t\t\t{\n\t\t\t\t\treturn leaves;\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tprivate Nullable<bool> constant;\n\t\t\tpublic bool Const\n\t\t\t{\n\t\t\t\tget\n\t\t\t\t{\n\t\t\t\t\treturn constant.Value;\n\t\t\t\t}\n\t\t\t}\n\n\n\t\t\tprivate Variable(string name, SyntaxToken[] leaves, Tree tree)\n\t\t\t{\n\t\t\t\tthis.tree = tree;\n\t\t\t\tthis.Name = name;\n\t\t\t\tthis.leaves = new HashSet<SyntaxToken>(leaves);\n\n\n\t\t\t\tconstant = true;\n\t\t\t\tforeach (var leaf in leaves)\n\t\t\t\t{\n\t\t\t\t\tif (!tree.leaves[leaf].IsConst)\n\t\t\t\t\t{\n\t\t\t\t\t\tconstant = false;\n\t\t\t\t\t\t// If not constant the it is a decleration token\n\t\t\t\t\t\tbreak;\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tpublic override int GetHashCode()\n\t\t\t{\n\t\t\t\treturn this.Name.GetHashCode();\n\t\t\t}\n\n\t\t\tpublic bool IsLiteral()\n\t\t\t{\n\t\t\t\treturn Tree.literals.Contains(tree.leaves[Leaves.First()].Kind);\n\t\t\t}\n\n            internal static Boolean isMethodName(SyntaxToken token)\n            {\n                return token.Parent.IsKind(Microsoft.CodeAnalysis.CSharp.SyntaxKind.MethodDeclaration) \n                    && token.IsKind(Microsoft.CodeAnalysis.CSharp.SyntaxKind.IdentifierToken);\n            }\n\n\t\t\t// Create a variable for each variable in scope from tokens while splitting identically named but differently scoped vars.\n\t\t\tinternal static IEnumerable<Variable> CreateFromMethod(Tree methodTree)\n\t\t\t{\n\t\t\t    var root = methodTree.nodes[methodTree.GetRoot()];\n\t\t\t\tvar leaves = root.Leaves.ToArray();\n\t\t\t\tDictionary<SyntaxToken, string> tokenToName = new Dictionary<SyntaxToken, string>();\n\t\t\t\tDictionary<string, List<SyntaxToken>> nameToTokens = new Dictionary<string, List<SyntaxToken>>();\n\t\t\t\tforeach (SyntaxToken token in root.Leaves)\n\t\t\t\t{\n\t\t\t\t\tstring name = methodTree.leaves[token].VariableName;\n                    if (isMethodName(token))\n                    {\n                        name = Extractor.MethodNameConst;\n                    }\n                    tokenToName[token] = name;\n\t\t\t\t\tif (!nameToTokens.ContainsKey(name))\n\t\t\t\t\t\tnameToTokens[name] = new List<SyntaxToken>();\n\t\t\t\t\tnameToTokens[name].Add(token);\n\t\t\t\t}\n\n                List<Variable> results = new List<Variable>();\n\n                foreach (SyntaxToken leaf in leaves)\n\t\t\t\t{\n\t\t\t\t\tstring name = tokenToName[leaf];\n\t\t\t\t\tSyntaxToken[] syntaxTokens = nameToTokens[name].ToArray();\n                    var v = new Variable(name, syntaxTokens, methodTree);\n\n                    //check if exists\n                    var matches = results.Where(p => p.Name == name).ToList();\n                    bool alreadyExists = (matches.Count != 0);\n                    if (!alreadyExists)\n                    {\n                        results.Add(v);\n                    }\n                }\n\n                return results;\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "CSharpExtractor/extract.py",
    "content": "#!/usr/bin/python\n\nimport itertools\nimport multiprocessing\nimport os\nimport sys\nimport shutil\nimport subprocess\nfrom threading import Timer\nimport sys\nfrom argparse import ArgumentParser\nfrom subprocess import Popen, PIPE, STDOUT, call\n\n\n\ndef get_immediate_subdirectories(a_dir):\n    return [(os.path.join(a_dir, name)) for name in os.listdir(a_dir)\n            if os.path.isdir(os.path.join(a_dir, name))]\n\n\nTMP_DIR = \"\"\n\ndef ParallelExtractDir(args, dir):\n    ExtractFeaturesForDir(args, dir, \"\")\n\n\ndef ExtractFeaturesForDir(args, dir, prefix):\n    command = ['dotnet', 'run', '--project', args.csproj,\n               '--max_length', str(args.max_path_length), '--max_width', str(args.max_path_width),\n               '--path', dir, '--threads', str(args.num_threads), '--ofile_name', str(args.ofile_name)]\n\n\n    # print command\n    # os.system(command)\n    kill = lambda process: process.kill()\n    sleeper = subprocess.Popen(command, stderr=subprocess.PIPE)\n    timer = Timer(600000, kill, [sleeper])\n\n    try:\n        timer.start()\n        _, stderr = sleeper.communicate()\n    finally:\n        timer.cancel()\n\n    if sleeper.poll() == 0:\n        if len(stderr) > 0:\n            print(sys.stderr, stderr)\n    else:\n        print(sys.stderr, 'dir: ' + str(dir) + ' was not completed in time')\n        failed = True\n        subdirs = get_immediate_subdirectories(dir)\n        for subdir in subdirs:\n            ExtractFeaturesForDir(args, subdir, prefix + dir.split('/')[-1] + '_')\n    if failed:\n        if os.path.exists(str(args.ofile_name)):\n            os.remove(str(args.ofile_name))\n\ndef ExtractFeaturesForDirsList(args, dirs):\n    global TMP_DIR\n    TMP_DIR = \"./tmp/feature_extractor%d/\" % (os.getpid())\n    if os.path.exists(TMP_DIR):\n        shutil.rmtree(TMP_DIR, ignore_errors=True)\n    os.makedirs(TMP_DIR)\n    try:\n        p = multiprocessing.Pool(4)\n        p.starmap(ParallelExtractDir, zip(itertools.repeat(args), dirs))\n        #for dir in dirs:\n        #    ExtractFeaturesForDir(args, dir, '')\n        output_files = os.listdir(TMP_DIR)\n        for f in output_files:\n            os.system(\"cat %s/%s\" % (TMP_DIR, f))\n    finally:\n        shutil.rmtree(TMP_DIR, ignore_errors=True)\n\n\nif __name__ == '__main__':\n\n    parser = ArgumentParser()\n    parser.add_argument(\"-maxlen\", \"--max_path_length\", dest=\"max_path_length\", required=False, default=8)\n    parser.add_argument(\"-maxwidth\", \"--max_path_width\", dest=\"max_path_width\", required=False, default=2)\n    parser.add_argument(\"-threads\", \"--num_threads\", dest=\"num_threads\", required=False, default=64)\n    parser.add_argument(\"--csproj\", dest=\"csproj\", required=True)\n    parser.add_argument(\"-dir\", \"--dir\", dest=\"dir\", required=False)\n    parser.add_argument(\"-ofile_name\", \"--ofile_name\", dest=\"ofile_name\", required=True)\n    args = parser.parse_args()\n\n    if args.dir is not None:\n        subdirs = get_immediate_subdirectories(args.dir)\n        to_extract = subdirs\n        if len(subdirs) == 0:\n            to_extract = [args.dir.rstrip('/')]\n        ExtractFeaturesForDirsList(args, to_extract)\n"
  },
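  {
    "path": "CSharpExtractor/extract_example.py",
    "content": "#!/usr/bin/python\n# Illustrative sketch, not part of the original repository: a minimal,\n# hypothetical driver showing a typical invocation of extract.py. The data\n# directory is an assumption; the flags are the ones extract.py declares.\nimport subprocess\n\nsubprocess.check_call([\n    'python', 'extract.py',\n    '--csproj', 'CSharpExtractor/Extractor/Extractor.csproj',  # required\n    '--dir', './data/',               # immediate subdirectories are extracted in parallel\n    '--ofile_name', 'extracted.txt',  # required; each extractor process appends here\n    '--max_path_length', '8',\n    '--max_path_width', '2',\n])\n"
  },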
  {
    "path": "Input.java",
    "content": "public String getName() {\n\t\treturn name;\n\t}"
  },
  {
    "path": "JavaExtractor/JPredict/.classpath",
    "content": "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<classpath>\n\t<classpathentry excluding=\"Test.java\" kind=\"src\" output=\"target/classes\" path=\"src/main/java\">\n\t\t<attributes>\n\t\t\t<attribute name=\"optional\" value=\"true\"/>\n\t\t\t<attribute name=\"maven.pomderived\" value=\"true\"/>\n\t\t</attributes>\n\t</classpathentry>\n\t<classpathentry kind=\"con\" path=\"org.eclipse.jdt.launching.JRE_CONTAINER/org.eclipse.jdt.internal.debug.ui.launcher.StandardVMType/JavaSE-1.8\">\n\t\t<attributes>\n\t\t\t<attribute name=\"maven.pomderived\" value=\"true\"/>\n\t\t</attributes>\n\t</classpathentry>\n\t<classpathentry kind=\"con\" path=\"org.eclipse.m2e.MAVEN2_CLASSPATH_CONTAINER\">\n\t\t<attributes>\n\t\t\t<attribute name=\"maven.pomderived\" value=\"true\"/>\n\t\t</attributes>\n\t</classpathentry>\n\t<classpathentry kind=\"con\" path=\"org.eclipse.jdt.junit.JUNIT_CONTAINER/4\"/>\n\t<classpathentry kind=\"src\" output=\"target/test-classes\" path=\"src/test/java\">\n\t\t<attributes>\n\t\t\t<attribute name=\"optional\" value=\"true\"/>\n\t\t\t<attribute name=\"maven.pomderived\" value=\"true\"/>\n\t\t</attributes>\n\t</classpathentry>\n\t<classpathentry kind=\"output\" path=\"target/classes\"/>\n</classpath>\n"
  },
  {
    "path": "JavaExtractor/JPredict/.gitignore",
    "content": "/target/"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/App.java",
    "content": "package JavaExtractor;\n\nimport JavaExtractor.Common.CommandLineValues;\nimport org.kohsuke.args4j.CmdLineException;\n\nimport java.io.IOException;\nimport java.nio.file.Files;\nimport java.nio.file.Paths;\nimport java.util.LinkedList;\nimport java.util.List;\nimport java.util.concurrent.ExecutionException;\nimport java.util.concurrent.Executors;\nimport java.util.concurrent.Future;\nimport java.util.concurrent.ThreadPoolExecutor;\n\npublic class App {\n    private static CommandLineValues s_CommandLineValues;\n\n    public static void main(String[] args) {\n        try {\n            s_CommandLineValues = new CommandLineValues(args);\n        } catch (CmdLineException e) {\n            e.printStackTrace();\n            return;\n        }\n\n        if (s_CommandLineValues.File != null) {\n            ExtractFeaturesTask extractFeaturesTask = new ExtractFeaturesTask(s_CommandLineValues,\n                    s_CommandLineValues.File.toPath());\n            extractFeaturesTask.processFile();\n        } else if (s_CommandLineValues.Dir != null) {\n            extractDir();\n        }\n    }\n\n    private static void extractDir() {\n        ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(s_CommandLineValues.NumThreads);\n        LinkedList<ExtractFeaturesTask> tasks = new LinkedList<>();\n        try {\n            Files.walk(Paths.get(s_CommandLineValues.Dir)).filter(Files::isRegularFile)\n                    .filter(p -> p.toString().toLowerCase().endsWith(\".java\")).forEach(f -> {\n                ExtractFeaturesTask task = new ExtractFeaturesTask(s_CommandLineValues, f);\n                tasks.add(task);\n            });\n        } catch (IOException e) {\n            e.printStackTrace();\n            return;\n        }\n        List<Future<Void>> tasksResults = null;\n        try {\n            tasksResults = executor.invokeAll(tasks);\n        } catch (InterruptedException e) {\n            e.printStackTrace();\n        } finally {\n            executor.shutdown();\n        }\n        tasksResults.forEach(f -> {\n            try {\n                f.get();\n            } catch (InterruptedException | ExecutionException e) {\n                e.printStackTrace();\n            }\n        });\n    }\n}\n"
  },
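  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/AppExample.java",
    "content": "package JavaExtractor;\n\n// Illustrative sketch, not part of the original repository: drives the\n// extractor in-process through App.main. The class name is hypothetical;\n// the flags are the ones declared in CommandLineValues.java, and Input.java\n// is the sample file shipped at the repository root.\npublic class AppExample {\n    public static void main(String[] args) {\n        App.main(new String[]{\n                \"--file\", \"Input.java\",\n                \"--max_path_length\", \"8\",\n                \"--max_path_width\", \"2\"\n        });\n    }\n}\n"
  },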
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/Common/CommandLineValues.java",
    "content": "package JavaExtractor.Common;\n\nimport org.kohsuke.args4j.CmdLineException;\nimport org.kohsuke.args4j.CmdLineParser;\nimport org.kohsuke.args4j.Option;\n\nimport java.io.File;\n\n/**\n * This class handles the programs arguments.\n */\npublic class CommandLineValues {\n    @Option(name = \"--file\", required = false)\n    public File File = null;\n\n    @Option(name = \"--dir\", required = false, forbids = \"--file\")\n    public String Dir = null;\n\n    @Option(name = \"--max_path_length\", required = true)\n    public int MaxPathLength;\n\n    @Option(name = \"--max_path_width\", required = true)\n    public int MaxPathWidth;\n\n    @Option(name = \"--num_threads\", required = false)\n    public int NumThreads = 64;\n\n    @Option(name = \"--min_code_len\", required = false)\n    public int MinCodeLength = 1;\n\n    @Option(name = \"--max_code_len\", required = false)\n    public int MaxCodeLength = -1;\n\n    @Option(name = \"--max_file_len\", required = false)\n    public int MaxFileLength = -1;\n\n    @Option(name = \"--pretty_print\", required = false)\n    public boolean PrettyPrint = false;\n\n    @Option(name = \"--max_child_id\", required = false)\n    public int MaxChildId = 3;\n\n    @Option(name = \"--json_output\", required = false)\n    public boolean JsonOutput = false;\n\n    public CommandLineValues(String... args) throws CmdLineException {\n        CmdLineParser parser = new CmdLineParser(this);\n        try {\n            parser.parseArgument(args);\n        } catch (CmdLineException e) {\n            System.err.println(e.getMessage());\n            parser.printUsage(System.err);\n            throw e;\n        }\n    }\n\n    public CommandLineValues() {\n\n    }\n}"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/Common/Common.java",
    "content": "package JavaExtractor.Common;\n\nimport JavaExtractor.FeaturesEntities.Property;\nimport com.github.javaparser.ast.Node;\nimport com.github.javaparser.ast.UserDataKey;\n\nimport java.util.ArrayList;\nimport java.util.stream.Collectors;\nimport java.util.stream.Stream;\n\npublic final class Common {\n    public static final UserDataKey<Property> PropertyKey = new UserDataKey<Property>() {\n    };\n    public static final UserDataKey<Integer> ChildId = new UserDataKey<Integer>() {\n    };\n    public static final String EmptyString = \"\";\n\n    public static final String MethodDeclaration = \"MethodDeclaration\";\n    public static final String NameExpr = \"NameExpr\";\n    public static final String BlankWord = \"BLANK\";\n\n    public static final int c_MaxLabelLength = 50;\n    public static final String methodName = \"METHOD_NAME\";\n    public static final String internalSeparator = \"|\";\n\n    public static String normalizeName(String original, String defaultString) {\n        original = original.toLowerCase().replaceAll(\"\\\\\\\\n\", \"\") // escaped new\n                // lines\n                .replaceAll(\"//s+\", \"\") // whitespaces\n                .replaceAll(\"[\\\"',]\", \"\") // quotes, apostrophies, commas\n                .replaceAll(\"\\\\P{Print}\", \"\"); // unicode weird characters\n        String stripped = original.replaceAll(\"[^A-Za-z]\", \"\");\n        if (stripped.length() == 0) {\n            String carefulStripped = original.replaceAll(\" \", \"_\");\n            if (carefulStripped.length() == 0) {\n                return defaultString;\n            } else {\n                return carefulStripped;\n            }\n        } else {\n            return stripped;\n        }\n    }\n\n    public static boolean isMethod(Node node, String type) {\n        Property parentProperty = node.getParentNode().getUserData(Common.PropertyKey);\n        if (parentProperty == null) {\n            return false;\n        }\n\n        String parentType = parentProperty.getType();\n        return Common.NameExpr.equals(type) && Common.MethodDeclaration.equals(parentType);\n    }\n\n    public static ArrayList<String> splitToSubtokens(String str1) {\n        String str2 = str1.replace(\"|\", \" \");\n        String str3 = str2.trim();\n        return Stream.of(str3.split(\"(?<=[a-z])(?=[A-Z])|_|[0-9]|(?<=[A-Z])(?=[A-Z][a-z])|\\\\s+\"))\n                .filter(s -> s.length() > 0).map(s -> Common.normalizeName(s, Common.EmptyString))\n                .filter(s -> s.length() > 0).collect(Collectors.toCollection(ArrayList::new));\n    }\n}\n"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/Common/MethodContent.java",
    "content": "package JavaExtractor.Common;\n\nimport com.github.javaparser.ast.Node;\n\nimport java.util.ArrayList;\n\npublic class MethodContent {\n    private final ArrayList<Node> leaves;\n    private final String name;\n\n    private final String content;\n\n    public MethodContent(ArrayList<Node> leaves, String name, String content) {\n        this.leaves = leaves;\n        this.name = name;\n        this.content = content;\n    }\n\n    public ArrayList<Node> getLeaves() {\n        return leaves;\n    }\n\n    public String getName() {\n        return name;\n    }\n\n    public String getContent() {\n        return content;\n    }\n}\n"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/ExtractFeaturesTask.java",
    "content": "package JavaExtractor;\n\nimport JavaExtractor.Common.CommandLineValues;\nimport JavaExtractor.Common.Common;\nimport JavaExtractor.FeaturesEntities.ProgramFeatures;\nimport org.apache.commons.lang3.StringUtils;\n\nimport java.io.IOException;\nimport java.nio.charset.Charset;\nimport java.nio.file.Files;\nimport java.nio.file.Path;\nimport java.util.ArrayList;\nimport java.util.List;\nimport java.util.concurrent.Callable;\nimport com.google.gson.Gson;\n\nclass ExtractFeaturesTask implements Callable<Void> {\n    private final CommandLineValues commandLineValues;\n    private final Path filePath;\n\n    public ExtractFeaturesTask(CommandLineValues commandLineValues, Path path) {\n        this.commandLineValues = commandLineValues;\n        this.filePath = path;\n    }\n\n    @Override\n    public Void call() {\n        processFile();\n        return null;\n    }\n\n    public void processFile() {\n        ArrayList<ProgramFeatures> features;\n        try {\n            features = extractSingleFile();\n        } catch (IOException e) {\n            e.printStackTrace();\n            return;\n        }\n        if (features == null) {\n            return;\n        }\n\n        String toPrint = featuresToString(features);\n        if (toPrint.length() > 0) {\n            System.out.println(toPrint);\n        }\n    }\n\n    private ArrayList<ProgramFeatures> extractSingleFile() throws IOException {\n        String code;\n\n        if (commandLineValues.MaxFileLength > 0 &&\n                Files.lines(filePath, Charset.defaultCharset()).count() > commandLineValues.MaxFileLength) {\n            return new ArrayList<>();\n        }\n        try {\n            code = new String(Files.readAllBytes(filePath));\n        } catch (IOException e) {\n            e.printStackTrace();\n            code = Common.EmptyString;\n        }\n        FeatureExtractor featureExtractor = new FeatureExtractor(commandLineValues, this.filePath);\n\n        return featureExtractor.extractFeatures(code);\n    }\n\n    public String featuresToString(ArrayList<ProgramFeatures> features) {\n        if (features == null || features.isEmpty()) {\n            return Common.EmptyString;\n        }\n\n        List<String> methodsOutputs = new ArrayList<>();\n\n        for (ProgramFeatures singleMethodFeatures : features) {\n            StringBuilder builder = new StringBuilder();\n\n            String toPrint;\n            if (commandLineValues.JsonOutput) {\n                toPrint = new Gson().toJson(singleMethodFeatures);\n            }\n            else {\n                toPrint = singleMethodFeatures.toString();\n            }\n            if (commandLineValues.PrettyPrint) {\n                toPrint = toPrint.replace(\" \", \"\\n\\t\");\n            }\n            builder.append(toPrint);\n\n\n            methodsOutputs.add(builder.toString());\n\n        }\n        return StringUtils.join(methodsOutputs, \"\\n\");\n    }\n}\n"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/FeatureExtractor.java",
    "content": "package JavaExtractor;\n\nimport JavaExtractor.Common.CommandLineValues;\nimport JavaExtractor.Common.Common;\nimport JavaExtractor.Common.MethodContent;\nimport JavaExtractor.FeaturesEntities.ProgramFeatures;\nimport JavaExtractor.FeaturesEntities.Property;\nimport JavaExtractor.Visitors.FunctionVisitor;\nimport com.github.javaparser.JavaParser;\nimport com.github.javaparser.ParseProblemException;\nimport com.github.javaparser.ast.CompilationUnit;\nimport com.github.javaparser.ast.Node;\n\nimport java.io.File;\nimport java.nio.file.Path;\nimport java.util.ArrayList;\nimport java.util.HashSet;\nimport java.util.Set;\nimport java.util.StringJoiner;\nimport java.util.stream.Collectors;\nimport java.util.stream.Stream;\n\n@SuppressWarnings(\"StringEquality\")\nclass FeatureExtractor {\n    private final static String upSymbol = \"|\";\n    private final static String downSymbol = \"|\";\n    private static final Set<String> s_ParentTypeToAddChildId = Stream\n            .of(\"AssignExpr\", \"ArrayAccessExpr\", \"FieldAccessExpr\", \"MethodCallExpr\")\n            .collect(Collectors.toCollection(HashSet::new));\n    private final CommandLineValues m_CommandLineValues;\n    private final Path filePath;\n\n    public FeatureExtractor(CommandLineValues commandLineValues, Path filePath) {\n        this.m_CommandLineValues = commandLineValues;\n        this.filePath = filePath;\n    }\n\n    private static ArrayList<Node> getTreeStack(Node node) {\n        ArrayList<Node> upStack = new ArrayList<>();\n        Node current = node;\n        while (current != null) {\n            upStack.add(current);\n            current = current.getParentNode();\n        }\n        return upStack;\n    }\n\n    public ArrayList<ProgramFeatures> extractFeatures(String code) {\n        CompilationUnit m_CompilationUnit = parseFileWithRetries(code);\n        FunctionVisitor functionVisitor = new FunctionVisitor(m_CommandLineValues);\n\n        functionVisitor.visit(m_CompilationUnit, null);\n\n        ArrayList<MethodContent> methods = functionVisitor.getMethodContents();\n\n        return generatePathFeatures(methods);\n    }\n\n    private CompilationUnit parseFileWithRetries(String code) {\n        final String classPrefix = \"public class Test {\";\n        final String classSuffix = \"}\";\n        final String methodPrefix = \"SomeUnknownReturnType f() {\";\n        final String methodSuffix = \"return noSuchReturnValue; }\";\n\n        String content = code;\n        CompilationUnit parsed;\n        try {\n            parsed = JavaParser.parse(content);\n        } catch (ParseProblemException e1) {\n            // Wrap with a class and method\n            try {\n                content = classPrefix + methodPrefix + code + methodSuffix + classSuffix;\n                parsed = JavaParser.parse(content);\n            } catch (ParseProblemException e2) {\n                // Wrap with a class only\n                content = classPrefix + code + classSuffix;\n                parsed = JavaParser.parse(content);\n            }\n        }\n\n        return parsed;\n    }\n\n    private ArrayList<ProgramFeatures> generatePathFeatures(ArrayList<MethodContent> methods) {\n        ArrayList<ProgramFeatures> methodsFeatures = new ArrayList<>();\n        for (MethodContent content : methods) {\n            ProgramFeatures singleMethodFeatures = generatePathFeaturesForFunction(content);\n            if (!singleMethodFeatures.isEmpty()) {\n                methodsFeatures.add(singleMethodFeatures);\n            
}\n        }\n        return methodsFeatures;\n    }\n\n    private ProgramFeatures generatePathFeaturesForFunction(MethodContent methodContent) {\n        ArrayList<Node> functionLeaves = methodContent.getLeaves();\n        ProgramFeatures programFeatures = new ProgramFeatures(\n                methodContent.getName(), this.filePath, methodContent.getContent());\n\n        for (int i = 0; i < functionLeaves.size(); i++) {\n            for (int j = i + 1; j < functionLeaves.size(); j++) {\n                String separator = Common.EmptyString;\n\n                String path = generatePath(functionLeaves.get(i), functionLeaves.get(j), separator);\n                if (path != Common.EmptyString) {\n                    Property source = functionLeaves.get(i).getUserData(Common.PropertyKey);\n                    Property target = functionLeaves.get(j).getUserData(Common.PropertyKey);\n                    programFeatures.addFeature(source, path, target);\n                }\n            }\n        }\n        return programFeatures;\n    }\n\n    private String generatePath(Node source, Node target, String separator) {\n\n        StringJoiner stringBuilder = new StringJoiner(separator);\n        ArrayList<Node> sourceStack = getTreeStack(source);\n        ArrayList<Node> targetStack = getTreeStack(target);\n\n        int commonPrefix = 0;\n        int currentSourceAncestorIndex = sourceStack.size() - 1;\n        int currentTargetAncestorIndex = targetStack.size() - 1;\n        while (currentSourceAncestorIndex >= 0 && currentTargetAncestorIndex >= 0\n                && sourceStack.get(currentSourceAncestorIndex) == targetStack.get(currentTargetAncestorIndex)) {\n            commonPrefix++;\n            currentSourceAncestorIndex--;\n            currentTargetAncestorIndex--;\n        }\n\n        int pathLength = sourceStack.size() + targetStack.size() - 2 * commonPrefix;\n        if (pathLength > m_CommandLineValues.MaxPathLength) {\n            return Common.EmptyString;\n        }\n\n        if (currentSourceAncestorIndex >= 0 && currentTargetAncestorIndex >= 0) {\n            int pathWidth = targetStack.get(currentTargetAncestorIndex).getUserData(Common.ChildId)\n                    - sourceStack.get(currentSourceAncestorIndex).getUserData(Common.ChildId);\n            if (pathWidth > m_CommandLineValues.MaxPathWidth) {\n                return Common.EmptyString;\n            }\n        }\n\n        for (int i = 0; i < sourceStack.size() - commonPrefix; i++) {\n            Node currentNode = sourceStack.get(i);\n            String childId = Common.EmptyString;\n            String parentRawType = currentNode.getParentNode().getUserData(Common.PropertyKey).getRawType();\n            if (i == 0 || s_ParentTypeToAddChildId.contains(parentRawType)) {\n                childId = saturateChildId(currentNode.getUserData(Common.ChildId))\n                        .toString();\n            }\n            stringBuilder.add(String.format(\"%s%s%s\",\n                    currentNode.getUserData(Common.PropertyKey).getType(true), childId, upSymbol));\n        }\n\n        Node commonNode = sourceStack.get(sourceStack.size() - commonPrefix);\n        String commonNodeChildId = Common.EmptyString;\n        Property parentNodeProperty = commonNode.getParentNode().getUserData(Common.PropertyKey);\n        String commonNodeParentRawType = Common.EmptyString;\n        if (parentNodeProperty != null) {\n            commonNodeParentRawType = parentNodeProperty.getRawType();\n        }\n        if 
(s_ParentTypeToAddChildId.contains(commonNodeParentRawType)) {\n            commonNodeChildId = saturateChildId(commonNode.getUserData(Common.ChildId))\n                    .toString();\n        }\n        stringBuilder.add(String.format(\"%s%s\",\n                commonNode.getUserData(Common.PropertyKey).getType(true), commonNodeChildId));\n\n        for (int i = targetStack.size() - commonPrefix - 1; i >= 0; i--) {\n            Node currentNode = targetStack.get(i);\n            String childId = Common.EmptyString;\n            if (i == 0 || s_ParentTypeToAddChildId.contains(currentNode.getUserData(Common.PropertyKey).getRawType())) {\n                childId = saturateChildId(currentNode.getUserData(Common.ChildId))\n                        .toString();\n            }\n            stringBuilder.add(String.format(\"%s%s%s\", downSymbol,\n                    currentNode.getUserData(Common.PropertyKey).getType(true), childId));\n        }\n\n        return stringBuilder.toString();\n    }\n\n    private Integer saturateChildId(int childId) {\n        return Math.min(childId, m_CommandLineValues.MaxChildId);\n    }\n}\n"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/FeaturesEntities/ProgramFeatures.java",
    "content": "package JavaExtractor.FeaturesEntities;\n\nimport java.nio.file.Path;\nimport java.util.ArrayList;\nimport java.util.stream.Collectors;\n\npublic class ProgramFeatures {\n    String name;\n\n    transient ArrayList<ProgramRelation> features = new ArrayList<>();\n    String textContent;\n\n    String filePath;\n\n    public ProgramFeatures(String name, Path filePath, String textContent) {\n\n        this.name = name;\n        this.filePath = filePath.toAbsolutePath().toString();\n        this.textContent = textContent;\n    }\n\n    @SuppressWarnings(\"StringBufferReplaceableByString\")\n    @Override\n    public String toString() {\n        StringBuilder stringBuilder = new StringBuilder();\n        stringBuilder.append(name).append(\" \");\n        stringBuilder.append(features.stream().map(ProgramRelation::toString).collect(Collectors.joining(\" \")));\n\n        return stringBuilder.toString();\n    }\n\n    public void addFeature(Property source, String path, Property target) {\n        ProgramRelation newRelation = new ProgramRelation(source, target, path);\n        features.add(newRelation);\n    }\n\n    public boolean isEmpty() {\n        return features.isEmpty();\n    }\n}\n"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/FeaturesEntities/ProgramRelation.java",
    "content": "package JavaExtractor.FeaturesEntities;\n\npublic class ProgramRelation {\n    Property source;\n    Property target;\n    String path;\n\n    public ProgramRelation(Property sourceName, Property targetName, String path) {\n        source = sourceName;\n        target = targetName;\n        this.path = path;\n    }\n\n    public String toString() {\n        return String.format(\"%s,%s,%s\", source.getName(), path,\n                target.getName());\n    }\n}\n"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/FeaturesEntities/Property.java",
    "content": "package JavaExtractor.FeaturesEntities;\n\nimport JavaExtractor.Common.Common;\nimport com.github.javaparser.ast.Node;\nimport com.github.javaparser.ast.expr.AssignExpr;\nimport com.github.javaparser.ast.expr.BinaryExpr;\nimport com.github.javaparser.ast.expr.IntegerLiteralExpr;\nimport com.github.javaparser.ast.expr.UnaryExpr;\nimport com.github.javaparser.ast.type.ClassOrInterfaceType;\n\nimport java.util.*;\nimport java.util.stream.Collectors;\nimport java.util.stream.Stream;\n\npublic class Property {\n    public static final HashSet<String> NumericalKeepValues = Stream.of(\"0\", \"1\", \"32\", \"64\")\n            .collect(Collectors.toCollection(HashSet::new));\n    private static final Map<String, String> shortTypes = Collections.unmodifiableMap(new HashMap<String, String>() {\n        /**\n         *\n         */\n        private static final long serialVersionUID = 1L;\n\n        {\n            put(\"ArrayAccessExpr\", \"ArAc\");\n            put(\"ArrayBracketPair\", \"ArBr\");\n            put(\"ArrayCreationExpr\", \"ArCr\");\n            put(\"ArrayCreationLevel\", \"ArCrLvl\");\n            put(\"ArrayInitializerExpr\", \"ArIn\");\n            put(\"ArrayType\", \"ArTy\");\n            put(\"AssertStmt\", \"Asrt\");\n            put(\"AssignExpr:and\", \"AsAn\");\n            put(\"AssignExpr:assign\", \"As\");\n            put(\"AssignExpr:lShift\", \"AsLS\");\n            put(\"AssignExpr:minus\", \"AsMi\");\n            put(\"AssignExpr:or\", \"AsOr\");\n            put(\"AssignExpr:plus\", \"AsP\");\n            put(\"AssignExpr:rem\", \"AsRe\");\n            put(\"AssignExpr:rSignedShift\", \"AsRSS\");\n            put(\"AssignExpr:rUnsignedShift\", \"AsRUS\");\n            put(\"AssignExpr:slash\", \"AsSl\");\n            put(\"AssignExpr:star\", \"AsSt\");\n            put(\"AssignExpr:xor\", \"AsX\");\n            put(\"BinaryExpr:and\", \"And\");\n            put(\"BinaryExpr:binAnd\", \"BinAnd\");\n            put(\"BinaryExpr:binOr\", \"BinOr\");\n            put(\"BinaryExpr:divide\", \"Div\");\n            put(\"BinaryExpr:equals\", \"Eq\");\n            put(\"BinaryExpr:greater\", \"Gt\");\n            put(\"BinaryExpr:greaterEquals\", \"Geq\");\n            put(\"BinaryExpr:less\", \"Ls\");\n            put(\"BinaryExpr:lessEquals\", \"Leq\");\n            put(\"BinaryExpr:lShift\", \"LS\");\n            put(\"BinaryExpr:minus\", \"Minus\");\n            put(\"BinaryExpr:notEquals\", \"Neq\");\n            put(\"BinaryExpr:or\", \"Or\");\n            put(\"BinaryExpr:plus\", \"Plus\");\n            put(\"BinaryExpr:remainder\", \"Mod\");\n            put(\"BinaryExpr:rSignedShift\", \"RSS\");\n            put(\"BinaryExpr:rUnsignedShift\", \"RUS\");\n            put(\"BinaryExpr:times\", \"Mul\");\n            put(\"BinaryExpr:xor\", \"Xor\");\n            put(\"BlockStmt\", \"Bk\");\n            put(\"BooleanLiteralExpr\", \"BoolEx\");\n            put(\"CastExpr\", \"Cast\");\n            put(\"CatchClause\", \"Catch\");\n            put(\"CharLiteralExpr\", \"CharEx\");\n            put(\"ClassExpr\", \"ClsEx\");\n            put(\"ClassOrInterfaceDeclaration\", \"ClsD\");\n            put(\"ClassOrInterfaceType\", \"Cls\");\n            put(\"ConditionalExpr\", \"Cond\");\n            put(\"ConstructorDeclaration\", \"Ctor\");\n            put(\"DoStmt\", \"Do\");\n            put(\"DoubleLiteralExpr\", \"Dbl\");\n            put(\"EmptyMemberDeclaration\", \"Emp\");\n            put(\"EnclosedExpr\", \"Enc\");\n            
put(\"ExplicitConstructorInvocationStmt\", \"ExpCtor\");\n            put(\"ExpressionStmt\", \"Ex\");\n            put(\"FieldAccessExpr\", \"Fld\");\n            put(\"FieldDeclaration\", \"FldDec\");\n            put(\"ForeachStmt\", \"Foreach\");\n            put(\"ForStmt\", \"For\");\n            put(\"IfStmt\", \"If\");\n            put(\"InitializerDeclaration\", \"Init\");\n            put(\"InstanceOfExpr\", \"InstanceOf\");\n            put(\"IntegerLiteralExpr\", \"IntEx\");\n            put(\"IntegerLiteralMinValueExpr\", \"IntMinEx\");\n            put(\"LabeledStmt\", \"Labeled\");\n            put(\"LambdaExpr\", \"Lambda\");\n            put(\"LongLiteralExpr\", \"LongEx\");\n            put(\"MarkerAnnotationExpr\", \"MarkerExpr\");\n            put(\"MemberValuePair\", \"Mvp\");\n            put(\"MethodCallExpr\", \"Cal\");\n            put(\"MethodDeclaration\", \"Mth\");\n            put(\"MethodReferenceExpr\", \"MethRef\");\n            put(\"NameExpr\", \"Nm\");\n            put(\"NormalAnnotationExpr\", \"NormEx\");\n            put(\"NullLiteralExpr\", \"Null\");\n            put(\"ObjectCreationExpr\", \"ObjEx\");\n            put(\"Parameter\", \"Prm\");\n            put(\"PrimitiveType\", \"Prim\");\n            put(\"QualifiedNameExpr\", \"Qua\");\n            put(\"ReturnStmt\", \"Ret\");\n            put(\"SingleMemberAnnotationExpr\", \"SMEx\");\n            put(\"StringLiteralExpr\", \"StrEx\");\n            put(\"SuperExpr\", \"SupEx\");\n            put(\"SwitchEntryStmt\", \"SwiEnt\");\n            put(\"SwitchStmt\", \"Switch\");\n            put(\"SynchronizedStmt\", \"Sync\");\n            put(\"ThisExpr\", \"This\");\n            put(\"ThrowStmt\", \"Thro\");\n            put(\"TryStmt\", \"Try\");\n            put(\"TypeDeclarationStmt\", \"TypeDec\");\n            put(\"TypeExpr\", \"Type\");\n            put(\"TypeParameter\", \"TypePar\");\n            put(\"UnaryExpr:inverse\", \"Inverse\");\n            put(\"UnaryExpr:negative\", \"Neg\");\n            put(\"UnaryExpr:not\", \"Not\");\n            put(\"UnaryExpr:posDecrement\", \"PosDec\");\n            put(\"UnaryExpr:posIncrement\", \"PosInc\");\n            put(\"UnaryExpr:positive\", \"Pos\");\n            put(\"UnaryExpr:preDecrement\", \"PreDec\");\n            put(\"UnaryExpr:preIncrement\", \"PreInc\");\n            put(\"UnionType\", \"Unio\");\n            put(\"VariableDeclarationExpr\", \"VDE\");\n            put(\"VariableDeclarator\", \"VD\");\n            put(\"VariableDeclaratorId\", \"VDID\");\n            put(\"VoidType\", \"Void\");\n            put(\"WhileStmt\", \"While\");\n            put(\"WildcardType\", \"Wild\");\n        }\n    });\n    private final String RawType;\n    private String Type;\n    private String SplitName;\n\n    public Property(Node node, boolean isLeaf, boolean isGenericParent) {\n        Class<?> nodeClass = node.getClass();\n        RawType = Type = nodeClass.getSimpleName();\n        if (node instanceof ClassOrInterfaceType && ((ClassOrInterfaceType) node).isBoxedType()) {\n            Type = \"PrimitiveType\";\n        }\n        String operator = \"\";\n        if (node instanceof BinaryExpr) {\n            operator = ((BinaryExpr) node).getOperator().toString();\n        } else if (node instanceof UnaryExpr) {\n            operator = ((UnaryExpr) node).getOperator().toString();\n        } else if (node instanceof AssignExpr) {\n            operator = ((AssignExpr) node).getOperator().toString();\n        }\n        if (operator.length() > 
0) {\n            Type += \":\" + operator;\n        }\n\n        String nameToSplit = node.toString();\n        if (isGenericParent) {\n            nameToSplit = ((ClassOrInterfaceType) node).getName();\n            if (isLeaf) {\n                // if it is a generic parent which counts as a leaf, then when\n                // it is participating in a path\n                // as a parent, it should be GenericClass and not a simple\n                // ClassOrInterfaceType.\n                Type = \"GenericClass\";\n            }\n        }\n        ArrayList<String> splitNameParts = Common.splitToSubtokens(nameToSplit);\n        SplitName = String.join(Common.internalSeparator, splitNameParts);\n\n        String name = Common.normalizeName(node.toString(), Common.BlankWord);\n        if (name.length() > Common.c_MaxLabelLength) {\n            name = name.substring(0, Common.c_MaxLabelLength);\n        } else if (node instanceof ClassOrInterfaceType && ((ClassOrInterfaceType) node).isBoxedType()) {\n            name = ((ClassOrInterfaceType) node).toUnboxedType().toString();\n        }\n\n        if (Common.isMethod(node, Type)) {\n            name = SplitName = Common.methodName;\n        }\n\n        if (SplitName.length() == 0) {\n            SplitName = name;\n            if (node instanceof IntegerLiteralExpr && !NumericalKeepValues.contains(SplitName)) {\n                // This is a numeric literal, but not in our white list\n                SplitName = \"<NUM>\";\n            }\n        }\n    }\n\n    public String getRawType() {\n        return RawType;\n    }\n\n    public String getType() {\n        return Type;\n    }\n\n    public String getType(boolean shorten) {\n        if (shorten) {\n            return shortTypes.getOrDefault(Type, Type);\n        } else {\n            return Type;\n        }\n    }\n\n    public String getName() {\n        return SplitName;\n    }\n}\n"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/Visitors/FunctionVisitor.java",
    "content": "package JavaExtractor.Visitors;\n\nimport JavaExtractor.Common.CommandLineValues;\nimport JavaExtractor.Common.Common;\nimport JavaExtractor.Common.MethodContent;\nimport com.github.javaparser.ast.Node;\nimport com.github.javaparser.ast.body.MethodDeclaration;\nimport com.github.javaparser.ast.visitor.VoidVisitorAdapter;\n\nimport java.util.ArrayList;\nimport java.util.Arrays;\n\n@SuppressWarnings(\"StringEquality\")\npublic class FunctionVisitor extends VoidVisitorAdapter<Object> {\n    private final ArrayList<MethodContent> methods = new ArrayList<>();\n    private final CommandLineValues commandLineValues;\n\n    public FunctionVisitor(CommandLineValues commandLineValues) {\n        this.commandLineValues = commandLineValues;\n    }\n\n    @Override\n    public void visit(MethodDeclaration node, Object arg) {\n        visitMethod(node);\n\n        super.visit(node, arg);\n    }\n\n    private void visitMethod(MethodDeclaration node) {\n        LeavesCollectorVisitor leavesCollectorVisitor = new LeavesCollectorVisitor();\n        leavesCollectorVisitor.visitDepthFirst(node);\n        ArrayList<Node> leaves = leavesCollectorVisitor.getLeaves();\n\n        String normalizedMethodName = Common.normalizeName(node.getName(), Common.BlankWord);\n        ArrayList<String> splitNameParts = Common.splitToSubtokens(node.getName());\n        String splitName = normalizedMethodName;\n        if (splitNameParts.size() > 0) {\n            splitName = String.join(Common.internalSeparator, splitNameParts);\n        }\n\n        node.setName(Common.methodName);\n\n        if (node.getBody() != null) {\n            long methodLength = getMethodLength(node.getBody().toString());\n            if (commandLineValues.MaxCodeLength <= 0 ||\n                    (methodLength >= commandLineValues.MinCodeLength && methodLength <= commandLineValues.MaxCodeLength)) {\n                methods.add(new MethodContent(leaves, splitName, node.toString()));\n            }\n        }\n    }\n\n    private long getMethodLength(String code) {\n        String cleanCode = code.replaceAll(\"\\r\\n\", \"\\n\").replaceAll(\"\\t\", \" \");\n        if (cleanCode.startsWith(\"{\\n\"))\n            cleanCode = cleanCode.substring(3).trim();\n        if (cleanCode.endsWith(\"\\n}\"))\n            cleanCode = cleanCode.substring(0, cleanCode.length() - 2).trim();\n        if (cleanCode.length() == 0) {\n            return 0;\n        }\n        return Arrays.stream(cleanCode.split(\"\\n\"))\n                .filter(line -> (line.trim() != \"{\" && line.trim() != \"}\" && line.trim() != \"\"))\n                .filter(line -> !line.trim().startsWith(\"/\") && !line.trim().startsWith(\"*\")).count();\n    }\n\n    public ArrayList<MethodContent> getMethodContents() {\n        return methods;\n    }\n}\n"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/JavaExtractor/Visitors/LeavesCollectorVisitor.java",
    "content": "package JavaExtractor.Visitors;\n\nimport JavaExtractor.Common.Common;\nimport JavaExtractor.FeaturesEntities.Property;\nimport com.github.javaparser.ast.Node;\nimport com.github.javaparser.ast.comments.Comment;\nimport com.github.javaparser.ast.expr.NullLiteralExpr;\nimport com.github.javaparser.ast.stmt.Statement;\nimport com.github.javaparser.ast.type.ClassOrInterfaceType;\nimport com.github.javaparser.ast.visitor.TreeVisitor;\n\nimport java.util.ArrayList;\nimport java.util.List;\n\npublic class LeavesCollectorVisitor extends TreeVisitor {\n    private final ArrayList<Node> m_Leaves = new ArrayList<>();\n\n    @Override\n    public void process(Node node) {\n        if (node instanceof Comment) {\n            return;\n        }\n        boolean isLeaf = false;\n        boolean isGenericParent = isGenericParent(node);\n        if (hasNoChildren(node) && isNotComment(node)) {\n            if (!node.toString().isEmpty() && (!\"null\".equals(node.toString()) || (node instanceof NullLiteralExpr))) {\n                m_Leaves.add(node);\n                isLeaf = true;\n            }\n        }\n\n        int childId = getChildId(node);\n        node.setUserData(Common.ChildId, childId);\n        Property property = new Property(node, isLeaf, isGenericParent);\n        node.setUserData(Common.PropertyKey, property);\n    }\n\n    private boolean isGenericParent(Node node) {\n        return (node instanceof ClassOrInterfaceType)\n                && ((ClassOrInterfaceType) node).getTypeArguments() != null\n                && ((ClassOrInterfaceType) node).getTypeArguments().size() > 0;\n    }\n\n    private boolean hasNoChildren(Node node) {\n        return node.getChildrenNodes().size() == 0;\n    }\n\n    private boolean isNotComment(Node node) {\n        return !(node instanceof Comment) && !(node instanceof Statement);\n    }\n\n    public ArrayList<Node> getLeaves() {\n        return m_Leaves;\n    }\n\n    private int getChildId(Node node) {\n        Node parent = node.getParentNode();\n        List<Node> parentsChildren = parent.getChildrenNodes();\n        int childId = 0;\n        for (Node child : parentsChildren) {\n            if (child.getRange().equals(node.getRange())) {\n                return childId;\n            }\n            childId++;\n        }\n        return childId;\n    }\n}\n"
  },
  {
    "path": "JavaExtractor/JPredict/src/main/java/Test.java",
    "content": "class Test {\n    void fooBar() {\n        System.out.println(\"http://github.com\");\n    }\n}"
  },
  {
    "path": "JavaExtractor/extract.py",
    "content": "#!/usr/bin/python\n\nimport itertools\nimport multiprocessing\nimport os\nimport shutil\nimport subprocess\nimport sys\nfrom argparse import ArgumentParser\nfrom threading import Timer\n\n\ndef get_immediate_subdirectories(a_dir):\n    return [(os.path.join(a_dir, name)) for name in os.listdir(a_dir)\n            if os.path.isdir(os.path.join(a_dir, name))]\n\n\nTMP_DIR = \"\"\n\n\ndef ParallelExtractDir(args, dir):\n    ExtractFeaturesForDir(args, dir, \"\")\n\n\ndef ExtractFeaturesForDir(args, dir, prefix):\n    command = ['java', '-Xmx100g', '-XX:MaxNewSize=60g', '-cp', args.jar, 'JavaExtractor.App',\n               '--max_path_length', str(args.max_path_length), '--max_path_width', str(args.max_path_width),\n               '--dir', dir, '--num_threads', str(args.num_threads)]\n\n    # print command\n    # os.system(command)\n    kill = lambda process: process.kill()\n    outputFileName = TMP_DIR + prefix + dir.split('/')[-1]\n    failed = False\n    with open(outputFileName, 'a') as outputFile:\n        sleeper = subprocess.Popen(command, stdout=outputFile, stderr=subprocess.PIPE)\n        timer = Timer(60 * 60, kill, [sleeper])\n\n        try:\n            timer.start()\n            stdout, stderr = sleeper.communicate()\n        finally:\n            timer.cancel()\n\n        if sleeper.poll() == 0:\n            if len(stderr) > 0:\n                print(stderr, file=sys.stderr)\n        else:\n            print('dir: ' + str(dir) + ' was not completed in time', file=sys.stderr)\n            failed = True\n            subdirs = get_immediate_subdirectories(dir)\n            for subdir in subdirs:\n                ExtractFeaturesForDir(args, subdir, prefix + dir.split('/')[-1] + '_')\n    if failed:\n        if os.path.exists(outputFileName):\n            os.remove(outputFileName)\n\n\ndef ExtractFeaturesForDirsList(args, dirs):\n    global TMP_DIR\n    TMP_DIR = \"./tmp/feature_extractor%d/\" % (os.getpid())\n    if os.path.exists(TMP_DIR):\n        shutil.rmtree(TMP_DIR, ignore_errors=True)\n    os.makedirs(TMP_DIR)\n    try:\n        p = multiprocessing.Pool(6)\n        p.starmap(ParallelExtractDir, zip(itertools.repeat(args), dirs))\n        # for dir in dirs:\n        #    ExtractFeaturesForDir(args, dir, '')\n        output_files = os.listdir(TMP_DIR)\n        for f in output_files:\n            os.system(\"cat %s/%s\" % (TMP_DIR, f))\n    finally:\n        shutil.rmtree(TMP_DIR, ignore_errors=True)\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser()\n    parser.add_argument(\"-maxlen\", \"--max_path_length\", dest=\"max_path_length\", required=False, default=8)\n    parser.add_argument(\"-maxwidth\", \"--max_path_width\", dest=\"max_path_width\", required=False, default=2)\n    parser.add_argument(\"-threads\", \"--num_threads\", dest=\"num_threads\", required=False, default=64)\n    parser.add_argument(\"-j\", \"--jar\", dest=\"jar\", required=True)\n    parser.add_argument(\"-dir\", \"--dir\", dest=\"dir\", required=False)\n    parser.add_argument(\"-file\", \"--file\", dest=\"file\", required=False)\n    args = parser.parse_args()\n\n    if args.file is not None:\n        command = 'java -cp ' + args.jar + ' JavaExtractor.App --max_path_length ' + \\\n                  str(args.max_path_length) + ' --max_path_width ' + str(args.max_path_width) + ' --file ' + args.file\n        os.system(command)\n    elif args.dir is not None:\n        subdirs = get_immediate_subdirectories(args.dir)\n        if len(subdirs) == 0:\n            subdirs = 
[args.dir]\n        ExtractFeaturesForDirsList(args, subdirs)\n"
  },
  {
    "path": "LICENSE",
    "content": "MIT License\n\nCopyright (c) 2019 Technion\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all\ncopies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\nSOFTWARE.\n"
  },
  {
    "path": "Python150kExtractor/README.md",
    "content": "# Python150k dataset\n\n## Steps to reproduce\n\n1. Download parsed python dataset from [here](https://www.sri.inf.ethz.ch/py150\n), unarchive and place under `PYTHON150K_DIR`:\n\n```bash\n# Replace with desired path.\n>>> PYTHON150K_DIR=/path/to/data/dir\n>>> mkdir -p $PYTHON150K_DIR\n>>> cd $PYTHON150K_DIR\n>>> wget http://files.srl.inf.ethz.ch/data/py150.tar.gz\n...\n>>> tar -xzvf py150.tar.gz\n...\n```\n\n2. Extract samples to `DATA_DIR`:\n\n```bash\n# Replace with desired path.\n>>> DATA_DIR=$(pwd)/data/default\n>>> SEED=239\n>>> python extract.py \\\n    --data_dir=$PYTHON150K_DIR \\\n    --output_dir=$DATA_DIR \\\n    --seed=$SEED\n...\n```\n\n3. Preprocess for training:\n\n```bash\n>>> ./preprocess.sh $DATA_DIR\n...\n```\n\n4. Train:\n\n```bash\n>>> cd ..\n>>> DESC=default\n>>> CUDA=0\n>>> ./train_python150k.sh $DATA_DIR $DESC $CUDA $SEED\n...\n```\n\n## Test results (seed=239)\n\n### Best scores\n\n**setup#2**: `batch_size=64`  \n**setup#3**: `embedding_size=256,use_momentum=False`  \n**setup#4**: `batch_size=32,embedding_size=256,embeddings_dropout_keep_prob=0.5,use_momentum=False`\n\n| params | Precision | Recall | F1 | ROUGE-2 | ROUGE-L | \n|---|---|---|---|---|---|\n| default | 0.37 | 0.27 | 0.31 | 0.06 | 0.38 |\n| setup#2 | 0.40 | 0.31 | 0.34 | 0.08 | 0.41 |\n| setup#3 | 0.36 | 0.31 | 0.33 | 0.09 | 0.38 |\n| setup#4 | 0.33 | 0.25 | 0.28 | 0.05 | 0.34 |\n\n### Ablation studies\n\n| params | Precision | Recall | F1 | ROUGE-2 | ROUGE-L | \n|---|---|---|---|---|---|\n| default | 0.37 | 0.27 | 0.31 | 0.06 | 0.38 |\n| no ast nodes (5th epoch) | 0.27 | 0.16 | 0.20 | 0.02 | 0.28 |\n| no token split (4th epoch) | 0.60 | 0.09 | 0.15 | 0.00 | 0.60 |"
  },
  {
    "path": "Python150kExtractor/extract.py",
    "content": "import argparse\nimport re\nimport json\nimport multiprocessing\nimport itertools\nimport tqdm\nimport joblib\nimport numpy as np\n\nfrom pathlib import Path\nfrom sklearn import model_selection as sklearn_model_selection\n\nMETHOD_NAME, NUM = 'METHODNAME', 'NUM'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--data_dir', required=True, type=str)\nparser.add_argument('--valid_p', type=float, default=0.2)\nparser.add_argument('--max_path_length', type=int, default=8)\nparser.add_argument('--max_path_width', type=int, default=2)\nparser.add_argument('--use_method_name', type=bool, default=True)\nparser.add_argument('--use_nums', type=bool, default=True)\nparser.add_argument('--output_dir', required=True, type=str)\nparser.add_argument('--n_jobs', type=int, default=multiprocessing.cpu_count())\nparser.add_argument('--seed', type=int, default=239)\n\n\ndef __collect_asts(json_file):\n    with open(json_file, 'r', encoding='utf-8') as f:\n        for line in tqdm.tqdm(f):\n            yield line\n\n\ndef __terminals(ast, node_index, args):\n    stack, paths = [], []\n\n    def dfs(v):\n        stack.append(v)\n\n        v_node = ast[v]\n\n        if 'value' in v_node:\n            if v == node_index:  # Top-level func def node.\n                if args.use_method_name:\n                    paths.append((stack.copy(), METHOD_NAME))\n            else:\n                v_type = v_node['type']\n\n                if v_type.startswith('Name'):\n                    paths.append((stack.copy(), v_node['value']))\n                elif args.use_nums and v_type == 'Num':\n                    paths.append((stack.copy(), NUM))\n                else:\n                    pass\n\n        if 'children' in v_node:\n            for child in v_node['children']:\n                dfs(child)\n\n        stack.pop()\n\n    dfs(node_index)\n\n    return paths\n\n\ndef __merge_terminals2_paths(v_path, u_path):\n    s, n, m = 0, len(v_path), len(u_path)\n    while s < min(n, m) and v_path[s] == u_path[s]:\n        s += 1\n\n    prefix = list(reversed(v_path[s:]))\n    lca = v_path[s - 1]\n    suffix = u_path[s:]\n\n    return prefix, lca, suffix\n\n\ndef __raw_tree_paths(ast, node_index, args):\n    tnodes = __terminals(ast, node_index, args)\n\n    tree_paths = []\n    for (v_path, v_value), (u_path, u_value) in itertools.combinations(\n            iterable=tnodes,\n            r=2,\n    ):\n        prefix, lca, suffix = __merge_terminals2_paths(v_path, u_path)\n        if (len(prefix) + 1 + len(suffix) <= args.max_path_length) \\\n                and (abs(len(prefix) - len(suffix)) <= args.max_path_width):\n            path = prefix + [lca] + suffix\n            tree_path = v_value, path, u_value\n            tree_paths.append(tree_path)\n\n    return tree_paths\n\n\ndef __delim_name(name):\n    if name in {METHOD_NAME, NUM}:\n        return name\n\n    def camel_case_split(identifier):\n        matches = re.finditer(\n            '.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)',\n            identifier,\n        )\n        return [m.group(0) for m in matches]\n\n    blocks = []\n    for underscore_block in name.split('_'):\n        blocks.extend(camel_case_split(underscore_block))\n\n    return '|'.join(block.lower() for block in blocks)\n\n\ndef __collect_sample(ast, fd_index, args):\n    root = ast[fd_index]\n    if root['type'] != 'FunctionDef':\n        raise ValueError('Wrong node type.')\n\n    target = root['value']\n\n    tree_paths = __raw_tree_paths(ast, fd_index, args)\n   
 contexts = []\n    for tree_path in tree_paths:\n        start, connector, finish = tree_path\n\n        start, finish = __delim_name(start), __delim_name(finish)\n        connector = '|'.join(ast[v]['type'] for v in connector)\n\n        context = f'{start},{connector},{finish}'\n        contexts.append(context)\n\n    if len(contexts) == 0:\n        return None\n\n    target = __delim_name(target)\n    context = ' '.join(contexts)\n\n    return f'{target} {context}'\n\n\ndef __collect_samples(ast, args):\n    samples = []\n    for node_index, node in enumerate(ast):\n        if node['type'] == 'FunctionDef':\n            sample = __collect_sample(ast, node_index, args)\n            if sample is not None:\n                samples.append(sample)\n\n    return samples\n\n\ndef __collect_all_and_save(asts, args, output_file):\n    parallel = joblib.Parallel(n_jobs=args.n_jobs)\n    func = joblib.delayed(__collect_samples)\n\n    samples = parallel(func(ast, args) for ast in tqdm.tqdm(asts))\n    samples = list(itertools.chain.from_iterable(samples))\n\n    with open(output_file, 'w') as f:\n        for line_index, line in enumerate(samples):\n            f.write(line + ('' if line_index == len(samples) - 1 else '\\n'))\n\n\ndef main():\n    args = parser.parse_args()\n    np.random.seed(args.seed)\n\n    data_dir = Path(args.data_dir)\n    trains = list(__collect_asts(data_dir / 'python100k_train.json'))\n    evals = list(__collect_asts(data_dir / 'python50k_eval.json'))\n\n    train, valid = sklearn_model_selection.train_test_split(\n        trains,\n        test_size=args.valid_p,\n    )\n    test = evals\n\n    output_dir = Path(args.output_dir)\n    output_dir.mkdir(exist_ok=True)\n    for split_name, split in zip(\n            ('train', 'valid', 'test'),\n            (train, valid, test),\n    ):\n        output_file = output_dir / f'{split_name}_output_file.txt'\n        __collect_all_and_save((json.loads(line) for line in split), args, output_file)\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "Python150kExtractor/preprocess.sh",
    "content": "#!/usr/bin/env bash\n\nMAX_CONTEXTS=200\nMAX_DATA_CONTEXTS=1000\nSUBTOKEN_VOCAB_SIZE=186277\nTARGET_VOCAB_SIZE=26347\n\ndata_dir=${1:-data}\nmkdir -p \"${data_dir}\"\ntrain_data_file=$data_dir/train_output_file.txt\nvalid_data_file=$data_dir/valid_output_file.txt\ntest_data_file=$data_dir/test_output_file.txt\n\necho \"Creating histograms from the training data...\"\ntarget_histogram_file=$data_dir/histo.tgt.c2s\nsource_subtoken_histogram=$data_dir/histo.ori.c2s\nnode_histogram_file=$data_dir/histo.node.c2s\ncut <\"${train_data_file}\" -d' ' -f1 | tr '|' '\\n' | awk '{n[$0]++} END {for (i in n) print i,n[i]}' >\"${target_histogram_file}\"\ncut <\"${train_data_file}\" -d' ' -f2- | tr ' ' '\\n' | cut -d',' -f1,3 | tr ',|' '\\n' | awk '{n[$0]++} END {for (i in n) print i,n[i]}' >\"${source_subtoken_histogram}\"\ncut <\"${train_data_file}\" -d' ' -f2- | tr ' ' '\\n' | cut -d',' -f2 | tr '|' '\\n' | awk '{n[$0]++} END {for (i in n) print i,n[i]}' >\"${node_histogram_file}\"\n\necho \"Preprocessing...\"\npython ../preprocess.py \\\n  --train_data \"${train_data_file}\" \\\n  --val_data \"${valid_data_file}\" \\\n  --test_data \"${test_data_file}\" \\\n  --max_contexts ${MAX_CONTEXTS} \\\n  --max_data_contexts ${MAX_DATA_CONTEXTS} \\\n  --subtoken_vocab_size ${SUBTOKEN_VOCAB_SIZE} \\\n  --target_vocab_size ${TARGET_VOCAB_SIZE} \\\n  --target_histogram \"${target_histogram_file}\" \\\n  --subtoken_histogram \"${source_subtoken_histogram}\" \\\n  --node_histogram \"${node_histogram_file}\" \\\n  --output_name \"${data_dir}\"/\"$(basename \"${data_dir}\")\"\nrm \\\n  \"${target_histogram_file}\" \\\n  \"${source_subtoken_histogram}\" \\\n  \"${node_histogram_file}\"\n"
  },
  {
    "path": "README.md",
    "content": "# code2seq\nThis is an official implementation of the model described in:\n\n[Uri Alon](http://urialon.cswp.cs.technion.ac.il), [Shaked Brody](http://www.cs.technion.ac.il/people/shakedbr/), [Omer Levy](https://levyomer.wordpress.com) and [Eran Yahav](http://www.cs.technion.ac.il/~yahave/), \"code2seq: Generating Sequences from Structured Representations of Code\" [[PDF]](https://openreview.net/pdf?id=H1gKYo09tX)\n\nAppeared in **ICLR'2019** (**poster** available [here](https://urialon.cswp.cs.technion.ac.il/wp-content/uploads/sites/83/2019/05/ICLR19_poster_code2seq.pdf))\n\nAn **online demo** is available at [https://code2seq.org](https://code2seq.org).\n\nThis is a TensorFlow implementation of the network, with Java and C# extractors for preprocessing the input code. \nIt can be easily extended to other languages, \nsince the TensorFlow network is agnostic to the input programming language (see [Extending to other languages](#extending-to-other-languages).\nContributions are welcome.\n\n<center style=\"padding: 40px\"><img width=\"70%\" src=\"https://github.com/tech-srl/code2seq/raw/master/images/network.png\" /></center>\n\n## See also:\n  * **Structural Language Models for Code** (ICML'2020) is a new paper that learns to generate the missing code within a larger code snippet. This is similar to code completion, but is able to predict complex expressions rather than a single token at a time. See [PDF](https://arxiv.org/pdf/1910.00577.pdf), demo at [http://AnyCodeGen.org](http://AnyCodeGen.org).\n  * **Adversarial Examples for Models of Code** is a new paper that shows how to slightly mutate the input code snippet of code2vec and GNNs models (thus, introducing adversarial examples), such that the model (code2vec or GNNs) will output a prediction of our choice. See [PDF](https://arxiv.org/pdf/1910.07517.pdf) (code: soon).\n  * **Neural Reverse Engineering of Stripped Binaries** is a new paper that learns to predict procedure names in stripped binaries, thus use neural networks for reverse engineering. See [PDF](https://arxiv.org/pdf/1902.09122) (code: soon).\n  * **code2vec** (POPL'2019) is our previous model. It can only generate a single label at a time (rather than a sequence as code2seq), but it is much faster to train (because of its simplicity). See [PDF](https://urialon.cswp.cs.technion.ac.il/wp-content/uploads/sites/83/2018/12/code2vec-popl19.pdf), demo at [https://code2vec.org](https://code2vec.org) and [code](https://github.com/tech-srl/code2vec/).\n\n\nTable of Contents\n=================\n  * [Requirements](#requirements)\n  * [Quickstart](#quickstart)\n  * [Configuration](#configuration)\n  * [Releasing a trained mode](#releasing-a-trained-model)\n  * [Extending to other languages](#extending-to-other-languages)\n  * [Datasets](#datasets)\n  * [Baselines](#baselines)\n  * [Citation](#citation)\n\n## Requirements\n  * [python3](https://www.linuxbabe.com/ubuntu/install-python-3-6-ubuntu-16-04-16-10-17-04) \n  * TensorFlow 1.12 ([install](https://www.tensorflow.org/install/install_linux)). 
To check the TensorFlow version:\n> python3 -c 'import tensorflow as tf; print(tf.\_\_version\_\_)'\n  * For a TensorFlow 2.1 implementation by [@Kolkir](https://github.com/Kolkir/), see: [https://github.com/Kolkir/code2seq](https://github.com/Kolkir/code2seq)\n  * For [creating a new Java dataset](#creating-and-preprocessing-a-new-java-dataset) or [manually examining a trained model](#step-4-manual-examination-of-a-trained-model) (any operation that requires parsing of a new code example): [JDK](https://openjdk.java.net/install/)\n  * For creating a C# dataset: [dotnet-core](https://dotnet.microsoft.com/download) version 2.2 or newer.\n  * `pip install rouge` for computing ROUGE scores.\n\n## Quickstart\n### Step 0: Cloning this repository\n```\ngit clone https://github.com/tech-srl/code2seq\ncd code2seq\n```\n\n### Step 1: Creating a new dataset from Java sources\nTo obtain a preprocessed dataset to train a network on, you can either download our\npreprocessed dataset, or create a new dataset from Java source files.\n\n#### Download our preprocessed Java-large dataset (~16M examples, compressed: 11GB, extracted: 125GB)\n```\nmkdir data\ncd data\nwget https://s3.amazonaws.com/code2seq/datasets/java-large-preprocessed.tar.gz\ntar -xvzf java-large-preprocessed.tar.gz\n```\nThis will create a `data/java-large/` sub-directory, containing the files that hold the training, test and validation sets,\nand a dict file for various dataset properties.\n\n#### Creating and preprocessing a new Java dataset\nTo create and preprocess a new dataset (for example, to compare code2seq to another model on another dataset):\n  * Edit the file [preprocess.sh](preprocess.sh) using the instructions there, pointing it to the correct training, validation and test directories (see the sketch after this list).\n  * Run the preprocess.sh file:\n> bash preprocess.sh\n
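\nFor illustration only, the values to set at the top of [preprocess.sh](preprocess.sh) look roughly like this (hypothetical names shown here; use the actual variable names documented in the script itself):\n\n```\n# Paths to directories containing (sub-directories of) .java files\nTRAIN_DIR=my_training_dir\nVAL_DIR=my_validation_dir\nTEST_DIR=my_test_dir\nDATASET_NAME=my_dataset      # prefix for the generated dataset files\nMAX_CONTEXTS=200             # contexts sampled per example while training\nMAX_DATA_CONTEXTS=1000       # contexts kept per example in the dataset\n```\n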
\n### Step 2: Training a model\nYou can either download an already trained model, or train a new model using a preprocessed dataset.\n\n#### Downloading a trained model (137 MB)\nWe already trained a model for 52 epochs on the data that was preprocessed in the previous step. This is the same model that was used in the paper and that serves the demo at [https://code2seq.org](https://code2seq.org).\n```\nwget https://s3.amazonaws.com/code2seq/model/java-large/java-large-model.tar.gz\ntar -xvzf java-large-model.tar.gz\n```\n\n##### Note:\nThis trained model is in a \"released\" state, which means that we stripped it of its training parameters.\n\n#### Training a model from scratch\nTo train a model from scratch:\n  * Edit the file [train.sh](train.sh) to point it to the right preprocessed data. By default, \n  it points to our \"java-large\" dataset that was preprocessed in the previous step.\n  * Before training, you can edit the configuration hyper-parameters in the file [config.py](config.py),\n  as explained in [Configuration](#configuration).\n  * Run the [train.sh](train.sh) script:\n```\nbash train.sh\n```\n\n### Step 3: Evaluating a trained model\nAfter `config.PATIENCE` iterations of no improvement on the validation set, training stops by itself.\n\nSuppose that iteration #52 is our chosen model; to evaluate it, run:\n```\npython3 code2seq.py --load models/java-large-model/model_iter52.release --test data/java-large/java-large.test.c2s\n```\nWhile evaluating, a file named \"log.txt\" is written to the same directory as the saved models, with each test example's name and the model's prediction.\n\n### Step 4: Manual examination of a trained model\nTo manually examine a trained model, run:\n```\npython3 code2seq.py --load models/java-large-model/model_iter52.release --predict\n```\nAfter the model loads, follow the instructions: edit the file `Input.java`, enter a Java \nmethod or code snippet, and examine the model's predictions and attention scores.\n\n#### Note: \nDue to TensorFlow's limitations, if using beam search (`config.BEAM_WIDTH > 0`), then `BEAM_WIDTH` hypotheses will be printed, but\nwithout attention weights. If not using beam search (`config.BEAM_WIDTH == 0`), then a single hypothesis will be printed *with \nthe attention weights* in every decoding timestep. \n\n## Configuration\nChanging hyper-parameters is possible by editing the file [config.py](config.py).\n\nHere are some of the parameters and their description:\n#### config.NUM_EPOCHS = 3000\nThe max number of epochs to train the model. \n#### config.SAVE_EVERY_EPOCHS = 1\nThe frequency, in epochs, of saving a model and evaluating on the validation set during training.\n#### config.PATIENCE = 10\nControls early stopping: the number of epochs with no improvement on the validation set after which training stops.  \n#### config.BATCH_SIZE = 512\nBatch size during training.\n#### config.TEST_BATCH_SIZE = 256\nBatch size during evaluation. Affects only the evaluation speed and memory consumption, does not affect the results.\n#### config.SHUFFLE_BUFFER_SIZE = 10000\nThe buffer size that the reader uses for shuffling the training data. \nControls the randomness of the data. \nIncreasing this value might hurt training throughput. \n#### config.CSV_BUFFER_SIZE = 100 * 1024 * 1024  \nThe buffer size (in bytes) of the CSV dataset reader.\n#### config.MAX_CONTEXTS = 200\nThe number of contexts to sample in each example during training \n(resampling a different subset of this size every training iteration).\n#### config.SUBTOKENS_VOCAB_MAX_SIZE = 190000\nThe max size of the subtoken vocabulary.\n#### config.TARGET_VOCAB_MAX_SIZE = 27000\nThe max size of the target words vocabulary.\n#### config.EMBEDDINGS_SIZE = 128\nEmbedding size for subtokens, AST nodes and target symbols.\n#### config.RNN_SIZE = 128 * 2 \nThe total size of the two LSTMs that are used to embed the paths if `config.BIRNN` is `True`, or the size of the single LSTM if `config.BIRNN` is `False`.\n#### config.DECODER_SIZE = 320\nSize of each LSTM layer in the decoder.\n#### config.NUM_DECODER_LAYERS = 1\nNumber of decoder LSTM layers. Can be increased to support long target sequences.\n#### config.MAX_PATH_LENGTH = 8 + 1\nThe max number of nodes in a path.\n#### config.MAX_NAME_PARTS = 5\nThe max number of subtokens in an input token. 
If a token is longer, only the first `MAX_NAME_PARTS` subtokens are read.\n#### config.MAX_TARGET_PARTS = 6\nThe max number of symbols in the target sequence. \nSet to 6 by default for method names, but can be increased for learning datasets with longer sequences.\n#### config.BIRNN = True\nIf `True`, use a bidirectional LSTM to encode each path. If `False`, use a unidirectional LSTM only. \n#### config.RANDOM_CONTEXTS = True\nWhen `True`, sample `MAX_CONTEXTS` contexts from every example at every training iteration. \nWhen `False`, take the first `MAX_CONTEXTS` only.\n#### config.BEAM_WIDTH = 0\nBeam width in beam search. Inactive when 0. \n#### config.USE_MOMENTUM = True\nIf `True`, use the Momentum optimizer with Nesterov momentum. If `False`, use Adam \n(Adam converges in fewer epochs; Momentum leads to slightly better results). \n\n## Releasing a trained model\nIf you wish to keep a trained model for inference only (without the ability to continue training it), you can\nrelease the model using:\n```\npython3 code2seq.py --load models/java-large-model/model_iter52 --release\n```\nThis will save a copy of the trained model with the '.release' suffix.\nA \"released\" model usually takes ~3x less disk space.\n\n## Extending to other languages  \n\nThis project currently supports Java and C\# as the input languages.\n\n_**March 2020** - a code2seq extractor for **C++** based on LLVM was developed by [@Kolkir](https://github.com/Kolkir/) and is available here: [https://github.com/Kolkir/cppminer](https://github.com/Kolkir/cppminer)._\n\n_**January 2020** - a code2seq extractor for Python (specifically targeting the Python150k dataset) was contributed by [@stasbel](https://github.com/stasbel). See: [https://github.com/tech-srl/code2seq/tree/master/Python150kExtractor](https://github.com/tech-srl/code2seq/tree/master/Python150kExtractor)._\n\n_**January 2020** - an extractor for predicting TypeScript type annotations for JavaScript input using code2vec was developed by [@izosak](https://github.com/izosak) and Noa Cohen, and is available here:\n[https://github.com/tech-srl/id2vec](https://github.com/tech-srl/id2vec)._\n\n~~_**June 2019** - an extractor for **C** that is compatible with our model was developed by [CMU SEI team](https://github.com/cmu-sei/code2vec-c)._~~ - removed by CMU SEI team.\n\n_**June 2019** - a code2vec extractor for **Python, Java, C, C++** by JetBrains Research is available here: [PathMiner](https://github.com/JetBrains-Research/astminer)._\n\nTo extend code2seq to languages other than Java and C#, a new extractor (similar to the [JavaExtractor](JavaExtractor))\nshould be implemented and called by [preprocess.sh](preprocess.sh).\nBasically, an extractor should be able to output, for each directory containing source files:\n  * A single text file, where each row is an example.\n  * Each example is a space-delimited list of fields, where:\n  1. The first field is the target label, internally delimited by the \"|\" character (for example: `compare|ignore|case`)\n  2. Each of the following fields is a context, where each context has three components separated by commas (\",\"). None of these components may include spaces or commas.\n  \n  We refer to these three components as a token, a path, and another token, but in general other types of ternary contexts can be considered.  
\n  \n  Each \"token\" component is a token in the code, split to subtokens using the \"|\" character.\n  \n  Each path is a path between two tokens, split to path nodes (or other kinds of building blocks) using the \"|\" character.\n  Example for a context:\n  \n`my|key,StringExression|MethodCall|Name,get|value`\n\nHere `my|key` and `get|value` are tokens, and `StringExression|MethodCall|Name` is the syntactic path that connects them. \n\n## Datasets\n### Java\nTo download the Java-small, Java-med and Java-large datasets used in the Code Summarization task as raw `*.java` files, use:\n\n  * [Java-small](https://s3.amazonaws.com/code2seq/datasets/java-small.tar.gz)\n  * [Java-med](https://s3.amazonaws.com/code2seq/datasets/java-med.tar.gz)\n  * [Java-large](https://s3.amazonaws.com/code2seq/datasets/java-large.tar.gz)\n  \nTo download the preprocessed datasets, use:\n  * [Java-small-preprocessed](https://s3.amazonaws.com/code2seq/datasets/java-small-preprocessed.tar.gz)\n  * [Java-med-preprocessed](https://s3.amazonaws.com/code2seq/datasets/java-med-preprocessed.tar.gz)\n  * [Java-large-preprocessed](https://s3.amazonaws.com/code2seq/datasets/java-large-preprocessed.tar.gz)\n\n### C#\nThe C# dataset used in the Code Captioning task can be downloaded from the [CodeNN](https://github.com/sriniiyer/codenn/) repository.\n\n## Baselines\n### Using the trained model\nFor the NMT baselines (BiLSTM, Transformer) we used the implementation of [OpenNMT-py](http://opennmt.net/OpenNMT-py/).\nThe trained BiLSTM model is available here:\n`https://code2seq.s3.amazonaws.com/lstm_baseline/model_acc_62.88_ppl_12.03_e16.pt`\n\nTest+validation sources and targets:\n```\nhttps://code2seq.s3.amazonaws.com/lstm_baseline/test_expected_actual.txt\nhttps://code2seq.s3.amazonaws.com/lstm_baseline/test_source.txt\nhttps://code2seq.s3.amazonaws.com/lstm_baseline/test_target.txt\nhttps://code2seq.s3.amazonaws.com/lstm_baseline/val_source.txt\nhttps://code2seq.s3.amazonaws.com/lstm_baseline/val_target.txt\n```\n\nThe command line for \"translating\" a \"source\" file to a \"target\" is:\n`python3 translate.py -model model_acc_62.88_ppl_12.03_e16.pt -src test_source.txt -output translation_epoch16.txt -gpu 0`\n\nThis results in a `translation_epoch16.txt` which we compare to `test_target.txt` to compute the score.\nThe file `test_expected_actual.txt` is a line-by-line concatenation of the true reference (\"expected\") with the corresponding prediction (the \"actual\").\n\n### Creating data for the baseline\nWe first modified the JavaExtractor (the same one as in this) to locate the methods to train on and print them to a file where each method is a single line. This modification is currently not checked in, but instead of extracting paths, it just prints `node.toString()` and replaces \"\\n\" with space, where `node` is the object holding the AST node of type `MethodDeclaration`.\n\nThen, we tokenized (including sub-tokenization of identifiers, i.e., `\"ArrayList\"-> [\"Array\",\"List\"])` each method body using `javalang`, using [this](baseline_tokenization/subtokenize_nmt_baseline.py) script (which can be run on [this](baseline_tokenization/input_example.txt) input example).\nSo a program of:\n```\nvoid methodName(String fooBar) {\n    System.out.println(\"hello world\");\n}\n```\n\nshould be printed by the modified JavaExtractor as:\n\n```method name|void (String fooBar){ System.out.println(\"hello world\");}```\n\nand the tokenization script would turn it into: \n\n```void ( String foo Bar ) { System . out . 
println ( \" hello world \" ) ; }```\n\nand the label to be predicted, i.e., \"method name\", into a separate file.\n\nOpenNMT-py can then be trained over these training source and target files.\n\n## Citation \n\n[code2seq: Generating Sequences from Structured Representations of Code](https://arxiv.org/pdf/1808.01400)\n\n```\n@inproceedings{\n    alon2018codeseq,\n    title={code2seq: Generating Sequences from Structured Representations of Code},\n    author={Uri Alon and Shaked Brody and Omer Levy and Eran Yahav},\n    booktitle={International Conference on Learning Representations},\n    year={2019},\n    url={https://openreview.net/forum?id=H1gKYo09tX},\n}\n```\n"
  },
  {
    "path": "__init__.py",
    "content": ""
  },
  {
    "path": "baseline_tokenization/input_example.txt",
    "content": "requires landscape|boolean (){ return false; }\nget parent key|Object (){ return new ContactsUiKey(); }\nget parent key|Object (){ return new ContactsUiKey(); }\nget layout id|int (){ return R.layout.loose_screen; }\nget parent key|Object (){ return new EditContactKey(contactId); }\nto contact|Contact (){ return new Contact(id, name, email); }\nto string|String (){ return \"Welcome!\\nClick to continue.\"; }\nget parent key|Object (){ return new EditContactKey(contactId); }\ntear down services|void (@NonNull Services services){ }\nget layout id|int (){ return R.layout.landscape_screen; }\n"
  },
  {
    "path": "baseline_tokenization/javalang/__init__.py",
    "content": "\nfrom . import parser\nfrom . import parse\nfrom . import tokenizer\nfrom . import javadoc\n\n\n__version__ = \"0.10.1\"\n"
  },
  {
    "path": "baseline_tokenization/javalang/ast.py",
    "content": "import pickle\n\nimport six\n\n\nclass MetaNode(type):\n    def __new__(mcs, name, bases, dict):\n        attrs = list(dict['attrs'])\n        dict['attrs'] = list()\n\n        for base in bases:\n            if hasattr(base, 'attrs'):\n                dict['attrs'].extend(base.attrs)\n\n        dict['attrs'].extend(attrs)\n\n        return type.__new__(mcs, name, bases, dict)\n\n\n@six.add_metaclass(MetaNode)\nclass Node(object):\n    attrs = ()\n\n    def __init__(self, **kwargs):\n        values = kwargs.copy()\n\n        for attr_name in self.attrs:\n            value = values.pop(attr_name, None)\n            setattr(self, attr_name, value)\n\n        if values:\n            raise ValueError('Extraneous arguments')\n\n    def __equals__(self, other):\n        if type(other) is not type(self):\n            return False\n\n        for attr in self.attrs:\n            if getattr(other, attr) != getattr(self, attr):\n                return False\n\n        return True\n\n    def __repr__(self):\n        return type(self).__name__\n\n    def __iter__(self):\n        return walk_tree(self)\n\n    def filter(self, pattern):\n        for path, node in self:\n            if ((isinstance(pattern, type) and isinstance(node, pattern)) or\n                (node == pattern)):\n                yield path, node\n\n    @property\n    def children(self):\n        return [getattr(self, attr_name) for attr_name in self.attrs]\n\ndef walk_tree(root):\n    children = None\n\n    if isinstance(root, Node):\n        yield (), root\n        children = root.children\n    else:\n        children = root\n\n    for child in children:\n        if isinstance(child, (Node, list, tuple)):\n            for path, node in walk_tree(child):\n                yield (root,) + path, node\n\ndef dump(ast, file):\n    pickle.dump(ast, file)\n\ndef load(file):\n    return pickle.load(file)\n"
  },
  {
    "path": "baseline_tokenization/javalang/javadoc.py",
    "content": "\nimport re\n\ndef join(s):\n    return ' '.join(l.strip() for l in s.split('\\n'))\n\nclass DocBlock(object):\n    def __init__(self):\n        self.description = ''\n        self.return_doc = None\n        self.params = []\n\n        self.authors = []\n        self.deprecated = False\n\n        # @exception and @throw are equivalent\n        self.throws = {}\n        self.exceptions = self.throws\n\n        self.tags = {}\n\n    def add_block(self, name, value):\n        value = value.strip()\n\n        if name == 'param':\n            try:\n                param, description = value.split(None, 1)\n            except ValueError:\n                param, description = value, ''\n            self.params.append((param, join(description)))\n\n        elif name in ('throws', 'exception'):\n            try:\n                ex, description = value.split(None, 1)\n            except ValueError:\n                ex, description = value, ''\n            self.throws[ex] = join(description)\n\n        elif name == 'return':\n            self.return_doc = value\n\n        elif name == 'author':\n            self.authors.append(value)\n\n        elif name == 'deprecated':\n            self.deprecated = True\n\n        self.tags.setdefault(name, []).append(value)\n\nblocks_re = re.compile('(^@)', re.MULTILINE)\nleading_space_re = re.compile(r'^\\s*\\*', re.MULTILINE)\nblocks_justify_re = re.compile(r'^\\s*@', re.MULTILINE)\n\ndef _sanitize(s):\n    s = s.strip()\n\n    if not (s[:3] == '/**' and s[-2:] == '*/'):\n        raise ValueError('not a valid Javadoc comment')\n\n    s = s.replace('\\t', '    ')\n\n    return s\n\ndef _uncomment(s):\n    # Remove /** and */\n    s = s[3:-2].strip()\n\n    return leading_space_re.sub('', s)\n\ndef _get_indent_level(s):\n    return len(s) - len(s.lstrip())\n\ndef _left_justify(s):\n    lines = s.rstrip().splitlines()\n\n    if not lines:\n        return ''\n\n    indent_levels = []\n    for line in lines:\n        if line.strip():\n            indent_levels.append(_get_indent_level(line))\n    indent_levels.sort()\n\n    common_indent = indent_levels[0]\n    if common_indent == 0:\n        return s\n    else:\n        lines = [line[common_indent:] for line in lines]\n        return '\\n'.join(lines)\n\ndef _force_blocks_left(s):\n    return blocks_justify_re.sub('@', s)\n\ndef parse(raw):\n    sanitized = _sanitize(raw)\n    uncommented = _uncomment(sanitized)\n    justified = _left_justify(uncommented)\n    justified_fixed = _force_blocks_left(justified)\n    prepared = justified_fixed\n\n    blocks = blocks_re.split(prepared)\n\n    doc = DocBlock()\n\n    if blocks[0] != '@':\n        doc.description = blocks[0].strip()\n        blocks = blocks[2::2]\n    else:\n        blocks = blocks[1::2]\n\n    for block in blocks:\n        try:\n            tag, value = block.split(None, 1)\n        except ValueError:\n            tag, value = block, ''\n\n        doc.add_block(tag, value)\n\n    return doc\n"
  },
  {
    "path": "baseline_tokenization/javalang/parse.py",
    "content": "\nfrom .parser import Parser\nfrom .tokenizer import tokenize\n\ndef parse_expression(exp):\n    if not exp.endswith(';'):\n        exp = exp + ';'\n\n    tokens = tokenize(exp)\n    parser = Parser(tokens)\n\n    return parser.parse_expression()\n\ndef parse_member_signature(sig):\n    if not sig.endswith(';'):\n        sig = sig + ';'\n\n    tokens = tokenize(sig)\n    parser = Parser(tokens)\n\n    return parser.parse_member_declaration()\n\ndef parse_constructor_signature(sig):\n    # Add an empty body to the signature, replacing a ; if necessary\n    if sig.endswith(';'):\n        sig = sig[:-1]\n    sig = sig + '{ }'\n\n    tokens = tokenize(sig)\n    parser = Parser(tokens)\n\n    return parser.parse_member_declaration()\n\ndef parse_type(s):\n    tokens = tokenize(s)\n    parser = Parser(tokens)\n\n    return parser.parse_type()\n\ndef parse_type_signature(sig):\n    if sig.endswith(';'):\n        sig = sig[:-1]\n    sig = sig + '{ }'\n\n    tokens = tokenize(sig)\n    parser = Parser(tokens)\n\n    return parser.parse_class_or_interface_declaration()\n\ndef parse(s):\n    tokens = tokenize(s)\n    parser = Parser(tokens)\n    return parser.parse()\n"
  },
  {
    "path": "baseline_tokenization/javalang/parser.py",
    "content": "import six\n\nfrom . import util\nfrom . import tree\nfrom .tokenizer import (\n    EndOfInput, Keyword, Modifier, BasicType, Identifier,\n    Annotation, Literal, Operator, JavaToken,\n    )\n\nENABLE_DEBUG_SUPPORT = False\n\ndef parse_debug(method):\n    global ENABLE_DEBUG_SUPPORT\n\n    if ENABLE_DEBUG_SUPPORT:\n        def _method(self):\n            if not hasattr(self, 'recursion_depth'):\n                self.recursion_depth = 0\n\n            if self.debug:\n                depth = \"%02d\" % (self.recursion_depth,)\n                token = six.text_type(self.tokens.look())\n                start_value = self.tokens.look().value\n                name = method.__name__\n                sep = (\"-\" * self.recursion_depth)\n                e_message = \"\"\n\n                print(\"%s %s> %s(%s)\" % (depth, sep, name, token))\n\n                self.recursion_depth += 1\n\n                try:\n                    r = method(self)\n\n                except JavaSyntaxError as e:\n                    e_message = e.description\n                    raise\n\n                except Exception as e:\n                    e_message = six.text_type(e)\n                    raise\n\n                finally:\n                    token = six.text_type(self.tokens.last())\n                    print(\"%s <%s %s(%s, %s) %s\" %\n                        (depth, sep, name, start_value, token, e_message))\n                    self.recursion_depth -= 1\n            else:\n                self.recursion_depth += 1\n                try:\n                    r = method(self)\n                finally:\n                    self.recursion_depth -= 1\n\n            return r\n\n        return _method\n\n    else:\n        return method\n\n# ------------------------------------------------------------------------------\n# ---- Parsing exception ----\n\nclass JavaParserBaseException(Exception):\n    def __init__(self, message=''):\n        super(JavaParserBaseException, self).__init__(message)\n\nclass JavaSyntaxError(JavaParserBaseException):\n    def __init__(self, description, at=None):\n        super(JavaSyntaxError, self).__init__()\n\n        self.description = description\n        self.at = at\n\nclass JavaParserError(JavaParserBaseException):\n    pass\n\n# ------------------------------------------------------------------------------\n# ---- Parser class ----\n\nclass Parser(object):\n    operator_precedence = [ set(('||',)),\n                            set(('&&',)),\n                            set(('|',)),\n                            set(('^',)),\n                            set(('&',)),\n                            set(('==', '!=')),\n                            set(('<', '>', '>=', '<=', 'instanceof')),\n                            set(('<<', '>>', '>>>')),\n                            set(('+', '-')),\n                            set(('*', '/', '%')) ]\n\n    def __init__(self, tokens):\n        self.tokens = util.LookAheadListIterator(tokens)\n        self.tokens.set_default(EndOfInput(None))\n\n        self.debug = False\n\n# ------------------------------------------------------------------------------\n# ---- Debug control ----\n\n    def set_debug(self, debug=True):\n        self.debug = debug\n\n# ------------------------------------------------------------------------------\n# ---- Parsing entry point ----\n\n    def parse(self):\n        return self.parse_compilation_unit()\n\n# ------------------------------------------------------------------------------\n# ---- Helper 
methods ----\n\n    def illegal(self, description, at=None):\n        if not at:\n            at = self.tokens.look()\n\n        raise JavaSyntaxError(description, at)\n\n    def accept(self, *accepts):\n        last = None\n\n        if len(accepts) == 0:\n            raise JavaParserError(\"Missing acceptable values\")\n\n        for accept in accepts:\n            token = next(self.tokens)\n            if isinstance(accept, six.string_types) and (\n                    not token.value == accept):\n                self.illegal(\"Expected '%s'\" % (accept,))\n            elif isinstance(accept, type) and not isinstance(token, accept):\n                self.illegal(\"Expected %s\" % (accept.__name__,))\n\n            last = token\n\n        return last.value\n\n    def would_accept(self, *accepts):\n        if len(accepts) == 0:\n            raise JavaParserError(\"Missing acceptable values\")\n\n        for i, accept in enumerate(accepts):\n            token = self.tokens.look(i)\n\n            if isinstance(accept, six.string_types) and (\n                    not token.value == accept):\n                return False\n            elif isinstance(accept, type) and not isinstance(token, accept):\n                return False\n\n        return True\n\n    def try_accept(self, *accepts):\n        if len(accepts) == 0:\n            raise JavaParserError(\"Missing acceptable values\")\n\n        for i, accept in enumerate(accepts):\n            token = self.tokens.look(i)\n\n            if isinstance(accept, six.string_types) and (\n                    not token.value == accept):\n                return False\n            elif isinstance(accept, type) and not isinstance(token, accept):\n                return False\n\n        for i in range(0, len(accepts)):\n            next(self.tokens)\n\n        return True\n\n    def build_binary_operation(self, parts, start_level=0):\n        if len(parts) == 1:\n            return parts[0]\n\n        operands = list()\n        operators = list()\n\n        i = 0\n\n        for level in range(start_level, len(self.operator_precedence)):\n            for j in range(1, len(parts) - 1, 2):\n                if parts[j] in self.operator_precedence[level]:\n                    operand = self.build_binary_operation(parts[i:j], level + 1)\n                    operator = parts[j]\n                    i = j + 1\n\n                    operands.append(operand)\n                    operators.append(operator)\n\n            if operands:\n                break\n\n        operand = self.build_binary_operation(parts[i:], level + 1)\n        operands.append(operand)\n\n        operation = operands[0]\n\n        for operator, operandr in zip(operators, operands[1:]):\n            operation = tree.BinaryOperation(operandl=operation)\n            operation.operator = operator\n            operation.operandr = operandr\n\n        return operation\n\n    def is_annotation(self, i=0):\n        \"\"\" Returns true if the position is the start of an annotation application\n        (as opposed to an annotation declaration)\n\n        \"\"\"\n\n        return (isinstance(self.tokens.look(i), Annotation)\n                and not self.tokens.look(i + 1).value == 'interface')\n\n    def is_annotation_declaration(self, i=0):\n        \"\"\" Returns true if the position is the start of an annotation application\n        (as opposed to an annotation declaration)\n\n        \"\"\"\n\n        return (isinstance(self.tokens.look(i), Annotation)\n                and self.tokens.look(i + 
1).value == 'interface')\n\n# ------------------------------------------------------------------------------\n# ---- Parsing methods ----\n\n# ------------------------------------------------------------------------------\n# -- Identifiers --\n\n    @parse_debug\n    def parse_identifier(self):\n        return self.accept(Identifier)\n\n    @parse_debug\n    def parse_qualified_identifier(self):\n        qualified_identifier = list()\n\n        while True:\n            identifier = self.parse_identifier()\n            qualified_identifier.append(identifier)\n\n            if not self.try_accept('.'):\n                break\n\n        return '.'.join(qualified_identifier)\n\n    @parse_debug\n    def parse_qualified_identifier_list(self):\n        qualified_identifiers = list()\n\n        while True:\n            qualified_identifier = self.parse_qualified_identifier()\n            qualified_identifiers.append(qualified_identifier)\n\n            if not self.try_accept(','):\n                break\n\n        return qualified_identifiers\n\n# ------------------------------------------------------------------------------\n# -- Top level units --\n\n    @parse_debug\n    def parse_compilation_unit(self):\n        package = None\n        package_annotations = None\n        javadoc = None\n        import_declarations = list()\n        type_declarations = list()\n\n        self.tokens.push_marker()\n        next_token = self.tokens.look()\n        if next_token:\n            javadoc = next_token.javadoc\n\n        if self.is_annotation():\n            package_annotations = self.parse_annotations()\n\n        if self.try_accept('package'):\n            self.tokens.pop_marker(False)\n            package_name = self.parse_qualified_identifier()\n            package = tree.PackageDeclaration(annotations=package_annotations,\n                                              name=package_name,\n                                              documentation=javadoc)\n            self.accept(';')\n        else:\n            self.tokens.pop_marker(True)\n            package_annotations = None\n\n        while self.would_accept('import'):\n            import_declaration = self.parse_import_declaration()\n            import_declarations.append(import_declaration)\n\n        while not isinstance(self.tokens.look(), EndOfInput):\n            try:\n                type_declaration = self.parse_type_declaration()\n            except StopIteration:\n                self.illegal(\"Unexpected end of input\")\n\n            if type_declaration:\n                type_declarations.append(type_declaration)\n\n        return tree.CompilationUnit(package=package,\n                                    imports=import_declarations,\n                                    types=type_declarations)\n\n    @parse_debug\n    def parse_import_declaration(self):\n        qualified_identifier = list()\n        static = False\n        import_all = False\n\n        self.accept('import')\n\n        if self.try_accept('static'):\n            static = True\n\n        while True:\n            identifier = self.parse_identifier()\n            qualified_identifier.append(identifier)\n\n            if self.try_accept('.'):\n                if self.try_accept('*'):\n                    self.accept(';')\n                    import_all = True\n                    break\n\n            else:\n                self.accept(';')\n                break\n\n        return tree.Import(path='.'.join(qualified_identifier),\n                           
static=static,\n                           wildcard=import_all)\n\n    @parse_debug\n    def parse_type_declaration(self):\n        if self.try_accept(';'):\n            return None\n        else:\n            return self.parse_class_or_interface_declaration()\n\n    @parse_debug\n    def parse_class_or_interface_declaration(self):\n        modifiers, annotations, javadoc = self.parse_modifiers()\n        type_declaration = None\n\n        token = self.tokens.look()\n        if token.value == 'class':\n            type_declaration = self.parse_normal_class_declaration()\n        elif token.value == 'enum':\n            type_declaration = self.parse_enum_declaration()\n        elif token.value == 'interface':\n            type_declaration = self.parse_normal_interface_declaration()\n        elif self.is_annotation_declaration():\n            type_declaration = self.parse_annotation_type_declaration()\n        else:\n            self.illegal(\"Expected type declaration\")\n\n        type_declaration.modifiers = modifiers\n        type_declaration.annotations = annotations\n        type_declaration.documentation = javadoc\n\n        return type_declaration\n\n    @parse_debug\n    def parse_normal_class_declaration(self):\n        name = None\n        type_params = None\n        extends = None\n        implements = None\n        body = None\n\n        self.accept('class')\n\n        name = self.parse_identifier()\n\n        if self.would_accept('<'):\n            type_params = self.parse_type_parameters()\n\n        if self.try_accept('extends'):\n            extends = self.parse_type()\n\n        if self.try_accept('implements'):\n            implements = self.parse_type_list()\n\n        body = self.parse_class_body()\n\n        return tree.ClassDeclaration(name=name,\n                                     type_parameters=type_params,\n                                     extends=extends,\n                                     implements=implements,\n                                     body=body)\n\n    @parse_debug\n    def parse_enum_declaration(self):\n        name = None\n        implements = None\n        body = None\n\n        self.accept('enum')\n        name = self.parse_identifier()\n\n        if self.try_accept('implements'):\n            implements = self.parse_type_list()\n\n        body = self.parse_enum_body()\n\n        return tree.EnumDeclaration(name=name,\n                                    implements=implements,\n                                    body=body)\n\n    @parse_debug\n    def parse_normal_interface_declaration(self):\n        name = None\n        type_parameters = None\n        extends = None\n        body = None\n\n        self.accept('interface')\n        name = self.parse_identifier()\n\n        if self.would_accept('<'):\n            type_parameters = self.parse_type_parameters()\n\n        if self.try_accept('extends'):\n            extends = self.parse_type_list()\n\n        body = self.parse_interface_body()\n\n        return tree.InterfaceDeclaration(name=name,\n                                         type_parameters=type_parameters,\n                                         extends=extends,\n                                         body=body)\n\n    @parse_debug\n    def parse_annotation_type_declaration(self):\n        name = None\n        body = None\n\n        self.accept('@', 'interface')\n\n        name = self.parse_identifier()\n        body = self.parse_annotation_type_body()\n\n        return tree.AnnotationDeclaration(name=name,\n            
                              body=body)\n\n# ------------------------------------------------------------------------------\n# -- Types --\n\n    @parse_debug\n    def parse_type(self):\n        java_type = None\n\n        if isinstance(self.tokens.look(), BasicType):\n            java_type = self.parse_basic_type()\n        elif isinstance(self.tokens.look(), Identifier):\n            java_type = self.parse_reference_type()\n        else:\n            self.illegal(\"Expected type\")\n\n        java_type.dimensions = self.parse_array_dimension()\n\n        return java_type\n\n    @parse_debug\n    def parse_basic_type(self):\n        return tree.BasicType(name=self.accept(BasicType))\n\n    @parse_debug\n    def parse_reference_type(self):\n        reference_type = tree.ReferenceType()\n        tail = reference_type\n\n        while True:\n            tail.name = self.parse_identifier()\n\n            if self.would_accept('<'):\n                tail.arguments = self.parse_type_arguments()\n\n            if self.try_accept('.'):\n                tail.sub_type = tree.ReferenceType()\n                tail = tail.sub_type\n            else:\n                break\n\n        return reference_type\n\n    @parse_debug\n    def parse_type_arguments(self):\n        type_arguments = list()\n\n        self.accept('<')\n\n        while True:\n            type_argument = self.parse_type_argument()\n            type_arguments.append(type_argument)\n\n            if self.try_accept('>'):\n                break\n\n            self.accept(',')\n\n        return type_arguments\n\n    @parse_debug\n    def parse_type_argument(self):\n        pattern_type = None\n        base_type = None\n\n        if self.try_accept('?'):\n            if self.tokens.look().value in ('extends', 'super'):\n                pattern_type = self.tokens.next().value\n            else:\n                return tree.TypeArgument(pattern_type='?')\n\n        if self.would_accept(BasicType):\n            base_type = self.parse_basic_type()\n            self.accept('[', ']')\n            base_type.dimensions = [None]\n        else:\n            base_type = self.parse_reference_type()\n            base_type.dimensions = []\n\n        base_type.dimensions += self.parse_array_dimension()\n\n        return tree.TypeArgument(type=base_type,\n                                 pattern_type=pattern_type)\n\n    @parse_debug\n    def parse_nonwildcard_type_arguments(self):\n        self.accept('<')\n        type_arguments = self.parse_type_list()\n        self.accept('>')\n\n        return [tree.TypeArgument(type=t) for t in type_arguments]\n\n    @parse_debug\n    def parse_type_list(self):\n        types = list()\n\n        while True:\n            if self.would_accept(BasicType):\n                base_type = self.parse_basic_type()\n                self.accept('[', ']')\n                base_type.dimensions = [None]\n            else:\n                base_type = self.parse_reference_type()\n                base_type.dimensions = []\n\n            base_type.dimensions += self.parse_array_dimension()\n            types.append(base_type)\n\n            if not self.try_accept(','):\n                break\n\n        return types\n\n    @parse_debug\n    def parse_type_arguments_or_diamond(self):\n        if self.try_accept('<', '>'):\n            return list()\n        else:\n            return self.parse_type_arguments()\n\n    @parse_debug\n    def parse_nonwildcard_type_arguments_or_diamond(self):\n        if self.try_accept('<', '>'):\n        
    return list()\n        else:\n            return self.parse_nonwildcard_type_arguments()\n\n    @parse_debug\n    def parse_type_parameters(self):\n        type_parameters = list()\n\n        self.accept('<')\n\n        while True:\n            type_parameter = self.parse_type_parameter()\n            type_parameters.append(type_parameter)\n\n            if self.try_accept('>'):\n                break\n            else:\n                self.accept(',')\n\n        return type_parameters\n\n    @parse_debug\n    def parse_type_parameter(self):\n        identifier = self.parse_identifier()\n        extends = None\n\n        if self.try_accept('extends'):\n            extends = list()\n\n            while True:\n                reference_type = self.parse_reference_type()\n                extends.append(reference_type)\n\n                if not self.try_accept('&'):\n                    break\n\n        return tree.TypeParameter(name=identifier,\n                                  extends=extends)\n\n    @parse_debug\n    def parse_array_dimension(self):\n        array_dimension = 0\n\n        while self.try_accept('[', ']'):\n            array_dimension += 1\n\n        return [None] * array_dimension\n\n# ------------------------------------------------------------------------------\n# -- Annotations and modifiers --\n\n    @parse_debug\n    def parse_modifiers(self):\n        annotations = list()\n        modifiers = set()\n        javadoc = None\n\n        next_token = self.tokens.look()\n        if next_token:\n            javadoc = next_token.javadoc\n\n        while True:\n            if self.would_accept(Modifier):\n                modifiers.add(self.accept(Modifier))\n\n            elif self.is_annotation():\n                annotation = self.parse_annotation()\n                annotations.append(annotation)\n\n            else:\n                break\n\n        return (modifiers, annotations, javadoc)\n\n    @parse_debug\n    def parse_annotations(self):\n        annotations = list()\n\n        while True:\n            annotation = self.parse_annotation()\n            annotations.append(annotation)\n\n            if not self.is_annotation():\n                break\n\n        return annotations\n\n    @parse_debug\n    def parse_annotation(self):\n        qualified_identifier = None\n        annotation_element = None\n\n        self.accept('@')\n        qualified_identifier = self.parse_qualified_identifier()\n\n        if self.try_accept('('):\n            if not self.would_accept(')'):\n                annotation_element = self.parse_annotation_element()\n            self.accept(')')\n\n        return tree.Annotation(name=qualified_identifier,\n                               element=annotation_element)\n\n    @parse_debug\n    def parse_annotation_element(self):\n        if self.would_accept(Identifier, '='):\n            return self.parse_element_value_pairs()\n        else:\n            return self.parse_element_value()\n\n    @parse_debug\n    def parse_element_value_pairs(self):\n        pairs = list()\n\n        while True:\n            pair = self.parse_element_value_pair()\n            pairs.append(pair)\n\n            if not self.try_accept(','):\n                break\n\n        return pairs\n\n    @parse_debug\n    def parse_element_value_pair(self):\n        identifier = self.parse_identifier()\n        self.accept('=')\n        value = self.parse_element_value()\n\n        return tree.ElementValuePair(name=identifier,\n                                     
value=value)\n\n    @parse_debug\n    def parse_element_value(self):\n        if self.is_annotation():\n            return self.parse_annotation()\n\n        elif self.would_accept('{'):\n            return self.parse_element_value_array_initializer()\n\n        else:\n            return self.parse_expressionl()\n\n    @parse_debug\n    def parse_element_value_array_initializer(self):\n        self.accept('{')\n\n        if self.try_accept('}'):\n            return list()\n\n        element_values = self.parse_element_values()\n        self.try_accept(',')\n        self.accept('}')\n\n        return tree.ElementArrayValue(values=element_values)\n\n    @parse_debug\n    def parse_element_values(self):\n        element_values = list()\n\n        while True:\n            element_value = self.parse_element_value()\n            element_values.append(element_value)\n\n            if self.would_accept('}') or self.would_accept(',', '}'):\n                break\n\n            self.accept(',')\n\n        return element_values\n\n# ------------------------------------------------------------------------------\n# -- Class body --\n\n    @parse_debug\n    def parse_class_body(self):\n        declarations = list()\n\n        self.accept('{')\n\n        while not self.would_accept('}'):\n            declaration = self.parse_class_body_declaration()\n            if declaration:\n                declarations.append(declaration)\n\n        self.accept('}')\n\n        return declarations\n\n    @parse_debug\n    def parse_class_body_declaration(self):\n        token = self.tokens.look()\n\n        if self.try_accept(';'):\n            return None\n\n        elif self.would_accept('static', '{'):\n            self.accept('static')\n            return self.parse_block()\n\n        elif self.would_accept('{'):\n            return self.parse_block()\n\n        else:\n            return self.parse_member_declaration()\n\n    @parse_debug\n    def parse_member_declaration(self):\n        modifiers, annotations, javadoc = self.parse_modifiers()\n        member = None\n\n        token = self.tokens.look()\n        if self.try_accept('void'):\n            method_name = self.parse_identifier()\n            member = self.parse_void_method_declarator_rest()\n            member.name = method_name\n\n        elif token.value == '<':\n            member = self.parse_generic_method_or_constructor_declaration()\n\n        elif token.value == 'class':\n            member = self.parse_normal_class_declaration()\n\n        elif token.value == 'enum':\n            member = self.parse_enum_declaration()\n\n        elif token.value == 'interface':\n            member = self.parse_normal_interface_declaration()\n\n        elif self.is_annotation_declaration():\n            member = self.parse_annotation_type_declaration()\n\n        elif self.would_accept(Identifier, '('):\n            constructor_name = self.parse_identifier()\n            member = self.parse_constructor_declarator_rest()\n            member.name = constructor_name\n\n        else:\n            member = self.parse_method_or_field_declaraction()\n\n        member._position = token.position\n        member.modifiers = modifiers\n        member.annotations = annotations\n        member.documentation = javadoc\n\n        return member\n\n    @parse_debug\n    def parse_method_or_field_declaraction(self):\n        member_type = self.parse_type()\n        member_name = self.parse_identifier()\n\n        member = self.parse_method_or_field_rest()\n\n        if 
isinstance(member, tree.MethodDeclaration):\n            member_type.dimensions += member.return_type.dimensions\n\n            member.name = member_name\n            member.return_type = member_type\n        else:\n            member.type = member_type\n            member.declarators[0].name = member_name\n\n        return member\n\n    @parse_debug\n    def parse_method_or_field_rest(self):\n        if self.would_accept('('):\n            return self.parse_method_declarator_rest()\n        else:\n            rest = self.parse_field_declarators_rest()\n            self.accept(';')\n            return rest\n\n    @parse_debug\n    def parse_field_declarators_rest(self):\n        array_dimension, initializer = self.parse_variable_declarator_rest()\n        declarators = [tree.VariableDeclarator(dimensions=array_dimension,\n                                               initializer=initializer)]\n\n        while self.try_accept(','):\n            declarator = self.parse_variable_declarator()\n            declarators.append(declarator)\n\n        return tree.FieldDeclaration(declarators=declarators)\n\n    @parse_debug\n    def parse_method_declarator_rest(self):\n        formal_parameters = self.parse_formal_parameters()\n        additional_dimensions = self.parse_array_dimension()\n        throws = None\n        body = None\n\n        if self.try_accept('throws'):\n            throws = self.parse_qualified_identifier_list()\n\n        if self.would_accept('{'):\n            body = self.parse_block()\n        else:\n            self.accept(';')\n\n        return tree.MethodDeclaration(parameters=formal_parameters,\n                                     throws=throws,\n                                     body=body,\n                                     return_type=tree.Type(dimensions=additional_dimensions))\n\n    @parse_debug\n    def parse_void_method_declarator_rest(self):\n        formal_parameters = self.parse_formal_parameters()\n        throws = None\n        body = None\n\n        if self.try_accept('throws'):\n            throws = self.parse_qualified_identifier_list()\n\n        if self.would_accept('{'):\n            body = self.parse_block()\n        else:\n            self.accept(';')\n\n        return tree.MethodDeclaration(parameters=formal_parameters,\n                                      throws=throws,\n                                      body=body)\n\n    @parse_debug\n    def parse_constructor_declarator_rest(self):\n        formal_parameters = self.parse_formal_parameters()\n        throws = None\n        body = None\n\n        if self.try_accept('throws'):\n            throws = self.parse_qualified_identifier_list()\n\n        body = self.parse_block()\n\n        return tree.ConstructorDeclaration(parameters=formal_parameters,\n                                           throws=throws,\n                                           body=body)\n\n    @parse_debug\n    def parse_generic_method_or_constructor_declaration(self):\n        type_parameters = self.parse_type_parameters()\n        method = None\n\n        if self.would_accept(Identifier, '('):\n            constructor_name = self.parse_identifier()\n            method = self.parse_constructor_declarator_rest()\n            method.name = constructor_name\n        elif self.try_accept('void'):\n            method_name = self.parse_identifier()\n            method = self.parse_void_method_declarator_rest()\n            method.name = method_name\n\n        else:\n            method_return_type = self.parse_type()\n    
        method_name = self.parse_identifier()\n\n            method = self.parse_method_declarator_rest()\n\n            method_return_type.dimensions += method.return_type.dimensions\n            method.return_type = method_return_type\n            method.name = method_name\n\n        method.type_parameters = type_parameters\n        return method\n\n# ------------------------------------------------------------------------------\n# -- Interface body --\n\n    @parse_debug\n    def parse_interface_body(self):\n        declarations = list()\n\n        self.accept('{')\n        while not self.would_accept('}'):\n            declaration = self.parse_interface_body_declaration()\n\n            if declaration:\n                declarations.append(declaration)\n        self.accept('}')\n\n        return declarations\n\n    @parse_debug\n    def parse_interface_body_declaration(self):\n        if self.try_accept(';'):\n            return None\n\n        modifiers, annotations, javadoc = self.parse_modifiers()\n\n        declaration = self.parse_interface_member_declaration()\n        declaration.modifiers = modifiers\n        declaration.annotations = annotations\n        declaration.documentation = javadoc\n\n        return declaration\n\n    @parse_debug\n    def parse_interface_member_declaration(self):\n        declaration = None\n\n        if self.would_accept('class'):\n            declaration = self.parse_normal_class_declaration()\n        elif self.would_accept('interface'):\n            declaration = self.parse_normal_interface_declaration()\n        elif self.would_accept('enum'):\n            declaration = self.parse_enum_declaration()\n        elif self.is_annotation_declaration():\n            declaration = self.parse_annotation_type_declaration()\n        elif self.would_accept('<'):\n            declaration = self.parse_interface_generic_method_declarator()\n        elif self.try_accept('void'):\n            method_name = self.parse_identifier()\n            declaration = self.parse_void_interface_method_declarator_rest()\n            declaration.name = method_name\n        else:\n            declaration = self.parse_interface_method_or_field_declaration()\n\n        return declaration\n\n    @parse_debug\n    def parse_interface_method_or_field_declaration(self):\n        java_type = self.parse_type()\n        name = self.parse_identifier()\n        member = self.parse_interface_method_or_field_rest()\n\n        if isinstance(member, tree.MethodDeclaration):\n            java_type.dimensions += member.return_type.dimensions\n            member.name = name\n            member.return_type = java_type\n        else:\n            member.declarators[0].name = name\n            member.type = java_type\n\n        return member\n\n    @parse_debug\n    def parse_interface_method_or_field_rest(self):\n        rest = None\n\n        if self.would_accept('('):\n            rest = self.parse_interface_method_declarator_rest()\n        else:\n            rest = self.parse_constant_declarators_rest()\n            self.accept(';')\n\n        return rest\n\n    @parse_debug\n    def parse_constant_declarators_rest(self):\n        array_dimension, initializer = self.parse_constant_declarator_rest()\n        declarators = [tree.VariableDeclarator(dimensions=array_dimension,\n                                               initializer=initializer)]\n\n        while self.try_accept(','):\n            declarator = self.parse_constant_declarator()\n            declarators.append(declarator)\n\n        
return tree.ConstantDeclaration(declarators=declarators)\n\n    @parse_debug\n    def parse_constant_declarator_rest(self):\n        array_dimension = self.parse_array_dimension()\n        self.accept('=')\n        initializer = self.parse_variable_initializer()\n\n        return (array_dimension, initializer)\n\n    @parse_debug\n    def parse_constant_declarator(self):\n        name = self.parse_identifier()\n        additional_dimension, initializer = self.parse_constant_declarator_rest()\n\n        return tree.VariableDeclarator(name=name,\n                                       dimensions=additional_dimension,\n                                       initializer=initializer)\n\n    @parse_debug\n    def parse_interface_method_declarator_rest(self):\n        parameters = self.parse_formal_parameters()\n        array_dimension = self.parse_array_dimension()\n        throws = None\n        body = None\n\n        if self.try_accept('throws'):\n            throws = self.parse_qualified_identifier_list()\n\n        if self.would_accept('{'):\n            body = self.parse_block()\n        else:\n            self.accept(';')\n\n        return tree.MethodDeclaration(parameters=parameters,\n                                      throws=throws,\n                                      body=body,\n                                      return_type=tree.Type(dimensions=array_dimension))\n\n    @parse_debug\n    def parse_void_interface_method_declarator_rest(self):\n        parameters = self.parse_formal_parameters()\n        throws = None\n        body = None\n\n        if self.try_accept('throws'):\n            throws = self.parse_qualified_identifier_list()\n\n        if self.would_accept('{'):\n            body = self.parse_block()\n        else:\n            self.accept(';')\n\n        return tree.MethodDeclaration(parameters=parameters,\n                                      throws=throws,\n                                      body=body)\n\n    @parse_debug\n    def parse_interface_generic_method_declarator(self):\n        type_parameters = self.parse_type_parameters()\n        return_type = None\n        method_name = None\n\n        if not self.try_accept('void'):\n            return_type = self.parse_type()\n\n        method_name = self.parse_identifier()\n        method = self.parse_interface_method_declarator_rest()\n        method.name = method_name\n        method.return_type = return_type\n        method.type_parameters = type_parameters\n\n        return method\n\n# ------------------------------------------------------------------------------\n# -- Parameters and variables --\n\n    @parse_debug\n    def parse_formal_parameters(self):\n        formal_parameters = list()\n\n        self.accept('(')\n\n        if self.try_accept(')'):\n            return formal_parameters\n\n        while True:\n            modifiers, annotations = self.parse_variable_modifiers()\n            parameter_type = self.parse_type()\n            varargs = False\n\n            if self.try_accept('...'):\n                varargs = True\n\n            parameter_name = self.parse_identifier()\n            parameter_type.dimensions += self.parse_array_dimension()\n\n            parameter = tree.FormalParameter(modifiers=modifiers,\n                                             annotations=annotations,\n                                             type=parameter_type,\n                                             name=parameter_name,\n                                             varargs=varargs)\n\n            
formal_parameters.append(parameter)\n\n            if varargs:\n                # varargs parameter must be the last\n                break\n\n            if not self.try_accept(','):\n                break\n\n        self.accept(')')\n\n        return formal_parameters\n\n    @parse_debug\n    def parse_variable_modifiers(self):\n        modifiers = set()\n        annotations = list()\n\n        while True:\n            if self.try_accept('final'):\n                modifiers.add('final')\n            elif self.is_annotation():\n                annotation = self.parse_annotation()\n                annotations.append(annotation)\n            else:\n                break\n\n        return modifiers, annotations\n\n    @parse_debug\n    def parse_variable_declators(self):\n        declarators = list()\n\n        while True:\n            declarator = self.parse_variable_declator()\n            declarators.append(declarator)\n\n            if not self.try_accept(','):\n                break\n\n        return declarators\n\n    @parse_debug\n    def parse_variable_declarators(self):\n        declarators = list()\n\n        while True:\n            declarator = self.parse_variable_declarator()\n            declarators.append(declarator)\n\n            if not self.try_accept(','):\n                break\n\n        return declarators\n\n    @parse_debug\n    def parse_variable_declarator(self):\n        identifier = self.parse_identifier()\n        array_dimension, initializer = self.parse_variable_declarator_rest()\n\n        return tree.VariableDeclarator(name=identifier,\n                                       dimensions=array_dimension,\n                                       initializer=initializer)\n\n    @parse_debug\n    def parse_variable_declarator_rest(self):\n        array_dimension = self.parse_array_dimension()\n        initializer = None\n\n        if self.try_accept('='):\n            initializer = self.parse_variable_initializer()\n\n        return (array_dimension, initializer)\n\n    @parse_debug\n    def parse_variable_initializer(self):\n        if self.would_accept('{'):\n            return self.parse_array_initializer()\n        else:\n            return self.parse_expression()\n\n    @parse_debug\n    def parse_array_initializer(self):\n        array_initializer = tree.ArrayInitializer(initializers=list())\n\n        self.accept('{')\n\n        if self.try_accept(','):\n            self.accept('}')\n            return array_initializer\n\n        if self.try_accept('}'):\n            return array_initializer\n\n        while True:\n            initializer = self.parse_variable_initializer()\n            array_initializer.initializers.append(initializer)\n\n            if not self.would_accept('}'):\n                self.accept(',')\n\n            if self.try_accept('}'):\n                return array_initializer\n\n# ------------------------------------------------------------------------------\n# -- Blocks and statements --\n\n    @parse_debug\n    def parse_block(self):\n        statements = list()\n\n        self.accept('{')\n\n        while not self.would_accept('}'):\n            statement = self.parse_block_statement()\n            statements.append(statement)\n        self.accept('}')\n\n        return statements\n\n    @parse_debug\n    def parse_block_statement(self):\n        if self.would_accept(Identifier, ':'):\n            # Labeled statement\n            return self.parse_statement()\n\n        if self.would_accept('synchronized'):\n            return 
self.parse_statement()\n\n        token = None\n        found_annotations = False\n        i = 0\n\n        # Look past annoatations and modifiers. If we find a modifier that is not\n        # 'final' then the statement must be a class or interface declaration\n        while True:\n            token = self.tokens.look(i)\n\n            if isinstance(token, Modifier):\n                if not token.value == 'final':\n                    return self.parse_class_or_interface_declaration()\n\n            elif self.is_annotation(i):\n                found_annotations = True\n\n                i += 2\n                while self.tokens.look(i).value == '.':\n                    i += 2\n\n                if self.tokens.look(i).value == '(':\n                    parens = 1\n                    i += 1\n\n                    while parens > 0:\n                        token = self.tokens.look(i)\n                        if token.value == '(':\n                            parens += 1\n                        elif token.value == ')':\n                            parens -= 1\n                        i += 1\n                    continue\n\n            else:\n                break\n\n            i += 1\n\n        if token.value in ('class', 'enum', 'interface', '@'):\n            return self.parse_class_or_interface_declaration()\n\n        if found_annotations or isinstance(token, BasicType):\n            return self.parse_local_variable_declaration_statement()\n\n        # At this point, if the block statement is a variable definition the next\n        # token MUST be an identifier, so if it isn't we can conclude the block\n        # statement is a normal statement\n        if not isinstance(token, Identifier):\n            return self.parse_statement()\n\n        # We can't easily determine the statement type. 
Try parsing as a variable\n        # declaration first and fall back to a statement\n        try:\n            with self.tokens:\n                return self.parse_local_variable_declaration_statement()\n        except JavaSyntaxError:\n            return self.parse_statement()\n\n    @parse_debug\n    def parse_local_variable_declaration_statement(self):\n        modifiers, annotations = self.parse_variable_modifiers()\n        java_type = self.parse_type()\n        declarators = self.parse_variable_declarators()\n        self.accept(';')\n\n        var = tree.LocalVariableDeclaration(modifiers=modifiers,\n                                            annotations=annotations,\n                                            type=java_type,\n                                            declarators=declarators)\n        return var\n\n    @parse_debug\n    def parse_statement(self):\n        token = self.tokens.look()\n        if self.would_accept('{'):\n            block = self.parse_block()\n            return tree.BlockStatement(statements=block)\n\n        elif self.try_accept(';'):\n            return tree.Statement()\n\n        elif self.would_accept(Identifier, ':'):\n            identifer = self.parse_identifier()\n            self.accept(':')\n\n            statement = self.parse_statement()\n            statement.label = identifer\n\n            return statement\n\n        elif self.try_accept('if'):\n            condition = self.parse_par_expression()\n            then = self.parse_statement()\n            else_statement = None\n\n            if self.try_accept('else'):\n                else_statement = self.parse_statement()\n\n            return tree.IfStatement(condition=condition,\n                                    then_statement=then,\n                                    else_statement=else_statement)\n\n        elif self.try_accept('assert'):\n            condition = self.parse_expression()\n            value = None\n\n            if self.try_accept(':'):\n                value = self.parse_expression()\n\n            self.accept(';')\n\n            return tree.AssertStatement(condition=condition,\n                                        value=value)\n\n        elif self.try_accept('switch'):\n            switch_expression = self.parse_par_expression()\n            self.accept('{')\n            switch_block = self.parse_switch_block_statement_groups()\n            self.accept('}')\n\n            return tree.SwitchStatement(expression=switch_expression,\n                                        cases=switch_block)\n\n        elif self.try_accept('while'):\n            condition = self.parse_par_expression()\n            action = self.parse_statement()\n\n            return tree.WhileStatement(condition=condition,\n                                       body=action)\n\n        elif self.try_accept('do'):\n            action = self.parse_statement()\n            self.accept('while')\n            condition = self.parse_par_expression()\n            self.accept(';')\n\n            return tree.DoStatement(condition=condition,\n                                    body=action)\n\n        elif self.try_accept('for'):\n            self.accept('(')\n            for_control = self.parse_for_control()\n            self.accept(')')\n            for_statement = self.parse_statement()\n\n            return tree.ForStatement(control=for_control,\n                                     body=for_statement)\n\n        elif self.try_accept('break'):\n            label = None\n\n            if 
self.would_accept(Identifier):\n                label = self.parse_identifier()\n\n            self.accept(';')\n\n            return tree.BreakStatement(goto=label)\n\n        elif self.try_accept('continue'):\n            label = None\n\n            if self.would_accept(Identifier):\n                label = self.parse_identifier()\n\n            self.accept(';')\n\n            return tree.ContinueStatement(goto=label)\n\n        elif self.try_accept('return'):\n            value = None\n\n            if not self.would_accept(';'):\n                value = self.parse_expression()\n\n            self.accept(';')\n\n            return tree.ReturnStatement(expression=value)\n\n        elif self.try_accept('throw'):\n            value = self.parse_expression()\n            self.accept(';')\n\n            return tree.ThrowStatement(expression=value)\n\n        elif self.try_accept('synchronized'):\n            lock = self.parse_par_expression()\n            block = self.parse_block()\n\n            return tree.SynchronizedStatement(lock=lock,\n                                              block=block)\n\n        elif self.try_accept('try'):\n            resource_specification = None\n            block = None\n            catches = None\n            finally_block = None\n\n            if self.would_accept('{'):\n                block = self.parse_block()\n\n                if self.would_accept('catch'):\n                    catches = self.parse_catches()\n\n                if self.try_accept('finally'):\n                    finally_block = self.parse_block()\n\n                if catches == None and finally_block == None:\n                    self.illegal(\"Expected catch/finally block\")\n\n            else:\n                resource_specification = self.parse_resource_specification()\n                block = self.parse_block()\n\n                if self.would_accept('catch'):\n                    catches = self.parse_catches()\n\n                if self.try_accept('finally'):\n                    finally_block = self.parse_block()\n\n            return tree.TryStatement(resources=resource_specification,\n                                     block=block,\n                                     catches=catches,\n                                     finally_block=finally_block)\n\n        else:\n            expression = self.parse_expression()\n            self.accept(';')\n\n            return tree.StatementExpression(expression=expression)\n\n# ------------------------------------------------------------------------------\n# -- Try / catch --\n\n    @parse_debug\n    def parse_catches(self):\n        catches = list()\n\n        while True:\n            catch = self.parse_catch_clause()\n            catches.append(catch)\n\n            if not self.would_accept('catch'):\n                break\n\n        return catches\n\n    @parse_debug\n    def parse_catch_clause(self):\n        self.accept('catch', '(')\n\n        modifiers, annotations = self.parse_variable_modifiers()\n        catch_parameter = tree.CatchClauseParameter(types=list())\n\n        while True:\n            catch_type = self.parse_qualified_identifier()\n            catch_parameter.types.append(catch_type)\n\n            if not self.try_accept('|'):\n                break\n        catch_parameter.name = self.parse_identifier()\n\n        self.accept(')')\n        block = self.parse_block()\n\n        return tree.CatchClause(parameter=catch_parameter,\n                                block=block)\n\n    @parse_debug\n    def 
parse_resource_specification(self):\n        resources = list()\n\n        self.accept('(')\n\n        while True:\n            resource = self.parse_resource()\n            resources.append(resource)\n\n            if not self.would_accept(')'):\n                self.accept(';')\n\n            if self.try_accept(')'):\n                break\n\n        return resources\n\n    @parse_debug\n    def parse_resource(self):\n        modifiers, annotations = self.parse_variable_modifiers()\n        reference_type = self.parse_reference_type()\n        reference_type.dimensions = self.parse_array_dimension()\n        name = self.parse_identifier()\n        reference_type.dimensions += self.parse_array_dimension()\n        self.accept('=')\n        value = self.parse_expression()\n\n        return tree.TryResource(modifiers=modifiers,\n                                annotations=annotations,\n                                type=reference_type,\n                                name=name,\n                                value=value)\n\n# ------------------------------------------------------------------------------\n# -- Switch and for statements ---\n\n    @parse_debug\n    def parse_switch_block_statement_groups(self):\n        statement_groups = list()\n\n        while self.tokens.look().value in ('case', 'default'):\n            statement_group = self.parse_switch_block_statement_group()\n            statement_groups.append(statement_group)\n\n        return statement_groups\n\n    @parse_debug\n    def parse_switch_block_statement_group(self):\n        labels = list()\n        statements = list()\n\n        while True:\n            case_type = self.tokens.next().value\n            case_value = None\n\n            if case_type == 'case':\n                if self.would_accept(Identifier, ':'):\n                    case_value = self.parse_identifier()\n                else:\n                    case_value = self.parse_expression()\n\n                labels.append(case_value)\n            elif not case_type == 'default':\n                self.illegal(\"Expected switch case\")\n\n            self.accept(':')\n\n            if self.tokens.look().value not in ('case', 'default'):\n                break\n\n        while self.tokens.look().value not in ('case', 'default', '}'):\n            statement = self.parse_block_statement()\n            statements.append(statement)\n\n        return tree.SwitchStatementCase(case=labels,\n                                        statements=statements)\n\n    @parse_debug\n    def parse_for_control(self):\n        # Try for_var_control and fall back to normal three part for control\n\n        try:\n            with self.tokens:\n                return self.parse_for_var_control()\n        except JavaSyntaxError:\n            pass\n\n        init = None\n        if not self.would_accept(';'):\n            init = self.parse_for_init_or_update()\n\n        self.accept(';')\n\n        condition = None\n        if not self.would_accept(';'):\n            condition = self.parse_expression()\n\n        self.accept(';')\n\n        update = None\n        if not self.would_accept(')'):\n            update = self.parse_for_init_or_update()\n\n        return tree.ForControl(init=init,\n                               condition=condition,\n                               update=update)\n\n    @parse_debug\n    def parse_for_var_control(self):\n        modifiers, annotations = self.parse_variable_modifiers()\n        var_type = self.parse_type()\n        var_name = 
self.parse_identifier()\n        var_type.dimensions += self.parse_array_dimension()\n\n        var = tree.VariableDeclaration(modifiers=modifiers,\n                                       annotations=annotations,\n                                       type=var_type)\n\n        rest = self.parse_for_var_control_rest()\n\n        if isinstance(rest, tree.Expression):\n            var.declarators = [tree.VariableDeclarator(name=var_name)]\n            return tree.EnhancedForControl(var=var,\n                                           iterable=rest)\n        else:\n            declarators, condition, update = rest\n            declarators[0].name = var_name\n            var.declarators = declarators\n            return tree.ForControl(init=var,\n                                   condition=condition,\n                                   update=update)\n\n    @parse_debug\n    def parse_for_var_control_rest(self):\n        if self.try_accept(':'):\n            expression = self.parse_expression()\n            return expression\n\n        declarators = None\n        if not self.would_accept(';'):\n            declarators = self.parse_for_variable_declarator_rest()\n        else:\n            declarators = [tree.VariableDeclarator()]\n        self.accept(';')\n\n        condition = None\n        if not self.would_accept(';'):\n            condition = self.parse_expression()\n        self.accept(';')\n\n        update = None\n        if not self.would_accept(')'):\n            update = self.parse_for_init_or_update()\n\n        return (declarators, condition, update)\n\n    @parse_debug\n    def parse_for_variable_declarator_rest(self):\n        initializer = None\n\n        if self.try_accept('='):\n            initializer = self.parse_variable_initializer()\n\n        declarators = [tree.VariableDeclarator(initializer=initializer)]\n\n        while self.try_accept(','):\n            declarator = self.parse_variable_declarator()\n            declarators.append(declarator)\n\n        return declarators\n\n    @parse_debug\n    def parse_for_init_or_update(self):\n        expressions = list()\n\n        while True:\n            expression = self.parse_expression()\n            expressions.append(expression)\n\n            if not self.try_accept(','):\n                break\n\n        return expressions\n\n# ------------------------------------------------------------------------------\n# -- Expressions --\n\n    @parse_debug\n    def parse_expression(self):\n        expressionl = self.parse_expressionl()\n        assignment_type = None\n        assignment_expression = None\n\n        if self.tokens.look().value in Operator.ASSIGNMENT:\n            assignment_type = self.tokens.next().value\n            assignment_expression = self.parse_expression()\n            return tree.Assignment(expressionl=expressionl,\n                                   type=assignment_type,\n                                   value=assignment_expression)\n        else:\n            return expressionl\n\n    @parse_debug\n    def parse_expressionl(self):\n        expression_2 = self.parse_expression_2()\n        true_expression = None\n        false_expression = None\n\n        if self.try_accept('?'):\n            true_expression = self.parse_expression()\n            self.accept(':')\n            false_expression = self.parse_expressionl()\n\n            return tree.TernaryExpression(condition=expression_2,\n                                          if_true=true_expression,\n                                          
if_false=false_expression)\n        if self.would_accept('->'):\n            body = self.parse_lambda_method_body()\n            return tree.LambdaExpression(parameters=[expression_2],\n                                         body=body)\n        if self.try_accept('::'):\n            method_reference, type_arguments = self.parse_method_reference()\n            return tree.MethodReference(\n                expression=expression_2,\n                method=method_reference,\n                type_arguments=type_arguments)\n        return expression_2\n\n    @parse_debug\n    def parse_expression_2(self):\n        expression_3 = self.parse_expression_3()\n        token = self.tokens.look()\n        if token.value in Operator.INFIX or token.value == 'instanceof':\n            parts = self.parse_expression_2_rest()\n            parts.insert(0, expression_3)\n            return self.build_binary_operation(parts)\n\n        return expression_3\n\n    @parse_debug\n    def parse_expression_2_rest(self):\n        parts = list()\n\n        token = self.tokens.look()\n        while token.value in Operator.INFIX or token.value == 'instanceof':\n            if self.try_accept('instanceof'):\n                comparison_type = self.parse_type()\n                parts.extend(('instanceof', comparison_type))\n            else:\n                operator = self.parse_infix_operator()\n                expression = self.parse_expression_3()\n                parts.extend((operator, expression))\n\n            token = self.tokens.look()\n\n        return parts\n\n# ------------------------------------------------------------------------------\n# -- Expression operators --\n\n    @parse_debug\n    def parse_expression_3(self):\n        prefix_operators = list()\n        while self.tokens.look().value in Operator.PREFIX:\n            prefix_operators.append(self.tokens.next().value)\n\n        if self.would_accept('('):\n            try:\n                with self.tokens:\n                    lambda_exp = self.parse_lambda_expression()\n                    if lambda_exp:\n                        return lambda_exp\n            except JavaSyntaxError:\n                pass\n            try:\n                with self.tokens:\n                    self.accept('(')\n                    cast_target = self.parse_type()\n                    self.accept(')')\n                    expression = self.parse_expression_3()\n\n                    return tree.Cast(type=cast_target,\n                                     expression=expression)\n            except JavaSyntaxError:\n                pass\n\n        primary = self.parse_primary()\n        primary.prefix_operators = prefix_operators\n        primary.selectors = list()\n        primary.postfix_operators = list()\n\n        token = self.tokens.look()\n        while token.value in '[.':\n            selector = self.parse_selector()\n            primary.selectors.append(selector)\n\n            token = self.tokens.look()\n\n        while token.value in Operator.POSTFIX:\n            primary.postfix_operators.append(self.tokens.next().value)\n            token = self.tokens.look()\n\n        return primary\n\n    @parse_debug\n    def parse_method_reference(self):\n        type_arguments = list()\n        if self.would_accept('<'):\n            type_arguments = self.parse_nonwildcard_type_arguments()\n        if self.would_accept('new'):\n            method_reference = tree.MemberReference(member=self.accept('new'))\n        else:\n            method_reference = 
self.parse_expression()\n        return method_reference, type_arguments\n\n    @parse_debug\n    def parse_lambda_expression(self):\n        lambda_expr = None\n        parameters = None\n        if self.would_accept('(', Identifier, ','):\n            self.accept('(')\n            parameters = []\n            while not self.would_accept(')'):\n                parameters.append(tree.InferredFormalParameter(\n                    name=self.parse_identifier()))\n                self.try_accept(',')\n            self.accept(')')\n        else:\n            parameters = self.parse_formal_parameters()\n        body = self.parse_lambda_method_body()\n        return tree.LambdaExpression(parameters=parameters,\n                                     body=body)\n\n    @parse_debug\n    def parse_lambda_method_body(self):\n        if self.accept('->'):\n            if self.would_accept('{'):\n                return self.parse_block()\n            else:\n                return self.parse_expression()\n\n    @parse_debug\n    def parse_infix_operator(self):\n        operator = self.accept(Operator)\n\n        if operator not in Operator.INFIX:\n            self.illegal(\"Expected infix operator\")\n\n        if operator == '>' and self.try_accept('>'):\n            operator = '>>'\n\n            if self.try_accept('>'):\n                operator = '>>>'\n\n        return operator\n\n# ------------------------------------------------------------------------------\n# -- Primary expressions --\n\n    @parse_debug\n    def parse_primary(self):\n        token = self.tokens.look()\n\n        if isinstance(token, Literal):\n            return self.parse_literal()\n\n        elif token.value == '(':\n            return self.parse_par_expression()\n\n        elif self.try_accept('this'):\n            arguments = None\n\n            if self.would_accept('('):\n                arguments = self.parse_arguments()\n                return tree.ExplicitConstructorInvocation(arguments=arguments)\n\n            return tree.This()\n        elif self.would_accept('super', '::'):\n            self.accept('super')\n            return token\n        elif self.try_accept('super'):\n            super_suffix = self.parse_super_suffix()\n            return super_suffix\n\n        elif self.try_accept('new'):\n            return self.parse_creator()\n\n        elif token.value == '<':\n            type_arguments = self.parse_nonwildcard_type_arguments()\n\n            if self.try_accept('this'):\n                arguments = self.parse_arguments()\n                return tree.ExplicitConstructorInvocation(type_arguments=type_arguments,\n                                                          arguments=arguments)\n            else:\n                invocation = self.parse_explicit_generic_invocation_suffix()\n                invocation.type_arguments = type_arguments\n\n                return invocation\n\n        elif isinstance(token, Identifier):\n            qualified_identifier = [self.parse_identifier()]\n\n            while self.would_accept('.', Identifier):\n                self.accept('.')\n                identifier = self.parse_identifier()\n                qualified_identifier.append(identifier)\n\n            identifier_suffix = self.parse_identifier_suffix()\n\n            if isinstance(identifier_suffix, (tree.MemberReference, tree.MethodInvocation)):\n                # Take the last identifier as the member and leave the rest for the qualifier\n                identifier_suffix.member = 
qualified_identifier.pop()\n\n            elif isinstance(identifier_suffix, tree.ClassReference):\n                identifier_suffix.type = tree.ReferenceType(name=qualified_identifier.pop())\n\n            identifier_suffix.qualifier = '.'.join(qualified_identifier)\n\n            return identifier_suffix\n\n        elif isinstance(token, BasicType):\n            base_type = self.parse_basic_type()\n            base_type.dimensions = self.parse_array_dimension()\n            self.accept('.', 'class')\n\n            return tree.ClassReference(type=base_type)\n\n        elif self.try_accept('void'):\n            self.accept('.', 'class')\n            return tree.VoidClassReference()\n\n        self.illegal(\"Expected expression\")\n\n    @parse_debug\n    def parse_literal(self):\n        literal = self.accept(Literal)\n        return tree.Literal(value=literal)\n\n    @parse_debug\n    def parse_par_expression(self):\n        self.accept('(')\n        expression = self.parse_expression()\n        self.accept(')')\n\n        return expression\n\n    @parse_debug\n    def parse_arguments(self):\n        expressions = list()\n\n        self.accept('(')\n\n        if self.try_accept(')'):\n            return expressions\n\n        while True:\n            expression = self.parse_expression()\n            expressions.append(expression)\n\n            if not self.try_accept(','):\n                break\n\n        self.accept(')')\n\n        return expressions\n\n    @parse_debug\n    def parse_super_suffix(self):\n        identifier = None\n        type_arguments = None\n        arguments = None\n\n        if self.try_accept('.'):\n            if self.would_accept('<'):\n                type_arguments = self.parse_nonwildcard_type_arguments()\n\n            identifier = self.parse_identifier()\n\n            if self.would_accept('('):\n                arguments = self.parse_arguments()\n        else:\n            arguments = self.parse_arguments()\n\n        if identifier and arguments is not None:\n            return tree.SuperMethodInvocation(member=identifier,\n                                              arguments=arguments,\n                                              type_arguments=type_arguments)\n        elif arguments is not None:\n            return tree.SuperConstructorInvocation(arguments=arguments)\n        else:\n            return tree.SuperMemberReference(member=identifier)\n\n    @parse_debug\n    def parse_explicit_generic_invocation_suffix(self):\n        identifier = None\n        arguments = None\n        if self.try_accept('super'):\n            return self.parse_super_suffix()\n        else:\n            identifier = self.parse_identifier()\n            arguments = self.parse_arguments()\n            return tree.MethodInvocation(member=identifier,\n                                         arguments=arguments)\n\n# ------------------------------------------------------------------------------\n# -- Creators --\n\n    @parse_debug\n    def parse_creator(self):\n        constructor_type_arguments = None\n\n        if self.would_accept(BasicType):\n            created_name = self.parse_basic_type()\n            rest = self.parse_array_creator_rest()\n            rest.type = created_name\n            return rest\n\n        if self.would_accept('<'):\n            constructor_type_arguments = self.parse_nonwildcard_type_arguments()\n\n        created_name = self.parse_created_name()\n\n        if self.would_accept('['):\n            if constructor_type_arguments:\n            
    self.illegal(\"Array creator not allowed with generic constructor type arguments\")\n\n            rest = self.parse_array_creator_rest()\n            rest.type = created_name\n            return rest\n        else:\n            arguments, body = self.parse_class_creator_rest()\n            return tree.ClassCreator(constructor_type_arguments=constructor_type_arguments,\n                                     type=created_name,\n                                     arguments=arguments,\n                                     body=body)\n\n    @parse_debug\n    def parse_created_name(self):\n        created_name = tree.ReferenceType()\n        tail = created_name\n\n        while True:\n            tail.name = self.parse_identifier()\n\n            if self.would_accept('<'):\n                tail.arguments = self.parse_type_arguments_or_diamond()\n\n            if self.try_accept('.'):\n                tail.sub_type = tree.ReferenceType()\n                tail = tail.sub_type\n            else:\n                break\n\n        return created_name\n\n    @parse_debug\n    def parse_class_creator_rest(self):\n        arguments = self.parse_arguments()\n        class_body = None\n\n        if self.would_accept('{'):\n            class_body = self.parse_class_body()\n\n        return (arguments, class_body)\n\n    @parse_debug\n    def parse_array_creator_rest(self):\n        if self.would_accept('[', ']'):\n            array_dimension = self.parse_array_dimension()\n            array_initializer = self.parse_array_initializer()\n\n            return tree.ArrayCreator(dimensions=array_dimension,\n                                     initializer=array_initializer)\n\n        else:\n            array_dimensions = list()\n\n            while self.would_accept('[') and not self.would_accept('[', ']'):\n                self.accept('[')\n                expression = self.parse_expression()\n                array_dimensions.append(expression)\n                self.accept(']')\n\n            array_dimensions += self.parse_array_dimension()\n            return tree.ArrayCreator(dimensions=array_dimensions)\n\n    @parse_debug\n    def parse_identifier_suffix(self):\n        if self.try_accept('[', ']'):\n            array_dimension = [None] + self.parse_array_dimension()\n            self.accept('.', 'class')\n            return tree.ClassReference(type=tree.Type(dimensions=array_dimension))\n\n        elif self.would_accept('('):\n            arguments = self.parse_arguments()\n            return tree.MethodInvocation(arguments=arguments)\n\n        elif self.try_accept('.', 'class'):\n            return tree.ClassReference()\n\n        elif self.try_accept('.', 'this'):\n            return tree.This()\n\n        elif self.would_accept('.', '<'):\n            next(self.tokens)\n            return self.parse_explicit_generic_invocation()\n\n        elif self.try_accept('.', 'new'):\n            type_arguments = None\n\n            if self.would_accept('<'):\n                type_arguments = self.parse_nonwildcard_type_arguments()\n\n            inner_creator = self.parse_inner_creator()\n            inner_creator.constructor_type_arguments = type_arguments\n\n            return inner_creator\n\n        elif self.would_accept('.', 'super', '('):\n            self.accept('.', 'super')\n            arguments = self.parse_arguments()\n            return tree.SuperConstructorInvocation(arguments=arguments)\n\n        else:\n            return tree.MemberReference()\n\n    @parse_debug\n    def 
parse_explicit_generic_invocation(self):\n        type_arguments = self.parse_nonwildcard_type_arguments()\n\n        invocation = self.parse_explicit_generic_invocation_suffix()\n        invocation.type_arguments = type_arguments\n\n        return invocation\n\n    @parse_debug\n    def parse_inner_creator(self):\n        identifier = self.parse_identifier()\n        type_arguments = None\n\n        if self.would_accept('<'):\n            type_arguments = self.parse_nonwildcard_type_arguments_or_diamond()\n\n        java_type = tree.ReferenceType(name=identifier,\n                                       arguments=type_arguments)\n\n        arguments, class_body = self.parse_class_creator_rest()\n\n        return tree.InnerClassCreator(type=java_type,\n                                      arguments=arguments,\n                                      body=class_body)\n\n    @parse_debug\n    def parse_selector(self):\n        if self.try_accept('['):\n            expression = self.parse_expression()\n            self.accept(']')\n            return tree.ArraySelector(index=expression)\n\n        elif self.try_accept('.'):\n\n            token = self.tokens.look()\n            if isinstance(token, Identifier):\n                identifier = self.tokens.next().value\n                arguments = None\n\n                if self.would_accept('('):\n                    arguments = self.parse_arguments()\n\n                    return tree.MethodInvocation(member=identifier,\n                                                 arguments=arguments)\n                else:\n                    return tree.MemberReference(member=identifier)\n            elif self.would_accept('super', '::'):\n                self.accept('super')\n                return token\n            elif self.would_accept('<'):\n                return self.parse_explicit_generic_invocation()\n            elif self.try_accept('this'):\n                return tree.This()\n            elif self.try_accept('super'):\n                return self.parse_super_suffix()\n            elif self.try_accept('new'):\n                type_arguments = None\n\n                if self.would_accept('<'):\n                    type_arguments = self.parse_nonwildcard_type_arguments()\n\n                inner_creator = self.parse_inner_creator()\n                inner_creator.constructor_type_arguments = type_arguments\n\n                return inner_creator\n\n        self.illegal(\"Expected selector\")\n\n# ------------------------------------------------------------------------------\n# -- Enum and annotation body --\n\n    @parse_debug\n    def parse_enum_body(self):\n        constants = list()\n        body_declarations = list()\n\n        self.accept('{')\n\n        if not self.try_accept(','):\n            while not (self.would_accept(';') or self.would_accept('}')):\n                constant = self.parse_enum_constant()\n                constants.append(constant)\n\n                if not self.try_accept(','):\n                    break\n\n        if self.try_accept(';'):\n            while not self.would_accept('}'):\n                declaration = self.parse_class_body_declaration()\n\n                if declaration:\n                    body_declarations.append(declaration)\n\n        self.accept('}')\n\n        return tree.EnumBody(constants=constants,\n                             declarations=body_declarations)\n\n    @parse_debug\n    def parse_enum_constant(self):\n        annotations = list()\n        javadoc = None\n        constant_name = 
None\n        arguments = None\n        body = None\n\n        next_token = self.tokens.look()\n        if next_token:\n            javadoc = next_token.javadoc\n\n        if self.would_accept(Annotation):\n            annotations = self.parse_annotations()\n\n        constant_name = self.parse_identifier()\n\n        if self.would_accept('('):\n            arguments = self.parse_arguments()\n\n        if self.would_accept('{'):\n            body = self.parse_class_body()\n\n        return tree.EnumConstantDeclaration(annotations=annotations,\n                                            name=constant_name,\n                                            arguments=arguments,\n                                            body=body,\n                                            documentation=javadoc)\n\n    @parse_debug\n    def parse_annotation_type_body(self):\n        declarations = None\n\n        self.accept('{')\n        declarations = self.parse_annotation_type_element_declarations()\n        self.accept('}')\n\n        return declarations\n\n    @parse_debug\n    def parse_annotation_type_element_declarations(self):\n        declarations = list()\n\n        while not self.would_accept('}'):\n            declaration = self.parse_annotation_type_element_declaration()\n            declarations.append(declaration)\n\n        return declarations\n\n    @parse_debug\n    def parse_annotation_type_element_declaration(self):\n        modifiers, annotations, javadoc = self.parse_modifiers()\n        declaration = None\n\n        if self.would_accept('class'):\n            declaration = self.parse_normal_class_declaration()\n        elif self.would_accept('interface'):\n            declaration = self.parse_normal_interface_declaration()\n        elif self.would_accept('enum'):\n            declaration = self.parse_enum_declaration()\n        elif self.is_annotation_declaration():\n            declaration = self.parse_annotation_type_declaration()\n        else:\n            attribute_type = self.parse_type()\n            attribute_name = self.parse_identifier()\n            declaration = self.parse_annotation_method_or_constant_rest()\n            self.accept(';')\n\n            if isinstance(declaration, tree.AnnotationMethod):\n                declaration.name = attribute_name\n                declaration.return_type = attribute_type\n            else:\n                declaration.declarators[0].name = attribute_name\n                declaration.type = attribute_type\n\n        declaration.modifiers = modifiers\n        declaration.annotations = annotations\n        declaration.documentation = javadoc\n\n        return declaration\n\n    @parse_debug\n    def parse_annotation_method_or_constant_rest(self):\n        if self.try_accept('('):\n            self.accept(')')\n\n            array_dimension = self.parse_array_dimension()\n            default = None\n\n            if self.try_accept('default'):\n                default = self.parse_element_value()\n\n            return tree.AnnotationMethod(dimensions=array_dimension,\n                                         default=default)\n        else:\n            return self.parse_constant_declarators_rest()\n\ndef parse(tokens, debug=False):\n    parser = Parser(tokens)\n    parser.set_debug(debug)\n    return parser.parse()\n"
  },
  {
    "path": "baseline_tokenization/javalang/test/__init__.py",
    "content": ""
  },
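  {
    "path": "baseline_tokenization/javalang/test/example_statement_ast.py",
    "content": "\"\"\" Editor's sketch, not part of upstream javalang: shows how\n    parse_statement surfaces try/catch/finally and switch constructs in the\n    AST. This file's path and the Demo class are illustrative only.\n\"\"\"\n\nfrom .. import parse, tree\n\nSOURCE = \"\"\"\nclass Demo {\n    int pick(int k) {\n        try (AutoCloseable r = open()) {\n            switch (k) {\n                case 1:\n                case 2: return k;\n                default: return 0;\n            }\n        } catch (java.io.IOException | RuntimeException e) {\n            return -1;\n        } finally {\n            log();\n        }\n    }\n}\n\"\"\"\n\n\ndef main():\n    ast = parse.parse(SOURCE)\n\n    # parse_statement('try') builds a TryStatement carrying the resources,\n    # one CatchClause per catch (multi-catch types joined by '|') and the\n    # finally block.\n    for _, node in ast.filter(tree.TryStatement):\n        print(len(node.resources), len(node.catches), node.finally_block is not None)\n\n    # parse_switch_block_statement_group merges consecutive labels, so\n    # 'case 1: case 2:' becomes one SwitchStatementCase with two labels,\n    # while the default group carries an empty label list.\n    for _, node in ast.filter(tree.SwitchStatementCase):\n        print(len(node.case), len(node.statements))\n\n\nif __name__ == '__main__':\n    main()\n"
  },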
  {
    "path": "baseline_tokenization/javalang/test/source/package-info/AnnotationJavadoc.java",
    "content": "@Package\n/**\n Test that includes java doc first but no annotation\n*/\npackage org.javalang.test;"
  },
  {
    "path": "baseline_tokenization/javalang/test/source/package-info/AnnotationOnly.java",
    "content": "@Package\npackage org.javalang.test;"
  },
  {
    "path": "baseline_tokenization/javalang/test/source/package-info/JavadocAnnotation.java",
    "content": "/**\n Test that includes java doc first but no annotation\n*/\n@Package\npackage org.javalang.test;"
  },
  {
    "path": "baseline_tokenization/javalang/test/source/package-info/JavadocOnly.java",
    "content": "/**\n Test that includes java doc first but no annotation\n*/\npackage org.javalang.test;"
  },
  {
    "path": "baseline_tokenization/javalang/test/source/package-info/NoAnnotationNoJavadoc.java",
    "content": "package org.javalang.test;"
  },
  {
    "path": "baseline_tokenization/javalang/test/test_java_8_syntax.py",
    "content": "import unittest\n\nfrom pkg_resources import resource_string\nfrom .. import parse, parser, tree\n\n\ndef setup_java_class(content_to_add):\n    \"\"\" returns an example java class with the\n        given content_to_add contained within a method.\n    \"\"\"\n    template = \"\"\"\npublic class Lambda {\n\n    public static void main(String args[]) {\n        %s\n    }\n}\n        \"\"\"\n    return template % content_to_add\n\n\ndef filter_type_in_method(clazz, the_type, method_name):\n    \"\"\" yields the result of filtering the given class for the given\n        type inside the given method identified by its name.\n    \"\"\"\n    for path, node in clazz.filter(the_type):\n        for p in reversed(path):\n            if isinstance(p, tree.MethodDeclaration):\n                if p.name == method_name:\n                    yield path, node\n\n\nclass LambdaSupportTest(unittest.TestCase):\n\n    \"\"\" Contains tests for java 8 lambda syntax. \"\"\"\n\n    def assert_contains_lambda_expression_in_m(\n            self, clazz, method_name='main'):\n        \"\"\" asserts that the given tree contains a method with the supplied\n            method name containing a lambda expression.\n        \"\"\"\n        matches = list(filter_type_in_method(\n            clazz, tree.LambdaExpression, method_name))\n        if not matches:\n            self.fail('No matching lambda expression found.')\n        return matches\n\n    def test_lambda_support_no_parameters_no_body(self):\n        \"\"\" tests support for lambda with no parameters and no body. \"\"\"\n        self.assert_contains_lambda_expression_in_m(\n            parse.parse(setup_java_class(\"() -> {};\")))\n\n    def test_lambda_support_no_parameters_expression_body(self):\n        \"\"\" tests support for lambda with no parameters and an\n            expression body.\n        \"\"\"\n        test_classes = [\n            setup_java_class(\"() -> 3;\"),\n            setup_java_class(\"() -> null;\"),\n            setup_java_class(\"() -> { return 21; };\"),\n            setup_java_class(\"() -> { System.exit(1); };\"),\n        ]\n        for test_class in test_classes:\n            clazz = parse.parse(test_class)\n            self.assert_contains_lambda_expression_in_m(clazz)\n\n    def test_lambda_support_no_parameters_complex_expression(self):\n        \"\"\" tests support for lambda with no parameters and a\n            complex expression body.\n        \"\"\"\n        code = \"\"\"\n                () -> {\n            if (true) return 21;\n            else\n            {\n                int result = 21;\n                return result / 2;\n            }\n        };\"\"\"\n        self.assert_contains_lambda_expression_in_m(\n            parse.parse(setup_java_class(code)))\n\n    def test_parameter_no_type_expression_body(self):\n        \"\"\" tests support for lambda with parameters with inferred types. \"\"\"\n        test_classes = [\n            setup_java_class(\"(bar) -> bar + 1;\"),\n            setup_java_class(\"bar -> bar + 1;\"),\n            setup_java_class(\"x -> x.length();\"),\n            setup_java_class(\"y -> { y.boom(); };\"),\n        ]\n        for test_class in test_classes:\n            clazz = parse.parse(test_class)\n            self.assert_contains_lambda_expression_in_m(clazz)\n\n    def test_parameter_with_type_expression_body(self):\n        \"\"\" tests support for lambda with parameters with formal types. 
\"\"\"\n        test_classes = [\n            setup_java_class(\"(int foo) -> { return foo + 2; };\"),\n            setup_java_class(\"(String s) -> s.length();\"),\n            setup_java_class(\"(int foo) -> foo + 1;\"),\n            setup_java_class(\"(Thread th) -> { th.start(); };\"),\n            setup_java_class(\"(String foo, String bar) -> \"\n                             \"foo + bar;\"),\n        ]\n        for test_class in test_classes:\n            clazz = parse.parse(test_class)\n            self.assert_contains_lambda_expression_in_m(clazz)\n\n    def test_parameters_with_no_type_expression_body(self):\n        \"\"\" tests support for multiple lambda parameters\n            that are specified without their types.\n        \"\"\"\n        self.assert_contains_lambda_expression_in_m(\n            parse.parse(setup_java_class(\"(x, y) -> x + y;\")))\n\n    def test_parameters_with_mixed_inferred_and_declared_types(self):\n        \"\"\" this tests that lambda type specification mixing is considered\n            invalid as per the specifications.\n        \"\"\"\n        with self.assertRaises(parser.JavaSyntaxError):\n            parse.parse(setup_java_class(\"(x, int y) -> x+y;\"))\n\n    def test_parameters_inferred_types_with_modifiers(self):\n        \"\"\" this tests that lambda inferred type parameters with modifiers are\n            considered invalid as per the specifications.\n        \"\"\"\n        with self.assertRaises(parser.JavaSyntaxError):\n            parse.parse(setup_java_class(\"(x, final y) -> x+y;\"))\n\n    def test_invalid_parameters_are_invalid(self):\n        \"\"\" this tests that invalid lambda parameters are\n            considered invalid as per the specifications.\n        \"\"\"\n        with self.assertRaises(parser.JavaSyntaxError):\n            parse.parse(setup_java_class(\"(a b c) -> {};\"))\n\n    def test_cast_works(self):\n        \"\"\" this tests that a cast expression works as expected. \"\"\"\n        parse.parse(setup_java_class(\"String x = (String) A.x() ;\"))\n\n\nclass MethodReferenceSyntaxTest(unittest.TestCase):\n\n    \"\"\" Contains tests for java 8 method reference syntax. \"\"\"\n\n    def assert_contains_method_reference_expression_in_m(\n            self, clazz, method_name='main'):\n        \"\"\" asserts that the given class contains a method with the supplied\n            method name containing a method reference.\n        \"\"\"\n        matches = list(filter_type_in_method(\n            clazz, tree.MethodReference, method_name))\n        if not matches:\n            self.fail('No matching method reference found.')\n        return matches\n\n    def test_method_reference(self):\n        \"\"\" tests that method references are supported. \"\"\"\n        self.assert_contains_method_reference_expression_in_m(\n            parse.parse(setup_java_class(\"String::length;\")))\n\n    def test_method_reference_to_the_new_method(self):\n        \"\"\" test support for method references to 'new'. 
\"\"\"\n        self.assert_contains_method_reference_expression_in_m(\n            parse.parse(setup_java_class(\"String::new;\")))\n\n    def test_method_reference_to_the_new_method_with_explicit_type(self):\n        \"\"\" test support for method references to 'new' with an\n            explicit type.\n        \"\"\"\n        self.assert_contains_method_reference_expression_in_m(\n            parse.parse(setup_java_class(\"String::<String> new;\")))\n\n    def test_method_reference_from_super(self):\n        \"\"\" test support for method references from 'super'. \"\"\"\n        self.assert_contains_method_reference_expression_in_m(\n            parse.parse(setup_java_class(\"super::toString;\")))\n\n    def test_method_reference_from_super_with_identifier(self):\n        \"\"\" test support for method references from Identifier.super. \"\"\"\n        self.assert_contains_method_reference_expression_in_m(\n            parse.parse(setup_java_class(\"String.super::toString;\")))\n\n    @unittest.expectedFailure\n    def test_method_reference_explicit_type_arguments_for_generic_type(self):\n        \"\"\" currently there is no support for method references\n            on a generic type with explicit type arguments.\n        \"\"\"\n        self.assert_contains_method_reference_expression_in_m(\n            parse.parse(setup_java_class(\"List<String>::size;\")))\n\n    def test_method_reference_explicit_type_arguments(self):\n        \"\"\" test support for method references with explicit type arguments.\n        \"\"\"\n        self.assert_contains_method_reference_expression_in_m(\n            parse.parse(setup_java_class(\"Arrays::<String> sort;\")))\n\n    @unittest.expectedFailure\n    def test_method_reference_from_array_type(self):\n        \"\"\" currently there is no support for method references\n            from an array type.\n        \"\"\"\n        self.assert_contains_method_reference_expression_in_m(\n            parse.parse(setup_java_class(\"int[]::new;\")))\n\n\nclass InterfaceSupportTest(unittest.TestCase):\n\n    \"\"\" Contains tests for java 8 interface extensions. \"\"\"\n\n    def test_interface_support_static_methods(self):\n        parse.parse(\"\"\"\ninterface Foo {\n    void foo();\n\n    static Foo create() {\n        return new Foo() {\n            @Override\n            void foo() {\n                System.out.println(\"foo\");\n            }\n        };\n    }\n}\n        \"\"\")\n\n    def test_interface_support_default_methods(self):\n        parse.parse(\"\"\"\ninterface Foo {\n    default void foo() {\n        System.out.println(\"foo\");\n    }\n}\n        \"\"\")\n\n\ndef main():\n    unittest.main()\n\nif __name__ == '__main__':\n    main()\n"
  },
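  {
    "path": "baseline_tokenization/javalang/test/example_expression_ast.py",
    "content": "\"\"\" Editor's sketch, not part of upstream javalang: shows how the\n    expression layer (parse_expression, parse_expressionl,\n    parse_expression_2) shapes assignments, ternaries and infix chains.\n    This file's path and the Demo class are illustrative only.\n\"\"\"\n\nfrom .. import parse, tree\n\nSOURCE = \"\"\"\nclass Demo {\n    void m() {\n        int x = 1 + 2 * 3;\n        x = x > 0 ? x : -x;\n        x >>= 1;\n    }\n}\n\"\"\"\n\n\ndef main():\n    ast = parse.parse(SOURCE)\n\n    # parse_expression_2_rest flattens '1 + 2 * 3' into an operator/operand\n    # list and build_binary_operation folds it back respecting precedence.\n    for _, node in ast.filter(tree.BinaryOperation):\n        print(node.operator)\n\n    # parse_expressionl turns 'cond ? a : b' into a TernaryExpression.\n    for _, node in ast.filter(tree.TernaryExpression):\n        print(type(node.condition).__name__)\n\n    # Assignments come back as Assignment nodes with the operator in 'type';\n    # '>>=' survives intact because only '>>' and '>>>' are split into\n    # single '>' tokens at the lexer level.\n    for _, node in ast.filter(tree.Assignment):\n        print(node.type)\n\n\nif __name__ == '__main__':\n    main()\n"
  },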
  {
    "path": "baseline_tokenization/javalang/test/test_javadoc.py",
    "content": "import unittest\n\nfrom .. import javadoc\n\n\nclass TestJavadoc(unittest.TestCase):\n    def test_empty_comment(self):\n        javadoc.parse('/** */')\n        javadoc.parse('/***/')\n        javadoc.parse('/**\\n *\\n */')\n        javadoc.parse('/**\\n *\\n *\\n */')\n\nif __name__ == \"__main__\":\n    unittest.main()\n"
  },
  {
    "path": "baseline_tokenization/javalang/test/test_package_declaration.py",
    "content": "import unittest\n\nfrom pkg_resources import resource_string\nfrom .. import parse\n\n\n# From my reading of the spec (http://docs.oracle.com/javase/specs/jls/se7/html/jls-7.html) the\n# allowed order is javadoc, optional annotation, package declaration\nclass PackageInfo(unittest.TestCase):\n    def testPackageDeclarationOnly(self):\n        source_file = \"source/package-info/NoAnnotationNoJavadoc.java\"\n        ast = self.get_ast(source_file)\n\n        self.failUnless(ast.package.name == \"org.javalang.test\")\n        self.failIf(ast.package.annotations)\n        self.failIf(ast.package.documentation)\n\n    def testAnnotationOnly(self):\n        source_file = \"source/package-info/AnnotationOnly.java\"\n        ast = self.get_ast(source_file)\n\n        self.failUnless(ast.package.name == \"org.javalang.test\")\n        self.failUnless(ast.package.annotations)\n        self.failIf(ast.package.documentation)\n\n    def testJavadocOnly(self):\n        source_file = \"source/package-info/JavadocOnly.java\"\n        ast = self.get_ast(source_file)\n\n        self.failUnless(ast.package.name == \"org.javalang.test\")\n        self.failIf(ast.package.annotations)\n        self.failUnless(ast.package.documentation)\n\n    def testAnnotationThenJavadoc(self):\n        source_file = \"source/package-info/AnnotationJavadoc.java\"\n        ast = self.get_ast(source_file)\n\n        self.failUnless(ast.package.name == \"org.javalang.test\")\n        self.failUnless(ast.package.annotations)\n        self.failIf(ast.package.documentation)\n\n    def testJavadocThenAnnotation(self):\n        source_file = \"source/package-info/JavadocAnnotation.java\"\n        ast = self.get_ast(source_file)\n\n        self.failUnless(ast.package.name == \"org.javalang.test\")\n        self.failUnless(ast.package.annotations)\n        self.failUnless(ast.package.documentation)\n\n    def get_ast(self, filename):\n        source = resource_string(__name__, filename)\n        ast = parse.parse(source)\n\n        return ast\n\n\ndef main():\n    unittest.main()\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "baseline_tokenization/javalang/test/test_util.py",
    "content": "import unittest\n\nfrom ..util import LookAheadIterator\n\n\nclass TestLookAheadIterator(unittest.TestCase):\n    def test_usage(self):\n        i = LookAheadIterator(list(range(0, 10000)))\n\n        self.assertEqual(next(i), 0)\n        self.assertEqual(next(i), 1)\n        self.assertEqual(next(i), 2)\n\n        self.assertEqual(i.last(), 2)\n\n        self.assertEqual(i.look(), 3)\n        self.assertEqual(i.last(), 3)\n\n        self.assertEqual(i.look(1), 4)\n        self.assertEqual(i.look(2), 5)\n        self.assertEqual(i.look(3), 6)\n        self.assertEqual(i.look(4), 7)\n\n        self.assertEqual(i.last(), 7)\n\n        i.push_marker()\n        self.assertEqual(next(i), 3)\n        self.assertEqual(next(i), 4)\n        self.assertEqual(next(i), 5)\n        i.pop_marker(True) # reset\n\n        self.assertEqual(i.look(), 3)\n        self.assertEqual(next(i), 3)\n\n        i.push_marker() #1\n        self.assertEqual(next(i), 4)\n        self.assertEqual(next(i), 5)\n        i.push_marker() #2\n        self.assertEqual(next(i), 6)\n        self.assertEqual(next(i), 7)\n        i.push_marker() #3\n        self.assertEqual(next(i), 8)\n        self.assertEqual(next(i), 9)\n        i.pop_marker(False) #3\n        self.assertEqual(next(i), 10)\n        i.pop_marker(True) #2\n        self.assertEqual(next(i), 6)\n        self.assertEqual(next(i), 7)\n        self.assertEqual(next(i), 8)\n        i.pop_marker(False) #1\n        self.assertEqual(next(i), 9)\n\n        try:\n            with i:\n                self.assertEqual(next(i), 10)\n                self.assertEqual(next(i), 11)\n                raise Exception()\n        except:\n            self.assertEqual(next(i), 10)\n            self.assertEqual(next(i), 11)\n\n        with i:\n            self.assertEqual(next(i), 12)\n            self.assertEqual(next(i), 13)\n        self.assertEqual(next(i), 14)\n\n\nif __name__==\"__main__\":\n    unittest.main()\n"
  },
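  {
    "path": "baseline_tokenization/javalang/test/example_lookahead.py",
    "content": "\"\"\" Editor's sketch, not part of upstream javalang: shows how the parser's\n    speculative parsing ('with self.tokens: ...' in parse_for_control and\n    parse_expression_3) rides on LookAheadIterator's context manager, which\n    rewinds the stream when an exception escapes the block.\n\"\"\"\n\nfrom ..util import LookAheadIterator\n\n\ndef main():\n    stream = LookAheadIterator(list('abcdef'))\n\n    try:\n        with stream:  # push_marker() on enter\n            assert next(stream) == 'a'\n            raise ValueError('speculation failed')  # pop_marker(True): rewind\n    except ValueError:\n        pass\n\n    print(next(stream))  # 'a' again: the failed attempt was rolled back\n\n    with stream:  # no exception: pop_marker(False) keeps the new position\n        assert next(stream) == 'b'\n    print(next(stream))  # 'c'\n\n\nif __name__ == '__main__':\n    main()\n"
  },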
  {
    "path": "baseline_tokenization/javalang/tokenizer.py",
    "content": "import re\nimport unicodedata\n\nimport six\n\n\nclass LexerError(Exception):\n    pass\n\nclass JavaToken(object):\n    def __init__(self, value, position=None, javadoc=None):\n        self.value = value\n        self.position = position\n        self.javadoc = javadoc\n\n    def __repr__(self):\n        if self.position:\n            return '%s \"%s\" line %d, position %d' % (\n                self.__class__.__name__, self.value, self.position[0], self.position[1]\n                )\n        else:\n            return '%s \"%s\"' % (self.__class__.__name__, self.value)\n\n    def __str__(self):\n        return repr(self)\n\n    def __eq__(self, other):\n        raise Exception(\"Direct comparison not allowed\")\n\nclass EndOfInput(JavaToken):\n    pass\n\nclass Keyword(JavaToken):\n    VALUES = set(['abstract', 'assert', 'boolean', 'break', 'byte', 'case',\n                  'catch', 'char', 'class', 'const', 'continue', 'default',\n                  'do', 'double', 'else', 'enum', 'extends', 'final',\n                  'finally', 'float', 'for', 'goto', 'if', 'implements',\n                  'import', 'instanceof', 'int', 'interface', 'long', 'native',\n                  'new', 'package', 'private', 'protected', 'public', 'return',\n                  'short', 'static', 'strictfp', 'super', 'switch',\n                  'synchronized', 'this', 'throw', 'throws', 'transient', 'try',\n                  'void', 'volatile', 'while'])\n\n\nclass Modifier(Keyword):\n    VALUES = set(['abstract', 'default', 'final', 'native', 'private',\n                  'protected', 'public', 'static', 'strictfp', 'synchronized',\n                  'transient', 'volatile'])\n\nclass BasicType(Keyword):\n    VALUES = set(['boolean', 'byte', 'char', 'double',\n                  'float', 'int', 'long', 'short'])\n\nclass Literal(JavaToken):\n    pass\n\nclass Integer(Literal):\n    pass\n\nclass DecimalInteger(Literal):\n    pass\n\nclass OctalInteger(Integer):\n    pass\n\nclass BinaryInteger(Integer):\n    pass\n\nclass HexInteger(Integer):\n    pass\n\nclass FloatingPoint(Literal):\n    pass\n\nclass DecimalFloatingPoint(FloatingPoint):\n    pass\n\nclass HexFloatingPoint(FloatingPoint):\n    pass\n\nclass Boolean(Literal):\n    VALUES = set([\"true\", \"false\"])\n\nclass Character(Literal):\n    pass\n\nclass String(Literal):\n    pass\n\nclass Null(Literal):\n    pass\n\nclass Separator(JavaToken):\n    VALUES = set(['(', ')', '{', '}', '[', ']', ';', ',', '.'])\n\nclass Operator(JavaToken):\n    MAX_LEN = 4\n    VALUES = set(['>>>=', '>>=', '<<=',  '%=', '^=', '|=', '&=', '/=',\n                  '*=', '-=', '+=', '<<', '--', '++', '||', '&&', '!=',\n                  '>=', '<=', '==', '%', '^', '|', '&', '/', '*', '-',\n                  '+', ':', '?', '~', '!', '<', '>', '=', '...', '->', '::'])\n\n    # '>>>' and '>>' are excluded so that >> becomes two tokens and >>> becomes\n    # three. This is done because we can not distinguish the operators >> and\n    # >>> from the closing of multipel type parameter/argument lists when\n    # lexing. 
The job of potentially recombining these symbols is left to the\n    # parser\n\n    INFIX = set(['||', '&&', '|', '^', '&', '==', '!=', '<', '>', '<=', '>=',\n                 '<<', '>>', '>>>', '+', '-', '*', '/', '%'])\n\n    PREFIX = set(['++', '--', '!', '~', '+', '-'])\n\n    POSTFIX = set(['++', '--'])\n\n    ASSIGNMENT = set(['=', '+=', '-=', '*=', '/=', '&=', '|=', '^=', '%=',\n                      '<<=', '>>=', '>>>='])\n\n    LAMBDA = set(['->'])\n\n    METHOD_REFERENCE = set(['::',])\n\n    def is_infix(self):\n        return self.value in self.INFIX\n\n    def is_prefix(self):\n        return self.value in self.PREFIX\n\n    def is_postfix(self):\n        return self.value in self.POSTFIX\n\n    def is_assignment(self):\n        return self.value in self.ASSIGNMENT\n\n\nclass Annotation(JavaToken):\n    pass\n\nclass Identifier(JavaToken):\n    pass\n\n\nclass JavaTokenizer(object):\n\n    IDENT_START_CATEGORIES = set(['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Nl', 'Pc', 'Sc'])\n\n    IDENT_PART_CATEGORIES = set(['Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Mc', 'Mn', 'Nd', 'Nl', 'Pc', 'Sc'])\n\n    def __init__(self, data):\n        self.data = data\n\n        self.current_line = 1\n        self.start_of_line = 0\n\n        self.operators = [set() for i in range(0, Operator.MAX_LEN)]\n\n        for v in Operator.VALUES:\n            self.operators[len(v) - 1].add(v)\n\n        self.whitespace_consumer = re.compile(r'[^\\s]')\n\n        self.javadoc = None\n\n\n    def reset(self):\n        self.i = 0\n        self.j = 0\n\n    def consume_whitespace(self):\n        match = self.whitespace_consumer.search(self.data, self.i + 1)\n\n        if not match:\n            self.i = self.length\n            return\n\n        i = match.start()\n\n        start_of_line = self.data.rfind('\\n', self.i, i)\n\n        if start_of_line != -1:\n            self.start_of_line = start_of_line\n            self.current_line += self.data.count('\\n', self.i, i)\n\n        self.i = i\n\n    def read_string(self):\n        delim = self.data[self.i]\n\n        state = 0\n        j = self.i + 1\n        length = self.length\n\n        while True:\n            if j >= length:\n                self.error('Unterminated character/string literal')\n\n            if state == 0:\n                if self.data[j] == '\\\\':\n                    state = 1\n                elif self.data[j] == delim:\n                    break\n\n            elif state == 1:\n                if self.data[j] in 'btnfru\"\\'\\\\':\n                    state = 0\n                elif self.data[j] in '0123':\n                    state = 2\n                elif self.data[j] in '01234567':\n                    state = 3\n                else:\n                    self.error('Illegal escape character', self.data[j])\n\n            elif state == 2:\n                # Possibly long octal\n                if self.data[j] in '01234567':\n                    state = 3\n                elif self.data[j] == '\\\\':\n                    state = 1\n                elif self.data[j] == delim:\n                    break\n\n            elif state == 3:\n                state = 0\n\n                if self.data[j] == '\\\\':\n                    state = 1\n                elif self.data[j] == delim:\n                    break\n\n            j += 1\n\n        self.j = j + 1\n\n    def try_operator(self):\n        for l in range(min(self.length - self.i, Operator.MAX_LEN), 0, -1):\n            if self.data[self.i:self.i + l] in self.operators[l - 1]:\n                
self.j = self.i + l\n                return True\n        return False\n\n    def read_comment(self):\n        if self.data[self.i + 1] == '/':\n            i = self.data.find('\\n', self.i + 2)\n\n            if i == -1:\n                self.i = self.length\n                return\n\n            i += 1\n\n            self.start_of_line = i\n            self.current_line += 1\n            self.i = i\n\n        else:\n            i = self.data.find('*/', self.i + 2)\n\n            if i == -1:\n                self.i = self.length\n                return\n\n            i += 2\n\n            self.start_of_line = i\n            self.current_line += self.data.count('\\n', self.i, i)\n            self.i = i\n\n    def try_javadoc_comment(self):\n        if self.i + 2 >= self.length or self.data[self.i + 2] != '*':\n            return False\n\n        j = self.data.find('*/', self.i + 2)\n\n        if j == -1:\n            self.j = self.length\n            return False\n\n        j += 2\n\n        self.start_of_line = j\n        self.current_line += self.data.count('\\n', self.i, j)\n        self.j = j\n\n        return True\n\n    def read_decimal_float_or_integer(self):\n        orig_i = self.i\n        self.j = self.i\n\n        self.read_decimal_integer()\n\n        if self.data[self.j] not in '.eEfFdD':\n            return DecimalInteger\n\n        if self.data[self.j] == '.':\n            self.i = self.j + 1\n            self.read_decimal_integer()\n\n        if self.data[self.j] in 'eE':\n            self.j = self.j + 1\n\n            if self.data[self.j] in '-+':\n                self.j = self.j + 1\n\n            self.i = self.j\n            self.read_decimal_integer()\n\n        if self.data[self.j] in 'fFdD':\n            self.j = self.j + 1\n\n        self.i = orig_i\n        return DecimalFloatingPoint\n\n    def read_hex_integer_or_float(self):\n        orig_i = self.i\n        self.j = self.i + 2\n\n        self.read_hex_integer()\n\n        if self.data[self.j] not in '.pP':\n            return HexInteger\n\n        if self.data[self.j] == '.':\n            self.j = self.j + 1\n            self.read_digits('0123456789abcdefABCDEF')\n\n        if self.data[self.j] in 'pP':\n            self.j = self.j + 1\n        else:\n            self.error('Invalid hex float literal')\n\n        if self.data[self.j] in '-+':\n            self.j = self.j + 1\n\n        self.i = self.j\n        self.read_decimal_integer()\n\n        if self.data[self.j] in 'fFdD':\n            self.j = self.j + 1\n\n        self.i = orig_i\n        return HexFloatingPoint\n\n    def read_digits(self, digits):\n        tmp_i = 0\n        c = None\n\n        while True:\n            c = self.data[self.j + tmp_i]\n\n            if c in digits:\n                self.j += 1 + tmp_i\n                tmp_i = 0\n            elif c == '_':\n                tmp_i += 1\n            else:\n                break\n\n        if c in 'lL':\n            self.j += 1\n\n    def read_decimal_integer(self):\n        self.j = self.i\n        self.read_digits('0123456789')\n\n    def read_hex_integer(self):\n        self.j = self.i + 2\n        self.read_digits('0123456789abcdefABCDEF')\n\n    def read_bin_integer(self):\n        self.j = self.i + 2\n        self.read_digits('01')\n\n    def read_octal_integer(self):\n        self.j = self.i + 1\n        self.read_digits('01234567')\n\n    def read_integer_or_float(self, c, c_next):\n        if c == '0' and c_next in 'xX':\n            return self.read_hex_integer_or_float()\n        
elif c == '0' and c_next in 'bB':\n            self.read_bin_integer()\n            return BinaryInteger\n        elif c == '0' and c_next in '01234567':\n            self.read_octal_integer()\n            return OctalInteger\n        else:\n            return self.read_decimal_float_or_integer()\n\n    def try_separator(self):\n        if self.data[self.i] in Separator.VALUES:\n            self.j = self.i + 1\n            return True\n        return False\n\n    def decode_data(self):\n        # Encodings to try in order\n        codecs = ['utf_8', 'iso-8859-1']\n\n        # If data is already unicode don't try to redecode\n        if isinstance(self.data, six.text_type):\n            return self.data\n\n        for codec in codecs:\n            try:\n                data = self.data.decode(codec)\n                return data\n            except UnicodeDecodeError:\n                pass\n\n        self.error('Could not decode input data')\n\n    def is_java_identifier_start(self, c):\n        return unicodedata.category(c) in self.IDENT_START_CATEGORIES\n\n    def read_identifier(self):\n        self.j = self.i + 1\n\n        while unicodedata.category(self.data[self.j]) in self.IDENT_PART_CATEGORIES:\n            self.j += 1\n\n        ident = self.data[self.i:self.j]\n        if ident in Keyword.VALUES:\n            token_type = Keyword\n\n            if ident in BasicType.VALUES:\n                token_type = BasicType\n            elif ident in Modifier.VALUES:\n                token_type = Modifier\n\n        elif ident in Boolean.VALUES:\n            token_type = Boolean\n        elif ident == 'null':\n            token_type = Null\n        else:\n            token_type = Identifier\n\n        return token_type\n\n    def pre_tokenize(self):\n        new_data = list()\n        data = self.decode_data()\n\n        i = 0\n        j = 0\n        length = len(data)\n\n        NONE         = 0\n        ELIGIBLE     = 1\n        MARKER_FOUND = 2\n\n        state = NONE\n\n        while j < length:\n            if state == NONE:\n                j = data.find('\\\\', j)\n\n                if j == -1:\n                    j = length\n                    break\n\n                state = ELIGIBLE\n\n            elif state == ELIGIBLE:\n                c = data[j]\n\n                if c == 'u':\n                    state = MARKER_FOUND\n                    new_data.append(data[i:j - 1])\n                else:\n                    state = NONE\n\n            elif state == MARKER_FOUND:\n                c = data[j]\n\n                if c != 'u':\n                    try:\n                        escape_code = int(data[j:j+4], 16)\n                    except ValueError:\n                        self.error('Invalid unicode escape', data[j:j+4])\n\n                    new_data.append(six.unichr(escape_code))\n\n                    i = j + 4\n                    j = i\n\n                    state = NONE\n\n                    continue\n\n            j = j + 1\n\n        new_data.append(data[i:])\n\n        self.data = ''.join(new_data)\n        self.length = len(self.data)\n\n    def tokenize(self):\n        self.reset()\n\n        # Convert unicode escapes\n        self.pre_tokenize()\n\n        while self.i < self.length:\n            token_type = None\n\n            c = self.data[self.i]\n            c_next = None\n            startswith = c\n\n            if self.i + 1 < self.length:\n                c_next = self.data[self.i + 1]\n                startswith = c + c_next\n\n            if 
c.isspace():\n                self.consume_whitespace()\n                continue\n\n            elif startswith in (\"//\", \"/*\"):\n                if startswith == \"/*\" and self.try_javadoc_comment():\n                    self.javadoc = self.data[self.i:self.j]\n                    self.i = self.j\n                else:\n                    self.read_comment()\n                continue\n\n            elif startswith == '..' and self.try_operator():\n                # Ensure we don't mistake a '...' operator for a sequence of\n                # three '.' separators. This is done as an optimization instead\n                # of moving try_operator higher in the chain because operators\n                # aren't as common and try_operator is expensive\n                token_type = Operator\n\n            elif c == '@':\n                token_type = Annotation\n                self.j = self.i + 1\n\n            elif c == '.' and c_next.isdigit():\n                token_type = self.read_decimal_float_or_integer()\n\n            elif self.try_separator():\n                token_type = Separator\n\n            elif c in (\"'\", '\"'):\n                token_type = String\n                self.read_string()\n\n            elif c in '0123456789':\n                token_type = self.read_integer_or_float(c, c_next)\n\n            elif self.is_java_identifier_start(c):\n                token_type = self.read_identifier()\n\n            elif self.try_operator():\n                token_type = Operator\n\n            else:\n                self.error('Could not process token', c)\n\n            position = (self.current_line, self.i - self.start_of_line)\n            token = token_type(self.data[self.i:self.j], position, self.javadoc)\n            yield token\n\n            if self.javadoc:\n                self.javadoc = None\n\n            self.i = self.j\n\n    def error(self, message, char=None):\n        # Provide additional information in the error message\n        line_start = self.data.rfind('\\n', 0, self.i) + 1\n        line_end = self.data.find('\\n', self.i)\n        line = self.data[line_start:line_end].strip()\n\n        line_number = self.current_line\n\n        if not char:\n            char = self.data[self.j]\n\n        message = u'%s at \"%s\", line %s: %s' % (message, char, line_number, line)\n\n        raise LexerError(message)\n\ndef tokenize(code):\n    tokenizer = JavaTokenizer(code)\n    return tokenizer.tokenize()\n\ndef reformat_tokens(tokens):\n    indent = 0\n    closed_block = False\n    ident_last = False\n\n    output = list()\n\n    for token in tokens:\n        if closed_block:\n            closed_block = False\n            indent -= 4\n\n            output.append('\\n')\n            output.append(' ' * indent)\n            output.append('}')\n\n            if isinstance(token, (Literal, Keyword, Identifier)):\n                output.append('\\n')\n                output.append(' ' * indent)\n\n        if token.value == '{':\n            indent += 4\n            output.append(' {\\n')\n            output.append(' ' * indent)\n\n        elif token.value == '}':\n            closed_block = True\n\n        elif token.value == ',':\n            output.append(', ')\n\n        elif isinstance(token, (Literal, Keyword, Identifier)):\n            if ident_last:\n                # If the last token was a literal/keyword/identifier put a space in between\n                output.append(' ')\n            ident_last = True\n            output.append(token.value)\n\n        elif 
isinstance(token, Operator):\n            output.append(' ' + token.value + ' ')\n\n        elif token.value == ';':\n            output.append(';\\n')\n            output.append(' ' * indent)\n\n        else:\n            output.append(token.value)\n\n        ident_last = isinstance(token, (Literal, Keyword, Identifier))\n\n    if closed_block:\n        output.append('\\n}')\n\n    output.append('\\n')\n\n    return ''.join(output)\n"
  },
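  {
    "path": "baseline_tokenization/javalang/examples/tokenizer_usage_example.py",
    "content": "# Illustrative usage sketch added for documentation; not part of the original\n# javalang distribution. It exercises the tokenize() entry point defined in\n# tokenizer.py. The Java snippet below is a made-up example, and the import\n# assumes the javalang package is importable from the working directory.\n\nfrom javalang import tokenizer\n\ncode = 'int add(int a, int b) { return a + b; }'\n\n# tokenize() is a generator of typed tokens (Identifier, Operator, Separator,\n# ...), each carrying its source text, (line, column) position, and any\n# preceding javadoc comment.\nfor token in tokenizer.tokenize(code):\n    print(type(token).__name__, repr(token.value), token.position)\n"
  },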
  {
    "path": "baseline_tokenization/javalang/tree.py",
    "content": "\nfrom .ast import Node\n\n# ------------------------------------------------------------------------------\n\nclass CompilationUnit(Node):\n    attrs = (\"package\", \"imports\", \"types\")\n\nclass Import(Node):\n    attrs = (\"path\", \"static\", \"wildcard\")\n\nclass Documented(Node):\n    attrs = (\"documentation\",)\n\nclass Declaration(Node):\n    attrs = (\"modifiers\", \"annotations\")\n\nclass TypeDeclaration(Declaration, Documented):\n    attrs = (\"name\", \"body\")\n\n    @property\n    def fields(self):\n        return [decl for decl in self.body if isinstance(decl, FieldDeclaration)]\n\n    @property\n    def methods(self):\n        return [decl for decl in self.body if isinstance(decl, MethodDeclaration)]\n\n    @property\n    def constructors(self):\n        return [decl for decl in self.body if isinstance(decl, ConstructorDeclaration)]\n\nclass PackageDeclaration(Declaration, Documented):\n    attrs = (\"name\",)\n\nclass ClassDeclaration(TypeDeclaration):\n    attrs = (\"type_parameters\", \"extends\", \"implements\")\n\nclass EnumDeclaration(TypeDeclaration):\n    attrs = (\"implements\",)\n\nclass InterfaceDeclaration(TypeDeclaration):\n    attrs = (\"type_parameters\", \"extends\",)\n\nclass AnnotationDeclaration(TypeDeclaration):\n    attrs = ()\n\n# ------------------------------------------------------------------------------\n\nclass Type(Node):\n    attrs = (\"name\", \"dimensions\",)\n\nclass BasicType(Type):\n    attrs = ()\n\nclass ReferenceType(Type):\n    attrs = (\"arguments\", \"sub_type\")\n\nclass TypeArgument(Node):\n    attrs = (\"type\", \"pattern_type\")\n\n# ------------------------------------------------------------------------------\n\nclass TypeParameter(Node):\n    attrs = (\"name\", \"extends\")\n\n# ------------------------------------------------------------------------------\n\nclass Annotation(Node):\n    attrs = (\"name\", \"element\")\n\nclass ElementValuePair(Node):\n    attrs = (\"name\", \"value\")\n\nclass ElementArrayValue(Node):\n    attrs = (\"values\",)\n\n# ------------------------------------------------------------------------------\n\nclass Member(Documented):\n    attrs = ()\n\nclass MethodDeclaration(Member, Declaration):\n    attrs = (\"type_parameters\", \"return_type\", \"name\", \"parameters\", \"throws\", \"body\")\n\nclass FieldDeclaration(Member, Declaration):\n    attrs = (\"type\", \"declarators\")\n\nclass ConstructorDeclaration(Declaration, Documented):\n    attrs = (\"type_parameters\", \"name\", \"parameters\", \"throws\", \"body\")\n\n# ------------------------------------------------------------------------------\n\nclass ConstantDeclaration(FieldDeclaration):\n    attrs = ()\n\nclass ArrayInitializer(Node):\n    attrs = (\"initializers\",)\n\nclass VariableDeclaration(Declaration):\n    attrs = (\"type\", \"declarators\")\n\nclass LocalVariableDeclaration(VariableDeclaration):\n    attrs = ()\n\nclass VariableDeclarator(Node):\n    attrs = (\"name\", \"dimensions\", \"initializer\")\n\nclass FormalParameter(Declaration):\n    attrs = (\"type\", \"name\", \"varargs\")\n\nclass InferredFormalParameter(Node):\n    attrs = ('name',)\n\n# ------------------------------------------------------------------------------\n\nclass Statement(Node):\n    attrs = (\"label\",)\n\nclass IfStatement(Statement):\n    attrs = (\"condition\", \"then_statement\", \"else_statement\")\n\nclass WhileStatement(Statement):\n    attrs = (\"condition\", \"body\")\n\nclass DoStatement(Statement):\n    attrs = 
(\"condition\", \"body\")\n\nclass ForStatement(Statement):\n    attrs = (\"control\", \"body\")\n\nclass AssertStatement(Statement):\n    attrs = (\"condition\", \"value\")\n\nclass BreakStatement(Statement):\n    attrs = (\"goto\",)\n\nclass ContinueStatement(Statement):\n    attrs = (\"goto\",)\n\nclass ReturnStatement(Statement):\n    attrs = (\"expression\",)\n\nclass ThrowStatement(Statement):\n    attrs = (\"expression\",)\n\nclass SynchronizedStatement(Statement):\n    attrs = (\"lock\", \"block\")\n\nclass TryStatement(Statement):\n    attrs = (\"resources\", \"block\", \"catches\", \"finally_block\")\n\nclass SwitchStatement(Statement):\n    attrs = (\"expression\", \"cases\")\n\nclass BlockStatement(Statement):\n    attrs = (\"statements\",)\n\nclass StatementExpression(Statement):\n    attrs = (\"expression\",)\n\n# ------------------------------------------------------------------------------\n\nclass TryResource(Declaration):\n    attrs = (\"type\", \"name\", \"value\")\n\nclass CatchClause(Statement):\n    attrs = (\"parameter\", \"block\")\n\nclass CatchClauseParameter(Declaration):\n    attrs = (\"types\", \"name\")\n\n# ------------------------------------------------------------------------------\n\nclass SwitchStatementCase(Node):\n    attrs = (\"case\", \"statements\")\n\nclass ForControl(Node):\n    attrs = (\"init\", \"condition\", \"update\")\n\nclass EnhancedForControl(Node):\n    attrs = (\"var\", \"iterable\")\n\n# ------------------------------------------------------------------------------\n\nclass Expression(Node):\n    attrs = ()\n\nclass Assignment(Expression):\n    attrs = (\"expressionl\", \"value\", \"type\")\n\nclass TernaryExpression(Expression):\n    attrs = (\"condition\", \"if_true\", \"if_false\")\n\nclass BinaryOperation(Expression):\n    attrs = (\"operator\", \"operandl\", \"operandr\")\n\nclass Cast(Expression):\n    attrs = (\"type\", \"expression\")\n\nclass MethodReference(Expression):\n    attrs = (\"expression\", \"method\", \"type_arguments\")\n\nclass LambdaExpression(Expression):\n    attrs = ('parameters', 'body')\n\n# ------------------------------------------------------------------------------\n\nclass Primary(Expression):\n    attrs = (\"prefix_operators\", \"postfix_operators\", \"qualifier\", \"selectors\")\n\nclass Literal(Primary):\n    attrs = (\"value\",)\n\nclass This(Primary):\n    attrs = ()\n\nclass MemberReference(Primary):\n    attrs = (\"member\",)\n\nclass Invocation(Primary):\n    attrs = (\"type_arguments\", \"arguments\")\n\nclass ExplicitConstructorInvocation(Invocation):\n    attrs = ()\n\nclass SuperConstructorInvocation(Invocation):\n    attrs = ()\n\nclass MethodInvocation(Invocation):\n    attrs = (\"member\",)\n\nclass SuperMethodInvocation(Invocation):\n    attrs = (\"member\",)\n\nclass SuperMemberReference(Primary):\n    attrs = (\"member\",)\n\nclass ArraySelector(Expression):\n    attrs = (\"index\",)\n\nclass ClassReference(Primary):\n    attrs = (\"type\",)\n\nclass VoidClassReference(ClassReference):\n    attrs = ()\n\n# ------------------------------------------------------------------------------\n\nclass Creator(Primary):\n    attrs = (\"type\",)\n\nclass ArrayCreator(Creator):\n    attrs = (\"dimensions\", \"initializer\")\n\nclass ClassCreator(Creator):\n    attrs = (\"constructor_type_arguments\", \"arguments\", \"body\")\n\nclass InnerClassCreator(Creator):\n    attrs = (\"constructor_type_arguments\", \"arguments\", \"body\")\n\n# 
------------------------------------------------------------------------------\n\nclass EnumBody(Node):\n    attrs = (\"constants\", \"declarations\")\n\nclass EnumConstantDeclaration(Declaration, Documented):\n    attrs = (\"name\", \"arguments\", \"body\")\n\nclass AnnotationMethod(Declaration):\n    attrs = (\"name\", \"return_type\", \"dimensions\", \"default\")\n\n"
  },
  {
    "path": "baseline_tokenization/javalang/util.py",
    "content": "\n\nclass LookAheadIterator(object):\n    def __init__(self, iterable):\n        self.iterable = iter(iterable)\n        self.look_ahead = list()\n        self.markers = list()\n        self.default = None\n        self.value = None\n\n    def __iter__(self):\n        return self\n\n    def set_default(self, value):\n        self.default = value\n\n    def next(self):\n        return self.__next__()\n\n    def __next__(self):\n        if self.look_ahead:\n            self.value = self.look_ahead.pop(0)\n        else:\n            self.value = next(self.iterable)\n\n        if self.markers:\n            self.markers[-1].append(self.value)\n\n        return self.value\n\n    def look(self, i=0):\n        \"\"\" Look ahead of the iterable by some number of values with advancing\n        past them.\n\n        If the requested look ahead is past the end of the iterable then None is\n        returned.\n\n        \"\"\"\n\n        length = len(self.look_ahead)\n\n        if length <= i:\n            try:\n                self.look_ahead.extend([next(self.iterable)\n                    for _ in range(length, i + 1)])\n            except StopIteration:\n                return self.default\n\n        self.value = self.look_ahead[i]\n        return self.value\n\n    def last(self):\n        return self.value\n\n    def __enter__(self):\n        self.push_marker()\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        # Reset the iterator if there was an error\n        if exc_type or exc_val or exc_tb:\n            self.pop_marker(True)\n        else:\n            self.pop_marker(False)\n\n    def push_marker(self):\n        \"\"\" Push a marker on to the marker stack \"\"\"\n        self.markers.append(list())\n\n    def pop_marker(self, reset):\n        \"\"\" Pop a marker off of the marker stack. 
If reset is True then the\n        iterator will be returned to the state it was in before the\n        corresponding call to push_marker().\n\n        \"\"\"\n\n        marker = self.markers.pop()\n\n        if reset:\n            # Make the values available to be read again\n            marker.extend(self.look_ahead)\n            self.look_ahead = marker\n        elif self.markers:\n            # Otherwise, reassign the values to the top marker\n            self.markers[-1].extend(marker)\n        else:\n            # If there are no more markers in the stack then discard the values\n            pass\n\nclass LookAheadListIterator(object):\n    def __init__(self, iterable):\n        self.list = list(iterable)\n\n        self.marker = 0\n        self.saved_markers = []\n\n        self.default = None\n        self.value = None\n\n    def __iter__(self):\n        return self\n\n    def set_default(self, value):\n        self.default = value\n\n    def next(self):\n        return self.__next__()\n\n    def __next__(self):\n        try:\n            self.value = self.list[self.marker]\n            self.marker += 1\n        except IndexError:\n            raise StopIteration()\n\n        return self.value\n\n    def look(self, i=0):\n        \"\"\" Look ahead of the iterable by some number of values without\n        advancing past them.\n\n        If the requested look ahead is past the end of the iterable then the\n        default value is returned.\n\n        \"\"\"\n\n        try:\n            self.value = self.list[self.marker + i]\n        except IndexError:\n            return self.default\n\n        return self.value\n\n    def last(self):\n        return self.value\n\n    def __enter__(self):\n        self.push_marker()\n        return self\n\n    def __exit__(self, exc_type, exc_val, exc_tb):\n        # Reset the iterator if there was an error\n        if exc_type or exc_val or exc_tb:\n            self.pop_marker(True)\n        else:\n            self.pop_marker(False)\n\n    def push_marker(self):\n        \"\"\" Push a marker onto the marker stack \"\"\"\n        self.saved_markers.append(self.marker)\n\n    def pop_marker(self, reset):\n        \"\"\" Pop a marker off of the marker stack. If reset is True then the\n        iterator will be returned to the state it was in before the\n        corresponding call to push_marker().\n\n        \"\"\"\n\n        saved = self.saved_markers.pop()\n\n        if reset:\n            self.marker = saved\n        elif self.saved_markers:\n            self.saved_markers[-1] = saved\n\n"
  },
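  {
    "path": "baseline_tokenization/javalang/examples/lookahead_marker_example.py",
    "content": "# Illustrative sketch added for documentation; not part of the original\n# javalang distribution. It demonstrates the look-ahead and marker protocol of\n# LookAheadListIterator from util.py: push_marker()/pop_marker(reset) let a\n# caller consume values speculatively and roll back on failure, which is how\n# the parser backtracks.\n\nfrom javalang.util import LookAheadListIterator\n\nit = LookAheadListIterator([1, 2, 3, 4])\n\nprint(next(it))    # 1\nprint(it.look())   # 2 -- peek at the next value without consuming it\nprint(it.look(1))  # 3 -- peek one value further ahead\n\nit.push_marker()\nprint(next(it))    # 2 -- consumed speculatively\nit.pop_marker(reset=True)  # roll back to the marker\nprint(next(it))    # 2 -- available again\n"
  },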
  {
    "path": "baseline_tokenization/subtokenize_nmt_baseline.py",
    "content": "#!/usr/bin/python\n\nimport javalang\nimport sys\nimport re\n\n\nmodifiers = ['public', 'private', 'protected', 'static']\n\nRE_WORDS = re.compile(r'''\n    # Find words in a string. Order matters!\n    [A-Z]+(?=[A-Z][a-z]) |  # All upper case before a capitalized word\n    [A-Z]?[a-z]+ |  # Capitalized words / all lower case\n    [A-Z]+ |  # All upper case\n    \\d+ | # Numbers\n    .+\n''', re.VERBOSE)\n\ndef split_subtokens(str):\n    return [subtok for subtok in RE_WORDS.findall(str) if not subtok == '_']\n\ndef tokenizeFile(file_path):\n  lines = 0\n  with open(file_path, 'r', encoding=\"utf-8\") as file:\n    with open(file_path + 'method_names.txt', 'w') as method_names_file:\n      with open(file_path + 'method_subtokens_content.txt', 'w') as method_contents_file:\n        for line in file:\n          lines += 1\n          line = line.rstrip()\n          parts = line.split('|', 1)\n          method_name = parts[0]\n          method_content = parts[1]\n          try:\n            tokens = list(javalang.tokenizer.tokenize(method_content))\n          except:\n            print('ERROR in tokenizing: ' + method_content)\n            #tokens = method_content.split(' ')\n          if len(method_name) > 0 and len(tokens) > 0:\n            method_names_file.write(method_name + '\\n')\n            method_contents_file.write(' '.join([' '.join(split_subtokens(i.value)) for i in tokens if not i.value in modifiers]) + '\\n')\n          else:\n            print('ERROR in len of: ' + method_name + ', tokens: ' + str(tokens))\n  print(str(lines))\n\n\nif __name__ == '__main__':\n  file = sys.argv[1]\n  tokenizeFile(file)\n\n\n"
  },
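  {
    "path": "baseline_tokenization/split_subtokens_example.py",
    "content": "# Illustrative sketch added for documentation; not part of the original\n# repository. It shows what the RE_WORDS pattern and split_subtokens() in\n# subtokenize_nmt_baseline.py produce for typical Java identifiers: camelCase\n# and digit boundaries are split, and runs of capitals stay together. Assumes\n# it is run from the directory containing subtokenize_nmt_baseline.py.\n\nfrom subtokenize_nmt_baseline import split_subtokens\n\nprint(split_subtokens('getHTTPResponseCode'))  # ['get', 'HTTP', 'Response', 'Code']\nprint(split_subtokens('code2seq'))             # ['code', '2', 'seq']\nprint(split_subtokens('toString'))             # ['to', 'String']\n"
  },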
  {
    "path": "code2seq.py",
    "content": "from argparse import ArgumentParser\nimport numpy as np\nimport tensorflow as tf\n\nfrom config import Config\nfrom interactive_predict import InteractivePredictor\nfrom model import Model\n\nif __name__ == '__main__':\n    parser = ArgumentParser()\n    parser.add_argument(\"-d\", \"--data\", dest=\"data_path\",\n                        help=\"path to preprocessed dataset\", required=False)\n    parser.add_argument(\"-te\", \"--test\", dest=\"test_path\",\n                        help=\"path to test file\", metavar=\"FILE\", required=False)\n\n    parser.add_argument(\"-s\", \"--save_prefix\", dest=\"save_path_prefix\",\n                        help=\"path to save file\", metavar=\"FILE\", required=False)\n    parser.add_argument(\"-l\", \"--load\", dest=\"load_path\",\n                        help=\"path to saved file\", metavar=\"FILE\", required=False)\n    parser.add_argument('--release', action='store_true',\n                        help='if specified and loading a trained model, release the loaded model for a smaller model '\n                             'size.')\n    parser.add_argument('--predict', action='store_true')\n    parser.add_argument('--debug', action='store_true')\n    parser.add_argument('--seed', type=int, default=239)\n    args = parser.parse_args()\n\n    np.random.seed(args.seed)\n    tf.set_random_seed(args.seed)\n\n    if args.debug:\n        config = Config.get_debug_config(args)\n    else:\n        config = Config.get_default_config(args)\n\n    model = Model(config)\n    print('Created model')\n    if config.TRAIN_PATH:\n        model.train()\n    if config.TEST_PATH and not args.data_path:\n        results, precision, recall, f1, rouge = model.evaluate()\n        print('Accuracy: ' + str(results))\n        print('Precision: ' + str(precision) + ', recall: ' + str(recall) + ', F1: ' + str(f1))\n        print('Rouge: ', rouge)\n    if args.predict:\n        predictor = InteractivePredictor(config, model)\n        predictor.predict()\n    if args.release and args.load_path:\n        model.evaluate(release=True)\n    model.close_session()\n"
  },
  {
    "path": "common.py",
    "content": "import re\nimport subprocess\nimport sys\n\n\nclass Common:\n    internal_delimiter = '|'\n    SOS = '<S>'\n    EOS = '</S>'\n    PAD = '<PAD>'\n    UNK = '<UNK>'\n\n    @staticmethod\n    def normalize_word(word):\n        stripped = re.sub(r'[^a-zA-Z]', '', word)\n        if len(stripped) == 0:\n            return word.lower()\n        else:\n            return stripped.lower()\n\n    @staticmethod\n    def load_histogram(path, max_size=None):\n        histogram = {}\n        with open(path, 'r') as file:\n            for line in file.readlines():\n                parts = line.split(' ')\n                if not len(parts) == 2:\n                    continue\n                histogram[parts[0]] = int(parts[1])\n        sorted_histogram = [(k, histogram[k]) for k in sorted(histogram, key=histogram.get, reverse=True)]\n        return dict(sorted_histogram[:max_size])\n\n    @staticmethod\n    def load_vocab_from_dict(word_to_count, add_values=[], max_size=None):\n        word_to_index, index_to_word = {}, {}\n        current_index = 0\n        for value in add_values:\n            word_to_index[value] = current_index\n            index_to_word[current_index] = value\n            current_index += 1\n        sorted_counts = [(k, word_to_count[k]) for k in sorted(word_to_count, key=word_to_count.get, reverse=True)]\n        limited_sorted = dict(sorted_counts[:max_size])\n        for word, count in limited_sorted.items():\n            word_to_index[word] = current_index\n            index_to_word[current_index] = word\n            current_index += 1\n        return word_to_index, index_to_word, current_index\n\n    @staticmethod\n    def binary_to_string(binary_string):\n        return binary_string.decode(\"utf-8\")\n\n    @staticmethod\n    def binary_to_string_list(binary_string_list):\n        return [Common.binary_to_string(w) for w in binary_string_list]\n\n    @staticmethod\n    def binary_to_string_matrix(binary_string_matrix):\n        return [Common.binary_to_string_list(l) for l in binary_string_matrix]\n\n    @staticmethod\n    def binary_to_string_3d(binary_string_tensor):\n        return [Common.binary_to_string_matrix(l) for l in binary_string_tensor]\n\n    @staticmethod\n    def legal_method_names_checker(name):\n        return not name in [Common.UNK, Common.PAD, Common.EOS]\n\n    @staticmethod\n    def filter_impossible_names(top_words):\n        result = list(filter(Common.legal_method_names_checker, top_words))\n        return result\n\n    @staticmethod\n    def unique(sequence):\n        return list(set(sequence))\n\n    @staticmethod\n    def parse_results(result, pc_info_dict, topk=5):\n        prediction_results = {}\n        results_counter = 0\n        for single_method in result:\n            original_name, top_suggestions, top_scores, attention_per_context = list(single_method)\n            current_method_prediction_results = PredictionResults(original_name)\n            if attention_per_context is not None:\n                word_attention_pairs = [(word, attention) for word, attention in\n                                        zip(top_suggestions, attention_per_context) if\n                                        Common.legal_method_names_checker(word)]\n                for predicted_word, attention_timestep in word_attention_pairs:\n                    current_timestep_paths = []\n                    for context, attention in [(key, attention_timestep[key]) for key in\n                                               sorted(attention_timestep, 
key=attention_timestep.get, reverse=True)][\n                                              :topk]:\n                        if context in pc_info_dict:\n                            pc_info = pc_info_dict[context]\n                            current_timestep_paths.append((attention.item(), pc_info))\n\n                    current_method_prediction_results.append_prediction(predicted_word, current_timestep_paths)\n            else:\n                for predicted_seq in top_suggestions:\n                    filtered_seq = [word for word in predicted_seq if Common.legal_method_names_checker(word)]\n                    current_method_prediction_results.append_prediction(filtered_seq, None)\n\n            prediction_results[results_counter] = current_method_prediction_results\n            results_counter += 1\n        return prediction_results\n\n    @staticmethod\n    def compute_bleu(ref_file_name, predicted_file_name):\n        with open(predicted_file_name) as predicted_file:\n            pipe = subprocess.Popen([\"perl\", \"scripts/multi-bleu.perl\", ref_file_name], stdin=predicted_file,\n                                    stdout=sys.stdout, stderr=sys.stderr)\n\n\nclass PredictionResults:\n    def __init__(self, original_name):\n        self.original_name = original_name\n        self.predictions = list()\n\n    def append_prediction(self, name, current_timestep_paths):\n        self.predictions.append(SingleTimeStepPrediction(name, current_timestep_paths))\n\nclass SingleTimeStepPrediction:\n    def __init__(self, prediction, attention_paths):\n        self.prediction = prediction\n        if attention_paths is not None:\n            paths_with_scores = []\n            for attention_score, pc_info in attention_paths:\n                path_context_dict = {'score': attention_score,\n                                     'path': pc_info.longPath,\n                                     'token1': pc_info.token1,\n                                     'token2': pc_info.token2}\n                paths_with_scores.append(path_context_dict)\n            self.attention_paths = paths_with_scores\n\n\nclass PathContextInformation:\n    def __init__(self, context):\n        self.token1 = context['name1']\n        self.longPath = context['path']\n        self.shortPath = context['shortPath']\n        self.token2 = context['name2']\n\n    def __str__(self):\n        return '%s,%s,%s' % (self.token1, self.shortPath, self.token2)\n"
  },
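  {
    "path": "vocab_usage_example.py",
    "content": "# Illustrative sketch added for documentation; not part of the original\n# repository. It shows how Common.load_vocab_from_dict (common.py) builds the\n# word<->index mappings used by the model: the special tokens in add_values\n# take the first indices, then the max_size most frequent words follow in\n# descending count order. The toy counts below are made up.\n\nfrom common import Common\n\nword_to_count = {'get': 10, 'value': 7, 'set': 3}\n\nword_to_index, index_to_word, vocab_size = Common.load_vocab_from_dict(\n    word_to_count, add_values=[Common.PAD, Common.UNK], max_size=2)\n\nprint(word_to_index)  # {'<PAD>': 0, '<UNK>': 1, 'get': 2, 'value': 3}\nprint(vocab_size)     # 4 -- two special tokens + the two most frequent words\n"
  },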
  {
    "path": "config.py",
    "content": "class Config:\n    @staticmethod\n    def get_default_config(args):\n        config = Config(args)\n        config.NUM_EPOCHS = 3000\n        config.SAVE_EVERY_EPOCHS = 1\n        config.PATIENCE = 10\n        config.BATCH_SIZE = 512\n        config.TEST_BATCH_SIZE = 256\n        config.READER_NUM_PARALLEL_BATCHES = 1\n        config.SHUFFLE_BUFFER_SIZE = 10000\n        config.CSV_BUFFER_SIZE = 100 * 1024 * 1024  # 100 MB\n        config.MAX_CONTEXTS = 200\n        config.SUBTOKENS_VOCAB_MAX_SIZE = 190000\n        config.TARGET_VOCAB_MAX_SIZE = 27000\n        config.EMBEDDINGS_SIZE = 128\n        config.RNN_SIZE = 128 * 2  # Two LSTMs to embed paths, each of size 128\n        config.DECODER_SIZE = 320\n        config.NUM_DECODER_LAYERS = 1\n        config.MAX_PATH_LENGTH = 8 + 1\n        config.MAX_NAME_PARTS = 5\n        config.MAX_TARGET_PARTS = 6\n        config.EMBEDDINGS_DROPOUT_KEEP_PROB = 0.75\n        config.RNN_DROPOUT_KEEP_PROB = 0.5\n        config.BIRNN = True\n        config.RANDOM_CONTEXTS = True\n        config.BEAM_WIDTH = 0\n        config.USE_MOMENTUM = True\n        return config\n\n    def take_model_hyperparams_from(self, otherConfig):\n        self.EMBEDDINGS_SIZE = otherConfig.EMBEDDINGS_SIZE\n        self.RNN_SIZE = otherConfig.RNN_SIZE\n        self.DECODER_SIZE = otherConfig.DECODER_SIZE\n        self.NUM_DECODER_LAYERS = otherConfig.NUM_DECODER_LAYERS\n        self.BIRNN = otherConfig.BIRNN\n        if self.DATA_NUM_CONTEXTS <= 0:\n            self.DATA_NUM_CONTEXTS = otherConfig.DATA_NUM_CONTEXTS\n\n    def __init__(self, args):\n        self.NUM_EPOCHS = 0\n        self.SAVE_EVERY_EPOCHS = 0\n        self.PATIENCE = 0\n        self.BATCH_SIZE = 0\n        self.TEST_BATCH_SIZE = 0\n        self.READER_NUM_PARALLEL_BATCHES = 0\n        self.SHUFFLE_BUFFER_SIZE = 0\n        self.CSV_BUFFER_SIZE = None\n        self.TRAIN_PATH = args.data_path\n        self.TEST_PATH = args.test_path if args.test_path is not None else ''\n        self.DATA_NUM_CONTEXTS = 0\n        self.MAX_CONTEXTS = 0\n        self.SUBTOKENS_VOCAB_MAX_SIZE = 0\n        self.TARGET_VOCAB_MAX_SIZE = 0\n        self.EMBEDDINGS_SIZE = 0\n        self.RNN_SIZE = 0\n        self.DECODER_SIZE = 0\n        self.NUM_DECODER_LAYERS = 0\n        self.SAVE_PATH = args.save_path_prefix\n        self.LOAD_PATH = args.load_path\n        self.MAX_PATH_LENGTH = 0\n        self.MAX_NAME_PARTS = 0\n        self.MAX_TARGET_PARTS = 0\n        self.EMBEDDINGS_DROPOUT_KEEP_PROB = 0\n        self.RNN_DROPOUT_KEEP_PROB = 0\n        self.BIRNN = False\n        self.RANDOM_CONTEXTS = True\n        self.BEAM_WIDTH = 1\n        self.USE_MOMENTUM = True\n        self.RELEASE = args.release\n\n    @staticmethod\n    def get_debug_config(args):\n        config = Config(args)\n        config.NUM_EPOCHS = 3000\n        config.SAVE_EVERY_EPOCHS = 100\n        config.PATIENCE = 200\n        config.BATCH_SIZE = 7\n        config.TEST_BATCH_SIZE = 7\n        config.READER_NUM_PARALLEL_BATCHES = 1\n        config.SHUFFLE_BUFFER_SIZE = 10\n        config.CSV_BUFFER_SIZE = None\n        config.MAX_CONTEXTS = 5\n        config.SUBTOKENS_VOCAB_MAX_SIZE = 190000\n        config.TARGET_VOCAB_MAX_SIZE = 27000\n        config.EMBEDDINGS_SIZE = 19\n        config.RNN_SIZE = 10\n        config.DECODER_SIZE = 11\n        config.NUM_DECODER_LAYERS = 1\n        config.MAX_PATH_LENGTH = 8 + 1\n        config.MAX_NAME_PARTS = 5\n        config.MAX_TARGET_PARTS = 6\n        config.EMBEDDINGS_DROPOUT_KEEP_PROB = 1\n        
config.RNN_DROPOUT_KEEP_PROB = 1\n        config.BIRNN = True\n        config.RANDOM_CONTEXTS = True\n        config.BEAM_WIDTH = 0\n        config.USE_MOMENTUM = False\n        return config\n"
  },
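  {
    "path": "config_usage_example.py",
    "content": "# Illustrative sketch added for documentation; not part of the original\n# repository. It builds a Config the same way code2seq.py does, using a\n# minimal stand-in for the parsed command-line arguments; the attribute names\n# mirror the argparse dests in code2seq.py, and the paths are made up.\n\nfrom argparse import Namespace\n\nfrom config import Config\n\nargs = Namespace(data_path='data/my_dataset/my_dataset',\n                 test_path='data/my_dataset/my_dataset.val.c2s',\n                 save_path_prefix='models/my_model/model',\n                 load_path=None,\n                 release=False)\n\nconfig = Config.get_default_config(args)\nprint(config.BATCH_SIZE)       # 512\nprint(config.MAX_PATH_LENGTH)  # 9 (paths of up to 8 nodes, plus one)\n"
  },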
  {
    "path": "extractor.py",
    "content": "import json\n\nimport requests\n\nfrom common import PathContextInformation\n\n\nclass Extractor:\n    def __init__(self, config, extractor_api_url, max_path_length, max_path_width):\n        self.config = config\n        self.max_path_length = max_path_length\n        self.max_path_width = max_path_width\n        self.extractor_api_url = extractor_api_url\n        self.bad_characters_table = str.maketrans('', '', '\\t\\r\\n')\n\n    @staticmethod\n    def post_request(url, code_string):\n        return requests.post(url, data=json.dumps({\"code\": code_string, \"decompose\": True}, separators=(',', ':')))\n\n    def extract_paths(self, code_string):\n        response = self.post_request(self.extractor_api_url, code_string)\n        response_array = json.loads(response.text)\n        if 'errorType' in response_array:\n            raise ValueError(response.text)\n        if 'errorMessage' in response_array:\n            raise TimeoutError(response.text)\n        pc_info_dict = {}\n        result = []\n        for single_method in response_array:\n            method_name = single_method['target']\n            current_result_line_parts = [method_name]\n            contexts = single_method['paths']\n            for context in contexts[:self.config.DATA_NUM_CONTEXTS]:\n                pc_info = PathContextInformation(context)\n                current_result_line_parts += [str(pc_info)]\n                pc_info_dict[(pc_info.token1, pc_info.shortPath, pc_info.token2)] = pc_info\n            space_padding = ' ' * (self.config.DATA_NUM_CONTEXTS - len(contexts))\n            result_line = ' '.join(current_result_line_parts) + space_padding\n            result.append(result_line)\n        return result, pc_info_dict\n"
  },
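  {
    "path": "extractor_response_example.py",
    "content": "# Illustrative sketch added for documentation; not part of the original\n# repository. It reconstructs, from the fields that Extractor.extract_paths\n# (extractor.py) actually reads, the JSON shape the extraction service is\n# expected to return; an object carrying 'errorType' or 'errorMessage' instead\n# signals failure. All names and path strings below are made-up placeholders.\n\nmock_response = [\n    {\n        # the method name to be predicted\n        'target': 'add',\n        # one entry per extracted path-context; extract_paths keeps at most\n        # config.DATA_NUM_CONTEXTS of these per method\n        'paths': [\n            {\n                'name1': 'a',        # first terminal's token\n                'path': '...',       # long form of the AST path\n                'shortPath': '...',  # short form, used as the context key\n                'name2': 'b',        # second terminal's token\n            },\n        ],\n    },\n]\n"
  },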
  {
    "path": "interactive_predict.py",
    "content": "from common import Common\nfrom extractor import Extractor\n\nSHOW_TOP_CONTEXTS = 10\nMAX_PATH_LENGTH = 8\nMAX_PATH_WIDTH = 2\nEXTRACTION_API = 'https://po3g2dx2qa.execute-api.us-east-1.amazonaws.com/production/extractmethods'\n\n\nclass InteractivePredictor:\n    exit_keywords = ['exit', 'quit', 'q']\n\n    def __init__(self, config, model):\n        model.predict([])\n        self.model = model\n        self.config = config\n        self.path_extractor = Extractor(config, EXTRACTION_API, self.config.MAX_PATH_LENGTH, max_path_width=2)\n\n    @staticmethod\n    def read_file(input_filename):\n        with open(input_filename, 'r') as file:\n            return file.readlines()\n\n    def predict(self):\n        input_filename = 'Input.java'\n        print('Serving')\n        while True:\n            print('Modify the file: \"' + input_filename + '\" and press any key when ready, or \"q\" / \"exit\" to exit')\n            user_input = input()\n            if user_input.lower() in self.exit_keywords:\n                print('Exiting...')\n                return\n            user_input = ' '.join(self.read_file(input_filename))\n            try:\n                predict_lines, pc_info_dict = self.path_extractor.extract_paths(user_input)\n            except ValueError:\n                continue\n            model_results = self.model.predict(predict_lines)\n\n            prediction_results = Common.parse_results(model_results, pc_info_dict, topk=SHOW_TOP_CONTEXTS)\n            for index, method_prediction in prediction_results.items():\n                print('Original name:\\t' + method_prediction.original_name)\n                if self.config.BEAM_WIDTH == 0:\n                    print('Predicted:\\t%s' % [step.prediction for step in method_prediction.predictions])\n                    for timestep, single_timestep_prediction in enumerate(method_prediction.predictions):\n                        print('Attention:')\n                        print('TIMESTEP: %d\\t: %s' % (timestep, single_timestep_prediction.prediction))\n                        for attention_obj in single_timestep_prediction.attention_paths:\n                            print('%f\\tcontext: %s,%s,%s' % (\n                                attention_obj['score'], attention_obj['token1'], attention_obj['path'],\n                                attention_obj['token2']))\n                else:\n                    print('Predicted:')\n                    for predicted_seq in method_prediction.predictions:\n                        print('\\t%s' % predicted_seq.prediction)\n"
  },
  {
    "path": "model.py",
    "content": "import _pickle as pickle\nimport os\nimport time\n\nimport numpy as np\nimport shutil\nimport tensorflow as tf\n\nimport reader\nfrom common import Common\nfrom rouge import FilesRouge\n\n\nclass Model:\n    topk = 10\n    num_batches_to_log = 100\n\n    def __init__(self, config):\n        self.config = config\n        self.sess = tf.Session()\n\n        self.eval_queue = None\n        self.predict_queue = None\n\n        self.eval_placeholder = None\n        self.predict_placeholder = None\n        self.eval_predicted_indices_op, self.eval_top_values_op, self.eval_true_target_strings_op, self.eval_topk_values = None, None, None, None\n        self.predict_top_indices_op, self.predict_top_scores_op, self.predict_target_strings_op = None, None, None\n        self.subtoken_to_index = None\n\n        if config.LOAD_PATH:\n            self.load_model(sess=None)\n        else:\n            with open('{}.dict.c2s'.format(config.TRAIN_PATH), 'rb') as file:\n                subtoken_to_count = pickle.load(file)\n                node_to_count = pickle.load(file)\n                target_to_count = pickle.load(file)\n                max_contexts = pickle.load(file)\n                self.num_training_examples = pickle.load(file)\n                print('Dictionaries loaded.')\n\n            if self.config.DATA_NUM_CONTEXTS <= 0:\n                self.config.DATA_NUM_CONTEXTS = max_contexts\n            self.subtoken_to_index, self.index_to_subtoken, self.subtoken_vocab_size = \\\n                Common.load_vocab_from_dict(subtoken_to_count, add_values=[Common.PAD, Common.UNK],\n                                            max_size=config.SUBTOKENS_VOCAB_MAX_SIZE)\n            print('Loaded subtoken vocab. size: %d' % self.subtoken_vocab_size)\n\n            self.target_to_index, self.index_to_target, self.target_vocab_size = \\\n                Common.load_vocab_from_dict(target_to_count, add_values=[Common.PAD, Common.UNK, Common.SOS],\n                                            max_size=config.TARGET_VOCAB_MAX_SIZE)\n            print('Loaded target word vocab. size: %d' % self.target_vocab_size)\n\n            self.node_to_index, self.index_to_node, self.nodes_vocab_size = \\\n                Common.load_vocab_from_dict(node_to_count, add_values=[Common.PAD, Common.UNK], max_size=None)\n            print('Loaded nodes vocab. 
size: %d' % self.nodes_vocab_size)\n            self.epochs_trained = 0\n\n    def close_session(self):\n        self.sess.close()\n\n    def train(self):\n        print('Starting training')\n        start_time = time.time()\n\n        batch_num = 0\n        sum_loss = 0\n        best_f1 = 0\n        best_epoch = 0\n        best_f1_precision = 0\n        best_f1_recall = 0\n        epochs_no_improve = 0\n\n        self.queue_thread = reader.Reader(subtoken_to_index=self.subtoken_to_index,\n                                          node_to_index=self.node_to_index,\n                                          target_to_index=self.target_to_index,\n                                          config=self.config)\n        optimizer, train_loss = self.build_training_graph(self.queue_thread.get_output())\n        self.print_hyperparams()\n        print('Number of trainable params:',\n              np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))\n        self.initialize_session_variables(self.sess)\n        print('Initialized variables')\n        if self.config.LOAD_PATH:\n            self.load_model(self.sess)\n\n        time.sleep(1)\n        print('Started reader...')\n\n        multi_batch_start_time = time.time()\n        for iteration in range(1, (self.config.NUM_EPOCHS // self.config.SAVE_EVERY_EPOCHS) + 1):\n            self.queue_thread.reset(self.sess)\n            try:\n                while True:\n                    batch_num += 1\n                    _, batch_loss = self.sess.run([optimizer, train_loss])\n                    sum_loss += batch_loss\n                    # print('SINGLE BATCH LOSS', batch_loss)\n                    if batch_num % self.num_batches_to_log == 0:\n                        self.trace(sum_loss, batch_num, multi_batch_start_time)\n                        sum_loss = 0\n                        multi_batch_start_time = time.time()\n\n\n            except tf.errors.OutOfRangeError:\n                self.epochs_trained += self.config.SAVE_EVERY_EPOCHS\n                print('Finished %d epochs' % self.config.SAVE_EVERY_EPOCHS)\n                results, precision, recall, f1, rouge = self.evaluate()\n                if self.config.BEAM_WIDTH == 0:\n                    print('Accuracy after %d epochs: %.5f' % (self.epochs_trained, results))\n                else:\n                    print('Accuracy after {} epochs: {}'.format(self.epochs_trained, results))\n                print('After %d epochs: Precision: %.5f, recall: %.5f, F1: %.5f' % (\n                    self.epochs_trained, precision, recall, f1))\n                print('Rouge: ', rouge)\n                if f1 > best_f1:\n                    best_f1 = f1\n                    best_f1_precision = precision\n                    best_f1_recall = recall\n                    best_epoch = self.epochs_trained\n                    epochs_no_improve = 0\n                    self.save_model(self.sess, self.config.SAVE_PATH)\n                else:\n                    epochs_no_improve += self.config.SAVE_EVERY_EPOCHS\n                    if epochs_no_improve >= self.config.PATIENCE:\n                        print('Not improved for %d epochs, stopping training' % self.config.PATIENCE)\n                        print('Best scores - epoch %d: ' % best_epoch)\n                        print('Precision: %.5f, recall: %.5f, F1: %.5f' % (best_f1_precision, best_f1_recall, best_f1))\n                        return\n\n        if self.config.SAVE_PATH:\n            self.save_model(self.sess, 
self.config.SAVE_PATH + '.final')\n            print('Model saved in file: %s' % (self.config.SAVE_PATH + '.final'))\n\n        elapsed = int(time.time() - start_time)\n        print(\"Training time: %sh%sm%ss\\n\" % ((elapsed // 60 // 60), (elapsed // 60) % 60, elapsed % 60))\n\n    def trace(self, sum_loss, batch_num, multi_batch_start_time):\n        multi_batch_elapsed = time.time() - multi_batch_start_time\n        avg_loss = sum_loss / self.num_batches_to_log\n        print('Average loss at batch %d: %f, \\tthroughput: %d samples/sec' % (batch_num, avg_loss,\n                                                                              self.config.BATCH_SIZE * self.num_batches_to_log / (\n                                                                                  multi_batch_elapsed if multi_batch_elapsed > 0 else 1)))\n\n    def evaluate(self, release=False):\n        eval_start_time = time.time()\n        if self.eval_queue is None:\n            self.eval_queue = reader.Reader(subtoken_to_index=self.subtoken_to_index,\n                                            node_to_index=self.node_to_index,\n                                            target_to_index=self.target_to_index,\n                                            config=self.config, is_evaluating=True)\n            reader_output = self.eval_queue.get_output()\n            self.eval_predicted_indices_op, self.eval_topk_values, _, _ = \\\n                self.build_test_graph(reader_output)\n            self.eval_true_target_strings_op = reader_output[reader.TARGET_STRING_KEY]\n            self.saver = tf.train.Saver(max_to_keep=10)\n\n        if self.config.LOAD_PATH and not self.config.TRAIN_PATH:\n            self.initialize_session_variables(self.sess)\n            self.load_model(self.sess)\n            if release:\n                release_name = self.config.LOAD_PATH + '.release'\n                print('Releasing model, output model: %s' % release_name)\n                self.saver.save(self.sess, release_name)\n                shutil.copyfile(src=self.config.LOAD_PATH + '.dict', dst=release_name + '.dict')\n                return None\n        model_dirname = os.path.dirname(self.config.SAVE_PATH if self.config.SAVE_PATH else self.config.LOAD_PATH)\n        ref_file_name = model_dirname + '/ref.txt'\n        predicted_file_name = model_dirname + '/pred.txt'\n        if not os.path.exists(model_dirname):\n            os.makedirs(model_dirname)\n\n        with open(model_dirname + '/log.txt', 'w') as output_file, open(ref_file_name, 'w') as ref_file, open(\n                predicted_file_name,\n                'w') as pred_file:\n            num_correct_predictions = 0 if self.config.BEAM_WIDTH == 0 \\\n                else np.zeros([self.config.BEAM_WIDTH], dtype=np.int32)\n            total_predictions = 0\n            total_prediction_batches = 0\n            true_positive, false_positive, false_negative = 0, 0, 0\n            self.eval_queue.reset(self.sess)\n            start_time = time.time()\n\n            try:\n                while True:\n                    predicted_indices, true_target_strings, top_values = self.sess.run(\n                        [self.eval_predicted_indices_op, self.eval_true_target_strings_op, self.eval_topk_values],\n                    )\n                    true_target_strings = Common.binary_to_string_list(true_target_strings)\n                    ref_file.write(\n                        '\\n'.join(\n                            [name.replace(Common.internal_delimiter, ' ') for name in 
true_target_strings]) + '\\n')\n                    if self.config.BEAM_WIDTH > 0:\n                        # predicted indices: (batch, time, beam_width)\n                        predicted_strings = [[[self.index_to_target[i] for i in timestep] for timestep in example] for\n                                             example in predicted_indices]\n                        predicted_strings = [list(map(list, zip(*example))) for example in\n                                             predicted_strings]  # (batch, top-k, target_length)\n                        pred_file.write('\\n'.join(\n                            [' '.join(Common.filter_impossible_names(words)) for words in predicted_strings[0]]) + '\\n')\n                    else:\n                        predicted_strings = [[self.index_to_target[i] for i in example]\n                                             for example in predicted_indices]\n                        pred_file.write('\\n'.join(\n                            [' '.join(Common.filter_impossible_names(words)) for words in predicted_strings]) + '\\n')\n\n                    num_correct_predictions = self.update_correct_predictions(num_correct_predictions, output_file,\n                                                                              zip(true_target_strings,\n                                                                                  predicted_strings))\n                    true_positive, false_positive, false_negative = self.update_per_subtoken_statistics(\n                        zip(true_target_strings, predicted_strings),\n                        true_positive, false_positive, false_negative)\n\n                    total_predictions += len(true_target_strings)\n                    total_prediction_batches += 1\n                    if total_prediction_batches % self.num_batches_to_log == 0:\n                        elapsed = time.time() - start_time\n                        self.trace_evaluation(output_file, num_correct_predictions, total_predictions, elapsed)\n            except tf.errors.OutOfRangeError:\n                pass\n\n            print('Done testing, epoch reached')\n            output_file.write(str(num_correct_predictions / total_predictions) + '\\n')\n            # Common.compute_bleu(ref_file_name, predicted_file_name)\n\n        elapsed = int(time.time() - eval_start_time)\n        precision, recall, f1 = self.calculate_results(true_positive, false_positive, false_negative)\n        try:\n            files_rouge = FilesRouge()\n            rouge = files_rouge.get_scores(\n                hyp_path=predicted_file_name, ref_path=ref_file_name, avg=True, ignore_empty=True)\n        except ValueError:\n            rouge = 0\n        print(\"Evaluation time: %sh%sm%ss\" % ((elapsed // 60 // 60), (elapsed // 60) % 60, elapsed % 60))\n        return num_correct_predictions / total_predictions, \\\n               precision, recall, f1, rouge\n\n    def update_correct_predictions(self, num_correct_predictions, output_file, results):\n        for original_name, predicted in results:\n            original_name_parts = original_name.split(Common.internal_delimiter) # list\n            filtered_original = Common.filter_impossible_names(original_name_parts) # list\n            predicted_first = predicted\n            if self.config.BEAM_WIDTH > 0:\n                predicted_first = predicted[0]\n            filtered_predicted_first_parts = Common.filter_impossible_names(predicted_first) # list\n\n            if self.config.BEAM_WIDTH == 0:\n        
        output_file.write('Original: ' + Common.internal_delimiter.join(original_name_parts) +\n                                  ' , predicted 1st: ' + Common.internal_delimiter.join(filtered_predicted_first_parts) + '\\n')\n                if filtered_original == filtered_predicted_first_parts or Common.unique(filtered_original) == Common.unique(\n                        filtered_predicted_first_parts) or ''.join(filtered_original) == ''.join(filtered_predicted_first_parts):\n                    num_correct_predictions += 1\n            else:\n                filtered_predicted = [Common.internal_delimiter.join(Common.filter_impossible_names(p)) for p in predicted]\n\n                true_ref = original_name\n                output_file.write('Original: ' + ' '.join(original_name_parts) + '\\n')\n                for i, p in enumerate(filtered_predicted):\n                    output_file.write('\\t@{}: {}'.format(i + 1, ' '.join(p.split(Common.internal_delimiter)))+ '\\n')\n                if true_ref in filtered_predicted:\n                    index_of_correct = filtered_predicted.index(true_ref)\n                    update = np.concatenate(\n                        [np.zeros(index_of_correct, dtype=np.int32),\n                         np.ones(self.config.BEAM_WIDTH - index_of_correct, dtype=np.int32)])\n                    num_correct_predictions += update\n        return num_correct_predictions\n\n    def update_per_subtoken_statistics(self, results, true_positive, false_positive, false_negative):\n        for original_name, predicted in results:\n            if self.config.BEAM_WIDTH > 0:\n                predicted = predicted[0]\n            filtered_predicted_names = Common.filter_impossible_names(predicted)\n            filtered_original_subtokens = Common.filter_impossible_names(original_name.split(Common.internal_delimiter))\n\n            if ''.join(filtered_original_subtokens) == ''.join(filtered_predicted_names):\n                true_positive += len(filtered_original_subtokens)\n                continue\n\n            for subtok in filtered_predicted_names:\n                if subtok in filtered_original_subtokens:\n                    true_positive += 1\n                else:\n                    false_positive += 1\n            for subtok in filtered_original_subtokens:\n                if not subtok in filtered_predicted_names:\n                    false_negative += 1\n        return true_positive, false_positive, false_negative\n\n    def print_hyperparams(self):\n        print('Training batch size:\\t\\t\\t', self.config.BATCH_SIZE)\n        print('Dataset path:\\t\\t\\t\\t', self.config.TRAIN_PATH)\n        print('Training file path:\\t\\t\\t', self.config.TRAIN_PATH + '.train.c2s')\n        print('Validation path:\\t\\t\\t', self.config.TEST_PATH)\n        print('Taking max contexts from each example:\\t', self.config.MAX_CONTEXTS)\n        print('Random path sampling:\\t\\t\\t', self.config.RANDOM_CONTEXTS)\n        print('Embedding size:\\t\\t\\t\\t', self.config.EMBEDDINGS_SIZE)\n        if self.config.BIRNN:\n            print('Using BiLSTMs, each of size:\\t\\t', self.config.RNN_SIZE // 2)\n        else:\n            print('Uni-directional LSTM of size:\\t\\t', self.config.RNN_SIZE)\n        print('Decoder size:\\t\\t\\t\\t', self.config.DECODER_SIZE)\n        print('Decoder layers:\\t\\t\\t\\t', self.config.NUM_DECODER_LAYERS)\n        print('Max path lengths:\\t\\t\\t', self.config.MAX_PATH_LENGTH)\n        print('Max subtokens in a token:\\t\\t', 
self.config.MAX_NAME_PARTS)\n        print('Max target length:\\t\\t\\t', self.config.MAX_TARGET_PARTS)\n        print('Embeddings dropout keep_prob:\\t\\t', self.config.EMBEDDINGS_DROPOUT_KEEP_PROB)\n        print('LSTM dropout keep_prob:\\t\\t\\t', self.config.RNN_DROPOUT_KEEP_PROB)\n        print('============================================')\n\n    @staticmethod\n    def calculate_results(true_positive, false_positive, false_negative):\n        if true_positive + false_positive > 0:\n            precision = true_positive / (true_positive + false_positive)\n        else:\n            precision = 0\n        if true_positive + false_negative > 0:\n            recall = true_positive / (true_positive + false_negative)\n        else:\n            recall = 0\n        if precision + recall > 0:\n            f1 = 2 * precision * recall / (precision + recall)\n        else:\n            f1 = 0\n        return precision, recall, f1\n\n    @staticmethod\n    def trace_evaluation(output_file, correct_predictions, total_predictions, elapsed):\n        accuracy_message = str(correct_predictions / total_predictions)\n        throughput_message = \"Prediction throughput: %d\" % int(total_predictions / (elapsed if elapsed > 0 else 1))\n        output_file.write(accuracy_message + '\\n')\n        output_file.write(throughput_message)\n        # print(accuracy_message)\n        print(throughput_message)\n\n    def build_training_graph(self, input_tensors):\n        target_index = input_tensors[reader.TARGET_INDEX_KEY]\n        target_lengths = input_tensors[reader.TARGET_LENGTH_KEY]\n        path_source_indices = input_tensors[reader.PATH_SOURCE_INDICES_KEY]\n        node_indices = input_tensors[reader.NODE_INDICES_KEY]\n        path_target_indices = input_tensors[reader.PATH_TARGET_INDICES_KEY]\n        valid_context_mask = input_tensors[reader.VALID_CONTEXT_MASK_KEY]\n        path_source_lengths = input_tensors[reader.PATH_SOURCE_LENGTHS_KEY]\n        path_lengths = input_tensors[reader.PATH_LENGTHS_KEY]\n        path_target_lengths = input_tensors[reader.PATH_TARGET_LENGTHS_KEY]\n\n        with tf.variable_scope('model'):\n            subtoken_vocab = tf.get_variable('SUBTOKENS_VOCAB',\n                                             shape=(self.subtoken_vocab_size, self.config.EMBEDDINGS_SIZE),\n                                             dtype=tf.float32,\n                                             initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0,\n                                                                                                        mode='FAN_OUT',\n                                                                                                        uniform=True))\n            target_words_vocab = tf.get_variable('TARGET_WORDS_VOCAB',\n                                                 shape=(self.target_vocab_size, self.config.EMBEDDINGS_SIZE),\n                                                 dtype=tf.float32,\n                                                 initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0,\n                                                                                                            mode='FAN_OUT',\n                                                                                                            uniform=True))\n            nodes_vocab = tf.get_variable('NODES_VOCAB', shape=(self.nodes_vocab_size, self.config.EMBEDDINGS_SIZE),\n                                          dtype=tf.float32,\n          
                                initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0,\n                                                                                                     mode='FAN_OUT',\n                                                                                                     uniform=True))\n            # (batch, max_contexts, decoder_size)\n            batched_contexts = self.compute_contexts(subtoken_vocab=subtoken_vocab, nodes_vocab=nodes_vocab,\n                                                     source_input=path_source_indices, nodes_input=node_indices,\n                                                     target_input=path_target_indices,\n                                                     valid_mask=valid_context_mask,\n                                                     path_source_lengths=path_source_lengths,\n                                                     path_lengths=path_lengths, path_target_lengths=path_target_lengths)\n\n            batch_size = tf.shape(target_index)[0]\n            outputs, final_states = self.decode_outputs(target_words_vocab=target_words_vocab,\n                                                        target_input=target_index, batch_size=batch_size,\n                                                        batched_contexts=batched_contexts,\n                                                        valid_mask=valid_context_mask)\n            step = tf.Variable(0, trainable=False)\n\n            logits = outputs.rnn_output  # (batch, max_output_length, dim * 2 + rnn_size)\n\n            crossent = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target_index, logits=logits)\n            target_words_nonzero = tf.sequence_mask(target_lengths + 1,\n                                                    maxlen=self.config.MAX_TARGET_PARTS + 1, dtype=tf.float32)\n            loss = tf.reduce_sum(crossent * target_words_nonzero) / tf.to_float(batch_size)\n\n            if self.config.USE_MOMENTUM:\n                learning_rate = tf.train.exponential_decay(0.01, step * self.config.BATCH_SIZE,\n                                                           self.num_training_examples,\n                                                           0.95, staircase=True)\n                optimizer = tf.train.MomentumOptimizer(learning_rate, 0.95, use_nesterov=True)\n                train_op = optimizer.minimize(loss, global_step=step)\n            else:\n                params = tf.trainable_variables()\n                gradients = tf.gradients(loss, params)\n                clipped_gradients, _ = tf.clip_by_global_norm(gradients, clip_norm=5)\n                optimizer = tf.train.AdamOptimizer()\n                train_op = optimizer.apply_gradients(zip(clipped_gradients, params))\n\n            self.saver = tf.train.Saver(max_to_keep=10)\n\n        return train_op, loss\n\n    def decode_outputs(self, target_words_vocab, target_input, batch_size, batched_contexts, valid_mask,\n                       is_evaluating=False):\n        num_contexts_per_example = tf.count_nonzero(valid_mask, axis=-1)\n\n        start_fill = tf.fill([batch_size],\n                             self.target_to_index[Common.SOS])  # (batch, )\n        decoder_cell = tf.nn.rnn_cell.MultiRNNCell([\n            tf.nn.rnn_cell.LSTMCell(self.config.DECODER_SIZE) for _ in range(self.config.NUM_DECODER_LAYERS)\n        ])\n        contexts_sum = tf.reduce_sum(batched_contexts * tf.expand_dims(valid_mask, -1),\n                                     
axis=1)  # (batch_size, dim * 2 + rnn_size)\n        contexts_average = tf.divide(contexts_sum, tf.to_float(tf.expand_dims(num_contexts_per_example, -1)))\n        fake_encoder_state = tuple(tf.nn.rnn_cell.LSTMStateTuple(contexts_average, contexts_average) for _ in\n                                   range(self.config.NUM_DECODER_LAYERS))\n        projection_layer = tf.layers.Dense(self.target_vocab_size, use_bias=False)\n        if is_evaluating and self.config.BEAM_WIDTH > 0:\n            batched_contexts = tf.contrib.seq2seq.tile_batch(batched_contexts, multiplier=self.config.BEAM_WIDTH)\n            num_contexts_per_example = tf.contrib.seq2seq.tile_batch(num_contexts_per_example,\n                                                                     multiplier=self.config.BEAM_WIDTH)\n        attention_mechanism = tf.contrib.seq2seq.LuongAttention(\n            num_units=self.config.DECODER_SIZE,\n            memory=batched_contexts\n        )\n        # TF doesn't support beam search with alignment history\n        should_save_alignment_history = is_evaluating and self.config.BEAM_WIDTH == 0\n        decoder_cell = tf.contrib.seq2seq.AttentionWrapper(decoder_cell, attention_mechanism,\n                                                           attention_layer_size=self.config.DECODER_SIZE,\n                                                           alignment_history=should_save_alignment_history)\n        if is_evaluating:\n            if self.config.BEAM_WIDTH > 0:\n                decoder_initial_state = decoder_cell.zero_state(dtype=tf.float32,\n                                                                batch_size=batch_size * self.config.BEAM_WIDTH)\n                decoder_initial_state = decoder_initial_state.clone(\n                    cell_state=tf.contrib.seq2seq.tile_batch(fake_encoder_state, multiplier=self.config.BEAM_WIDTH))\n                decoder = tf.contrib.seq2seq.BeamSearchDecoder(\n                    cell=decoder_cell,\n                    embedding=target_words_vocab,\n                    start_tokens=start_fill,\n                    end_token=self.target_to_index[Common.PAD],\n                    initial_state=decoder_initial_state,\n                    beam_width=self.config.BEAM_WIDTH,\n                    output_layer=projection_layer,\n                    length_penalty_weight=0.0)\n            else:\n                helper = tf.contrib.seq2seq.GreedyEmbeddingHelper(target_words_vocab, start_fill, 0)\n                initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=fake_encoder_state)\n                decoder = tf.contrib.seq2seq.BasicDecoder(cell=decoder_cell, helper=helper, initial_state=initial_state,\n                                                          output_layer=projection_layer)\n\n        else:\n            decoder_cell = tf.nn.rnn_cell.DropoutWrapper(decoder_cell,\n                                                         output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB)\n            target_words_embedding = tf.nn.embedding_lookup(target_words_vocab,\n                                                            tf.concat([tf.expand_dims(start_fill, -1), target_input],\n                                                                      axis=-1))  # (batch, max_target_parts, dim * 2 + rnn_size)\n            helper = tf.contrib.seq2seq.TrainingHelper(inputs=target_words_embedding,\n                                                       sequence_length=tf.ones([batch_size], dtype=tf.int32) * (\n            
                                                self.config.MAX_TARGET_PARTS + 1))\n\n            initial_state = decoder_cell.zero_state(batch_size, tf.float32).clone(cell_state=fake_encoder_state)\n\n            decoder = tf.contrib.seq2seq.BasicDecoder(cell=decoder_cell, helper=helper, initial_state=initial_state,\n                                                      output_layer=projection_layer)\n        outputs, final_states, final_sequence_lengths = tf.contrib.seq2seq.dynamic_decode(decoder,\n                                                                                          maximum_iterations=self.config.MAX_TARGET_PARTS + 1)\n        return outputs, final_states\n\n    def calculate_path_abstraction(self, path_embed, path_lengths, valid_contexts_mask, is_evaluating=False):\n        return self.path_rnn_last_state(is_evaluating, path_embed, path_lengths, valid_contexts_mask)\n\n    def path_rnn_last_state(self, is_evaluating, path_embed, path_lengths, valid_contexts_mask):\n        # path_embed:           (batch, max_contexts, max_path_length+1, dim)\n        # path_lengths:         (batch, max_contexts)\n        # valid_contexts_mask:  (batch, max_contexts)\n        max_contexts = tf.shape(path_embed)[1]\n        flat_paths = tf.reshape(path_embed, shape=[-1, self.config.MAX_PATH_LENGTH,\n                                                   self.config.EMBEDDINGS_SIZE])  # (batch * max_contexts, max_path_length+1, dim)\n        flat_valid_contexts_mask = tf.reshape(valid_contexts_mask, [-1])  # (batch * max_contexts)\n        lengths = tf.multiply(tf.reshape(path_lengths, [-1]),\n                              tf.cast(flat_valid_contexts_mask, tf.int32))  # (batch * max_contexts)\n        if self.config.BIRNN:\n            rnn_cell_fw = tf.nn.rnn_cell.LSTMCell(self.config.RNN_SIZE // 2)\n            rnn_cell_bw = tf.nn.rnn_cell.LSTMCell(self.config.RNN_SIZE // 2)\n            if not is_evaluating:\n                rnn_cell_fw = tf.nn.rnn_cell.DropoutWrapper(rnn_cell_fw,\n                                                            output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB)\n                rnn_cell_bw = tf.nn.rnn_cell.DropoutWrapper(rnn_cell_bw,\n                                                            output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB)\n            _, (state_fw, state_bw) = tf.nn.bidirectional_dynamic_rnn(\n                cell_fw=rnn_cell_fw,\n                cell_bw=rnn_cell_bw,\n                inputs=flat_paths,\n                dtype=tf.float32,\n                sequence_length=lengths)\n            final_rnn_state = tf.concat([state_fw.h, state_bw.h], axis=-1)  # (batch * max_contexts, rnn_size)\n        else:\n            rnn_cell = tf.nn.rnn_cell.LSTMCell(self.config.RNN_SIZE)\n            if not is_evaluating:\n                rnn_cell = tf.nn.rnn_cell.DropoutWrapper(rnn_cell, output_keep_prob=self.config.RNN_DROPOUT_KEEP_PROB)\n            _, state = tf.nn.dynamic_rnn(\n                cell=rnn_cell,\n                inputs=flat_paths,\n                dtype=tf.float32,\n                sequence_length=lengths\n            )\n            final_rnn_state = state.h  # (batch * max_contexts, rnn_size)\n\n        return tf.reshape(final_rnn_state,\n                          shape=[-1, max_contexts, self.config.RNN_SIZE])  # (batch, max_contexts, rnn_size)\n\n    def compute_contexts(self, subtoken_vocab, nodes_vocab, source_input, nodes_input,\n                         target_input, valid_mask, path_source_lengths, path_lengths, 
path_target_lengths,\n                         is_evaluating=False):\n\n        source_word_embed = tf.nn.embedding_lookup(params=subtoken_vocab,\n                                                   ids=source_input)  # (batch, max_contexts, max_name_parts, dim)\n        path_embed = tf.nn.embedding_lookup(params=nodes_vocab,\n                                            ids=nodes_input)  # (batch, max_contexts, max_path_length+1, dim)\n        target_word_embed = tf.nn.embedding_lookup(params=subtoken_vocab,\n                                                   ids=target_input)  # (batch, max_contexts, max_name_parts, dim)\n\n        source_word_mask = tf.expand_dims(\n            tf.sequence_mask(path_source_lengths, maxlen=self.config.MAX_NAME_PARTS, dtype=tf.float32),\n            -1)  # (batch, max_contexts, max_name_parts, 1)\n        target_word_mask = tf.expand_dims(\n            tf.sequence_mask(path_target_lengths, maxlen=self.config.MAX_NAME_PARTS, dtype=tf.float32),\n            -1)  # (batch, max_contexts, max_name_parts, 1)\n\n        source_words_sum = tf.reduce_sum(source_word_embed * source_word_mask,\n                                         axis=2)  # (batch, max_contexts, dim)\n        path_nodes_aggregation = self.calculate_path_abstraction(path_embed, path_lengths, valid_mask,\n                                                                 is_evaluating)  # (batch, max_contexts, rnn_size)\n        target_words_sum = tf.reduce_sum(target_word_embed * target_word_mask, axis=2)  # (batch, max_contexts, dim)\n\n        context_embed = tf.concat([source_words_sum, path_nodes_aggregation, target_words_sum],\n                                  axis=-1)  # (batch, max_contexts, dim * 2 + rnn_size)\n        if not is_evaluating:\n            context_embed = tf.nn.dropout(context_embed, self.config.EMBEDDINGS_DROPOUT_KEEP_PROB)\n\n        batched_embed = tf.layers.dense(inputs=context_embed, units=self.config.DECODER_SIZE,\n                                        activation=tf.nn.tanh, trainable=not is_evaluating, use_bias=False)\n\n        return batched_embed\n\n    def build_test_graph(self, input_tensors):\n        target_index = input_tensors[reader.TARGET_INDEX_KEY]\n        path_source_indices = input_tensors[reader.PATH_SOURCE_INDICES_KEY]\n        node_indices = input_tensors[reader.NODE_INDICES_KEY]\n        path_target_indices = input_tensors[reader.PATH_TARGET_INDICES_KEY]\n        valid_mask = input_tensors[reader.VALID_CONTEXT_MASK_KEY]\n        path_source_lengths = input_tensors[reader.PATH_SOURCE_LENGTHS_KEY]\n        path_lengths = input_tensors[reader.PATH_LENGTHS_KEY]\n        path_target_lengths = input_tensors[reader.PATH_TARGET_LENGTHS_KEY]\n\n        with tf.variable_scope('model', reuse=self.get_should_reuse_variables()):\n            subtoken_vocab = tf.get_variable('SUBTOKENS_VOCAB',\n                                             shape=(self.subtoken_vocab_size, self.config.EMBEDDINGS_SIZE),\n                                             dtype=tf.float32, trainable=False)\n            target_words_vocab = tf.get_variable('TARGET_WORDS_VOCAB',\n                                                 shape=(self.target_vocab_size, self.config.EMBEDDINGS_SIZE),\n                                                 dtype=tf.float32, trainable=False)\n            nodes_vocab = tf.get_variable('NODES_VOCAB',\n                                          shape=(self.nodes_vocab_size, self.config.EMBEDDINGS_SIZE),\n                                          dtype=tf.float32, 
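# not trainable: the test graph restores these embeddings from the checkpoint\n                                          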
trainable=False)\n\n            batched_contexts = self.compute_contexts(subtoken_vocab=subtoken_vocab, nodes_vocab=nodes_vocab,\n                                                     source_input=path_source_indices, nodes_input=node_indices,\n                                                     target_input=path_target_indices,\n                                                     valid_mask=valid_mask,\n                                                     path_source_lengths=path_source_lengths,\n                                                     path_lengths=path_lengths, path_target_lengths=path_target_lengths,\n                                                     is_evaluating=True)\n\n            outputs, final_states = self.decode_outputs(target_words_vocab=target_words_vocab,\n                                                        target_input=target_index, batch_size=tf.shape(target_index)[0],\n                                                        batched_contexts=batched_contexts, valid_mask=valid_mask,\n                                                        is_evaluating=True)\n\n        if self.config.BEAM_WIDTH > 0:\n            predicted_indices = outputs.predicted_ids\n            topk_values = outputs.beam_search_decoder_output.scores\n            attention_weights = [tf.no_op()]\n        else:\n            predicted_indices = outputs.sample_id\n            topk_values = tf.constant(1, shape=(1, 1), dtype=tf.float32)\n            attention_weights = tf.squeeze(final_states.alignment_history.stack(), 1)\n\n        return predicted_indices, topk_values, target_index, attention_weights\n\n    def predict(self, predict_data_lines):\n        if self.predict_queue is None:\n            self.predict_queue = reader.Reader(subtoken_to_index=self.subtoken_to_index,\n                                               node_to_index=self.node_to_index,\n                                               target_to_index=self.target_to_index,\n                                               config=self.config, is_evaluating=True)\n            self.predict_placeholder = tf.placeholder(tf.string)\n            reader_output = self.predict_queue.process_from_placeholder(self.predict_placeholder)\n            reader_output = {key: tf.expand_dims(tensor, 0) for key, tensor in reader_output.items()}\n            self.predict_top_indices_op, self.predict_top_scores_op, _, self.attention_weights_op = \\\n                self.build_test_graph(reader_output)\n            self.predict_source_string = reader_output[reader.PATH_SOURCE_STRINGS_KEY]\n            self.predict_path_string = reader_output[reader.PATH_STRINGS_KEY]\n            self.predict_path_target_string = reader_output[reader.PATH_TARGET_STRINGS_KEY]\n            self.predict_target_strings_op = reader_output[reader.TARGET_STRING_KEY]\n\n            self.initialize_session_variables(self.sess)\n            self.saver = tf.train.Saver()\n            self.load_model(self.sess)\n\n        results = []\n        for line in predict_data_lines:\n            predicted_indices, top_scores, true_target_strings, attention_weights, path_source_string, path_strings, path_target_string = self.sess.run(\n                [self.predict_top_indices_op, self.predict_top_scores_op, self.predict_target_strings_op,\n                 self.attention_weights_op,\n                 self.predict_source_string, self.predict_path_string, self.predict_path_target_string],\n                feed_dict={self.predict_placeholder: line})\n\n            top_scores = 
np.squeeze(top_scores, axis=0)\n            path_source_string = path_source_string.reshape((-1))\n            path_strings = path_strings.reshape((-1))\n            path_target_string = path_target_string.reshape((-1))\n            predicted_indices = np.squeeze(predicted_indices, axis=0)\n            true_target_strings = Common.binary_to_string(true_target_strings[0])\n\n            if self.config.BEAM_WIDTH > 0:\n                predicted_strings = [[self.index_to_target[sugg] for sugg in timestep]\n                                     for timestep in predicted_indices]  # (target_length, top-k)\n                predicted_strings = list(map(list, zip(*predicted_strings)))  # (top-k, target_length)\n                top_scores = [np.exp(np.sum(s)) for s in zip(*top_scores)]\n            else:\n                predicted_strings = [self.index_to_target[idx]\n                                     for idx in predicted_indices]  # (target_length)\n\n            attention_per_path = None\n            if self.config.BEAM_WIDTH == 0:\n                attention_per_path = self.get_attention_per_path(path_source_string, path_strings, path_target_string,\n                                                                 attention_weights)\n\n            results.append((true_target_strings, predicted_strings, top_scores, attention_per_path))\n        return results\n\n    @staticmethod\n    def get_attention_per_path(source_strings, path_strings, target_strings, attention_weights):\n        # attention_weights:  (time, contexts)\n        results = []\n        for time_step in attention_weights:\n            attention_per_context = {}\n            for source, path, target, weight in zip(source_strings, path_strings, target_strings, time_step):\n                string_triplet = (\n                    Common.binary_to_string(source), Common.binary_to_string(path), Common.binary_to_string(target))\n                attention_per_context[string_triplet] = weight\n            results.append(attention_per_context)\n        return results\n\n    def save_model(self, sess, path):\n        save_target = path + '_iter%d' % self.epochs_trained\n        dirname = os.path.dirname(save_target)\n        if not os.path.exists(dirname):\n            os.makedirs(dirname)\n        self.saver.save(sess, save_target)\n\n        dictionaries_path = save_target + '.dict'\n        with open(dictionaries_path, 'wb') as file:\n            pickle.dump(self.subtoken_to_index, file)\n            pickle.dump(self.index_to_subtoken, file)\n            pickle.dump(self.subtoken_vocab_size, file)\n\n            pickle.dump(self.target_to_index, file)\n            pickle.dump(self.index_to_target, file)\n            pickle.dump(self.target_vocab_size, file)\n\n            pickle.dump(self.node_to_index, file)\n            pickle.dump(self.index_to_node, file)\n            pickle.dump(self.nodes_vocab_size, file)\n\n            pickle.dump(self.num_training_examples, file)\n            pickle.dump(self.epochs_trained, file)\n            pickle.dump(self.config, file)\n        print('Saved after %d epochs in: %s' % (self.epochs_trained, save_target))\n\n    def load_model(self, sess):\n        if sess is not None:\n            self.saver.restore(sess, self.config.LOAD_PATH)\n            print('Done loading model')\n        with open(self.config.LOAD_PATH + '.dict', 'rb') as file:\n            if self.subtoken_to_index is not None:\n                return\n            print('Loading dictionaries from: ' + 
self.config.LOAD_PATH)\n            self.subtoken_to_index = pickle.load(file)\n            self.index_to_subtoken = pickle.load(file)\n            self.subtoken_vocab_size = pickle.load(file)\n\n            self.target_to_index = pickle.load(file)\n            self.index_to_target = pickle.load(file)\n            self.target_vocab_size = pickle.load(file)\n\n            self.node_to_index = pickle.load(file)\n            self.index_to_node = pickle.load(file)\n            self.nodes_vocab_size = pickle.load(file)\n\n            self.num_training_examples = pickle.load(file)\n            self.epochs_trained = pickle.load(file)\n            saved_config = pickle.load(file)\n            self.config.take_model_hyperparams_from(saved_config)\n            print('Done loading dictionaries')\n\n    @staticmethod\n    def initialize_session_variables(sess):\n        sess.run(tf.group(tf.global_variables_initializer(), tf.local_variables_initializer(), tf.tables_initializer()))\n\n    def get_should_reuse_variables(self):\n        if self.config.TRAIN_PATH:\n            return True\n        else:\n            return None\n"
  },
  {
    "path": "preprocess.py",
    "content": "import pickle\nfrom argparse import ArgumentParser\n\nimport numpy as np\n\nimport common\n\n'''\nThis script preprocesses the data from MethodPaths. It truncates methods with too many contexts,\nand pads methods with less paths with spaces.\n'''\n\n\ndef save_dictionaries(dataset_name, subtoken_to_count, node_to_count, target_to_count, max_contexts, num_examples):\n    save_dict_file_path = '{}.dict.c2s'.format(dataset_name)\n    with open(save_dict_file_path, 'wb') as file:\n        pickle.dump(subtoken_to_count, file)\n        pickle.dump(node_to_count, file)\n        pickle.dump(target_to_count, file)\n        pickle.dump(max_contexts, file)\n        pickle.dump(num_examples, file)\n        print('Dictionaries saved to: {}'.format(save_dict_file_path))\n\n\ndef process_file(file_path, data_file_role, dataset_name, max_contexts, max_data_contexts):\n    sum_total = 0\n    sum_sampled = 0\n    total = 0\n    max_unfiltered = 0\n    max_contexts_to_sample = max_data_contexts if data_file_role == 'train' else max_contexts\n    output_path = '{}.{}.c2s'.format(dataset_name, data_file_role)\n    with open(output_path, 'w') as outfile:\n        with open(file_path, 'r') as file:\n            for line in file:\n                parts = line.rstrip('\\n').split(' ')\n                target_name = parts[0]\n                contexts = parts[1:]\n\n                if len(contexts) > max_unfiltered:\n                    max_unfiltered = len(contexts)\n\n                sum_total += len(contexts)\n                if len(contexts) > max_contexts_to_sample:\n                    contexts = np.random.choice(contexts, max_contexts_to_sample, replace=False)\n\n                sum_sampled += len(contexts)\n\n                csv_padding = \" \" * (max_data_contexts - len(contexts))\n                total += 1\n                outfile.write(target_name + ' ' + \" \".join(contexts) + csv_padding + '\\n')\n\n    print('File: ' + file_path)\n    print('Average total contexts: ' + str(float(sum_total) / total))\n    print('Average final (after sampling) contexts: ' + str(float(sum_sampled) / total))\n    print('Total examples: ' + str(total))\n    print('Max number of contexts per word: ' + str(max_unfiltered))\n    return total\n\n\ndef context_full_found(context_parts, word_to_count, path_to_count):\n    return context_parts[0] in word_to_count \\\n           and context_parts[1] in path_to_count and context_parts[2] in word_to_count\n\n\ndef context_partial_found(context_parts, word_to_count, path_to_count):\n    return context_parts[0] in word_to_count \\\n           or context_parts[1] in path_to_count or context_parts[2] in word_to_count\n\n\nif __name__ == '__main__':\n    parser = ArgumentParser()\n    parser.add_argument(\"-trd\", \"--train_data\", dest=\"train_data_path\",\n                        help=\"path to training data file\", required=True)\n    parser.add_argument(\"-ted\", \"--test_data\", dest=\"test_data_path\",\n                        help=\"path to test data file\", required=True)\n    parser.add_argument(\"-vd\", \"--val_data\", dest=\"val_data_path\",\n                        help=\"path to validation data file\", required=True)\n    parser.add_argument(\"-mc\", \"--max_contexts\", dest=\"max_contexts\", default=200,\n                        help=\"number of max contexts to keep in test+validation\", required=False)\n    parser.add_argument(\"-mdc\", \"--max_data_contexts\", dest=\"max_data_contexts\", default=1000,\n                        help=\"number of max contexts 
to keep in the dataset\", required=False)\n    parser.add_argument(\"-svs\", \"--subtoken_vocab_size\", dest=\"subtoken_vocab_size\", default=186277,\n                        help=\"Max number of source subtokens to keep in the vocabulary\", required=False)\n    parser.add_argument(\"-tvs\", \"--target_vocab_size\", dest=\"target_vocab_size\", default=26347,\n                        help=\"Max number of target words to keep in the vocabulary\", required=False)\n    parser.add_argument(\"-sh\", \"--subtoken_histogram\", dest=\"subtoken_histogram\",\n                        help=\"subtoken histogram file\", metavar=\"FILE\", required=True)\n    parser.add_argument(\"-nh\", \"--node_histogram\", dest=\"node_histogram\",\n                        help=\"node histogram file\", metavar=\"FILE\", required=True)\n    parser.add_argument(\"-th\", \"--target_histogram\", dest=\"target_histogram\",\n                        help=\"target histogram file\", metavar=\"FILE\", required=True)\n    parser.add_argument(\"-o\", \"--output_name\", dest=\"output_name\",\n                        help=\"output name - the base name for the created dataset\", required=True)\n    args = parser.parse_args()\n\n    train_data_path = args.train_data_path\n    test_data_path = args.test_data_path\n    val_data_path = args.val_data_path\n    subtoken_histogram_path = args.subtoken_histogram\n    node_histogram_path = args.node_histogram\n\n    subtoken_to_count = common.Common.load_histogram(subtoken_histogram_path,\n                                                     max_size=int(args.subtoken_vocab_size))\n    node_to_count = common.Common.load_histogram(node_histogram_path,\n                                                 max_size=None)\n    target_to_count = common.Common.load_histogram(args.target_histogram,\n                                                   max_size=int(args.target_vocab_size))\n    print('subtoken vocab size: ', len(subtoken_to_count))\n    print('node vocab size: ', len(node_to_count))\n    print('target vocab size: ', len(target_to_count))\n\n    num_training_examples = 0\n    for data_file_path, data_role in zip([test_data_path, val_data_path, train_data_path], ['test', 'val', 'train']):\n        num_examples = process_file(file_path=data_file_path, data_file_role=data_role, dataset_name=args.output_name,\n                                    max_contexts=int(args.max_contexts), max_data_contexts=int(args.max_data_contexts))\n        if data_role == 'train':\n            num_training_examples = num_examples\n\n    save_dictionaries(dataset_name=args.output_name, subtoken_to_count=subtoken_to_count,\n                      node_to_count=node_to_count, target_to_count=target_to_count,\n                      max_contexts=int(args.max_data_contexts), num_examples=num_training_examples)\n"
  },
  {
    "path": "preprocess.sh",
    "content": "#!/usr/bin/env bash\n###########################################################\n# Change the following values to preprocess a new dataset.\n# TRAIN_DIR, VAL_DIR and TEST_DIR should be paths to      \n#   directories containing sub-directories with .java files\n# DATASET_NAME is just a name for the currently extracted \n#   dataset.                                              \n# MAX_DATA_CONTEXTS is the number of contexts to keep in the dataset for each \n#   method (by default 1000). At training time, these contexts\n#   will be downsampled dynamically to MAX_CONTEXTS.\n# MAX_CONTEXTS - the number of actual contexts (by default 200) \n# that are taken into consideration (out of MAX_DATA_CONTEXTS)\n# every training iteration. To avoid randomness at test time, \n# for the test and validation sets only MAX_CONTEXTS contexts are kept \n# (while for training, MAX_DATA_CONTEXTS are kept and MAX_CONTEXTS are\n# selected dynamically during training).\n# SUBTOKEN_VOCAB_SIZE, TARGET_VOCAB_SIZE -   \n#   - the number of subtokens and target words to keep \n#   in the vocabulary (the top occurring words and paths will be kept). \n# NUM_THREADS - the number of parallel threads to use. It is \n#   recommended to use a multi-core machine for the preprocessing \n#   step and set this value to the number of cores.\n# PYTHON - python3 interpreter alias.\nTRAIN_DIR=my_training_dir\nVAL_DIR=my_val_dir\nTEST_DIR=my_test_dir\nDATASET_NAME=my_dataset\nMAX_DATA_CONTEXTS=1000\nMAX_CONTEXTS=200\nSUBTOKEN_VOCAB_SIZE=186277\nTARGET_VOCAB_SIZE=26347\nNUM_THREADS=64\nPYTHON=python3\n###########################################################\n\nTRAIN_DATA_FILE=${DATASET_NAME}.train.raw.txt\nVAL_DATA_FILE=${DATASET_NAME}.val.raw.txt\nTEST_DATA_FILE=${DATASET_NAME}.test.raw.txt\nEXTRACTOR_JAR=JavaExtractor/JPredict/target/JavaExtractor-0.0.1-SNAPSHOT.jar\n\nmkdir -p data\nmkdir -p data/${DATASET_NAME}\n\necho \"Extracting paths from validation set...\"\n${PYTHON} JavaExtractor/extract.py --dir ${VAL_DIR} --max_path_length 8 --max_path_width 2 --num_threads ${NUM_THREADS} --jar ${EXTRACTOR_JAR} > ${VAL_DATA_FILE} 2>> error_log.txt\necho \"Finished extracting paths from validation set\"\necho \"Extracting paths from test set...\"\n${PYTHON} JavaExtractor/extract.py --dir ${TEST_DIR} --max_path_length 8 --max_path_width 2 --num_threads ${NUM_THREADS} --jar ${EXTRACTOR_JAR} > ${TEST_DATA_FILE} 2>> error_log.txt\necho \"Finished extracting paths from test set\"\necho \"Extracting paths from training set...\"\n${PYTHON} JavaExtractor/extract.py --dir ${TRAIN_DIR} --max_path_length 8 --max_path_width 2 --num_threads ${NUM_THREADS} --jar ${EXTRACTOR_JAR} | shuf > ${TRAIN_DATA_FILE} 2>> error_log.txt\necho \"Finished extracting paths from training set\"\n\nTARGET_HISTOGRAM_FILE=data/${DATASET_NAME}/${DATASET_NAME}.histo.tgt.c2s\nSOURCE_SUBTOKEN_HISTOGRAM=data/${DATASET_NAME}/${DATASET_NAME}.histo.ori.c2s\nNODE_HISTOGRAM_FILE=data/${DATASET_NAME}/${DATASET_NAME}.histo.node.c2s\n\necho \"Creating histograms from the training data\"\ncat ${TRAIN_DATA_FILE} | cut -d' ' -f1 | tr '|' '\\n' | awk '{n[$0]++} END {for (i in n) print i,n[i]}' > ${TARGET_HISTOGRAM_FILE}\ncat ${TRAIN_DATA_FILE} | cut -d' ' -f2- | tr ' ' '\\n' | cut -d',' -f1,3 | tr ',|' '\\n' | awk '{n[$0]++} END {for (i in n) print i,n[i]}' > ${SOURCE_SUBTOKEN_HISTOGRAM}\ncat ${TRAIN_DATA_FILE} | cut -d' ' -f2- | tr ' ' '\\n' | cut -d',' -f2 | tr '|' '\\n' | awk '{n[$0]++} END {for (i in n) print i,n[i]}' > ${NODE_HISTOGRAM_FILE}\n\n${PYTHON} 
preprocess.py --train_data ${TRAIN_DATA_FILE} --test_data ${TEST_DATA_FILE} --val_data ${VAL_DATA_FILE} \\\n  --max_contexts ${MAX_CONTEXTS} --max_data_contexts ${MAX_DATA_CONTEXTS} --subtoken_vocab_size ${SUBTOKEN_VOCAB_SIZE} \\\n  --target_vocab_size ${TARGET_VOCAB_SIZE} --subtoken_histogram ${SOURCE_SUBTOKEN_HISTOGRAM} \\\n  --node_histogram ${NODE_HISTOGRAM_FILE} --target_histogram ${TARGET_HISTOGRAM_FILE} --output_name data/${DATASET_NAME}/${DATASET_NAME}\n\n# If all went well, the raw data files can be deleted, because preprocess.py creates new files\n# with a truncated and padded number of paths for each example.\nrm ${TRAIN_DATA_FILE} ${VAL_DATA_FILE} ${TEST_DATA_FILE} ${TARGET_HISTOGRAM_FILE} ${SOURCE_SUBTOKEN_HISTOGRAM} \\\n  ${NODE_HISTOGRAM_FILE}\n"
  },
  {
    "path": "preprocess_csharp.sh",
    "content": "#!/usr/bin/env bash\n###########################################################\n# Change the following values to preprocess a new dataset.\n# TRAIN_DIR, VAL_DIR and TEST_DIR should be paths to      \n#   directories containing sub-directories with .java files\n# DATASET_NAME is just a name for the currently extracted \n#   dataset.                                              \n# MAX_DATA_CONTEXTS is the number of contexts to keep in the dataset for each \n#   method (by default 1000). At training time, these contexts\n#   will be downsampled dynamically to MAX_CONTEXTS.\n# MAX_CONTEXTS - the number of actual contexts (by default 200) \n# that are taken into consideration (out of MAX_DATA_CONTEXTS)\n# every training iteration. To avoid randomness at test time, \n# for the test and validation sets only MAX_CONTEXTS contexts are kept \n# (while for training, MAX_DATA_CONTEXTS are kept and MAX_CONTEXTS are\n# selected dynamically during training).\n# SUBTOKEN_VOCAB_SIZE, TARGET_VOCAB_SIZE -   \n#   - the number of subtokens and target words to keep \n#   in the vocabulary (the top occurring words and paths will be kept). \n# NUM_THREADS - the number of parallel threads to use. It is \n#   recommended to use a multi-core machine for the preprocessing \n#   step and set this value to the number of cores.\n# PYTHON - python3 interpreter alias.\nTRAIN_DIR=JavaExtractor/JPredict/src/main/java/JavaExtractor/Common\nVAL_DIR=JavaExtractor/JPredict/src/main/java/JavaExtractor/Common\nTEST_DIR=JavaExtractor/JPredict/src/main/java/JavaExtractor/Common\nDATASET_NAME=my_dataset\nMAX_DATA_CONTEXTS=1000\nMAX_CONTEXTS=200\nSUBTOKEN_VOCAB_SIZE=186277\nTARGET_VOCAB_SIZE=26347\nNUM_THREADS=64\nPYTHON=python3\n###########################################################\n\nTRAIN_DATA_FILE=${DATASET_NAME}.train.raw.txt\nVAL_DATA_FILE=${DATASET_NAME}.val.raw.txt\nTEST_DATA_FILE=${DATASET_NAME}.test.raw.txt\nEXTRACTOR_JAR=CSharpExtractor/CSharpExtractor/Extractor/Extractor.csproj\n\nmkdir -p data\nmkdir -p data/${DATASET_NAME}\n\necho \"Extracting paths from validation set...\"\n${PYTHON} CSharpExtractor/extract.py --dir ${VAL_DIR} --max_path_length 8 --max_path_width 2 --num_threads ${NUM_THREADS} --csproj ${EXTRACTOR_JAR} --ofile_name ${VAL_DATA_FILE} 2>> error_log.txt\necho \"Finished extracting paths from validation set\"\necho \"Extracting paths from test set...\"\n${PYTHON} CSharpExtractor/extract.py --dir ${TEST_DIR} --max_path_length 8 --max_path_width 2 --num_threads ${NUM_THREADS} --csproj ${EXTRACTOR_JAR} --ofile_name ${TEST_DATA_FILE} 2>> error_log.txt\necho \"Finished extracting paths from test set\"\necho \"Extracting paths from training set...\"\n${PYTHON} CSharpExtractor/extract.py --dir ${TRAIN_DIR} --max_path_length 8 --max_path_width 2 --num_threads ${NUM_THREADS} --csproj ${EXTRACTOR_JAR} --ofile_name ${TRAIN_DATA_FILE}_unshuf 2>> error_log.txt\necho \"Finished extracting paths from training set\"\necho \"Shuffling training data\"\ncat ${TRAIN_DATA_FILE}_unshuf | shuf > ${TRAIN_DATA_FILE}\nrm ${TRAIN_DATA_FILE}_unshuf\n\nTARGET_HISTOGRAM_FILE=data/${DATASET_NAME}/${DATASET_NAME}.histo.tgt.c2s\nSOURCE_SUBTOKEN_HISTOGRAM=data/${DATASET_NAME}/${DATASET_NAME}.histo.ori.c2s\nNODE_HISTOGRAM_FILE=data/${DATASET_NAME}/${DATASET_NAME}.histo.node.c2s\n\necho \"Creating histograms from the training data\"\ncat ${TRAIN_DATA_FILE} | cut -d' ' -f1 | tr '|' '\\n' | awk '{n[$0]++} END {for (i in n) print i,n[i]}' > ${TARGET_HISTOGRAM_FILE}\ncat ${TRAIN_DATA_FILE} | cut -d' ' -f2- | tr ' ' '\\n' 
| cut -d',' -f1,3 | tr ',|' '\\n' | awk '{n[$0]++} END {for (i in n) print i,n[i]}' > ${SOURCE_SUBTOKEN_HISTOGRAM}\ncat ${TRAIN_DATA_FILE} | cut -d' ' -f2- | tr ' ' '\\n' | cut -d',' -f2 | tr '|' '\\n' | awk '{n[$0]++} END {for (i in n) print i,n[i]}' > ${NODE_HISTOGRAM_FILE}\n\n${PYTHON} preprocess.py --train_data ${TRAIN_DATA_FILE} --test_data ${TEST_DATA_FILE} --val_data ${VAL_DATA_FILE} \\\n  --max_contexts ${MAX_CONTEXTS} --max_data_contexts ${MAX_DATA_CONTEXTS} --subtoken_vocab_size ${SUBTOKEN_VOCAB_SIZE} \\\n  --target_vocab_size ${TARGET_VOCAB_SIZE} --subtoken_histogram ${SOURCE_SUBTOKEN_HISTOGRAM} \\\n  --node_histogram ${NODE_HISTOGRAM_FILE} --target_histogram ${TARGET_HISTOGRAM_FILE} --output_name data/${DATASET_NAME}/${DATASET_NAME}\n\n# If all went well, the raw data files can be deleted, because preprocess.py creates new files\n# with a truncated and padded number of paths for each example.\nrm ${TRAIN_DATA_FILE} ${VAL_DATA_FILE} ${TEST_DATA_FILE} ${TARGET_HISTOGRAM_FILE} ${SOURCE_SUBTOKEN_HISTOGRAM} \\\n  ${NODE_HISTOGRAM_FILE}\n"
  },
  {
    "path": "reader.py",
    "content": "import os\n\nimport tensorflow as tf\n\nfrom common import Common\n\nTARGET_INDEX_KEY = 'TARGET_INDEX_KEY'\nTARGET_STRING_KEY = 'TARGET_STRING_KEY'\nTARGET_LENGTH_KEY = 'TARGET_LENGTH_KEY'\nPATH_SOURCE_INDICES_KEY = 'PATH_SOURCE_INDICES_KEY'\nNODE_INDICES_KEY = 'NODES_INDICES_KEY'\nPATH_TARGET_INDICES_KEY = 'PATH_TARGET_INDICES_KEY'\nVALID_CONTEXT_MASK_KEY = 'VALID_CONTEXT_MASK_KEY'\nPATH_SOURCE_LENGTHS_KEY = 'PATH_SOURCE_LENGTHS_KEY'\nPATH_LENGTHS_KEY = 'PATH_LENGTHS_KEY'\nPATH_TARGET_LENGTHS_KEY = 'PATH_TARGET_LENGTHS_KEY'\nPATH_SOURCE_STRINGS_KEY = 'PATH_SOURCE_STRINGS_KEY'\nPATH_STRINGS_KEY = 'PATH_STRINGS_KEY'\nPATH_TARGET_STRINGS_KEY = 'PATH_TARGET_STRINGS_KEY'\n\n\nclass Reader:\n    class_subtoken_table = None\n    class_target_table = None\n    class_node_table = None\n\n    def __init__(self, subtoken_to_index, target_to_index, node_to_index, config, is_evaluating=False):\n        self.config = config\n        self.file_path = config.TEST_PATH if is_evaluating else (config.TRAIN_PATH + '.train.c2s')\n        if self.file_path is not None and not os.path.exists(self.file_path):\n            print(\n                '%s cannot find file: %s' % ('Evaluation reader' if is_evaluating else 'Train reader', self.file_path))\n        self.batch_size = config.TEST_BATCH_SIZE if is_evaluating else config.BATCH_SIZE\n        self.is_evaluating = is_evaluating\n\n        self.context_pad = '{},{},{}'.format(Common.PAD, Common.PAD, Common.PAD)\n        self.record_defaults = [[self.context_pad]] * (self.config.DATA_NUM_CONTEXTS + 1)\n\n        self.subtoken_table = Reader.get_subtoken_table(subtoken_to_index)\n        self.target_table = Reader.get_target_table(target_to_index)\n        self.node_table = Reader.get_node_table(node_to_index)\n        if self.file_path is not None:\n            self.output_tensors = self.compute_output()\n\n    @classmethod\n    def get_subtoken_table(cls, subtoken_to_index):\n        if cls.class_subtoken_table is None:\n            cls.class_subtoken_table = cls.initialize_hash_map(subtoken_to_index, subtoken_to_index[Common.UNK])\n        return cls.class_subtoken_table\n\n    @classmethod\n    def get_target_table(cls, target_to_index):\n        if cls.class_target_table is None:\n            cls.class_target_table = cls.initialize_hash_map(target_to_index, target_to_index[Common.UNK])\n        return cls.class_target_table\n\n    @classmethod\n    def get_node_table(cls, node_to_index):\n        if cls.class_node_table is None:\n            cls.class_node_table = cls.initialize_hash_map(node_to_index, node_to_index[Common.UNK])\n        return cls.class_node_table\n\n    @classmethod\n    def initialize_hash_map(cls, word_to_index, default_value):\n        return tf.contrib.lookup.HashTable(\n            tf.contrib.lookup.KeyValueTensorInitializer(list(word_to_index.keys()), list(word_to_index.values()),\n                                                        key_dtype=tf.string,\n                                                        value_dtype=tf.int32), default_value)\n\n    def process_from_placeholder(self, row):\n        parts = tf.io.decode_csv(row, record_defaults=self.record_defaults, field_delim=' ', use_quote_delim=False)\n        return self.process_dataset(*parts)\n\n    def process_dataset(self, *row_parts):\n        row_parts = list(row_parts)\n        word = row_parts[0]  # (, )\n\n        if not self.is_evaluating and self.config.RANDOM_CONTEXTS:\n            all_contexts = tf.stack(row_parts[1:])\n            
all_contexts_padded = tf.concat([all_contexts, [self.context_pad]], axis=-1)\n            index_of_blank_context = tf.where(tf.equal(all_contexts_padded, self.context_pad))\n            num_contexts_per_example = tf.reduce_min(index_of_blank_context)\n\n            # if there are fewer than self.config.MAX_CONTEXTS valid contexts, still sample self.config.MAX_CONTEXTS\n            safe_limit = tf.cast(tf.maximum(num_contexts_per_example, self.config.MAX_CONTEXTS), tf.int32)\n            rand_indices = tf.random_shuffle(tf.range(safe_limit))[:self.config.MAX_CONTEXTS]\n            contexts = tf.gather(all_contexts, rand_indices)  # (max_contexts,)\n        else:\n            contexts = row_parts[1:(self.config.MAX_CONTEXTS + 1)]  # (max_contexts,)\n\n        # contexts: (max_contexts, )\n        split_contexts = tf.string_split(contexts, delimiter=',', skip_empty=False)\n        sparse_split_contexts = tf.sparse.SparseTensor(indices=split_contexts.indices,\n                                                       values=split_contexts.values,\n                                                       dense_shape=[self.config.MAX_CONTEXTS, 3])\n        dense_split_contexts = tf.reshape(\n            tf.sparse.to_dense(sp_input=sparse_split_contexts, default_value=Common.PAD),\n            shape=[self.config.MAX_CONTEXTS, 3])  # (max_contexts, 3)\n\n        split_target_labels = tf.string_split(tf.expand_dims(word, -1), delimiter='|')\n        target_dense_shape = [1, tf.maximum(tf.to_int64(self.config.MAX_TARGET_PARTS),\n                                            split_target_labels.dense_shape[1] + 1)]\n        sparse_target_labels = tf.sparse.SparseTensor(indices=split_target_labels.indices,\n                                                      values=split_target_labels.values,\n                                                      dense_shape=target_dense_shape)\n        dense_target_label = tf.reshape(tf.sparse.to_dense(sp_input=sparse_target_labels,\n                                                           default_value=Common.PAD), [-1])\n        index_of_blank = tf.where(tf.equal(dense_target_label, Common.PAD))\n        target_length = tf.reduce_min(index_of_blank)\n        dense_target_label = dense_target_label[:self.config.MAX_TARGET_PARTS]\n        clipped_target_lengths = tf.clip_by_value(target_length, clip_value_min=0,\n                                                  clip_value_max=self.config.MAX_TARGET_PARTS)\n        target_word_labels = tf.concat([\n            self.target_table.lookup(dense_target_label), [0]], axis=-1)  # (max_target_parts + 1) of int\n\n        path_source_strings = tf.slice(dense_split_contexts, [0, 0], [self.config.MAX_CONTEXTS, 1])  # (max_contexts, 1)\n        flat_source_strings = tf.reshape(path_source_strings, [-1])  # (max_contexts)\n        split_source = tf.string_split(flat_source_strings, delimiter='|',\n                                       skip_empty=False)  # (max_contexts, max_name_parts)\n\n        sparse_split_source = tf.sparse.SparseTensor(indices=split_source.indices, values=split_source.values,\n                                                     dense_shape=[self.config.MAX_CONTEXTS,\n                                                                  tf.maximum(tf.to_int64(self.config.MAX_NAME_PARTS),\n                                                                             split_source.dense_shape[1])])\n        dense_split_source = tf.sparse.to_dense(sp_input=sparse_split_source,\n                                                
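# entries missing from the sparse split are filled with PAD and excluded from the length count below\n                                                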
default_value=Common.PAD)  # (max_contexts, max_name_parts)\n        dense_split_source = tf.slice(dense_split_source, [0, 0], [-1, self.config.MAX_NAME_PARTS])\n        path_source_indices = self.subtoken_table.lookup(dense_split_source)  # (max_contexts, max_name_parts)\n        path_source_lengths = tf.reduce_sum(tf.cast(tf.not_equal(dense_split_source, Common.PAD), tf.int32),\n                                            -1)  # (max_contexts)\n\n        path_strings = tf.slice(dense_split_contexts, [0, 1], [self.config.MAX_CONTEXTS, 1])\n        flat_path_strings = tf.reshape(path_strings, [-1])\n        split_path = tf.string_split(flat_path_strings, delimiter='|', skip_empty=False)\n        sparse_split_path = tf.sparse.SparseTensor(indices=split_path.indices, values=split_path.values,\n                                                   dense_shape=[self.config.MAX_CONTEXTS, self.config.MAX_PATH_LENGTH])\n        dense_split_path = tf.sparse.to_dense(sp_input=sparse_split_path,\n                                              default_value=Common.PAD)  # (max_contexts, max_path_length)\n\n        node_indices = self.node_table.lookup(dense_split_path)  # (max_contexts, max_path_length)\n        path_lengths = tf.reduce_sum(tf.cast(tf.not_equal(dense_split_path, Common.PAD), tf.int32),\n                                     -1)  # (max_contexts)\n\n        path_target_strings = tf.slice(dense_split_contexts, [0, 2], [self.config.MAX_CONTEXTS, 1])  # (max_contexts, 1)\n        flat_target_strings = tf.reshape(path_target_strings, [-1])  # (max_contexts)\n        split_target = tf.string_split(flat_target_strings, delimiter='|',\n                                       skip_empty=False)  # (max_contexts, max_name_parts)\n        sparse_split_target = tf.sparse.SparseTensor(indices=split_target.indices, values=split_target.values,\n                                                     dense_shape=[self.config.MAX_CONTEXTS,\n                                                                  tf.maximum(tf.to_int64(self.config.MAX_NAME_PARTS),\n                                                                             split_target.dense_shape[1])])\n        dense_split_target = tf.sparse.to_dense(sp_input=sparse_split_target,\n                                                default_value=Common.PAD)  # (max_contexts, max_name_parts)\n        dense_split_target = tf.slice(dense_split_target, [0, 0], [-1, self.config.MAX_NAME_PARTS])\n        path_target_indices = self.subtoken_table.lookup(dense_split_target)  # (max_contexts, max_name_parts)\n        path_target_lengths = tf.reduce_sum(tf.cast(tf.not_equal(dense_split_target, Common.PAD), tf.int32),\n                                            -1)  # (max_contexts)\n\n        valid_contexts_mask = tf.to_float(tf.not_equal(\n            tf.reduce_max(path_source_indices, -1) + tf.reduce_max(node_indices, -1) + tf.reduce_max(\n                path_target_indices, -1), 0))\n\n        return {TARGET_STRING_KEY: word, TARGET_INDEX_KEY: target_word_labels,\n                TARGET_LENGTH_KEY: clipped_target_lengths,\n                PATH_SOURCE_INDICES_KEY: path_source_indices, NODE_INDICES_KEY: node_indices,\n                PATH_TARGET_INDICES_KEY: path_target_indices, VALID_CONTEXT_MASK_KEY: valid_contexts_mask,\n                PATH_SOURCE_LENGTHS_KEY: path_source_lengths, PATH_LENGTHS_KEY: path_lengths,\n                PATH_TARGET_LENGTHS_KEY: path_target_lengths, PATH_SOURCE_STRINGS_KEY: path_source_strings,\n                
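# raw context strings are passed through so predict() can map attention weights back to paths\n                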
PATH_STRINGS_KEY: path_strings, PATH_TARGET_STRINGS_KEY: path_target_strings\n                }\n\n    def reset(self, sess):\n        sess.run(self.reset_op)\n\n    def get_output(self):\n        return self.output_tensors\n\n    def compute_output(self):\n        dataset = tf.data.experimental.CsvDataset(self.file_path, record_defaults=self.record_defaults, field_delim=' ',\n                                                  use_quote_delim=False, buffer_size=self.config.CSV_BUFFER_SIZE)\n\n        if not self.is_evaluating:\n            if self.config.SAVE_EVERY_EPOCHS > 1:\n                dataset = dataset.repeat(self.config.SAVE_EVERY_EPOCHS)\n            dataset = dataset.shuffle(self.config.SHUFFLE_BUFFER_SIZE, reshuffle_each_iteration=True)\n        dataset = dataset.apply(tf.data.experimental.map_and_batch(\n            map_func=self.process_dataset, batch_size=self.batch_size,\n            num_parallel_batches=self.config.READER_NUM_PARALLEL_BATCHES))\n        dataset = dataset.prefetch(tf.contrib.data.AUTOTUNE)\n        self.iterator = dataset.make_initializable_iterator()\n        self.reset_op = self.iterator.initializer\n        return self.iterator.get_next()\n\n\nif __name__ == '__main__':\n    target_word_to_index = {Common.PAD: 0, Common.UNK: 1, Common.SOS: 2,\n                            'a': 3, 'b': 4, 'c': 5, 'd': 6, 't': 7}\n    subtoken_to_index = {Common.PAD: 0, Common.UNK: 1, 'a': 2, 'b': 3, 'c': 4, 'd': 5}\n    node_to_index = {Common.PAD: 0, Common.UNK: 1, '1': 2, '2': 3, '3': 4, '4': 5}\n    import numpy as np\n\n\n    class Config:\n        def __init__(self):\n            self.SAVE_EVERY_EPOCHS = 1\n            self.TRAIN_PATH = self.TEST_PATH = 'test_input/test_input'\n            self.BATCH_SIZE = 2\n            self.TEST_BATCH_SIZE = self.BATCH_SIZE\n            self.READER_NUM_PARALLEL_BATCHES = 1\n            self.READING_BATCH_SIZE = 2\n            self.SHUFFLE_BUFFER_SIZE = 100\n            self.MAX_CONTEXTS = 4\n            self.DATA_NUM_CONTEXTS = 4\n            self.MAX_PATH_LENGTH = 3\n            self.MAX_NAME_PARTS = 2\n            self.MAX_TARGET_PARTS = 4\n            self.RANDOM_CONTEXTS = True\n            self.CSV_BUFFER_SIZE = None\n\n\n    config = Config()\n    reader = Reader(subtoken_to_index, target_word_to_index, node_to_index, config, False)\n\n    output = reader.get_output()\n    target_index_op = output[TARGET_INDEX_KEY]\n    target_string_op = output[TARGET_STRING_KEY]\n    target_length_op = output[TARGET_LENGTH_KEY]\n    path_source_indices_op = output[PATH_SOURCE_INDICES_KEY]\n    node_indices_op = output[NODE_INDICES_KEY]\n    path_target_indices_op = output[PATH_TARGET_INDICES_KEY]\n    valid_context_mask_op = output[VALID_CONTEXT_MASK_KEY]\n    path_source_lengths_op = output[PATH_SOURCE_LENGTHS_KEY]\n    path_lengths_op = output[PATH_LENGTHS_KEY]\n    path_target_lengths_op = output[PATH_TARGET_LENGTHS_KEY]\n    path_source_strings_op = output[PATH_SOURCE_STRINGS_KEY]\n    path_strings_op = output[PATH_STRINGS_KEY]\n    path_target_strings_op = output[PATH_TARGET_STRINGS_KEY]\n\n    sess = tf.InteractiveSession()\n    tf.group(tf.global_variables_initializer(), tf.local_variables_initializer(), tf.tables_initializer()).run()\n    reader.reset(sess)\n\n    try:\n        while True:\n            target_indices, target_strings, target_lengths, path_source_indices, \\\n            node_indices, path_target_indices, valid_context_mask, path_source_lengths, \\\n            path_lengths, path_target_lengths, path_source_strings, 
path_strings, \\\n            path_target_strings = sess.run(\n                [target_index_op, target_string_op, target_length_op, path_source_indices_op,\n                 node_indices_op, path_target_indices_op, valid_context_mask_op, path_source_lengths_op,\n                 path_lengths_op, path_target_lengths_op, path_source_strings_op, path_strings_op,\n                 path_target_strings_op])\n\n            print('Target strings: ', Common.binary_to_string_list(target_strings))\n            print('Context strings: ', Common.binary_to_string_3d(\n                np.concatenate([path_source_strings, path_strings, path_target_strings], -1)))\n            print('Target indices: ', target_indices)\n            print('Target lengths: ', target_lengths)\n            print('Path source strings: ', Common.binary_to_string_3d(path_source_strings))\n            print('Path source indices: ', path_source_indices)\n            print('Path source lengths: ', path_source_lengths)\n            print('Path strings: ', Common.binary_to_string_3d(path_strings))\n            print('Node indices: ', node_indices)\n            print('Path lengths: ', path_lengths)\n            print('Path target strings: ', Common.binary_to_string_3d(path_target_strings))\n            print('Path target indices: ', path_target_indices)\n            print('Path target lengths: ', path_target_lengths)\n            print('Valid context mask: ', valid_context_mask)\n\n    except tf.errors.OutOfRangeError:\n        print('Done training, epoch reached')\n"
  },
  {
    "path": "train.sh",
    "content": "###########################################################\n# Change the following values to train a new model.\n# type: the name of the new model, only affects the saved file name.\n# dataset: the name of the dataset, as was preprocessed using preprocess.sh\n# test_data: by default, points to the validation set, since this is the set that\n#   will be evaluated after each training iteration. If you wish to test\n#   on the final (held-out) test set, change 'val' to 'test'.\ntype=java-large-model\ndataset_name=java-large\ndata_dir=data/java-large\ndata=${data_dir}/${dataset_name}\ntest_data=${data_dir}/${dataset_name}.val.c2s\nmodel_dir=models/${type}\n\nmkdir -p ${model_dir}\nset -e\npython3 -u code2seq.py --data ${data} --test ${test_data} --save_prefix ${model_dir}/model\n"
  },
  {
    "path": "train_python150k.sh",
    "content": "#!/usr/bin/env bash\n\ndata_dir=$1\ndata_name=$(basename \"${data_dir}\")\ndata=${data_dir}/${data_name}\ntest=${data_dir}/${data_name}.val.c2s\nrun_name=$2\nmodel_dir=models/python150k-${run_name}\nsave_prefix=${model_dir}/model\ncuda=${3:-0}\nseed=${4:-239}\n\nmkdir -p \"${model_dir}\"\nset -e\nCUDA_VISIBLE_DEVICES=$cuda python -u code2seq.py \\\n  --data=\"${data}\" \\\n  --test=\"${test}\" \\\n  --save_prefix=\"${save_prefix}\" \\\n  --seed=\"${seed}\"\n"
  }
]